SlowGuess committed (verified)
Commit 814e446 · 1 Parent(s): 14f8356

Add Batch 959a0644-2af7-44d3-990b-088eeb23c6fc

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +64 -0
  2. 2401.12xxx/2401.12503/4bc10b6c-537b-4aac-b190-2c35591d39a5_content_list.json +1602 -0
  3. 2401.12xxx/2401.12503/4bc10b6c-537b-4aac-b190-2c35591d39a5_model.json +2226 -0
  4. 2401.12xxx/2401.12503/4bc10b6c-537b-4aac-b190-2c35591d39a5_origin.pdf +3 -0
  5. 2401.12xxx/2401.12503/full.md +303 -0
  6. 2401.12xxx/2401.12503/images.zip +3 -0
  7. 2401.12xxx/2401.12503/layout.json +0 -0
  8. 2401.12xxx/2401.12554/1ac8e6f1-9e91-4660-91d5-33063262f8cf_content_list.json +0 -0
  9. 2401.12xxx/2401.12554/1ac8e6f1-9e91-4660-91d5-33063262f8cf_model.json +0 -0
  10. 2401.12xxx/2401.12554/1ac8e6f1-9e91-4660-91d5-33063262f8cf_origin.pdf +3 -0
  11. 2401.12xxx/2401.12554/full.md +493 -0
  12. 2401.12xxx/2401.12554/images.zip +3 -0
  13. 2401.12xxx/2401.12554/layout.json +0 -0
  14. 2401.12xxx/2401.12586/fc908757-3ff6-4bd6-a609-8ec6463c04cf_content_list.json +0 -0
  15. 2401.12xxx/2401.12586/fc908757-3ff6-4bd6-a609-8ec6463c04cf_model.json +0 -0
  16. 2401.12xxx/2401.12586/fc908757-3ff6-4bd6-a609-8ec6463c04cf_origin.pdf +3 -0
  17. 2401.12xxx/2401.12586/full.md +0 -0
  18. 2401.12xxx/2401.12586/images.zip +3 -0
  19. 2401.12xxx/2401.12586/layout.json +0 -0
  20. 2401.12xxx/2401.12592/e8da1853-39d1-41e6-8ee3-71b374b562d5_content_list.json +1206 -0
  21. 2401.12xxx/2401.12592/e8da1853-39d1-41e6-8ee3-71b374b562d5_model.json +0 -0
  22. 2401.12xxx/2401.12592/e8da1853-39d1-41e6-8ee3-71b374b562d5_origin.pdf +3 -0
  23. 2401.12xxx/2401.12592/full.md +280 -0
  24. 2401.12xxx/2401.12592/images.zip +3 -0
  25. 2401.12xxx/2401.12592/layout.json +0 -0
  26. 2401.12xxx/2401.12599/1b5a3a10-2f46-443f-9cb0-ad6eb32b9945_content_list.json +0 -0
  27. 2401.12xxx/2401.12599/1b5a3a10-2f46-443f-9cb0-ad6eb32b9945_model.json +0 -0
  28. 2401.12xxx/2401.12599/1b5a3a10-2f46-443f-9cb0-ad6eb32b9945_origin.pdf +3 -0
  29. 2401.12xxx/2401.12599/full.md +906 -0
  30. 2401.12xxx/2401.12599/images.zip +3 -0
  31. 2401.12xxx/2401.12599/layout.json +0 -0
  32. 2401.12xxx/2401.12603/4f4ba12d-6258-4c7f-8b4d-1da802d6095f_content_list.json +1306 -0
  33. 2401.12xxx/2401.12603/4f4ba12d-6258-4c7f-8b4d-1da802d6095f_model.json +1807 -0
  34. 2401.12xxx/2401.12603/4f4ba12d-6258-4c7f-8b4d-1da802d6095f_origin.pdf +3 -0
  35. 2401.12xxx/2401.12603/full.md +234 -0
  36. 2401.12xxx/2401.12603/images.zip +3 -0
  37. 2401.12xxx/2401.12603/layout.json +0 -0
  38. 2401.12xxx/2401.12665/0c8eb889-4021-4ec5-b2eb-27542fa21f64_content_list.json +0 -0
  39. 2401.12xxx/2401.12665/0c8eb889-4021-4ec5-b2eb-27542fa21f64_model.json +0 -0
  40. 2401.12xxx/2401.12665/0c8eb889-4021-4ec5-b2eb-27542fa21f64_origin.pdf +3 -0
  41. 2401.12xxx/2401.12665/full.md +611 -0
  42. 2401.12xxx/2401.12665/images.zip +3 -0
  43. 2401.12xxx/2401.12665/layout.json +0 -0
  44. 2401.12xxx/2401.12690/144bbb49-024f-4544-960f-9726a73d392b_content_list.json +1633 -0
  45. 2401.12xxx/2401.12690/144bbb49-024f-4544-960f-9726a73d392b_model.json +2077 -0
  46. 2401.12xxx/2401.12690/144bbb49-024f-4544-960f-9726a73d392b_origin.pdf +3 -0
  47. 2401.12xxx/2401.12690/full.md +319 -0
  48. 2401.12xxx/2401.12690/images.zip +3 -0
  49. 2401.12xxx/2401.12690/layout.json +0 -0
  50. 2401.12xxx/2401.12698/5b3c828e-248a-44b0-ba84-ef9eb969c11d_content_list.json +0 -0
.gitattributes CHANGED
@@ -10262,3 +10262,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10262
  2402.01xxx/2402.01712/cd860e7e-cea9-4cfb-a584-55721b6c908e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10263
  2402.01xxx/2402.01715/574ddcbe-6659-413e-ac39-a0e219ba994e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10264
  2402.09xxx/2402.09432/5de9be86-d49c-4d86-93d0-30bcff928d14_origin.pdf filter=lfs diff=lfs merge=lfs -text
10265
+ 2401.12xxx/2401.12503/4bc10b6c-537b-4aac-b190-2c35591d39a5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10266
+ 2401.12xxx/2401.12554/1ac8e6f1-9e91-4660-91d5-33063262f8cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
10267
+ 2401.12xxx/2401.12586/fc908757-3ff6-4bd6-a609-8ec6463c04cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
10268
+ 2401.12xxx/2401.12592/e8da1853-39d1-41e6-8ee3-71b374b562d5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10269
+ 2401.12xxx/2401.12599/1b5a3a10-2f46-443f-9cb0-ad6eb32b9945_origin.pdf filter=lfs diff=lfs merge=lfs -text
10270
+ 2401.12xxx/2401.12603/4f4ba12d-6258-4c7f-8b4d-1da802d6095f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10271
+ 2401.12xxx/2401.12665/0c8eb889-4021-4ec5-b2eb-27542fa21f64_origin.pdf filter=lfs diff=lfs merge=lfs -text
10272
+ 2401.12xxx/2401.12690/144bbb49-024f-4544-960f-9726a73d392b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10273
+ 2401.12xxx/2401.12698/5b3c828e-248a-44b0-ba84-ef9eb969c11d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10274
+ 2401.12xxx/2401.12699/7fa20597-4f73-4d48-ba74-82d111e4ed84_origin.pdf filter=lfs diff=lfs merge=lfs -text
10275
+ 2401.12xxx/2401.12794/b0a55fcd-9544-4de9-a6d0-3b0e5f7caf57_origin.pdf filter=lfs diff=lfs merge=lfs -text
10276
+ 2401.12xxx/2401.12863/ad74e2fe-edec-4df4-b71f-13f4dfbacc4c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10277
+ 2401.12xxx/2401.12869/21765f2c-e8c2-4cea-a533-32f101105e96_origin.pdf filter=lfs diff=lfs merge=lfs -text
10278
+ 2401.12xxx/2401.12873/eca0a954-b6f9-4167-8d45-8ec219bbc36e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10279
+ 2401.12xxx/2401.12874/00e5ce0f-d0f6-45eb-83f2-8a9b5de112ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
10280
+ 2401.12xxx/2401.12902/14617c4f-c0d7-4235-a062-1d846030adbd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10281
+ 2401.12xxx/2401.12915/3f5768a8-cb38-4935-86c3-53ee3f7c4842_origin.pdf filter=lfs diff=lfs merge=lfs -text
10282
+ 2401.12xxx/2401.12926/e8b18a2c-5ab0-414b-8e7f-8eecebf16cb2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10283
+ 2401.12xxx/2401.12945/6b08ec7f-c3ef-4a98-815b-8219d14c987c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10284
+ 2401.12xxx/2401.12954/cf16a8a0-c4b3-4faf-a9fa-cdbdad4ff77a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10285
+ 2401.12xxx/2401.12963/3920502f-2f0b-4f6a-adba-11ecb6c74027_origin.pdf filter=lfs diff=lfs merge=lfs -text
10286
+ 2401.12xxx/2401.12970/d869166e-9cde-48f1-8d3a-093c1521f26a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10287
+ 2401.12xxx/2401.12973/0bb47d17-41b5-49c5-9e12-085397d9fb81_origin.pdf filter=lfs diff=lfs merge=lfs -text
10288
+ 2401.12xxx/2401.12975/29aa928e-9cde-4527-abf3-4592422e9011_origin.pdf filter=lfs diff=lfs merge=lfs -text
10289
+ 2401.13xxx/2401.13110/677981e8-eb1c-4550-9bca-410e8ee1f532_origin.pdf filter=lfs diff=lfs merge=lfs -text
10290
+ 2401.13xxx/2401.13136/5d6dd850-ebae-4f1a-aa13-78e89b413bc0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10291
+ 2401.13xxx/2401.13138/bc50ec8d-c7d9-4cd2-9301-4de1238b4676_origin.pdf filter=lfs diff=lfs merge=lfs -text
10292
+ 2401.13xxx/2401.13154/45fc2a37-8639-493f-9cb5-9cd20de981e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10293
+ 2401.13xxx/2401.13169/d2043fd5-ae6c-4584-b606-420a0098c502_origin.pdf filter=lfs diff=lfs merge=lfs -text
10294
+ 2401.13xxx/2401.13178/954431ab-541a-49db-bbfb-c6379482d786_origin.pdf filter=lfs diff=lfs merge=lfs -text
10295
+ 2401.13xxx/2401.13220/39d5e31a-29a3-473b-8855-c077a037f1ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
10296
+ 2401.13xxx/2401.13256/58a97ffd-0e37-4e27-84b9-78593ed9f0c1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10297
+ 2401.13xxx/2401.13260/933588fc-7bc7-4767-9a73-3ac75f681d58_origin.pdf filter=lfs diff=lfs merge=lfs -text
10298
+ 2401.13xxx/2401.13266/83d99778-baaa-441b-a612-064311b7c005_origin.pdf filter=lfs diff=lfs merge=lfs -text
10299
+ 2401.13xxx/2401.13275/a0a075f4-c44c-48f7-8863-e23f5811c1cb_origin.pdf filter=lfs diff=lfs merge=lfs -text
10300
+ 2401.13xxx/2401.13298/887d6ba3-3dc2-48c2-b944-745ee00821b9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10301
+ 2401.13xxx/2401.13303/94b2af14-39b6-4645-a479-ab54a541d0e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10302
+ 2401.13xxx/2401.13313/4f7248f9-0c52-47f6-af4e-95b81a8c40e8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10303
+ 2401.13xxx/2401.13420/ef47a068-acd8-47a6-90f9-1af4cfb3a384_origin.pdf filter=lfs diff=lfs merge=lfs -text
10304
+ 2401.13xxx/2401.13505/3dc51df7-2f38-4f2d-a220-01ec84ba4bad_origin.pdf filter=lfs diff=lfs merge=lfs -text
10305
+ 2401.13xxx/2401.13512/5b9a6d2b-37b9-4b4f-865d-b077018577c5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10306
+ 2401.13xxx/2401.13527/bfe93040-0f5e-4536-be37-3f0ff71c1a4e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10307
+ 2401.13xxx/2401.13531/c69f4c2e-0d66-4c4f-b748-fe9d41e6e01b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10308
+ 2401.13xxx/2401.13537/1b2f51d7-6227-400b-9a43-8aa2f3d7e439_origin.pdf filter=lfs diff=lfs merge=lfs -text
10309
+ 2401.13xxx/2401.13544/57ef9e1d-226e-4f74-8c29-2d4b59b1df93_origin.pdf filter=lfs diff=lfs merge=lfs -text
10310
+ 2401.13xxx/2401.13554/be8f5fc6-0659-4e3f-bef7-2655374ee794_origin.pdf filter=lfs diff=lfs merge=lfs -text
10311
+ 2401.13xxx/2401.13560/7340db5d-093e-49d9-bdce-0b831a6cb247_origin.pdf filter=lfs diff=lfs merge=lfs -text
10312
+ 2401.13xxx/2401.13598/5872aaa3-6086-4e40-bc02-3f9dddd08414_origin.pdf filter=lfs diff=lfs merge=lfs -text
10313
+ 2401.13xxx/2401.13601/3c9f5b52-bcf8-4a84-81dc-d49d66505649_origin.pdf filter=lfs diff=lfs merge=lfs -text
10314
+ 2401.13xxx/2401.13627/043657c8-de62-4d5e-852f-3a240360ce57_origin.pdf filter=lfs diff=lfs merge=lfs -text
10315
+ 2401.13xxx/2401.13641/60476ae9-d7a8-42c4-ae87-080d8c2323d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10316
+ 2401.13xxx/2401.13660/029a0a92-a53d-4558-be33-76b8cb4bcf35_origin.pdf filter=lfs diff=lfs merge=lfs -text
10317
+ 2401.13xxx/2401.13716/8aae8036-88ac-4cc1-b63f-4ce02c002205_origin.pdf filter=lfs diff=lfs merge=lfs -text
10318
+ 2401.13xxx/2401.13726/3cb7f8ee-8ddd-4246-95e4-a93c8fa1353f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10319
+ 2401.13xxx/2401.13744/2659f5bd-74aa-4cef-a254-812312bfcdbc_origin.pdf filter=lfs diff=lfs merge=lfs -text
10320
+ 2401.13xxx/2401.13794/5bba3a11-85d4-4e18-a6be-2c8674076353_origin.pdf filter=lfs diff=lfs merge=lfs -text
10321
+ 2401.13xxx/2401.13796/1905ff53-bb9c-4f78-bc82-a146949ec9f9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10322
+ 2401.13xxx/2401.13802/2fcbea65-eea1-4003-bbb8-8ae2aaddb072_origin.pdf filter=lfs diff=lfs merge=lfs -text
10323
+ 2401.13xxx/2401.13803/74c3c2d6-e4eb-48e3-85f8-f3a833727acd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10324
+ 2401.13xxx/2401.13810/9a67cfda-55a5-4a6b-b7a7-efcab65c46d9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10325
+ 2401.14xxx/2401.14423/a4839dcf-9d3c-436a-abca-72df90a13e42_origin.pdf filter=lfs diff=lfs merge=lfs -text
10326
+ 2401.14xxx/2401.14428/85975a4a-a8ce-466a-96ce-83a5d85389e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10327
+ 2401.16xxx/2401.16212/e2dd918d-2cac-4025-8669-5f7d79efc61b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10328
+ 2402.01xxx/2402.01694/a15b1fd7-3b9d-4bd3-bee2-8a8f032c0523_origin.pdf filter=lfs diff=lfs merge=lfs -text
2401.12xxx/2401.12503/4bc10b6c-537b-4aac-b190-2c35591d39a5_content_list.json ADDED
@@ -0,0 +1,1602 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Small Language Model Meets with Reinforced Vision Vocabulary",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 300,
8
+ 122,
9
+ 722,
10
+ 172
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Haoran Wei $^{1,*}$ Lingyu Kong $^{2,*}$ Jinyue Chen $^{2}$ Liang Zhao $^{1}$ \nZheng Ge $^{1\\dagger}$ En Yu $^{3}$ Jianjian Sun $^{1}$ Chunrui Han $^{1}$ Xiangyu Zhang $^{1}$ $^{1}$ MEGVII Technology University of Chinese Academy of Sciences \n $^{3}$ Huazhong University of Science and Technology \nhttps://varytoy.github.io/",
17
+ "bbox": [
18
+ 235,
19
+ 224,
20
+ 759,
21
+ 299
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract",
28
+ "text_level": 1,
29
+ "bbox": [
30
+ 459,
31
+ 333,
32
+ 537,
33
+ 349
34
+ ],
35
+ "page_idx": 0
36
+ },
37
+ {
38
+ "type": "text",
39
+ "text": "Playing Large Vision Language Models (LVLMs) in 2023 is trendy among the AI community. However, the relatively large number of parameters (more than 7B) of popular LVLMs makes it difficult to train and deploy on consumer GPUs, discouraging many researchers with limited resources. Imagine how cool it would be to experience all the features of current LVLMs on an old GTX1080ti (our only game card). Accordingly, we present Vary-toy in this report, a small-size Vary along with Qwen-1.8B as the base \"large\" language model. In Vary-toy, we introduce an improved vision vocabulary, allowing the model to not only possess all features of Vary but also gather more generality. Specifically, we replace negative samples of natural images with positive sample data driven by object detection in the procedure of generating vision vocabulary, more sufficiently utilizing the capacity of the vocabulary network and enabling it to efficiently encode visual information corresponding to natural objects. For experiments, Vary-toy can achieve $65.6\\%$ ANLS on DocVQA, $59.1\\%$ accuracy on ChartQA, $88.1\\%$ accuracy on RefCOCO, and $29\\%$ on MMVet. The code will be publicly available on the homepage.",
40
+ "bbox": [
41
+ 228,
42
+ 364,
43
+ 767,
44
+ 574
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "1 Introduction",
51
+ "text_level": 1,
52
+ "bbox": [
53
+ 171,
54
+ 599,
55
+ 313,
56
+ 616
57
+ ],
58
+ "page_idx": 0
59
+ },
60
+ {
61
+ "type": "text",
62
+ "text": "Large Vision Language Model (LVLM) is one of the hottest research topics [1, 22, 26, 34, 48, 60] in the field of artificial intelligence among the last year. The exciting part is that one LVLM can achieve satisfactory performance in many downstream tasks [4, 24, 30, 32, 41, 45] guided by different prompts. However, there is still significant room for improvement in LVLM's overall image perception capacity. Intuitively, an advanced perceptual ability for visual concepts is essential to enhance the further development and implementation of a model. We deem that there are two main challenges to achieve that: 1) the shortcomings of the current vision vocabulary network [35, 48] in extracting rich visual information; 2) the huge model iteration cost in the optimization of a large number of parameters.",
63
+ "bbox": [
64
+ 169,
65
+ 632,
66
+ 826,
67
+ 744
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "text",
73
+ "text": "As aforementioned, current LVLMs demonstrate amazing ability in many tasks, especially the Computer Vision (CV) and Natural Language Processing (NLP) intersected ones (e.g., image caption [24], VQA [41], memes understanding, scene OCR [32], etc), based on the almost perfect vision vocabulary network — CLIP [35]. The structures of popular LVLMs can be divided into two main streams: 1) image tokens as prefixes like MetaLM [14]; 2) cross-attention for feature fusion like Flamingo [1]. Regardless of which structure is used, the upper limit of the model may be hindered by the visual signals encoding efficiency of its vision vocabulary network. To break through the potential bottleneck, Vary [48] introduces a simple and effective manner to scale up the vision",
74
+ "bbox": [
75
+ 169,
76
+ 750,
77
+ 826,
78
+ 861
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "aside_text",
84
+ "text": "arXiv:2401.12503v1 [cs.CV] 23 Jan 2024",
85
+ "bbox": [
86
+ 22,
87
+ 267,
88
+ 57,
89
+ 707
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "page_footnote",
95
+ "text": "*Equal contribution",
96
+ "bbox": [
97
+ 189,
98
+ 871,
99
+ 312,
100
+ 883
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "page_footnote",
106
+ "text": "†Project leader",
107
+ "bbox": [
108
+ 192,
109
+ 883,
110
+ 284,
111
+ 898
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "footer",
117
+ "text": "Tech Report",
118
+ "bbox": [
119
+ 171,
120
+ 922,
121
+ 250,
122
+ 936
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "image",
128
+ "img_path": "images/c6b20cd7b3d326e2efa15fd7818f9a47fd9384c76ee95988050ec8d0db5c1fbe.jpg",
129
+ "image_caption": [
130
+ "Figure 1: Features of Vary-toy. Based on a 1.8B language model, Vary-toy can achieve all features of vanilla Vary-base, including document OCR, image caption, VQA, general conversation, and so on. Besides, we introduce the natural object perception (location) ability for Vary-toy. Most importantly, with just only a single GTX1080ti GPU, you can experience all of the above."
131
+ ],
132
+ "image_footnote": [],
133
+ "bbox": [
134
+ 176,
135
+ 88,
136
+ 820,
137
+ 438
138
+ ],
139
+ "page_idx": 1
140
+ },
141
+ {
142
+ "type": "text",
143
+ "text": "vocabulary for an LVLM. The scaling law is to first train a new visual vocabulary network using a small auto-regressive model (OPT-125M [57]), and then merge the old and new vocabularies to form the final LVLM (Vary-base [48]). However, Vary suffers two drawbacks to being a user-friendly baseline: 1) The waste of network capacity in the new vision vocabulary (which in vanilla Vary is only used to compress text information in PDF images). 2) The Vary-base with 7B LLM takes high iteration costs (requiring multiple A100 machines to train).",
144
+ "bbox": [
145
+ 169,
146
+ 545,
147
+ 823,
148
+ 630
149
+ ],
150
+ "page_idx": 1
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "In this report, we present a small-size Vary, i.e., Vary-toy, to alleviate the aforementioned issues. Overall, Vary-toy enjoys the same pipeline as vanilla Vary, including a vision vocabulary generating and scaling up processes. Considering the original Vary masks natural images as negative samples during the creation of a new visual vocabulary. We believe this procedure, to some extent, wastes network capacity, leaving room for optimization. Instead, we regard the natural image as the object detection task [6, 19, 23, 37, 38, 49, 59]. Thus in processing the vision vocabulary, we incorporate both dense textual data (PDF) and natural object location data into the vocabulary network of Vary-toy, making it more universal. After completing the new and reinforced vocabulary, we merge it with the genuine $(224\\times 224)$ CLIP and then integrate them into a 1.8B language model [2].",
155
+ "bbox": [
156
+ 169,
157
+ 635,
158
+ 826,
159
+ 760
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "In experiments, we report metrics on several challenging benchmarks, i.e., DocVQA [30], ChartQA [29], MMvet [54], and RefCOCO [15]. Specifically, Vary-toy can achieve $65.6\\%$ ANLS on DocVQA, $59.1\\%$ accuracy on ChartQA, $29\\%$ accuracy on MMvet, and $88.1\\%$ accuracy on RefCOCO val. More specifically, it can gather on par performance compared to Qwen-VL-7B [3] on DocVQA and RefCOCO as well as a better accuracy than LLaVA-7B [26] on the general benchmark MMVet.",
166
+ "bbox": [
167
+ 169,
168
+ 766,
169
+ 826,
170
+ 835
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "In conclusion, Vary-toy is a toy because it is at least three times smaller compared to popular LVLMs $(>7\\mathrm{B})$ . Vary-toy is not a toy due to its demonstrated excellent potential in challenging tasks. We believe that Vary-toy still enjoys many improvement rooms and we hope that our small-size LVLM can encourage more attention in corresponding research and become a practical baseline, especially for those researchers with limited resources.",
177
+ "bbox": [
178
+ 169,
179
+ 842,
180
+ 823,
181
+ 910
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "page_number",
187
+ "text": "2",
188
+ "bbox": [
189
+ 493,
190
+ 935,
191
+ 503,
192
+ 946
193
+ ],
194
+ "page_idx": 1
195
+ },
196
+ {
197
+ "type": "image",
198
+ "img_path": "images/4183366d5553e76d6c60c2e01bc242513099651a3eecd0c75beca4a79e824185.jpg",
199
+ "image_caption": [
200
+ "Figure 2: Architecture of the Vary-toy. We utilize the Vary-tiny+ pipeline to generate the new vision vocabulary of Vary-toy. Such vision vocabulary can efficiently encode dense text and natural object location information into tokens. Based on the improved vocabulary, Vary-toy not only possesses all the previous features (document OCR) but also handles object detection tasks well."
201
+ ],
202
+ "image_footnote": [],
203
+ "bbox": [
204
+ 181,
205
+ 93,
206
+ 815,
207
+ 424
208
+ ],
209
+ "page_idx": 2
210
+ },
211
+ {
212
+ "type": "text",
213
+ "text": "2 Related Works",
214
+ "text_level": 1,
215
+ "bbox": [
216
+ 171,
217
+ 521,
218
+ 330,
219
+ 537
220
+ ],
221
+ "page_idx": 2
222
+ },
223
+ {
224
+ "type": "text",
225
+ "text": "Over the past years, Large Language Models (LLMs), such as the GPT family [5, 34, 36], LLaMA family [8, 42, 44], OPT [57], and the GLM family [55] gain significantly advanced performance in NLP tasks. With the help of LLMs' language reasoning abilities, Vision Language Models (VLMs) like Flamingo [1], BLIP2 [22], LLaVA [25, 26], Vary [48], etc [3, 12, 53, 58, 60] have achieved impressive results in various computer vision tasks such as image caption [24], VQA [4, 30, 32], image generation [12], visual grounding [3, 53, 60], document OCR [48] and so on. These models not only can follow human instructions but also possess remarkable few-shot and even zero-shot learning abilities, thereby driving the AI community toward the development of artificial general intelligence (AGI).",
226
+ "bbox": [
227
+ 169,
228
+ 553,
229
+ 823,
230
+ 679
231
+ ],
232
+ "page_idx": 2
233
+ },
234
+ {
235
+ "type": "text",
236
+ "text": "However, most popular open-source VLMs are parameter-heavy, with sizes like 7B (e.g., Qwen-VL [3] and mPIUG-Owl [52]) or 13B [26], which to some extent hinder the participation of researchers with limited resources and poses challenges for the implementation of VLMs in resource-constrained environments like home computer. Recently, there has been a growing interest in and development of smaller language models, such as Phi-2 (2.7B) [31] and Qwen-1.8B [2] for NLP tasks, and Gemini-nano (1.8B/3.25B) [43], MobileVLM (1.4B/2.7B) [9] for vision-language tasks.",
237
+ "bbox": [
238
+ 169,
239
+ 684,
240
+ 823,
241
+ 768
242
+ ],
243
+ "page_idx": 2
244
+ },
245
+ {
246
+ "type": "text",
247
+ "text": "In this report, Vary-toy will be an open-source small model that possesses features of the most popular LVLMs and demonstrates exceptional potential in fine-grained perception tasks.",
248
+ "bbox": [
249
+ 169,
250
+ 773,
251
+ 823,
252
+ 803
253
+ ],
254
+ "page_idx": 2
255
+ },
256
+ {
257
+ "type": "text",
258
+ "text": "3 Method",
259
+ "text_level": 1,
260
+ "bbox": [
261
+ 171,
262
+ 823,
263
+ 272,
264
+ 839
265
+ ],
266
+ "page_idx": 2
267
+ },
268
+ {
269
+ "type": "text",
270
+ "text": "In this section, we will delve into the details of how to devise Vary-toy. As shown in Figure 2, there are two main parts in implementing the model: 1) how to generate a more practical vision vocabulary based on the Vary-tiny+ pipeline. 2) how to utilize the new vision vocabulary to make the 1.8B Vary-toy gather new features on the premise of not harming the original model features.",
271
+ "bbox": [
272
+ 169,
273
+ 854,
274
+ 823,
275
+ 912
276
+ ],
277
+ "page_idx": 2
278
+ },
279
+ {
280
+ "type": "page_number",
281
+ "text": "3",
282
+ "bbox": [
283
+ 493,
284
+ 935,
285
+ 503,
286
+ 946
287
+ ],
288
+ "page_idx": 2
289
+ },
290
+ {
291
+ "type": "text",
292
+ "text": "3.1 Generating A Reinforced Vision Vocabulary Upon Vary-tiny+",
293
+ "text_level": 1,
294
+ "bbox": [
295
+ 171,
296
+ 90,
297
+ 642,
298
+ 107
299
+ ],
300
+ "page_idx": 3
301
+ },
302
+ {
303
+ "type": "text",
304
+ "text": "Vary-tiny [48] is a tiny vision language model to generate a specific PDF-parsing vision vocabulary for Vary. The vision vocabulary network comprises a SAM-base [17] main body and paired convolutions to reshape the output, enjoying about 80M parameters. Experiments in Vary prove that using the SAM initializing to gain intensive text perception is effective. However, the vocabulary-generating procedure in vanilla Vary suffers the risk of forgetting SAM's original natural object perception ability. What's more, we also think that writing only the visual knowledge of dense text into an 80M network is wasteful. Thus we generate a new and more reasonable vision vocabulary upon the Vary-tiny+ pipeline.",
305
+ "bbox": [
306
+ 169,
307
+ 116,
308
+ 826,
309
+ 227
310
+ ],
311
+ "page_idx": 3
312
+ },
313
+ {
314
+ "type": "text",
315
+ "text": "Provide the OCR results of this image:",
316
+ "text_level": 1,
317
+ "bbox": [
318
+ 179,
319
+ 246,
320
+ 352,
321
+ 258
322
+ ],
323
+ "page_idx": 3
324
+ },
325
+ {
326
+ "type": "image",
327
+ "img_path": "images/e17543c3aae7ef2d529f87642358dd2c47cdd34338dc15acce6a04cfa4a00ba4.jpg",
328
+ "image_caption": [],
329
+ "image_footnote": [],
330
+ "bbox": [
331
+ 183,
332
+ 266,
333
+ 200,
334
+ 276
335
+ ],
336
+ "page_idx": 3
337
+ },
338
+ {
339
+ "type": "text",
340
+ "text": "MARKETS AND STRATEGY",
341
+ "text_level": 1,
342
+ "bbox": [
343
+ 243,
344
+ 268,
345
+ 312,
346
+ 277
347
+ ],
348
+ "page_idx": 3
349
+ },
350
+ {
351
+ "type": "text",
352
+ "text": "have also taken up this practice. It can be a very successful way of introducing new products and services to existing customers, up-selling customers, or influencing them to purchase more products.",
353
+ "bbox": [
354
+ 178,
355
+ 285,
356
+ 375,
357
+ 305
358
+ ],
359
+ "page_idx": 3
360
+ },
361
+ {
362
+ "type": "text",
363
+ "text": "Loyalty Programs",
364
+ "text_level": 1,
365
+ "bbox": [
366
+ 179,
367
+ 311,
368
+ 246,
369
+ 319
370
+ ],
371
+ "page_idx": 3
372
+ },
373
+ {
374
+ "type": "text",
375
+ "text": "Many companies develop loyalty or frequency-marketing programs in order to further engage the consumers with their products and increase customer loyalty. These programs are very effective for targeting the company's most valuable customers. Most airlines develop frequent-fliter programs, which allow customers to earn points toward their next flight. Other businesses, such as coffee shops, also offer frequency cards, that entitle the customer to a free beverage, for example, after purchasing a certain number of beverages.",
376
+ "bbox": [
377
+ 179,
378
+ 320,
379
+ 375,
380
+ 369
381
+ ],
382
+ "page_idx": 3
383
+ },
384
+ {
385
+ "type": "text",
386
+ "text": "Loyalty programs have been very effective in generating repeat business. They offer an added value to the consumer, whereby the purchaser is not simply enjoying the value of the current purchase, but is being rewarded. It is important, however, that the loyalty program be relative to the product and service offering of the organization and that it should be a means of promoting the brand's importance or frustration if, with an airline ticket as an example, they are unable to redeem their ticket when they want to travel, or if the restrictions on the reward are so high that it is not worth the hassle of redemption.",
387
+ "bbox": [
388
+ 179,
389
+ 369,
390
+ 375,
391
+ 422
392
+ ],
393
+ "page_idx": 3
394
+ },
395
+ {
396
+ "type": "text",
397
+ "text": "PUBLIC RELATIONS AND PUBLICITY",
398
+ "text_level": 1,
399
+ "bbox": [
400
+ 179,
401
+ 431,
402
+ 318,
403
+ 441
404
+ ],
405
+ "page_idx": 3
406
+ },
407
+ {
408
+ "type": "text",
409
+ "text": "An organization's public relations and publicity activities are the means to foster its relationships with its various audiences and to communicate with them. Public relations efforts are undertaken in order to form a favorable view in the public eye. Favorable publicity can enhance an organization's image and increase demand for its products. A positive article or review about a product or service adds credibility, believability, and legitimacy in a much more effective manner than paid-for advertising. Negative publicity, on the other hand, can tarnish an organization's reputation. Most public relations strategies include press releases, special events, and press conferences.",
410
+ "bbox": [
411
+ 179,
412
+ 446,
413
+ 375,
414
+ 506
415
+ ],
416
+ "page_idx": 3
417
+ },
418
+ {
419
+ "type": "text",
420
+ "text": "Press releases are articles or brief news releases that are submitted",
421
+ "bbox": [
422
+ 192,
423
+ 506,
424
+ 374,
425
+ 513
426
+ ],
427
+ "page_idx": 3
428
+ },
429
+ {
430
+ "type": "text",
431
+ "text": "184 MARKETS AND STRATEGY",
432
+ "text_level": 1,
433
+ "bbox": [
434
+ 395,
435
+ 250,
436
+ 509,
437
+ 258
438
+ ],
439
+ "page_idx": 3
440
+ },
441
+ {
442
+ "type": "text",
443
+ "text": "have also taken up this practice. It can be a very successful way of in-producing new products and services to existing customers, up-selling customers, or influencing them to purchase more products.",
444
+ "bbox": [
445
+ 395,
446
+ 258,
447
+ 609,
448
+ 285
449
+ ],
450
+ "page_idx": 3
451
+ },
452
+ {
453
+ "type": "text",
454
+ "text": "Loyalty Programs",
455
+ "text_level": 1,
456
+ "bbox": [
457
+ 395,
458
+ 285,
459
+ 457,
460
+ 292
461
+ ],
462
+ "page_idx": 3
463
+ },
464
+ {
465
+ "type": "text",
466
+ "text": "Many companies develop loyalty or frequency-marketing programs in order to further engage the consumers with their products and increase customer loyalty. These programs are very effective for targeting the company's most valuable customers. Most airlines develop frequent-flyer programs, which allow customers to earn points toward their next flight. Other businesses, such as coffee shops, also offer frequency cards, that entitle the customer to a free beverage, for example, after purchasing a certain number of beverages.",
467
+ "bbox": [
468
+ 395,
469
+ 292,
470
+ 609,
471
+ 353
472
+ ],
473
+ "page_idx": 3
474
+ },
475
+ {
476
+ "type": "text",
477
+ "text": "Loyalty programs have been very effective in generating repeat business. They offer an added value to the consumer, whereby the purchaser is not simply enjoying the value of the current purchase, but is being rewarded. It is important, however, that the loyalty program be relative to the product and service offering of the organization and that the award be attainable. Customers may experience frustration if, with an airline ticket as an example, they are unable to redeem their ticket when they want to travel, or if the restrictions on the reward are so high that it is not worth the hassle of redemption. PUBLIC RELATIONS AND PUBLICITY",
478
+ "bbox": [
479
+ 395,
480
+ 353,
481
+ 609,
482
+ 428
483
+ ],
484
+ "page_idx": 3
485
+ },
486
+ {
487
+ "type": "text",
488
+ "text": "An organization's public relations and publicity activities are the means to foster its relationships with its various audiences and to communicate with them. Public relations efforts are undertaken in order to form a favorable view in the public eye. Favorable publicity can enhance an organization's image and increase demand for its products. A positive article or review about a product or service adds credibility, believability, and legitimacy in a much more effective manner than paid-for advertising. Negative publicity, on the other hand, can tarnish an organization's reputation. Most public relations strategies include press releases, special events, and press conferences. Press releases are articles or brief news releases that are submitted",
489
+ "bbox": [
490
+ 395,
491
+ 429,
492
+ 607,
493
+ 518
494
+ ],
495
+ "page_idx": 3
496
+ },
497
+ {
498
+ "type": "image",
499
+ "img_path": "images/d65a31caf3a162f64a67fe1dca5740cfadfe290580f262b00ff733715678aca2.jpg",
500
+ "image_caption": [],
501
+ "image_footnote": [],
502
+ "bbox": [
503
+ 633,
504
+ 246,
505
+ 647,
506
+ 258
507
+ ],
508
+ "page_idx": 3
509
+ },
510
+ {
511
+ "type": "text",
512
+ "text": "Detect all objects in this image:",
513
+ "text_level": 1,
514
+ "bbox": [
515
+ 650,
516
+ 247,
517
+ 774,
518
+ 257
519
+ ],
520
+ "page_idx": 3
521
+ },
522
+ {
523
+ "type": "image",
524
+ "img_path": "images/075dfdb3177a0fc3872d9fcf5ea64fc3596c1aee2c59fdf7a3ac99ed5688bc9d.jpg",
525
+ "image_caption": [],
526
+ "image_footnote": [
527
+ "Person:[535,544,568,591]; Car:[009,552,058,737], [682,598,999,976], [910,558,999,600]; Bus:[044,070,913,909]"
528
+ ],
529
+ "bbox": [
530
+ 650,
531
+ 258,
532
+ 818,
533
+ 337
534
+ ],
535
+ "page_idx": 3
536
+ },
537
+ {
538
+ "type": "image",
539
+ "img_path": "images/3e77a06c848c394fc09652b57517377fd865681b297b80c5bd42ba059453e380.jpg",
540
+ "image_caption": [
541
+ "Detect Tuba Gloves and Bow Tie in this image in this image:"
542
+ ],
543
+ "image_footnote": [],
544
+ "bbox": [
545
+ 635,
546
+ 382,
547
+ 647,
548
+ 395
549
+ ],
550
+ "page_idx": 3
551
+ },
552
+ {
553
+ "type": "image",
554
+ "img_path": "images/56c59b7bcd19d332b427ee9f73c96709d55c21dfe53880154d3495d2ded8c798.jpg",
555
+ "image_caption": [
556
+ "Figure 3: Visualization of image-text pairs used by Vary-tiny+. For PDF image-text pair, there is only one prompt, while for the object detection task, we utilize two types of prompts as shown in the right half of the figure because some images may have too many objects that exceed the maximum token length (4096) of the OPT125M after interpolation."
557
+ ],
558
+ "image_footnote": [
559
+ "Tuba: [512, 181, 971, 1000]; \nGloves: [703, 730, 782, 862]; \nBow Tie: [075, 590, 144, 630], \n[570, 491, 662, 562]."
560
+ ],
561
+ "bbox": [
562
+ 653,
563
+ 400,
564
+ 816,
565
+ 489
566
+ ],
567
+ "page_idx": 3
568
+ },
569
+ {
570
+ "type": "text",
571
+ "text": "3.1.1 Data Engine",
572
+ "text_level": 1,
573
+ "bbox": [
574
+ 171,
575
+ 618,
576
+ 313,
577
+ 633
578
+ ],
579
+ "page_idx": 3
580
+ },
581
+ {
582
+ "type": "text",
583
+ "text": "PDF data. We prepare about 4M PDF image-text pairs in this stage. Following Vary, we use the PDF processing packages to extract the texts of each PDF page, which we find many Python packages can realize (e.g., pdfminer, pdfplumber, and fitz). Each page will be saved as a JPEG image and form an image-text pair with the corresponding text. In this way, we get 2M samples for English and 2M for Chinese. We use the sentence: \"Provide the OCR results of this image.\" as the prompt for both English and Chinese tasks. The PDFs are mainly from arXiv, CC-MAIN-2021-31-PDF-UNTRUNCATED, and e-books. Figure 3 shows a sample of the PDF image-pair.",
584
+ "bbox": [
585
+ 169,
586
+ 641,
587
+ 826,
588
+ 739
589
+ ],
590
+ "page_idx": 3
591
+ },
592
+ {
593
+ "type": "text",
594
+ "text": "Object detection data. To fully utilize the capacity of the visual vocabulary network and obtain the natural image perception ability from SAM initialization, we introduce object detection data in the vision vocabulary generating process. We gather the samples from two large open-source datasets, i.e., Object365 [40] and OpenImage [18]. Due to the low efficiency of coordinate (number texts) encoding in OPT's [57] text tokenizer, for images with too many objects, the number of tokens in the ground truth may exceed the maximum token length supported by OPT-125M (although we interpolate it to 4096). Therefore, we re-organize the annotations into two tasks: 1) Object Detection: If there are no more than 30 object-boxes in the image, we will allow the Vary-tiny+ detect all objects with the prompt: \"Detect all objects in this image\". 2) REC: If the object-box number is over 30, we will regard this image as a REC task using a prompt template: \"Detect class1, class2, ..., in this image\". The selected classes are random so one image can be used multiple times. Through the above manner, we obtain approximately 3M of detection data. Some samples can be seen in Figure 3.",
595
+ "bbox": [
596
+ 169,
597
+ 744,
598
+ 826,
599
+ 912
600
+ ],
601
+ "page_idx": 3
602
+ },
603
+ {
604
+ "type": "page_number",
605
+ "text": "4",
606
+ "bbox": [
607
+ 493,
608
+ 935,
609
+ 504,
610
+ 946
611
+ ],
612
+ "page_idx": 3
613
+ },
614
+ {
615
+ "type": "text",
616
+ "text": "3.1.2 Input Format",
617
+ "text_level": 1,
618
+ "bbox": [
619
+ 171,
620
+ 90,
621
+ 320,
622
+ 104
623
+ ],
624
+ "page_idx": 4
625
+ },
626
+ {
627
+ "type": "text",
628
+ "text": "Different from the single input/output form of Vary-tiny, Vary-tiny+ needs various input formats to adapt to corresponding tasks due to it requires different prompts to guide the model output correct results. For simplicity, we use the template of Vicuna v1 [8] to construct all ground truth in a conversation format as USER: <img>\"image>\"</img> \"texts input\" ASSITANT: \"texts output\" </s>. We add the \"<img>\" and \"< img>\" as special tokens of the text tokenizer of OPT-125M and we find that it can adapt very well to the Vicuna template. For the vision input branch, we don't utilize any augmentations and only resize the image to a fixed resolution, i.e., $1024 \\times 1024$ .",
629
+ "bbox": [
630
+ 169,
631
+ 114,
632
+ 826,
633
+ 212
634
+ ],
635
+ "page_idx": 4
636
+ },
637
+ {
638
+ "type": "text",
639
+ "text": "3.2 Forge the Cost-Effective Vary-Toy",
640
+ "text_level": 1,
641
+ "bbox": [
642
+ 171,
643
+ 229,
644
+ 452,
645
+ 244
646
+ ],
647
+ "page_idx": 4
648
+ },
649
+ {
650
+ "type": "text",
651
+ "text": "In this section, we depict the design details of Vary-toy, mainly including the structure of the network and the data construction utilized in the pre-training and SFT stages.",
652
+ "bbox": [
653
+ 169,
654
+ 256,
655
+ 823,
656
+ 285
657
+ ],
658
+ "page_idx": 4
659
+ },
660
+ {
661
+ "type": "text",
662
+ "text": "3.2.1 Architecture",
663
+ "text_level": 1,
664
+ "bbox": [
665
+ 171,
666
+ 300,
667
+ 313,
668
+ 313
669
+ ],
670
+ "page_idx": 4
671
+ },
672
+ {
673
+ "type": "text",
674
+ "text": "As shown in Figure 2, we follow the Vary pipeline to devise the main body of Vary-toy but there are some minor differences. When fed an input image with a shape of $\\mathrm{H} \\times \\mathrm{W}$ , the new vision vocabulary branch will directly resize the image to $1024 \\times 1024$ , while the CLIP [35] branch gains a $224 \\times 224$ image by the center crop. Both the two branches output 256 tokens with channels of 1024. The dimension of the Qwen-1.8B's input channel is also 2048, so the simplest manner is to concatenate the image tokens in two branches directly as the input image tokens of the language model. In terms of code implementation, to maintain consistency with the Vary structure, we still add input embedding layers behind the vision vocabulary networks.",
675
+ "bbox": [
676
+ 169,
677
+ 324,
678
+ 823,
679
+ 434
680
+ ],
681
+ "page_idx": 4
682
+ },
683
+ {
684
+ "type": "table",
685
+ "img_path": "images/3f644c7db4b5ddbed29fb83aa6708345715bb3d4fce7efd84324a2f4ed751e61.jpg",
686
+ "table_caption": [],
687
+ "table_footnote": [],
688
+ "table_body": "<table><tr><td>Task</td><td>Dataset</td><td>Sample</td><td>A prompt example</td></tr><tr><td rowspan=\"2\">Cap.</td><td>Laion-COCO [39]</td><td>4M</td><td>Describe the content of this image in a sentence.</td></tr><tr><td>BLIP558k [26]</td><td>558K</td><td>Describe the image with one saying.</td></tr><tr><td rowspan=\"2\">PDF</td><td>Pure OCR</td><td>1M</td><td>Provide the OCR results of this image.</td></tr><tr><td>Markdown</td><td>500K</td><td>Convert the image to markdown format.</td></tr><tr><td rowspan=\"2\">Det.</td><td>COCO [24]</td><td>50K</td><td>Detect all objects in this image.</td></tr><tr><td>RefCOCO</td><td>train set</td><td>Detect an object: the left woman.</td></tr><tr><td rowspan=\"3\">NLP</td><td>ShareGPT</td><td>125K</td><td>Original conversation</td></tr><tr><td>Baize [50]</td><td>112K</td><td>Original conversation</td></tr><tr><td>Alpaca [42]</td><td>52K</td><td>Original conversation</td></tr><tr><td rowspan=\"2\">VQA</td><td>DocVQA [30]</td><td>train set</td><td>Question.AnAnswer using a single word or phrase.</td></tr><tr><td>ChartVQA [29]</td><td>train set</td><td>Question.AnAnswer using a single-word or phrase.</td></tr></table>",
689
+ "bbox": [
690
+ 173,
691
+ 448,
692
+ 818,
693
+ 657
694
+ ],
695
+ "page_idx": 4
696
+ },
697
+ {
698
+ "type": "text",
699
+ "text": "Table 1: Multi-task training data. We introduce 5 types of data in the pretrain stage, including weakly supervised pair data, PDF image-text pair data, detection data, pure text auto-regressive data, and VQA data. All data annotations are reorganized to a conversation format.",
700
+ "bbox": [
701
+ 169,
702
+ 666,
703
+ 823,
704
+ 708
705
+ ],
706
+ "page_idx": 4
707
+ },
708
+ {
709
+ "type": "text",
710
+ "text": "3.2.2 Data Details",
711
+ "text_level": 1,
712
+ "bbox": [
713
+ 171,
714
+ 741,
715
+ 310,
716
+ 755
717
+ ],
718
+ "page_idx": 4
719
+ },
720
+ {
721
+ "type": "text",
722
+ "text": "Intuitively, the sensitivity of the 1.8B model to data quantity and ratio is higher than that of the 7B or above models, so we put more effort into the data processing aspect for Vary-toy.",
723
+ "bbox": [
724
+ 169,
725
+ 766,
726
+ 823,
727
+ 795
728
+ ],
729
+ "page_idx": 4
730
+ },
731
+ {
732
+ "type": "text",
733
+ "text": "Pre-training & SFT data. For Vary-toy, the pretrain stage is actually a multi-task training stage, wherein we prepare a large amount of image-text pairs in various formats. As summarized in Table 1, we mainly focus on a total of 5 types of data in such stage, containing weakly annotated image caption, PDF dense OCR, object detection, pure text conversation, and VQA. Specifically, for natural images, we sample 4M image-text pair in the Laion-COCO [39] dataset, and we also use the BLIP-558K data proposed in LLaVA [26]. For PDF image-text pair, we prepare two types of data following Vary. One is pure dense text OCR, and the other is a task that converts the PDF image to a markdown format. The previous type of data is randomly sampled from the PDF data used in Vary-tiny+ and the last",
734
+ "bbox": [
735
+ 169,
736
+ 800,
737
+ 826,
738
+ 912
739
+ ],
740
+ "page_idx": 4
741
+ },
742
+ {
743
+ "type": "page_number",
744
+ "text": "5",
745
+ "bbox": [
746
+ 493,
747
+ 935,
748
+ 504,
749
+ 946
750
+ ],
751
+ "page_idx": 4
752
+ },
753
+ {
754
+ "type": "text",
755
+ "text": "one is obtained via LaTeX rendering. Compared to vanilla Vary, we reduce the proportion of PDF data to maintain universal capability. For the detection data, we gather images from the COCO [24] dataset. We sample 50K images with fewer objects included for the pure object detection task and use all train data of RefCOCO for the REC task. We normalize the coordinates of each box and then magnify them to 1000 times. To prevent the language ability of the LLM from deteriorating, we also introduce pure NLP conversation data, including ShareGPT, Baize [50], and Alpaca [42]. For the last downstream VQA tasks, we choose two challenge datasets (DocVQA and ChartQA [29]) to monitor the text perception and reasoning performance of Vary-toy for artificial data. There are at least 10 prompts made through GPT3.5 [5] for each task, and Table 1 shows one example of them.",
756
+ "bbox": [
757
+ 169,
758
+ 90,
759
+ 823,
760
+ 217
761
+ ],
762
+ "page_idx": 5
763
+ },
764
+ {
765
+ "type": "text",
766
+ "text": "In the SFT stage, we only use the LLaVA-80K [26] to instruction tuning the model. LLaVA-80K is a dataset with detailed descriptions and prompts of various types of images, produced by GPT4 [26, 33].",
767
+ "bbox": [
768
+ 169,
769
+ 222,
770
+ 826,
771
+ 252
772
+ ],
773
+ "page_idx": 5
774
+ },
775
+ {
776
+ "type": "text",
777
+ "text": "3.2.3 Data Format",
778
+ "text_level": 1,
779
+ "bbox": [
780
+ 171,
781
+ 263,
782
+ 316,
783
+ 277
784
+ ],
785
+ "page_idx": 5
786
+ },
787
+ {
788
+ "type": "text",
789
+ "text": "In Vary-toy, we are pleased to keep the Chinese PDF-parsing feature to some extent because there is very little exploration in this area, which is also one of the reasons that we select Qwen-1.8B [2] as our base language model (due to the relatively comprehensive text vocabulary). The data input to Qwen-1.8B follows the vanilla Vary [48] format. That is: <lim_start>user: <img>\"image></img>\"human prompts\"<lim_end> assistant: \"model outputs\"<lim_end>.",
790
+ "bbox": [
791
+ 169,
792
+ 287,
793
+ 823,
794
+ 359
795
+ ],
796
+ "page_idx": 5
797
+ },
798
+ {
799
+ "type": "text",
800
+ "text": "4 Experiments",
801
+ "text_level": 1,
802
+ "bbox": [
803
+ 169,
804
+ 377,
805
+ 313,
806
+ 395
807
+ ],
808
+ "page_idx": 5
809
+ },
810
+ {
811
+ "type": "text",
812
+ "text": "4.1 Evaluation Metrics",
813
+ "text_level": 1,
814
+ "bbox": [
815
+ 171,
816
+ 407,
817
+ 346,
818
+ 421
819
+ ],
820
+ "page_idx": 5
821
+ },
822
+ {
823
+ "type": "text",
824
+ "text": "We report the accuracy of Vary-toy on four popular and challenging benchmarks: DocVQA [30], ChartQA [29], RefCOCO [15], and MM Vet [54]. Wherein, the DocVQA and ChartQA can measure the text perception and reasoning ability of the model in manual images, RefCOCO can be used to test the model's ability to locate natural objects, while MM Vet, including 6 measurement areas, can be utilized to monitor the general ability of Vary-toy. We use the evaluation metrics introduced in their original paper for fair comparison. Specifically, we utilize ANLS, relaxed accuracy, accuracy under 0.5 IoU, and GPT4 scoring as the metrics for the above four datasets.",
825
+ "bbox": [
826
+ 169,
827
+ 431,
828
+ 823,
829
+ 530
830
+ ],
831
+ "page_idx": 5
832
+ },
833
+ {
834
+ "type": "text",
835
+ "text": "4.2 Implementation Details",
836
+ "text_level": 1,
837
+ "bbox": [
838
+ 171,
839
+ 546,
840
+ 375,
841
+ 561
842
+ ],
843
+ "page_idx": 5
844
+ },
845
+ {
846
+ "type": "text",
847
+ "text": "For Vary-tiny+, we unfreeze all the parameters and train the whole model with a batch size of 512 for 2 epochs. We select the AdamW [28] optimizer with a cosine annealing scheduler [27]. The initial learning rate is set to 5e-5 and the end is 0. It is worth noting that the Vary-tiny is initialized by the weights of Vary-tiny for faster convergence.",
848
+ "bbox": [
849
+ 169,
850
+ 571,
851
+ 823,
852
+ 628
853
+ ],
854
+ "page_idx": 5
855
+ },
856
+ {
857
+ "type": "text",
858
+ "text": "For Vary-toy, following vanilla Vary, we freeze all weights of two vision vocabulary networks and only optimize the parameters of the input embedding layers and language model (Qwen-1.8B). In the multi-task training (pre-training) stage, we set the start learning rate to be 5e-5 while it is set to 2e-5 in SFT. We train the model with a batch size of 512 for only 1 epoch in both two stages.",
859
+ "bbox": [
860
+ 169,
861
+ 633,
862
+ 823,
863
+ 690
864
+ ],
865
+ "page_idx": 5
866
+ },
867
+ {
868
+ "type": "table",
869
+ "img_path": "images/680d924a14af3a090dbd302547807422d245c5b399d95a1e6ce003475aa0dc51.jpg",
870
+ "table_caption": [],
871
+ "table_footnote": [],
872
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Size</td><td colspan=\"2\">DocVQA</td><td colspan=\"3\">ChartQA</td></tr><tr><td>val</td><td>test</td><td>human</td><td>augmented</td><td>Average</td></tr><tr><td>Dessurt [10]</td><td>-</td><td>46.5</td><td>63.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Donut [16]</td><td>-</td><td>-</td><td>67.5</td><td>-</td><td>-</td><td>41.8</td></tr><tr><td>Pix2Sturct [20]</td><td>-</td><td>-</td><td>72.1</td><td>30.5</td><td>81.6</td><td>56.0</td></tr><tr><td>mPLUG-DocOwl [52]</td><td>7B</td><td>62.2</td><td>-</td><td>-</td><td>-</td><td>57.4</td></tr><tr><td>Qwen-VL-chat [2]</td><td>7B</td><td>65.1</td><td>-</td><td>-</td><td>-</td><td>65.7</td></tr><tr><td>Vary-toy</td><td>1.8B</td><td>65.6</td><td>65.0</td><td>33.4</td><td>84.8</td><td>59.1</td></tr></table>",
873
+ "bbox": [
874
+ 215,
875
+ 700,
876
+ 774,
877
+ 843
878
+ ],
879
+ "page_idx": 5
880
+ },
881
+ {
882
+ "type": "text",
883
+ "text": "Table 2: Performance comparison to popular methods on DocVQA and ChartQA. Vary-toy can achieve $65.6\\%$ ANLS on DocVQA which is on par with the 7B Qwen-VL-chat and $59.1\\%$ accuracy on ChartQA which is higher than 7B-size mPLUG-DocOwl.",
884
+ "bbox": [
885
+ 169,
886
+ 849,
887
+ 823,
888
+ 892
889
+ ],
890
+ "page_idx": 5
891
+ },
892
+ {
893
+ "type": "page_number",
894
+ "text": "6",
895
+ "bbox": [
896
+ 493,
897
+ 936,
898
+ 504,
899
+ 946
900
+ ],
901
+ "page_idx": 5
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "4.3 Manual Image Understanding Ability",
906
+ "text_level": 1,
907
+ "bbox": [
908
+ 171,
909
+ 90,
910
+ 473,
911
+ 107
912
+ ],
913
+ "page_idx": 6
914
+ },
915
+ {
916
+ "type": "text",
917
+ "text": "We evaluate the fine-grained text perception and reasoning ability via the DocVQA [30] and ChartQA [29]. As shown in Table 2, along with the only 1.8B language model, Vary-toy can achieve $65.6\\%$ ANLS on DocVQA and $59.1\\%$ accuracy on ChartQA. For DocVQA, the Vary-toy enjoys comparable performance to the 7B-size Qwen-VL-chat, proving the excellent document-level text perception ability of the model and also proving that the new vision vocabulary is available on tokenizing PDF images. For ChartQA, Vary-toy can achieve $59.1\\%$ average accuracy, which is better than the 7B size mPLUG-DocOwl, demonstrating the effectiveness of our model further.",
918
+ "bbox": [
919
+ 169,
920
+ 116,
921
+ 826,
922
+ 214
923
+ ],
924
+ "page_idx": 6
925
+ },
926
+ {
927
+ "type": "table",
928
+ "img_path": "images/ca19867f32193fb40d2a28c8230e1aa0608b5706e26b1b39357504d69b50cbdd.jpg",
929
+ "table_caption": [],
930
+ "table_footnote": [],
931
+ "table_body": "<table><tr><td rowspan=\"2\">Type</td><td rowspan=\"2\">Method</td><td rowspan=\"2\">Size</td><td colspan=\"3\">RefCOCO</td></tr><tr><td>val</td><td>testA</td><td>testB</td></tr><tr><td rowspan=\"4\">Traditional</td><td>OFA-L [46]</td><td>-</td><td>80.0</td><td>83.7</td><td>76.4</td></tr><tr><td>TransVG [11]</td><td>-</td><td>81.0</td><td>82.7</td><td>78.4</td></tr><tr><td>VILLA [13]</td><td>-</td><td>82.4</td><td>87.5</td><td>74.8</td></tr><tr><td>UniTAB [51]</td><td>-</td><td>86.3</td><td>88.8</td><td>80.6</td></tr><tr><td rowspan=\"5\">LLM-based</td><td>VisionLLM-H [47]</td><td>-</td><td>-</td><td>86.7</td><td>-</td></tr><tr><td>Shikra-7B [7]</td><td>7B</td><td>87.0</td><td>90.6</td><td>80.2</td></tr><tr><td>Shikra-13B [7]</td><td>13B</td><td>87.8</td><td>91.1</td><td>81.7</td></tr><tr><td>Qwen-VL-chat [2]</td><td>7B</td><td>88.6</td><td>92.3</td><td>84.5</td></tr><tr><td>Next-chat [56]</td><td>7B</td><td>85.5</td><td>90.0</td><td>77.9</td></tr><tr><td></td><td>Vary-toy</td><td>1.8B</td><td>88.1</td><td>90.6</td><td>85.7</td></tr></table>",
932
+ "bbox": [
933
+ 267,
934
+ 224,
935
+ 725,
936
+ 429
937
+ ],
938
+ "page_idx": 6
939
+ },
940
+ {
941
+ "type": "text",
942
+ "text": "4.4 Natural Object Perception Ability",
943
+ "text_level": 1,
944
+ "bbox": [
945
+ 171,
946
+ 489,
947
+ 449,
948
+ 506
949
+ ],
950
+ "page_idx": 6
951
+ },
952
+ {
953
+ "type": "text",
954
+ "text": "The vision vocabulary network generated by Vary-tiny+ should enjoy two main advanced perception abilities: one for dense text and the other for natural objects. In this part, We test the latter ability of Vary-toy after accessing the improved vision vocabulary. It is worth noting that a center crop operation processes the input image of the CLIP branch. Therefore, it can be ruled out that the model uses CLIP for object localization.",
955
+ "bbox": [
956
+ 169,
957
+ 516,
958
+ 823,
959
+ 585
960
+ ],
961
+ "page_idx": 6
962
+ },
963
+ {
964
+ "type": "text",
965
+ "text": "As shown in Table 3, Vary-toy can get $88.1\\%$ accuracy@0.5 on the RefCOCO validation set, which is also on par with Qwen-VL-chat (7B) and even better than the Shikra-13B. The results show that under the knowledgeable vision vocabulary, Vary-toy gathers great natural object perception ability, proving the effectiveness of using the Vary-tiny+ architecture to build a vision vocabulary, allowing us to further reflect on the necessity of CLIP if we add a large amount of weakly labeled image caption data, e.g., Laion-400M [39], during the new vocabulary generating process.",
966
+ "bbox": [
967
+ 169,
968
+ 590,
969
+ 826,
970
+ 676
971
+ ],
972
+ "page_idx": 6
973
+ },
974
+ {
975
+ "type": "table",
976
+ "img_path": "images/fe216f9062f35b975a4507f4be0fc12330ac26c2c29c9b1cf5b91f0ba3299bd7.jpg",
977
+ "table_caption": [
978
+ "Table 3: Comparison with popular methods on RefCOCO. Benefiting from the new vision vocabulary, Vary-toy can achieve $88.1\\%$ accuracy on RefCOCO val, which is on par with the 7B Qwen-VL-chat."
979
+ ],
980
+ "table_footnote": [],
981
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"7\">MM-Vet</td></tr><tr><td>Rec</td><td>OCR</td><td>Know</td><td>Gen</td><td>Spat</td><td>Math</td><td>Total</td></tr><tr><td>BLIP-2 [22]</td><td>27.5</td><td>11.1</td><td>11.8</td><td>7.0</td><td>16.2</td><td>5.8</td><td>22.4</td></tr><tr><td>LLaVA-7B [26]</td><td>28.0</td><td>17.1</td><td>16.3</td><td>18.9</td><td>21.2</td><td>11.5</td><td>23.8</td></tr><tr><td>MiniGPT-4 [60]</td><td>29.9</td><td>16.1</td><td>20.4</td><td>22.1</td><td>22.2</td><td>3.8</td><td>24.4</td></tr><tr><td>Otter [21]</td><td>27.3</td><td>17.8</td><td>14.2</td><td>13.8</td><td>24.4</td><td>3.8</td><td>24.7</td></tr><tr><td>OpenFlamingo [1]</td><td>28.7</td><td>16.7</td><td>16.4</td><td>13.1</td><td>21.0</td><td>7.7</td><td>24.8</td></tr><tr><td>LLaVA1.5-7B [25]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>30.5</td></tr><tr><td>Vary-toy (1.8B)</td><td>33.4</td><td>20.3</td><td>19.9</td><td>17.5</td><td>24</td><td>10.8</td><td>29.0</td></tr></table>",
982
+ "bbox": [
983
+ 230,
984
+ 686,
985
+ 758,
986
+ 843
987
+ ],
988
+ "page_idx": 6
989
+ },
990
+ {
991
+ "type": "text",
992
+ "text": "Table 4: Comparison with popular LVLMs on MMVet. With only a 1.8B language model, Varytoy can get a promising $29.0\\%$ accuracy. The abbreviations represent Rec: Recognition; Know: Knowledge; Gen: Language generation; Spat: Spatial awareness.",
993
+ "bbox": [
994
+ 169,
995
+ 849,
996
+ 826,
997
+ 893
998
+ ],
999
+ "page_idx": 6
1000
+ },
1001
+ {
1002
+ "type": "page_number",
1003
+ "text": "7",
1004
+ "bbox": [
1005
+ 493,
1006
+ 935,
1007
+ 504,
1008
+ 946
1009
+ ],
1010
+ "page_idx": 6
1011
+ },
1012
+ {
1013
+ "type": "text",
1014
+ "text": "4.5 General Ability",
1015
+ "text_level": 1,
1016
+ "bbox": [
1017
+ 171,
1018
+ 90,
1019
+ 321,
1020
+ 107
1021
+ ],
1022
+ "page_idx": 7
1023
+ },
1024
+ {
1025
+ "type": "text",
1026
+ "text": "We report the accuracy of Vary-toy on MMVet [54] to test its general performance. As summarized in table 4, Vary-toy gains $29.0\\%$ of the total accuracy, which is higher than the classical LLaVA-7B [26] $(23.8\\%)$ , BLIP-2 [22], and MiniGPT-4 [60], demonstrating Vary-toy enjoys excellent general knowledge. For a 1.8B model, the generation ability is relatively poor and needs to be strengthened.",
1027
+ "bbox": [
1028
+ 169,
1029
+ 116,
1030
+ 828,
1031
+ 174
1032
+ ],
1033
+ "page_idx": 7
1034
+ },
1035
+ {
1036
+ "type": "image",
1037
+ "img_path": "images/f6b2c6df24cb60e6159179a19c8df93f525c89578e7b47f6691c7c374ae5ca4b.jpg",
1038
+ "image_caption": [],
1039
+ "image_footnote": [],
1040
+ "bbox": [
1041
+ 194,
1042
+ 191,
1043
+ 210,
1044
+ 210
1045
+ ],
1046
+ "page_idx": 7
1047
+ },
1048
+ {
1049
+ "type": "text",
1050
+ "text": "What is funny about this image?",
1051
+ "text_level": 1,
1052
+ "bbox": [
1053
+ 214,
1054
+ 195,
1055
+ 388,
1056
+ 208
1057
+ ],
1058
+ "page_idx": 7
1059
+ },
1060
+ {
1061
+ "type": "image",
1062
+ "img_path": "images/0c5b4907fd5007d20597006260a6b8b1939d51adc3aa300e8ebeab5aa2e86fcd.jpg",
1063
+ "image_caption": [],
1064
+ "image_footnote": [],
1065
+ "bbox": [
1066
+ 200,
1067
+ 214,
1068
+ 333,
1069
+ 364
1070
+ ],
1071
+ "page_idx": 7
1072
+ },
1073
+ {
1074
+ "type": "text",
1075
+ "text": "The funny aspect of this image lies in the fire hydrant painted with a smiling face, which gives it an unconventional and whimsical appearance. The cute drawing on the red fire hydrant attracts attention and can be considered a creative expression or an artistic statement. In addition, there's a car parked nearby, emphasizing the urban setting and reinforcing the idea that the image might have been taken in a city environment where such decorative objects and artistic installations are more common.",
1076
+ "bbox": [
1077
+ 341,
1078
+ 220,
1079
+ 506,
1080
+ 359
1081
+ ],
1082
+ "page_idx": 7
1083
+ },
1084
+ {
1085
+ "type": "image",
1086
+ "img_path": "images/563ea805d37b3d753beafd3a125c191d1e5d6ce910bc76e54956970665eff6f5.jpg",
1087
+ "image_caption": [],
1088
+ "image_footnote": [],
1089
+ "bbox": [
1090
+ 535,
1091
+ 193,
1092
+ 552,
1093
+ 210
1094
+ ],
1095
+ "page_idx": 7
1096
+ },
1097
+ {
1098
+ "type": "text",
1099
+ "text": "Detect all objects in this image:",
1100
+ "text_level": 1,
1101
+ "bbox": [
1102
+ 553,
1103
+ 195,
1104
+ 723,
1105
+ 208
1106
+ ],
1107
+ "page_idx": 7
1108
+ },
1109
+ {
1110
+ "type": "image",
1111
+ "img_path": "images/6651eabff288f71a5d3a4074c5337ccfc1a009b71a7027abc8694e71b7a4dff5.jpg",
1112
+ "image_caption": [],
1113
+ "image_footnote": [],
1114
+ "bbox": [
1115
+ 537,
1116
+ 214,
1117
+ 797,
1118
+ 364
1119
+ ],
1120
+ "page_idx": 7
1121
+ },
1122
+ {
1123
+ "type": "image",
1124
+ "img_path": "images/de5099e331aba41405c84dcaf36b1ad77db2cb8812ac6a342ff4c211c978fd18.jpg",
1125
+ "image_caption": [],
1126
+ "image_footnote": [],
1127
+ "bbox": [
1128
+ 194,
1129
+ 378,
1130
+ 210,
1131
+ 395
1132
+ ],
1133
+ "page_idx": 7
1134
+ },
1135
+ {
1136
+ "type": "text",
1137
+ "text": "Provide the OCR results of this image.",
1138
+ "text_level": 1,
1139
+ "bbox": [
1140
+ 217,
1141
+ 382,
1142
+ 423,
1143
+ 393
1144
+ ],
1145
+ "page_idx": 7
1146
+ },
1147
+ {
1148
+ "type": "image",
1149
+ "img_path": "images/386ecfbd46a75ec2ffd8d8c1b24f083c9cd4aaf8dd48c3fbf8c915f83300d061.jpg",
1150
+ "image_caption": [
1151
+ "Figure 4: Visualization of high-quality results of our model in four common fields. We can see that Vary-toy has satisfactory general ability and enjoys strong text and object perception abilities."
1152
+ ],
1153
+ "image_footnote": [],
1154
+ "bbox": [
1155
+ 475,
1156
+ 378,
1157
+ 498,
1158
+ 397
1159
+ ],
1160
+ "page_idx": 7
1161
+ },
1162
+ {
1163
+ "type": "image",
1164
+ "img_path": "images/216a363ee11a1c01732301a264fd720154aa0b3cad6ba3c6092ef88e632e80aa.jpg",
1165
+ "image_caption": [],
1166
+ "image_footnote": [],
1167
+ "bbox": [
1168
+ 210,
1169
+ 425,
1170
+ 243,
1171
+ 450
1172
+ ],
1173
+ "page_idx": 7
1174
+ },
1175
+ {
1176
+ "type": "text",
1177
+ "text": "ou are cordially invited to join us for the dramatic retum of Pagliacci , Le Prophète andLubéric . Each one ofBilli Boudi , Don Cardi , and Orlyl . Each one is given",
1178
+ "bbox": [
1179
+ 246,
1180
+ 440,
1181
+ 482,
1182
+ 450
1183
+ ],
1184
+ "page_idx": 7
1185
+ },
1186
+ {
1187
+ "type": "text",
1188
+ "text": "the Metropolitan Opera's first production of Kurt Weill and Bertolt Brech's twentieth century masterpiece, Rise and Fall of the City of Mahogany. And a new production of Mozart's delightful Abduction from the Seraglio.",
1189
+ "bbox": [
1190
+ 210,
1191
+ 452,
1192
+ 480,
1193
+ 473
1194
+ ],
1195
+ "page_idx": 7
1196
+ },
1197
+ {
1198
+ "type": "text",
1199
+ "text": "A trial subscription for the 1979-90 season at The Met gives you a chance to appreciate some of the many advantages enjoyed by our regular Subscribers. Choose any one of the twenty-three mini-series described inside and you'll enjoy the same seats for each performance. Your tickets will be sent to you in August, well in advance of box office lines and sold out notices.",
1200
+ "bbox": [
1201
+ 210,
1202
+ 476,
1203
+ 482,
1204
+ 503
1205
+ ],
1206
+ "page_idx": 7
1207
+ },
1208
+ {
1209
+ "type": "text",
1210
+ "text": "Next",
1211
+ "bbox": [
1212
+ 210,
1213
+ 505,
1214
+ 259,
1215
+ 511
1216
+ ],
1217
+ "page_idx": 7
1218
+ },
1219
+ {
1220
+ "type": "text",
1221
+ "text": "season, we hope your trial series experience will have convinced you that subscribing is the best way to be a part of the Metropolitan.",
1222
+ "bbox": [
1223
+ 210,
1224
+ 512,
1225
+ 279,
1226
+ 550
1227
+ ],
1228
+ "page_idx": 7
1229
+ },
1230
+ {
1231
+ "type": "text",
1232
+ "text": "Opening Night is just a summer away. Won't you take a few moments now to",
1233
+ "bbox": [
1234
+ 210,
1235
+ 551,
1236
+ 274,
1237
+ 575
1238
+ ],
1239
+ "page_idx": 7
1240
+ },
1241
+ {
1242
+ "type": "text",
1243
+ "text": "guarantee your series and seating for the glorious season ahead? It's a season that promises grand opera performed by some of the world's greatest artists. We hope you'll be with us.",
1244
+ "bbox": [
1245
+ 210,
1246
+ 577,
1247
+ 482,
1248
+ 589
1249
+ ],
1250
+ "page_idx": 7
1251
+ },
1252
+ {
1253
+ "type": "text",
1254
+ "text": "P. S. If you are already a Met Subscriber, you may want to use a mini-series for operas not on your regular subscription. Or perhaps you'd like to give this brochure to a friend who may be interested in this introductory offer.",
1255
+ "bbox": [
1256
+ 210,
1257
+ 590,
1258
+ 483,
1259
+ 611
1260
+ ],
1261
+ "page_idx": 7
1262
+ },
1263
+ {
1264
+ "type": "image",
1265
+ "img_path": "images/620a0ad6d83ceec73d93b42b908ebdf4815ba351207750f8bf3daa8a652dfe24.jpg",
1266
+ "image_caption": [
1267
+ "Ocillo Set: Franco Zefendi, Costamco: Peter J. Hall"
1268
+ ],
1269
+ "image_footnote": [],
1270
+ "bbox": [
1271
+ 282,
1272
+ 511,
1273
+ 483,
1274
+ 569
1275
+ ],
1276
+ "page_idx": 7
1277
+ },
1278
+ {
1279
+ "type": "text",
1280
+ "text": "you are cordially invited to join us for the dramatic return of Pagliazzi, Le Prophets and Lohengrin. For the power of Billy Budd, Don Carlo and Otello. For the excitement of the Metropolitan Opera's first production of Kurt Weill and Bertolt Brecht's twentieth century masterpiece, Rise and Fall of the City of Mahagony. And a new production of Mozart's delightful Abduction from the Seraglio.",
1281
+ "bbox": [
1282
+ 504,
1283
+ 393,
1284
+ 790,
1285
+ 457
1286
+ ],
1287
+ "page_idx": 7
1288
+ },
1289
+ {
1290
+ "type": "text",
1291
+ "text": "At trial subscription for the 1979-80 season at The Met gives you a chance to appreciate some of the many advantages enjoyed by our regular Subscribers. Choose any one of the twenty-three mini-series described inside and you'll enjoy the same seats for each performance.",
1292
+ "bbox": [
1293
+ 504,
1294
+ 458,
1295
+ 795,
1296
+ 503
1297
+ ],
1298
+ "page_idx": 7
1299
+ },
1300
+ {
1301
+ "type": "text",
1302
+ "text": "Your tickets will be sent to you in August, well in advance of box office lines and sold out notices.",
1303
+ "bbox": [
1304
+ 504,
1305
+ 505,
1306
+ 789,
1307
+ 521
1308
+ ],
1309
+ "page_idx": 7
1310
+ },
1311
+ {
1312
+ "type": "text",
1313
+ "text": "Next season, we hope your trial series experience",
1314
+ "bbox": [
1315
+ 504,
1316
+ 522,
1317
+ 728,
1318
+ 531
1319
+ ],
1320
+ "page_idx": 7
1321
+ },
1322
+ {
1323
+ "type": "text",
1324
+ "text": "will have convinced you that subscribing is the best way to be a part of the Metropolitan,",
1325
+ "bbox": [
1326
+ 504,
1327
+ 532,
1328
+ 785,
1329
+ 549
1330
+ ],
1331
+ "page_idx": 7
1332
+ },
1333
+ {
1334
+ "type": "text",
1335
+ "text": "Opening Night is just a summer",
1336
+ "bbox": [
1337
+ 506,
1338
+ 550,
1339
+ 645,
1340
+ 559
1341
+ ],
1342
+ "page_idx": 7
1343
+ },
1344
+ {
1345
+ "type": "text",
1346
+ "text": "away. Won't you take a few moments now to guarantee your series and seating for the glorious season ahead? It's a season that promises grand opera performed by some of the world's greatest artists. We hope you'll be with us.",
1347
+ "bbox": [
1348
+ 504,
1349
+ 560,
1350
+ 785,
1351
+ 595
1352
+ ],
1353
+ "page_idx": 7
1354
+ },
1355
+ {
1356
+ "type": "text",
1357
+ "text": "P.S. If you are already a Met Subscriber, you may want to use a mini-series for operas not on your regular subscription.",
1358
+ "bbox": [
1359
+ 504,
1360
+ 597,
1361
+ 789,
1362
+ 614
1363
+ ],
1364
+ "page_idx": 7
1365
+ },
1366
+ {
1367
+ "type": "text",
1368
+ "text": "Or perhaps you'd like to give this brochure to a friend who may be interested in this introductory offer.",
1369
+ "bbox": [
1370
+ 504,
1371
+ 614,
1372
+ 784,
1373
+ 632
1374
+ ],
1375
+ "page_idx": 7
1376
+ },
1377
+ {
1378
+ "type": "text",
1379
+ "text": "4.6 Visualization",
1380
+ "text_level": 1,
1381
+ "bbox": [
1382
+ 171,
1383
+ 696,
1384
+ 305,
1385
+ 710
1386
+ ],
1387
+ "page_idx": 7
1388
+ },
1389
+ {
1390
+ "type": "text",
1391
+ "text": "Figure 4 shows high-quality results of Vary-toy on four different downstream fields. We can see that the model enjoys good vision concept understanding and localization capacities, indicating that a reinforced vision vocabulary with a small language model can also perform well in multimodal tasks.",
1392
+ "bbox": [
1393
+ 169,
1394
+ 722,
1395
+ 826,
1396
+ 765
1397
+ ],
1398
+ "page_idx": 7
1399
+ },
1400
+ {
1401
+ "type": "text",
1402
+ "text": "5 Conclusion",
1403
+ "text_level": 1,
1404
+ "bbox": [
1405
+ 171,
1406
+ 782,
1407
+ 302,
1408
+ 799
1409
+ ],
1410
+ "page_idx": 7
1411
+ },
1412
+ {
1413
+ "type": "text",
1414
+ "text": "In this report, we propose a small LVLM — Vary-toy, which can be deployed on a GTX1080ti GPU and enjoys fine performance in many downstream tasks. What's more, we generate a new and more comprehensive vision vocabulary for the presented model, which is the key to the success of Vary-toy. We hope the promising and user-friendly Vary-toy can become a new baseline in such fields as well as draw more attention to LVLM, especially for researchers who previously lacked computing resources. We also encourage researchers to use our reinforced vision vocabulary for more downstream tasks. Finally, we firmly confirm that the Vary-toy will evolve beyond just a toy.",
1415
+ "bbox": [
1416
+ 169,
1417
+ 814,
1418
+ 828,
1419
+ 912
1420
+ ],
1421
+ "page_idx": 7
1422
+ },
1423
+ {
1424
+ "type": "page_number",
1425
+ "text": "8",
1426
+ "bbox": [
1427
+ 493,
1428
+ 935,
1429
+ 504,
1430
+ 946
1431
+ ],
1432
+ "page_idx": 7
1433
+ },
1434
+ {
1435
+ "type": "text",
1436
+ "text": "References",
1437
+ "text_level": 1,
1438
+ "bbox": [
1439
+ 173,
1440
+ 89,
1441
+ 269,
1442
+ 106
1443
+ ],
1444
+ "page_idx": 8
1445
+ },
1446
+ {
1447
+ "type": "list",
1448
+ "sub_type": "ref_text",
1449
+ "list_items": [
1450
+ "[1] Alayrac, J., Donahue, J., Luc, P., Miech, A., Barr, I., Hasson, Y., Lenc, K., Mensch, A., Millican, K., Reynolds, M., Ring, R., Rutherford, E., Cabi, S., Han, T., Gong, Z., Samangooei, S., Monteiro, M., Menick, J.L., Borgeaud, S., Brock, A., Nematzadeh, A., Sharifzadeh, S., Binkowski, M., Barreira, R., Vinyals, O., Zisserman, A., Simonyan, K.: Flamingo: a visual language model for few-shot learning. In: NeurIPS (2022) 1, 3, 7",
1451
+ "[2] Bai, J., Bai, S., Chu, Y., Cui, Z., Dang, K., Deng, X., Fan, Y., Ge, W., Han, Y., Huang, F., Hui, B., Ji, L., Li, M., Lin, J., Lin, R., Liu, D., Liu, G., Lu, C., Lu, K., Ma, J., Men, R., Ren, X., Ren, X., Tan, C., Tan, S., Tu, J., Wang, P., Wang, S., Wang, W., Wu, S., Xu, B., Xu, J., Yang, A., Yang, H., Yang, J., Yang, S., Yao, Y., Yu, B., Yuan, H., Yuan, Z., Zhang, J., Zhang, X., Zhang, Y., Zhang, Z., Zhou, C., Zhou, J., Zhou, X., Zhu, T.: Qwen technical report. arXiv preprint arXiv:2309.16609 (2023) 2, 3, 6, 7",
1452
+ "[3] Bai, J., Bai, S., Yang, S., Wang, S., Tan, S., Wang, P., Lin, J., Zhou, C., Zhou, J.: Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966 (2023) 2, 3",
1453
+ "[4] Biten, A.F., Litman, R., Xie, Y., Appalaraju, S., Manmatha, R.: Latr: Layout-aware transformer for scene-text vqa. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 16548-16558 (2022) 1, 3",
1454
+ "[5] Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020) 3, 6",
1455
+ "[6] Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 213-229. Springer (2020) 2",
1456
+ "[7] Chen, K., Zhang, Z., Zeng, W., Zhang, R., Zhu, F., Zhao, R.: Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195 (2023) 7",
1457
+ "[8] Chiang, W.L., Li, Z., Lin, Z., Sheng, Y., Wu, Z., Zhang, H., Zheng, L., Zhuang, S., Zhuang, Y., Gonzalez, J.E., Stoica, I., Xing, E.P.: Vicuna: An open-source chatbot impressing gpt-4 with $90\\%$ * chatgpt quality. https://lmsys.org/blog/2023-03-30-vicuna/ (2023) 3, 5",
1458
+ "[9] Chu, X., Qiao, L., Lin, X., Xu, S., Yang, Y., Hu, Y., Wei, F., Zhang, X., Zhang, B., Wei, X., Shen, C.: Mobilevlm: A fast, strong and open vision language assistant for mobile devices (2023) 3",
1459
+ "[10] Davis, B., Morse, B., Price, B., Tensmeyer, C., Wigington, C., Morariu, V.: End-to-end document recognition and understanding with dessurt. In: European Conference on Computer Vision. pp. 280-296. Springer (2022) 6",
1460
+ "[11] Deng, J., Yang, Z., Chen, T., Zhou, W., Li, H.: Transvg: End-to-end visual grounding with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1769-1779 (2021) 7",
1461
+ "[12] Dong, R., Han, C., Peng, Y., Qi, Z., Ge, Z., Yang, J., Zhao, L., Sun, J., Zhou, H., Wei, H., et al.: Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499 (2023) 3",
1462
+ "[13] Gan, Z., Chen, Y.C., Li, L., Zhu, C., Cheng, Y., Liu, J.: Large-scale adversarial training for vision-and-language representation learning. Advances in Neural Information Processing Systems 33, 6616-6628 (2020) 7",
1463
+ "[14] Hao, Y., Song, H., Dong, L., Huang, S., Chi, Z., Wang, W., Ma, S., Wei, F.: Language models are general-purpose interfaces. arXiv preprint arXiv:2206.06336 (2022) 1",
1464
+ "[15] Kazemzadeh, S., Ordonez, V., Matten, M., Berg, T.: Referitgame: Referring to objects in photographs of natural scenes. In: Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP). pp. 787-798 (2014) 2, 6",
1465
+ "[16] Kim, G., Hong, T., Yim, M., Nam, J., Park, J., Yim, J., Hwang, W., Yun, S., Han, D., Park, S.: Ocr-free document understanding transformer. In: European Conference on Computer Vision. pp. 498-517. Springer (2022) 6",
1466
+ "[17] Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) 4"
1467
+ ],
1468
+ "bbox": [
1469
+ 173,
1470
+ 112,
1471
+ 826,
1472
+ 912
1473
+ ],
1474
+ "page_idx": 8
1475
+ },
1476
+ {
1477
+ "type": "page_number",
1478
+ "text": "9",
1479
+ "bbox": [
1480
+ 493,
1481
+ 935,
1482
+ 504,
1483
+ 946
1484
+ ],
1485
+ "page_idx": 8
1486
+ },
1487
+ {
1488
+ "type": "list",
1489
+ "sub_type": "ref_text",
1490
+ "list_items": [
1491
+ "[18] Kuznetsova, A., Rom, H., Alldrin, N., Uijlings, J., Krasin, I., Pont-Tuset, J., Kamali, S., Popov, S., Malloci, M., Kolesnikov, A., et al.: The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision 128(7), 1956–1981 (2020) 4",
1492
+ "[19] Law, H., Deng, J.: Cornernet: Detecting objects as paired keypoints. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 734-750 (2018) 2",
1493
+ "[20] Lee, K., Joshi, M., Turc, I.R., Hu, H., Liu, F., Eisenschlos, J.M., Khandelwal, U., Shaw, P., Chang, M.W., Toutanova, K.: Pix2struct: Screenshot parsing as pretraining for visual language understanding. In: International Conference on Machine Learning. pp. 18893-18912. PMLR (2023) 6",
1494
+ "[21] Li, B., Zhang, Y., Chen, L., Wang, J., Yang, J., Liu, Z.: Otter: A multi-modal model with in-context instruction tuning. arXiv preprint arXiv:2305.03726 (2023) 7",
1495
+ "[22] Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023) 1, 3, 7, 8",
1496
+ "[23] Lin, T.Y., Goyal, P., Girshick, R., He, K., Dólár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE international conference on computer vision. pp. 2980-2988 (2017) 2",
1497
+ "[24] Lin, T., Maire, M., Belongie, S.J., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft COCO: common objects in context. In: ECCV. pp. 740-755 (2014) 1, 3, 5, 6",
1498
+ "[25] Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning (2023) 3, 7",
1499
+ "[26] Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023) 1, 2, 3, 5, 6, 7, 8",
1500
+ "[27] Loshchilov, I., Hutter, F.: Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983 (2016) 6",
1501
+ "[28] Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019) 6",
1502
+ "[29] Masry, A., Long, D.X., Tan, J.Q., Joty, S., Hoque, E.: Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244 (2022) 2, 5, 6, 7",
1503
+ "[30] Mathew, M., Karatzas, D., Jawahar, C.: Docvqa: A dataset for vqa on document images. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 2200-2209 (2021) 1, 2, 3, 5, 6, 7",
1504
+ "[31] Microsoft: Phi-2: The surprising power of small language models. https://www.microsoft.com/en-us/research/blog/phi-2-the-surprising-power-of-small-language-models/ (2023) 3",
1505
+ "[32] Mishra, A., Shekhar, S., Singh, A.K., Chakraborty, A.: Ocr-vqa: Visual question answering by reading text in images. In: 2019 international conference on document analysis and recognition (ICDAR). pp. 947-952. IEEE (2019) 1, 3",
1506
+ "[33] OpenAI: Gpt-4 technical report (2023) 6",
1507
+ "[34] Ouyang, L., Wu, J., Jiang, X., Almeida, D., Wainwright, C.L., Mishkin, P., Zhang, C., Agarwal, S., Slama, K., Ray, A., Schulman, J., Hilton, J., Kelton, F., Miller, L., Simens, M., Askell, A., Welinder, P., Christiano, P.F., Leike, J., Lowe, R.: Training language models to follow instructions with human feedback. In: NeurIPS (2022) 1, 3",
1508
+ "[35] Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021) 1, 5",
1509
+ "[36] Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019) 3",
1510
+ "[37] Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: Unified, real-time object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 779-788 (2016) 2",
1511
+ "[38] Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems 28 (2015) 2",
1512
+ "[39] Schuhmann, C., Vencu, R., Beaumont, R., Kaczmarczyk, R., Mullis, C., Katta, A., Coombes, T., Jitsev, J., Komatsuzaki, A.: Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114 (2021) 5, 7"
1513
+ ],
1514
+ "bbox": [
1515
+ 173,
1516
+ 90,
1517
+ 826,
1518
+ 910
1519
+ ],
1520
+ "page_idx": 9
1521
+ },
1522
+ {
1523
+ "type": "page_number",
1524
+ "text": "10",
1525
+ "bbox": [
1526
+ 490,
1527
+ 935,
1528
+ 508,
1529
+ 946
1530
+ ],
1531
+ "page_idx": 9
1532
+ },
1533
+ {
1534
+ "type": "list",
1535
+ "sub_type": "ref_text",
1536
+ "list_items": [
1537
+ "[40] Shao, S., Li, Z., Zhang, T., Peng, C., Yu, G., Zhang, X., Li, J., Sun, J.: Objects365: A large-scale, high-quality dataset for object detection. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 8430-8439 (2019) 4",
1538
+ "[41] Singh, A., Natarajan, V., Shah, M., Jiang, Y., Chen, X., Batra, D., Parikh, D., Rohrbach, M.: Towards vqa models that can read. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8317-8326 (2019) 1",
1539
+ "[42] Taori, R., Gulrajani, I., Zhang, T., Dubois, Y., Li, X., Guestrin, C., Liang, P., Hashimoto, T.B.: Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca (2023) 3, 5, 6",
1540
+ "[43] Team, G., Anil, R., Borgeaud, S., Wu, Y., Alayrac, J.B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A.M., Hauth, A., et al.: Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023) 3",
1541
+ "[44] Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., Rodriguez, A., Joulin, A., Grave, E., Lample, G.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023) 3",
1542
+ "[45] Veit, A., Matera, T., Neumann, L., Matas, J., Belongie, S.: Coco-text: Dataset and benchmark for text detection and recognition in natural images. arXiv preprint arXiv:1601.07140 (2016) 1",
1543
+ "[46] Wang, P., Yang, A., Men, R., Lin, J., Bai, S., Li, Z., Ma, J., Zhou, C., Zhou, J., Yang, H.: Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In: International Conference on Machine Learning. pp. 23318-23340. PMLR (2022) 7",
1544
+ "[47] Wang, W., Chen, Z., Chen, X., Wu, J., Zhu, X., Zeng, G., Luo, P., Lu, T., Zhou, J., Qiao, Y., et al.: Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. arXiv preprint arXiv:2305.11175 (2023) 7",
1545
+ "[48] Wei, H., Kong, L., Chen, J., Zhao, L., Ge, Z., Yang, J., Sun, J., Han, C., Zhang, X.: Vary: Scaling up the vision vocabulary for large vision-language models. arXiv preprint arXiv:2312.06109 (2023) 1, 2, 3, 4, 6",
1546
+ "[49] Wei, H., Liu, C., Guo, P., Zhu, Y., Fu, J., Wang, B., Wang, P.: Corner affinity: A robust grouping algorithm to make corner-guided detector great again. In: Raedt, L.D. (ed.) Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22. pp. 1458–1464. International Joint Conferences on Artificial Intelligence Organization (7 2022). https://doi.org/10.24963/ijcai.2022/203, https://doi.org/10.24963/ijcai.2022/203, main Track 2",
1547
+ "[50] Xu, C., Guo, D., Duan, N., McAuley, J.: Baize: An open-source chat model with parameter-efficient tuning on self-chat data. arXiv preprint arXiv:2304.01196 (2023) 5, 6",
1548
+ "[51] Yang, Z., Gan, Z., Wang, J., Hu, X., Ahmed, F., Liu, Z., Lu, Y., Wang, L.: Unitab: Unifying text and box outputs for grounded vision-language modeling. In: European Conference on Computer Vision. pp. 521-539. Springer (2022) 7",
1549
+ "[52] Ye, J., Hu, A., Xu, H., Ye, Q., Yan, M., Dan, Y., Zhao, C., Xu, G., Li, C., Tian, J., et al.: mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499 (2023) 3, 6",
1550
+ "[53] Yu, E., Zhao, L., Wei, Y., Yang, J., Wu, D., Kong, L., Wei, H., Wang, T., Ge, Z., Zhang, X., et al.: Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589 (2023) 3",
1551
+ "[54] Yu, W., Yang, Z., Li, L., Wang, J., Lin, K., Liu, Z., Wang, X., Wang, L.: Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490 (2023) 2, 6, 8",
1552
+ "[55] Zeng, A., Liu, X., Du, Z., Wang, Z., Lai, H., Ding, M., Yang, Z., Xu, Y., Zheng, W., Xia, X., et al.: Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414 (2022) 3",
1553
+ "[56] Zhang, A., Zhao, L., Xie, C.W., Zheng, Y., Ji, W., Chua, T.S.: Next-chat: An lmm for chat, detection and segmentation. arXiv preprint arXiv:2311.04498 (2023) 7",
1554
+ "[57] Zhang, S., Roller, S., Goyal, N., Artetxe, M., Chen, M., Chen, S., Dewan, C., Diab, M., Li, X., Lin, X.V., et al.: Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022) 2, 3, 4",
1555
+ "[58] Zhao, L., Yu, E., Ge, Z., Yang, J., Wei, H., Zhou, H., Sun, J., Peng, Y., Dong, R., Han, C., et al.: Chatspot: Bootstrapping multimodal llms via precise referring instruction tuning. arXiv preprint arXiv:2307.09474 (2023) 3"
1556
+ ],
1557
+ "bbox": [
1558
+ 173,
1559
+ 90,
1560
+ 825,
1561
+ 911
1562
+ ],
1563
+ "page_idx": 10
1564
+ },
1565
+ {
1566
+ "type": "page_number",
1567
+ "text": "11",
1568
+ "bbox": [
1569
+ 490,
1570
+ 935,
1571
+ 506,
1572
+ 946
1573
+ ],
1574
+ "page_idx": 10
1575
+ },
1576
+ {
1577
+ "type": "list",
1578
+ "sub_type": "ref_text",
1579
+ "list_items": [
1580
+ "[59] Zhou, X., Zhuo, J., Krahenbuhl, P.: Bottom-up object detection by grouping extreme and center points. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 850-859 (2019) 2",
1581
+ "[60] Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023) 1, 3, 7, 8"
1582
+ ],
1583
+ "bbox": [
1584
+ 171,
1585
+ 90,
1586
+ 825,
1587
+ 154
1588
+ ],
1589
+ "page_idx": 11
1590
+ },
1591
+ {
1592
+ "type": "page_number",
1593
+ "text": "12",
1594
+ "bbox": [
1595
+ 490,
1596
+ 935,
1597
+ 508,
1598
+ 946
1599
+ ],
1600
+ "page_idx": 11
1601
+ }
1602
+ ]
2401.12xxx/2401.12503/4bc10b6c-537b-4aac-b190-2c35591d39a5_model.json ADDED
@@ -0,0 +1,2226 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.268,
8
+ 0.058,
9
+ 0.708
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2401.12503v1 [cs.CV] 23 Jan 2024"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.302,
18
+ 0.123,
19
+ 0.723,
20
+ 0.174
21
+ ],
22
+ "angle": 0,
23
+ "content": "Small Language Model Meets with Reinforced Vision Vocabulary"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.236,
29
+ 0.225,
30
+ 0.761,
31
+ 0.3
32
+ ],
33
+ "angle": 0,
34
+ "content": "Haoran Wei\\(^{1,*}\\) Lingyu Kong\\(^{2,*}\\) Jinyue Chen\\(^{2}\\) Liang Zhao\\(^{1}\\) \nZheng Ge\\(^{1\\dagger}\\) En Yu\\(^{3}\\) Jianjian Sun\\(^{1}\\) Chunrui Han\\(^{1}\\) Xiangyu Zhang\\(^{1}\\) \n\\(^{1}\\)MEGVII Technology University of Chinese Academy of Sciences \n\\(^{3}\\)Huazhong University of Science and Technology \nhttps://varytoy.github.io/"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.46,
40
+ 0.334,
41
+ 0.538,
42
+ 0.35
43
+ ],
44
+ "angle": 0,
45
+ "content": "Abstract"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.23,
51
+ 0.366,
52
+ 0.768,
53
+ 0.575
54
+ ],
55
+ "angle": 0,
56
+ "content": "Playing Large Vision Language Models (LVLMs) in 2023 is trendy among the AI community. However, the relatively large number of parameters (more than 7B) of popular LVLMs makes it difficult to train and deploy on consumer GPUs, discouraging many researchers with limited resources. Imagine how cool it would be to experience all the features of current LVLMs on an old GTX1080ti (our only game card). Accordingly, we present Vary-toy in this report, a small-size Vary along with Qwen-1.8B as the base \"large\" language model. In Vary-toy, we introduce an improved vision vocabulary, allowing the model to not only possess all features of Vary but also gather more generality. Specifically, we replace negative samples of natural images with positive sample data driven by object detection in the procedure of generating vision vocabulary, more sufficiently utilizing the capacity of the vocabulary network and enabling it to efficiently encode visual information corresponding to natural objects. For experiments, Vary-toy can achieve \\(65.6\\%\\) ANLS on DocVQA, \\(59.1\\%\\) accuracy on ChartQA, \\(88.1\\%\\) accuracy on RefCOCO, and \\(29\\%\\) on MMVet. The code will be publicly available on the homepage."
57
+ },
58
+ {
59
+ "type": "title",
60
+ "bbox": [
61
+ 0.172,
62
+ 0.601,
63
+ 0.314,
64
+ 0.617
65
+ ],
66
+ "angle": 0,
67
+ "content": "1 Introduction"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.17,
73
+ 0.633,
74
+ 0.828,
75
+ 0.745
76
+ ],
77
+ "angle": 0,
78
+ "content": "Large Vision Language Model (LVLM) is one of the hottest research topics [1, 22, 26, 34, 48, 60] in the field of artificial intelligence among the last year. The exciting part is that one LVLM can achieve satisfactory performance in many downstream tasks [4, 24, 30, 32, 41, 45] guided by different prompts. However, there is still significant room for improvement in LVLM's overall image perception capacity. Intuitively, an advanced perceptual ability for visual concepts is essential to enhance the further development and implementation of a model. We deem that there are two main challenges to achieve that: 1) the shortcomings of the current vision vocabulary network [35, 48] in extracting rich visual information; 2) the huge model iteration cost in the optimization of a large number of parameters."
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.17,
84
+ 0.75,
85
+ 0.828,
86
+ 0.862
87
+ ],
88
+ "angle": 0,
89
+ "content": "As aforementioned, current LVLMs demonstrate amazing ability in many tasks, especially the Computer Vision (CV) and Natural Language Processing (NLP) intersected ones (e.g., image caption [24], VQA [41], memes understanding, scene OCR [32], etc), based on the almost perfect vision vocabulary network — CLIP [35]. The structures of popular LVLMs can be divided into two main streams: 1) image tokens as prefixes like MetaLM [14]; 2) cross-attention for feature fusion like Flamingo [1]. Regardless of which structure is used, the upper limit of the model may be hindered by the visual signals encoding efficiency of its vision vocabulary network. To break through the potential bottleneck, Vary [48] introduces a simple and effective manner to scale up the vision"
90
+ },
91
+ {
92
+ "type": "page_footnote",
93
+ "bbox": [
94
+ 0.191,
95
+ 0.872,
96
+ 0.313,
97
+ 0.884
98
+ ],
99
+ "angle": 0,
100
+ "content": "*Equal contribution"
101
+ },
102
+ {
103
+ "type": "page_footnote",
104
+ "bbox": [
105
+ 0.193,
106
+ 0.885,
107
+ 0.285,
108
+ 0.899
109
+ ],
110
+ "angle": 0,
111
+ "content": "†Project leader"
112
+ },
113
+ {
114
+ "type": "footer",
115
+ "bbox": [
116
+ 0.172,
117
+ 0.923,
118
+ 0.25,
119
+ 0.937
120
+ ],
121
+ "angle": 0,
122
+ "content": "Tech Report"
123
+ }
124
+ ],
125
+ [
126
+ {
127
+ "type": "image",
128
+ "bbox": [
129
+ 0.177,
130
+ 0.089,
131
+ 0.821,
132
+ 0.439
133
+ ],
134
+ "angle": 0,
135
+ "content": null
136
+ },
137
+ {
138
+ "type": "image_caption",
139
+ "bbox": [
140
+ 0.171,
141
+ 0.451,
142
+ 0.828,
143
+ 0.509
144
+ ],
145
+ "angle": 0,
146
+ "content": "Figure 1: Features of Vary-toy. Based on a 1.8B language model, Vary-toy can achieve all features of vanilla Vary-base, including document OCR, image caption, VQA, general conversation, and so on. Besides, we introduce the natural object perception (location) ability for Vary-toy. Most importantly, with just only a single GTX1080ti GPU, you can experience all of the above."
147
+ },
148
+ {
149
+ "type": "text",
150
+ "bbox": [
151
+ 0.171,
152
+ 0.546,
153
+ 0.825,
154
+ 0.631
155
+ ],
156
+ "angle": 0,
157
+ "content": "vocabulary for an LVLM. The scaling law is to first train a new visual vocabulary network using a small auto-regressive model (OPT-125M [57]), and then merge the old and new vocabularies to form the final LVLM (Vary-base [48]). However, Vary suffers two drawbacks to being a user-friendly baseline: 1) The waste of network capacity in the new vision vocabulary (which in vanilla Vary is only used to compress text information in PDF images). 2) The Vary-base with 7B LLM takes high iteration costs (requiring multiple A100 machines to train)."
158
+ },
159
+ {
160
+ "type": "text",
161
+ "bbox": [
162
+ 0.171,
163
+ 0.636,
164
+ 0.827,
165
+ 0.761
166
+ ],
167
+ "angle": 0,
168
+ "content": "In this report, we present a small-size Vary, i.e., Vary-toy, to alleviate the aforementioned issues. Overall, Vary-toy enjoys the same pipeline as vanilla Vary, including a vision vocabulary generating and scaling up processes. Considering the original Vary masks natural images as negative samples during the creation of a new visual vocabulary. We believe this procedure, to some extent, wastes network capacity, leaving room for optimization. Instead, we regard the natural image as the object detection task [6, 19, 23, 37, 38, 49, 59]. Thus in processing the vision vocabulary, we incorporate both dense textual data (PDF) and natural object location data into the vocabulary network of Vary-toy, making it more universal. After completing the new and reinforced vocabulary, we merge it with the genuine \\((224\\times 224)\\) CLIP and then integrate them into a 1.8B language model [2]."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.171,
174
+ 0.767,
175
+ 0.827,
176
+ 0.837
177
+ ],
178
+ "angle": 0,
179
+ "content": "In experiments, we report metrics on several challenging benchmarks, i.e., DocVQA [30], ChartQA [29], MMvet [54], and RefCOCO [15]. Specifically, Vary-toy can achieve \\(65.6\\%\\) ANLS on DocVQA, \\(59.1\\%\\) accuracy on ChartQA, \\(29\\%\\) accuracy on MMvet, and \\(88.1\\%\\) accuracy on RefCOCO val. More specifically, it can gather on par performance compared to Qwen-VL-7B [3] on DocVQA and RefCOCO as well as a better accuracy than LLaVA-7B [26] on the general benchmark MMVet."
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.171,
185
+ 0.843,
186
+ 0.825,
187
+ 0.911
188
+ ],
189
+ "angle": 0,
190
+ "content": "In conclusion, Vary-toy is a toy because it is at least three times smaller compared to popular LVLMs \\((>7\\mathrm{B})\\). Vary-toy is not a toy due to its demonstrated excellent potential in challenging tasks. We believe that Vary-toy still enjoys many improvement rooms and we hope that our small-size LVLM can encourage more attention in corresponding research and become a practical baseline, especially for those researchers with limited resources."
191
+ },
192
+ {
193
+ "type": "page_number",
194
+ "bbox": [
195
+ 0.494,
196
+ 0.936,
197
+ 0.504,
198
+ 0.947
199
+ ],
200
+ "angle": 0,
201
+ "content": "2"
202
+ }
203
+ ],
204
+ [
205
+ {
206
+ "type": "image",
207
+ "bbox": [
208
+ 0.182,
209
+ 0.094,
210
+ 0.816,
211
+ 0.425
212
+ ],
213
+ "angle": 0,
214
+ "content": null
215
+ },
216
+ {
217
+ "type": "image_caption",
218
+ "bbox": [
219
+ 0.171,
220
+ 0.441,
221
+ 0.825,
222
+ 0.498
223
+ ],
224
+ "angle": 0,
225
+ "content": "Figure 2: Architecture of the Vary-toy. We utilize the Vary-tiny+ pipeline to generate the new vision vocabulary of Vary-toy. Such vision vocabulary can efficiently encode dense text and natural object location information into tokens. Based on the improved vocabulary, Vary-toy not only possesses all the previous features (document OCR) but also handles object detection tasks well."
226
+ },
227
+ {
228
+ "type": "title",
229
+ "bbox": [
230
+ 0.172,
231
+ 0.522,
232
+ 0.331,
233
+ 0.538
234
+ ],
235
+ "angle": 0,
236
+ "content": "2 Related Works"
237
+ },
238
+ {
239
+ "type": "text",
240
+ "bbox": [
241
+ 0.171,
242
+ 0.554,
243
+ 0.825,
244
+ 0.68
245
+ ],
246
+ "angle": 0,
247
+ "content": "Over the past years, Large Language Models (LLMs), such as the GPT family [5, 34, 36], LLaMA family [8, 42, 44], OPT [57], and the GLM family [55] gain significantly advanced performance in NLP tasks. With the help of LLMs' language reasoning abilities, Vision Language Models (VLMs) like Flamingo [1], BLIP2 [22], LLaVA [25, 26], Vary [48], etc [3, 12, 53, 58, 60] have achieved impressive results in various computer vision tasks such as image caption [24], VQA [4, 30, 32], image generation [12], visual grounding [3, 53, 60], document OCR [48] and so on. These models not only can follow human instructions but also possess remarkable few-shot and even zero-shot learning abilities, thereby driving the AI community toward the development of artificial general intelligence (AGI)."
248
+ },
249
+ {
250
+ "type": "text",
251
+ "bbox": [
252
+ 0.171,
253
+ 0.685,
254
+ 0.825,
255
+ 0.77
256
+ ],
257
+ "angle": 0,
258
+ "content": "However, most popular open-source VLMs are parameter-heavy, with sizes like 7B (e.g., Qwen-VL [3] and mPIUG-Owl [52]) or 13B [26], which to some extent hinder the participation of researchers with limited resources and poses challenges for the implementation of VLMs in resource-constrained environments like home computer. Recently, there has been a growing interest in and development of smaller language models, such as Phi-2 (2.7B) [31] and Qwen-1.8B [2] for NLP tasks, and Gemini-nano (1.8B/3.25B) [43], MobileVLM (1.4B/2.7B) [9] for vision-language tasks."
259
+ },
260
+ {
261
+ "type": "text",
262
+ "bbox": [
263
+ 0.171,
264
+ 0.775,
265
+ 0.825,
266
+ 0.804
267
+ ],
268
+ "angle": 0,
269
+ "content": "In this report, Vary-toy will be an open-source small model that possesses features of the most popular LVLMs and demonstrates exceptional potential in fine-grained perception tasks."
270
+ },
271
+ {
272
+ "type": "title",
273
+ "bbox": [
274
+ 0.172,
275
+ 0.824,
276
+ 0.273,
277
+ 0.84
278
+ ],
279
+ "angle": 0,
280
+ "content": "3 Method"
281
+ },
282
+ {
283
+ "type": "text",
284
+ "bbox": [
285
+ 0.171,
286
+ 0.856,
287
+ 0.825,
288
+ 0.913
289
+ ],
290
+ "angle": 0,
291
+ "content": "In this section, we will delve into the details of how to devise Vary-toy. As shown in Figure 2, there are two main parts in implementing the model: 1) how to generate a more practical vision vocabulary based on the Vary-tiny+ pipeline. 2) how to utilize the new vision vocabulary to make the 1.8B Vary-toy gather new features on the premise of not harming the original model features."
292
+ },
293
+ {
294
+ "type": "page_number",
295
+ "bbox": [
296
+ 0.494,
297
+ 0.936,
298
+ 0.504,
299
+ 0.948
300
+ ],
301
+ "angle": 0,
302
+ "content": "3"
303
+ }
304
+ ],
305
+ [
306
+ {
307
+ "type": "title",
308
+ "bbox": [
309
+ 0.172,
310
+ 0.092,
311
+ 0.643,
312
+ 0.108
313
+ ],
314
+ "angle": 0,
315
+ "content": "3.1 Generating A Reinforced Vision Vocabulary Upon Vary-tiny+"
316
+ },
317
+ {
318
+ "type": "text",
319
+ "bbox": [
320
+ 0.171,
321
+ 0.117,
322
+ 0.827,
323
+ 0.228
324
+ ],
325
+ "angle": 0,
326
+ "content": "Vary-tiny [48] is a tiny vision language model to generate a specific PDF-parsing vision vocabulary for Vary. The vision vocabulary network comprises a SAM-base [17] main body and paired convolutions to reshape the output, enjoying about 80M parameters. Experiments in Vary prove that using the SAM initializing to gain intensive text perception is effective. However, the vocabulary-generating procedure in vanilla Vary suffers the risk of forgetting SAM's original natural object perception ability. What's more, we also think that writing only the visual knowledge of dense text into an 80M network is wasteful. Thus we generate a new and more reasonable vision vocabulary upon the Vary-tiny+ pipeline."
327
+ },
328
+ {
329
+ "type": "title",
330
+ "bbox": [
331
+ 0.18,
332
+ 0.247,
333
+ 0.353,
334
+ 0.26
335
+ ],
336
+ "angle": 0,
337
+ "content": "Provide the OCR results of this image:"
338
+ },
339
+ {
340
+ "type": "image",
341
+ "bbox": [
342
+ 0.184,
343
+ 0.267,
344
+ 0.201,
345
+ 0.277
346
+ ],
347
+ "angle": 0,
348
+ "content": null
349
+ },
350
+ {
351
+ "type": "title",
352
+ "bbox": [
353
+ 0.244,
354
+ 0.269,
355
+ 0.313,
356
+ 0.278
357
+ ],
358
+ "angle": 0,
359
+ "content": "MARKETS AND STRATEGY"
360
+ },
361
+ {
362
+ "type": "text",
363
+ "bbox": [
364
+ 0.179,
365
+ 0.286,
366
+ 0.376,
367
+ 0.306
368
+ ],
369
+ "angle": 0,
370
+ "content": "have also taken up this practice. It can be a very successful way of introducing new products and services to existing customers, up-selling customers, or influencing them to purchase more products."
371
+ },
372
+ {
373
+ "type": "title",
374
+ "bbox": [
375
+ 0.18,
376
+ 0.312,
377
+ 0.247,
378
+ 0.32
379
+ ],
380
+ "angle": 0,
381
+ "content": "Loyalty Programs"
382
+ },
383
+ {
384
+ "type": "text",
385
+ "bbox": [
386
+ 0.18,
387
+ 0.321,
388
+ 0.376,
389
+ 0.37
390
+ ],
391
+ "angle": 0,
392
+ "content": "Many companies develop loyalty or frequency-marketing programs in order to further engage the consumers with their products and increase customer loyalty. These programs are very effective for targeting the company's most valuable customers. Most airlines develop frequent-fliter programs, which allow customers to earn points toward their next flight. Other businesses, such as coffee shops, also offer frequency cards, that entitle the customer to a free beverage, for example, after purchasing a certain number of beverages."
393
+ },
394
+ {
395
+ "type": "text",
396
+ "bbox": [
397
+ 0.18,
398
+ 0.37,
399
+ 0.376,
400
+ 0.424
401
+ ],
402
+ "angle": 0,
403
+ "content": "Loyalty programs have been very effective in generating repeat business. They offer an added value to the consumer, whereby the purchaser is not simply enjoying the value of the current purchase, but is being rewarded. It is important, however, that the loyalty program be relative to the product and service offering of the organization and that it should be a means of promoting the brand's importance or frustration if, with an airline ticket as an example, they are unable to redeem their ticket when they want to travel, or if the restrictions on the reward are so high that it is not worth the hassle of redemption."
404
+ },
405
+ {
406
+ "type": "title",
407
+ "bbox": [
408
+ 0.18,
409
+ 0.433,
410
+ 0.319,
411
+ 0.442
412
+ ],
413
+ "angle": 0,
414
+ "content": "PUBLIC RELATIONS AND PUBLICITY"
415
+ },
416
+ {
417
+ "type": "text",
418
+ "bbox": [
419
+ 0.18,
420
+ 0.447,
421
+ 0.376,
422
+ 0.507
423
+ ],
424
+ "angle": 0,
425
+ "content": "An organization's public relations and publicity activities are the means to foster its relationships with its various audiences and to communicate with them. Public relations efforts are undertaken in order to form a favorable view in the public eye. Favorable publicity can enhance an organization's image and increase demand for its products. A positive article or review about a product or service adds credibility, believability, and legitimacy in a much more effective manner than paid-for advertising. Negative publicity, on the other hand, can tarnish an organization's reputation. Most public relations strategies include press releases, special events, and press conferences."
426
+ },
427
+ {
428
+ "type": "text",
429
+ "bbox": [
430
+ 0.194,
431
+ 0.507,
432
+ 0.375,
433
+ 0.514
434
+ ],
435
+ "angle": 0,
436
+ "content": "Press releases are articles or brief news releases that are submitted"
437
+ },
438
+ {
439
+ "type": "title",
440
+ "bbox": [
441
+ 0.396,
442
+ 0.251,
443
+ 0.51,
444
+ 0.259
445
+ ],
446
+ "angle": 0,
447
+ "content": "184 MARKETS AND STRATEGY"
448
+ },
449
+ {
450
+ "type": "text",
451
+ "bbox": [
452
+ 0.396,
453
+ 0.259,
454
+ 0.611,
455
+ 0.286
456
+ ],
457
+ "angle": 0,
458
+ "content": "have also taken up this practice. It can be a very successful way of in-producing new products and services to existing customers, up-selling customers, or influencing them to purchase more products."
459
+ },
460
+ {
461
+ "type": "title",
462
+ "bbox": [
463
+ 0.397,
464
+ 0.286,
465
+ 0.458,
466
+ 0.293
467
+ ],
468
+ "angle": 0,
469
+ "content": "Loyalty Programs"
470
+ },
471
+ {
472
+ "type": "text",
473
+ "bbox": [
474
+ 0.396,
475
+ 0.293,
476
+ 0.61,
477
+ 0.354
478
+ ],
479
+ "angle": 0,
480
+ "content": "Many companies develop loyalty or frequency-marketing programs in order to further engage the consumers with their products and increase customer loyalty. These programs are very effective for targeting the company's most valuable customers. Most airlines develop frequent-flyer programs, which allow customers to earn points toward their next flight. Other businesses, such as coffee shops, also offer frequency cards, that entitle the customer to a free beverage, for example, after purchasing a certain number of beverages."
481
+ },
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.396,
486
+ 0.354,
487
+ 0.61,
488
+ 0.429
489
+ ],
490
+ "angle": 0,
491
+ "content": "Loyalty programs have been very effective in generating repeat business. They offer an added value to the consumer, whereby the purchaser is not simply enjoying the value of the current purchase, but is being rewarded. It is important, however, that the loyalty program be relative to the product and service offering of the organization and that the award be attainable. Customers may experience frustration if, with an airline ticket as an example, they are unable to redeem their ticket when they want to travel, or if the restrictions on the reward are so high that it is not worth the hassle of redemption. PUBLIC RELATIONS AND PUBLICITY"
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.396,
497
+ 0.43,
498
+ 0.608,
499
+ 0.52
500
+ ],
501
+ "angle": 0,
502
+ "content": "An organization's public relations and publicity activities are the means to foster its relationships with its various audiences and to communicate with them. Public relations efforts are undertaken in order to form a favorable view in the public eye. Favorable publicity can enhance an organization's image and increase demand for its products. A positive article or review about a product or service adds credibility, believability, and legitimacy in a much more effective manner than paid-for advertising. Negative publicity, on the other hand, can tarnish an organization's reputation. Most public relations strategies include press releases, special events, and press conferences. Press releases are articles or brief news releases that are submitted"
503
+ },
504
+ {
505
+ "type": "image",
506
+ "bbox": [
507
+ 0.634,
508
+ 0.247,
509
+ 0.648,
510
+ 0.259
511
+ ],
512
+ "angle": 0,
513
+ "content": null
514
+ },
515
+ {
516
+ "type": "title",
517
+ "bbox": [
518
+ 0.651,
519
+ 0.248,
520
+ 0.776,
521
+ 0.258
522
+ ],
523
+ "angle": 0,
524
+ "content": "Detect all objects in this image:"
525
+ },
526
+ {
527
+ "type": "image",
528
+ "bbox": [
529
+ 0.651,
530
+ 0.26,
531
+ 0.819,
532
+ 0.338
533
+ ],
534
+ "angle": 0,
535
+ "content": null
536
+ },
537
+ {
538
+ "type": "image_footnote",
539
+ "bbox": [
540
+ 0.681,
541
+ 0.342,
542
+ 0.787,
543
+ 0.378
544
+ ],
545
+ "angle": 0,
546
+ "content": "Person:[535,544,568,591]; Car:[009,552,058,737], [682,598,999,976], [910,558,999,600]; Bus:[044,070,913,909]"
547
+ },
548
+ {
549
+ "type": "image",
550
+ "bbox": [
551
+ 0.636,
552
+ 0.383,
553
+ 0.648,
554
+ 0.396
555
+ ],
556
+ "angle": 0,
557
+ "content": null
558
+ },
559
+ {
560
+ "type": "image_caption",
561
+ "bbox": [
562
+ 0.653,
563
+ 0.381,
564
+ 0.793,
565
+ 0.398
566
+ ],
567
+ "angle": 0,
568
+ "content": "Detect Tuba Gloves and Bow Tie in this image in this image:"
569
+ },
570
+ {
571
+ "type": "image",
572
+ "bbox": [
573
+ 0.654,
574
+ 0.401,
575
+ 0.818,
576
+ 0.49
577
+ ],
578
+ "angle": 0,
579
+ "content": null
580
+ },
581
+ {
582
+ "type": "image_footnote",
583
+ "bbox": [
584
+ 0.686,
585
+ 0.493,
586
+ 0.797,
587
+ 0.522
588
+ ],
589
+ "angle": 0,
590
+ "content": "Tuba: [512, 181, 971, 1000]; \nGloves: [703, 730, 782, 862]; \nBow Tie: [075, 590, 144, 630], \n[570, 491, 662, 562]."
591
+ },
592
+ {
593
+ "type": "image_caption",
594
+ "bbox": [
595
+ 0.171,
596
+ 0.544,
597
+ 0.825,
598
+ 0.601
599
+ ],
600
+ "angle": 0,
601
+ "content": "Figure 3: Visualization of image-text pairs used by Vary-tiny+. For PDF image-text pair, there is only one prompt, while for the object detection task, we utilize two types of prompts as shown in the right half of the figure because some images may have too many objects that exceed the maximum token length (4096) of the OPT125M after interpolation."
602
+ },
603
+ {
604
+ "type": "title",
605
+ "bbox": [
606
+ 0.172,
607
+ 0.619,
608
+ 0.314,
609
+ 0.635
610
+ ],
611
+ "angle": 0,
612
+ "content": "3.1.1 Data Engine"
613
+ },
614
+ {
615
+ "type": "text",
616
+ "bbox": [
617
+ 0.171,
618
+ 0.642,
619
+ 0.827,
620
+ 0.741
621
+ ],
622
+ "angle": 0,
623
+ "content": "PDF data. We prepare about 4M PDF image-text pairs in this stage. Following Vary, we use the PDF processing packages to extract the texts of each PDF page, which we find many Python packages can realize (e.g., pdfminer, pdfplumber, and fitz). Each page will be saved as a JPEG image and form an image-text pair with the corresponding text. In this way, we get 2M samples for English and 2M for Chinese. We use the sentence: \"Provide the OCR results of this image.\" as the prompt for both English and Chinese tasks. The PDFs are mainly from arXiv, CC-MAIN-2021-31-PDF-UNTRUNCATED, and e-books. Figure 3 shows a sample of the PDF image-pair."
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.171,
629
+ 0.746,
630
+ 0.828,
631
+ 0.913
632
+ ],
633
+ "angle": 0,
634
+ "content": "Object detection data. To fully utilize the capacity of the visual vocabulary network and obtain the natural image perception ability from SAM initialization, we introduce object detection data in the vision vocabulary generating process. We gather the samples from two large open-source datasets, i.e., Object365 [40] and OpenImage [18]. Due to the low efficiency of coordinate (number texts) encoding in OPT's [57] text tokenizer, for images with too many objects, the number of tokens in the ground truth may exceed the maximum token length supported by OPT-125M (although we interpolate it to 4096). Therefore, we re-organize the annotations into two tasks: 1) Object Detection: If there are no more than 30 object-boxes in the image, we will allow the Vary-tiny+ detect all objects with the prompt: \"Detect all objects in this image\". 2) REC: If the object-box number is over 30, we will regard this image as a REC task using a prompt template: \"Detect class1, class2, ..., in this image\". The selected classes are random so one image can be used multiple times. Through the above manner, we obtain approximately 3M of detection data. Some samples can be seen in Figure 3."
635
+ },
636
+ {
637
+ "type": "page_number",
638
+ "bbox": [
639
+ 0.494,
640
+ 0.936,
641
+ 0.505,
642
+ 0.948
643
+ ],
644
+ "angle": 0,
645
+ "content": "4"
646
+ }
647
+ ],
648
+ [
649
+ {
650
+ "type": "title",
651
+ "bbox": [
652
+ 0.172,
653
+ 0.092,
654
+ 0.321,
655
+ 0.106
656
+ ],
657
+ "angle": 0,
658
+ "content": "3.1.2 Input Format"
659
+ },
660
+ {
661
+ "type": "text",
662
+ "bbox": [
663
+ 0.171,
664
+ 0.116,
665
+ 0.827,
666
+ 0.213
667
+ ],
668
+ "angle": 0,
669
+ "content": "Different from the single input/output form of Vary-tiny, Vary-tiny+ needs various input formats to adapt to corresponding tasks due to it requires different prompts to guide the model output correct results. For simplicity, we use the template of Vicuna v1 [8] to construct all ground truth in a conversation format as USER: <img>\"image>\"</img> \"texts input\" ASSITANT: \"texts output\" </s>. We add the \"<img>\" and \"< img>\" as special tokens of the text tokenizer of OPT-125M and we find that it can adapt very well to the Vicuna template. For the vision input branch, we don't utilize any augmentations and only resize the image to a fixed resolution, i.e., \\(1024 \\times 1024\\)."
670
+ },
671
+ {
672
+ "type": "title",
673
+ "bbox": [
674
+ 0.172,
675
+ 0.231,
676
+ 0.453,
677
+ 0.246
678
+ ],
679
+ "angle": 0,
680
+ "content": "3.2 Forge the Cost-Effective Vary-Toy"
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.171,
686
+ 0.257,
687
+ 0.825,
688
+ 0.286
689
+ ],
690
+ "angle": 0,
691
+ "content": "In this section, we depict the design details of Vary-toy, mainly including the structure of the network and the data construction utilized in the pre-training and SFT stages."
692
+ },
693
+ {
694
+ "type": "title",
695
+ "bbox": [
696
+ 0.172,
697
+ 0.301,
698
+ 0.315,
699
+ 0.314
700
+ ],
701
+ "angle": 0,
702
+ "content": "3.2.1 Architecture"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.171,
708
+ 0.325,
709
+ 0.825,
710
+ 0.435
711
+ ],
712
+ "angle": 0,
713
+ "content": "As shown in Figure 2, we follow the Vary pipeline to devise the main body of Vary-toy but there are some minor differences. When fed an input image with a shape of \\(\\mathrm{H} \\times \\mathrm{W}\\), the new vision vocabulary branch will directly resize the image to \\(1024 \\times 1024\\), while the CLIP [35] branch gains a \\(224 \\times 224\\) image by the center crop. Both the two branches output 256 tokens with channels of 1024. The dimension of the Qwen-1.8B's input channel is also 2048, so the simplest manner is to concatenate the image tokens in two branches directly as the input image tokens of the language model. In terms of code implementation, to maintain consistency with the Vary structure, we still add input embedding layers behind the vision vocabulary networks."
714
+ },
715
+ {
716
+ "type": "table",
717
+ "bbox": [
718
+ 0.174,
719
+ 0.449,
720
+ 0.819,
721
+ 0.659
722
+ ],
723
+ "angle": 0,
724
+ "content": "<table><tr><td>Task</td><td>Dataset</td><td>Sample</td><td>A prompt example</td></tr><tr><td rowspan=\"2\">Cap.</td><td>Laion-COCO [39]</td><td>4M</td><td>Describe the content of this image in a sentence.</td></tr><tr><td>BLIP558k [26]</td><td>558K</td><td>Describe the image with one saying.</td></tr><tr><td rowspan=\"2\">PDF</td><td>Pure OCR</td><td>1M</td><td>Provide the OCR results of this image.</td></tr><tr><td>Markdown</td><td>500K</td><td>Convert the image to markdown format.</td></tr><tr><td rowspan=\"2\">Det.</td><td>COCO [24]</td><td>50K</td><td>Detect all objects in this image.</td></tr><tr><td>RefCOCO</td><td>train set</td><td>Detect an object: the left woman.</td></tr><tr><td rowspan=\"3\">NLP</td><td>ShareGPT</td><td>125K</td><td>Original conversation</td></tr><tr><td>Baize [50]</td><td>112K</td><td>Original conversation</td></tr><tr><td>Alpaca [42]</td><td>52K</td><td>Original conversation</td></tr><tr><td rowspan=\"2\">VQA</td><td>DocVQA [30]</td><td>train set</td><td>Question.AnAnswer using a single word or phrase.</td></tr><tr><td>ChartVQA [29]</td><td>train set</td><td>Question.AnAnswer using a single-word or phrase.</td></tr></table>"
725
+ },
726
+ {
727
+ "type": "table_caption",
728
+ "bbox": [
729
+ 0.171,
730
+ 0.667,
731
+ 0.825,
732
+ 0.709
733
+ ],
734
+ "angle": 0,
735
+ "content": "Table 1: Multi-task training data. We introduce 5 types of data in the pretrain stage, including weakly supervised pair data, PDF image-text pair data, detection data, pure text auto-regressive data, and VQA data. All data annotations are reorganized to a conversation format."
736
+ },
737
+ {
738
+ "type": "title",
739
+ "bbox": [
740
+ 0.172,
741
+ 0.742,
742
+ 0.312,
743
+ 0.756
744
+ ],
745
+ "angle": 0,
746
+ "content": "3.2.2 Data Details"
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.171,
752
+ 0.767,
753
+ 0.825,
754
+ 0.796
755
+ ],
756
+ "angle": 0,
757
+ "content": "Intuitively, the sensitivity of the 1.8B model to data quantity and ratio is higher than that of the 7B or above models, so we put more effort into the data processing aspect for Vary-toy."
758
+ },
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.171,
763
+ 0.801,
764
+ 0.827,
765
+ 0.913
766
+ ],
767
+ "angle": 0,
768
+ "content": "Pre-training & SFT data. For Vary-toy, the pretrain stage is actually a multi-task training stage, wherein we prepare a large amount of image-text pairs in various formats. As summarized in Table 1, we mainly focus on a total of 5 types of data in such stage, containing weakly annotated image caption, PDF dense OCR, object detection, pure text conversation, and VQA. Specifically, for natural images, we sample 4M image-text pair in the Laion-COCO [39] dataset, and we also use the BLIP-558K data proposed in LLaVA [26]. For PDF image-text pair, we prepare two types of data following Vary. One is pure dense text OCR, and the other is a task that converts the PDF image to a markdown format. The previous type of data is randomly sampled from the PDF data used in Vary-tiny+ and the last"
769
+ },
770
+ {
771
+ "type": "page_number",
772
+ "bbox": [
773
+ 0.494,
774
+ 0.936,
775
+ 0.505,
776
+ 0.948
777
+ ],
778
+ "angle": 0,
779
+ "content": "5"
780
+ }
781
+ ],
782
+ [
783
+ {
784
+ "type": "text",
785
+ "bbox": [
786
+ 0.17,
787
+ 0.092,
788
+ 0.825,
789
+ 0.218
790
+ ],
791
+ "angle": 0,
792
+ "content": "one is obtained via LaTeX rendering. Compared to vanilla Vary, we reduce the proportion of PDF data to maintain universal capability. For the detection data, we gather images from the COCO [24] dataset. We sample 50K images with fewer objects included for the pure object detection task and use all train data of RefCOCO for the REC task. We normalize the coordinates of each box and then magnify them to 1000 times. To prevent the language ability of the LLM from deteriorating, we also introduce pure NLP conversation data, including ShareGPT, Baize [50], and Alpaca [42]. For the last downstream VQA tasks, we choose two challenge datasets (DocVQA and ChartQA [29]) to monitor the text perception and reasoning performance of Vary-toy for artificial data. There are at least 10 prompts made through GPT3.5 [5] for each task, and Table 1 shows one example of them."
793
+ },
794
+ {
795
+ "type": "text",
796
+ "bbox": [
797
+ 0.171,
798
+ 0.223,
799
+ 0.827,
800
+ 0.253
801
+ ],
802
+ "angle": 0,
803
+ "content": "In the SFT stage, we only use the LLaVA-80K [26] to instruction tuning the model. LLaVA-80K is a dataset with detailed descriptions and prompts of various types of images, produced by GPT4 [26, 33]."
804
+ },
805
+ {
806
+ "type": "title",
807
+ "bbox": [
808
+ 0.172,
809
+ 0.265,
810
+ 0.317,
811
+ 0.279
812
+ ],
813
+ "angle": 0,
814
+ "content": "3.2.3 Data Format"
815
+ },
816
+ {
817
+ "type": "text",
818
+ "bbox": [
819
+ 0.171,
820
+ 0.289,
821
+ 0.825,
822
+ 0.36
823
+ ],
824
+ "angle": 0,
825
+ "content": "In Vary-toy, we are pleased to keep the Chinese PDF-parsing feature to some extent because there is very little exploration in this area, which is also one of the reasons that we select Qwen-1.8B [2] as our base language model (due to the relatively comprehensive text vocabulary). The data input to Qwen-1.8B follows the vanilla Vary [48] format. That is: <lim_start>user: <img>\"image></img>\"human prompts\"<lim_end> assistant: \"model outputs\"<lim_end>."
826
+ },
827
+ {
828
+ "type": "title",
829
+ "bbox": [
830
+ 0.171,
831
+ 0.378,
832
+ 0.315,
833
+ 0.396
834
+ ],
835
+ "angle": 0,
836
+ "content": "4 Experiments"
837
+ },
838
+ {
839
+ "type": "title",
840
+ "bbox": [
841
+ 0.172,
842
+ 0.408,
843
+ 0.348,
844
+ 0.422
845
+ ],
846
+ "angle": 0,
847
+ "content": "4.1 Evaluation Metrics"
848
+ },
849
+ {
850
+ "type": "text",
851
+ "bbox": [
852
+ 0.17,
853
+ 0.433,
854
+ 0.825,
855
+ 0.531
856
+ ],
857
+ "angle": 0,
858
+ "content": "We report the accuracy of Vary-toy on four popular and challenging benchmarks: DocVQA [30], ChartQA [29], RefCOCO [15], and MM Vet [54]. Wherein, the DocVQA and ChartQA can measure the text perception and reasoning ability of the model in manual images, RefCOCO can be used to test the model's ability to locate natural objects, while MM Vet, including 6 measurement areas, can be utilized to monitor the general ability of Vary-toy. We use the evaluation metrics introduced in their original paper for fair comparison. Specifically, we utilize ANLS, relaxed accuracy, accuracy under 0.5 IoU, and GPT4 scoring as the metrics for the above four datasets."
859
+ },
860
+ {
861
+ "type": "title",
862
+ "bbox": [
863
+ 0.172,
864
+ 0.547,
865
+ 0.377,
866
+ 0.562
867
+ ],
868
+ "angle": 0,
869
+ "content": "4.2 Implementation Details"
870
+ },
871
+ {
872
+ "type": "text",
873
+ "bbox": [
874
+ 0.17,
875
+ 0.572,
876
+ 0.825,
877
+ 0.629
878
+ ],
879
+ "angle": 0,
880
+ "content": "For Vary-tiny+, we unfreeze all the parameters and train the whole model with a batch size of 512 for 2 epochs. We select the AdamW [28] optimizer with a cosine annealing scheduler [27]. The initial learning rate is set to 5e-5 and the end is 0. It is worth noting that the Vary-tiny is initialized by the weights of Vary-tiny for faster convergence."
881
+ },
882
+ {
883
+ "type": "text",
884
+ "bbox": [
885
+ 0.17,
886
+ 0.634,
887
+ 0.825,
888
+ 0.691
889
+ ],
890
+ "angle": 0,
891
+ "content": "For Vary-toy, following vanilla Vary, we freeze all weights of two vision vocabulary networks and only optimize the parameters of the input embedding layers and language model (Qwen-1.8B). In the multi-task training (pre-training) stage, we set the start learning rate to be 5e-5 while it is set to 2e-5 in SFT. We train the model with a batch size of 512 for only 1 epoch in both two stages."
892
+ },
893
+ {
894
+ "type": "table",
895
+ "bbox": [
896
+ 0.216,
897
+ 0.701,
898
+ 0.776,
899
+ 0.844
900
+ ],
901
+ "angle": 0,
902
+ "content": "<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Size</td><td colspan=\"2\">DocVQA</td><td colspan=\"3\">ChartQA</td></tr><tr><td>val</td><td>test</td><td>human</td><td>augmented</td><td>Average</td></tr><tr><td>Dessurt [10]</td><td>-</td><td>46.5</td><td>63.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Donut [16]</td><td>-</td><td>-</td><td>67.5</td><td>-</td><td>-</td><td>41.8</td></tr><tr><td>Pix2Sturct [20]</td><td>-</td><td>-</td><td>72.1</td><td>30.5</td><td>81.6</td><td>56.0</td></tr><tr><td>mPLUG-DocOwl [52]</td><td>7B</td><td>62.2</td><td>-</td><td>-</td><td>-</td><td>57.4</td></tr><tr><td>Qwen-VL-chat [2]</td><td>7B</td><td>65.1</td><td>-</td><td>-</td><td>-</td><td>65.7</td></tr><tr><td>Vary-toy</td><td>1.8B</td><td>65.6</td><td>65.0</td><td>33.4</td><td>84.8</td><td>59.1</td></tr></table>"
903
+ },
904
+ {
905
+ "type": "table_caption",
906
+ "bbox": [
907
+ 0.17,
908
+ 0.851,
909
+ 0.825,
910
+ 0.893
911
+ ],
912
+ "angle": 0,
913
+ "content": "Table 2: Performance comparison to popular methods on DocVQA and ChartQA. Vary-toy can achieve \\(65.6\\%\\) ANLS on DocVQA which is on par with the 7B Qwen-VL-chat and \\(59.1\\%\\) accuracy on ChartQA which is higher than 7B-size mPLUG-DocOwl."
914
+ },
915
+ {
916
+ "type": "page_number",
917
+ "bbox": [
918
+ 0.494,
919
+ 0.937,
920
+ 0.506,
921
+ 0.948
922
+ ],
923
+ "angle": 0,
924
+ "content": "6"
925
+ }
926
+ ],
927
+ [
928
+ {
929
+ "type": "title",
930
+ "bbox": [
931
+ 0.172,
932
+ 0.092,
933
+ 0.475,
934
+ 0.108
935
+ ],
936
+ "angle": 0,
937
+ "content": "4.3 Manual Image Understanding Ability"
938
+ },
939
+ {
940
+ "type": "text",
941
+ "bbox": [
942
+ 0.17,
943
+ 0.117,
944
+ 0.827,
945
+ 0.215
946
+ ],
947
+ "angle": 0,
948
+ "content": "We evaluate the fine-grained text perception and reasoning ability via the DocVQA [30] and ChartQA [29]. As shown in Table 2, along with the only 1.8B language model, Vary-toy can achieve \\(65.6\\%\\) ANLS on DocVQA and \\(59.1\\%\\) accuracy on ChartQA. For DocVQA, the Vary-toy enjoys comparable performance to the 7B-size Qwen-VL-chat, proving the excellent document-level text perception ability of the model and also proving that the new vision vocabulary is available on tokenizing PDF images. For ChartQA, Vary-toy can achieve \\(59.1\\%\\) average accuracy, which is better than the 7B size mPLUG-DocOwl, demonstrating the effectiveness of our model further."
949
+ },
950
+ {
951
+ "type": "table",
952
+ "bbox": [
953
+ 0.268,
954
+ 0.226,
955
+ 0.726,
956
+ 0.43
957
+ ],
958
+ "angle": 0,
959
+ "content": "<table><tr><td rowspan=\"2\">Type</td><td rowspan=\"2\">Method</td><td rowspan=\"2\">Size</td><td colspan=\"3\">RefCOCO</td></tr><tr><td>val</td><td>testA</td><td>testB</td></tr><tr><td rowspan=\"4\">Traditional</td><td>OFA-L [46]</td><td>-</td><td>80.0</td><td>83.7</td><td>76.4</td></tr><tr><td>TransVG [11]</td><td>-</td><td>81.0</td><td>82.7</td><td>78.4</td></tr><tr><td>VILLA [13]</td><td>-</td><td>82.4</td><td>87.5</td><td>74.8</td></tr><tr><td>UniTAB [51]</td><td>-</td><td>86.3</td><td>88.8</td><td>80.6</td></tr><tr><td rowspan=\"5\">LLM-based</td><td>VisionLLM-H [47]</td><td>-</td><td>-</td><td>86.7</td><td>-</td></tr><tr><td>Shikra-7B [7]</td><td>7B</td><td>87.0</td><td>90.6</td><td>80.2</td></tr><tr><td>Shikra-13B [7]</td><td>13B</td><td>87.8</td><td>91.1</td><td>81.7</td></tr><tr><td>Qwen-VL-chat [2]</td><td>7B</td><td>88.6</td><td>92.3</td><td>84.5</td></tr><tr><td>Next-chat [56]</td><td>7B</td><td>85.5</td><td>90.0</td><td>77.9</td></tr><tr><td></td><td>Vary-toy</td><td>1.8B</td><td>88.1</td><td>90.6</td><td>85.7</td></tr></table>"
960
+ },
961
+ {
962
+ "type": "table_caption",
963
+ "bbox": [
964
+ 0.171,
965
+ 0.437,
966
+ 0.828,
967
+ 0.467
968
+ ],
969
+ "angle": 0,
970
+ "content": "Table 3: Comparison with popular methods on RefCOCO. Benefiting from the new vision vocabulary, Vary-toy can achieve \\(88.1\\%\\) accuracy on RefCOCO val, which is on par with the 7B Qwen-VL-chat."
971
+ },
972
+ {
973
+ "type": "title",
974
+ "bbox": [
975
+ 0.172,
976
+ 0.491,
977
+ 0.45,
978
+ 0.507
979
+ ],
980
+ "angle": 0,
981
+ "content": "4.4 Natural Object Perception Ability"
982
+ },
983
+ {
984
+ "type": "text",
985
+ "bbox": [
986
+ 0.17,
987
+ 0.517,
988
+ 0.825,
989
+ 0.587
990
+ ],
991
+ "angle": 0,
992
+ "content": "The vision vocabulary network generated by Vary-tiny+ should enjoy two main advanced perception abilities: one for dense text and the other for natural objects. In this part, We test the latter ability of Vary-toy after accessing the improved vision vocabulary. It is worth noting that a center crop operation processes the input image of the CLIP branch. Therefore, it can be ruled out that the model uses CLIP for object localization."
993
+ },
994
+ {
995
+ "type": "text",
996
+ "bbox": [
997
+ 0.17,
998
+ 0.592,
999
+ 0.827,
1000
+ 0.678
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "As shown in Table 3, Vary-toy can get \\(88.1\\%\\) accuracy@0.5 on the RefCOCO validation set, which is also on par with Qwen-VL-chat (7B) and even better than the Shikra-13B. The results show that under the knowledgeable vision vocabulary, Vary-toy gathers great natural object perception ability, proving the effectiveness of using the Vary-tiny+ architecture to build a vision vocabulary, allowing us to further reflect on the necessity of CLIP if we add a large amount of weakly labeled image caption data, e.g., Laion-400M [39], during the new vocabulary generating process."
1004
+ },
1005
+ {
1006
+ "type": "table",
1007
+ "bbox": [
1008
+ 0.232,
1009
+ 0.688,
1010
+ 0.759,
1011
+ 0.844
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"7\">MM-Vet</td></tr><tr><td>Rec</td><td>OCR</td><td>Know</td><td>Gen</td><td>Spat</td><td>Math</td><td>Total</td></tr><tr><td>BLIP-2 [22]</td><td>27.5</td><td>11.1</td><td>11.8</td><td>7.0</td><td>16.2</td><td>5.8</td><td>22.4</td></tr><tr><td>LLaVA-7B [26]</td><td>28.0</td><td>17.1</td><td>16.3</td><td>18.9</td><td>21.2</td><td>11.5</td><td>23.8</td></tr><tr><td>MiniGPT-4 [60]</td><td>29.9</td><td>16.1</td><td>20.4</td><td>22.1</td><td>22.2</td><td>3.8</td><td>24.4</td></tr><tr><td>Otter [21]</td><td>27.3</td><td>17.8</td><td>14.2</td><td>13.8</td><td>24.4</td><td>3.8</td><td>24.7</td></tr><tr><td>OpenFlamingo [1]</td><td>28.7</td><td>16.7</td><td>16.4</td><td>13.1</td><td>21.0</td><td>7.7</td><td>24.8</td></tr><tr><td>LLaVA1.5-7B [25]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>30.5</td></tr><tr><td>Vary-toy (1.8B)</td><td>33.4</td><td>20.3</td><td>19.9</td><td>17.5</td><td>24</td><td>10.8</td><td>29.0</td></tr></table>"
1015
+ },
1016
+ {
1017
+ "type": "table_caption",
1018
+ "bbox": [
1019
+ 0.17,
1020
+ 0.85,
1021
+ 0.828,
1022
+ 0.894
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "Table 4: Comparison with popular LVLMs on MMVet. With only a 1.8B language model, Varytoy can get a promising \\(29.0\\%\\) accuracy. The abbreviations represent Rec: Recognition; Know: Knowledge; Gen: Language generation; Spat: Spatial awareness."
1026
+ },
1027
+ {
1028
+ "type": "page_number",
1029
+ "bbox": [
1030
+ 0.494,
1031
+ 0.936,
1032
+ 0.506,
1033
+ 0.948
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "7"
1037
+ }
1038
+ ],
1039
+ [
1040
+ {
1041
+ "type": "title",
1042
+ "bbox": [
1043
+ 0.172,
1044
+ 0.092,
1045
+ 0.323,
1046
+ 0.108
1047
+ ],
1048
+ "angle": 0,
1049
+ "content": "4.5 General Ability"
1050
+ },
1051
+ {
1052
+ "type": "text",
1053
+ "bbox": [
1054
+ 0.171,
1055
+ 0.117,
1056
+ 0.83,
1057
+ 0.175
1058
+ ],
1059
+ "angle": 0,
1060
+ "content": "We report the accuracy of Vary-toy on MMVet [54] to test its general performance. As summarized in table 4, Vary-toy gains \\(29.0\\%\\) of the total accuracy, which is higher than the classical LLaVA-7B [26] \\((23.8\\%)\\), BLIP-2 [22], and MiniGPT-4 [60], demonstrating Vary-toy enjoys excellent general knowledge. For a 1.8B model, the generation ability is relatively poor and needs to be strengthened."
1061
+ },
1062
+ {
1063
+ "type": "image",
1064
+ "bbox": [
1065
+ 0.195,
1066
+ 0.193,
1067
+ 0.212,
1068
+ 0.211
1069
+ ],
1070
+ "angle": 0,
1071
+ "content": null
1072
+ },
1073
+ {
1074
+ "type": "title",
1075
+ "bbox": [
1076
+ 0.215,
1077
+ 0.196,
1078
+ 0.39,
1079
+ 0.209
1080
+ ],
1081
+ "angle": 0,
1082
+ "content": "What is funny about this image?"
1083
+ },
1084
+ {
1085
+ "type": "image",
1086
+ "bbox": [
1087
+ 0.202,
1088
+ 0.215,
1089
+ 0.334,
1090
+ 0.366
1091
+ ],
1092
+ "angle": 0,
1093
+ "content": null
1094
+ },
1095
+ {
1096
+ "type": "text",
1097
+ "bbox": [
1098
+ 0.342,
1099
+ 0.221,
1100
+ 0.508,
1101
+ 0.361
1102
+ ],
1103
+ "angle": 0,
1104
+ "content": "The funny aspect of this image lies in the fire hydrant painted with a smiling face, which gives it an unconventional and whimsical appearance. The cute drawing on the red fire hydrant attracts attention and can be considered a creative expression or an artistic statement. In addition, there's a car parked nearby, emphasizing the urban setting and reinforcing the idea that the image might have been taken in a city environment where such decorative objects and artistic installations are more common."
1105
+ },
1106
+ {
1107
+ "type": "image",
1108
+ "bbox": [
1109
+ 0.536,
1110
+ 0.194,
1111
+ 0.553,
1112
+ 0.211
1113
+ ],
1114
+ "angle": 0,
1115
+ "content": null
1116
+ },
1117
+ {
1118
+ "type": "title",
1119
+ "bbox": [
1120
+ 0.554,
1121
+ 0.196,
1122
+ 0.724,
1123
+ 0.209
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "Detect all objects in this image:"
1127
+ },
1128
+ {
1129
+ "type": "image",
1130
+ "bbox": [
1131
+ 0.539,
1132
+ 0.215,
1133
+ 0.798,
1134
+ 0.366
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": null
1138
+ },
1139
+ {
1140
+ "type": "image",
1141
+ "bbox": [
1142
+ 0.196,
1143
+ 0.38,
1144
+ 0.212,
1145
+ 0.396
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": null
1149
+ },
1150
+ {
1151
+ "type": "title",
1152
+ "bbox": [
1153
+ 0.218,
1154
+ 0.383,
1155
+ 0.424,
1156
+ 0.395
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "Provide the OCR results of this image."
1160
+ },
1161
+ {
1162
+ "type": "image",
1163
+ "bbox": [
1164
+ 0.476,
1165
+ 0.38,
1166
+ 0.499,
1167
+ 0.398
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": null
1171
+ },
1172
+ {
1173
+ "type": "image",
1174
+ "bbox": [
1175
+ 0.211,
1176
+ 0.426,
1177
+ 0.245,
1178
+ 0.452
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": null
1182
+ },
1183
+ {
1184
+ "type": "text",
1185
+ "bbox": [
1186
+ 0.247,
1187
+ 0.441,
1188
+ 0.483,
1189
+ 0.451
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "ou are cordially invited to join us for the dramatic retum of Pagliacci , Le Prophète andLubéric . Each one ofBilli Boudi , Don Cardi , and Orlyl . Each one is given"
1193
+ },
1194
+ {
1195
+ "type": "text",
1196
+ "bbox": [
1197
+ 0.211,
1198
+ 0.453,
1199
+ 0.481,
1200
+ 0.474
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "the Metropolitan Opera's first production of Kurt Weill and Bertolt Brech's twentieth century masterpiece, Rise and Fall of the City of Mahogany. And a new production of Mozart's delightful Abduction from the Seraglio."
1204
+ },
1205
+ {
1206
+ "type": "text",
1207
+ "bbox": [
1208
+ 0.211,
1209
+ 0.477,
1210
+ 0.483,
1211
+ 0.505
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "A trial subscription for the 1979-90 season at The Met gives you a chance to appreciate some of the many advantages enjoyed by our regular Subscribers. Choose any one of the twenty-three mini-series described inside and you'll enjoy the same seats for each performance. Your tickets will be sent to you in August, well in advance of box office lines and sold out notices."
1215
+ },
1216
+ {
1217
+ "type": "text",
1218
+ "bbox": [
1219
+ 0.211,
1220
+ 0.506,
1221
+ 0.261,
1222
+ 0.512
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "Next"
1226
+ },
1227
+ {
1228
+ "type": "text",
1229
+ "bbox": [
1230
+ 0.211,
1231
+ 0.513,
1232
+ 0.28,
1233
+ 0.551
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": "season, we hope your trial series experience will have convinced you that subscribing is the best way to be a part of the Metropolitan."
1237
+ },
1238
+ {
1239
+ "type": "text",
1240
+ "bbox": [
1241
+ 0.211,
1242
+ 0.553,
1243
+ 0.276,
1244
+ 0.577
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": "Opening Night is just a summer away. Won't you take a few moments now to"
1248
+ },
1249
+ {
1250
+ "type": "text",
1251
+ "bbox": [
1252
+ 0.211,
1253
+ 0.578,
1254
+ 0.483,
1255
+ 0.59
1256
+ ],
1257
+ "angle": 0,
1258
+ "content": "guarantee your series and seating for the glorious season ahead? It's a season that promises grand opera performed by some of the world's greatest artists. We hope you'll be with us."
1259
+ },
1260
+ {
1261
+ "type": "text",
1262
+ "bbox": [
1263
+ 0.211,
1264
+ 0.591,
1265
+ 0.484,
1266
+ 0.612
1267
+ ],
1268
+ "angle": 0,
1269
+ "content": "P. S. If you are already a Met Subscriber, you may want to use a mini-series for operas not on your regular subscription. Or perhaps you'd like to give this brochure to a friend who may be interested in this introductory offer."
1270
+ },
1271
+ {
1272
+ "type": "image",
1273
+ "bbox": [
1274
+ 0.284,
1275
+ 0.512,
1276
+ 0.484,
1277
+ 0.57
1278
+ ],
1279
+ "angle": 0,
1280
+ "content": null
1281
+ },
1282
+ {
1283
+ "type": "image_caption",
1284
+ "bbox": [
1285
+ 0.404,
1286
+ 0.571,
1287
+ 0.484,
1288
+ 0.575
1289
+ ],
1290
+ "angle": 0,
1291
+ "content": "Ocillo Set: Franco Zefendi, Costamco: Peter J. Hall"
1292
+ },
1293
+ {
1294
+ "type": "text",
1295
+ "bbox": [
1296
+ 0.505,
1297
+ 0.394,
1298
+ 0.791,
1299
+ 0.458
1300
+ ],
1301
+ "angle": 0,
1302
+ "content": "you are cordially invited to join us for the dramatic return of Pagliazzi, Le Prophets and Lohengrin. For the power of Billy Budd, Don Carlo and Otello. For the excitement of the Metropolitan Opera's first production of Kurt Weill and Bertolt Brecht's twentieth century masterpiece, Rise and Fall of the City of Mahagony. And a new production of Mozart's delightful Abduction from the Seraglio."
1303
+ },
1304
+ {
1305
+ "type": "text",
1306
+ "bbox": [
1307
+ 0.505,
1308
+ 0.459,
1309
+ 0.796,
1310
+ 0.505
1311
+ ],
1312
+ "angle": 0,
1313
+ "content": "At trial subscription for the 1979-80 season at The Met gives you a chance to appreciate some of the many advantages enjoyed by our regular Subscribers. Choose any one of the twenty-three mini-series described inside and you'll enjoy the same seats for each performance."
1314
+ },
1315
+ {
1316
+ "type": "text",
1317
+ "bbox": [
1318
+ 0.506,
1319
+ 0.506,
1320
+ 0.79,
1321
+ 0.522
1322
+ ],
1323
+ "angle": 0,
1324
+ "content": "Your tickets will be sent to you in August, well in advance of box office lines and sold out notices."
1325
+ },
1326
+ {
1327
+ "type": "text",
1328
+ "bbox": [
1329
+ 0.506,
1330
+ 0.523,
1331
+ 0.73,
1332
+ 0.532
1333
+ ],
1334
+ "angle": 0,
1335
+ "content": "Next season, we hope your trial series experience"
1336
+ },
1337
+ {
1338
+ "type": "text",
1339
+ "bbox": [
1340
+ 0.506,
1341
+ 0.533,
1342
+ 0.787,
1343
+ 0.55
1344
+ ],
1345
+ "angle": 0,
1346
+ "content": "will have convinced you that subscribing is the best way to be a part of the Metropolitan,"
1347
+ },
1348
+ {
1349
+ "type": "text",
1350
+ "bbox": [
1351
+ 0.507,
1352
+ 0.551,
1353
+ 0.647,
1354
+ 0.56
1355
+ ],
1356
+ "angle": 0,
1357
+ "content": "Opening Night is just a summer"
1358
+ },
1359
+ {
1360
+ "type": "text",
1361
+ "bbox": [
1362
+ 0.506,
1363
+ 0.561,
1364
+ 0.787,
1365
+ 0.597
1366
+ ],
1367
+ "angle": 0,
1368
+ "content": "away. Won't you take a few moments now to guarantee your series and seating for the glorious season ahead? It's a season that promises grand opera performed by some of the world's greatest artists. We hope you'll be with us."
1369
+ },
1370
+ {
1371
+ "type": "text",
1372
+ "bbox": [
1373
+ 0.506,
1374
+ 0.598,
1375
+ 0.79,
1376
+ 0.615
1377
+ ],
1378
+ "angle": 0,
1379
+ "content": "P.S. If you are already a Met Subscriber, you may want to use a mini-series for operas not on your regular subscription."
1380
+ },
1381
+ {
1382
+ "type": "text",
1383
+ "bbox": [
1384
+ 0.506,
1385
+ 0.616,
1386
+ 0.785,
1387
+ 0.633
1388
+ ],
1389
+ "angle": 0,
1390
+ "content": "Or perhaps you'd like to give this brochure to a friend who may be interested in this introductory offer."
1391
+ },
1392
+ {
1393
+ "type": "image_caption",
1394
+ "bbox": [
1395
+ 0.171,
1396
+ 0.657,
1397
+ 0.825,
1398
+ 0.687
1399
+ ],
1400
+ "angle": 0,
1401
+ "content": "Figure 4: Visualization of high-quality results of our model in four common fields. We can see that Vary-toy has satisfactory general ability and enjoys strong text and object perception abilities."
1402
+ },
1403
+ {
1404
+ "type": "title",
1405
+ "bbox": [
1406
+ 0.172,
1407
+ 0.698,
1408
+ 0.307,
1409
+ 0.711
1410
+ ],
1411
+ "angle": 0,
1412
+ "content": "4.6 Visualization"
1413
+ },
1414
+ {
1415
+ "type": "text",
1416
+ "bbox": [
1417
+ 0.171,
1418
+ 0.723,
1419
+ 0.828,
1420
+ 0.766
1421
+ ],
1422
+ "angle": 0,
1423
+ "content": "Figure 4 shows high-quality results of Vary-toy on four different downstream fields. We can see that the model enjoys good vision concept understanding and localization capacities, indicating that a reinforced vision vocabulary with a small language model can also perform well in multimodal tasks."
1424
+ },
1425
+ {
1426
+ "type": "title",
1427
+ "bbox": [
1428
+ 0.172,
1429
+ 0.784,
1430
+ 0.303,
1431
+ 0.8
1432
+ ],
1433
+ "angle": 0,
1434
+ "content": "5 Conclusion"
1435
+ },
1436
+ {
1437
+ "type": "text",
1438
+ "bbox": [
1439
+ 0.171,
1440
+ 0.815,
1441
+ 0.829,
1442
+ 0.913
1443
+ ],
1444
+ "angle": 0,
1445
+ "content": "In this report, we propose a small LVLM — Vary-toy, which can be deployed on a GTX1080ti GPU and enjoys fine performance in many downstream tasks. What's more, we generate a new and more comprehensive vision vocabulary for the presented model, which is the key to the success of Vary-toy. We hope the promising and user-friendly Vary-toy can become a new baseline in such fields as well as draw more attention to LVLM, especially for researchers who previously lacked computing resources. We also encourage researchers to use our reinforced vision vocabulary for more downstream tasks. Finally, we firmly confirm that the Vary-toy will evolve beyond just a toy."
1446
+ },
1447
+ {
1448
+ "type": "page_number",
1449
+ "bbox": [
1450
+ 0.494,
1451
+ 0.936,
1452
+ 0.505,
1453
+ 0.948
1454
+ ],
1455
+ "angle": 0,
1456
+ "content": "8"
1457
+ }
1458
+ ],
1459
+ [
1460
+ {
1461
+ "type": "title",
1462
+ "bbox": [
1463
+ 0.174,
1464
+ 0.09,
1465
+ 0.27,
1466
+ 0.107
1467
+ ],
1468
+ "angle": 0,
1469
+ "content": "References"
1470
+ },
1471
+ {
1472
+ "type": "ref_text",
1473
+ "bbox": [
1474
+ 0.182,
1475
+ 0.113,
1476
+ 0.826,
1477
+ 0.178
1478
+ ],
1479
+ "angle": 0,
1480
+ "content": "[1] Alayrac, J., Donahue, J., Luc, P., Miech, A., Barr, I., Hasson, Y., Lenc, K., Mensch, A., Millican, K., Reynolds, M., Ring, R., Rutherford, E., Cabi, S., Han, T., Gong, Z., Samangooei, S., Monteiro, M., Menick, J.L., Borgeaud, S., Brock, A., Nematzadeh, A., Sharifzadeh, S., Binkowski, M., Barreira, R., Vinyals, O., Zisserman, A., Simonyan, K.: Flamingo: a visual language model for few-shot learning. In: NeurIPS (2022) 1, 3, 7"
1481
+ },
1482
+ {
1483
+ "type": "ref_text",
1484
+ "bbox": [
1485
+ 0.182,
1486
+ 0.187,
1487
+ 0.827,
1488
+ 0.253
1489
+ ],
1490
+ "angle": 0,
1491
+ "content": "[2] Bai, J., Bai, S., Chu, Y., Cui, Z., Dang, K., Deng, X., Fan, Y., Ge, W., Han, Y., Huang, F., Hui, B., Ji, L., Li, M., Lin, J., Lin, R., Liu, D., Liu, G., Lu, C., Lu, K., Ma, J., Men, R., Ren, X., Ren, X., Tan, C., Tan, S., Tu, J., Wang, P., Wang, S., Wang, W., Wu, S., Xu, B., Xu, J., Yang, A., Yang, H., Yang, J., Yang, S., Yao, Y., Yu, B., Yuan, H., Yuan, Z., Zhang, J., Zhang, X., Zhang, Y., Zhang, Z., Zhou, C., Zhou, J., Zhou, X., Zhu, T.: Qwen technical report. arXiv preprint arXiv:2309.16609 (2023) 2, 3, 6, 7"
1492
+ },
1493
+ {
1494
+ "type": "ref_text",
1495
+ "bbox": [
1496
+ 0.182,
1497
+ 0.261,
1498
+ 0.826,
1499
+ 0.301
1500
+ ],
1501
+ "angle": 0,
1502
+ "content": "[3] Bai, J., Bai, S., Yang, S., Wang, S., Tan, S., Wang, P., Lin, J., Zhou, C., Zhou, J.: Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966 (2023) 2, 3"
1503
+ },
1504
+ {
1505
+ "type": "ref_text",
1506
+ "bbox": [
1507
+ 0.182,
1508
+ 0.311,
1509
+ 0.826,
1510
+ 0.351
1511
+ ],
1512
+ "angle": 0,
1513
+ "content": "[4] Biten, A.F., Litman, R., Xie, Y., Appalaraju, S., Manmatha, R.: Latr: Layout-aware transformer for scene-text vqa. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 16548-16558 (2022) 1, 3"
1514
+ },
1515
+ {
1516
+ "type": "ref_text",
1517
+ "bbox": [
1518
+ 0.182,
1519
+ 0.36,
1520
+ 0.826,
1521
+ 0.4
1522
+ ],
1523
+ "angle": 0,
1524
+ "content": "[5] Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020) 3, 6"
1525
+ },
1526
+ {
1527
+ "type": "ref_text",
1528
+ "bbox": [
1529
+ 0.182,
1530
+ 0.409,
1531
+ 0.826,
1532
+ 0.449
1533
+ ],
1534
+ "angle": 0,
1535
+ "content": "[6] Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 213-229. Springer (2020) 2"
1536
+ },
1537
+ {
1538
+ "type": "ref_text",
1539
+ "bbox": [
1540
+ 0.182,
1541
+ 0.457,
1542
+ 0.826,
1543
+ 0.484
1544
+ ],
1545
+ "angle": 0,
1546
+ "content": "[7] Chen, K., Zhang, Z., Zeng, W., Zhang, R., Zhu, F., Zhao, R.: Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195 (2023) 7"
1547
+ },
1548
+ {
1549
+ "type": "ref_text",
1550
+ "bbox": [
1551
+ 0.182,
1552
+ 0.494,
1553
+ 0.826,
1554
+ 0.534
1555
+ ],
1556
+ "angle": 0,
1557
+ "content": "[8] Chiang, W.L., Li, Z., Lin, Z., Sheng, Y., Wu, Z., Zhang, H., Zheng, L., Zhuang, S., Zhuang, Y., Gonzalez, J.E., Stoica, I., Xing, E.P.: Vicuna: An open-source chatbot impressing gpt-4 with \\(90\\%\\) * chatgpt quality. https://lmsys.org/blog/2023-03-30-vicuna/ (2023) 3, 5"
1558
+ },
1559
+ {
1560
+ "type": "ref_text",
1561
+ "bbox": [
1562
+ 0.182,
1563
+ 0.543,
1564
+ 0.826,
1565
+ 0.571
1566
+ ],
1567
+ "angle": 0,
1568
+ "content": "[9] Chu, X., Qiao, L., Lin, X., Xu, S., Yang, Y., Hu, Y., Wei, F., Zhang, X., Zhang, B., Wei, X., Shen, C.: Mobilevlm: A fast, strong and open vision language assistant for mobile devices (2023) 3"
1569
+ },
1570
+ {
1571
+ "type": "ref_text",
1572
+ "bbox": [
1573
+ 0.174,
1574
+ 0.58,
1575
+ 0.826,
1576
+ 0.619
1577
+ ],
1578
+ "angle": 0,
1579
+ "content": "[10] Davis, B., Morse, B., Price, B., Tensmeyer, C., Wigington, C., Morariu, V.: End-to-end document recognition and understanding with dessurt. In: European Conference on Computer Vision. pp. 280-296. Springer (2022) 6"
1580
+ },
1581
+ {
1582
+ "type": "ref_text",
1583
+ "bbox": [
1584
+ 0.174,
1585
+ 0.629,
1586
+ 0.826,
1587
+ 0.656
1588
+ ],
1589
+ "angle": 0,
1590
+ "content": "[11] Deng, J., Yang, Z., Chen, T., Zhou, W., Li, H.: Transvg: End-to-end visual grounding with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1769-1779 (2021) 7"
1591
+ },
1592
+ {
1593
+ "type": "ref_text",
1594
+ "bbox": [
1595
+ 0.174,
1596
+ 0.665,
1597
+ 0.826,
1598
+ 0.693
1599
+ ],
1600
+ "angle": 0,
1601
+ "content": "[12] Dong, R., Han, C., Peng, Y., Qi, Z., Ge, Z., Yang, J., Zhao, L., Sun, J., Zhou, H., Wei, H., et al.: Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499 (2023) 3"
1602
+ },
1603
+ {
1604
+ "type": "ref_text",
1605
+ "bbox": [
1606
+ 0.174,
1607
+ 0.702,
1608
+ 0.826,
1609
+ 0.741
1610
+ ],
1611
+ "angle": 0,
1612
+ "content": "[13] Gan, Z., Chen, Y.C., Li, L., Zhu, C., Cheng, Y., Liu, J.: Large-scale adversarial training for vision-and-language representation learning. Advances in Neural Information Processing Systems 33, 6616-6628 (2020) 7"
1613
+ },
1614
+ {
1615
+ "type": "ref_text",
1616
+ "bbox": [
1617
+ 0.174,
1618
+ 0.75,
1619
+ 0.826,
1620
+ 0.778
1621
+ ],
1622
+ "angle": 0,
1623
+ "content": "[14] Hao, Y., Song, H., Dong, L., Huang, S., Chi, Z., Wang, W., Ma, S., Wei, F.: Language models are general-purpose interfaces. arXiv preprint arXiv:2206.06336 (2022) 1"
1624
+ },
1625
+ {
1626
+ "type": "ref_text",
1627
+ "bbox": [
1628
+ 0.174,
1629
+ 0.787,
1630
+ 0.826,
1631
+ 0.827
1632
+ ],
1633
+ "angle": 0,
1634
+ "content": "[15] Kazemzadeh, S., Ordonez, V., Matten, M., Berg, T.: Referitgame: Referring to objects in photographs of natural scenes. In: Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP). pp. 787-798 (2014) 2, 6"
1635
+ },
1636
+ {
1637
+ "type": "ref_text",
1638
+ "bbox": [
1639
+ 0.174,
1640
+ 0.836,
1641
+ 0.826,
1642
+ 0.875
1643
+ ],
1644
+ "angle": 0,
1645
+ "content": "[16] Kim, G., Hong, T., Yim, M., Nam, J., Park, J., Yim, J., Hwang, W., Yun, S., Han, D., Park, S.: Ocr-free document understanding transformer. In: European Conference on Computer Vision. pp. 498-517. Springer (2022) 6"
1646
+ },
1647
+ {
1648
+ "type": "ref_text",
1649
+ "bbox": [
1650
+ 0.174,
1651
+ 0.885,
1652
+ 0.826,
1653
+ 0.913
1654
+ ],
1655
+ "angle": 0,
1656
+ "content": "[17] Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) 4"
1657
+ },
1658
+ {
1659
+ "type": "list",
1660
+ "bbox": [
1661
+ 0.174,
1662
+ 0.113,
1663
+ 0.827,
1664
+ 0.913
1665
+ ],
1666
+ "angle": 0,
1667
+ "content": null
1668
+ },
1669
+ {
1670
+ "type": "page_number",
1671
+ "bbox": [
1672
+ 0.494,
1673
+ 0.936,
1674
+ 0.505,
1675
+ 0.948
1676
+ ],
1677
+ "angle": 0,
1678
+ "content": "9"
1679
+ }
1680
+ ],
1681
+ [
1682
+ {
1683
+ "type": "ref_text",
1684
+ "bbox": [
1685
+ 0.174,
1686
+ 0.092,
1687
+ 0.828,
1688
+ 0.142
1689
+ ],
1690
+ "angle": 0,
1691
+ "content": "[18] Kuznetsova, A., Rom, H., Alldrin, N., Uijlings, J., Krasin, I., Pont-Tuset, J., Kamali, S., Popov, S., Malloci, M., Kolesnikov, A., et al.: The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision 128(7), 1956–1981 (2020) 4"
1692
+ },
1693
+ {
1694
+ "type": "ref_text",
1695
+ "bbox": [
1696
+ 0.174,
1697
+ 0.153,
1698
+ 0.826,
1699
+ 0.181
1700
+ ],
1701
+ "angle": 0,
1702
+ "content": "[19] Law, H., Deng, J.: Cornernet: Detecting objects as paired keypoints. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 734-750 (2018) 2"
1703
+ },
1704
+ {
1705
+ "type": "ref_text",
1706
+ "bbox": [
1707
+ 0.174,
1708
+ 0.188,
1709
+ 0.827,
1710
+ 0.227
1711
+ ],
1712
+ "angle": 0,
1713
+ "content": "[20] Lee, K., Joshi, M., Turc, I.R., Hu, H., Liu, F., Eisenschlos, J.M., Khandelwal, U., Shaw, P., Chang, M.W., Toutanova, K.: Pix2struct: Screenshot parsing as pretraining for visual language understanding. In: International Conference on Machine Learning. pp. 18893-18912. PMLR (2023) 6"
1714
+ },
1715
+ {
1716
+ "type": "ref_text",
1717
+ "bbox": [
1718
+ 0.174,
1719
+ 0.236,
1720
+ 0.825,
1721
+ 0.263
1722
+ ],
1723
+ "angle": 0,
1724
+ "content": "[21] Li, B., Zhang, Y., Chen, L., Wang, J., Yang, J., Liu, Z.: Otter: A multi-modal model with in-context instruction tuning. arXiv preprint arXiv:2305.03726 (2023) 7"
1725
+ },
1726
+ {
1727
+ "type": "ref_text",
1728
+ "bbox": [
1729
+ 0.174,
1730
+ 0.272,
1731
+ 0.825,
1732
+ 0.299
1733
+ ],
1734
+ "angle": 0,
1735
+ "content": "[22] Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023) 1, 3, 7, 8"
1736
+ },
1737
+ {
1738
+ "type": "ref_text",
1739
+ "bbox": [
1740
+ 0.174,
1741
+ 0.307,
1742
+ 0.825,
1743
+ 0.334
1744
+ ],
1745
+ "angle": 0,
1746
+ "content": "[23] Lin, T.Y., Goyal, P., Girshick, R., He, K., Dólár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE international conference on computer vision. pp. 2980-2988 (2017) 2"
1747
+ },
1748
+ {
1749
+ "type": "ref_text",
1750
+ "bbox": [
1751
+ 0.174,
1752
+ 0.343,
1753
+ 0.825,
1754
+ 0.37
1755
+ ],
1756
+ "angle": 0,
1757
+ "content": "[24] Lin, T., Maire, M., Belongie, S.J., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft COCO: common objects in context. In: ECCV. pp. 740-755 (2014) 1, 3, 5, 6"
1758
+ },
1759
+ {
1760
+ "type": "ref_text",
1761
+ "bbox": [
1762
+ 0.174,
1763
+ 0.377,
1764
+ 0.761,
1765
+ 0.392
1766
+ ],
1767
+ "angle": 0,
1768
+ "content": "[25] Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning (2023) 3, 7"
1769
+ },
1770
+ {
1771
+ "type": "ref_text",
1772
+ "bbox": [
1773
+ 0.174,
1774
+ 0.4,
1775
+ 0.698,
1776
+ 0.414
1777
+ ],
1778
+ "angle": 0,
1779
+ "content": "[26] Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023) 1, 2, 3, 5, 6, 7, 8"
1780
+ },
1781
+ {
1782
+ "type": "ref_text",
1783
+ "bbox": [
1784
+ 0.174,
1785
+ 0.423,
1786
+ 0.825,
1787
+ 0.449
1788
+ ],
1789
+ "angle": 0,
1790
+ "content": "[27] Loshchilov, I., Hutter, F.: Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983 (2016) 6"
1791
+ },
1792
+ {
1793
+ "type": "ref_text",
1794
+ "bbox": [
1795
+ 0.174,
1796
+ 0.457,
1797
+ 0.708,
1798
+ 0.472
1799
+ ],
1800
+ "angle": 0,
1801
+ "content": "[28] Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019) 6"
1802
+ },
1803
+ {
1804
+ "type": "ref_text",
1805
+ "bbox": [
1806
+ 0.174,
1807
+ 0.482,
1808
+ 0.825,
1809
+ 0.508
1810
+ ],
1811
+ "angle": 0,
1812
+ "content": "[29] Masry, A., Long, D.X., Tan, J.Q., Joty, S., Hoque, E.: Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244 (2022) 2, 5, 6, 7"
1813
+ },
1814
+ {
1815
+ "type": "ref_text",
1816
+ "bbox": [
1817
+ 0.174,
1818
+ 0.517,
1819
+ 0.826,
1820
+ 0.543
1821
+ ],
1822
+ "angle": 0,
1823
+ "content": "[30] Mathew, M., Karatzas, D., Jawahar, C.: Docvqa: A dataset for vqa on document images. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 2200-2209 (2021) 1, 2, 3, 5, 6, 7"
1824
+ },
1825
+ {
1826
+ "type": "ref_text",
1827
+ "bbox": [
1828
+ 0.174,
1829
+ 0.552,
1830
+ 0.826,
1831
+ 0.58
1832
+ ],
1833
+ "angle": 0,
1834
+ "content": "[31] Microsoft: Phi-2: The surprising power of small language models. https://www.microsoft.com/en-us/research/blog/phi-2-the-surprising-power-of-small-language-models/ (2023) 3"
1835
+ },
1836
+ {
1837
+ "type": "ref_text",
1838
+ "bbox": [
1839
+ 0.174,
1840
+ 0.588,
1841
+ 0.826,
1842
+ 0.626
1843
+ ],
1844
+ "angle": 0,
1845
+ "content": "[32] Mishra, A., Shekhar, S., Singh, A.K., Chakraborty, A.: Ocr-vqa: Visual question answering by reading text in images. In: 2019 international conference on document analysis and recognition (ICDAR). pp. 947-952. IEEE (2019) 1, 3"
1846
+ },
1847
+ {
1848
+ "type": "ref_text",
1849
+ "bbox": [
1850
+ 0.174,
1851
+ 0.635,
1852
+ 0.45,
1853
+ 0.65
1854
+ ],
1855
+ "angle": 0,
1856
+ "content": "[33] OpenAI: Gpt-4 technical report (2023) 6"
1857
+ },
1858
+ {
1859
+ "type": "ref_text",
1860
+ "bbox": [
1861
+ 0.174,
1862
+ 0.659,
1863
+ 0.827,
1864
+ 0.71
1865
+ ],
1866
+ "angle": 0,
1867
+ "content": "[34] Ouyang, L., Wu, J., Jiang, X., Almeida, D., Wainwright, C.L., Mishkin, P., Zhang, C., Agarwal, S., Slama, K., Ray, A., Schulman, J., Hilton, J., Kelton, F., Miller, L., Simens, M., Askell, A., Welinder, P., Christiano, P.F., Leike, J., Lowe, R.: Training language models to follow instructions with human feedback. In: NeurIPS (2022) 1, 3"
1868
+ },
1869
+ {
1870
+ "type": "ref_text",
1871
+ "bbox": [
1872
+ 0.174,
1873
+ 0.719,
1874
+ 0.826,
1875
+ 0.758
1876
+ ],
1877
+ "angle": 0,
1878
+ "content": "[35] Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021) 1, 5"
1879
+ },
1880
+ {
1881
+ "type": "ref_text",
1882
+ "bbox": [
1883
+ 0.174,
1884
+ 0.767,
1885
+ 0.826,
1886
+ 0.793
1887
+ ],
1888
+ "angle": 0,
1889
+ "content": "[36] Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019) 3"
1890
+ },
1891
+ {
1892
+ "type": "ref_text",
1893
+ "bbox": [
1894
+ 0.174,
1895
+ 0.802,
1896
+ 0.826,
1897
+ 0.83
1898
+ ],
1899
+ "angle": 0,
1900
+ "content": "[37] Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: Unified, real-time object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 779-788 (2016) 2"
1901
+ },
1902
+ {
1903
+ "type": "ref_text",
1904
+ "bbox": [
1905
+ 0.174,
1906
+ 0.838,
1907
+ 0.825,
1908
+ 0.864
1909
+ ],
1910
+ "angle": 0,
1911
+ "content": "[38] Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems 28 (2015) 2"
1912
+ },
1913
+ {
1914
+ "type": "ref_text",
1915
+ "bbox": [
1916
+ 0.174,
1917
+ 0.873,
1918
+ 0.826,
1919
+ 0.911
1920
+ ],
1921
+ "angle": 0,
1922
+ "content": "[39] Schuhmann, C., Vencu, R., Beaumont, R., Kaczmarczyk, R., Mullis, C., Katta, A., Coombes, T., Jitsev, J., Komatsuzaki, A.: Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114 (2021) 5, 7"
1923
+ },
1924
+ {
1925
+ "type": "list",
1926
+ "bbox": [
1927
+ 0.174,
1928
+ 0.092,
1929
+ 0.828,
1930
+ 0.911
1931
+ ],
1932
+ "angle": 0,
1933
+ "content": null
1934
+ },
1935
+ {
1936
+ "type": "page_number",
1937
+ "bbox": [
1938
+ 0.491,
1939
+ 0.936,
1940
+ 0.509,
1941
+ 0.948
1942
+ ],
1943
+ "angle": 0,
1944
+ "content": "10"
1945
+ }
1946
+ ],
1947
+ [
1948
+ {
1949
+ "type": "ref_text",
1950
+ "bbox": [
1951
+ 0.174,
1952
+ 0.092,
1953
+ 0.826,
1954
+ 0.133
1955
+ ],
1956
+ "angle": 0,
1957
+ "content": "[40] Shao, S., Li, Z., Zhang, T., Peng, C., Yu, G., Zhang, X., Li, J., Sun, J.: Objects365: A large-scale, high-quality dataset for object detection. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 8430-8439 (2019) 4"
1958
+ },
1959
+ {
1960
+ "type": "ref_text",
1961
+ "bbox": [
1962
+ 0.174,
1963
+ 0.139,
1964
+ 0.826,
1965
+ 0.18
1966
+ ],
1967
+ "angle": 0,
1968
+ "content": "[41] Singh, A., Natarajan, V., Shah, M., Jiang, Y., Chen, X., Batra, D., Parikh, D., Rohrbach, M.: Towards vqa models that can read. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8317-8326 (2019) 1"
1969
+ },
1970
+ {
1971
+ "type": "ref_text",
1972
+ "bbox": [
1973
+ 0.174,
1974
+ 0.187,
1975
+ 0.826,
1976
+ 0.227
1977
+ ],
1978
+ "angle": 0,
1979
+ "content": "[42] Taori, R., Gulrajani, I., Zhang, T., Dubois, Y., Li, X., Guestrin, C., Liang, P., Hashimoto, T.B.: Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca (2023) 3, 5, 6"
1980
+ },
1981
+ {
1982
+ "type": "ref_text",
1983
+ "bbox": [
1984
+ 0.174,
1985
+ 0.235,
1986
+ 0.826,
1987
+ 0.274
1988
+ ],
1989
+ "angle": 0,
1990
+ "content": "[43] Team, G., Anil, R., Borgeaud, S., Wu, Y., Alayrac, J.B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A.M., Hauth, A., et al.: Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023) 3"
1991
+ },
1992
+ {
1993
+ "type": "ref_text",
1994
+ "bbox": [
1995
+ 0.174,
1996
+ 0.282,
1997
+ 0.826,
1998
+ 0.322
1999
+ ],
2000
+ "angle": 0,
2001
+ "content": "[44] Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., Rodriguez, A., Joulin, A., Grave, E., Lample, G.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023) 3"
2002
+ },
2003
+ {
2004
+ "type": "ref_text",
2005
+ "bbox": [
2006
+ 0.174,
2007
+ 0.33,
2008
+ 0.826,
2009
+ 0.357
2010
+ ],
2011
+ "angle": 0,
2012
+ "content": "[45] Veit, A., Matera, T., Neumann, L., Matas, J., Belongie, S.: Coco-text: Dataset and benchmark for text detection and recognition in natural images. arXiv preprint arXiv:1601.07140 (2016) 1"
2013
+ },
2014
+ {
2015
+ "type": "ref_text",
2016
+ "bbox": [
2017
+ 0.174,
2018
+ 0.365,
2019
+ 0.826,
2020
+ 0.405
2021
+ ],
2022
+ "angle": 0,
2023
+ "content": "[46] Wang, P., Yang, A., Men, R., Lin, J., Bai, S., Li, Z., Ma, J., Zhou, C., Zhou, J., Yang, H.: Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In: International Conference on Machine Learning. pp. 23318-23340. PMLR (2022) 7"
2024
+ },
2025
+ {
2026
+ "type": "ref_text",
2027
+ "bbox": [
2028
+ 0.174,
2029
+ 0.412,
2030
+ 0.826,
2031
+ 0.452
2032
+ ],
2033
+ "angle": 0,
2034
+ "content": "[47] Wang, W., Chen, Z., Chen, X., Wu, J., Zhu, X., Zeng, G., Luo, P., Lu, T., Zhou, J., Qiao, Y., et al.: Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. arXiv preprint arXiv:2305.11175 (2023) 7"
2035
+ },
2036
+ {
2037
+ "type": "ref_text",
2038
+ "bbox": [
2039
+ 0.174,
2040
+ 0.46,
2041
+ 0.826,
2042
+ 0.487
2043
+ ],
2044
+ "angle": 0,
2045
+ "content": "[48] Wei, H., Kong, L., Chen, J., Zhao, L., Ge, Z., Yang, J., Sun, J., Han, C., Zhang, X.: Vary: Scaling up the vision vocabulary for large vision-language models. arXiv preprint arXiv:2312.06109 (2023) 1, 2, 3, 4, 6"
2046
+ },
2047
+ {
2048
+ "type": "ref_text",
2049
+ "bbox": [
2050
+ 0.174,
2051
+ 0.495,
2052
+ 0.826,
2053
+ 0.561
2054
+ ],
2055
+ "angle": 0,
2056
+ "content": "[49] Wei, H., Liu, C., Guo, P., Zhu, Y., Fu, J., Wang, B., Wang, P.: Corner affinity: A robust grouping algorithm to make corner-guided detector great again. In: Raedt, L.D. (ed.) Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22. pp. 1458–1464. International Joint Conferences on Artificial Intelligence Organization (7 2022). https://doi.org/10.24963/ijcai.2022/203, https://doi.org/10.24963/ijcai.2022/203, main Track 2"
2057
+ },
2058
+ {
2059
+ "type": "ref_text",
2060
+ "bbox": [
2061
+ 0.174,
2062
+ 0.568,
2063
+ 0.826,
2064
+ 0.595
2065
+ ],
2066
+ "angle": 0,
2067
+ "content": "[50] Xu, C., Guo, D., Duan, N., McAuley, J.: Baize: An open-source chat model with parameter-efficient tuning on self-chat data. arXiv preprint arXiv:2304.01196 (2023) 5, 6"
2068
+ },
2069
+ {
2070
+ "type": "ref_text",
2071
+ "bbox": [
2072
+ 0.174,
2073
+ 0.603,
2074
+ 0.826,
2075
+ 0.642
2076
+ ],
2077
+ "angle": 0,
2078
+ "content": "[51] Yang, Z., Gan, Z., Wang, J., Hu, X., Ahmed, F., Liu, Z., Lu, Y., Wang, L.: Unitab: Unifying text and box outputs for grounded vision-language modeling. In: European Conference on Computer Vision. pp. 521-539. Springer (2022) 7"
2079
+ },
2080
+ {
2081
+ "type": "ref_text",
2082
+ "bbox": [
2083
+ 0.174,
2084
+ 0.65,
2085
+ 0.826,
2086
+ 0.69
2087
+ ],
2088
+ "angle": 0,
2089
+ "content": "[52] Ye, J., Hu, A., Xu, H., Ye, Q., Yan, M., Dan, Y., Zhao, C., Xu, G., Li, C., Tian, J., et al.: mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499 (2023) 3, 6"
2090
+ },
2091
+ {
2092
+ "type": "ref_text",
2093
+ "bbox": [
2094
+ 0.174,
2095
+ 0.697,
2096
+ 0.826,
2097
+ 0.725
2098
+ ],
2099
+ "angle": 0,
2100
+ "content": "[53] Yu, E., Zhao, L., Wei, Y., Yang, J., Wu, D., Kong, L., Wei, H., Wang, T., Ge, Z., Zhang, X., et al.: Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589 (2023) 3"
2101
+ },
2102
+ {
2103
+ "type": "ref_text",
2104
+ "bbox": [
2105
+ 0.174,
2106
+ 0.733,
2107
+ 0.826,
2108
+ 0.761
2109
+ ],
2110
+ "angle": 0,
2111
+ "content": "[54] Yu, W., Yang, Z., Li, L., Wang, J., Lin, K., Liu, Z., Wang, X., Wang, L.: Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490 (2023) 2, 6, 8"
2112
+ },
2113
+ {
2114
+ "type": "ref_text",
2115
+ "bbox": [
2116
+ 0.174,
2117
+ 0.768,
2118
+ 0.826,
2119
+ 0.795
2120
+ ],
2121
+ "angle": 0,
2122
+ "content": "[55] Zeng, A., Liu, X., Du, Z., Wang, Z., Lai, H., Ding, M., Yang, Z., Xu, Y., Zheng, W., Xia, X., et al.: Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414 (2022) 3"
2123
+ },
2124
+ {
2125
+ "type": "ref_text",
2126
+ "bbox": [
2127
+ 0.174,
2128
+ 0.803,
2129
+ 0.826,
2130
+ 0.83
2131
+ ],
2132
+ "angle": 0,
2133
+ "content": "[56] Zhang, A., Zhao, L., Xie, C.W., Zheng, Y., Ji, W., Chua, T.S.: Next-chat: An lmm for chat, detection and segmentation. arXiv preprint arXiv:2311.04498 (2023) 7"
2134
+ },
2135
+ {
2136
+ "type": "ref_text",
2137
+ "bbox": [
2138
+ 0.174,
2139
+ 0.838,
2140
+ 0.826,
2141
+ 0.865
2142
+ ],
2143
+ "angle": 0,
2144
+ "content": "[57] Zhang, S., Roller, S., Goyal, N., Artetxe, M., Chen, M., Chen, S., Dewan, C., Diab, M., Li, X., Lin, X.V., et al.: Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022) 2, 3, 4"
2145
+ },
2146
+ {
2147
+ "type": "ref_text",
2148
+ "bbox": [
2149
+ 0.174,
2150
+ 0.873,
2151
+ 0.826,
2152
+ 0.912
2153
+ ],
2154
+ "angle": 0,
2155
+ "content": "[58] Zhao, L., Yu, E., Ge, Z., Yang, J., Wei, H., Zhou, H., Sun, J., Peng, Y., Dong, R., Han, C., et al.: Chatspot: Bootstrapping multimodal llms via precise referring instruction tuning. arXiv preprint arXiv:2307.09474 (2023) 3"
2156
+ },
2157
+ {
2158
+ "type": "list",
2159
+ "bbox": [
2160
+ 0.174,
2161
+ 0.092,
2162
+ 0.826,
2163
+ 0.912
2164
+ ],
2165
+ "angle": 0,
2166
+ "content": null
2167
+ },
2168
+ {
2169
+ "type": "page_number",
2170
+ "bbox": [
2171
+ 0.491,
2172
+ 0.936,
2173
+ 0.508,
2174
+ 0.948
2175
+ ],
2176
+ "angle": 0,
2177
+ "content": "11"
2178
+ }
2179
+ ],
2180
+ [
2181
+ {
2182
+ "type": "ref_text",
2183
+ "bbox": [
2184
+ 0.172,
2185
+ 0.092,
2186
+ 0.826,
2187
+ 0.12
2188
+ ],
2189
+ "angle": 0,
2190
+ "content": "[59] Zhou, X., Zhuo, J., Krahenbuhl, P.: Bottom-up object detection by grouping extreme and center points. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 850-859 (2019) 2"
2191
+ },
2192
+ {
2193
+ "type": "ref_text",
2194
+ "bbox": [
2195
+ 0.173,
2196
+ 0.125,
2197
+ 0.825,
2198
+ 0.155
2199
+ ],
2200
+ "angle": 0,
2201
+ "content": "[60] Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023) 1, 3, 7, 8"
2202
+ },
2203
+ {
2204
+ "type": "list",
2205
+ "bbox": [
2206
+ 0.172,
2207
+ 0.092,
2208
+ 0.826,
2209
+ 0.155
2210
+ ],
2211
+ "angle": 0,
2212
+ "content": null
2213
+ },
2214
+ {
2215
+ "type": "page_number",
2216
+ "bbox": [
2217
+ 0.491,
2218
+ 0.936,
2219
+ 0.509,
2220
+ 0.948
2221
+ ],
2222
+ "angle": 0,
2223
+ "content": "12"
2224
+ }
2225
+ ]
2226
+ ]
2401.12xxx/2401.12503/4bc10b6c-537b-4aac-b190-2c35591d39a5_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cdd6c90d7ddac68c29205218bf831b43dee86e848f3e2ded708b110a336d3fe5
3
+ size 1747130
2401.12xxx/2401.12503/full.md ADDED
@@ -0,0 +1,303 @@
1
+ # Small Language Model Meets with Reinforced Vision Vocabulary
2
+
3
+ Haoran Wei $^{1,*}$ Lingyu Kong $^{2,*}$ Jinyue Chen $^{2}$ Liang Zhao $^{1}$
4
+ Zheng Ge $^{1\dagger}$ En Yu $^{3}$ Jianjian Sun $^{1}$ Chunrui Han $^{1}$ Xiangyu Zhang $^{1}$ $^{1}$ MEGVII Technology $^{2}$ University of Chinese Academy of Sciences
5
+ $^{3}$ Huazhong University of Science and Technology
6
+ https://varytoy.github.io/
7
+
8
+ # Abstract
9
+
10
+ Playing Large Vision Language Models (LVLMs) in 2023 is trendy among the AI community. However, the relatively large number of parameters (more than 7B) of popular LVLMs makes it difficult to train and deploy on consumer GPUs, discouraging many researchers with limited resources. Imagine how cool it would be to experience all the features of current LVLMs on an old GTX1080ti (our only game card). Accordingly, we present Vary-toy in this report, a small-size Vary along with Qwen-1.8B as the base "large" language model. In Vary-toy, we introduce an improved vision vocabulary, allowing the model to not only possess all features of Vary but also gather more generality. Specifically, we replace negative samples of natural images with positive sample data driven by object detection in the procedure of generating vision vocabulary, more sufficiently utilizing the capacity of the vocabulary network and enabling it to efficiently encode visual information corresponding to natural objects. For experiments, Vary-toy can achieve $65.6\%$ ANLS on DocVQA, $59.1\%$ accuracy on ChartQA, $88.1\%$ accuracy on RefCOCO, and $29\%$ on MMVet. The code will be publicly available on the homepage.
11
+
12
+ # 1 Introduction
13
+
14
+ Large Vision Language Model (LVLM) is one of the hottest research topics [1, 22, 26, 34, 48, 60] in the field of artificial intelligence over the last year. The exciting part is that one LVLM can achieve satisfactory performance in many downstream tasks [4, 24, 30, 32, 41, 45] guided by different prompts. However, there is still significant room for improvement in LVLM's overall image perception capacity. Intuitively, an advanced perceptual ability for visual concepts is essential to enhance the further development and implementation of a model. We deem that there are two main challenges to achieving that: 1) the shortcomings of the current vision vocabulary network [35, 48] in extracting rich visual information; 2) the huge model iteration cost in the optimization of a large number of parameters.
15
+
16
+ As aforementioned, current LVLMs demonstrate amazing ability in many tasks, especially the Computer Vision (CV) and Natural Language Processing (NLP) intersected ones (e.g., image caption [24], VQA [41], memes understanding, scene OCR [32], etc), based on the almost perfect vision vocabulary network — CLIP [35]. The structures of popular LVLMs can be divided into two main streams: 1) image tokens as prefixes like MetaLM [14]; 2) cross-attention for feature fusion like Flamingo [1]. Regardless of which structure is used, the upper limit of the model may be hindered by the visual signals encoding efficiency of its vision vocabulary network. To break through the potential bottleneck, Vary [48] introduces a simple and effective manner to scale up the vision
17
+
18
+ ![](images/c6b20cd7b3d326e2efa15fd7818f9a47fd9384c76ee95988050ec8d0db5c1fbe.jpg)
19
+ Figure 1: Features of Vary-toy. Based on a 1.8B language model, Vary-toy can achieve all features of vanilla Vary-base, including document OCR, image caption, VQA, general conversation, and so on. Besides, we introduce the natural object perception (location) ability for Vary-toy. Most importantly, with just a single GTX1080ti GPU, you can experience all of the above.
20
+
21
+ vocabulary for an LVLM. The scaling law is to first train a new visual vocabulary network using a small auto-regressive model (OPT-125M [57]), and then merge the old and new vocabularies to form the final LVLM (Vary-base [48]). However, Vary suffers two drawbacks to being a user-friendly baseline: 1) The waste of network capacity in the new vision vocabulary (which in vanilla Vary is only used to compress text information in PDF images). 2) The Vary-base with 7B LLM takes high iteration costs (requiring multiple A100 machines to train).
22
+
23
+ In this report, we present a small-size Vary, i.e., Vary-toy, to alleviate the aforementioned issues. Overall, Vary-toy enjoys the same pipeline as vanilla Vary, including the vision vocabulary generating and scaling-up processes. The original Vary treats natural images as negative samples during the creation of its new visual vocabulary; we believe this procedure, to some extent, wastes network capacity and leaves room for optimization. Instead, we treat natural images as positive samples for an object detection task [6, 19, 23, 37, 38, 49, 59]. Thus, when building the vision vocabulary, we incorporate both dense textual data (PDF) and natural object location data into the vocabulary network of Vary-toy, making it more universal. After completing the new and reinforced vocabulary, we merge it with the genuine $(224\times 224)$ CLIP and then integrate them into a 1.8B language model [2].
24
+
25
+ In experiments, we report metrics on several challenging benchmarks, i.e., DocVQA [30], ChartQA [29], MMVet [54], and RefCOCO [15]. Specifically, Vary-toy can achieve $65.6\%$ ANLS on DocVQA, $59.1\%$ accuracy on ChartQA, $29\%$ accuracy on MMVet, and $88.1\%$ accuracy on RefCOCO val. More specifically, it achieves performance on par with Qwen-VL-7B [3] on DocVQA and RefCOCO, as well as better accuracy than LLaVA-7B [26] on the general benchmark MMVet.
26
+
27
+ In conclusion, Vary-toy is a toy because it is at least three times smaller compared to popular LVLMs $(>7\mathrm{B})$ . Vary-toy is not a toy due to its demonstrated excellent potential in challenging tasks. We believe that Vary-toy still enjoys many improvement rooms and we hope that our small-size LVLM can encourage more attention in corresponding research and become a practical baseline, especially for those researchers with limited resources.
28
+
29
+ ![](images/4183366d5553e76d6c60c2e01bc242513099651a3eecd0c75beca4a79e824185.jpg)
30
+ Figure 2: Architecture of the Vary-toy. We utilize the Vary-tiny+ pipeline to generate the new vision vocabulary of Vary-toy. Such vision vocabulary can efficiently encode dense text and natural object location information into tokens. Based on the improved vocabulary, Vary-toy not only possesses all the previous features (document OCR) but also handles object detection tasks well.
31
+
32
+ # 2 Related Works
33
+
34
+ Over the past years, Large Language Models (LLMs), such as the GPT family [5, 34, 36], LLaMA family [8, 42, 44], OPT [57], and the GLM family [55], have achieved significantly advanced performance in NLP tasks. With the help of LLMs' language reasoning abilities, Vision Language Models (VLMs) like Flamingo [1], BLIP2 [22], LLaVA [25, 26], Vary [48], etc. [3, 12, 53, 58, 60] have achieved impressive results in various computer vision tasks such as image caption [24], VQA [4, 30, 32], image generation [12], visual grounding [3, 53, 60], document OCR [48] and so on. These models can not only follow human instructions but also possess remarkable few-shot and even zero-shot learning abilities, thereby driving the AI community toward the development of artificial general intelligence (AGI).
35
+
36
+ However, most popular open-source VLMs are parameter-heavy, with sizes like 7B (e.g., Qwen-VL [3] and mPLUG-Owl [52]) or 13B [26], which to some extent hinders the participation of researchers with limited resources and poses challenges for the deployment of VLMs in resource-constrained environments like home computers. Recently, there has been a growing interest in and development of smaller language models, such as Phi-2 (2.7B) [31] and Qwen-1.8B [2] for NLP tasks, and Gemini-nano (1.8B/3.25B) [43], MobileVLM (1.4B/2.7B) [9] for vision-language tasks.
37
+
38
+ In this report, Vary-toy will be an open-source small model that possesses features of the most popular LVLMs and demonstrates exceptional potential in fine-grained perception tasks.
39
+
40
+ # 3 Method
41
+
42
+ In this section, we will delve into the details of how to devise Vary-toy. As shown in Figure 2, there are two main parts in implementing the model: 1) how to generate a more practical vision vocabulary based on the Vary-tiny+ pipeline. 2) how to utilize the new vision vocabulary to make the 1.8B Vary-toy gather new features on the premise of not harming the original model features.
43
+
44
+ # 3.1 Generating A Reinforced Vision Vocabulary Upon Vary-tiny+
45
+
46
+ Vary-tiny [48] is a tiny vision language model to generate a specific PDF-parsing vision vocabulary for Vary. The vision vocabulary network comprises a SAM-base [17] main body and paired convolutions to reshape the output, enjoying about 80M parameters. Experiments in Vary prove that using the SAM initializing to gain intensive text perception is effective. However, the vocabulary-generating procedure in vanilla Vary suffers the risk of forgetting SAM's original natural object perception ability. What's more, we also think that writing only the visual knowledge of dense text into an 80M network is wasteful. Thus we generate a new and more reasonable vision vocabulary upon the Vary-tiny+ pipeline.
47
+
48
+ # Provide the OCR results of this image:
49
+
50
+ ![](images/e17543c3aae7ef2d529f87642358dd2c47cdd34338dc15acce6a04cfa4a00ba4.jpg)
51
+
52
+ # MARKETS AND STRATEGY
53
+
54
+ have also taken up this practice. It can be a very successful way of introducing new products and services to existing customers, up-selling customers, or influencing them to purchase more products.
55
+
56
+ # Loyalty Programs
57
+
58
+ Many companies develop loyalty or frequency-marketing programs in order to further engage the consumers with their products and increase customer loyalty. These programs are very effective for targeting the company's most valuable customers. Most airlines develop frequent-fliter programs, which allow customers to earn points toward their next flight. Other businesses, such as coffee shops, also offer frequency cards, that entitle the customer to a free beverage, for example, after purchasing a certain number of beverages.
59
+
60
+ Loyalty programs have been very effective in generating repeat business. They offer an added value to the consumer, whereby the purchaser is not simply enjoying the value of the current purchase, but is being rewarded. It is important, however, that the loyalty program be relative to the product and service offering of the organization and that it should be a means of promoting the brand's importance or frustration if, with an airline ticket as an example, they are unable to redeem their ticket when they want to travel, or if the restrictions on the reward are so high that it is not worth the hassle of redemption.
61
+
62
+ # PUBLIC RELATIONS AND PUBLICITY
63
+
64
+ An organization's public relations and publicity activities are the means to foster its relationships with its various audiences and to communicate with them. Public relations efforts are undertaken in order to form a favorable view in the public eye. Favorable publicity can enhance an organization's image and increase demand for its products. A positive article or review about a product or service adds credibility, believability, and legitimacy in a much more effective manner than paid-for advertising. Negative publicity, on the other hand, can tarnish an organization's reputation. Most public relations strategies include press releases, special events, and press conferences.
65
+
66
+ Press releases are articles or brief news releases that are submitted
67
+
68
+ # 184 MARKETS AND STRATEGY
69
+
70
+ have also taken up this practice. It can be a very successful way of in-producing new products and services to existing customers, up-selling customers, or influencing them to purchase more products.
71
+
72
+ # Loyalty Programs
73
+
74
+ Many companies develop loyalty or frequency-marketing programs in order to further engage the consumers with their products and increase customer loyalty. These programs are very effective for targeting the company's most valuable customers. Most airlines develop frequent-flyer programs, which allow customers to earn points toward their next flight. Other businesses, such as coffee shops, also offer frequency cards, that entitle the customer to a free beverage, for example, after purchasing a certain number of beverages.
75
+
76
+ Loyalty programs have been very effective in generating repeat business. They offer an added value to the consumer, whereby the purchaser is not simply enjoying the value of the current purchase, but is being rewarded. It is important, however, that the loyalty program be relative to the product and service offering of the organization and that the award be attainable. Customers may experience frustration if, with an airline ticket as an example, they are unable to redeem their ticket when they want to travel, or if the restrictions on the reward are so high that it is not worth the hassle of redemption. PUBLIC RELATIONS AND PUBLICITY
77
+
78
+ An organization's public relations and publicity activities are the means to foster its relationships with its various audiences and to communicate with them. Public relations efforts are undertaken in order to form a favorable view in the public eye. Favorable publicity can enhance an organization's image and increase demand for its products. A positive article or review about a product or service adds credibility, believability, and legitimacy in a much more effective manner than paid-for advertising. Negative publicity, on the other hand, can tarnish an organization's reputation. Most public relations strategies include press releases, special events, and press conferences. Press releases are articles or brief news releases that are submitted
79
+
80
+ ![](images/d65a31caf3a162f64a67fe1dca5740cfadfe290580f262b00ff733715678aca2.jpg)
81
+
82
+ # Detect all objects in this image:
83
+
84
+ ![](images/075dfdb3177a0fc3872d9fcf5ea64fc3596c1aee2c59fdf7a3ac99ed5688bc9d.jpg)
85
+ Person:[535,544,568,591]; Car:[009,552,058,737], [682,598,999,976], [910,558,999,600]; Bus:[044,070,913,909]
86
+
87
+ ![](images/3e77a06c848c394fc09652b57517377fd865681b297b80c5bd42ba059453e380.jpg)
88
+ Detect Tuba, Gloves and Bow Tie in this image:
89
+
90
+ Figure 3: Visualization of image-text pairs used by Vary-tiny+. For the PDF image-text pairs, there is only one prompt, while for the object detection task we utilize two types of prompts, as shown in the right half of the figure, because some images contain so many objects that the ground truth would exceed the maximum token length (4096) of OPT-125M even after interpolation.
91
+ ![](images/56c59b7bcd19d332b427ee9f73c96709d55c21dfe53880154d3495d2ded8c798.jpg)
92
+ Tuba: [512, 181, 971, 1000];
93
+ Gloves: [703, 730, 782, 862];
94
+ Bow Tie: [075, 590, 144, 630],
95
+ [570, 491, 662, 562].
96
+
97
+ # 3.1.1 Data Engine
98
+
99
+ PDF data. We prepare about 4M PDF image-text pairs in this stage. Following Vary, we use the PDF processing packages to extract the texts of each PDF page, which we find many Python packages can realize (e.g., pdfminer, pdfplumber, and fitz). Each page will be saved as a JPEG image and form an image-text pair with the corresponding text. In this way, we get 2M samples for English and 2M for Chinese. We use the sentence: "Provide the OCR results of this image." as the prompt for both English and Chinese tasks. The PDFs are mainly from arXiv, CC-MAIN-2021-31-PDF-UNTRUNCATED, and e-books. Figure 3 shows a sample of the PDF image-pair.
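The extraction scripts are not released with this report; the following is a minimal sketch, assuming PyMuPDF (fitz) is used, of how one page-level image-text pair per PDF page could be produced. The file names, DPI, and JSONL layout are illustrative, and the prompt string is the one quoted above.

```python
import json

import fitz  # PyMuPDF; pdfminer or pdfplumber would work similarly


def pdf_to_pairs(pdf_path, out_prefix, dpi=150):
    """Render each PDF page to an image and pair it with the page's extracted text."""
    pairs = []
    doc = fitz.open(pdf_path)
    for i, page in enumerate(doc):
        text = page.get_text()       # plain-text extraction for this page
        if not text.strip():         # skip pages without extractable text
            continue
        # The report saves JPEG pages; PNG keeps this sketch dependency-free.
        image_path = f"{out_prefix}_page{i:04d}.png"
        page.get_pixmap(dpi=dpi).save(image_path)
        pairs.append({
            "image": image_path,
            "prompt": "Provide the OCR results of this image.",
            "response": text,
        })
    return pairs


if __name__ == "__main__":
    # Hypothetical input file; one JSON object per image-text pair.
    with open("pdf_pairs.jsonl", "w") as f:
        for pair in pdf_to_pairs("paper.pdf", "paper"):
            f.write(json.dumps(pair, ensure_ascii=False) + "\n")
```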
100
+
101
+ Object detection data. To fully utilize the capacity of the visual vocabulary network and obtain the natural image perception ability from the SAM initialization, we introduce object detection data in the vision vocabulary generating process. We gather the samples from two large open-source datasets, i.e., Object365 [40] and OpenImage [18]. Due to the low efficiency of coordinate (numeric text) encoding in OPT's [57] text tokenizer, for images with too many objects, the number of tokens in the ground truth may exceed the maximum token length supported by OPT-125M (although we interpolate it to 4096). Therefore, we re-organize the annotations into two tasks: 1) Object Detection: If there are no more than 30 object boxes in the image, we let Vary-tiny+ detect all objects with the prompt: "Detect all objects in this image". 2) REC: If the number of object boxes is over 30, we regard this image as a REC task using a prompt template: "Detect class1, class2, ..., in this image". The selected classes are random, so one image can be used multiple times. In this manner, we obtain approximately 3M detection samples. Some samples can be seen in Figure 3.
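The conversion script is likewise not part of this report; below is a minimal sketch of the re-organization logic described above. The 30-box threshold and the two prompt styles follow the text, while the input annotation format, the 0-1000 coordinate scale (suggested by the targets in Figure 3), and the number of classes sampled per REC prompt are assumptions.

```python
import random

MAX_BOXES = 30  # threshold from the text above


def norm_box(box, width, height):
    """Map an absolute (x1, y1, x2, y2) box onto the 0-1000 grid used in the targets."""
    x1, y1, x2, y2 = box
    sx, sy = 1000.0 / width, 1000.0 / height
    return "[{:03d},{:03d},{:03d},{:03d}]".format(
        round(x1 * sx), round(y1 * sy), round(x2 * sx), round(y2 * sy))


def make_sample(annos, width, height, num_rec_classes=3):
    """annos: list of (class_name, (x1, y1, x2, y2)) in pixels. Returns (prompt, target)."""
    all_classes = sorted({name for name, _ in annos})
    if len(annos) <= MAX_BOXES:
        # Task 1: plain detection over every annotated object.
        prompt = "Detect all objects in this image"
        classes = all_classes
    else:
        # Task 2: REC-style prompt over a random subset of classes, so a crowded
        # image can be reused several times with different class lists.
        classes = random.sample(all_classes, k=min(num_rec_classes, len(all_classes)))
        prompt = "Detect {} in this image".format(", ".join(classes))
    target = "; ".join(
        "{}:{}".format(name, ", ".join(norm_box(b, width, height)
                                       for n, b in annos if n == name))
        for name in classes
    )
    return prompt, target
```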
102
+
103
+ # 3.1.2 Input Format
104
+
105
+ Different from the single input/output form of Vary-tiny, Vary-tiny+ needs various input formats to adapt to the corresponding tasks, because different prompts are required to guide the model to output the correct results. For simplicity, we use the template of Vicuna v1 [8] to construct all ground truth in a conversation format as USER: <img>"<image>"</img> "texts input" ASSISTANT: "texts output" </s>. We add "<img>" and "</img>" as special tokens of the text tokenizer of OPT-125M and find that they adapt very well to the Vicuna template. For the vision input branch, we don't utilize any augmentations and only resize the image to a fixed resolution, i.e., $1024 \times 1024$ .
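As a concrete illustration of this template, a small helper might look as follows; the token strings mirror the format described above, while the function name and the literal "<image>" placeholder handling are only illustrative.

```python
def build_vary_tiny_prompt(image_placeholder, text_input, text_output):
    """Assemble one training sample in the Vicuna-v1-style format described above."""
    return (
        f'USER: <img>"{image_placeholder}"</img> "{text_input}" '
        f'ASSISTANT: "{text_output}" </s>'
    )


# Example: a PDF-parsing sample.
sample = build_vary_tiny_prompt(
    "<image>", "Provide the OCR results of this image.", "...page text...")
```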
106
+
107
+ # 3.2 Forge the Cost-Effective Vary-Toy
108
+
109
+ In this section, we depict the design details of Vary-toy, mainly including the structure of the network and the data construction utilized in the pre-training and SFT stages.
110
+
111
+ # 3.2.1 Architecture
112
+
113
+ As shown in Figure 2, we follow the Vary pipeline to devise the main body of Vary-toy, but there are some minor differences. When fed an input image with a shape of $\mathrm{H} \times \mathrm{W}$ , the new vision vocabulary branch will directly resize the image to $1024 \times 1024$ , while the CLIP [35] branch gains a $224 \times 224$ image by center cropping. Both branches output 256 tokens with 1024 channels. The input channel dimension of Qwen-1.8B is 2048, so the simplest manner is to directly concatenate the image tokens of the two branches along the channel dimension as the input image tokens of the language model. In terms of code implementation, to maintain consistency with the Vary structure, we still add input embedding layers behind the vision vocabulary networks.
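The report does not include code for this fusion step; the PyTorch-style sketch below only illustrates the shapes implied above (256 tokens with 1024 channels per branch, concatenated channel-wise to match Qwen-1.8B's 2048-dimensional input). The module names and the exact resize/crop calls are assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ToyVisionFusion(nn.Module):
    """Illustrative two-branch image tokenizer: new (SAM-based) vocabulary + frozen CLIP."""

    def __init__(self, new_vocab: nn.Module, clip_vision: nn.Module, llm_dim: int = 2048):
        super().__init__()
        self.new_vocab = new_vocab      # assumed to map a 1024x1024 image to (B, 256, 1024)
        self.clip_vision = clip_vision  # assumed to map a 224x224 image to (B, 256, 1024)
        # Input embedding layer kept behind the vision vocabularies, as in Vary.
        self.input_proj = nn.Linear(llm_dim, llm_dim)

    def forward(self, image: torch.Tensor) -> torch.Tensor:
        # New-vocabulary branch: resize the full image to 1024 x 1024.
        big = F.interpolate(image, size=(1024, 1024), mode="bilinear", align_corners=False)
        new_tokens = self.new_vocab(big)                  # (B, 256, 1024)

        # CLIP branch: the paper center-crops to 224 x 224; a plain resize stands in for it here.
        small = F.interpolate(image, size=(224, 224), mode="bilinear", align_corners=False)
        clip_tokens = self.clip_vision(small)             # (B, 256, 1024)

        # Concatenate along the channel dimension -> (B, 256, 2048) image tokens for Qwen-1.8B.
        fused = torch.cat([new_tokens, clip_tokens], dim=-1)
        return self.input_proj(fused)
```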
114
+
115
+ <table><tr><td>Task</td><td>Dataset</td><td>Sample</td><td>A prompt example</td></tr><tr><td rowspan="2">Cap.</td><td>Laion-COCO [39]</td><td>4M</td><td>Describe the content of this image in a sentence.</td></tr><tr><td>BLIP558k [26]</td><td>558K</td><td>Describe the image with one saying.</td></tr><tr><td rowspan="2">PDF</td><td>Pure OCR</td><td>1M</td><td>Provide the OCR results of this image.</td></tr><tr><td>Markdown</td><td>500K</td><td>Convert the image to markdown format.</td></tr><tr><td rowspan="2">Det.</td><td>COCO [24]</td><td>50K</td><td>Detect all objects in this image.</td></tr><tr><td>RefCOCO</td><td>train set</td><td>Detect an object: the left woman.</td></tr><tr><td rowspan="3">NLP</td><td>ShareGPT</td><td>125K</td><td>Original conversation</td></tr><tr><td>Baize [50]</td><td>112K</td><td>Original conversation</td></tr><tr><td>Alpaca [42]</td><td>52K</td><td>Original conversation</td></tr><tr><td rowspan="2">VQA</td><td>DocVQA [30]</td><td>train set</td><td>Question. Answer using a single word or phrase.</td></tr><tr><td>ChartQA [29]</td><td>train set</td><td>Question. Answer using a single word or phrase.</td></tr></table>
116
+
117
+ Table 1: Multi-task training data. We introduce 5 types of data in the pretrain stage, including weakly supervised pair data, PDF image-text pair data, detection data, pure text auto-regressive data, and VQA data. All data annotations are reorganized to a conversation format.
118
+
119
+ # 3.2.2 Data Details
120
+
121
+ Intuitively, the sensitivity of the 1.8B model to data quantity and ratio is higher than that of the 7B or above models, so we put more effort into the data processing aspect for Vary-toy.
122
+
123
+ Pre-training & SFT data. For Vary-toy, the pretrain stage is actually a multi-task training stage, wherein we prepare a large amount of image-text pairs in various formats. As summarized in Table 1, we mainly focus on a total of 5 types of data in such stage, containing weakly annotated image caption, PDF dense OCR, object detection, pure text conversation, and VQA. Specifically, for natural images, we sample 4M image-text pair in the Laion-COCO [39] dataset, and we also use the BLIP-558K data proposed in LLaVA [26]. For PDF image-text pair, we prepare two types of data following Vary. One is pure dense text OCR, and the other is a task that converts the PDF image to a markdown format. The previous type of data is randomly sampled from the PDF data used in Vary-tiny+ and the last
124
+
125
+ one is obtained via LaTeX rendering. Compared to vanilla Vary, we reduce the proportion of PDF data to maintain universal capability. For the detection data, we gather images from the COCO [24] dataset. We sample 50K images with fewer objects for the pure object detection task and use all the training data of RefCOCO for the REC task. We normalize the coordinates of each box and then multiply them by 1000. To prevent the language ability of the LLM from deteriorating, we also introduce pure NLP conversation data, including ShareGPT, Baize [50], and Alpaca [42]. For the last downstream VQA tasks, we choose two challenging datasets (DocVQA and ChartQA [29]) to monitor the text perception and reasoning performance of Vary-toy on artificial images. There are at least 10 prompts, generated with GPT-3.5 [5], for each task, and Table 1 shows one example of them.
126
+
127
+ In the SFT stage, we only use LLaVA-80K [26] to instruction-tune the model. LLaVA-80K is a dataset with detailed descriptions and prompts for various types of images, produced by GPT-4 [26, 33].
128
+
129
+ # 3.2.3 Data Format
130
+
131
+ In Vary-toy, we are pleased to keep the Chinese PDF-parsing feature to some extent because there is very little exploration in this area, which is also one of the reasons that we select Qwen-1.8B [2] as our base language model (due to its relatively comprehensive text vocabulary). The data input to Qwen-1.8B follows the vanilla Vary [48] format. That is: <|im_start|>user: <img>"<image>"</img> "human prompts"<|im_end|> <|im_start|>assistant: "model outputs"<|im_end|>.
132
+
133
+ # 4 Experiments
134
+
135
+ # 4.1 Evaluation Metrics
136
+
137
+ We report the accuracy of Vary-toy on four popular and challenging benchmarks: DocVQA [30], ChartQA [29], RefCOCO [15], and MMVet [54]. DocVQA and ChartQA measure the text perception and reasoning ability of the model on manual (document and chart) images, RefCOCO tests the model's ability to locate natural objects, while MMVet, covering 6 measurement areas, monitors the general ability of Vary-toy. We use the evaluation metrics introduced in their original papers for fair comparison. Specifically, we utilize ANLS, relaxed accuracy, accuracy under 0.5 IoU, and GPT-4 scoring as the metrics for the above four datasets, respectively.
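For RefCOCO, "accuracy under 0.5 IoU" counts a prediction as correct when its box overlaps the ground-truth box with an IoU of at least 0.5. The snippet below is a minimal reference computation of that metric, not the official evaluation script.

```python
def iou(box_a, box_b):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes."""
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0


def accuracy_at_iou(preds, gts, threshold=0.5):
    """Fraction of referring expressions whose predicted box reaches the IoU threshold."""
    hits = sum(iou(p, g) >= threshold for p, g in zip(preds, gts))
    return hits / len(gts)
```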
138
+
139
+ # 4.2 Implementation Details
140
+
141
+ For Vary-tiny+, we unfreeze all the parameters and train the whole model with a batch size of 512 for 2 epochs. We select the AdamW [28] optimizer with a cosine annealing scheduler [27]. The initial learning rate is set to 5e-5 and decays to 0. It is worth noting that Vary-tiny+ is initialized with the weights of Vary-tiny for faster convergence.
142
+
143
+ For Vary-toy, following vanilla Vary, we freeze all weights of the two vision vocabulary networks and only optimize the parameters of the input embedding layers and the language model (Qwen-1.8B). In the multi-task training (pre-training) stage, we set the start learning rate to 5e-5, while it is set to 2e-5 in SFT. We train the model with a batch size of 512 for only 1 epoch in both stages.
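A minimal sketch of the optimization setup described above: the two vision vocabulary networks are frozen while the input embedding layers and Qwen-1.8B are updated with AdamW. The module names reuse those of the earlier fusion sketch, and applying the same cosine annealing schedule as in the Vary-tiny+ stage (with no warmup) is an assumption.

```python
from torch.optim import AdamW
from torch.optim.lr_scheduler import CosineAnnealingLR


def build_optimizer(model, steps_per_epoch, epochs=1, lr=5e-5):
    # Freeze both vision vocabulary networks (the new SAM-based vocabulary and CLIP).
    for module in (model.new_vocab, model.clip_vision):
        for p in module.parameters():
            p.requires_grad = False

    # Only the input embedding layers and the language model remain trainable.
    trainable = [p for p in model.parameters() if p.requires_grad]
    optimizer = AdamW(trainable, lr=lr)

    # Cosine annealing from the start learning rate down to 0 over all training steps.
    scheduler = CosineAnnealingLR(optimizer, T_max=steps_per_epoch * epochs, eta_min=0.0)
    return optimizer, scheduler
```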
144
+
145
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Size</td><td colspan="2">DocVQA</td><td colspan="3">ChartQA</td></tr><tr><td>val</td><td>test</td><td>human</td><td>augmented</td><td>Average</td></tr><tr><td>Dessurt [10]</td><td>-</td><td>46.5</td><td>63.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Donut [16]</td><td>-</td><td>-</td><td>67.5</td><td>-</td><td>-</td><td>41.8</td></tr><tr><td>Pix2Struct [20]</td><td>-</td><td>-</td><td>72.1</td><td>30.5</td><td>81.6</td><td>56.0</td></tr><tr><td>mPLUG-DocOwl [52]</td><td>7B</td><td>62.2</td><td>-</td><td>-</td><td>-</td><td>57.4</td></tr><tr><td>Qwen-VL-chat [2]</td><td>7B</td><td>65.1</td><td>-</td><td>-</td><td>-</td><td>65.7</td></tr><tr><td>Vary-toy</td><td>1.8B</td><td>65.6</td><td>65.0</td><td>33.4</td><td>84.8</td><td>59.1</td></tr></table>
146
+
147
+ Table 2: Performance comparison to popular methods on DocVQA and ChartQA. Vary-toy can achieve $65.6\%$ ANLS on DocVQA which is on par with the 7B Qwen-VL-chat and $59.1\%$ accuracy on ChartQA which is higher than 7B-size mPLUG-DocOwl.
148
+
149
+ # 4.3 Manual Image Understanding Ability
150
+
151
+ We evaluate the fine-grained text perception and reasoning ability via DocVQA [30] and ChartQA [29]. As shown in Table 2, with only a 1.8B language model, Vary-toy can achieve $65.6\%$ ANLS on DocVQA and $59.1\%$ accuracy on ChartQA. For DocVQA, Vary-toy enjoys comparable performance to the 7B-size Qwen-VL-chat, proving the excellent document-level text perception ability of the model and also proving that the new vision vocabulary works well for tokenizing PDF images. For ChartQA, Vary-toy can achieve $59.1\%$ average accuracy, which is better than the 7B-size mPLUG-DocOwl, further demonstrating the effectiveness of our model.
152
+
153
+ <table><tr><td rowspan="2">Type</td><td rowspan="2">Method</td><td rowspan="2">Size</td><td colspan="3">RefCOCO</td></tr><tr><td>val</td><td>testA</td><td>testB</td></tr><tr><td rowspan="4">Traditional</td><td>OFA-L [46]</td><td>-</td><td>80.0</td><td>83.7</td><td>76.4</td></tr><tr><td>TransVG [11]</td><td>-</td><td>81.0</td><td>82.7</td><td>78.4</td></tr><tr><td>VILLA [13]</td><td>-</td><td>82.4</td><td>87.5</td><td>74.8</td></tr><tr><td>UniTAB [51]</td><td>-</td><td>86.3</td><td>88.8</td><td>80.6</td></tr><tr><td rowspan="5">LLM-based</td><td>VisionLLM-H [47]</td><td>-</td><td>-</td><td>86.7</td><td>-</td></tr><tr><td>Shikra-7B [7]</td><td>7B</td><td>87.0</td><td>90.6</td><td>80.2</td></tr><tr><td>Shikra-13B [7]</td><td>13B</td><td>87.8</td><td>91.1</td><td>81.7</td></tr><tr><td>Qwen-VL-chat [2]</td><td>7B</td><td>88.6</td><td>92.3</td><td>84.5</td></tr><tr><td>Next-chat [56]</td><td>7B</td><td>85.5</td><td>90.0</td><td>77.9</td></tr><tr><td></td><td>Vary-toy</td><td>1.8B</td><td>88.1</td><td>90.6</td><td>85.7</td></tr></table>
154
+
155
+ # 4.4 Natural Object Perception Ability
156
+
157
+ The vision vocabulary network generated by Vary-tiny+ should enjoy two main advanced perception abilities: one for dense text and the other for natural objects. In this part, we test the latter ability of Vary-toy after it accesses the improved vision vocabulary. It is worth noting that the input image of the CLIP branch is processed by a center crop operation. Therefore, it can be ruled out that the model relies on CLIP for object localization.
158
+
159
+ As shown in Table 3, Vary-toy can get $88.1\%$ accuracy@0.5 on the RefCOCO validation set, which is on par with Qwen-VL-chat (7B) and even better than Shikra-13B. The results show that, with the knowledgeable vision vocabulary, Vary-toy gathers great natural object perception ability. This proves the effectiveness of using the Vary-tiny+ architecture to build a vision vocabulary, and prompts us to further reflect on whether CLIP would still be necessary if a large amount of weakly labeled image caption data, e.g., Laion-400M [39], were added during the new vocabulary generating process.
160
+
161
+ Table 3: Comparison with popular methods on RefCOCO. Benefiting from the new vision vocabulary, Vary-toy can achieve $88.1\%$ accuracy on RefCOCO val, which is on par with the 7B Qwen-VL-chat.
162
+
163
+ <table><tr><td rowspan="2">Method</td><td colspan="7">MM-Vet</td></tr><tr><td>Rec</td><td>OCR</td><td>Know</td><td>Gen</td><td>Spat</td><td>Math</td><td>Total</td></tr><tr><td>BLIP-2 [22]</td><td>27.5</td><td>11.1</td><td>11.8</td><td>7.0</td><td>16.2</td><td>5.8</td><td>22.4</td></tr><tr><td>LLaVA-7B [26]</td><td>28.0</td><td>17.1</td><td>16.3</td><td>18.9</td><td>21.2</td><td>11.5</td><td>23.8</td></tr><tr><td>MiniGPT-4 [60]</td><td>29.9</td><td>16.1</td><td>20.4</td><td>22.1</td><td>22.2</td><td>3.8</td><td>24.4</td></tr><tr><td>Otter [21]</td><td>27.3</td><td>17.8</td><td>14.2</td><td>13.8</td><td>24.4</td><td>3.8</td><td>24.7</td></tr><tr><td>OpenFlamingo [1]</td><td>28.7</td><td>16.7</td><td>16.4</td><td>13.1</td><td>21.0</td><td>7.7</td><td>24.8</td></tr><tr><td>LLaVA1.5-7B [25]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>30.5</td></tr><tr><td>Vary-toy (1.8B)</td><td>33.4</td><td>20.3</td><td>19.9</td><td>17.5</td><td>24</td><td>10.8</td><td>29.0</td></tr></table>
164
+
165
+ Table 4: Comparison with popular LVLMs on MMVet. With only a 1.8B language model, Vary-toy can get a promising $29.0\%$ accuracy. The abbreviations represent Rec: Recognition; Know: Knowledge; Gen: Language generation; Spat: Spatial awareness.
166
+
167
+ # 4.5 General Ability
168
+
169
+ We report the accuracy of Vary-toy on MMVet [54] to test its general performance. As summarized in Table 4, Vary-toy gains $29.0\%$ total accuracy, which is higher than the classical LLaVA-7B [26] $(23.8\%)$ , BLIP-2 [22], and MiniGPT-4 [60], demonstrating that Vary-toy enjoys excellent general knowledge. For a 1.8B model, the generation ability is relatively poor and needs to be strengthened.
170
+
171
+ ![](images/f6b2c6df24cb60e6159179a19c8df93f525c89578e7b47f6691c7c374ae5ca4b.jpg)
172
+
173
+ # What is funny about this image?
174
+
175
+ ![](images/0c5b4907fd5007d20597006260a6b8b1939d51adc3aa300e8ebeab5aa2e86fcd.jpg)
176
+
177
+ The funny aspect of this image lies in the fire hydrant painted with a smiling face, which gives it an unconventional and whimsical appearance. The cute drawing on the red fire hydrant attracts attention and can be considered a creative expression or an artistic statement. In addition, there's a car parked nearby, emphasizing the urban setting and reinforcing the idea that the image might have been taken in a city environment where such decorative objects and artistic installations are more common.
178
+
179
+ ![](images/563ea805d37b3d753beafd3a125c191d1e5d6ce910bc76e54956970665eff6f5.jpg)
180
+
181
+ # Detect all objects in this image:
182
+
183
+ ![](images/6651eabff288f71a5d3a4074c5337ccfc1a009b71a7027abc8694e71b7a4dff5.jpg)
184
+
185
+ ![](images/de5099e331aba41405c84dcaf36b1ad77db2cb8812ac6a342ff4c211c978fd18.jpg)
186
+
187
+ # Provide the OCR results of this image.
188
+
189
+ ![](images/386ecfbd46a75ec2ffd8d8c1b24f083c9cd4aaf8dd48c3fbf8c915f83300d061.jpg)
190
+ Figure 4: Visualization of high-quality results of our model in four common fields. We can see that Vary-toy has satisfactory general ability and enjoys strong text and object perception abilities.
191
+
192
+ ![](images/216a363ee11a1c01732301a264fd720154aa0b3cad6ba3c6092ef88e632e80aa.jpg)
193
+
194
+ ou are cordially invited to join us for the dramatic retum of Pagliacci , Le Prophète andLubéric . Each one ofBilli Boudi , Don Cardi , and Orlyl . Each one is given
195
+
196
+ the Metropolitan Opera's first production of Kurt Weill and Bertolt Brech's twentieth century masterpiece, Rise and Fall of the City of Mahogany. And a new production of Mozart's delightful Abduction from the Seraglio.
197
+
198
+ A trial subscription for the 1979-90 season at The Met gives you a chance to appreciate some of the many advantages enjoyed by our regular Subscribers. Choose any one of the twenty-three mini-series described inside and you'll enjoy the same seats for each performance. Your tickets will be sent to you in August, well in advance of box office lines and sold out notices.
199
+
200
+ Next
201
+
202
+ season, we hope your trial series experience will have convinced you that subscribing is the best way to be a part of the Metropolitan.
203
+
204
+ Opening Night is just a summer away. Won't you take a few moments now to
205
+
206
+ guarantee your series and seating for the glorious season ahead? It's a season that promises grand opera performed by some of the world's greatest artists. We hope you'll be with us.
207
+
208
+ P. S. If you are already a Met Subscriber, you may want to use a mini-series for operas not on your regular subscription. Or perhaps you'd like to give this brochure to a friend who may be interested in this introductory offer.
209
+
210
+ ![](images/620a0ad6d83ceec73d93b42b908ebdf4815ba351207750f8bf3daa8a652dfe24.jpg)
211
+ Ocillo Set: Franco Zefendi, Costamco: Peter J. Hall
212
+
213
+ you are cordially invited to join us for the dramatic return of Pagliazzi, Le Prophets and Lohengrin. For the power of Billy Budd, Don Carlo and Otello. For the excitement of the Metropolitan Opera's first production of Kurt Weill and Bertolt Brecht's twentieth century masterpiece, Rise and Fall of the City of Mahagony. And a new production of Mozart's delightful Abduction from the Seraglio.
214
+
215
+ At trial subscription for the 1979-80 season at The Met gives you a chance to appreciate some of the many advantages enjoyed by our regular Subscribers. Choose any one of the twenty-three mini-series described inside and you'll enjoy the same seats for each performance.
216
+
217
+ Your tickets will be sent to you in August, well in advance of box office lines and sold out notices.
218
+
219
+ Next season, we hope your trial series experience
220
+
221
+ will have convinced you that subscribing is the best way to be a part of the Metropolitan,
222
+
223
+ Opening Night is just a summer
224
+
225
+ away. Won't you take a few moments now to guarantee your series and seating for the glorious season ahead? It's a season that promises grand opera performed by some of the world's greatest artists. We hope you'll be with us.
226
+
227
+ P.S. If you are already a Met Subscriber, you may want to use a mini-series for operas not on your regular subscription.
228
+
229
+ Or perhaps you'd like to give this brochure to a friend who may be interested in this introductory offer.
230
+
231
+ # 4.6 Visualization
232
+
233
+ Figure 4 shows high-quality results of Vary-toy on four different downstream fields. We can see that the model enjoys good vision concept understanding and localization capacities, indicating that a reinforced vision vocabulary with a small language model can also perform well in multimodal tasks.
234
+
235
+ # 5 Conclusion
236
+
237
+ In this report, we propose a small LVLM — Vary-toy, which can be deployed on a GTX1080ti GPU and enjoys fine performance in many downstream tasks. What's more, we generate a new and more comprehensive vision vocabulary for the presented model, which is the key to the success of Vary-toy. We hope the promising and user-friendly Vary-toy can become a new baseline in such fields as well as draw more attention to LVLM, especially for researchers who previously lacked computing resources. We also encourage researchers to use our reinforced vision vocabulary for more downstream tasks. Finally, we firmly confirm that the Vary-toy will evolve beyond just a toy.
238
+
239
+ # References
240
+
241
+ [1] Alayrac, J., Donahue, J., Luc, P., Miech, A., Barr, I., Hasson, Y., Lenc, K., Mensch, A., Millican, K., Reynolds, M., Ring, R., Rutherford, E., Cabi, S., Han, T., Gong, Z., Samangooei, S., Monteiro, M., Menick, J.L., Borgeaud, S., Brock, A., Nematzadeh, A., Sharifzadeh, S., Binkowski, M., Barreira, R., Vinyals, O., Zisserman, A., Simonyan, K.: Flamingo: a visual language model for few-shot learning. In: NeurIPS (2022) 1, 3, 7
242
+ [2] Bai, J., Bai, S., Chu, Y., Cui, Z., Dang, K., Deng, X., Fan, Y., Ge, W., Han, Y., Huang, F., Hui, B., Ji, L., Li, M., Lin, J., Lin, R., Liu, D., Liu, G., Lu, C., Lu, K., Ma, J., Men, R., Ren, X., Ren, X., Tan, C., Tan, S., Tu, J., Wang, P., Wang, S., Wang, W., Wu, S., Xu, B., Xu, J., Yang, A., Yang, H., Yang, J., Yang, S., Yao, Y., Yu, B., Yuan, H., Yuan, Z., Zhang, J., Zhang, X., Zhang, Y., Zhang, Z., Zhou, C., Zhou, J., Zhou, X., Zhu, T.: Qwen technical report. arXiv preprint arXiv:2309.16609 (2023) 2, 3, 6, 7
243
+ [3] Bai, J., Bai, S., Yang, S., Wang, S., Tan, S., Wang, P., Lin, J., Zhou, C., Zhou, J.: Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966 (2023) 2, 3
244
+ [4] Biten, A.F., Litman, R., Xie, Y., Appalaraju, S., Manmatha, R.: Latr: Layout-aware transformer for scene-text vqa. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 16548-16558 (2022) 1, 3
245
+ [5] Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020) 3, 6
246
+ [6] Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part I 16. pp. 213-229. Springer (2020) 2
247
+ [7] Chen, K., Zhang, Z., Zeng, W., Zhang, R., Zhu, F., Zhao, R.: Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195 (2023) 7
248
+ [8] Chiang, W.L., Li, Z., Lin, Z., Sheng, Y., Wu, Z., Zhang, H., Zheng, L., Zhuang, S., Zhuang, Y., Gonzalez, J.E., Stoica, I., Xing, E.P.: Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ * chatgpt quality. https://lmsys.org/blog/2023-03-30-vicuna/ (2023) 3, 5
249
+ [9] Chu, X., Qiao, L., Lin, X., Xu, S., Yang, Y., Hu, Y., Wei, F., Zhang, X., Zhang, B., Wei, X., Shen, C.: Mobilevlm: A fast, strong and open vision language assistant for mobile devices (2023) 3
250
+ [10] Davis, B., Morse, B., Price, B., Tensmeyer, C., Wigington, C., Morariu, V.: End-to-end document recognition and understanding with dessurt. In: European Conference on Computer Vision. pp. 280-296. Springer (2022) 6
251
+ [11] Deng, J., Yang, Z., Chen, T., Zhou, W., Li, H.: Transvg: End-to-end visual grounding with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 1769-1779 (2021) 7
252
+ [12] Dong, R., Han, C., Peng, Y., Qi, Z., Ge, Z., Yang, J., Zhao, L., Sun, J., Zhou, H., Wei, H., et al.: Dreamllm: Synergistic multimodal comprehension and creation. arXiv preprint arXiv:2309.11499 (2023) 3
253
+ [13] Gan, Z., Chen, Y.C., Li, L., Zhu, C., Cheng, Y., Liu, J.: Large-scale adversarial training for vision-and-language representation learning. Advances in Neural Information Processing Systems 33, 6616-6628 (2020) 7
254
+ [14] Hao, Y., Song, H., Dong, L., Huang, S., Chi, Z., Wang, W., Ma, S., Wei, F.: Language models are general-purpose interfaces. arXiv preprint arXiv:2206.06336 (2022) 1
255
+ [15] Kazemzadeh, S., Ordonez, V., Matten, M., Berg, T.: Referitgame: Referring to objects in photographs of natural scenes. In: Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP). pp. 787-798 (2014) 2, 6
256
+ [16] Kim, G., Hong, T., Yim, M., Nam, J., Park, J., Yim, J., Hwang, W., Yun, S., Han, D., Park, S.: Ocr-free document understanding transformer. In: European Conference on Computer Vision. pp. 498-517. Springer (2022) 6
257
+ [17] Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A.C., Lo, W.Y., et al.: Segment anything. arXiv preprint arXiv:2304.02643 (2023) 4
258
+
259
+ [18] Kuznetsova, A., Rom, H., Alldrin, N., Uijlings, J., Krasin, I., Pont-Tuset, J., Kamali, S., Popov, S., Malloci, M., Kolesnikov, A., et al.: The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision 128(7), 1956–1981 (2020) 4
260
+ [19] Law, H., Deng, J.: Cornernet: Detecting objects as paired keypoints. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 734-750 (2018) 2
261
+ [20] Lee, K., Joshi, M., Turc, I.R., Hu, H., Liu, F., Eisenschlos, J.M., Khandelwal, U., Shaw, P., Chang, M.W., Toutanova, K.: Pix2struct: Screenshot parsing as pretraining for visual language understanding. In: International Conference on Machine Learning. pp. 18893-18912. PMLR (2023) 6
262
+ [21] Li, B., Zhang, Y., Chen, L., Wang, J., Yang, J., Liu, Z.: Otter: A multi-modal model with in-context instruction tuning. arXiv preprint arXiv:2305.03726 (2023) 7
263
+ [22] Li, J., Li, D., Savarese, S., Hoi, S.: Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. arXiv preprint arXiv:2301.12597 (2023) 1, 3, 7, 8
264
+ [23] Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE international conference on computer vision. pp. 2980-2988 (2017) 2
265
+ [24] Lin, T., Maire, M., Belongie, S.J., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft COCO: common objects in context. In: ECCV. pp. 740-755 (2014) 1, 3, 5, 6
266
+ [25] Liu, H., Li, C., Li, Y., Lee, Y.J.: Improved baselines with visual instruction tuning (2023) 3, 7
267
+ [26] Liu, H., Li, C., Wu, Q., Lee, Y.J.: Visual instruction tuning (2023) 1, 2, 3, 5, 6, 7, 8
268
+ [27] Loshchilov, I., Hutter, F.: Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983 (2016) 6
269
+ [28] Loshchilov, I., Hutter, F.: Decoupled weight decay regularization. In: ICLR (2019) 6
270
+ [29] Masry, A., Long, D.X., Tan, J.Q., Joty, S., Hoque, E.: Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244 (2022) 2, 5, 6, 7
271
+ [30] Mathew, M., Karatzas, D., Jawahar, C.: Docvqa: A dataset for vqa on document images. In: Proceedings of the IEEE/CVF winter conference on applications of computer vision. pp. 2200-2209 (2021) 1, 2, 3, 5, 6, 7
272
+ [31] Microsoft: Phi-2: The surprising power of small language models. https://www.microsoft.com/en-us/research/blog/phi-2-the-surprising-power-of-small-language-models/ (2023) 3
273
+ [32] Mishra, A., Shekhar, S., Singh, A.K., Chakraborty, A.: Ocr-vqa: Visual question answering by reading text in images. In: 2019 international conference on document analysis and recognition (ICDAR). pp. 947-952. IEEE (2019) 1, 3
274
+ [33] OpenAI: Gpt-4 technical report (2023) 6
275
+ [34] Ouyang, L., Wu, J., Jiang, X., Almeida, D., Wainwright, C.L., Mishkin, P., Zhang, C., Agarwal, S., Slama, K., Ray, A., Schulman, J., Hilton, J., Kelton, F., Miller, L., Simens, M., Askell, A., Welinder, P., Christiano, P.F., Leike, J., Lowe, R.: Training language models to follow instructions with human feedback. In: NeurIPS (2022) 1, 3
276
+ [35] Radford, A., Kim, J.W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al.: Learning transferable visual models from natural language supervision. In: International conference on machine learning. pp. 8748-8763. PMLR (2021) 1, 5
277
+ [36] Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019) 3
278
+ [37] Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: Unified, real-time object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 779-788 (2016) 2
279
+ [38] Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems 28 (2015) 2
280
+ [39] Schuhmann, C., Vencu, R., Beaumont, R., Kaczmarczyk, R., Mullis, C., Katta, A., Coombes, T., Jitsev, J., Komatsuzaki, A.: Laion-400m: Open dataset of clip-filtered 400 million image-text pairs. arXiv preprint arXiv:2111.02114 (2021) 5, 7
281
+
282
+ [40] Shao, S., Li, Z., Zhang, T., Peng, C., Yu, G., Zhang, X., Li, J., Sun, J.: Objects365: A large-scale, high-quality dataset for object detection. In: Proceedings of the IEEE/CVF international conference on computer vision. pp. 8430-8439 (2019) 4
283
+ [41] Singh, A., Natarajan, V., Shah, M., Jiang, Y., Chen, X., Batra, D., Parikh, D., Rohrbach, M.: Towards vqa models that can read. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 8317-8326 (2019) 1
284
+ [42] Taori, R., Gulrajani, I., Zhang, T., Dubois, Y., Li, X., Guestrin, C., Liang, P., Hashimoto, T.B.: Stanford alpaca: An instruction-following llama model. https://github.com/tatsu-lab/stanford_alpaca (2023) 3, 5, 6
285
+ [43] Team, G., Anil, R., Borgeaud, S., Wu, Y., Alayrac, J.B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A.M., Hauth, A., et al.: Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805 (2023) 3
286
+ [44] Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., Rodriguez, A., Joulin, A., Grave, E., Lample, G.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023) 3
287
+ [45] Veit, A., Matera, T., Neumann, L., Matas, J., Belongie, S.: Coco-text: Dataset and benchmark for text detection and recognition in natural images. arXiv preprint arXiv:1601.07140 (2016) 1
288
+ [46] Wang, P., Yang, A., Men, R., Lin, J., Bai, S., Li, Z., Ma, J., Zhou, C., Zhou, J., Yang, H.: Ofa: Unifying architectures, tasks, and modalities through a simple sequence-to-sequence learning framework. In: International Conference on Machine Learning. pp. 23318-23340. PMLR (2022) 7
289
+ [47] Wang, W., Chen, Z., Chen, X., Wu, J., Zhu, X., Zeng, G., Luo, P., Lu, T., Zhou, J., Qiao, Y., et al.: Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. arXiv preprint arXiv:2305.11175 (2023) 7
290
+ [48] Wei, H., Kong, L., Chen, J., Zhao, L., Ge, Z., Yang, J., Sun, J., Han, C., Zhang, X.: Vary: Scaling up the vision vocabulary for large vision-language models. arXiv preprint arXiv:2312.06109 (2023) 1, 2, 3, 4, 6
291
+ [49] Wei, H., Liu, C., Guo, P., Zhu, Y., Fu, J., Wang, B., Wang, P.: Corner affinity: A robust grouping algorithm to make corner-guided detector great again. In: Raedt, L.D. (ed.) Proceedings of the Thirty-First International Joint Conference on Artificial Intelligence, IJCAI-22. pp. 1458–1464. International Joint Conferences on Artificial Intelligence Organization (7 2022). https://doi.org/10.24963/ijcai.2022/203, https://doi.org/10.24963/ijcai.2022/203, main Track 2
292
+ [50] Xu, C., Guo, D., Duan, N., McAuley, J.: Baize: An open-source chat model with parameter-efficient tuning on self-chat data. arXiv preprint arXiv:2304.01196 (2023) 5, 6
293
+ [51] Yang, Z., Gan, Z., Wang, J., Hu, X., Ahmed, F., Liu, Z., Lu, Y., Wang, L.: Unitab: Unifying text and box outputs for grounded vision-language modeling. In: European Conference on Computer Vision. pp. 521-539. Springer (2022) 7
294
+ [52] Ye, J., Hu, A., Xu, H., Ye, Q., Yan, M., Dan, Y., Zhao, C., Xu, G., Li, C., Tian, J., et al.: mplug-docowl: Modularized multimodal large language model for document understanding. arXiv preprint arXiv:2307.02499 (2023) 3, 6
295
+ [53] Yu, E., Zhao, L., Wei, Y., Yang, J., Wu, D., Kong, L., Wei, H., Wang, T., Ge, Z., Zhang, X., et al.: Merlin: Empowering multimodal llms with foresight minds. arXiv preprint arXiv:2312.00589 (2023) 3
296
+ [54] Yu, W., Yang, Z., Li, L., Wang, J., Lin, K., Liu, Z., Wang, X., Wang, L.: Mm-vet: Evaluating large multimodal models for integrated capabilities. arXiv preprint arXiv:2308.02490 (2023) 2, 6, 8
297
+ [55] Zeng, A., Liu, X., Du, Z., Wang, Z., Lai, H., Ding, M., Yang, Z., Xu, Y., Zheng, W., Xia, X., et al.: Glm-130b: An open bilingual pre-trained model. arXiv preprint arXiv:2210.02414 (2022) 3
298
+ [56] Zhang, A., Zhao, L., Xie, C.W., Zheng, Y., Ji, W., Chua, T.S.: Next-chat: An lmm for chat, detection and segmentation. arXiv preprint arXiv:2311.04498 (2023) 7
299
+ [57] Zhang, S., Roller, S., Goyal, N., Artetxe, M., Chen, M., Chen, S., Dewan, C., Diab, M., Li, X., Lin, X.V., et al.: Opt: Open pre-trained transformer language models. arXiv preprint arXiv:2205.01068 (2022) 2, 3, 4
300
+ [58] Zhao, L., Yu, E., Ge, Z., Yang, J., Wei, H., Zhou, H., Sun, J., Peng, Y., Dong, R., Han, C., et al.: Chatspot: Bootstrapping multimodal llms via precise referring instruction tuning. arXiv preprint arXiv:2307.09474 (2023) 3
301
+
302
+ [59] Zhou, X., Zhuo, J., Krahenbuhl, P.: Bottom-up object detection by grouping extreme and center points. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 850-859 (2019) 2
303
+ [60] Zhu, D., Chen, J., Shen, X., Li, X., Elhoseiny, M.: Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592 (2023) 1, 3, 7, 8
2401.12xxx/2401.12503/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c9ffa0c2261cae4d209e54b169c6418f591a595de95af1cdc61a800724b786a
3
+ size 530470
2401.12xxx/2401.12503/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12554/1ac8e6f1-9e91-4660-91d5-33063262f8cf_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12554/1ac8e6f1-9e91-4660-91d5-33063262f8cf_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12554/1ac8e6f1-9e91-4660-91d5-33063262f8cf_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6fd2fb24f86760805fa8aa0f0c7fcfb7432689dc7f37c65a8ad526bfada5f6a9
3
+ size 972462
2401.12xxx/2401.12554/full.md ADDED
@@ -0,0 +1,493 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Can Large Language Models Write Parallel Code?
2
+
3
+ Daniel Nichols
4
+
5
+ dnicho@umd.edu
6
+
7
+ Department of Computer Science,
8
+
9
+ University of Maryland
10
+
11
+ College Park, Maryland, USA
12
+
13
+ Joshua H. Davis
14
+
15
+ jhDavis@umd.edu
16
+
17
+ Department of Computer Science,
18
+
19
+ University of Maryland
20
+
21
+ College Park, Maryland, USA
22
+
23
+ Zhaojun Xie
24
+
25
+ zxie12@umd.edu
26
+
27
+ Department of Computer Science,
28
+
29
+ University of Maryland
30
+
31
+ College Park, Maryland, USA
32
+
33
+ Arjun Rajaram
34
+
35
+ arajara1@umd.edu
36
+
37
+ Department of Computer Science,
38
+
39
+ University of Maryland
40
+
41
+ College Park, Maryland, USA
42
+
43
+ # ABSTRACT
44
+
45
+ Large language models are increasingly becoming a popular tool for software development. Their ability to model and generate source code has been demonstrated in a variety of contexts, including code completion, summarization, translation, and lookup. However, they often struggle to generate code for complex programs. In this paper, we study the capabilities of state-of-the-art language models to generate parallel code. In order to evaluate language models, we create a benchmark, PAREVAL, consisting of prompts that represent 420 different coding tasks related to scientific and parallel computing. We use PAREVAL to evaluate the effectiveness of several state-of-the-art open- and closed-source language models on these tasks. We introduce novel metrics for evaluating the performance of generated code, and use them to explore how well each large language model performs for 12 different computational problem types and six different parallel programming models.
46
+
47
+ # CCS CONCEPTS
48
+
49
+ - Computing methodologies $\rightarrow$ Parallel programming languages; Neural networks; Artificial intelligence.
50
+
51
+ # KEYWORDS
52
+
53
+ Large language models, Parallel code generation, Performance evaluation, Benchmarking, HPC
54
+
55
+ # ACM Reference Format:
56
+
57
+ Daniel Nichols, Joshua H. Davis, Zhaojun Xie, Arjun Rajaram, and Abhinav Bhatele. 2024. Can Large Language Models Write Parallel Code?. In The 33rd International Symposium on High-Performance Parallel and Distributed Computing (HPDC '24), June 3-7, 2024, Pisa, Italy. ACM, New York, NY, USA, 14 pages. https://doi.org/10.1145/3625549.3658689
58
+
59
+ Abhinav Bhatele
60
+
61
+ bhatele@cs.umd.edu
62
+
63
+ Department of Computer Science,
64
+
65
+ University of Maryland
66
+
67
+ College Park, Maryland, USA
68
+
69
+ # 1 INTRODUCTION
70
+
71
+ Large language model (LLM) based coding tools are becoming popular in software development workflows. Prior work has demonstrated their effectiveness at performing a variety of tasks, including code completion, summarization, translation, and lookup [4, 5, 18, 20, 21, 26, 40]. Popular models such as StarCoder [29] span a wide range of programming languages and domains, and can be used to complete or generate code during the development process. This makes them a promising tool for improving developer productivity and the overall quality of software. However, despite the rapid advancement and scaling of LLMs in recent years, they still struggle with more complicated tasks such as reasoning and planning. One particularly complex task that LLMs struggle with is generating parallel code. This task involves reasoning about data distributions, parallel algorithms, and parallel programming models.
72
+
73
+ Parallel code is essential to modern software development due to the ubiquity of multi-core processors, GPGPUs, and distributed systems. However, writing parallel code is difficult and error-prone. Parallel algorithms are generally more complicated than their sequential counterparts, and parallel bugs such as race conditions and deadlocks are notoriously non-trivial to debug. Further, it can be challenging to reason about the performance of parallel code and identify "performance bugs" [25]. LLMs can potentially help developers overcome these challenges, but this requires an understanding of the current capabilities of LLMs, and in turn, a well-designed and reproducible methodology to assess these capabilities.
74
+
75
+ There are several existing benchmarks for evaluating the capabilities of LLMs to generate correct code. However, none of them test generation of parallel code. Most existing benchmarks focus on short array- or string-manipulation tasks, and are predominantly in Python (or translated to other languages from Python [9]). Only more recent benchmarks, such as DS-1000 [28], test the usage of APIs, which are critical to using parallel programming models. Further, these benchmarks do not evaluate the performance of the generated code, instead testing only correctness. While correctness is a crucial metric, performance is also vital for developers writing parallel code. Thus, it is imperative to design new benchmarks and metrics to evaluate the usefulness of LLMs for parallel code generation tasks.
76
+
77
+ Developing a set of benchmarks that fully covers the space of desired capabilities is non-trivial. Identifying the best LLM for parallel
78
+
79
+ code generation requires testing on problems that cover shared- and distributed-memory programming models, different computational problem types, and different parallel algorithms. This can amount to a large number of benchmarks that must be manually designed. Further, these benchmarks are challenging to test. Traditional Python code generation benchmarks are tested by running eval on the generated code for a small number of small unit tests. In the case of parallel code, on the other hand, we must compile C/C++ code, link against one or more parallel libraries, and run the code in the proper parallel environment. Additionally, if we want to test the performance of the generated code, then we must choose reasonable input sizes for each benchmark.
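To make the contrast with eval-based Python benchmarks concrete, the sketch below compiles a generated OpenMP candidate together with a hand-written test driver and runs it with a chosen thread count. It is a schematic harness under assumed file names and compiler flags, not the actual PAREVAL driver.

```python
import os
import subprocess
import tempfile
from pathlib import Path


def compile_and_run_openmp(generated_code: str, driver_code: str, threads: int = 8):
    """Compile LLM-generated OpenMP code with a test driver and run it; return (ok, output)."""
    with tempfile.TemporaryDirectory() as tmp:
        src = Path(tmp) / "candidate.cpp"
        exe = Path(tmp) / "candidate"
        # The driver provides main(), input setup, correctness checks, and timing.
        src.write_text(generated_code + "\n" + driver_code)

        build = subprocess.run(
            ["g++", "-O2", "-fopenmp", str(src), "-o", str(exe)],
            capture_output=True, text=True,
        )
        if build.returncode != 0:        # the generation failed to even compile
            return False, build.stderr

        run = subprocess.run(
            [str(exe)],
            env={**os.environ, "OMP_NUM_THREADS": str(threads)},
            capture_output=True, text=True, timeout=60,
        )
        return run.returncode == 0, run.stdout
```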
80
+
81
+ In order to evaluate the current capabilities and limitations of LLMs in generating parallel code, we propose the Parallel Code Generation Evaluation (PAREval) benchmark: a set of benchmarks (prompts) for evaluating how well LLMs generate parallel code. These benchmarks cover twelve different computational problem types, and seven different execution models: serial, OpenMP, Kokkos, MPI, MPI+OpenMP, CUDA, and HIP. We evaluate several state-of-the-art open- and closed-source LLMs using these benchmarks, and report metrics that represent the correctness and performance of the generated code. We introduce novel code generation evaluation metrics that assess performance and parallel scaling. We further analyze how each model performs with respect to the various programming models and computational problem types. We discuss the areas where current state-of-the-art LLMs are already performing well and the areas where they can be improved.
82
+
83
+ In this paper, we make the following important contributions:
84
+
85
+ - We design the PAREVAL benchmark for evaluating the ability of LLMs to generate and translate parallel code. PAREVAL is available online at: github.com/parallelcodefoundry/Pareval
86
+ - We introduce two novel metrics, speedup $_n$ @k and efficiency $_n$ @k, for evaluating the performance and scaling of LLM generated code.
87
+ - We evaluate the effectiveness of several state-of-the-art open and closed-source LLMs using the PAREVAL benchmark.
88
+ - We identify several areas where current state-of-the-art LLMs can improve their capabilities on parallel code generation.
89
+
90
+ In addition to these contributions, we explore the following research questions (answers based on our observations):
91
+
92
+ RQ1 How well do state-of-the-art LLMs generate parallel code, and which models are the best? We show that all tested LLMs, both open- and closed-source, struggle to generate parallel code. Of the models tested, GPT-3.5 performs the best with a pass@1 of 76.0 for serial code generation and a pass@1 of 39.6 for parallel code generation.
93
+ RQ2 Which parallel execution models and problem types are most challenging for LLMs? We observe that LLMs struggle most with MPI code generation, and perform best for OpenMP and Kokkos code generation. Additionally, we show that LLMs find it challenging to generate parallel code for sparse, unstructured problems.
94
+ RQ3 How performant and scalable is the parallel code generated by LLMs? We observe that the parallel code generated by LLMs can have poor parallel speedup and efficiency. Additionally,
95
+
96
+ we show that the LLMs that most often generate correct parallel code do not necessarily generate the most performant parallel code.
97
+
98
+ RQ4 How well can LLMs translate between execution models? How performant and scalable is the translated code? We show that providing LLMs with correct implementations in one execution model can improve their ability to generate correct code in another execution model. This is particularly true for smaller open-source models.
99
+
100
+ # 2 BACKGROUND
101
+
102
+ In this section, we provide background information on large language models and how they are used for text generation. We further discuss how large language models can be used for code generation.
103
+
104
+ # 2.1 Large Language Models
105
+
106
+ Natural Language Processing (NLP) has largely been dominated by transformer-based models since their introduction in 2017 by Vaswani et al. [47]. Transformer networks are designed to model sequential data, such as text, relying on self-attention mechanisms to model the relationships between values in a sequence. Self-attention enables modeling of long-range dependencies in the data without vanishing gradient and scaling issues and allows for sequence elements to be processed in parallel. Transformers learn attention scores, which are computed between pairs of tokens in the input. Multi-head attention allows for learning multiple attention representations. These large transformer models are generally trained to model the distribution of a text corpus, such as the English language, by predicting the next token in a sequence given previous tokens. Transformer-based models have emerged as the most effective means of modeling text data, and have been shown to be effective at a wide range of NLP tasks.
107
+
108
+ # 2.2 Large Language Models for Code
109
+
110
+ An LLM trained on a large corpus of code can be used to generate code by giving it a code input prompt and asking it to predict the next token. Generally, code LLMs are trained on a large corpus of code, such as The Stack [27], that covers a wide range of programming languages and application types. Sometimes the pre-training corpus includes natural language as well, such as The Pile [16, 49]. In some instances, such as CodeLlama [41], the code LLM is a natural language model that has been further fine-tuned on a corpus of code. When generating code with one of these models it is often not enough to simply select the most probable next token to construct a sequence. This often leads to repetitive, low-quality outputs [22], so we also need a strategy for token selection. We utilize nucleus sampling and model temperature in this study.
111
+
112
+ Nucleus Sampling. Nucleus sampling [22], also called top- $p$ sampling, samples the next token from the token probability distribution up to some cut-off $p$ in the cumulative distribution function. Compared to sampling from a fixed number of top tokens in the distribution (called top- $k$ sampling), this ensures the selection of a more representative sample of tokens from the distribution. Nucleus sampling is often used in code generation tasks with a value of $p = 0.95$ and is sometimes combined with top- $k$ sampling.
113
+
114
+ Model Temperature. Generation temperature is a scaling value applied to the raw model outputs, or logits, before they are converted to a probability distribution. The value is applied by dividing the logits vector by the scalar temperature before computing the softmax of the logits. Lower temperatures make the probability distribution more peaked, upweighting the most probable tokens, while higher temperatures make the distribution more uniform. Intuitively, lower temperatures yield more conservative generations that the model is more confident in. Conversely, higher temperatures will lead to more varied and creative generations. For code generation tasks, a low temperature value of 0.2 is often used.
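+
+ To make these two sampling knobs concrete, the sketch below selects a single token from a vector of logits using temperature scaling followed by nucleus (top-$p$) filtering. It is a minimal illustration of the mechanism, assuming a small dense vocabulary; it is not the sampling code of any inference framework used in this paper.
+
+ ```cpp
+ #include <algorithm>
+ #include <cmath>
+ #include <numeric>
+ #include <random>
+ #include <vector>
+
+ // Select one token index from raw logits via temperature scaling and top-p (nucleus) sampling.
+ int sampleToken(std::vector<double> logits, double temperature, double topP, std::mt19937 &rng) {
+   // Temperature scaling: divide the logits by T before the softmax.
+   for (double &l : logits) l /= temperature;
+
+   // Softmax, subtracting the max logit for numerical stability.
+   double maxLogit = *std::max_element(logits.begin(), logits.end());
+   std::vector<double> probs(logits.size());
+   double total = 0.0;
+   for (size_t i = 0; i < logits.size(); ++i) total += probs[i] = std::exp(logits[i] - maxLogit);
+   for (double &p : probs) p /= total;
+
+   // Order token indices by descending probability.
+   std::vector<size_t> order(probs.size());
+   std::iota(order.begin(), order.end(), 0);
+   std::sort(order.begin(), order.end(), [&](size_t a, size_t b) { return probs[a] > probs[b]; });
+
+   // Keep the smallest set of tokens whose cumulative probability reaches topP (the nucleus).
+   double cumulative = 0.0;
+   size_t nucleusSize = 0;
+   while (nucleusSize < order.size() && cumulative < topP) cumulative += probs[order[nucleusSize++]];
+
+   // Sample from the renormalized nucleus.
+   std::uniform_real_distribution<double> dist(0.0, cumulative);
+   double r = dist(rng), acc = 0.0;
+   for (size_t i = 0; i < nucleusSize; ++i) {
+     acc += probs[order[i]];
+     if (r <= acc) return static_cast<int>(order[i]);
+   }
+   return static_cast<int>(order[nucleusSize - 1]);
+ }
+ ```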
115
+
116
+ # 3 RELATED WORK
117
+
118
+ Below, we describe related work in benchmarking LLMs for code-related tasks and applying LLMs to parallel and HPC code.
119
+
120
+ # 3.1 Benchmarking LLMs for Code-related Tasks
121
+
122
+ Since the introduction of the Codex model and HumanEval benchmark [13], many works have proposed new LLMs for code and evaluated them on a variety of tasks. The number of code-specific models has grown rapidly as open-source models and data sets become more available and low-rank training techniques, such as LoRA [23], make training large models more feasible. These models are usually evaluated on code generation tasks such as HumanEval [13], MBPP [7], and DS-1000 [28].
123
+
124
+ The first of these, HumanEval [13], is a set of 164 code generation tasks that are designed to evaluate the ability of LLMs to write short Python functions that solve a variety of problems, given a docstring and function signature. Similar to HumanEval is the Mostly Basic Python Problems (MBPP) [7] benchmark which is a set of 1000 simple Python problems. MBPP is often evaluated with few-shot prompts, where example correct solutions to other problems are included in the prompts. A common extension of these benchmarks is MultiPL-E [9] which extends the set of HumanEval and MBPP tests to 18 programming languages.
125
+
126
+ The DS-1000 benchmark [28] tests the ability of the model to generate more complex, data science-related code, for 1000 tasks making use of common data science libraries. Other similar benchmarks that evaluate coding LLMs on more complex tasks are GSM8K [14] and GSM-HARD [17], which use PAL [17] to evaluate the ability of LLMs to generate Python code snippets to assist in chains of reasoning. The CoderEval benchmarks [50] are a set of 230 Java and 230 Python code generation tasks that require the model to write context-dependent functions, rather than standalone functions as in HumanEval and MBPP.
127
+
128
+ Additionally, there have been several domain specific benchmarks that evaluate more narrow uses of LLM code generation [15, 30, 43]. All of these benchmarks make use of tasks manually created by experts to test more specific use cases of LLMs.
129
+
130
+ # 3.2 Applying LLMs to Parallel and HPC Code
131
+
132
+ Recently there has been a growing interest in applying LLMs to parallel and High Performance Computing (HPC) code. Several works have looked at creating smaller specialized HPC models [24, 32] or applying existing LLMs to HPC tasks [10, 11, 31]. Nichols et al. [32] introduce HPCCoder, a model fine-tuned on HPC code, and evaluate its ability to generate HPC code, label OpenMP pragmas, and predict performance. Kadosh et al. [24] introduce TOKOMPILER, an HPC-specific tokenizer for LLMs, and use it to train COMPCODER, a model trained on C, C++, and Fortran code.
135
+
136
+ Other works have looked at applying existing LLMs to HPC tasks. Munley et al. [31] evaluate the ability of LLMs to generate compiler verification tests for parallel OpenACC code. Chen et al. [10] use LLMs to identify data races in parallel code and propose the DRBML data set, which is integrated into the LM4HPC framework [11]. Godoy et al. [19] and Valero-Lara et al. [46] both evaluate the capabilities of LLMs on generating HPC kernels, but use a limited set of problems and LLMs and do not prompt or evaluate the LLMs using standard practices. None of these works comprehensively evaluate and compare the ability of LLMs to generate parallel code across a large number of problems, execution models, and LLMs using state-of-the-art evaluation techniques, which is the focus of this work.
137
+
138
+ # 4 PAREVAL: PROMPTS FOR PARALLEL CODE GENERATION
139
+
140
+ In order to evaluate the ability of LLMs to generate parallel code, we propose the Parallel Code Generation Evaluation (PAREVAL) benchmark. Below, we discuss the design of PAREVAL, and its various components that lead to the creation of concrete prompts for LLMs.
141
+
142
+ To disambiguate the use of the terms prompt, task, problem, problem type, and benchmark we define them as follows.
143
+
144
+ - Task/Prompt: An individual text prompt that is given to the LLM to generate code. The output can be compiled, executed, and scored as either correct or incorrect code.
145
+ - Problem: A set of tasks or prompts that test the ability of the LLM to generate code for the same computational work, but each task or prompt may use a different execution model.
146
+ - Problem Type: A set of problems that test computational problems with similar work or from similar domains (for example, sorting problems).
147
+ - Benchmark: A set of prompts that are all tested together to evaluate the performance of the LLM. We name the collection of all the prompts we have designed as the PAREVAL benchmark.
148
+
149
+ Benchmark Requirements. The goal of PAREVAL is to evaluate the ability of LLMs to generate parallel code. To do this, the prompts should be such that:
150
+
151
+ (1) The prompts should cover a wide variety of computational problem types, and parallel programming models.
152
+ (2) The prompts should be simple enough that they can be generated as a standalone function, but complex enough that they are not too trivial to solve.
153
+ (3) The prompts should not exist within any of the LLMs' training datasets, to prevent the LLMs from simply copying solutions from their training data.
154
+ (4) The prompts and corresponding outputs should be able to be evaluated automatically, since there will be many different tasks and LLM outputs.
155
+
156
+ In order to fulfill the requirements above, we propose PAREVAL, a set of 420 prompts that cover twelve different computational problem types and seven different execution models. Each problem type has five different problems, and each problem has a prompt for each of the seven execution models, resulting in 420 total prompts. Each prompt in PAREVAL is a standalone function that requires the LLM to generate code that solves the problem either sequentially or in parallel.
157
+
158
+ Problem Types. The problem types are listed and described in Table 1. These were hand-selected by us, and represent a wide variety of common computational problems that are often parallelized. Each requires different strategies or APIs to solve in parallel. For instance, the problems in the Sort problem type require the LLM to generate code that sorts an array of values.
159
+
160
+ Table 1: Descriptions of the twelve problem types in PAREVAL. Each problem type has five concrete problems, and each problem has a prompt for all seven execution models.
161
+
162
+ <table><tr><td>Problem Type</td><td>Description</td></tr><tr><td>Sort</td><td>Sort an array or sub-array of values; in-place and out-of-place.</td></tr><tr><td>Scan</td><td>Scan operations, such as prefix sum, over an array of values.</td></tr><tr><td>Dense Linear Algebra</td><td>Dense linear algebra functions from all three levels of BLAS.</td></tr><tr><td>Sparse Linear Algebra</td><td>Sparse linear algebra functions from all three levels of BLAS.</td></tr><tr><td>Search</td><td>Search for an element or property in an array of values.</td></tr><tr><td>Reduce</td><td>Reduction operation over an array dimension, such as computing a sum.</td></tr><tr><td>Histogram</td><td>Binning values based on a property of the data.</td></tr><tr><td>Stencil</td><td>One iteration of 1D and 2D stencil problems, such as Jacobi relaxation.</td></tr><tr><td>Graph</td><td>Graph algorithms, such as component counting.</td></tr><tr><td>Geometry</td><td>Compute geometric properties, such as convex hull.</td></tr><tr><td>Fourier Transform</td><td>Compute standard and inverse Fourier transforms.</td></tr><tr><td>Transform</td><td>Map a constant function to each element of an array.</td></tr></table>
163
+
164
+ **Problems.** The five problems within each problem type are designed to test the core functionality of the problem type. To prevent prompting the model for a solution that is already in its training dataset, the five problems are small variations of the usual problem type. For example, one of the scan problems is to compute the reverse prefix sum of an array, rather than directly computing the prefix sum. These variations still test the model's understanding of the core computational problem, but mitigate the likelihood of it simply copying code from its training dataset. Listing 1 shows another example of these problem variations. Another benefit of having five problems per problem type is that it provides more data points for evaluating the LLM's performance on that problem type, but not so many that it becomes infeasible to implement and maintain.
167
+
168
+ **Prompts.** Each problem has a prompt for each of the seven execution models that the LLM is required to generate code for. The seven execution models we test are: serial, OpenMP [37], MPI [42], MPI+OpenMP, Kokkos [45], CUDA [33], and HIP [2]. All the prompts are in C++, CUDA, or HIP. These represent both shared and distributed memory programming models, as well as GPU programming models. The prompts for each execution model are designed to be as similar to the other prompts for that problem as possible, while still being idiomatic for the programming model. For serial, OpenMP, MPI, and MPI+OpenMP prompts, we use STL data structures such as std::vector and std::array. For Kokkos, we utilize the Kokkos::View data structure (as shown in Listing 1). The CUDA and HIP prompts use raw pointers to represent array structures.
169
+
170
+ ```cpp
+ #include <Kokkos_Core.hpp>
+
+ /* Replace the i-th element of the array x with the minimum value from indices 0 through i.
+    Use Kokkos to compute in parallel. Assume Kokkos has already been initialized.
+    Examples:
+    input: [8, 6, -1, 7, 3, 4, 4] output: [8, 6, -1, -1, -1, -1, -1]
+    input: [5, 4, 6, 4, 3, 6, 1, 1] output: [5, 4, 4, 4, 3, 3, 1, 1]
+ */
+ void partialMinimums(Kokkos::View<int*> &x) {
+ ```
174
+
175
+ Listing 1: An example Scan prompt for Kokkos. The LLM will be tasked with completing the function body.
176
+
177
+ We list an example prompt in Listing 1 for a variant of a scan problem to generate Kokkos code. The goal of this problem is to compute the minimum value of the array up to each index. We include example inputs and outputs in the prompt as this can significantly improve the quality of the generated code [7]. The necessary #include statements are also prepended to the prompt as we found that this improves the likelihood of the LLM correctly using the required programming model.
178
+
179
+ # 5 DESCRIPTION OF EVALUATION EXPERIMENTS
180
+
181
+ Now that we have described the prompts in the previous section, we describe how we can use them to evaluate the performance of LLMs on two different tasks - code generation and translation.
182
+
183
+ # 5.1 Experiment 1: Parallel Code Generation
184
+
185
+ The first experiment studies the ability of LLMs to generate code, either sequential or in a specific parallel programming model, given a simple description in a prompt (see Listing 1).
186
+
187
+ Table 2: The models compared in our evaluation. CodeLlama and its variants currently represent state-of-the-art open-source LLMs and GPT represents closed-source LLMs. OpenAI does not publish the numbers of parameters in their models.
188
+
189
+ <table><tr><td>Model Name</td><td>No. of Parameters</td><td>Open-source Weights</td><td>License</td><td>HumanEval† (pass@1)</td><td>MBPP‡ (pass@1)</td></tr><tr><td>CodeLlama-7B [41]</td><td>6.7B</td><td>✓</td><td>llama2</td><td>29.98</td><td>41.4</td></tr><tr><td>CodeLlama-13B [41]</td><td>13.0B</td><td>✓</td><td>llama2</td><td>35.07</td><td>47.0</td></tr><tr><td>StarCoderBase [29]</td><td>15.5B</td><td>✓</td><td>BigCode OpenRAIL-M</td><td>30.35</td><td>49.0</td></tr><tr><td>CodeLlama-34B [41]</td><td>32.5B</td><td>✓</td><td>llama2</td><td>45.11</td><td>55.0</td></tr><tr><td>Phind-CodeLlama-V2 [39]</td><td>32.5B</td><td>✓</td><td>llama2</td><td>71.95</td><td>—</td></tr><tr><td>GPT-3.5 [8]</td><td>—</td><td>✗</td><td>—</td><td>61.50</td><td>52.2</td></tr><tr><td>GPT-4 [34]</td><td>—</td><td>✗</td><td>—</td><td>84.10</td><td>—</td></tr></table>
194
+
195
+ †HumanEval results are from the BigCode Models Leaderboard [1], except for GPT-3.5 and GPT-4 which are from [3].
196
+ ‡MBPP results are from [41].
197
+
198
+ We evaluate LLMs on how well they can generate code for all the prompts in PAREVAL. We do so by asking the model to complete the function started in the prompt, and then evaluating the generated code. By compiling and executing the generated code, we report different metrics that will be described in Section 7. The metrics are computed over the combined results from the OpenMP, MPI, MPI+OpenMP, Kokkos, CUDA, and HIP execution models, and compared with the same metrics computed over the serial results. These results will provide insight into how well the model can write parallel code based on natural language descriptions. The results can also be compared along the axes of execution model and problem type.
199
+
200
+ # 5.2 Experiment 2: Parallel Code Translation
201
+
202
+ The second experiment studies the ability of LLMs to effectively translate code provided in one execution model to another execution model. To accomplish this, we prompt the LLM with a correct version of the code in one execution model and ask it to translate it to another execution model. An example of this prompt format is shown in Listing 2. We evaluated several prompting formats for translation, such as giving examples of other successful translations, but found the format in Listing 2 to be the most effective.
203
+
204
+ In theory, we could have evaluated translation capabilities between each pair of execution models for each problem. However, to limit the quadratic increase in the number of prompts, we only evaluate translations for these pairs: serial $\rightarrow$ OpenMP, serial $\rightarrow$ MPI, and CUDA $\rightarrow$ Kokkos. We identify these as some of the most relevant translation tasks for HPC developers. We compute the same metrics as for Experiment 1. These results will provide insight into how well the model can translate between different execution models. The results can also be compared along the axes of source and target execution model and problem type.
205
+
206
+ # 6 MODELS USED FOR COMPARISON
207
+
208
+ We choose to compare several state-of-the-art open-source and closed-source LLMs, as well as smaller LLMs that are more practical for use in production. We provide brief descriptions of the LLMs used in our evaluation, and their properties below. Table 2 provides a summary and some salient properties of the models used.
209
+
210
+ ```cpp
+ // A serial implementation of sumOfMinimumElements
+ /* Return the sum of the minimum value at each index of vectors x and y for all indices.
+    i.e. sum = min(x_0, y_0) + min(x_1, y_1) + min(x_2, y_2) + ...
+    Example: input: x=[3, 4, 0, 2, 3], y=[2, 5, 3, 1, 7] output: 10
+ */
+ double sumOfMinimumElements(std::vector<double> const& x, std::vector<double> const& y) {
+    double sum = 0.0;
+    for (size_t i = 0; i < x.size(); ++i) {
+       sum += std::min(x[i], y[i]);
+    }
+    return sum;
+ }
+
+ // An OpenMP implementation of sumOfMinimumElements
+ /* Return the sum of the minimum value at each index of vectors x and y for all indices.
+    i.e. sum = min(x_0, y_0) + min(x_1, y_1) + min(x_2, y_2) + ...
+    Use OpenMP to sum in parallel.
+    Example: input: x=[3, 4, 0, 2, 3], y=[2, 5, 3, 1, 7] output: 10
+ */
+ double sumOfMinimumElements(std::vector<double> const& x, std::vector<double> const& y) {
+ ```
219
+
220
+ Listing 2: An example prompt given to the model for code translation. The model is given a sequential implementation of sumOfMinimumElements and tasked with translating it to OpenMP.
221
+
222
+ CodeLlama (CL-7B, CL-13B, and CL-34B). Rozière et al. originally introduced CodeLlama models in [41] as variants of the Llama 2 model [44], fine-tuned for code. All three models started with Llama 2 weights and were then fine-tuned on 500 billion tokens from a dataset of predominantly code. The Llama 2 models were also extended to support longer context lengths of 16k and infilling to generate code in the middle of sequences. We select these models as they are amongst the top performing open-source LLMs. Additionally, the CodeLlama models are very accessible as there are small model sizes available and there exists a thriving software ecosystem surrounding Llama 2 based models.
223
+
224
+ StarCoderBase. The StarCoderBase model [29] is a 15.5B parameter model trained on 1 trillion tokens from The Stack [27]. In addition to code from 80+ programming languages, its data set includes natural language in git commits and Jupyter notebooks. StarCoderBase supports infilling as well as a multitude of custom tokens specific to code text data. The model architecture is based on the SantaCoder model [6], and it supports a context length of 8K tokens. We select StarCoderBase as it is one of the best performing open-source models around its size, and is frequently used for comparisons in related literature.
225
+
226
+ Phind-CodeLlama-V2. The Phind-CodeLlama-V2 model [39] is a CodeLlama-34B model fine-tuned on over 1.5 billion tokens of code data. At the time we were selecting models for comparison it topped the BigCode Models Leaderboard [1] among open-access models on HumanEval with a pass@1 score of 71.95. However, the fine-tuning dataset for this model is not publicly available, so it is not possible to ensure that the BigCode benchmarks themselves are not included in Phind's fine-tuning dataset.
227
+
228
+ GPT-3.5 and GPT-4. GPT-3.5 and GPT-4 are closed-source LLMs from OpenAI [8, 34]. Most information about these models is not publicly available, however, they can be used for inference via a paid API. We use the most up-to-date versions of these models available at the time of writing, the gpt-3.5-turbo-1106 and gpt-4-1106-preview models. Unlike the other models tested, these are instruction-tuned and aligned to human preferences. Rather than using them for direct code generation, we have to interact with them via a chat interface. As with the Phind-CodeLlama-V2 model, the data used to train these models is not publicly available, so it is difficult to fairly compare them with the other models as they might have seen some prompts during training.
229
+
230
+ # 7 EVALUATION METRICS
231
+
232
+ It is important to be able to meaningfully compare the performance of the selected LLMs at generating correct and efficient code for the prompts in PAREVAL. This section details how we accomplish this by adopting a popular correctness metric for code LLMs, and defining two new performance-related metrics.
233
+
234
+ # 7.1 Metric for Correctness
235
+
236
+ We adopt the pass@k metric from [13] to quantify correctness of the generated code. For a given prompt, pass@k estimates the probability that the model will generate a correct solution given $k$ attempts. Often the average pass@k over all prompts in a benchmark is reported. To estimate the pass@k over a set of prompts, we first generate $N$ samples for each prompt using the model, where $N > k$ . These samples are then evaluated for correctness. The number of correct samples can be used to estimate the pass@k value as shown in Equation (1).
237
+
238
+ $$
+ \text{pass}@k = \frac{1}{|P|} \sum_{p \in P} \left[ 1 - \binom{N - c_p}{k} \Big/ \binom{N}{k} \right] \tag{1}
+ $$
+
+ Here $P$ is the set of prompts, $N$ is the number of generated samples per prompt, and $c_p$ is the number of correct samples for prompt $p$.
241
+
242
+ This metric provides insight into how often models generate correct code. The probability that the model will generate a correct solution in one attempt, $\text{pass}@1$ , is the most useful metric for end-users as it aligns with how LLMs are used in practice. In this paper, we report $100 \times \text{pass}@k$ as is common in related literature and online leaderboards [1, 12]. Additionally, as models have become more capable, studies have shifted toward only reporting $\text{pass}@1$ values. However, $\text{pass}@k$ values for $k > 1$ are still useful for understanding how models perform on more difficult prompts. Commonly reported values of $k$ are 1, 5, 10, 20, and 100. It is also common to report $\text{pass}@1$ values using a generation temperature of 0.2 and $\text{pass}@k$ for higher values of $k$ using a generation temperature of 0.8. This higher temperature allows the model to more extensively explore the solution space when generating a larger number of attempts.
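+
+ For reference, the bracketed term in Equation (1) can be computed with the numerically stable product form commonly used for this estimator; a minimal sketch (not the PAREVAL harness itself) is shown below.
+
+ ```cpp
+ #include <vector>
+
+ // Unbiased estimator of the bracketed term in Equation (1) for one prompt:
+ // N generated samples, c of which are correct, evaluated at a given k.
+ double passAtK(int N, int c, int k) {
+   if (N - c < k) return 1.0;  // every size-k subset must contain a correct sample
+   double allFail = 1.0;       // probability that k uniformly chosen samples are all incorrect
+   for (int i = N - c + 1; i <= N; ++i) {
+     allFail *= 1.0 - static_cast<double>(k) / i;
+   }
+   return 1.0 - allFail;
+ }
+
+ // pass@k over a benchmark is the mean of passAtK over all prompts.
+ double passAtKBenchmark(const std::vector<int> &correctCounts, int N, int k) {
+   double total = 0.0;
+   for (int c : correctCounts) total += passAtK(N, c, k);
+   return total / correctCounts.size();
+ }
+ ```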
243
+
244
+ # 7.2 Performance Metrics
245
+
246
+ For parallel and HPC code, it is important to consider both the correctness and performance of the generated code. To analyze and compare the runtime performance of LLM generated code, we introduce two new metrics: speedup $_n@k$ and efficiency $_n@k$ .
247
+
248
+ $\mathrm{speedup}_{\mathbf{n}}@ \mathbf{k}$ . The first metric, $\mathrm{speedup}_n@k$ , measures the expected best performance speedup of the generated code relative to the performance of a sequential baseline (see Section 8.2) if the model is given $k$ attempts to generate the code. The relative speedup is computed based on the execution time obtained using $n$ processes or threads. For a given prompt $p$ , the expected best speedup relative to a sequential baseline, $T_p^*$ , is given by Equation (2).
249
+
250
+ $$
+ \mathbb{E}\left[ \max\left\{ \frac{T_p^*}{T_{p,s_1,n}}, \dots, \frac{T_p^*}{T_{p,s_k,n}} \right\} \right] = \sum_{j=1}^{N} \frac{\binom{j-1}{k-1}}{\binom{N}{k}} \frac{T_p^*}{T_{p,j,n}} \tag{2}
+ $$
+
+ Here $T_p^*$ is the runtime of the sequential baseline for prompt $p$, and $T_{p,j,n}$ is the runtime of sample $j$ of prompt $p$ on $n$ resources.
253
+
254
+ To demonstrate that Equation (2) represents the desired quantity, assume the $N$ generated samples are ordered from slowest to fastest. This is without loss of generality as we assume the $k$ samples are selected uniformly and, thus, all size-$k$ subsets are equally likely. The probability that the maximum is the $j$th sample is $\binom{j-1}{k-1}/\binom{N}{k}$: the $j$th sample must be chosen, and the remaining $k-1$ samples must come from the $j-1$ samples before it, giving $\binom{j-1}{k-1}$ ways to select them. Summing the speedups weighted by these probabilities gives the expected maximum speedup over $k$ samples. Taking the average of Equation (2) over all prompts, we define the $\text{speedup}_n@k$ metric as shown in Equation (3).
255
+
256
+ $$
+ \text{speedup}_n@k = \frac{1}{|P|} \sum_{p \in P} \sum_{j=1}^{N} \frac{\binom{j-1}{k-1}}{\binom{N}{k}} \frac{T_p^*}{T_{p,j,n}} \tag{3}
+ $$
259
+
260
+ For a single LLM, the $\text{speedup}_n@k$ metric can be used to understand how well its generated code performs compared to sequential baselines. A value greater than 1 indicates that the generated code is faster than the baseline on average, while a value less than 1 indicates that the generated code is generally slower than the baseline. When comparing multiple LLMs, a higher value of $\text{speedup}_n@k$ signifies more performant code. It is important to note that this metric is hardware dependent and, thus, to compare models fairly all the run times need to be collected on the same hardware.
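+
+ As a concrete companion to Equations (2) and (3), the sketch below computes the expected best speedup for a single prompt; averaging it over all prompts gives speedup $_n$ @k, and dividing each term by $n$ gives the corresponding efficiency value defined later in this section. It is an illustrative re-implementation that assumes a runtime has been recorded for every generated sample (how incorrect samples are scored is not specified here); it is not the PAREVAL harness code.
+
+ ```cpp
+ #include <algorithm>
+ #include <cmath>
+ #include <functional>
+ #include <vector>
+
+ // Expected best speedup over k attempts for a single prompt (the inner sums of Equation (3)).
+ // baselineTime is T_p^* and runtimes holds T_{p,j,n} for each of the N generated samples.
+ double expectedMaxSpeedup(double baselineTime, std::vector<double> runtimes, int k) {
+   const int N = static_cast<int>(runtimes.size());
+   // Sort from slowest to fastest so that sample j is the maximum of a chosen subset
+   // exactly when its other k-1 members come from the j-1 slower samples.
+   std::sort(runtimes.begin(), runtimes.end(), std::greater<double>());
+
+   double expected = 0.0;
+   for (int j = k; j <= N; ++j) {
+     // weight = C(j-1, k-1) / C(N, k), computed in log space to avoid overflow.
+     double logWeight = (std::lgamma(j) - std::lgamma(k) - std::lgamma(j - k + 1))
+                      - (std::lgamma(N + 1) - std::lgamma(k + 1) - std::lgamma(N - k + 1));
+     expected += std::exp(logWeight) * (baselineTime / runtimes[j - 1]);
+   }
+   return expected;  // average over all prompts (and divide by n) to obtain speedup_n@k (efficiency_n@k)
+ }
+ ```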
263
+
264
+ The speedup $_n$ @k metric also gives insight into how well the generated code makes use of parallelism in its computation. It is fixed to a given number of resources, $n$ , which can either be threads or processes, depending on the model of parallelism being used. It also adds another axis to vary when comparing models. When studying a single model, the speedup $_n$ @k metric can be compared at different values of $n$ to understand the complete scaling behavior of that model. When comparing multiple models, it is typically most useful to fix $n$ to a single value. One could also average over many values of $n$ , but this risks hiding too much information to be useful.
265
+
266
+ $\mathrm{speedup}_{\mathrm{max}}@k$. We also define a variant of the $\mathrm{speedup}_n@k$ metric, $\mathrm{speedup}_{\mathrm{max}}@k$, as shown in Equation (4), which estimates the maximum speedup over all $n$ rather than at a fixed resource count.
267
+
268
+ $$
+ \text{speedup}_{\max}@k = \frac{1}{|P|} \sum_{p \in P} \sum_{\substack{j=1 \\ n \in \text{procs}}}^{N \cdot |\text{procs}|} \frac{\binom{j-1}{k-1}}{\binom{N \cdot |\text{procs}|}{k}} \frac{T_p^*}{T_{p,j,n}} \tag{4}
+ $$
271
+
272
+ Here procs is the set of resource counts over which the experiments can be performed. For example, if there are 128 hardware cores, $\text{procs} = \{1, 2, 4, 8, 16, 32, 64, 128\}$ processes or threads.
273
+
274
+ efficiency $_n@k$ . To further understand the parallel performance of the generated code, we define the efficiency $_n@k$ metric. This metric measures the expected best performance efficiency (speedup per process or thread) if the model is given $k$ attempts to generate the code. This is easily defined by modifying Equation (3) to divide by $n$ as shown in Equation (5). The possible values of this metric range between 0 and 1.0, with 1.0 representing a model that generates code that scales perfectly with the number of processes or threads. This metric is useful for understanding how well the generated code makes use of parallel resources. In addition to efficiency $_n@k$ , we also define efficiency $_{\max}@k$ in the same fashion as Equation (4).
275
+
276
+ $$
+ \text{efficiency}_n@k = \frac{1}{|P|} \sum_{p \in P} \sum_{j=1}^{N} \frac{\binom{j-1}{k-1}}{\binom{N}{k}} \frac{T_p^*}{n \cdot T_{p,j,n}} \tag{5}
+ $$
279
+
280
+ Even though we explore parallel code generation in this paper, these metrics can be used to consider the performance of sequential code generation as well. For example, examining speedup $_1@k$ for the HumanEval, MBPP, or DS-1000 benchmarks will lead to a better understanding of how efficient the generated Python code is compared to a human created baseline. Additionally, both performance metrics could be modified to be parameterized by problem size instead of number of processes/threads in order to study the computational complexity of the generated code.
281
+
282
+ # 8 EXPERIMENTAL SETUP
283
+
284
+ This section describes how we generate outputs using each of the LLMs (Section 6) and the prompts in PAREVAL, and how we evaluate the generated code using the PAREVAL test harness.
285
+
286
+ # 8.1 LLM Inference: Generating Code Output
287
+
288
+ To generate outputs with the open-source models, we use the HuggingFace library [48] with PyTorch [38] as the backend to load the LLM weights and use them for inference. Specifically, we create a PyTorch Dataset object that wraps the set of prompts and we pass this as input to a HuggingFace Pipeline object, which then runs the models in inference mode and generates the outputs. We do these runs on a single NVIDIA A100 80GB GPU using 16-bit floating point precision. Since the prompt workloads are fairly regular, we get the best inference performance for larger batch sizes, so for each model we use the largest batch size that fits in GPU memory. To generate the GPT-3.5 and GPT-4 outputs we use the OpenAI API [35] via OpenAI's Python client [36].
291
+
292
+ For all of the tasks, we use nucleus sampling with a value of $p = 0.95$ . Additionally, we limit the maximum number of new tokens generated to 1024. We experimentally found this to be long enough for all of the tasks to be completed, but short enough to limit long, repetitive outputs. Using this configuration, we create two sets of outputs for each model: one with 20 samples per prompt and a temperature of 0.2, and the other with 200 samples per prompt and a temperature of 0.8. The former is used to calculate the metrics at $k = 1$ (such as $\text{pass} @1$ ) and the latter for larger values of $k$ . This is in line with the generation configurations in related literature [29, 41]. Note that we exclude the evaluation of GPT-3.5 and GPT-4 with 200 samples per prompt and a temperature of 0.8 due to the high monetary cost of generating these outputs.
293
+
294
+ # 8.2 Evaluating the Generated Code
295
+
296
+ To evaluate the generated code, we use the PAREVAL test harness. The test harness is a set of scripts that compile and run the generated code using manually written test drivers for each problem. The scripts handle recording the compile status, correctness, and execution time of the generated code.
297
+
298
+ To compile the generated code, we use the GNU Compiler Collection (GCC) version 9.4.0. For serial, OpenMP, and Kokkos versions, we use GCC as the primary compiler, whereas we use it as the backend to the respective frontend compiler for the other execution models (i.e. the backend compiler to mpicxx). All compilations use the flags -O3 -std=c++17 and the OpenMP tasks add the -fopenmp flag. We use version 4.1.0 of Kokkos, and the threads execution space, which uses C++ threads for parallelism. MPI codes are compiled with OpenMPI version 4.1.1. CUDA programs are compiled with nvcc and CUDA version 12.1.1. Likewise, HIP programs are compiled with hipcc and ROCm version 5.7.0.
299
+
300
+ Before compiling an output, the prompt and generated code are written to a header file that is included by the driver script for that task. Once compiled, the generated binary is run by the test harness. The test harness checks if the generated code produces the same results as the sequential baseline. The sequential baselines are handwritten, optimal implementations of the prompt that are used to test correctness and to calculate the performance metrics (see Section 7.2). Additionally, a code can be labeled as incorrect for the following reasons:
301
+
302
+ - The code does not compile or it takes longer than three minutes to run. We choose the problem sizes for each prompt such that any reasonable implementation executes in much less than three minutes.
303
+
304
+ - The code does not use its respective parallel programming model. For example, if the model generates a sequential implementation rather than using OpenMP when prompted to do so, it is labeled as incorrect. We utilize several string matching criteria to implement this check, as sketched below.
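+
+ As a rough illustration of the second check, the sketch below marks an output as using an execution model only if it contains a characteristic construct of that model. The pattern lists here are hypothetical examples; the actual criteria used by the harness may differ.
+
+ ```cpp
+ #include <string>
+ #include <vector>
+
+ // Hypothetical string-matching check: does the generated code appear to use
+ // the requested execution model at all? The patterns below are illustrative only.
+ bool usesExecutionModel(const std::string &code, const std::string &model) {
+   std::vector<std::string> patterns;
+   if (model == "omp")         patterns = {"#pragma omp"};
+   else if (model == "mpi")    patterns = {"MPI_"};
+   else if (model == "kokkos") patterns = {"Kokkos::"};
+   else if (model == "cuda")   patterns = {"<<<", "cudaMalloc"};
+   else if (model == "hip")    patterns = {"<<<", "hipLaunchKernelGGL"};
+   else return true;  // serial prompts have nothing to check
+
+   for (const auto &p : patterns) {
+     if (code.find(p) != std::string::npos) return true;
+   }
+   return false;
+ }
+ ```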
305
+
306
+ The output of the program includes the result of the correctness check of the generated code, the average runtime of the generated code, and that of the sequential baseline over ten runs. We use the default timer for each execution model to measure its run time.
307
+
308
+ The CPU runs are conducted on an AMD EPYC 7763, 2.45 GHz CPU with 64 physical cores and 512 GB of RAM. We run with $1,2,4,\ldots,32$ threads for OpenMP and Kokkos. For MPI, we run with $1,2,4,\ldots,512$ processes across multiple nodes with one process per physical core. For $\mathrm{MPI + OpenMP}$ we run on $1,2,3,$ and 4 nodes with 1 process per node and $1,2,4,\ldots,64$ threads per node. The CUDA runs are completed on an NVIDIA A100 80GB GPU and the HIP runs on an AMD MI50 GPU. Kernels are launched with the number of threads indicated in the prompt text (i.e. at least as many threads as values in the array).
309
+
310
+ # 9 EVALUATION RESULTS
311
+
312
+ We now present detailed results from evaluating the LLMs described in Section 6 using the PAREVAL prompts and test harness.
313
+
314
+ # 9.1 Experiment 1: Parallel Code Generation
315
+
316
+ RQ1 How well do state-of-the-art LLMs generate parallel code, and which models are the best?
317
+
318
+ To evaluate the correctness of the code generated by the LLMs we first look at the pass@1 scores over PAREVAL. Figure 1 shows the pass@1 score for each LLM for generating the serial code versus the average over the six parallel execution models. As defined in Equation (1), these values are aggregated over all the prompts including problem types and execution models. Notably, all of the LLMs score significantly worse for parallel code generation than they do for serial code generation. The best performing models, GPT-3.5 and GPT-4, both achieve $\sim 76$ pass@1 on the serial prompts. This is a strong score in the context of other benchmarks, such as HumanEval, where GPT-4 gets 84.1 (see Table 2). Despite the strong serial scores, GPT-3.5 and GPT-4 only achieve 39.6 and 37.8 pass@1, respectively, on the parallel prompts.
319
+
320
+ The open-source models show a significant decrease in performance for parallel code generation with all of them except Phind-V2 (Phind-CodeLlama-V2) scoring between 10.2 and 18.6. Phind-V2 does much better than the other open-source models, achieving 32 pass@1 on the parallel prompts. This suggests that further fine-tuning of the open-source code models can improve their performance on parallel code generation. Additionally, it is significant that an open-source model performs near to the closed-source models on parallel code generation. Open-source models are more accessible and, thus, having a strong open-source model for parallel code generation would be beneficial to the community.
321
+
322
+ Another interesting trend we observe in Figure 1 is that CodeLlama-34B and GPT-4 both score worse on parallel code generation than their smaller counterparts.
323
+
324
+ ![](images/dcfc356400ded9009fb78e0ddaaa90b3e2d3eb9693651ffa3acb29512a90804b.jpg)
325
+ Figure 1: Each LLM's pass@1 score over PAREVAL. All of the LLMs score significantly worse in generating parallel code than serial code.
326
+
327
+ The reasons for this decrease in performance are not immediately obvious. However, we observe that CodeLlama-34B and GPT-4 often generate the same output for a given prompt for most or all of the 20 samples. This is due to the larger models being more "confident" in their outputs, but this can have an adverse effect on the pass@1 score when the output is incorrect.
328
+
329
+ Ultimately, the closed-source models are better than the open-source models at parallel code generation. Interestingly, GPT-3.5 beats GPT-4 on the parallel prompts by almost 2 percentage points, suggesting it may be better suited for parallel code generation tasks. This is interesting since GPT-4 is bigger and newer than GPT-3.5 and generally obtains better results on other code and natural language benchmarks. Amongst the open-source models, Phind-V2 has the best results, but still lags behind the closed-source models by almost 8 percentage points.
330
+
331
+ In addition to pass@1 it is also useful to consider pass@k for $k > 1$ to understand how the LLMs perform provided more attempts at a problem. Figure 2 shows the pass@k for each LLM for $k = 1,5,10,20$ with 200 samples and a temperature of 0.8 for $k \neq 1$ . The GPT models are omitted for $k > 1$ due to the monetary cost of generating a large number of samples with these models. We observe the same relative ordering as in Figure 1 is maintained for all values of $k$ with Phind-V2 leading the open-source LLMs. At $k = 20$ Phind-V2 achieves a pass@k of 46 meaning that on average it is able to generate a correct answer to one of the parallel prompts in 20 attempts $46\%$ of the time. The scores of each LLM improving with an increase in $k$ is expected due to the nature of the pass@k metric. The fact that each LLM begins to plateau suggests that there is an upper limit to their ability to generate correct parallel code and giving them more attempts does not significantly improve their performance.
332
+
333
+ RQ2 Which parallel execution models and problem types are most challenging for LLMs?
334
+
335
+ 9.1.1 Breakdowns by Execution Models. We further break down the pass@1 results by each execution model in Figure 3.
336
+
337
+ ![](images/ad29e7a16845402c90b20773c626e45aee89defb2bf6e65e3dbe8c48e3b5be5f.jpg)
338
+ Figure 2: The pass@k for various values of k. The relative order of the LLMs is the same for all values of k with PhindV2 leading the group.
339
+
340
+ From this data we observe that every LLM follows a similar distribution of scores across the execution models: serial (best), OpenMP, CUDA/HIP, and MPI/MPI+OpenMP (worst), with Kokkos varying between LLMs.
341
+
342
+ ![](images/9f62dea0c40381c882c256063a0d1b98a35d9a125262d72580f393018863e315.jpg)
343
+ Figure 3: pass@1 for each execution model. The LLMs generally follow the same distribution of scores across the execution models: serial (best), OpenMP, CUDA/HIP, and MPI/MPI+OpenMP (worst) with Kokkos varying between LLMs.
344
+
345
+ The pass@1 of LLMs being better with OpenMP than other parallel execution models is likely due to the fact that OpenMP code is the most similar to serial code. For many problems it only requires adding an OpenMP pragma, and occasionally a reduction clause. GPT-4 gets nearly as many OpenMP problems correct as serial problems, with an OpenMP pass@1 of 60 vs a 76 serial pass@1. The other top LLMs, GPT-3.5 and Phind-V2, are also nearly as efficient on OpenMP problems as serial problems. StarCoderBase and the CodeLlama models have a larger gap between their serial and OpenMP pass@1 scores, but still have better results on OpenMP than the other parallel execution models.
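+
+ For example, for the sumOfMinimumElements problem from Listing 2, one plausible correct OpenMP completion (not the output of any particular model) only adds a pragma with a reduction clause on top of the serial loop:
+
+ ```cpp
+ #include <algorithm>
+ #include <vector>
+
+ double sumOfMinimumElements(std::vector<double> const& x, std::vector<double> const& y) {
+    double sum = 0.0;
+    // The serial loop body is unchanged; the pragma distributes iterations
+    // across threads and combines the per-thread partial sums.
+    #pragma omp parallel for reduction(+:sum)
+    for (size_t i = 0; i < x.size(); ++i) {
+       sum += std::min(x[i], y[i]);
+    }
+    return sum;
+ }
+ ```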
346
+
347
+ With the larger LLMs, Kokkos is consistently just behind OpenMP in its pass@1 results. Like OpenMP, Kokkos is a shared memory parallel programming model that relies mostly on high-level abstract constructs to parallelize code. These high-level abstractions make it simpler for the LLM to translate the prompt text to code.
348
+
349
+ The smaller LLMs struggle with Kokkos, likely due to the fact that Kokkos is more verbose than OpenMP and is more niche than the other parallel execution models leading to less inclusion in their training data. With fewer Kokkos examples in the dataset the smaller LLMs likely struggle to learn how to model Kokkos code well.
350
+
351
+ Following Kokkos, we observe that all the LLMs are next most efficient for CUDA/HIP. These two always have a similar pass@1 score, which is likely due to the similarity of CUDA and HIP. All of the open-source LLMs have a slightly better pass@1 with HIP than CUDA, while the closed-source LLMs are slightly better with CUDA than HIP. CUDA/HIP kernels are more complex than OpenMP and Kokkos, but the parallelism is intrinsic to the kernel, making it easier than MPI, since the LLM does not need to reason about large changes to the underlying algorithm.
352
+
353
+ MPI and $\mathrm{MPI + OpenMP}$ are generally the worst parallel execution models for all the LLMs (except for CodeLlama 7B and 13B where they are second and third worst). Compared to the other execution models in our testing, MPI implementations often differ the most from their sequential counterparts. This complexity makes it difficult for the LLMs to generate correct MPI code. Based on the results for all the execution models, we hypothesize that this trend generalizes to all parallel execution models: the more different a parallel programming model's code is from the corresponding serial code, the more difficult it is for the LLMs to generate correct code in that programming model.
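+
+ To illustrate the gap, consider the same sumOfMinimumElements problem under MPI: rather than annotating a loop, the generated code must decide which indices each rank processes and how partial results are combined. The sketch below is one hedged illustration, assuming every rank holds full copies of x and y and that the result is only needed on rank 0; the actual PAREVAL prompts may impose different conventions.
+
+ ```cpp
+ #include <algorithm>
+ #include <vector>
+ #include <mpi.h>
+
+ // Illustrative MPI version: each rank sums a contiguous block of indices,
+ // then the partial sums are combined with MPI_Reduce onto rank 0.
+ double sumOfMinimumElements(std::vector<double> const& x, std::vector<double> const& y) {
+    int rank = 0, size = 1;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    MPI_Comm_size(MPI_COMM_WORLD, &size);
+
+    // Split the index range into near-equal contiguous blocks, one per rank.
+    const size_t n = x.size();
+    const size_t begin = (n * rank) / size;
+    const size_t end = (n * (rank + 1)) / size;
+
+    double localSum = 0.0;
+    for (size_t i = begin; i < end; ++i) {
+       localSum += std::min(x[i], y[i]);
+    }
+
+    double globalSum = 0.0;
+    MPI_Reduce(&localSum, &globalSum, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
+    return globalSum;  // meaningful only on rank 0 in this sketch
+ }
+ ```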
354
+
355
+ 9.1.2 Breakdowns by Problem Types. In addition to execution models it is also important to understand what types of computational problems LLMs struggle to parallelize. Figure 4 shows the pass@1 score for each problem type across all the LLMs. As a general trend, we observe that all LLMs are better at generating parallel solutions for structured, dense problems and worse for unstructured, sparse problems.
356
+
357
+ All of the LLMs get their best pass@1 scores for transform problems with the exception of GPT-3.5 where it is the second best. Transform problems are the simplest as they are completely data parallel. In addition to transform, all of the LLMs generally score well on reduction and search. These are also fairly simple to parallelize as searching requires little to no communication and reductions are often offered as high-level constructs in parallel programming models.
358
+
359
+ Phind-V2 and the GPT LLMs score well on stencil, histogram, and dense linear algebra problems. These problems are all structured and dense, which makes them easier for the LLMs to parallelize. These three problems are in the middle of the group for StarCoderBase and the CodeLlama LLMs, coming after transform, search, and reduce. This suggests that the larger LLMs are better at parallelizing these types of problems. Interestingly, StarCoderBase and the CodeLlama LLMs all have graph problems in their top four to five problem types, which is not the case for Phind-V2 and the GPTs.
360
+
361
+ The bottom five problem types for all of the LLMs are sparse linear algebra, scan, FFT, geometry, and sort. GPT-4 is the exception, with graph instead of sort as the fifth-worst problem type. Sparse linear algebra is generally the worst problem type, which is likely due to the difficulty in parallelizing sparse computations. FFT and geometry problems are also generally more difficult to parallelize, so it readily follows that the LLMs would struggle with them.
362
+
363
+ ![](images/956778649fa16ae69cffc6e0ef37708436dc7bba0b0d717f4c1710c248511462.jpg)
364
+ Figure 4: pass@1 for each problem type. The LLMs are best at transform problems, while they are worst at sparse linear algebra problems.
365
+
366
+ The sorting and scan results are more surprising. Parallel implementations for sort and scan are well known, and certain execution models like OpenMP and MPI even offer high-level abstractions for scan.
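+
+ As a reminder of what such an abstraction looks like, a block-distributed prefix sum can lean almost entirely on MPI_Exscan: each rank scans its own block and then shifts it by the exclusive scan of the preceding ranks' totals. The sketch below assumes the array is already distributed in contiguous blocks across ranks (an assumption made purely for illustration).
+
+ ```cpp
+ #include <numeric>
+ #include <vector>
+ #include <mpi.h>
+
+ // Distributed inclusive prefix sum over a block-distributed array.
+ void distributedPrefixSum(std::vector<double> &local) {
+    // Inclusive scan of the locally owned block.
+    std::partial_sum(local.begin(), local.end(), local.begin());
+
+    // Exclusive scan of the per-rank totals gives each rank its global offset.
+    double localTotal = local.empty() ? 0.0 : local.back();
+    double offset = 0.0;
+    MPI_Exscan(&localTotal, &offset, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
+
+    int rank = 0;
+    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+    if (rank == 0) offset = 0.0;  // MPI_Exscan leaves rank 0's receive buffer undefined
+
+    for (double &v : local) v += offset;
+ }
+ ```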
367
+
368
+ Figure 5 provides an even more detailed view of the pass@1 metric across both execution models and problem types for GPT-4. We see the same trends as in Figures 3 and 4 for GPT-4; however, we can also see where certain trends do not hold. For example, despite being the best LLM for search problems and the best LLM at Kokkos, GPT-4 does not do well on Kokkos search problems. We also see that MPI and $\mathrm{MPI + OpenMP}$ scores on a particular problem type are not always the same. This suggests that the model has difficulty dealing with these dual execution models.
369
+
370
+ ![](images/7bb8059d349b66ab2496032a7f0113419a6b076c09c4dfc961a8bec9cd0cc564.jpg)
371
+ Figure 5: pass@1 for GPT-4 across all execution models and problem types. GPT-4 excels with the Kokkos and OpenMP execution models, while getting more problems correct for transform, search, and reduce problems.
372
+
373
+ RQ3 How performant and scalable is the parallel code generated by LLMs?
374
+
375
+ 9.1.3 Speedup and Efficiency. When writing parallel code, it is important to consider performance in addition to correctness. A parallel implementation that is correct, but makes inefficient use of resources is not useful in practice. Hence, we compare the speedup $_n$ @k and efficiency $_n$ @k metrics for each LLM.
376
+
377
+ ![](images/c3909c5f37d1a1d320802901fb31faa87e36f98986891cbe8dad6b36d8bf5821.jpg)
378
+ Figure 6: speedup $_n$ @1 and efficiency $_n$ @1 for parallel prompts. Results are shown for $n = 32$ threads for OpenMP and Kokkos, $n = 512$ ranks for MPI, and $n = (4 \text{ ranks}) \times (64 \text{ threads})$ for MPI+OpenMP. For CUDA/HIP $n$ is set to the number of kernel threads, which varies across prompts. $^1$
379
+
380
+ Figure 6 shows the speedup $_n$ @1 and efficiency $_n$ @1 scores for each LLM, averaged across the parallel execution models. For comparison, we use the highest value of $n$ for each execution model that we ran in our experimentation: $n = 32$ threads for OpenMP and Kokkos, $n = 512$ processes for MPI, and $n = (4 \text{ processes}) \times (64 \text{ threads})$ for MPI+OpenMP. For CUDA/HIP, $n$ is set to the number of kernel threads, which varies across prompts. $^1$
381
+
382
+ In Figure 6, we see a trend similar to the pass@1 scores in Figure 1, with the GPT models scoring the highest and the CodeLlama models scoring the lowest.
383
+
384
+ ![](images/f26a3e1e69593376b94c1a379e22a5cc0f87043f91b0b44a2ebfbb83d82e1b2d.jpg)
385
+ Figure 7: efficiency@1 for MPI (left), OpenMP (middle), and Kokkos (right) prompts across rank and thread counts. Phind-V2 is most efficient for MPI prompts, but is one of the least efficient for OpenMP and Kokkos. GPT-4 is the most efficient for OpenMP and Kokkos prompts.
386
+
387
+ ![](images/419e5abe8d61f1f1da01b5c82ab13a0bc0af8c46eff3b761b003e235905c628d.jpg)
388
+
389
+ ![](images/0c1eba4824deb26b3c3a98381ccf62471f4447fa6c82f574d1c72ea1701fc910.jpg)
390
+
391
+ Despite GPT-3.5 having the highest pass@1 for parallel prompts, GPT-4 has the highest speedup $_n$ @1 for all parallel execution models, at 20.28. This means that on average GPT-4's parallel code achieves a $20.28\mathrm{x}$ speedup over the sequential baseline. To help interpret this result, we also show the efficiency $_n$ @1 for each LLM for the parallel prompts in Figure 6. From this we see that none of the LLMs use parallel resources efficiently. The best efficiency $_n$ @1 is 0.13 for GPT-4, meaning that on average GPT-4's parallel code achieves $13\%$ of the maximum possible speedup. CodeLlama-34B has the worst efficiency $_n$ @1 at 0.06. From the results in Figure 6 we can conclude that the parallel code produced by LLMs is generally inefficient even when correct.
392
+
393
+ It is also important to consider how efficiency $_n@1$ varies across $n$ . Figure 7 compares the efficiency $_n@1$ curves for MPI, OpenMP, and Kokkos. We see Phind-V2 is the most efficient at MPI prompts, while the least efficient at OpenMP and second to least for Kokkos. GPT-4 produces the most efficient code on average as it is one of the top two most efficient for all three execution models. All of the models start with better efficiency $_n@1$ for OpenMP than Kokkos, but rapidly decline towards an efficiency $_n@1$ of $\sim0.2$ . On the other hand, the Kokkos efficiency $_n@1$ values stay roughly consistent across $n$ , showing efficient use of threads.
394
+
395
+ Figure 8 further shows the expected maximum speedup and efficiency across all resource counts $n$ . We see the same trends as in Figure 6 with the speedups at similar values and the efficiencies higher. This is likely due to a number of the generated code samples plateauing at a certain $n$ , so choosing a smaller $n$ can give a better efficiency with the same speedup.
396
+
397
+ ![](images/c88bc2d54deb99357e346f0d455c334a8b158df9ec8434e97f2c47bf945fff10.jpg)
398
+ Figure 8: The expected max speedup and efficiency across all resource counts $n$ .
399
+
400
+ # 9.2 Experiment 2: Parallel Code Translation
401
+
402
+ RQ4 How well can LLMs translate between execution models? How performant and scalable is the translated code?
403
+
404
+ In addition to generating parallel code from scratch, we also evaluate the LLMs' ability to translate between execution models (see Section 5.2). Figure 9 shows the pass@1 scores for each LLM for translating serial to OpenMP, serial to MPI, and CUDA to Kokkos. We also include the generation pass@1 scores from Figure 3 for each LLM for OpenMP, MPI, and Kokkos.
405
+
406
+ ![](images/bf3bcdc4a1c9a3c60574741c27410680bfc2384f444a5b3b09156a0f04600461.jpg)
407
+ Figure 9: pass@1 for each LLM when translating serial to OpenMP, serial to MPI, and CUDA to Kokkos compared to the pass@1 score for generating code in the destination execution model. The smaller LLMs see a significant improvement when shown an example correct implementation.
408
+
409
+ Several LLMs score significantly better when given a correct example implementation in a different execution model, i.e. translation. All LLMs, except for GPT-3.5, have a higher pass@1 score for translating to OpenMP than they do for generating OpenMP code from scratch. We observe that the LLMs are able to correctly parallelize the provided serial code with OpenMP. A similar trend emerges with the serial to MPI translation. All of the LLMs score better when translating serial code to MPI than they do when generating MPI code from scratch. Likewise, all of the LLMs see an improvement translating from CUDA to Kokkos over native Kokkos generation, with the exception of the GPT models.
412
+
413
+ It is expected that the pass@1 scores would either increase or stay the same, since the LLM is given more information during translation than when generating code from scratch. What is surprising, however, is the magnitude of improvement that the smaller LLMs experience. For example, CodeLlama-7B has a pass@1 of 20 for generating OpenMP code from scratch, but a pass@1 of 52 for translating serial code to OpenMP. This suggests that providing LLMs with correct implementations can improve their ability to generate correct parallel code.
414
+
415
+ 9.2.1 Speedup and Efficiency. While translating between execution models improves the pass@1 score, it does not generally improve the performance of the generated code, as shown in Figure 10. Most LLMs see a similar efficiency $_n$ @1 for OpenMP, MPI, and Kokkos whether generating from scratch or translating between execution models. A number of LLMs actually perform worse when translating from serial to OpenMP.
416
+
417
+ ![](images/c88283ea60c44d977860e333afd79a4110b5c2dc50683aed7c365ee918e0afc7.jpg)
418
+ Figure 10: efficiency@1 translation scores compared to generation scores. The LLMs generally score similarly for translation and generation. $^{1}$
419
+
420
+ We observe similar trends with OpenMP and Kokkos for speedup $_n@1$ as shown in Figure 11. The LLMs generally perform similarly for translation and generation. The exception is MPI where CodeLlama-13B, CodeLlama-34B, and GPT-4 all get significantly better speedup $_n@1$ when translating from serial to MPI code. From the results in Figures 9 to 11 we conclude that providing LLMs with correct implementations in one execution model helps them generate correct code in another execution model, but does not necessarily improve the performance of the generated code.
421
+
422
+ # 10 CONCLUSION
423
+
424
+ In this paper, we proposed the Parallel Code Generation Evaluation (PAREVAL) benchmark for evaluating the ability of LLMs to generate parallel code. We additionally introduced two novel metrics for evaluating the runtime performance and scaling behavior of the generated parallel code. Using PAREVAL and these metrics, we have evaluated the ability of state-of-the-art open- and closed-source LLMs to generate parallel code. We find that LLMs are significantly worse at generating parallel code than they are at generating serial code. In particular, we find that LLMs struggle most with MPI code and sparse, unstructured problems.
425
+
426
+ ![](images/f91c32644c1c0e98f3c3edebe991c71998fc7890d30e75d0f34444f74ab26967.jpg)
427
+ Figure 11: speedup@1 translation scores compared to generation scores. The LLMs generally perform similarly for translation and generation with the exception of MPI. $^{1}$
428
+
429
+ Further, we observe that closed-source models outperform all the open-source models we tested, and that even when LLMs generate correct parallel code, it is often not performant or scalable. Providing correct implementations in one execution model (i.e. serial) helps LLMs generate correct parallel code, but does not necessarily improve the performance or scalability of the generated parallel code.
430
+
431
+ The poor performance of LLMs on PAREVAL indicates that further efforts are necessary to improve the ability of LLMs to model parallel code and/or create new LLMs that are specialized for parallel code generation. These LLMs will need to improve both the correctness and runtime performance of their outputs. Benchmarks, such as PAREVAL, are vital to creating and improving LLMs for parallel code generation. By iterating on PAREVAL and the metrics we have proposed, we can continue to improve the ability of LLMs in this domain and create state-of-the-art open-source LLMs for different parallel code development tasks.
432
+
433
+ # ACKNOWLEDGMENTS
434
+
435
+ This material is based upon work supported in part by the National Science Foundation under Grant No. 2047120, and by the National Science Foundation Graduate Research Fellowship Program under Grant No. DGE 2236417. This research used resources of the National Energy Research Scientific Computing Center, a U.S. Department of Energy Office of Science User Facility using NERSC award DDR-ERCAP0025593. We spent $\sim 80$ dollars for the use of the paid API of GPT-3.5 and GPT-4 for the evaluation in this paper.
436
+
437
+ # REFERENCES
438
+
439
+ [1] 2023. Big Code Models Leaderboard - a Hugging Face Space by bigcode. https://huggingface.co/spaces/bigcode/bigcode-models-leaderboard
440
+ [2] 2023. HIP Documentation. https://rocm.docs.amd.com/projects/HIP/en/latest/
441
+ [3] 2023. Zero-Shot Replication Framework. https://github.com/emrgnt-cmplxty/zero-shot-replication
442
+ [4] Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. 2020. A Transformer-based Approach for Source Code Summarization. ArXiv abs/2005.00653 (2020).
443
+ [5] Toufique Ahmed and Prem Devanbu. 2022. Learning code summarization from a small and local dataset. ArXiv abs/2206.00804 (2022).
444
+ [6] Loubna Ben Allal, Raymond Li, Denis Kocetkov, Chenghao Mou, Christopher Akiki, Carlos Munoz Ferrandis, Niklas Muennighoff, Mayank Mishra, Alex Gu, Manan Dey, et al. 2023. SantaCoder: don't reach for the stars! arXiv preprint arXiv:2301.03988 (2023).
445
+ [7] Jacob Austin, Augustus Odena, Maxwell I. Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie J. Cai, Michael Terry, Quoc V. Le,
446
+
447
+ and Charles Sutton. 2021. Program Synthesis with Large Language Models. CoRR abs/2108.07732 (2021). arXiv:2108.07732 https://arxiv.org/abs/2108.07732
448
+ [8] Tom B. Brown et al. 2020. Language Models are Few-Shot Learners. CoRR abs/2005.14165 (2020). arXiv:2005.14165 https://arxiv.org/abs/2005.14165
449
+ [9] Federico Cassano, John Gouwar, Daniel Nguyen, Sydney Nguyen, Luna Phipps-Costin, Donald Pinckney, Ming-Ho Yee, Yangtian Zi, Carolyn Jane Anderson, Molly Q Feldman, Arjun Guha, Michael Greenberg, and Abhinav Jangda. 2023. MultiPL-E: A Scalable and Polyglot Approach to Benchmarking Neural Code Generation. IEEE Transactions on Software Engineering 49, 7 (2023), 3675-3691. https://doi.org/10.1109/TSE.2023.3267446
450
+ [10] Le Chen, Xianzhong Ding, Murali Emani, Tristan Vanderbruggen, Pei hung Lin, and Chuanhua Liao. 2023. Data Race Detection Using Large Language Models. arXiv:2308.07505 [cs.LG]
451
+ [11] Le Chen, Pei-Hung Lin, Tristan Vanderbruggen, Chunhua Liao, Murali Emani, and Bronis de Supinski. 2023. LM4HPC: Towards Effective Language Model Application in High-Performance Computing. In OpenMP: Advanced Task-Based, Device and Compiler Programming, Simon McIntosh-Smith, Michael Klemm, Bronis R. de Supinski, Tom Deakin, and Jannis Klinkenberg (Eds.). Springer Nature Switzerland, Cham, 18-33.
452
+ [12] Mark Chen and et al. 2021. Evaluating Large Language Models Trained on Code. arXiv:arXiv:2107.03374
453
+ [13] Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde de Oliveira Pinto, Jared Kaplan, Harri Edwards, Yuri Burda, Nicholas Joseph, Greg Brockman, Alex Ray, Raul Puri, Gretchen Krueger, Michael Petrov, Heidy Khlaaf, Girish Sastry, Pamela Mishkin, Brooke Chan, Scott Gray, Nick Ryder, Mikhail Pavlov, Alethea Power, Lukasz Kaiser, Mohammad Bavarian, Clemens Winter, Philippe Tillet, Felipe Petroski Such, Dave Cummings, Matthias Plappert, Fotos Chantzis, Elizabeth Barnes, Ariel Herbert-Voss, William Hebgen Guss, Alex Nichol, Alex Paino, Nikolas Tezak, Jie Tang, Igor Babuschkin, Suchir Balaji, Shantanu Jain, William Saunders, Christopher Hesse, Andrew N. Carr, Jan Leike, Josh Achiam, Vedant Misra, Evan Morikawa, Alec Radford, Matthew Knight, Miles Brundage, Mira Murati, Katie Mayer, Peter Welinder, Bob McGrew, Dario Amodei, Sam McCandlish, Ilya Sutskever, and Wojciech Zaremba. 2021. Evaluating Large Language Models Trained on Code. arXiv:arXiv:2107.03374
454
+ [14] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. 2021. Training Verifiers to Solve Math Word Problems. arXiv preprint arXiv:2110.14168 (2021).
455
+ [15] Xueying Du, Mingwei Liu, Kaixin Wang, Hanlin Wang, Junwei Liu, Yixuan Chen, Jiayi Feng, Chaofeng Sha, Xin Peng, and Yiling Lou. 2023. ClassEval: A Manually-Crafted Benchmark for Evaluating LLMs on Class-level Code Generation. arXiv:2308.01861 [cs.CL]
456
+ [16] Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, Shawn Presser, and Connor Leahy. 2021. The Pile: An 800GB Dataset of Diverse Text for Language Modeling. CoRR abs/2101.00027 (2021). arXiv:2101.00027 https://arxiv.org/abs/2101.00027
457
+ [17] Luyu Gao, Aman Madaan, Shuyan Zhou, Uri Alon, Pengfei Liu, Yiming Yang, Jamie Callan, and Graham Neubig. 2022. PAL: Program-aided Language Models. arXiv preprint arXiv:2211.10435 (2022).
458
+ [18] Spandan Garg, Roshanak Zilouchian Moghaddam, Colin B. Clement, Neel Sundaresan, and Chen Wu. 2022. DeepDev-PERF: a deep learning-based approach for improving software performance. Proceedings of the 30th ACM Joint European Software Engineering Conference and Symposium on the Foundations of Software Engineering (2022).
459
+ [19] William Godoy, Pedro Valero-Lara, Keita Teranishi, Prasanna Balaprakash, and Jeffrey Vetter. 2023. Evaluation of OpenAI Codex for HPC Parallel Programming Models Kernel Generation. In Proceedings of the 52nd International Conference on Parallel Processing Workshops (ICPP-W 2023). ACM. https://doi.org/10.1145/3605731.3605886
460
+ [20] Jian Gu, Pasquale Salza, and Harald C. Gall. 2022. Assemble Foundation Models for Automatic Code Summarization. 2022 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER) (2022), 935-946.
461
+ [21] Sakib Haque, Zachary Eberhart, Aakash Bansal, and Collin McMillan. 2022. Semantic Similarity Metrics for Evaluating Source Code Summarization. 2022 IEEE/ACM 30th International Conference on Program Comprehension (ICPC) (2022), 36-47.
462
+ [22] Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The Curious Case of Neural Text Degeneration. In International Conference on Learning Representations. https://openreview.net/forum?id=rygGQyrFvH
463
+ [23] Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, and Weizhu Chen. 2021. LoRA: Low-Rank Adaptation of Large Language Models. CoRR abs/2106.09685 (2021). arXiv:2106.09685 https://arxiv.org/abs/2106.09685
464
+ [24] Tal Kadosh, Niranjan Hasabnis, Vy A. Vo, Nadav Schneider, Neva Krien, Abdul Wasay, Nesreen Ahmed, Ted Willkke, Guy Tamir, Yuval Pinter, Timothy Mattson, and Gal Oren. 2023. Scope is all you need: Transforming LLMs for HPC Code. arXiv:2308.09440 [cs.CL]
465
+
466
+ [25] Md Abul Kalam Azad, Nafees Iqbal, Foyzul Hassan, and Probir Roy. 2023. An Empirical Study of High Performance Computing (HPC) Performance Bugs. In 2023 IEEE/ACM 20th International Conference on Mining Software Repositories (MSR). 194-206. https://doi.org/10.1109/MSR59073.2023.00037
467
+ [26] Anant Kharkar, Roshanak Zilouchian Moghaddam, Matthew Jin, Xiaoyu Liu, Xin Shi, Colin B. Clement, and Neel Sundaresan. 2022. Learning to Reduce False Positives in Analytic Bug Detectors. 2022 IEEE/ACM 44th International Conference on Software Engineering (ICSE) (2022), 1307-1316.
468
+ [27] Denis Kocetkov, Raymond Li, Loubna Ben Allal, Jia Li, Chenghao Mou, Carlos Munoz Ferrandis, Yacine Jernite, Margaret Mitchell, Sean Hughes, Thomas Wolf, Dzmitry Bahdanau, Leandro von Werra, and Harm de Vries. 2022. The Stack: 3 TB of permissively licensed source code. Preprint (2022).
469
+ [28] Yuhang Lai, Chengxi Li, Yiming Wang, Tianyi Zhang, Ruiqi Zhong, Luke Zettlemoyer, Scott Wen tau Yih, Daniel Fried, Sida Wang, and Tao Yu. 2022. DS1000: A Natural and Reliable Benchmark for Data Science Code Generation. arXiv:2211.11501 [cs.SE]
470
+ [29] Raymond Li, Louba Ben Allal, Yangtian Zi, Niklas Muennighoff, Denis Kocetkov, Chenghao Mou, Marc Marone, Christopher Akiki, Jia Li, Jenny Chim, Qian Liu, Evgenii Zheltonozhskii, Terry Yue Zhuo, Thomas Wang, Olivier Dehaene, Mishig Davaadorj, Joel Lamy-Poirier, João Monteiro, Oleh Shliazhkova, Nicolas Gontier, Nicholas Meade, Armel Zebaze, Ming-Ho Yee, Logesh Kumar Umapathi, Jian Zhu, Benjamin Lipkin, Muhtasham Oblokulov, Zhiruo Wang, Rudra Murthy, Jason Stillerman, Siva Sankalp Patel, Dmitry Abulkhanov, Marco Zocca, Manan Dey, Zhihan Zhang, Nour Fahmy, Urvashi Bhattacharyya, Wenhao Yu, Swayam Singh, Sasha Luccioni, Paulo Villegas, Maxim Kunakov, Fedor Zhdanov, Manuel Romero, Tony Lee, Nadav Timor, Jennifer Ding, Claire Schlesinger, Hailey Schoelkopf, Jan Ebert, Tri Dao, Mayank Mishra, Alex Gu, Jennifer Robinson, Carolyn Jane Anderson, Brendan Dolan-Gavitt, Danish Contractor, Siva Reddy, Daniel Fried, Dzmitry Bahdanau, Yacine Jernite, Carlos Munoz Ferrandis, Sean Hughes, Thomas Wolf, Arjun Guha, Leandro von Werra, and Harm de Vries. 2023. StarCoder: may the source be with you! (2023). arXiv:2305.06161 [cs.CL]
471
+ [30] Mingjie Liu, Nathaniel Pinckney, Brucek Khailany, and Haoxing Ren. 2023. VerilogEval: Evaluating Large Language Models for Verilog Code Generation. arXiv:2309.07544 [cs.LG]
472
+ [31] Christian Munley, Aaron Jarmusch, and Sunita Chandrasekaran. 2023. LLM4VV: Developing LLM-Driven Testsuite for Compiler Validation. arXiv:2310.04963 [cs.AI]
473
+ [32] Daniel Nichols, Aniruddha Marathe, Harshitha Menon, Todd Gamblin, and Abhinav Bhatele. 2023. Modeling Parallel Programs using Large Language Models. arXiv:2306.17281 [cs.DC]
474
+ [33] NVIDIA, Peter Vingelmann, and Frank H.P. Fitzek. 2020. CUDA, release: 10.2.89. https://developer.nvidia.com/cuda-toolkit
475
+ [34] OpenAI. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL]
476
+ [35] OpenAI. 2023. OpenAI API. https://platform.openai.com/docs/api-reference/
477
+ [36] OpenAI. 2023. OpenAI Python API library. https://github.com/openai/openai-python
478
+ [37] OpenMP4 2013. OpenMP Application Program Interface. Version 4.0. July 2013.
479
+ [38] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Köpf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. arXiv:1912.01703 [cs.LG]
480
+ [39] Phind. 2023. Phind-CodeLlama-34B-v2. https://huggingface.co/Phind/Phind-CodeLlama-34B-v2
481
+ [40] Cedric Richter and Heike Wehrheim. 2022. Can we learn from developer mistakes? Learning to localize and repair real bugs from real bug fixes. ArXiv abs/2207.00301 (2022).
482
+ [41] Baptiste Roziere, Jonas Gehring, Fabian Gloeckle, Sten Sootla, Itai Gat, Xiaqing Ellen Tan, Yossi Adi, Jingyu Liu, Tal Remez, Jérémy Rapin, Artyom Kozhevnikov, Ivan Evtimov, Joanna Bitton, Manish Bhatt, Cristian Canton Ferrer, Aaron Grattafori, Wenhan Xiong, Alexandre Défossez, Jade Copet, Faisal Azhar, Hugo Touvron, Louis Martin, Nicolas Usunier, Thomas Scialom, and Gabriel Synnaeve. 2023. Code Llama: Open Foundation Models for Code. arXiv:2308.12950 [cs.CL]
483
+ [42] M. Snir. 1998. MPI-the Complete Reference: The MPI core. Mass. https://books.google.com/books?id=x79puJ2YkroC
484
+ [43] Xiangru Tang, Bill Qian, Rick Gao, Jiakang Chen, Xinyun Chen, and Mark Gerstein. 2023. BioCoder: A Benchmark for Bioinformatics Code Generation with Contextual Pragmatic Knowledge. arXiv:2308.16458 [cs.LG]
485
+ [44] Hugo Touvron et al. 2023. Llama 2: Open Foundation and Fine-Tuned Chat Models. arXiv:2307.09288 [cs.CL]
486
+ [45] Christian R. Trott, Damien Lebrun-Grandie, Daniel Arndt, Jan Ciesko, Vinh Dang, Nathan Ellingwood, Rahulkumar Gayatri, Evan Harvey, Daisy S. Hollman, Dan Ibanez, Nevin Liber, Jonathan Madsen, Jeff Miles, David Poliakoff, Amy Powell, Sivasankaran Rajamanickam, Mikael Simberg, Dan Sunderland, Bruno Turcksin, and Jeremiah Wilke. 2022. Kokkos 3: Programming Model Extensions for the Exascale Era. IEEE Transactions on Parallel and Distributed Systems 33, 4 (2022),
487
+
488
+ 805-817. https://doi.org/10.1109/TPDS.2021.3097283
489
+ [46] Pedro Valero-Lara, Alexis Huante, Mustafa Al Lail, William F. Godoy, Keita Teranishi, Prasanna Balaprakash, and Jeffrey S. Vetter. 2023. Comparing Llama-2 and GPT-3 LLMs for HPC kernels generation. arXiv:2309.07103 [cs.SE]
490
+ [47] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention Is All You Need. CoRR abs/1706.03762 (2017). arXiv:1706.03762 http://arxiv.org/abs/1706.03762
491
+ [48] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Transformers: State-of-the-Art Natural Language Processing. Association for Computational Linguistics, 38-45. https://www.aclweb.org/anthology/2020.emnlp-demos.6
492
+ [49] Frank F. Xu, Uri Alon, Graham Neubig, and Vincent J. Hellendoorn. 2022. A Systematic Evaluation of Large Language Models of Code. https://doi.org/10.5281/zenodo.6363556 https://arxiv.org/abs/2202.13169.
493
+ [50] Hao Yu, Bo Shen, Dezhi Ran, Jiaxin Zhang, Qi Zhang, Yuchi Ma, Guangtai Liang, Ying Li, Tao Xie, and Qianxiang Wang. 2023. CoderEval: A Benchmark of Pragmatic Code Generation with Generative Pre-trained Models. arXiv preprint arXiv:2302.00288 (2023).
2401.12xxx/2401.12554/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ff77248a00216fd5ce30a2827532f15f18a6253860aa64b46c96893e7987490
3
+ size 650488
2401.12xxx/2401.12554/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12586/fc908757-3ff6-4bd6-a609-8ec6463c04cf_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12586/fc908757-3ff6-4bd6-a609-8ec6463c04cf_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12586/fc908757-3ff6-4bd6-a609-8ec6463c04cf_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2c491d82260f26bf121c078be06ccc42ce14ad0fbce1782a618becdbc13a5ca9
3
+ size 22099780
2401.12xxx/2401.12586/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12586/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ec8a5f00c6bcf39b6b6691c06438b2d08bf21cad53fda7d7c2ef1b6c06d59b5
3
+ size 889787
2401.12xxx/2401.12586/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12592/e8da1853-39d1-41e6-8ee3-71b374b562d5_content_list.json ADDED
@@ -0,0 +1,1206 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "RGBD Objects in the Wild: Scaling Real-World 3D Object Learning from RGB-D Videos",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 176,
8
+ 130,
9
+ 792,
10
+ 176
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Hongchi Xia $^{1*}$ Yang Fu $^{2*}$ Sifei Liu $^{3}$ Xiaolong Wang $^{2}$ $^{1}$ University of Illinois Urbana-Champaign ${}^{2}$ UC San Diego ${}^{3}$ NVIDIA",
17
+ "bbox": [
18
+ 189,
19
+ 202,
20
+ 772,
21
+ 239
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "image",
27
+ "img_path": "images/22df23b8e8e93e3cc29e4c9901cc6bf3d74a0d80a00a3ad9f7efe248943b0b2c.jpg",
28
+ "image_caption": [
29
+ "Figure 1. WildRGB-D Dataset contains almost 8500 recorded objects and nearly 20000 RGBD videos in 46 common categories with corresponding object masks and 3D point clouds."
30
+ ],
31
+ "image_footnote": [],
32
+ "bbox": [
33
+ 163,
34
+ 253,
35
+ 805,
36
+ 608
37
+ ],
38
+ "page_idx": 0
39
+ },
40
+ {
41
+ "type": "text",
42
+ "text": "Abstract",
43
+ "text_level": 1,
44
+ "bbox": [
45
+ 233,
46
+ 648,
47
+ 313,
48
+ 662
49
+ ],
50
+ "page_idx": 0
51
+ },
52
+ {
53
+ "type": "text",
54
+ "text": "We introduce a new RGB-D object dataset captured in the wild called WildRGB-D. Unlike most existing real-world object-centric datasets which only come with RGB capturing, the direct capture of the depth channel allows better 3D annotations and broader downstream applications. WildRGB-D comprises large-scale category-level RGB-D object videos, which are taken using an iPhone to go around the objects in 360 degrees. It contains around 8500 recorded objects and nearly 20000 RGB-D videos across 46 common object categories. These videos are taken with diverse cluttered backgrounds with three setups to cover as many real-world scenarios as possible: (i) a single object in one video; (ii) multiple objects in one video; and (iii) an",
55
+ "bbox": [
56
+ 75,
57
+ 679,
58
+ 470,
59
+ 875
60
+ ],
61
+ "page_idx": 0
62
+ },
63
+ {
64
+ "type": "text",
65
+ "text": "object with a static hand in one video. The dataset is annotated with object masks, real-world scale camera poses, and reconstructed aggregated point clouds from RGBD videos. We benchmark four tasks with WildRGB-D including novel view synthesis, camera pose estimation, object 6d pose estimation, and object surface reconstruction. Our experiments show that the large-scale capture of RGB-D objects provides a large potential to advance 3D object learning. Our project page is https://wildrgbd.github.io/.",
66
+ "bbox": [
67
+ 500,
68
+ 648,
69
+ 890,
70
+ 784
71
+ ],
72
+ "page_idx": 0
73
+ },
74
+ {
75
+ "type": "text",
76
+ "text": "1. Introduction",
77
+ "text_level": 1,
78
+ "bbox": [
79
+ 500,
80
+ 814,
81
+ 630,
82
+ 829
83
+ ],
84
+ "page_idx": 0
85
+ },
86
+ {
87
+ "type": "text",
88
+ "text": "The recent advancement of computer vision has been largely relying on the scaling of training data [26, 53]. The same success in data-driven approaches has been recently adopted to 3D object modeling with new large 3D object-",
89
+ "bbox": [
90
+ 500,
91
+ 839,
92
+ 890,
93
+ 900
94
+ ],
95
+ "page_idx": 0
96
+ },
97
+ {
98
+ "type": "aside_text",
99
+ "text": "arXiv:2401.12592v3 [cs.CV] 28 Jul 2024",
100
+ "bbox": [
101
+ 22,
102
+ 270,
103
+ 57,
104
+ 705
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "page_footnote",
110
+ "text": "* Equal contribution.",
111
+ "bbox": [
112
+ 94,
113
+ 886,
114
+ 205,
115
+ 898
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "image",
121
+ "img_path": "images/9909e0e1f6f8f0052a3aa09734c25a18e023e1fd870342b37e3cb51f8d48b53b.jpg",
122
+ "image_caption": [
123
+ "Figure 2. The camera poses trajectory in WildRGB-D Dataset. We visualize the corresponding camera in each scene of our dataset, showing that our dataset is featured in 360 degree full and dense multi-view camera poses."
124
+ ],
125
+ "image_footnote": [],
126
+ "bbox": [
127
+ 163,
128
+ 88,
129
+ 810,
130
+ 280
131
+ ],
132
+ "page_idx": 1
133
+ },
134
+ {
135
+ "type": "text",
136
+ "text": "centric dataset collection [25, 40, 52]. Most of the large datasets are synthetic 3D data [5, 14, 22, 76] and a mix of synthetic data and real-world object scans [16], given it is much less labor intensive for scaling by rendering from simulation. However, it remains a big challenge to apply the model trained in simulation data to the real world. This is not only because the synthetic data has less realistic texture and shape, but also due to it is very hard to model the cluttered background and the natural light comes with it in simulation.",
137
+ "bbox": [
138
+ 75,
139
+ 345,
140
+ 472,
141
+ 494
142
+ ],
143
+ "page_idx": 1
144
+ },
145
+ {
146
+ "type": "text",
147
+ "text": "To make deep learning with 3D objects applicable in the real world, researchers have made efforts to collect real-world multiview object data [2, 54]. For example, the CO3D dataset [54] contains 19K object videos across 50 categories. However, due to the lack of depth, they require the use of COLMAP [56] to provide 3D annotations, which only works for $20\\%$ of the collected videos. Collecting the depth channel part of the data is not only useful for more accurate 3D ground-truth annotations, but also provides very useful information for downstream applications such as object 6D pose estimation and novel view synthesis. The OmniObject3D dataset [75] provides both object videos and a separate scanning of the objects. However, the collected videos do not come with the depth channel inputs and they are mostly taken with clean backgrounds. The Wild6D dataset [23] is one of the few recent efforts to collect RGB-D object videos taken in the wild. However, it only contains 6 categories of data and covers relatively smaller ranges of object views.",
148
+ "bbox": [
149
+ 75,
150
+ 501,
151
+ 468,
152
+ 789
153
+ ],
154
+ "page_idx": 1
155
+ },
156
+ {
157
+ "type": "text",
158
+ "text": "In this paper, we propose to collect a new dataset that contains large-scale RGB-D object videos across diverse object categories and presented in the wild. Our dataset, namely WildRGB-D, covers 8500 tabletop objects across 44 categories in 20K videos. The videos are taken using iPhones to go around the objects in 360 degrees (see Figure 2 for visualization). Examples of the dataset are shown",
159
+ "bbox": [
160
+ 75,
161
+ 795,
162
+ 470,
163
+ 902
164
+ ],
165
+ "page_idx": 1
166
+ },
167
+ {
168
+ "type": "text",
169
+ "text": "in Figure 1. There are three types of videos: (i) Single object video where there is only one object presented on the table; (ii) Multi-object video where there are multiple objects presented at the same time; and (iii) Hand-object video where there is a static human hand grasping the object. More video types add variety, creating occlusion for objects in scenes, which are worthy study cases in some tasks. The collection of the WildRGB-D dataset not only considers the cluttered background in the real world, but also the common scenarios where the objects are occluded by human hands.",
170
+ "bbox": [
171
+ 496,
172
+ 345,
173
+ 893,
174
+ 496
175
+ ],
176
+ "page_idx": 1
177
+ },
178
+ {
179
+ "type": "text",
180
+ "text": "We perform automatic annotations for WildRGB-D. With RGB-D capturing, we can apply the Simultaneous Localization and Mapping [19, 58] (SLAM) algorithms, and exploit the RGB images and depth information from the depth sensor of mobile phones to reconstruct the $3D$ camera poses in real-world scale and aggregated $3D$ point clouds. Additionally, center object segmentation masks can be attained by bounding-box detection using text prompt of object category in Grounding-DINO [41], segmentation using Segment-Anything [33] and mask tracking using XMem [11], which are largely integrated into [12, 78].",
181
+ "bbox": [
182
+ 496,
183
+ 500,
184
+ 895,
185
+ 666
186
+ ],
187
+ "page_idx": 1
188
+ },
189
+ {
190
+ "type": "text",
191
+ "text": "To exploit the potential of our dataset, we benchmark it in four downstream tracks:",
192
+ "bbox": [
193
+ 498,
194
+ 667,
195
+ 893,
196
+ 696
197
+ ],
198
+ "page_idx": 1
199
+ },
200
+ {
201
+ "type": "list",
202
+ "sub_type": "text",
203
+ "list_items": [
204
+ "(i) Novel view synthesis. We evaluate various algorithms based on NeRF [47] which is optimized in a single scene, and generalizable NeRF which is trained in a category-level. With the help of depth information when training NeRFs, we can achieve consistently improved results. This offers a new platform for evaluating view synthesis approaches using RGB or RGB-D data.",
205
+ "(ii) Camera pose estimation. We adopt different pose estimation approaches [35, 86] to evaluate their capability of estimating relative camera poses in a sparse setting. We validate their generalizable ability through training on a partial of all categories and testing on unseen ones. We observe remarkable generalization performance on unseen"
206
+ ],
207
+ "bbox": [
208
+ 496,
209
+ 700,
210
+ 893,
211
+ 901
212
+ ],
213
+ "page_idx": 1
214
+ },
215
+ {
216
+ "type": "table",
217
+ "img_path": "images/8c940f5d8284b2cb0be9a75cdd8626c9bb312ed8e7ee6b6ffa873e20b0349652.jpg",
218
+ "table_caption": [],
219
+ "table_footnote": [],
220
+ "table_body": "<table><tr><td>Dataset</td><td>Real</td><td>Multi-View</td><td>Depth Src.</td><td>3D GT</td><td>Video</td><td># Cats</td><td># Obj</td></tr><tr><td>ShapeNet [5]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>55</td><td>51k</td></tr><tr><td>ModelNet [76]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>40</td><td>12k</td></tr><tr><td>3D-Future [22]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>34</td><td>16k</td></tr><tr><td>ABO [14]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>63</td><td>8k</td></tr><tr><td>DTU [1]</td><td>✓</td><td>limited</td><td>COLMAP</td><td>mesh</td><td>RGB</td><td>N/A</td><td>124</td></tr><tr><td>CO3D [54]</td><td>✓</td><td>full</td><td>COLMAP</td><td>pcl</td><td>RGB</td><td>50</td><td>19k</td></tr><tr><td>MVIImgNet [84]</td><td>✓</td><td>limited</td><td>COLMAP</td><td>pcl</td><td>RGB</td><td>238</td><td>220k</td></tr><tr><td>Objectron [2]</td><td>✓</td><td>limited</td><td>COLMAP</td><td>pcl</td><td>RGB</td><td>9</td><td>15k</td></tr><tr><td>GSO [18]</td><td>✓</td><td>none</td><td>scatterer</td><td>mesh</td><td>none</td><td>17</td><td>1k</td></tr><tr><td>OmniObject3D [75]</td><td>✓</td><td>full</td><td>scatterer</td><td>mesh</td><td>RGB</td><td>190</td><td>6k</td></tr><tr><td>Choi et al. [13]</td><td>✓</td><td>limited</td><td>sensor</td><td>mesh*</td><td>RGBD</td><td>9</td><td>2k</td></tr><tr><td>Wild6D [23]</td><td>✓</td><td>limited</td><td>sensor</td><td>pcl</td><td>RGBD</td><td>5</td><td>1.8k</td></tr><tr><td>Ours</td><td>✓</td><td>full</td><td>sensor</td><td>pcl</td><td>RGBD</td><td>44</td><td>8.5k</td></tr></table>",
221
+ "bbox": [
222
+ 81,
223
+ 89,
224
+ 467,
225
+ 234
226
+ ],
227
+ "page_idx": 2
228
+ },
229
+ {
230
+ "type": "text",
231
+ "text": "Table 1. Comparison of WildRGB-D dataset with other 3D object dataset. Some datasets don't provide video and we mark in \"none\". Some only cover partial angles, which is marked in \"limited\". Asterisk(*) means partial annotations. Depth Src. means where the depth information comes from, including CAD models, COLMAP, scanner devices and depth sensor in iPhones. pcl is the abbreviation of point cloud.",
232
+ "bbox": [
233
+ 75,
234
+ 248,
235
+ 468,
236
+ 345
237
+ ],
238
+ "page_idx": 2
239
+ },
240
+ {
241
+ "type": "text",
242
+ "text": "categories, which indicates our large-scale category-level dataset can serve as a training source for generalizable camera pose estimation.",
243
+ "bbox": [
244
+ 75,
245
+ 387,
246
+ 468,
247
+ 431
248
+ ],
249
+ "page_idx": 2
250
+ },
251
+ {
252
+ "type": "list",
253
+ "sub_type": "text",
254
+ "list_items": [
255
+ "(iii) Object surface reconstruction. We conduct object surface reconstruction in our dataset with RGB or RGB-D videos and object masks through Instant-NGP [49] and Neusfacto [85]. Results show that our depth information endow reconstruction with more accurate precision and SDF-based algorithm [85] performs better in this setting.",
256
+ "(iv) Object 6D pose estimation. We exploit the self-supervised algorithm in category-level object 6D pose estimation [87] with large-scale RGB-D images in our dataset and then evaluate the pre-trained model on the Wild6D [23] test set. We show our dataset can facilitate 6D pose estimation even without training labels, and we also study its generalization ability to the out-of-distribution test set."
257
+ ],
258
+ "bbox": [
259
+ 75,
260
+ 433,
261
+ 468,
262
+ 631
263
+ ],
264
+ "page_idx": 2
265
+ },
266
+ {
267
+ "type": "text",
268
+ "text": "2. Related Work",
269
+ "text_level": 1,
270
+ "bbox": [
271
+ 76,
272
+ 647,
273
+ 218,
274
+ 662
275
+ ],
276
+ "page_idx": 2
277
+ },
278
+ {
279
+ "type": "text",
280
+ "text": "3D Object Datasets One representative kind of 3D object dataset is the 3D synthetic dataset, like ShapeNet [5] and ModelNet40 [76], which consist of category-level objects. 3D-FUTURE [22] and ABO [14] datasets are typical of higher quality mesh with textures. [34] and [64] introduce real-world category-specific object datasets that mainly focus on birds and chairs respectively. DTU [1] and BlendedMVS [80] are datasets designed for multiview stereo and lack category-level classification. Objectron [2] provides rich annotations but only partial videos are fully 360 degree covered. CO3D [54] is a largescale category-level dataset that annotates camera poses and depths with COLMAP [57], which doesn't provide depths in real-world scale, and MVImgNet [84] is also a dataset similar to CO3D. Pascal3D [77] contains real-",
281
+ "bbox": [
282
+ 75,
283
+ 672,
284
+ 468,
285
+ 900
286
+ ],
287
+ "page_idx": 2
288
+ },
289
+ {
290
+ "type": "text",
291
+ "text": "world 3D objects with pose annotations and CAD models in limited categories. Datasets collected with specialized hardware (scatterer, dome, etc.) like GSO [18] and OmniObject3D [75] have more accurate 3D geometry models and rendered depths from them. However, they don't have RGBD wild object videos collected and lack real captured depths as well as background depths. In the aspect of RGBD object datasets, Wild6D [23] features RGBD image sequences and 6D pose annotations while lacking full 360 coverage and category types. Choi et al. [13] proposes RGBD object-centric datasets in 44 categories, but with limited camera annotations. As a comparison, our proposed WildRGB-D dataset contains almost 8500 recorded objects and nearly 20000 RGBD videos recorded all 360 degrees in 46 common categories from well-known 2D datasets, all with real-world scale camera poses and object mask annotations as well as aggregated point clouds. We present the detailed comparison in Tab. 1.",
292
+ "bbox": [
293
+ 496,
294
+ 90,
295
+ 890,
296
+ 362
297
+ ],
298
+ "page_idx": 2
299
+ },
300
+ {
301
+ "type": "text",
302
+ "text": "Neural Radiance Field and Object Surface Reconstruction Neural Radiance Field (NeRF) [47] is a kind of scene representation based on MLPs. It takes in sampled points along each ray and outputs the density and color of each point, which are then aggregated by volume rendering to synthesize views. [3, 4, 48, 66] propose new changes to the original NeRF to improve the visual quality and [7, 21, 39, 49, 60] advance the NeRF efficiency. In order to generalize the NeRF representations to other scenes, [6, 28, 43, 70, 83] learn latent 3D representations and priors from a bunch of existed scenes to help synthesize views across different scenes. Derived from original NeRF, [15, 50, 68, 72, 74, 81, 85] leverage Sign Distance Function (SDF) and represent the 3D scene by implicit surface, which has a more clear object boundary definition. Recently, 3D Gaussian Splating [31] has become a competitive alternative to NeRF. WildRGB-D dataset comprises various category-level objects and scenes on a large scale, which is suitable for novel view synthesis benchmarks and helps boost more mature reconstruction algorithms and generalizable 3D scene representations.",
303
+ "bbox": [
304
+ 496,
305
+ 382,
306
+ 890,
307
+ 699
308
+ ],
309
+ "page_idx": 2
310
+ },
311
+ {
312
+ "type": "text",
313
+ "text": "Camera Pose Estimation Given dense image views, mature algorithms of SfM [57] and SLAM [19] can estimate camera poses well by computing visual matches [44], verifying through RANSAC [20] and optimizing via bundle adjustment [63]. However, in a sparse camera view setting, camera pose estimation remains a challenging task. Some approaches [61, 71] leverage RNN or adopt auto-regression [79] targeting at SLAM applications. For category-agnostic sparse view camera pose estimation, [46, 55] adopt a direct regression approach. [29] estimates 6D pose upon training on synthetic dataset. Energy-based method [86] estimates distributions over relative rotations",
314
+ "bbox": [
315
+ 496,
316
+ 719,
317
+ 890,
318
+ 900
319
+ ],
320
+ "page_idx": 2
321
+ },
322
+ {
323
+ "type": "image",
324
+ "img_path": "images/b64f73897576ac06d7de26d0a1c8254f484a179d62d482f2fe691db8dae06c30.jpg",
325
+ "image_caption": [
326
+ "Figure 3. Statistics of WildRGB-D Dataset list the total and per-category number of objects and different types of videos."
327
+ ],
328
+ "image_footnote": [],
329
+ "bbox": [
330
+ 98,
331
+ 95,
332
+ 625,
333
+ 219
334
+ ],
335
+ "page_idx": 3
336
+ },
337
+ {
338
+ "type": "image",
339
+ "img_path": "images/5aef8eafa9ea0f6964252c5a74af7dbc5c432eea3a9315507142c4bb7adf1a32.jpg",
340
+ "image_caption": [],
341
+ "image_footnote": [],
342
+ "bbox": [
343
+ 638,
344
+ 106,
345
+ 861,
346
+ 214
347
+ ],
348
+ "page_idx": 3
349
+ },
350
+ {
351
+ "type": "image",
352
+ "img_path": "images/00c7655f91ab7c8ce270aa9ef82a3a9425564851ad868e86fdb97bb252af3825.jpg",
353
+ "image_caption": [
354
+ "Figure 4. Distribution visualization of different kinds of Object 6D pose dataset and WildRGB-D dataset. We observe obvious disparity between Wild6D and Our dataset. Visualizations of [23, 30, 42, 67, 69] are from [30]."
355
+ ],
356
+ "image_footnote": [],
357
+ "bbox": [
358
+ 99,
359
+ 272,
360
+ 210,
361
+ 371
362
+ ],
363
+ "page_idx": 3
364
+ },
365
+ {
366
+ "type": "image",
367
+ "img_path": "images/61ca8803c882d0614dc9b936aa6214cde59a4b1593593a12b8880c9988f0caec.jpg",
368
+ "image_caption": [],
369
+ "image_footnote": [],
370
+ "bbox": [
371
+ 212,
372
+ 272,
373
+ 326,
374
+ 371
375
+ ],
376
+ "page_idx": 3
377
+ },
378
+ {
379
+ "type": "image",
380
+ "img_path": "images/86e795ba43d00f5105db35380c405d017c16f90b086cfffa8298af4cb81de5bb.jpg",
381
+ "image_caption": [],
382
+ "image_footnote": [],
383
+ "bbox": [
384
+ 331,
385
+ 272,
386
+ 449,
387
+ 371
388
+ ],
389
+ "page_idx": 3
390
+ },
391
+ {
392
+ "type": "text",
393
+ "text": "and [35] incorporates multi-view context to estimate camera 6D pose. Bundle adjustment gets learned after predictions in [59] to refine the estimated poses. In WildRGB-D dataset, with full 360-degree multi-view videos, the sparse view camera pose estimation setting is easily accessible, enabling our dataset to serve as a large-scale training database for these algorithms.",
394
+ "bbox": [
395
+ 75,
396
+ 479,
397
+ 468,
398
+ 585
399
+ ],
400
+ "page_idx": 3
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "Object 6D Pose Estimation In the setting of category-level 6D pose estimation, algorithms predict object poses in the same category and meet with various intra-class shapes. [67] predicts 6D pose using Umeyama algorithm [65] with NOCS map estimation and [8, 23, 62] follow up to learn more accurate NOCS representations. Other algorithms learn to estimate 6D pose through direct regression [9, 36], keypoint location estimations [38] and so on. Apart from supervised learning, self-supervision emerges due to the high cost of annotations. One approach [10, 24, 27, 82] is to adapt sim-to-real upon the pre-trained model on synthetic data. Another one [23, 45, 51] resorts to semi-supervised training. [87] proposes cycles across 2D-3D space learned correspondence, which enables training using only in-the-wild RGBD images without any annotations and is compatible with our dataset. With large-scale category-level RGBD wild object images for self-supervised learning, our dataset has the potential to boost future developments in this field.",
405
+ "bbox": [
406
+ 75,
407
+ 628,
408
+ 470,
409
+ 900
410
+ ],
411
+ "page_idx": 3
412
+ },
413
+ {
414
+ "type": "text",
415
+ "text": "3. The WildRGB-D Dataset",
416
+ "text_level": 1,
417
+ "bbox": [
418
+ 500,
419
+ 266,
420
+ 733,
421
+ 282
422
+ ],
423
+ "page_idx": 3
424
+ },
425
+ {
426
+ "type": "text",
427
+ "text": "3.1. Data Collections, Processing, and Annotation",
428
+ "text_level": 1,
429
+ "bbox": [
430
+ 500,
431
+ 292,
432
+ 880,
433
+ 308
434
+ ],
435
+ "page_idx": 3
436
+ },
437
+ {
438
+ "type": "text",
439
+ "text": "Data Collections In order to collect RGBD video on a large scale expeditently and economically, we record with the help of an iPhone front camera using Record3D App and rotate the camera around the object so that full 360-degree views of objects are captured with RGB images and the corresponding depth images. Camera rotating speed is controlled equably by our collection setup to ensure less blur in videos. We select 46 common categories from well-known 2D datasets [17, 37]. We record three videos for every selected object, which are composed of single-object video, multi-object video, and hand-object video. Every recorded video has been checked and some are left behind due to poor SLAM camera pose estimation. Details of WildRGB-D Dataset are listed in Fig. 3.",
440
+ "bbox": [
441
+ 496,
442
+ 316,
443
+ 890,
444
+ 529
445
+ ],
446
+ "page_idx": 3
447
+ },
448
+ {
449
+ "type": "text",
450
+ "text": "Generating Camera Poses and 3D Point Cloud Our WildRGB-D dataset has 3D annotations including camera poses in real-world scale, scene point clouds, and central object masks. In order to attain real-world scale camera poses, instead of relying on COLMAP [57] to first generate camera poses and then the depths using the poses, we generate more accurate camera poses with the mature RGBD Simultaneous Localization and Mapping [19, 58] (SLAM) algorithm, which leverages our captured depths. Additionally, it has the capability of exploiting the RGB images and depth information from the depth sensor of mobile phones to reconstruct the 3D camera poses in real-world scale, which is different from COLMAP depths, which are not in real-world scale. It enables us to simply project the depth images and gain aggregated 3D point clouds (see Figure 5). Then we manually check the quality of the aggregated 3D point cloud and exclude videos in which SLAM fails to get accurate camera poses. To increase the probability of getting correct SLAM results for each video, we adopt two kinds of SLAM algorithms including BAD SLAM [58] and SLAM implementation from Open3D [89], which increase our successful rate to over $90\\%$ .",
451
+ "bbox": [
452
+ 496,
453
+ 541,
454
+ 892,
455
+ 872
456
+ ],
457
+ "page_idx": 3
458
+ },
459
+ {
460
+ "type": "page_footnote",
461
+ "text": "1 https://record3d.app/",
462
+ "bbox": [
463
+ 517,
464
+ 886,
465
+ 635,
466
+ 901
467
+ ],
468
+ "page_idx": 3
469
+ },
470
+ {
471
+ "type": "image",
472
+ "img_path": "images/e60809dd3a508b3a33a2a4007fce47b447f726ebc16b33a0bb38ee06134d3f7d.jpg",
473
+ "image_caption": [],
474
+ "image_footnote": [],
475
+ "bbox": [
476
+ 91,
477
+ 88,
478
+ 483,
479
+ 164
480
+ ],
481
+ "page_idx": 4
482
+ },
483
+ {
484
+ "type": "image",
485
+ "img_path": "images/6e24260e58fc047a2b29593d29a2c2cfe388acf14cbb9c45e73c1b28ef55b288.jpg",
486
+ "image_caption": [
487
+ "Figure 5. Point cloud reconstruction of objects in WildRGB-D Dataset. We reconstruct the aggregated point cloud of the scene by leveraging existed 3D annotations of camera poses and depth images."
488
+ ],
489
+ "image_footnote": [],
490
+ "bbox": [
491
+ 91,
492
+ 165,
493
+ 483,
494
+ 241
495
+ ],
496
+ "page_idx": 4
497
+ },
498
+ {
499
+ "type": "image",
500
+ "img_path": "images/15637e4f7a89cbb63e921b14280d1e5b0660320dc6e92224a5f65de2d464dae8.jpg",
501
+ "image_caption": [],
502
+ "image_footnote": [],
503
+ "bbox": [
504
+ 486,
505
+ 89,
506
+ 877,
507
+ 164
508
+ ],
509
+ "page_idx": 4
510
+ },
511
+ {
512
+ "type": "image",
513
+ "img_path": "images/647ea6f1df8c01e2c7dc935e17998a1e207b424dcaa60f6416df40f599d73e52.jpg",
514
+ "image_caption": [],
515
+ "image_footnote": [],
516
+ "bbox": [
517
+ 486,
518
+ 165,
519
+ 877,
520
+ 241
521
+ ],
522
+ "page_idx": 4
523
+ },
524
+ {
525
+ "type": "table",
526
+ "img_path": "images/c85ac2076b6120d379f2b81a0060de5f09fa805fd91d1dca479a4fc31433d042.jpg",
527
+ "table_caption": [],
528
+ "table_footnote": [],
529
+ "table_body": "<table><tr><td>Method</td><td>PSNR↑/SD</td><td>SSIM↑/SD</td><td>LPIPS↓/SD</td><td>MAE↓/SD</td></tr><tr><td>NeRF [47]</td><td>23.03/1.50</td><td>0.690/0.072</td><td>0.390/0.075</td><td>0.306/0.109</td></tr><tr><td>NeRF (w mask)</td><td>34.65/4.44</td><td>0.943/0.077</td><td>0.031/0.032</td><td>0.029/0.019</td></tr><tr><td>Mip-NeRF 360 [4]</td><td>23.84/1.60</td><td>0.762/0.063</td><td>0.280/0.067</td><td>0.185/0.068</td></tr><tr><td>Mip-NeRF 360 (w mask)</td><td>35.60/4.51</td><td>0.949/0.077</td><td>0.024/0.025</td><td>0.020/0.015</td></tr><tr><td>Instant-NGP [49]</td><td>23.67/2.07</td><td>0.745/0.063</td><td>0.257/0.070</td><td>0.366/0.105</td></tr><tr><td>Instant-NGP (w mask)</td><td>35.65/5.20</td><td>0.946/0.077</td><td>0.021/0.031</td><td>0.068/0.074</td></tr></table>",
530
+ "bbox": [
531
+ 81,
532
+ 304,
533
+ 467,
534
+ 387
535
+ ],
536
+ "page_idx": 4
537
+ },
538
+ {
539
+ "type": "text",
540
+ "text": "Generating Central Object Masks We perform central object mask segmentation through a series of methods. Instead of the classic PointRend [32] algorithm, we leverage the novel segmentation tool Segment-Anything (SAM) [33]. We attain the prompt for SAM using Grounding-DINO [41], which generates a bounding box for SAM according to the category text prompt. After attaining the mask segmentation of the first frame in the video, XMem [11] is applied to track the mask in the video. The masking pipeline is largely integrated into [12, 78].",
541
+ "bbox": [
542
+ 75,
543
+ 465,
544
+ 468,
545
+ 617
546
+ ],
547
+ "page_idx": 4
548
+ },
549
+ {
550
+ "type": "text",
551
+ "text": "3.2. Statistics and Distribution",
552
+ "text_level": 1,
553
+ "bbox": [
554
+ 76,
555
+ 619,
556
+ 313,
557
+ 633
558
+ ],
559
+ "page_idx": 4
560
+ },
561
+ {
562
+ "type": "text",
563
+ "text": "In WildRGB-D dataset collections, we recorded 8500 objects and 3 videos for each one. After excluding those SLAM-failed videos, we have 8367 objects in 23049 videos in our dataset (maintaining rates are $99.3\\% / 91.0\\%$ ). The selected videos contain $33.1\\%$ single object videos, $63.0\\%$ multi-object videos, and $3.9\\%$ hand-object videos. Details of WildRGB-D dataset are listed in Fig. 3.",
564
+ "bbox": [
565
+ 75,
566
+ 642,
567
+ 468,
568
+ 750
569
+ ],
570
+ "page_idx": 4
571
+ },
572
+ {
573
+ "type": "text",
574
+ "text": "4. Experiments",
575
+ "text_level": 1,
576
+ "bbox": [
577
+ 76,
578
+ 762,
579
+ 209,
580
+ 779
581
+ ],
582
+ "page_idx": 4
583
+ },
584
+ {
585
+ "type": "text",
586
+ "text": "4.1. Novel View Synthesis",
587
+ "text_level": 1,
588
+ "bbox": [
589
+ 76,
590
+ 786,
591
+ 276,
592
+ 801
593
+ ],
594
+ "page_idx": 4
595
+ },
596
+ {
597
+ "type": "text",
598
+ "text": "In this section, we conduct multiple experiments towards methods concerning novel view synthesis (NVS) in the following three scenarios: 1) Single-Scene NVS, where we train NeRF-based methods [4, 47, 49] on a single scene with only RGB image sequence. 2) Cross-Scene NVS, where we learn category-level scene representations to generalize",
599
+ "bbox": [
600
+ 75,
601
+ 809,
602
+ 470,
603
+ 902
604
+ ],
605
+ "page_idx": 4
606
+ },
607
+ {
608
+ "type": "table",
609
+ "img_path": "images/27fa29b8ee5548da2c1e79c8779f52552b608c91c776a57d0abeef5159f701e9.jpg",
610
+ "table_caption": [
611
+ "Table 2. Single-scene NVS results. Average of four metrics w and w/o masks across all training dataset are reported with their standard deviation (SD)."
612
+ ],
613
+ "table_footnote": [],
614
+ "table_body": "<table><tr><td>Method</td><td>Level</td><td>PSNR↑/SD</td><td>SSIM↑/SD</td><td>LPIPS↓/SD</td><td>MAE↓/SD</td></tr><tr><td>Pixel-NeRF [83]</td><td rowspan=\"3\">Easy</td><td>20.28/0.65</td><td>0.645/0.043</td><td>0.495/0.074</td><td>0.355/0.120</td></tr><tr><td>MVSNeRF [6]</td><td>19.95/1.00</td><td>0.663/0.036</td><td>0.351/0.066</td><td>0.370/0.100</td></tr><tr><td>IBRNet [70]</td><td>20.93/0.98</td><td>0.711/0.031</td><td>0.395/0.153</td><td>-</td></tr><tr><td>Pixel-NeRF [83]</td><td rowspan=\"3\">Middle</td><td>18.76/0.50</td><td>0.572/0.064</td><td>0.534/0.047</td><td>0.299/0.057</td></tr><tr><td>MVSNeRF [6]</td><td>18.75/0.74</td><td>0.601/0.069</td><td>0.363/0.036</td><td>0.345/0.102</td></tr><tr><td>IBRNet [70]</td><td>19.77/1.01</td><td>0.663/0.071</td><td>0.362/0.063</td><td>-</td></tr><tr><td>Pixel-NeRF [83]</td><td rowspan=\"3\">Hard</td><td>17.23/0.66</td><td>0.521/0.035</td><td>0.624/0.054</td><td>0.383/0.121</td></tr><tr><td>MVSNeRF [6]</td><td>17.13/0.89</td><td>0.564/0.043</td><td>0.425/0.045</td><td>0.502/0.260</td></tr><tr><td>IBRNet [70]</td><td>17.92/1.12</td><td>0.614/0.056</td><td>0.439/0.069</td><td>-</td></tr></table>",
615
+ "bbox": [
616
+ 503,
617
+ 304,
618
+ 890,
619
+ 422
620
+ ],
621
+ "page_idx": 4
622
+ },
623
+ {
624
+ "type": "table",
625
+ "img_path": "images/b0f225a87eefc968e74425814ce0ffd8fd67c24532e6a2ffb89be476fc9a6321.jpg",
626
+ "table_caption": [
627
+ "Table 3. Cross-scene NVS results. Average of four metrics across all categories in training dataset are reported. We report metrics of three difficulty level respectively. Entries marked in - are not provided."
628
+ ],
629
+ "table_footnote": [],
630
+ "table_body": "<table><tr><td>Method</td><td>PSNR↑/SD</td><td>SSIM↑/SD</td><td>LPIPS↓/SD</td><td>MAE↓/SD</td></tr><tr><td>Instant-NGP [49]</td><td>23.67/2.07</td><td>0.745/0.063</td><td>0.257/0.070</td><td>0.366/0.105</td></tr><tr><td>Instant-NGP (depth sup.)</td><td>24.60/2.13</td><td>0.759/0.062</td><td>0.239/0.066</td><td>0.108/0.057</td></tr><tr><td>Pixel-NeRF [83]</td><td>18.53/1.21</td><td>0.568/0.067</td><td>0.556/0.073</td><td>0.336/0.099</td></tr><tr><td>Pixel-NeRF (depth sup.)</td><td>19.10/1.21</td><td>0.605/0.060</td><td>0.499/0.064</td><td>0.147/0.087</td></tr><tr><td>MVSNeRF [6]</td><td>18.43/1.30</td><td>0.600/0.065</td><td>0.381/0.054</td><td>0.400/0.182</td></tr><tr><td>MVSNeRF (depth sup.)</td><td>18.44/1.29</td><td>0.600/0.065</td><td>0.381/0.054</td><td>0.397/0.186</td></tr></table>",
631
+ "bbox": [
632
+ 503,
633
+ 503,
634
+ 890,
635
+ 592
636
+ ],
637
+ "page_idx": 4
638
+ },
639
+ {
640
+ "type": "text",
641
+ "text": "Table 4. Depth Supervised NVS and depth estimation results. Average of four metrics w and w/o depth supervision across all training dataset are reported with their standard deviation (SD).",
642
+ "bbox": [
643
+ 498,
644
+ 602,
645
+ 890,
646
+ 643
647
+ ],
648
+ "page_idx": 4
649
+ },
650
+ {
651
+ "type": "text",
652
+ "text": "into other scenes with Generalizable NeRFs [6, 70, 83]. 3) Depth Supervised NVS, where we conduct NVS experiments with depth image priors in our dataset to study the potential that depth information will endow to NVS tasks.",
653
+ "bbox": [
654
+ 496,
655
+ 670,
656
+ 890,
657
+ 731
658
+ ],
659
+ "page_idx": 4
660
+ },
661
+ {
662
+ "type": "text",
663
+ "text": "Single-Scene NVS We select ten scenes from each category and uniformly sample images as validation split. We choose NeRF [47], Mip-NeRF 360 [4] and Instant-NGP [49] for evaluations. Results are shown in Tab. 2. We report the average PSNR, SSIM [73], LPIPS [88] and rendering depths Mean Average Error (MAE) compared with our sensor-collected depths across all categories. We also report metrics only related to the NVS quality of central objects using object masks. Results show that Mip-NeRF 360 and Instant-NGP outperform original NeRF in terms of visual quality metrics. NeRF-based methods perform better",
664
+ "bbox": [
665
+ 496,
666
+ 734,
667
+ 892,
668
+ 900
669
+ ],
670
+ "page_idx": 4
671
+ },
672
+ {
673
+ "type": "image",
674
+ "img_path": "images/424355a14fce7ac886a94f06f3b859a49140c8ac3ffc5d1eefe4d73f0c7b127c.jpg",
675
+ "image_caption": [
676
+ "Figure 6. Novel view synthesis visualization of different kinds of NeRF methods: NeRF [47], Mip-NeRF 360 [4] and Instant-NGP [49]."
677
+ ],
678
+ "image_footnote": [],
679
+ "bbox": [
680
+ 84,
681
+ 87,
682
+ 460,
683
+ 367
684
+ ],
685
+ "page_idx": 5
686
+ },
687
+ {
688
+ "type": "text",
689
+ "text": "when we only concern with the recovery of central objects under object masks. What's more, Mip-NeRF 360 performs best in learning single-scene geometry. Visualization can be found in Figure 6. In brief, our dataset offers extensive categories and scenes for in-depth NVs experiments.",
690
+ "bbox": [
691
+ 75,
692
+ 441,
693
+ 467,
694
+ 516
695
+ ],
696
+ "page_idx": 5
697
+ },
698
+ {
699
+ "type": "text",
700
+ "text": "Cross-Scene NVS Apart from single-scene optimizations, we also evaluate Generalizable NeRFs: PixelNeRF [83], MVSNeRF [6] and IBRNet [70] in the cross-scene setting. For each category in our dataset, we select the same test scenes as single-scene NVS experiments and train in the remaining scenes of the same category to learn per-category latent representations. We divide the 46 categories into three difficulty levels and report the average metrics of each level. For evaluation, we use three source views to synthesize novel views. We report PSNR, SSIM and LPIPS to measure visual quality and depth MAE to measure the learned geometry quality. From Tab. 3, we observe that IBRNet outperforms in all three difficulty levels in terms of visual quality. Additionally, learned geometry quality isn't highly correlated with the rendering visual quality in novel views. To sum up, our dataset provides great potential in learning category-level cross-scene NVS methods.",
701
+ "bbox": [
702
+ 75,
703
+ 518,
704
+ 467,
705
+ 775
706
+ ],
707
+ "page_idx": 5
708
+ },
709
+ {
710
+ "type": "text",
711
+ "text": "Depth Supervised NVS We also study the influences that depth supervision brings. We choose Instant-NGP [49] in single-scene NVS methods and both Pixel-NeRF [83] and MVSNeRF [6] in cross-scene NVS methods. Our experiment results in Tab. 4 prove that depth supervision is beneficial for these methods to learn better representations. In our experiment setting, we add L1 depth loss to every algorithm and choose the best-performance depth loss weight",
712
+ "bbox": [
713
+ 75,
714
+ 779,
715
+ 467,
716
+ 900
717
+ ],
718
+ "page_idx": 5
719
+ },
720
+ {
721
+ "type": "image",
722
+ "img_path": "images/165950f06335eeb2ef47c2773db19240c708671699e1ab5ba5d03392d438a812.jpg",
723
+ "image_caption": [
724
+ "Figure 7. Relpose++ [35] pair-wise evaluation visualization. We show every image pair with its relative rotation predicted by Relpose."
725
+ ],
726
+ "image_footnote": [],
727
+ "bbox": [
728
+ 516,
729
+ 89,
730
+ 875,
731
+ 357
732
+ ],
733
+ "page_idx": 5
734
+ },
735
+ {
736
+ "type": "text",
737
+ "text": "for them. Compared with conducting NVS tasks without depths, the performances of both Instant-NGP and PixelNeRF get boosted when training with depth loss. As for experiments of MVSNeRF, since we've already added depth information in the original training as the guidance in building rays, it turns out that the improvements when training with extra added depth loss are limited. In a nutshell, with depth priors, both single-scene NVs and cross-scene NVS methods learn better generalization capabilities, boosting NVS more accurate and generalizable.",
738
+ "bbox": [
739
+ 496,
740
+ 441,
741
+ 890,
742
+ 592
743
+ ],
744
+ "page_idx": 5
745
+ },
746
+ {
747
+ "type": "text",
748
+ "text": "4.2. Camera Pose Estimation",
749
+ "text_level": 1,
750
+ "bbox": [
751
+ 500,
752
+ 604,
753
+ 723,
754
+ 619
755
+ ],
756
+ "page_idx": 5
757
+ },
758
+ {
759
+ "type": "text",
760
+ "text": "In this section, we benchmark two data-driven methods RelPose [86] and RelPose++ [35] for inference of the relative camera poses from multi-view images in a sparse setting. Leveraging the given annotations of camera poses and large-scale category-level video in our dataset, we aim to learn generalizable viewpoint inference capability from training-seen categories to unseen ones. Since WildRGB-D dataset has a full and dense 360-degree camera trajectory, we can provide both a large-scale database and various view settings to assist training. In our experiments, we divided totally 46 categories into training and testing categories. We also hold some videos in training categories for evaluation. We adopt evaluation settings described in [35, 86] and report results in Tab. 5 and Tab. 6. We observe that these two methods can generalize well to other scenarios both in known categories and unseen categories since the relative rotation estimation errors are in a reasonable range (also see Figure 7 for visualization). However,",
761
+ "bbox": [
762
+ 496,
763
+ 628,
764
+ 890,
765
+ 900
766
+ ],
767
+ "page_idx": 5
768
+ },
769
+ {
770
+ "type": "image",
771
+ "img_path": "images/1ad50b11ab8b604fcd43523a82d19164067801f061fbb5fa77892d3dcbdcf850.jpg",
772
+ "image_caption": [
773
+ "Figure 8. Visualization of RGBD reconstruction surface from Neusfacto [85]. Original RGBD image samples are listed on the left and multi-view reconstructed surface is on the right for each example."
774
+ ],
775
+ "image_footnote": [],
776
+ "bbox": [
777
+ 101,
778
+ 90,
779
+ 872,
780
+ 306
781
+ ],
782
+ "page_idx": 6
783
+ },
784
+ {
785
+ "type": "text",
786
+ "text": "the error of translation prediction is comparatively large in RelPose++, which still poses challenges in this field. To sum up, WildRGB-D dataset can serve as large-scale training sources for generalizable camera pose estimation algorithms to achieve remarkable results.",
787
+ "bbox": [
788
+ 75,
789
+ 367,
790
+ 470,
791
+ 444
792
+ ],
793
+ "page_idx": 6
794
+ },
795
+ {
796
+ "type": "text",
797
+ "text": "4.3. RGBD Object Surface Reconstruction",
798
+ "text_level": 1,
799
+ "bbox": [
800
+ 76,
801
+ 452,
802
+ 405,
803
+ 468
804
+ ],
805
+ "page_idx": 6
806
+ },
807
+ {
808
+ "type": "text",
809
+        "text": "In our experiment setting of object surface reconstruction, algorithms need to utilize an RGBD image sequence and central object masks to reconstruct the surface mesh of the central object. Reconstruction without depths is also evaluated for comparison. To evaluate reconstruction quality, we calculate the Chamfer Distance between the reconstructed mesh and the aggregated object point cloud, which is derived from object-masked depth images. Ten single-object scenes are selected in each category for the evaluations of Instant-NGP [49] and Neusfacto [85]. From the results in Tab. 7, we observe that reconstruction is better with depth priors. Additionally, the performance of Neusfacto with RGBD is superior to that of Instant-NGP, which shows that depths help the SDF-based method Neusfacto learn the correct object boundary and boost its performance more than Instant-NGP's. The standard deviation is high due to the varied reconstruction qualities across different categories in the dataset. Visualizations of Neusfacto RGBD reconstructions are shown in Figure 8. In brief, our dataset provides an RGBD object reconstruction evaluation track, boosting the development of more mature algorithms in this field.",
810
+ "bbox": [
811
+ 75,
812
+ 476,
813
+ 470,
814
+ 792
815
+ ],
816
+ "page_idx": 6
817
+ },
818
+ {
819
+ "type": "text",
820
+ "text": "4.4. RGBD 6D Object Pose Estimation",
821
+ "text_level": 1,
822
+ "bbox": [
823
+ 76,
824
+ 801,
825
+ 375,
826
+ 816
827
+ ],
828
+ "page_idx": 6
829
+ },
830
+ {
831
+ "type": "text",
832
+        "text": "We explore our dataset for self-supervised training of 6D pose estimation. We adopt the algorithm proposed in [87], which leverages a category-level shape prior and learns by matching correspondences between images and shapes. In our experiment, we evaluate the trained model on the Wild6D [23]",
833
+ "bbox": [
834
+ 75,
835
+ 824,
836
+ 468,
837
+ 902
838
+ ],
839
+ "page_idx": 6
840
+ },
841
+ {
842
+ "type": "image",
843
+ "img_path": "images/cfbf725d8c2d58023c1876d39cb419d30c986d507fb0fbc41ca4a5a1af5c0d4f.jpg",
844
+ "image_caption": [
845
+ "Figure 9. Object 6D pose estimation visualization. We visualize the predicted category-level 6D pose on three categories in Wild6D [23] test set (bottle, bowl, and mug) using models that only perform self-supervised training on the corresponding category of WildRGB-D Dataset. The ground truth bounding boxes are colored in green, and the predicted bounding boxes are in red."
846
+ ],
847
+ "image_footnote": [],
848
+ "bbox": [
849
+ 508,
850
+ 364,
851
+ 883,
852
+ 636
853
+ ],
854
+ "page_idx": 6
855
+ },
856
+ {
857
+ "type": "text",
858
+        "text": "test set. Three different training-set settings are adopted: 1) the Wild6D training set; 2) the WildRGB-D dataset; 3) the Wild6D training set + the WildRGB-D dataset. Common categories in these two datasets are selected for self-supervised training and evaluation. Results in Tab. 8 show that in the out-of-distribution setting, where we train only on our dataset and evaluate on a different dataset, some metrics decrease in our experiments. This is mainly due to the different distributions of camera rotations in these",
859
+ "bbox": [
860
+ 496,
861
+ 763,
862
+ 890,
863
+ 900
864
+ ],
865
+ "page_idx": 6
866
+ },
867
+ {
868
+ "type": "table",
869
+ "img_path": "images/9898d76be531418af3d4e2f3b020a4efe07efa7bbc0fbeb1bd445bd3e6b9aa68.jpg",
870
+ "table_caption": [],
871
+ "table_footnote": [],
872
+ "table_body": "<table><tr><td rowspan=\"2\">Eval. Type</td><td rowspan=\"2\">Categories</td><td rowspan=\"2\">Metrics</td><td colspan=\"4\">#Frames</td></tr><tr><td>3</td><td>5</td><td>10</td><td>20</td></tr><tr><td rowspan=\"4\">MST</td><td rowspan=\"2\">seen</td><td>&lt;15 deg.</td><td>57.4</td><td>55.1</td><td>51.4</td><td>47.4</td></tr><tr><td>&lt;30 deg.</td><td>82.1</td><td>79.8</td><td>77.2</td><td>74.0</td></tr><tr><td rowspan=\"2\">unseen</td><td>&lt;15 deg.</td><td>38.4</td><td>37.7</td><td>36.6</td><td>35.2</td></tr><tr><td>&lt;30 deg.</td><td>62.5</td><td>61.8</td><td>60.4</td><td>59.0</td></tr><tr><td rowspan=\"4\">Coord.Asc.</td><td rowspan=\"2\">seen</td><td>&lt;15 deg.</td><td>69.3</td><td>69.3</td><td>69.8</td><td>69.3</td></tr><tr><td>&lt;30 deg.</td><td>85.3</td><td>85.3</td><td>85.4</td><td>85.3</td></tr><tr><td rowspan=\"2\">unseen</td><td>&lt;15 deg.</td><td>46.0</td><td>46.2</td><td>46.9</td><td>46.5</td></tr><tr><td>&lt;30 deg.</td><td>66.4</td><td>67.0</td><td>67.2</td><td>67.1</td></tr><tr><td rowspan=\"4\">Sequential</td><td rowspan=\"2\">seen</td><td>&lt;15 deg.</td><td>51.9</td><td>45.1</td><td>36.0</td><td>26.9</td></tr><tr><td>&lt;30 deg.</td><td>78.3</td><td>72.5</td><td>61.9</td><td>49.3</td></tr><tr><td rowspan=\"2\">unseen</td><td>&lt;15 deg.</td><td>34.9</td><td>31.0</td><td>25.1</td><td>18.3</td></tr><tr><td>&lt;30 deg.</td><td>59.1</td><td>54.4</td><td>46.4</td><td>37.1</td></tr></table>",
873
+ "bbox": [
874
+ 81,
875
+ 88,
876
+ 467,
877
+ 289
878
+ ],
879
+ "page_idx": 7
880
+ },
881
+ {
882
+ "type": "table",
883
+ "img_path": "images/9ee81673aceaa4b830c076a9cc398be42649040d641ca564902a254924ad3808.jpg",
884
+ "table_caption": [
885
+            "Table 5. RelPose [86] camera evaluation results. We follow the three evaluation types (MST, Coord.Asc., Sequential) proposed in [86] and report the average percentage of rotation predictions whose errors fall below 15 and 30 degrees, both in training-seen categories and unseen ones."
886
+ ],
887
+ "table_footnote": [],
888
+ "table_body": "<table><tr><td rowspan=\"2\">Eval. Type</td><td rowspan=\"2\">Categories</td><td rowspan=\"2\">Metrics</td><td colspan=\"4\">#Frames</td></tr><tr><td>2</td><td>3</td><td>5</td><td>8</td></tr><tr><td rowspan=\"4\">Pairwise</td><td rowspan=\"2\">seen</td><td>&lt;15 deg.</td><td>69.6</td><td>68.3</td><td>67.3</td><td>66.6</td></tr><tr><td>&lt;30 deg.</td><td>86.5</td><td>87.4</td><td>87.2</td><td>86.8</td></tr><tr><td rowspan=\"2\">unseen</td><td>&lt;15 deg.</td><td>53.4</td><td>52.5</td><td>52.5</td><td>52.3</td></tr><tr><td>&lt;30 deg.</td><td>74.1</td><td>74.5</td><td>75.3</td><td>75.4</td></tr><tr><td rowspan=\"4\">Coord.Asc.</td><td rowspan=\"2\">seen</td><td>&lt;15 deg.</td><td>70.4</td><td>71.5</td><td>71.9</td><td>71.6</td></tr><tr><td>&lt;30 deg.</td><td>86.7</td><td>87.9</td><td>88.5</td><td>88.7</td></tr><tr><td rowspan=\"2\">unseen</td><td>&lt;15 deg.</td><td>52.9</td><td>55.4</td><td>54.9</td><td>54.8</td></tr><tr><td>&lt;30 deg.</td><td>73.9</td><td>75.7</td><td>76.3</td><td>76.8</td></tr><tr><td rowspan=\"4\">CamCENTER</td><td rowspan=\"2\">seen</td><td>&lt;0.2 SS</td><td>100.0</td><td>29.8</td><td>12.6</td><td>5.9</td></tr><tr><td>&lt;0.3 SS</td><td>100.0</td><td>43.8</td><td>23.6</td><td>13.3</td></tr><tr><td rowspan=\"2\">unseen</td><td>&lt;0.2 SS</td><td>100.0</td><td>30.5</td><td>12.4</td><td>5.6</td></tr><tr><td>&lt;0.3 SS</td><td>100.0</td><td>44.3</td><td>23.2</td><td>12.5</td></tr><tr><td rowspan=\"4\">Cam.Trans.</td><td rowspan=\"2\">seen</td><td>&lt;0.2 SS</td><td>22.3</td><td>11.9</td><td>4.8</td><td>2.2</td></tr><tr><td>&lt;0.3 SS</td><td>30.1</td><td>20.4</td><td>10.8</td><td>6.2</td></tr><tr><td rowspan=\"2\">unseen</td><td>&lt;0.2 SS</td><td>21.8</td><td>12.0</td><td>5.7</td><td>2.7</td></tr><tr><td>&lt;0.3 SS</td><td>29.8</td><td>20.9</td><td>12.0</td><td>6.4</td></tr></table>",
889
+ "bbox": [
890
+ 81,
891
+ 375,
892
+ 465,
893
+ 625
894
+ ],
895
+ "page_idx": 7
896
+ },
897
+ {
898
+ "type": "table",
899
+ "img_path": "images/c8bbae51d17bf19466809f5723d03052ffddff5b39897859e4ffe6d9abb346e0.jpg",
900
+ "table_caption": [
901
+            "Table 6. RelPose++ [35] camera evaluation results. We follow the four evaluation types (Pairwise, Coord.Asc., CamCENTER, Cam.Trans.) proposed in [35] and report the average percentage of predictions whose errors fall below the given thresholds (in degrees for rotations, in scene scale for camera centers and translations), both in training-seen categories and unseen ones. Note: SS means the scene scale defined in [35]."
902
+ ],
903
+ "table_footnote": [],
904
+ "table_body": "<table><tr><td colspan=\"2\">RGB</td><td colspan=\"2\">RGBD</td></tr><tr><td>Instant-NGP [49]</td><td>Neusfacto [85]</td><td>Instant-NGP [49]</td><td>Neusfacto [85]</td></tr><tr><td>45.91/64.01</td><td>88.92/89.94</td><td>28.46/29.28</td><td>25.83/34.07</td></tr></table>",
905
+ "bbox": [
906
+ 91,
907
+ 722,
908
+ 454,
909
+ 772
910
+ ],
911
+ "page_idx": 7
912
+ },
913
+ {
914
+ "type": "text",
915
+        "text": "two datasets (visualized in Fig. 4), where Wild6D does not cover the full 360 degrees and the WildRGB-D dataset covers a larger pitch-angle range in object 6D poses. However, we",
916
+ "bbox": [
917
+ 76,
918
+ 854,
919
+ 468,
920
+ 901
921
+ ],
922
+ "page_idx": 7
923
+ },
924
+ {
925
+ "type": "table",
926
+ "img_path": "images/b49293c1fba1c8831a440c8454795c00e8b9f50f092db33d714f9a7330a423ae.jpg",
927
+ "table_caption": [
928
+            "Table 7. RGBD object surface reconstruction results. The average Chamfer Distance with its standard deviation across selected categories is reported (Average/SD)."
929
+ ],
930
+ "table_footnote": [],
931
+ "table_body": "<table><tr><td>Category</td><td>Datasets</td><td>IOU0.25</td><td>IOU0.5</td><td>5 deg. 2cm</td><td>5 deg. 5cm</td><td>10 deg. 2cm</td><td>10 deg. 5cm</td></tr><tr><td rowspan=\"3\">Bottle</td><td>Wild6D</td><td>93.2</td><td>85.2</td><td>71.3</td><td>79.4</td><td>79.8</td><td>90.9</td></tr><tr><td>ROW</td><td>93.3</td><td>70.9</td><td>34.1</td><td>48.8</td><td>47.9</td><td>78.8</td></tr><tr><td>Wild6D+ROW</td><td>93.3</td><td>85.8</td><td>71.9</td><td>78.6</td><td>81.7</td><td>91.7</td></tr><tr><td rowspan=\"3\">Bowl</td><td>Wild6D</td><td>98.3</td><td>90.4</td><td>66.1</td><td>70.0</td><td>86.8</td><td>94.6</td></tr><tr><td>ROW</td><td>98.3</td><td>91.8</td><td>33.8</td><td>35.7</td><td>86.3</td><td>93.5</td></tr><tr><td>Wild6D+ROW</td><td>98.4</td><td>91.8</td><td>40.3</td><td>42.0</td><td>87.5</td><td>93.7</td></tr><tr><td rowspan=\"3\">Mug</td><td>Wild6D</td><td>89.0</td><td>59.2</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.1</td></tr><tr><td>ROW</td><td>89.1</td><td>61.9</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.1</td></tr><tr><td>Wild6D+ROW</td><td>89.3</td><td>50.2</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td></tr></table>",
932
+ "bbox": [
933
+ 504,
934
+ 89,
935
+ 890,
936
+ 215
937
+ ],
938
+ "page_idx": 7
939
+ },
940
+ {
941
+ "type": "text",
942
+        "text": "Table 8. Self-Supervised 6D Pose Estimation results. Evaluation results on the Wild6D test set under three different settings for the bottle, bowl, and mug categories.",
943
+ "bbox": [
944
+ 500,
945
+ 226,
946
+ 890,
947
+ 268
948
+ ],
949
+ "page_idx": 7
950
+ },
951
+ {
952
+ "type": "text",
953
+        "text": "still witness some improvements in the evaluations. Training with the WildRGB-D dataset benefits the IoU evaluations, and joint-dataset training improves the rotation+translation evaluation for particular categories. Visualizations of 6D pose estimation on the Wild6D test set using models trained only on our dataset can be found in Figure 9. To summarize, our dataset provides large-scale category-level RGBD image sequences, serving as ample unsupervised training data, which has the potential to enable more accurate 6D pose estimation in the future.",
954
+ "bbox": [
955
+ 496,
956
+ 297,
957
+ 890,
958
+ 448
959
+ ],
960
+ "page_idx": 7
961
+ },
962
+ {
963
+ "type": "text",
964
+ "text": "5. Conclusion",
965
+ "text_level": 1,
966
+ "bbox": [
967
+ 500,
968
+ 464,
969
+ 617,
970
+ 479
971
+ ],
972
+ "page_idx": 7
973
+ },
974
+ {
975
+ "type": "text",
976
+        "text": "The object-centric datasets in the computer vision community have mostly focused on RGB videos, while practical applications often involve depth as input or for better annotations. We collect the largest object-centric RGB-D video dataset, WildRGB-D, where all videos are captured in cluttered scenes. It is composed of category-level RGB-D object videos taken with iPhones going around the objects in 360 degrees, and it contains around 8500 recorded objects and nearly 20000 RGB-D videos across 46 common object categories with three setups covering most scenarios. The dataset is well annotated with object masks, real-world scale camera poses, and aggregated point clouds reconstructed from the RGBD videos. We set up four evaluation tracks with WildRGB-D, showing that the large-scale capture of RGB-D objects offers great potential to advance 3D object learning. The current dataset does not come with object 6D pose annotations, which require further crowdsourcing effort. Collecting this annotation for supervised training methods as well as evaluation will be one of our future efforts. We are committed to releasing our dataset and evaluation code.",
977
+ "bbox": [
978
+ 496,
979
+ 491,
980
+ 890,
981
+ 808
982
+ ],
983
+ "page_idx": 7
984
+ },
985
+ {
986
+ "type": "text",
987
+ "text": "Acknowledgment This project was supported, in part, by the Amazon Research Award, the Qualcomm Innovation Fellowship, the Intel Rising Star Faculty Award, and the CISCO Faculty Award.",
988
+ "bbox": [
989
+ 496,
990
+ 830,
991
+ 890,
992
+ 887
993
+ ],
994
+ "page_idx": 7
995
+ },
996
+ {
997
+ "type": "text",
998
+ "text": "References",
999
+ "text_level": 1,
1000
+ "bbox": [
1001
+ 78,
1002
+ 89,
1003
+ 173,
1004
+ 104
1005
+ ],
1006
+ "page_idx": 8
1007
+ },
1008
+ {
1009
+ "type": "list",
1010
+ "sub_type": "ref_text",
1011
+ "list_items": [
1012
+ "[1] Henrik Aanaes, Rasmus Ramsbøl Jensen, George Vogiatzis, Engin Tola, and Anders Bjorholm Dahl. Large-scale data for multiple-view stereopsis. International Journal of Computer Vision, 120:153-168, 2016. 3",
1013
+ "[2] Adel Ahmadyan, Liangkai Zhang, Artsiom Ablavatski, Jianing Wei, and Matthias Grundmann. Objectron: A large scale dataset of object-centric videos in the wild with pose annotations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7822-7831, 2021. 2, 3",
1014
+ "[3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In ICCV, 2021. 3",
1015
+ "[4] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In CVPR, 2022. 3, 5, 6",
1016
+ "[5] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 2, 3",
1017
+ "[6] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14124-14133, 2021. 3, 5, 6",
1018
+ "[7] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision, pages 333-350. Springer, 2022. 3",
1019
+ "[8] Dengsheng Chen, Jun Li, Zheng Wang, and Kai Xu. Learning canonical shape space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11973-11982, 2020. 4",
1020
+ "[9] Wei Chen, Xi Jia, Hyung Jin Chang, Jinming Duan, Linlin Shen, and Ales Leonardis. Fs-net: Fast shape-based network for category-level 6d object pose estimation with decoupled rotation mechanism. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1581-1590, 2021. 4",
1021
+ "[10] Xu Chen, Zijian Dong, Jie Song, Andreas Geiger, and Otmar Hilliges. Category level object pose estimation via neural analysis-by-synthesis. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVI 16, pages 139-156. Springer, 2020. 4",
1022
+ "[11] Ho Kei Cheng and Alexander G Schwing. Xmem: Long-term video object segmentation with an atkinson-shiffrin memory model. In European Conference on Computer Vision, pages 640-658. Springer, 2022. 2, 5",
1023
+ "[12] Yangming Cheng, Liulei Li, Yuanyou Xu, Xiaodi Li, Zongxin Yang, Wenguan Wang, and Yi Yang. Segment and track anything, 2023. 2, 5"
1024
+ ],
1025
+ "bbox": [
1026
+ 78,
1027
+ 114,
1028
+ 470,
1029
+ 900
1030
+ ],
1031
+ "page_idx": 8
1032
+ },
1033
+ {
1034
+ "type": "list",
1035
+ "sub_type": "ref_text",
1036
+ "list_items": [
1037
+ "[13] Sungjoon Choi, Qian-Yi Zhou, Stephen Miller, and Vladlen Koltun. A large dataset of object scans. arXiv preprint arXiv:1602.02481, 2016. 3",
1038
+            "[14] Jasmine Collins, Shubham Goel, Kenan Deng, Achleshwar Luthra, Leon Xu, Erhan Gundogdu, Xi Zhang, Tomas F Yago Vicente, Thomas Dideriksen, Himanshu Arora, et al. Abo: Dataset and benchmarks for real-world 3d object understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21126-21136, 2022. 2, 3",
1039
+ "[15] François Darmon, Bénédicte Bascle, Jean-Clement Devaux, Pascal Monasse, and Mathieu Aubry. Improving neural implicit surfaces geometry with patch warping. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6260-6269, 2022. 3",
1040
+            "[16] Matt Deitke, Dustin Schwenk, Jordi Salvador, Luca Weihs, Oscar Michel, Eli VanderBilt, Ludwig Schmidt, Kiana Ehsani, Aniruddha Kembhavi, and Ali Farhadi. Objaverse: A universe of annotated 3d objects, 2022. 2",
1041
+ "[17] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 4",
1042
+ "[18] Laura Downs, Anthony Francis, Nate Koenig, Brandon Kinman, Ryan Hickman, Krista Reymann, Thomas B McHugh, and Vincent Vanhoucke. Google scanned objects: A high-quality dataset of 3d scanned household items. In 2022 International Conference on Robotics and Automation (ICRA), pages 2553-2560. IEEE, 2022. 3",
1043
+ "[19] Hugh Durrant-Whyte and Tim Bailey. Simultaneous localization and mapping: part i. IEEE robotics & automation magazine, 13(2):99-110, 2006. 2, 3, 4",
1044
+ "[20] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 3",
1045
+ "[21] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 3",
1046
+ "[22] Huan Fu, Rongfei Jia, Lin Gao, Mingming Gong, Binqiang Zhao, Steve Maybank, and Dacheng Tao. 3d-future: 3d furniture shape with texture. International Journal of Computer Vision, 129:3313-3337, 2021. 2, 3",
1047
+ "[23] Yang Fu and Xiaolong Wang. Category-level 6d object pose estimation in the wild: A semi-supervised learning approach and a new dataset, 2022. 2, 3, 4, 7",
1048
+ "[24] Ge Gao, Mikko Lauri, Yulong Wang, Xiaolin Hu, Jianwei Zhang, and Simone Frintrop. 6d object pose regression via supervised learning on point clouds. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 3643-3649. IEEE, 2020. 4",
1049
+ "[25] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. In Advances In Neural Information Processing Systems, 2022. 2"
1050
+ ],
1051
+ "bbox": [
1052
+ 501,
1053
+ 92,
1054
+ 893,
1055
+ 900
1056
+ ],
1057
+ "page_idx": 8
1058
+ },
1059
+ {
1060
+ "type": "list",
1061
+ "sub_type": "ref_text",
1062
+ "list_items": [
1063
+ "[26] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. 1",
1064
+ "[27] Yisheng He, Haoqiang Fan, Haibin Huang, Qifeng Chen, and Jian Sun. Towards self-supervised category-level object pose and size estimation. arXiv preprint arXiv:2203.02884, 2022. 4",
1065
+ "[28] Philipp Henzler, Jeremy Reizenstein, Patrick Labatut, Roman Shapovalov, Tobias Ritschel, Andrea Vedaldi, and David Novotny. Unsupervised learning of 3d object categories from videos in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4700-4709, 2021. 3",
1066
+            "[29] Hanwen Jiang, Zhenyu Jiang, Kristen Grauman, and Yuke Zhu. Few-view object reconstruction with unknown categories and camera poses. arXiv preprint arXiv:2212.04492, 2022. 3",
1067
+ "[30] HyunJun Jung, Shun-Cheng Wu, Patrick Ruhkamp, Guangyao Zhai, Hannah Schieber, Giulia Rizzoli, Pengyuan Wang, Hongcheng Zhao, Lorenzo Garattoni, Sven Meier, Daniel Roth, Nassir Navab, and Benjamin Busam. Housecat6d – a large-scale multi-modal category level 6d object pose dataset with household objects in realistic scenarios, 2023. 4",
1068
+ "[31] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 3",
1069
+ "[32] Alexander Kirillov, Yuxin Wu, Kaiming He, and Ross Girshick. Pointrend: Image segmentation as rendering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9799-9808, 2020. 5",
1070
+ "[33] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 2, 5",
1071
+            "[34] Joseph J Lim, Hamed Pirsiavash, and Antonio Torralba. Parsing IKEA objects: Fine pose estimation. In Proceedings of the IEEE international conference on computer vision, pages 2992-2999, 2013. 3",
1072
+ "[35] Amy Lin, Jason Y Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926, 2023. 2, 4, 6, 8",
1073
+ "[36] Jiehong Lin, Zewei Wei, Zhihao Li, Songcen Xu, Kui Jia, and Yuanqing Li. Dualposenet: Category-level 6d object pose and size estimation using dual pose network with refined learning of pose consistency. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3560-3569, 2021. 4",
1074
+            "[37] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 4",
1075
+ "[38] Yunzhi Lin, Jonathan Tremblay, Stephen Tyree, Patricio A Vela, and Stan Birchfield. Single-stage keypoint-based"
1076
+ ],
1077
+ "bbox": [
1078
+ 78,
1079
+ 90,
1080
+ 468,
1081
+ 900
1082
+ ],
1083
+ "page_idx": 9
1084
+ },
1085
+ {
1086
+ "type": "list",
1087
+ "sub_type": "ref_text",
1088
+ "list_items": [
1089
+ "category-level object pose estimation from an rgb image. In 2022 International Conference on Robotics and Automation (ICRA), pages 1547-1553. IEEE, 2022. 4",
1090
+ "[39] Zhi-Hao Lin, Wei-Chiu Ma, Hao-Yu Hsu, Yu-Chiang Frank Wang, and Shenlong Wang. Neurmips: Neural mixture of planar experts for view synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15702-15712, 2022. 3",
1091
+ "[40] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2",
1092
+ "[41] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. 2, 5",
1093
+ "[42] Xingyu Liu, Shun Iwase, and Kris M. Kitani. Stereobj-1m: Large-scale stereo image dataset for 6d object pose estimation, 2022. 4",
1094
+ "[43] Yuan Liu, Sida Peng, Lingjie Liu, Qianqian Wang, Peng Wang, Christian Theobalt, Xiaowei Zhou, and Wenping Wang. Neural rays for occlusion-aware image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7824-7833, 2022. 3",
1095
+ "[44] Bruce D Lucas and Takeo Kanade. An iterative image registration technique with an application to stereo vision. In IJCAI'81: 7th international joint conference on Artificial intelligence, pages 674-679, 1981. 3",
1096
+ "[45] Fabian Manhardt, Gu Wang, Benjamin Busam, Manuel Nickel, Sven Meier, Luca Minciullo, Xiangyang Ji, and Nassir Navab. Cps++: Improving class-level 6d pose and shape estimation from monocular images with self-supervised learning. arXiv preprint arXiv:2003.05848, 2020. 4",
1097
+ "[46] Iaroslav Melekhov, Juha Ylioinas, Juho Kannala, and Esa Rahtu. Relative camera pose estimation using convolutional neural networks. In Advanced Concepts for Intelligent Vision Systems: 18th International Conference, ACIVS 2017, Antwerp, Belgium, September 18-21, 2017, Proceedings 18, pages 675-687. Springer, 2017. 3",
1098
+ "[47] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2, 3, 5, 6",
1099
+ "[48] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 3",
1100
+ "[49] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM TOG, 2022. 3, 5, 6, 7, 8",
1101
+ "[50] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the"
1102
+ ],
1103
+ "bbox": [
1104
+ 503,
1105
+ 92,
1106
+ 893,
1107
+ 900
1108
+ ],
1109
+ "page_idx": 9
1110
+ },
1111
+ {
1112
+ "type": "list",
1113
+ "sub_type": "ref_text",
1114
+ "list_items": [
1115
+ "IEEE/CVF International Conference on Computer Vision, pages 5589-5599, 2021. 3",
1116
+ "[51] Wanli Peng, Jianhang Yan, Hongtao Wen, and Yi Sun. Self-supervised category-level 6d object pose estimation with deep implicit shape representation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 2082-2090, 2022. 4",
1117
+ "[52] Charles R. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation, 2017. 2",
1118
+ "[53] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 1",
1119
+ "[54] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10901-10911, 2021. 2, 3",
1120
+ "[55] Chris Rockwell, Justin Johnson, and David F Fouhey. The 8-point algorithm as an inductive bias for relative pose prediction by vits. In 2022 International Conference on 3D Vision (3DV), pages 1-11. IEEE, 2022. 3",
1121
+ "[56] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2",
1122
+ "[57] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4104-4113, 2016. 3, 4",
1123
+ "[58] Thomas Schops, Torsten Sattler, and Marc Pollefeys. Bad slam: Bundle adjusted direct rgb-d slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 134-144, 2019. 2, 4",
1124
+ "[59] Samarth Sinha, Jason Y Zhang, Andrea Tagliasacchi, Igor Gilitschenski, and David B Lindell. Sparsepose: Sparse-view camera pose regression and refinement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21349-21359, 2023. 4",
1125
+ "[60] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5459-5469, 2022. 3",
1126
+ "[61] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Advances in neural information processing systems, 34:16558-16569, 2021. 3",
1127
+ "[62] Meng Tian, Marcelo H Ang, and Gim Hee Lee. Shape prior deformation for categorical 6d object pose and size estimation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXI 16, pages 530-546. Springer, 2020. 4",
1128
+ "[63] Bill Triggs, Philip F McLauchlan, Richard I Hartley, and Andrew W Fitzgibbon. Bundle adjustment—a modern synthesis. In Vision Algorithms: Theory and Practice: International Workshop on Vision Algorithms Corfu,"
1129
+ ],
1130
+ "bbox": [
1131
+ 78,
1132
+ 92,
1133
+ 468,
1134
+ 900
1135
+ ],
1136
+ "page_idx": 10
1137
+ },
1138
+ {
1139
+ "type": "list",
1140
+ "sub_type": "ref_text",
1141
+ "list_items": [
1142
+ "Greece, September 21-22, 1999 Proceedings, pages 298-372. Springer, 2000. 3",
1143
+ "[64] Shubham Tulsiani, Abhishek Kar, Joao Carreira, and Jitendra Malik. Learning category-specific deformable 3d models for object reconstruction. IEEE transactions on pattern analysis and machine intelligence, 39(4):719-731, 2016. 3",
1144
+ "[65] Shinji Umeyama. Least-squares estimation of transformation parameters between two point patterns. IEEE Transactions on Pattern Analysis & Machine Intelligence, 13(04): 376-380, 1991. 4",
1145
+ "[66] Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T Barron, and Pratul P Srinivasan. Ref-nerf: Structured view-dependent appearance for neural radiance fields. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5481–5490. IEEE, 2022. 3",
1146
+ "[67] He Wang, Srinath Sridhar, Jingwei Huang, Julien Valentin, Shuran Song, and Leonidas J Guibas. Normalized object coordinate space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2642-2651, 2019. 4",
1147
+ "[68] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 3",
1148
+ "[69] Pengyuan Wang, HyunJun Jung, Yitong Li, Siyuan Shen, Rahul Parthasarathy Srikanth, Lorenzo Garattoni, Sven Meier, Nassir Navab, and Benjamin Busam. Phocal: A multi-modal dataset for category-level object pose estimation with photometrically challenging objects, 2022. 4",
1149
+ "[70] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2021. 3, 5, 6",
1150
+ "[71] Sen Wang, Ronald Clark, Hongkai Wen, and Niki Trigoni. Deepvo: Towards end-to-end visual odometry with deep recurrent convolutional neural networks. In 2017 IEEE international conference on robotics and automation (ICRA), pages 2043-2050. IEEE, 2017. 3",
1151
+            "[72] Yiming Wang, Qin Han, Marc Habermann, Kostas Daniilidis, Christian Theobalt, and Lingjie Liu. Neus2: Fast learning of neural implicit surfaces for multi-view reconstruction, 2023. 3",
1152
+ "[73] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 5",
1153
+            "[74] Tong Wu, Jiaqi Wang, Xingang Pan, Xudong Xu, Christian Theobalt, Ziwei Liu, and Dahua Lin. Voxurf: Voxel-based efficient and accurate neural surface reconstruction. arXiv preprint arXiv:2208.12697, 2022. 3",
1154
+ "[75] Tong Wu, Jiarui Zhang, Xiao Fu, Yuxin Wang, Jiawei Ren, Liang Pan, Wayne Wu, Lei Yang, Jiaqi Wang, Chen Qian, et al. Omniobject3d: Large-vocabulary 3d object dataset for realistic perception, reconstruction and generation. In Pro"
1155
+ ],
1156
+ "bbox": [
1157
+ 503,
1158
+ 92,
1159
+ 890,
1160
+ 900
1161
+ ],
1162
+ "page_idx": 10
1163
+ },
1164
+ {
1165
+ "type": "list",
1166
+ "sub_type": "ref_text",
1167
+ "list_items": [
1168
+ "ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 803-814, 2023. 2, 3",
1169
+            "[76] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 2, 3",
1170
+ "[77] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE winter conference on applications of computer vision, pages 75-82. IEEE, 2014. 3",
1171
+ "[78] Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, and Feng Zheng. Track anything: Segment anything meets videos, 2023. 2, 5",
1172
+ "[79] Nan Yang, Lukas von Stumberg, Rui Wang, and Daniel Cremers. D3vo: Deep depth, deep pose and deep uncertainty for monocular visual odometry. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1281-1292, 2020. 3",
1173
+ "[80] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. Blendedmvs: A large-scale dataset for generalized multi-view stereo networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1790-1799, 2020. 3",
1174
+ "[81] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. Advances in Neural Information Processing Systems, 34:4805-4815, 2021. 3",
1175
+            "[82] Yang You, Ruoxi Shi, Weiming Wang, and Cewu Lu. CPPF: Towards robust category-level 9d pose estimation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6866-6875, 2022. 4",
1176
+ "[83] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelnerf: Neural radiance fields from one or few images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4578-4587, 2021. 3, 5, 6",
1177
+ "[84] Xianggang Yu, Mutian Xu, Yidan Zhang, Haolin Liu, Chongjie Ye, Yushuang Wu, Zizheng Yan, Chenming Zhu, Zhangyang Xiong, Tianyou Liang, Guanying Chen, Shuguang Cui, and Xiaoguang Han. Mvimgnet: A large-scale dataset of multi-view images, 2023. 3",
1178
+ "[85] Zehao Yu, Anpei Chen, Bozidar Antic, Songyou Peng, Apratim Bhattacharyya, Michael Niemeyer, Siyu Tang, Torsten Sattler, and Andreas Geiger. Sdfstudio: A unified framework for surface reconstruction, 2022. 3, 7, 8",
1179
+            "[86] Jason Y Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose: Predicting probabilistic relative rotation for single objects in the wild. In European Conference on Computer Vision, pages 592-611. Springer, 2022. 2, 3, 6, 8",
1180
+ "[87] Kaifeng Zhang, Yang Fu, Shubhankar Borse, Hong Cai, Fatih Porikli, and Xiaolong Wang. Self-supervised geometric correspondence for category-level 6d object pose estimation in the wild. arXiv preprint arXiv:2210.07199, 2022. 3, 4, 7",
1181
+ "[88] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the"
1182
+ ],
1183
+ "bbox": [
1184
+ 78,
1185
+ 92,
1186
+ 468,
1187
+ 900
1188
+ ],
1189
+ "page_idx": 11
1190
+ },
1191
+ {
1192
+ "type": "list",
1193
+ "sub_type": "ref_text",
1194
+ "list_items": [
1195
+ "IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5",
1196
+ "[89] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3D: A modern library for 3D data processing. arXiv:1801.09847, 2018. 4"
1197
+ ],
1198
+ "bbox": [
1199
+ 501,
1200
+ 92,
1201
+ 890,
1202
+ 160
1203
+ ],
1204
+ "page_idx": 11
1205
+ }
1206
+ ]
2401.12xxx/2401.12592/e8da1853-39d1-41e6-8ee3-71b374b562d5_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12592/e8da1853-39d1-41e6-8ee3-71b374b562d5_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:923418973cfcc5121046ea2285fad6dd54dabe569a31cae3fc99b09467e3427e
3
+ size 11944567
2401.12xxx/2401.12592/full.md ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # RGBD Objects in the Wild: Scaling Real-World 3D Object Learning from RGB-D Videos
2
+
3
+ Hongchi Xia $^{1*}$ Yang Fu $^{2*}$ Sifei Liu $^{3}$ Xiaolong Wang $^{2}$ $^{1}$ University of Illinois Urbana-Champaign ${}^{2}$ UC San Diego ${}^{3}$ NVIDIA
4
+
5
+ ![](images/22df23b8e8e93e3cc29e4c9901cc6bf3d74a0d80a00a3ad9f7efe248943b0b2c.jpg)
6
+ Figure 1. WildRGB-D Dataset contains almost 8500 recorded objects and nearly 20000 RGBD videos in 46 common categories with corresponding object masks and 3D point clouds.
7
+
8
+ # Abstract
9
+
10
+ We introduce a new RGB-D object dataset captured in the wild called WildRGB-D. Unlike most existing real-world object-centric datasets which only come with RGB capturing, the direct capture of the depth channel allows better 3D annotations and broader downstream applications. WildRGB-D comprises large-scale category-level RGB-D object videos, which are taken using an iPhone to go around the objects in 360 degrees. It contains around 8500 recorded objects and nearly 20000 RGB-D videos across 46 common object categories. These videos are taken with diverse cluttered backgrounds with three setups to cover as many real-world scenarios as possible: (i) a single object in one video; (ii) multiple objects in one video; and (iii) an
11
+
12
+ object with a static hand in one video. The dataset is annotated with object masks, real-world scale camera poses, and reconstructed aggregated point clouds from RGBD videos. We benchmark four tasks with WildRGB-D including novel view synthesis, camera pose estimation, object 6d pose estimation, and object surface reconstruction. Our experiments show that the large-scale capture of RGB-D objects provides a large potential to advance 3D object learning. Our project page is https://wildrgbd.github.io/.
13
+
14
+ # 1. Introduction
15
+
16
+ The recent advancement of computer vision has largely relied on the scaling of training data [26, 53]. The same success of data-driven approaches has recently been adopted for 3D object modeling with new large 3D object-
17
+
18
+ ![](images/9909e0e1f6f8f0052a3aa09734c25a18e023e1fd870342b37e3cb51f8d48b53b.jpg)
19
+ Figure 2. The camera pose trajectories in the WildRGB-D dataset. We visualize the corresponding cameras in each scene of our dataset, showing that our dataset features full and dense 360-degree multi-view camera poses.
20
+
21
+ centric dataset collection [25, 40, 52]. Most of the large datasets are synthetic 3D data [5, 14, 22, 76] or a mix of synthetic data and real-world object scans [16], since scaling by rendering from simulation is much less labor-intensive. However, it remains a big challenge to apply models trained on simulation data to the real world. This is not only because synthetic data has less realistic texture and shape, but also because it is very hard to model the cluttered backgrounds and natural lighting of real scenes in simulation.
22
+
23
+ To make deep learning with 3D objects applicable in the real world, researchers have made efforts to collect real-world multiview object data [2, 54]. For example, the CO3D dataset [54] contains 19K object videos across 50 categories. However, due to the lack of depth, they require the use of COLMAP [56] to provide 3D annotations, which only works for $20\%$ of the collected videos. Collecting the depth channel as part of the data is not only useful for more accurate 3D ground-truth annotations, but also provides very useful information for downstream applications such as object 6D pose estimation and novel view synthesis. The OmniObject3D dataset [75] provides both object videos and a separate scanning of the objects. However, the collected videos do not come with depth channel inputs and they are mostly taken with clean backgrounds. The Wild6D dataset [23] is one of the few recent efforts to collect RGB-D object videos taken in the wild. However, it only contains 6 categories of data and covers relatively smaller ranges of object views.
24
+
25
+ In this paper, we propose to collect a new dataset that contains large-scale RGB-D object videos across diverse object categories and presented in the wild. Our dataset, namely WildRGB-D, covers 8500 tabletop objects across 44 categories in 20K videos. The videos are taken using iPhones to go around the objects in 360 degrees (see Figure 2 for visualization). Examples of the dataset are shown
26
+
27
+ in Figure 1. There are three types of videos: (i) Single object video, where only one object is present on the table; (ii) Multi-object video, where multiple objects are present at the same time; and (iii) Hand-object video, where a static human hand grasps the object. The additional video types add variety and create occlusions for objects in the scenes, which are worthwhile study cases for some tasks. The collection of the WildRGB-D dataset not only considers the cluttered background in the real world, but also the common scenarios where the objects are occluded by human hands.
28
+
29
+ We perform automatic annotations for WildRGB-D. With RGB-D capturing, we can apply Simultaneous Localization and Mapping [19, 58] (SLAM) algorithms, exploiting the RGB images and depth information from the depth sensor of mobile phones to reconstruct the 3D camera poses in real-world scale and aggregated 3D point clouds. Additionally, central object segmentation masks can be obtained by bounding-box detection with Grounding-DINO [41] using the object category as a text prompt, segmentation using Segment-Anything [33], and mask tracking using XMem [11], which are largely integrated into [12, 78].
30
+
31
+ To exploit the potential of our dataset, we benchmark it in four downstream tracks:
32
+
33
+ (i) Novel view synthesis. We evaluate various algorithms based on NeRF [47], which is optimized on a single scene, and generalizable NeRFs, which are trained at the category level. With the help of depth information when training NeRFs, we achieve consistently improved results. This offers a new platform for evaluating view synthesis approaches using RGB or RGB-D data.
34
+ (ii) Camera pose estimation. We adopt different pose estimation approaches [35, 86] to evaluate their capability of estimating relative camera poses in a sparse setting. We validate their generalization ability by training on a subset of the categories and testing on unseen ones. We observe remarkable generalization performance on unseen
35
+
36
+ <table><tr><td>Dataset</td><td>Real</td><td>Multi-View</td><td>Depth Src.</td><td>3D GT</td><td>Video</td><td># Cats</td><td># Obj</td></tr><tr><td>ShapeNet [5]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>55</td><td>51k</td></tr><tr><td>ModelNet [76]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>40</td><td>12k</td></tr><tr><td>3D-Future [22]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>34</td><td>16k</td></tr><tr><td>ABO [14]</td><td></td><td>none</td><td>CAD</td><td>mesh</td><td>none</td><td>63</td><td>8k</td></tr><tr><td>DTU [1]</td><td>✓</td><td>limited</td><td>COLMAP</td><td>mesh</td><td>RGB</td><td>N/A</td><td>124</td></tr><tr><td>CO3D [54]</td><td>✓</td><td>full</td><td>COLMAP</td><td>pcl</td><td>RGB</td><td>50</td><td>19k</td></tr><tr><td>MVImgNet [84]</td><td>✓</td><td>limited</td><td>COLMAP</td><td>pcl</td><td>RGB</td><td>238</td><td>220k</td></tr><tr><td>Objectron [2]</td><td>✓</td><td>limited</td><td>COLMAP</td><td>pcl</td><td>RGB</td><td>9</td><td>15k</td></tr><tr><td>GSO [18]</td><td>✓</td><td>none</td><td>scanner</td><td>mesh</td><td>none</td><td>17</td><td>1k</td></tr><tr><td>OmniObject3D [75]</td><td>✓</td><td>full</td><td>scanner</td><td>mesh</td><td>RGB</td><td>190</td><td>6k</td></tr><tr><td>Choi et al. [13]</td><td>✓</td><td>limited</td><td>sensor</td><td>mesh*</td><td>RGBD</td><td>9</td><td>2k</td></tr><tr><td>Wild6D [23]</td><td>✓</td><td>limited</td><td>sensor</td><td>pcl</td><td>RGBD</td><td>5</td><td>1.8k</td></tr><tr><td>Ours</td><td>✓</td><td>full</td><td>sensor</td><td>pcl</td><td>RGBD</td><td>44</td><td>8.5k</td></tr></table>
37
+
38
+ Table 1. Comparison of the WildRGB-D dataset with other 3D object datasets. Some datasets do not provide videos, which we mark as "none". Some only cover partial viewing angles, which we mark as "limited". Asterisk (*) means partial annotations. Depth Src. indicates where the depth information comes from, including CAD models, COLMAP, scanner devices, and the depth sensor in iPhones. pcl is the abbreviation of point cloud.
39
+
40
+ categories, which indicates our large-scale category-level dataset can serve as a training source for generalizable camera pose estimation.
41
+
42
+ (iii) Object surface reconstruction. We conduct object surface reconstruction on our dataset with RGB or RGB-D videos and object masks through Instant-NGP [49] and Neusfacto [85]. Results show that our depth information endows reconstruction with higher precision and that the SDF-based algorithm [85] performs better in this setting.
43
+ (iv) Object 6D pose estimation. We exploit the self-supervised algorithm in category-level object 6D pose estimation [87] with large-scale RGB-D images in our dataset and then evaluate the pre-trained model on the Wild6D [23] test set. We show our dataset can facilitate 6D pose estimation even without training labels, and we also study its generalization ability to the out-of-distribution test set.
44
+
45
+ # 2. Related Work
46
+
47
+ 3D Object Datasets One representative kind of 3D object dataset is the 3D synthetic dataset, like ShapeNet [5] and ModelNet40 [76], which consist of category-level objects. The 3D-FUTURE [22] and ABO [14] datasets are typical of higher-quality meshes with textures. [34] and [64] introduce real-world category-specific object datasets that mainly focus on birds and chairs respectively. DTU [1] and BlendedMVS [80] are datasets designed for multi-view stereo and lack category-level classification. Objectron [2] provides rich annotations, but only some of its videos are fully covered in 360 degrees. CO3D [54] is a large-scale category-level dataset that annotates camera poses and depths with COLMAP [57], which does not provide depths in real-world scale; MVImgNet [84] is a similar dataset to CO3D. Pascal3D [77] contains real-
48
+
49
+ world 3D objects with pose annotations and CAD models in limited categories. Datasets collected with specialized hardware (scanners, domes, etc.) like GSO [18] and OmniObject3D [75] have more accurate 3D geometry models and depths rendered from them. However, they do not include RGBD object videos collected in the wild and lack real captured depths as well as background depths. In terms of RGBD object datasets, Wild6D [23] features RGBD image sequences and 6D pose annotations while lacking full 360-degree coverage and category diversity. Choi et al. [13] propose RGBD object-centric datasets in 44 categories, but with limited camera annotations. As a comparison, our proposed WildRGB-D dataset contains almost 8500 recorded objects and nearly 20000 RGBD videos recorded over full 360 degrees in 46 common categories from well-known 2D datasets, all with real-world scale camera poses and object mask annotations as well as aggregated point clouds. We present the detailed comparison in Tab. 1.
50
+
51
+ Neural Radiance Field and Object Surface Reconstruction Neural Radiance Field (NeRF) [47] is a kind of scene representation based on MLPs. It takes in sampled points along each ray and outputs the density and color of each point, which are then aggregated by volume rendering to synthesize views. [3, 4, 48, 66] propose changes to the original NeRF to improve visual quality, and [7, 21, 39, 49, 60] advance NeRF efficiency. In order to generalize NeRF representations to other scenes, [6, 28, 43, 70, 83] learn latent 3D representations and priors from a set of existing scenes to help synthesize views across different scenes. Derived from the original NeRF, [15, 50, 68, 72, 74, 81, 85] leverage Signed Distance Functions (SDF) and represent the 3D scene by an implicit surface, which has a clearer object boundary definition. Recently, 3D Gaussian Splatting [31] has become a competitive alternative to NeRF. The WildRGB-D dataset comprises various category-level objects and scenes on a large scale, which is suitable for novel view synthesis benchmarks and helps foster more mature reconstruction algorithms and generalizable 3D scene representations.
52
+
53
+ Camera Pose Estimation Given dense image views, mature SfM [57] and SLAM [19] algorithms can estimate camera poses well by computing visual matches [44], verifying them through RANSAC [20], and optimizing via bundle adjustment [63]. However, in a sparse camera view setting, camera pose estimation remains a challenging task. Some approaches [61, 71] leverage RNNs or adopt auto-regression [79], targeting SLAM applications. For category-agnostic sparse-view camera pose estimation, [46, 55] adopt a direct regression approach. [29] estimates 6D pose after training on a synthetic dataset. The energy-based method [86] estimates distributions over relative rotations
54
+
55
+ ![](images/b64f73897576ac06d7de26d0a1c8254f484a179d62d482f2fe691db8dae06c30.jpg)
56
+ Figure 3. Statistics of WildRGB-D Dataset list the total and per-category number of objects and different types of videos.
57
+
58
+ ![](images/5aef8eafa9ea0f6964252c5a74af7dbc5c432eea3a9315507142c4bb7adf1a32.jpg)
59
+
60
+ ![](images/00c7655f91ab7c8ce270aa9ef82a3a9425564851ad868e86fdb97bb252af3825.jpg)
61
+ Figure 4. Distribution visualization of different object 6D pose datasets and the WildRGB-D dataset. We observe an obvious disparity between Wild6D and our dataset. Visualizations of [23, 30, 42, 67, 69] are from [30].
62
+
63
+ ![](images/61ca8803c882d0614dc9b936aa6214cde59a4b1593593a12b8880c9988f0caec.jpg)
64
+
65
+ ![](images/86e795ba43d00f5105db35380c405d017c16f90b086cfffa8298af4cb81de5bb.jpg)
66
+
67
+ and [35] incorporates multi-view context to estimate camera 6D poses. [59] learns bundle adjustment on top of the predictions to refine the estimated poses. In the WildRGB-D dataset, with full 360-degree multi-view videos, the sparse-view camera pose estimation setting is easily accessible, enabling our dataset to serve as a large-scale training database for these algorithms.
68
+
69
+ Object 6D Pose Estimation In the setting of category-level 6D pose estimation, algorithms predict object poses within the same category and must handle varied intra-class shapes. [67] predicts 6D poses using the Umeyama algorithm [65] with NOCS map estimation, and [8, 23, 62] follow up to learn more accurate NOCS representations. Other algorithms learn to estimate 6D poses through direct regression [9, 36], keypoint location estimation [38], and so on. Apart from supervised learning, self-supervision has emerged due to the high cost of annotations. One approach [10, 24, 27, 82] is sim-to-real adaptation of models pre-trained on synthetic data. Another [23, 45, 51] resorts to semi-supervised training. [87] proposes learning correspondence through cycles across the 2D-3D space, which enables training using only in-the-wild RGBD images without any annotations and is compatible with our dataset. With large-scale category-level RGBD wild object images for self-supervised learning, our dataset has the potential to boost future developments in this field.
70
+
71
+ # 3. The WildRGB-D Dataset
72
+
73
+ # 3.1. Data Collections, Processing, and Annotation
74
+
75
+ Data Collections In order to collect RGBD videos on a large scale expediently and economically, we record with the iPhone front camera using the Record3D app and rotate the camera around the object so that full 360-degree views of objects are captured with RGB images and the corresponding depth images. The camera rotation speed is kept steady by our collection setup to reduce blur in the videos. We select 46 common categories from well-known 2D datasets [17, 37]. We record three videos for every selected object: a single-object video, a multi-object video, and a hand-object video. Every recorded video has been checked and some are discarded due to poor SLAM camera pose estimation. Details of the WildRGB-D dataset are listed in Fig. 3.
76
+
77
+ Generating Camera Poses and 3D Point Cloud Our WildRGB-D dataset has 3D annotations including camera poses in real-world scale, scene point clouds, and central object masks. In order to attain real-world scale camera poses, instead of relying on COLMAP [57] to first generate camera poses and then the depths from those poses, we generate more accurate camera poses with mature RGBD Simultaneous Localization and Mapping [19, 58] (SLAM) algorithms, which leverage our captured depths. By exploiting the RGB images and the depth information from the mobile phone's depth sensor, SLAM reconstructs the 3D camera poses in real-world scale, unlike COLMAP, whose depths are not in real-world scale. This enables us to simply project the depth images and obtain aggregated 3D point clouds (see Figure 5). We then manually check the quality of the aggregated 3D point cloud and exclude videos in which SLAM fails to get accurate camera poses. To increase the probability of getting correct SLAM results for each video, we adopt two SLAM algorithms, BAD SLAM [58] and the SLAM implementation from Open3D [89], which increases our success rate to over $90\%$.
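+ A rough sketch of this projection step is given below (not the authors' release code): it back-projects per-frame depth maps with the SLAM camera poses into one world-frame point cloud using Open3D. The file layout, the millimeter depth scale, and the camera-to-world pose convention are assumptions made for illustration.
+
+ ```python
+ # Hypothetical sketch: aggregate RGB-D frames with known camera poses into a
+ # single point cloud, in the spirit of the aggregated clouds shown in Figure 5.
+ import numpy as np
+ import open3d as o3d
+
+ def aggregate_point_cloud(rgb_paths, depth_paths, poses_c2w, K, depth_scale=1000.0):
+     """poses_c2w: (N, 4, 4) camera-to-world matrices; K: 3x3 pinhole intrinsics."""
+     depth0 = np.asarray(o3d.io.read_image(depth_paths[0]))
+     h, w = depth0.shape[:2]
+     intr = o3d.camera.PinholeCameraIntrinsic(w, h, K[0, 0], K[1, 1], K[0, 2], K[1, 2])
+     merged = o3d.geometry.PointCloud()
+     for rgb_path, depth_path, c2w in zip(rgb_paths, depth_paths, poses_c2w):
+         rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
+             o3d.io.read_image(rgb_path), o3d.io.read_image(depth_path),
+             depth_scale=depth_scale, convert_rgb_to_intensity=False)
+         pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intr)  # camera frame
+         pcd.transform(c2w)                                                # to world frame
+         merged += pcd
+     return merged.voxel_down_sample(voxel_size=0.005)  # thin out near-duplicate points
+ ```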
78
+
79
+ ![](images/e60809dd3a508b3a33a2a4007fce47b447f726ebc16b33a0bb38ee06134d3f7d.jpg)
80
+
81
+ ![](images/6e24260e58fc047a2b29593d29a2c2cfe388acf14cbb9c45e73c1b28ef55b288.jpg)
82
+ Figure 5. Point cloud reconstruction of objects in the WildRGB-D dataset. We reconstruct the aggregated point cloud of the scene by leveraging the existing 3D annotations of camera poses and the depth images.
83
+
84
+ ![](images/15637e4f7a89cbb63e921b14280d1e5b0660320dc6e92224a5f65de2d464dae8.jpg)
85
+
86
+ ![](images/647ea6f1df8c01e2c7dc935e17998a1e207b424dcaa60f6416df40f599d73e52.jpg)
87
+
88
+ <table><tr><td>Method</td><td>PSNR↑/SD</td><td>SSIM↑/SD</td><td>LPIPS↓/SD</td><td>MAE↓/SD</td></tr><tr><td>NeRF [47]</td><td>23.03/1.50</td><td>0.690/0.072</td><td>0.390/0.075</td><td>0.306/0.109</td></tr><tr><td>NeRF (w mask)</td><td>34.65/4.44</td><td>0.943/0.077</td><td>0.031/0.032</td><td>0.029/0.019</td></tr><tr><td>Mip-NeRF 360 [4]</td><td>23.84/1.60</td><td>0.762/0.063</td><td>0.280/0.067</td><td>0.185/0.068</td></tr><tr><td>Mip-NeRF 360 (w mask)</td><td>35.60/4.51</td><td>0.949/0.077</td><td>0.024/0.025</td><td>0.020/0.015</td></tr><tr><td>Instant-NGP [49]</td><td>23.67/2.07</td><td>0.745/0.063</td><td>0.257/0.070</td><td>0.366/0.105</td></tr><tr><td>Instant-NGP (w mask)</td><td>35.65/5.20</td><td>0.946/0.077</td><td>0.021/0.031</td><td>0.068/0.074</td></tr></table>
89
+
90
+ Generating Central Object Masks We perform central object mask segmentation through a series of methods. Instead of the classic PointRend [32] algorithm, we leverage the recent segmentation tool Segment-Anything (SAM) [33]. We obtain the prompt for SAM using Grounding-DINO [41], which generates a bounding box according to the category text prompt. After obtaining the mask segmentation of the first frame in the video, XMem [11] is applied to track the mask through the video. The masking pipeline is largely integrated into [12, 78].
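+ For illustration only, a minimal sketch of the first-frame masking step is shown below. The segment_anything calls follow SAM's public Python interface; the input box is assumed to come from the Grounding-DINO category prompt, and the XMem tracking stage is omitted.
+
+ ```python
+ # Hypothetical sketch: SAM mask for the first frame, prompted by a detected box.
+ import numpy as np
+ from segment_anything import sam_model_registry, SamPredictor
+
+ def first_frame_mask(image_rgb: np.ndarray, box_xyxy, sam_ckpt: str) -> np.ndarray:
+     """box_xyxy: [x0, y0, x1, y1] box, e.g. detected by Grounding-DINO from the category text."""
+     sam = sam_model_registry["vit_h"](checkpoint=sam_ckpt)
+     predictor = SamPredictor(sam)
+     predictor.set_image(image_rgb)  # HxWx3 uint8 image
+     masks, _, _ = predictor.predict(box=np.asarray(box_xyxy), multimask_output=False)
+     return masks[0]  # boolean HxW mask; XMem would then propagate it through the video
+ ```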
91
+
92
+ # 3.2. Statistics and Distribution
93
+
94
+ During WildRGB-D data collection, we recorded 8500 objects with 3 videos each. After excluding the videos where SLAM fails, our dataset contains 8367 objects in 23049 videos (retention rates are $99.3\% / 91.0\%$ ). The retained videos consist of $33.1\%$ single-object videos, $63.0\%$ multi-object videos, and $3.9\%$ hand-object videos. Details of the WildRGB-D dataset are listed in Fig. 3.
95
+
96
+ # 4. Experiments
97
+
98
+ # 4.1. Novel View Synthesis
99
+
100
+ In this section, we conduct multiple experiments on novel view synthesis (NVS) methods in the following three scenarios: 1) Single-Scene NVS, where we train NeRF-based methods [4, 47, 49] on a single scene with only an RGB image sequence. 2) Cross-Scene NVS, where we learn category-level scene representations to generalize
101
+
102
+ Table 2. Single-scene NVS results. Averages of the four metrics, with and without masks, across the training dataset are reported with their standard deviations (SD).
103
+
104
+ <table><tr><td>Method</td><td>Level</td><td>PSNR↑/SD</td><td>SSIM↑/SD</td><td>LPIPS↓/SD</td><td>MAE↓/SD</td></tr><tr><td>Pixel-NeRF [83]</td><td rowspan="3">Easy</td><td>20.28/0.65</td><td>0.645/0.043</td><td>0.495/0.074</td><td>0.355/0.120</td></tr><tr><td>MVSNeRF [6]</td><td>19.95/1.00</td><td>0.663/0.036</td><td>0.351/0.066</td><td>0.370/0.100</td></tr><tr><td>IBRNet [70]</td><td>20.93/0.98</td><td>0.711/0.031</td><td>0.395/0.153</td><td>-</td></tr><tr><td>Pixel-NeRF [83]</td><td rowspan="3">Middle</td><td>18.76/0.50</td><td>0.572/0.064</td><td>0.534/0.047</td><td>0.299/0.057</td></tr><tr><td>MVSNeRF [6]</td><td>18.75/0.74</td><td>0.601/0.069</td><td>0.363/0.036</td><td>0.345/0.102</td></tr><tr><td>IBRNet [70]</td><td>19.77/1.01</td><td>0.663/0.071</td><td>0.362/0.063</td><td>-</td></tr><tr><td>Pixel-NeRF [83]</td><td rowspan="3">Hard</td><td>17.23/0.66</td><td>0.521/0.035</td><td>0.624/0.054</td><td>0.383/0.121</td></tr><tr><td>MVSNeRF [6]</td><td>17.13/0.89</td><td>0.564/0.043</td><td>0.425/0.045</td><td>0.502/0.260</td></tr><tr><td>IBRNet [70]</td><td>17.92/1.12</td><td>0.614/0.056</td><td>0.439/0.069</td><td>-</td></tr></table>
105
+
106
+ Table 3. Cross-scene NVS results. Averages of the four metrics across all categories in the training dataset are reported, broken down by the three difficulty levels. Entries marked with '-' are not provided.
107
+
108
+ <table><tr><td>Method</td><td>PSNR↑/SD</td><td>SSIM↑/SD</td><td>LPIPS↓/SD</td><td>MAE↓/SD</td></tr><tr><td>Instant-NGP [49]</td><td>23.67/2.07</td><td>0.745/0.063</td><td>0.257/0.070</td><td>0.366/0.105</td></tr><tr><td>Instant-NGP (depth sup.)</td><td>24.60/2.13</td><td>0.759/0.062</td><td>0.239/0.066</td><td>0.108/0.057</td></tr><tr><td>Pixel-NeRF [83]</td><td>18.53/1.21</td><td>0.568/0.067</td><td>0.556/0.073</td><td>0.336/0.099</td></tr><tr><td>Pixel-NeRF (depth sup.)</td><td>19.10/1.21</td><td>0.605/0.060</td><td>0.499/0.064</td><td>0.147/0.087</td></tr><tr><td>MVSNeRF [6]</td><td>18.43/1.30</td><td>0.600/0.065</td><td>0.381/0.054</td><td>0.400/0.182</td></tr><tr><td>MVSNeRF (depth sup.)</td><td>18.44/1.29</td><td>0.600/0.065</td><td>0.381/0.054</td><td>0.397/0.186</td></tr></table>
109
+
110
+ Table 4. Depth Supervised NVS and depth estimation results. Averages of the four metrics, with and without depth supervision, across the training dataset are reported with their standard deviations (SD).
111
+
112
+ into other scenes with Generalizable NeRFs [6, 70, 83]. 3) Depth Supervised NVS, where we conduct NVS experiments with depth image priors in our dataset to study the potential that depth information brings to NVS tasks.
113
+
114
+ Single-Scene NVS We select ten scenes from each category and uniformly sample images as the validation split. We choose NeRF [47], Mip-NeRF 360 [4] and Instant-NGP [49] for evaluation. Results are shown in Tab. 2. We report the average PSNR, SSIM [73], LPIPS [88], and the mean absolute error (MAE) of rendered depths against our sensor-collected depths across all categories. We also report metrics restricted to the NVS quality of the central objects using object masks. Results show that Mip-NeRF 360 and Instant-NGP outperform the original NeRF in terms of visual quality metrics. NeRF-based methods perform better
115
+
116
+ ![](images/424355a14fce7ac886a94f06f3b859a49140c8ac3ffc5d1eefe4d73f0c7b127c.jpg)
117
+ Figure 6. Novel view synthesis visualization of different kinds of NeRF methods: NeRF [47], Mip-NeRF 360 [4] and Instant-NGP [49].
118
+
119
+ when we only consider the recovery of the central objects under the object masks. Moreover, Mip-NeRF 360 performs best at learning single-scene geometry. Visualizations can be found in Figure 6. In brief, our dataset offers extensive categories and scenes for in-depth NVS experiments.
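For reference, the masked variants of these metrics are simple to compute; the sketch below shows one possible NumPy implementation of object-masked PSNR and the depth MAE against sensor depths (an illustration only, not the exact evaluation code).

```python
import numpy as np

def masked_psnr(pred_rgb, gt_rgb, mask):
    """PSNR restricted to the central-object mask (images scaled to [0, 1])."""
    mse = np.mean((pred_rgb[mask] - gt_rgb[mask]) ** 2)
    return 10.0 * np.log10(1.0 / mse)

def depth_mae(pred_depth, sensor_depth, mask=None):
    """Mean absolute error between rendered depths and sensor-collected depths."""
    valid = sensor_depth > 0          # ignore pixels without a depth reading
    if mask is not None:
        valid &= mask                 # optionally restrict to the object mask
    return np.mean(np.abs(pred_depth[valid] - sensor_depth[valid]))
```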
120
+
121
+ Cross-Scene NVS Apart from single-scene optimization, we also evaluate Generalizable NeRFs, namely Pixel-NeRF [83], MVSNeRF [6] and IBRNet [70], in the cross-scene setting. For each category in our dataset, we select the same test scenes as in the single-scene NVS experiments and train on the remaining scenes of the same category to learn per-category latent representations. We divide the 46 categories into three difficulty levels and report the average metrics of each level. For evaluation, we use three source views to synthesize novel views. We report PSNR, SSIM and LPIPS to measure visual quality and depth MAE to measure the quality of the learned geometry. From Tab. 3, we observe that IBRNet outperforms the other methods at all three difficulty levels in terms of visual quality. Additionally, the quality of the learned geometry is not highly correlated with the rendering quality of novel views. To sum up, our dataset offers great potential for learning category-level cross-scene NVS methods.
122
+
123
+ Depth Supervised NVS We also study the influence of depth supervision. We choose Instant-NGP [49] among single-scene NVS methods and both Pixel-NeRF [83] and MVSNeRF [6] among cross-scene NVS methods. Our experimental results in Tab. 4 show that depth supervision helps these methods learn better representations. In our experimental setting, we add an L1 depth loss to each algorithm and choose the best-performing depth loss weight
124
+
125
+ ![](images/165950f06335eeb2ef47c2773db19240c708671699e1ab5ba5d03392d438a812.jpg)
126
+ Figure 7. RelPose++ [35] pair-wise evaluation visualization. We show each image pair with the relative rotation predicted by RelPose++.
127
+
128
+ for each of them. Compared with conducting NVS without depths, both Instant-NGP and Pixel-NeRF improve when trained with the depth loss. For MVSNeRF, since depth information is already used in the original training to guide ray construction, the improvement from the extra depth loss is limited. In a nutshell, with depth priors, both single-scene and cross-scene NVS methods learn better representations, making NVS more accurate and generalizable.
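The depth supervision used here amounts to adding one extra term to the training objective. A minimal PyTorch-style sketch is given below; the loss weight is a placeholder hyperparameter that, as noted above, is tuned per method.

```python
import torch

def nvs_loss(pred_rgb, gt_rgb, pred_depth, sensor_depth, depth_weight=0.1):
    """Photometric loss plus an L1 depth term on rays with valid sensor depth.

    depth_weight is a placeholder; the best-performing value is chosen per method.
    """
    rgb_loss = torch.mean((pred_rgb - gt_rgb) ** 2)
    valid = sensor_depth > 0  # depth sensors report 0 where no reading exists
    depth_loss = torch.mean(torch.abs(pred_depth[valid] - sensor_depth[valid]))
    return rgb_loss + depth_weight * depth_loss
```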
129
+
130
+ # 4.2. Camera Pose Estimation
131
+
132
+ In this section, we benchmark two data-driven methods, RelPose [86] and RelPose++ [35], which infer relative camera poses from multi-view images in a sparse-view setting. Leveraging the camera pose annotations and the large-scale category-level videos in our dataset, we aim to learn a generalizable viewpoint inference capability from training-seen categories to unseen ones. Since WildRGB-D has full and dense 360-degree camera trajectories, it provides both a large-scale database and diverse view settings for training. In our experiments, we divide the 46 categories into training and testing categories, and also hold out some videos from the training categories for evaluation. We adopt the evaluation settings described in [35, 86] and report results in Tab. 5 and Tab. 6. We observe that the two methods generalize well both to known categories and to unseen categories, since the relative rotation estimation errors remain in a reasonable range (also see Figure 7 for visualization). However,
133
+
134
+ ![](images/1ad50b11ab8b604fcd43523a82d19164067801f061fbb5fa77892d3dcbdcf850.jpg)
135
+ Figure 8. Visualization of RGBD surface reconstruction from Neusfacto [85]. Original RGBD image samples are shown on the left and the multi-view reconstructed surface is shown on the right for each example.
136
+
137
+ the translation prediction error of RelPose++ is comparatively large, which shows that this problem still poses challenges. To sum up, the WildRGB-D dataset can serve as a large-scale training source for generalizable camera pose estimation algorithms to achieve strong results.
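For clarity, the "<15 deg." and "<30 deg." numbers in Tab. 5 and Tab. 6 can be computed as the fraction of relative-rotation predictions whose geodesic error falls below a threshold. The NumPy sketch below is one way to compute this metric, following the spirit of [35, 86]; the exact bookkeeping in the benchmarks may differ.

```python
import numpy as np

def rotation_error_deg(R_pred, R_gt):
    """Geodesic angle (degrees) between predicted and ground-truth rotations."""
    R_rel = R_pred @ R_gt.T
    cos_angle = np.clip((np.trace(R_rel) - 1.0) / 2.0, -1.0, 1.0)
    return np.degrees(np.arccos(cos_angle))

def accuracy_at_threshold(pred_rotations, gt_rotations, threshold_deg=15.0):
    """Fraction of pairs whose relative-rotation error is below the threshold."""
    errors = [
        rotation_error_deg(Rp, Rg)
        for Rp, Rg in zip(pred_rotations, gt_rotations)
    ]
    return float(np.mean(np.array(errors) < threshold_deg))
```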
138
+
139
+ # 4.3. RGBD Object Surface Reconstruction
140
+
141
+ In our object surface reconstruction setting, algorithms need to utilize an RGBD image sequence and central object masks to reconstruct the surface mesh of the central object. Reconstruction without depths is also evaluated for comparison. To evaluate reconstruction quality, we calculate the Chamfer Distance between the reconstructed mesh and the aggregated object point cloud derived from object-masked depth images. Ten single-object scenes are selected in each category to evaluate Instant-NGP [49] and Neusfacto [85]. From the results in Tab. 7, we observe that reconstruction is better with depth priors. Moreover, Neusfacto with RGBD outperforms Instant-NGP, which shows that depths help the SDF-based Neusfacto learn the correct object boundary and benefit it more than Instant-NGP. The standard deviation is high because reconstruction quality varies across categories in the dataset. Visualizations of Neusfacto RGBD reconstructions are shown in Figure 8. In brief, our dataset provides an RGBD object reconstruction evaluation track, fostering the development of more mature algorithms in this field.
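The evaluation metric above is standard; for concreteness, the sketch below implements one common symmetric Chamfer Distance variant using SciPy's KD-tree, averaging nearest-neighbor distances in both directions. The exact variant used for Tab. 7 may differ.

```python
import numpy as np
from scipy.spatial import cKDTree

def chamfer_distance(points_a, points_b):
    """Symmetric Chamfer Distance between point sets of shape (N, 3) and (M, 3).

    points_a could be sampled from the reconstructed mesh surface, and
    points_b taken from the aggregated object point cloud (object-masked depths).
    """
    dist_ab, _ = cKDTree(points_b).query(points_a)  # A -> nearest neighbor in B
    dist_ba, _ = cKDTree(points_a).query(points_b)  # B -> nearest neighbor in A
    return float(np.mean(dist_ab) + np.mean(dist_ba))
```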
142
+
143
+ # 4.4. RGBD 6D Object Pose Estimation
144
+
145
+ We explore our dataset for self-supervised training of 6D pose estimation. We adopt the algorithm proposed in [87], which leverages a category-level shape prior and learns by matching correspondences between images and shapes. In our experiments, we evaluate the trained model on the Wild6D [23]
146
+
147
+ ![](images/cfbf725d8c2d58023c1876d39cb419d30c986d507fb0fbc41ca4a5a1af5c0d4f.jpg)
148
+ Figure 9. Object 6D pose estimation visualization. We visualize the predicted category-level 6D pose on three categories in Wild6D [23] test set (bottle, bowl, and mug) using models that only perform self-supervised training on the corresponding category of WildRGB-D Dataset. The ground truth bounding boxes are colored in green, and the predicted bounding boxes are in red.
149
+
150
+ test set. Three different training-set settings are adopted: 1) the Wild6D training set; 2) the WildRGB-D dataset; 3) the Wild6D training set + the WildRGB-D dataset. Categories common to the two datasets are selected for self-supervised training and evaluation. Results in Tab. 8 show that in the out-of-distribution setting, where we train only on our dataset and evaluate on a different dataset, some of the metrics decrease. This is mainly due to the different distributions of camera rotations in these
151
+
152
+ <table><tr><td rowspan="2">Eval. Type</td><td rowspan="2">Categories</td><td rowspan="2">Metrics</td><td colspan="4">#Frames</td></tr><tr><td>3</td><td>5</td><td>10</td><td>20</td></tr><tr><td rowspan="4">MST</td><td rowspan="2">seen</td><td>&lt;15 deg.</td><td>57.4</td><td>55.1</td><td>51.4</td><td>47.4</td></tr><tr><td>&lt;30 deg.</td><td>82.1</td><td>79.8</td><td>77.2</td><td>74.0</td></tr><tr><td rowspan="2">unseen</td><td>&lt;15 deg.</td><td>38.4</td><td>37.7</td><td>36.6</td><td>35.2</td></tr><tr><td>&lt;30 deg.</td><td>62.5</td><td>61.8</td><td>60.4</td><td>59.0</td></tr><tr><td rowspan="4">Coord.Asc.</td><td rowspan="2">seen</td><td>&lt;15 deg.</td><td>69.3</td><td>69.3</td><td>69.8</td><td>69.3</td></tr><tr><td>&lt;30 deg.</td><td>85.3</td><td>85.3</td><td>85.4</td><td>85.3</td></tr><tr><td rowspan="2">unseen</td><td>&lt;15 deg.</td><td>46.0</td><td>46.2</td><td>46.9</td><td>46.5</td></tr><tr><td>&lt;30 deg.</td><td>66.4</td><td>67.0</td><td>67.2</td><td>67.1</td></tr><tr><td rowspan="4">Sequential</td><td rowspan="2">seen</td><td>&lt;15 deg.</td><td>51.9</td><td>45.1</td><td>36.0</td><td>26.9</td></tr><tr><td>&lt;30 deg.</td><td>78.3</td><td>72.5</td><td>61.9</td><td>49.3</td></tr><tr><td rowspan="2">unseen</td><td>&lt;15 deg.</td><td>34.9</td><td>31.0</td><td>25.1</td><td>18.3</td></tr><tr><td>&lt;30 deg.</td><td>59.1</td><td>54.4</td><td>46.4</td><td>37.1</td></tr></table>
153
+
154
+ Table 5. RelPose [86] camera evaluation results. We follow the three evaluation types (MST, Coord.Asc., Sequential) proposed in [86] and report the percentage of relative rotation predictions with errors below 15 and 30 degrees, for both training-seen categories and unseen ones.
155
+
156
+ <table><tr><td rowspan="2">Eval. Type</td><td rowspan="2">Categories</td><td rowspan="2">Metrics</td><td colspan="4">#Frames</td></tr><tr><td>2</td><td>3</td><td>5</td><td>8</td></tr><tr><td rowspan="4">Pairwise</td><td rowspan="2">seen</td><td>&lt;15 deg.</td><td>69.6</td><td>68.3</td><td>67.3</td><td>66.6</td></tr><tr><td>&lt;30 deg.</td><td>86.5</td><td>87.4</td><td>87.2</td><td>86.8</td></tr><tr><td rowspan="2">unseen</td><td>&lt;15 deg.</td><td>53.4</td><td>52.5</td><td>52.5</td><td>52.3</td></tr><tr><td>&lt;30 deg.</td><td>74.1</td><td>74.5</td><td>75.3</td><td>75.4</td></tr><tr><td rowspan="4">Coord.Asc.</td><td rowspan="2">seen</td><td>&lt;15 deg.</td><td>70.4</td><td>71.5</td><td>71.9</td><td>71.6</td></tr><tr><td>&lt;30 deg.</td><td>86.7</td><td>87.9</td><td>88.5</td><td>88.7</td></tr><tr><td rowspan="2">unseen</td><td>&lt;15 deg.</td><td>52.9</td><td>55.4</td><td>54.9</td><td>54.8</td></tr><tr><td>&lt;30 deg.</td><td>73.9</td><td>75.7</td><td>76.3</td><td>76.8</td></tr><tr><td rowspan="4">CamCENTER</td><td rowspan="2">seen</td><td>&lt;0.2 SS</td><td>100.0</td><td>29.8</td><td>12.6</td><td>5.9</td></tr><tr><td>&lt;0.3 SS</td><td>100.0</td><td>43.8</td><td>23.6</td><td>13.3</td></tr><tr><td rowspan="2">unseen</td><td>&lt;0.2 SS</td><td>100.0</td><td>30.5</td><td>12.4</td><td>5.6</td></tr><tr><td>&lt;0.3 SS</td><td>100.0</td><td>44.3</td><td>23.2</td><td>12.5</td></tr><tr><td rowspan="4">Cam.Trans.</td><td rowspan="2">seen</td><td>&lt;0.2 SS</td><td>22.3</td><td>11.9</td><td>4.8</td><td>2.2</td></tr><tr><td>&lt;0.3 SS</td><td>30.1</td><td>20.4</td><td>10.8</td><td>6.2</td></tr><tr><td rowspan="2">unseen</td><td>&lt;0.2 SS</td><td>21.8</td><td>12.0</td><td>5.7</td><td>2.7</td></tr><tr><td>&lt;0.3 SS</td><td>29.8</td><td>20.9</td><td>12.0</td><td>6.4</td></tr></table>
157
+
158
+ Table 6. RelPose++ [35] camera evaluation results. We follow the four evaluation types (Pairwise, Coord.Asc., CamCENTER, Cam.Trans.) proposed in [35] and report the percentage of predictions with rotation errors below 15/30 degrees or camera center/translation errors below 0.2/0.3 of the scene scale, for both training-seen categories and unseen ones. Note: SS means the scene scale defined in [35].
159
+
160
+ <table><tr><td colspan="2">RGB</td><td colspan="2">RGBD</td></tr><tr><td>Instant-NGP [49]</td><td>Neusfacto [85]</td><td>Instant-NGP [49]</td><td>Neusfacto [85]</td></tr><tr><td>45.91/64.01</td><td>88.92/89.94</td><td>28.46/29.28</td><td>25.83/34.07</td></tr></table>
161
+
162
+ two datasets (visualized in Fig. 4): Wild6D does not cover full 360-degree views, while the WildRGB-D dataset covers a larger pitch angle range in object 6D poses. However, we
163
+
164
+ Table 7. RGBD object surface reconstruction results. The average Chamfer Distance with its standard deviation across the selected categories is reported (Average/SD).
165
+
166
+ <table><tr><td>Category</td><td>Datasets</td><td>IOU0.25</td><td>IOU0.5</td><td>5 deg. 2cm</td><td>5 deg. 5cm</td><td>10 deg. 2cm</td><td>10 deg. 5cm</td></tr><tr><td rowspan="3">Bottle</td><td>Wild6D</td><td>93.2</td><td>85.2</td><td>71.3</td><td>79.4</td><td>79.8</td><td>90.9</td></tr><tr><td>ROW</td><td>93.3</td><td>70.9</td><td>34.1</td><td>48.8</td><td>47.9</td><td>78.8</td></tr><tr><td>Wild6D+ROW</td><td>93.3</td><td>85.8</td><td>71.9</td><td>78.6</td><td>81.7</td><td>91.7</td></tr><tr><td rowspan="3">Bowl</td><td>Wild6D</td><td>98.3</td><td>90.4</td><td>66.1</td><td>70.0</td><td>86.8</td><td>94.6</td></tr><tr><td>ROW</td><td>98.3</td><td>91.8</td><td>33.8</td><td>35.7</td><td>86.3</td><td>93.5</td></tr><tr><td>Wild6D+ROW</td><td>98.4</td><td>91.8</td><td>40.3</td><td>42.0</td><td>87.5</td><td>93.7</td></tr><tr><td rowspan="3">Mug</td><td>Wild6D</td><td>89.0</td><td>59.2</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.1</td></tr><tr><td>ROW</td><td>89.1</td><td>61.9</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.1</td></tr><tr><td>Wild6D+ROW</td><td>89.3</td><td>50.2</td><td>0.0</td><td>0.0</td><td>0.0</td><td>0.0</td></tr></table>
167
+
168
+ Table 8. Self-Supervised 6D Pose Estimation results. The evaluation results on Wild6D test dataset under three different settings in bottle, bowl and mug categories.
169
+
170
+ still observe some improvements in the evaluations. Training with the WildRGB-D dataset benefits the IoU metrics, and joint-dataset training improves the rotation+translation metrics for particular categories. Visualizations of 6D pose estimation on the Wild6D test set using models trained only on our dataset can be found in Figure 9. To summarize, our dataset provides large-scale category-level RGBD image sequences, serving as ample unsupervised training data, which has the potential to enable more accurate 6D pose estimation in the future.
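For reference, the "n deg. m cm" entries in Tab. 8 follow the common category-level pose-accuracy convention: a prediction counts as correct only if both its rotation error and its translation error fall under the thresholds. The sketch below is one straightforward way to compute this; symmetry handling for rotationally symmetric categories such as bottles is omitted.

```python
import numpy as np

def pose_error(R_pred, t_pred, R_gt, t_gt):
    """Rotation error (degrees) and translation error (meters) of a 6D pose."""
    cos_angle = np.clip((np.trace(R_pred @ R_gt.T) - 1.0) / 2.0, -1.0, 1.0)
    rot_err_deg = np.degrees(np.arccos(cos_angle))
    trans_err_m = np.linalg.norm(t_pred - t_gt)
    return rot_err_deg, trans_err_m

def n_deg_m_cm_accuracy(poses_pred, poses_gt, deg=5.0, cm=2.0):
    """Fraction of predictions within both the rotation and translation thresholds."""
    hits = []
    for (Rp, tp), (Rg, tg) in zip(poses_pred, poses_gt):
        rot_err, trans_err = pose_error(Rp, tp, Rg, tg)
        hits.append(rot_err < deg and trans_err * 100.0 < cm)
    return float(np.mean(hits))
```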
171
+
172
+ # 5. Conclusion
173
+
174
+ The object-centric datasets in the computer vision community have mostly focused on RGB videos, while practical applications often involve depth as an input or for better annotations. We collect the largest object-centric RGB-D video dataset, WildRGB-D, where all videos are captured in cluttered scenes. It is composed of category-level RGB-D object videos taken with iPhones around the objects in 360 degrees, and contains around 8500 recorded objects and nearly 20000 RGB-D videos across 46 common object categories with three setups covering most scenarios. The dataset is annotated with object masks, real-world-scale camera poses, and aggregated point clouds reconstructed from the RGBD videos. We set up four evaluation tracks with WildRGB-D, showing that the large-scale capture of RGB-D objects provides great potential to advance 3D object learning. The current dataset does not come with object 6D pose annotations, which would require further crowdsourcing effort. Collecting this annotation for supervised training methods as well as evaluation will be one of our future efforts. We are committed to releasing our dataset and evaluation code.
175
+
176
+ Acknowledgment This project was supported, in part, by the Amazon Research Award, the Qualcomm Innovation Fellowship, the Intel Rising Star Faculty Award, and the CISCO Faculty Award.
177
+
178
+ # References
179
+
180
+ [1] Henrik Aanaes, Rasmus Ramsbøl Jensen, George Vogiatzis, Engin Tola, and Anders Bjorholm Dahl. Large-scale data for multiple-view stereopsis. International Journal of Computer Vision, 120:153-168, 2016. 3
181
+ [2] Adel Ahmadyan, Liangkai Zhang, Artsiom Ablavatski, Jianing Wei, and Matthias Grundmann. Objectron: A large scale dataset of object-centric videos in the wild with pose annotations. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7822-7831, 2021. 2, 3
182
+ [3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In ICCV, 2021. 3
183
+ [4] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In CVPR, 2022. 3, 5, 6
184
+ [5] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015. 2, 3
185
+ [6] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 14124-14133, 2021. 3, 5, 6
186
+ [7] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In European Conference on Computer Vision, pages 333-350. Springer, 2022. 3
187
+ [8] Dengsheng Chen, Jun Li, Zheng Wang, and Kai Xu. Learning canonical shape space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11973-11982, 2020. 4
188
+ [9] Wei Chen, Xi Jia, Hyung Jin Chang, Jinming Duan, Linlin Shen, and Ales Leonardis. Fs-net: Fast shape-based network for category-level 6d object pose estimation with decoupled rotation mechanism. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1581-1590, 2021. 4
189
+ [10] Xu Chen, Zijian Dong, Jie Song, Andreas Geiger, and Otmar Hilliges. Category level object pose estimation via neural analysis-by-synthesis. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXVI 16, pages 139-156. Springer, 2020. 4
190
+ [11] Ho Kei Cheng and Alexander G Schwing. Xmem: Long-term video object segmentation with an atkinson-shiffrin memory model. In European Conference on Computer Vision, pages 640-658. Springer, 2022. 2, 5
191
+ [12] Yangming Cheng, Liulei Li, Yuanyou Xu, Xiaodi Li, Zongxin Yang, Wenguan Wang, and Yi Yang. Segment and track anything, 2023. 2, 5
192
+
193
+ [13] Sungjoon Choi, Qian-Yi Zhou, Stephen Miller, and Vladlen Koltun. A large dataset of object scans. arXiv preprint arXiv:1602.02481, 2016. 3
194
+ [14] Jasmine Collins, Shubham Goel, Kenan Deng, Achleshwar Luthra, Leon Xu, Erhan Gundogdu, Xi Zhang, Tomas F Yago Vicente, Thomas Dideriksien, Himanshu Arora, et al. Abo: Dataset and benchmarks for real-world 3d object understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21126-21136, 2022. 2, 3
195
+ [15] François Darmon, Bénédicte Bascle, Jean-Clement Devaux, Pascal Monasse, and Mathieu Aubry. Improving neural implicit surfaces geometry with patch warping. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6260-6269, 2022. 3
196
+ [16] Matt Deitke, Dustin Schwenk, Jordi Salvador, Luca Weihs, Oscar Michel, Eli VanderBilt, Ludwig Schmidt, Kiana Ehsani, Aniruddha Kembhavi, and Ali Farhadi. Objaverse: A universe of annotated 3d objects, 2022. 2
197
+ [17] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 4
198
+ [18] Laura Downs, Anthony Francis, Nate Koenig, Brandon Kinman, Ryan Hickman, Krista Reymann, Thomas B McHugh, and Vincent Vanhoucke. Google scanned objects: A high-quality dataset of 3d scanned household items. In 2022 International Conference on Robotics and Automation (ICRA), pages 2553-2560. IEEE, 2022. 3
199
+ [19] Hugh Durrant-Whyte and Tim Bailey. Simultaneous localization and mapping: part i. IEEE robotics & automation magazine, 13(2):99-110, 2006. 2, 3, 4
200
+ [20] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 24(6):381-395, 1981. 3
201
+ [21] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 3
202
+ [22] Huan Fu, Rongfei Jia, Lin Gao, Mingming Gong, Binqiang Zhao, Steve Maybank, and Dacheng Tao. 3d-future: 3d furniture shape with texture. International Journal of Computer Vision, 129:3313-3337, 2021. 2, 3
203
+ [23] Yang Fu and Xiaolong Wang. Category-level 6d object pose estimation in the wild: A semi-supervised learning approach and a new dataset, 2022. 2, 3, 4, 7
204
+ [24] Ge Gao, Mikko Lauri, Yulong Wang, Xiaolin Hu, Jianwei Zhang, and Simone Frintrop. 6d object pose regression via supervised learning on point clouds. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 3643-3649. IEEE, 2020. 4
205
+ [25] Jun Gao, Tianchang Shen, Zian Wang, Wenzheng Chen, Kangxue Yin, Daiqing Li, Or Litany, Zan Gojcic, and Sanja Fidler. Get3d: A generative model of high quality 3d textured shapes learned from images. In Advances In Neural Information Processing Systems, 2022. 2
206
+
207
+ [26] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition, 2015. 1
208
+ [27] Yisheng He, Haoqiang Fan, Haibin Huang, Qifeng Chen, and Jian Sun. Towards self-supervised category-level object pose and size estimation. arXiv preprint arXiv:2203.02884, 2022. 4
209
+ [28] Philipp Henzler, Jeremy Reizenstein, Patrick Labatut, Roman Shapovalov, Tobias Ritschel, Andrea Vedaldi, and David Novotny. Unsupervised learning of 3d object categories from videos in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4700-4709, 2021. 3
210
+ [29] Hanwen Jiang, Zhenyu Jiang, Kristen Grauman, and Yuke Zhu. Few-view object reconstruction with unknown categories and camera poses. arXiv preprint arXiv:2212.04492, 2022.3
211
+ [30] HyunJun Jung, Shun-Cheng Wu, Patrick Ruhkamp, Guangyao Zhai, Hannah Schieber, Giulia Rizzoli, Pengyuan Wang, Hongcheng Zhao, Lorenzo Garattoni, Sven Meier, Daniel Roth, Nassir Navab, and Benjamin Busam. Housecat6d – a large-scale multi-modal category level 6d object pose dataset with household objects in realistic scenarios, 2023. 4
212
+ [31] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Transactions on Graphics (ToG), 42(4):1-14, 2023. 3
213
+ [32] Alexander Kirillov, Yuxin Wu, Kaiming He, and Ross Girshick. Pointrend: Image segmentation as rendering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9799-9808, 2020. 5
214
+ [33] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 2, 5
215
+ [34] Joseph J Lim, Hamed Pirsiavash, and Antonio Torralba. Parsing ikea objects: Fine pose estimation. In Proceedings of the IEEE international conference on computer vision, pages 2992-2999, 2013. 3
216
+ [35] Amy Lin, Jason Y Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926, 2023. 2, 4, 6, 8
217
+ [36] Jiehong Lin, Zewei Wei, Zhihao Li, Songcen Xu, Kui Jia, and Yuanqing Li. Dualposenet: Category-level 6d object pose and size estimation using dual pose network with refined learning of pose consistency. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3560-3569, 2021. 4
218
+ [37] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014. 4
219
+ [38] Yunzhi Lin, Jonathan Tremblay, Stephen Tyree, Patricio A Vela, and Stan Birchfield. Single-stage keypoint-based
220
+
221
+ category-level object pose estimation from an rgb image. In 2022 International Conference on Robotics and Automation (ICRA), pages 1547-1553. IEEE, 2022. 4
222
+ [39] Zhi-Hao Lin, Wei-Chiu Ma, Hao-Yu Hsu, Yu-Chiang Frank Wang, and Shenlong Wang. Neurmips: Neural mixture of planar experts for view synthesis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 15702-15712, 2022. 3
223
+ [40] Ruoshi Liu, Rundi Wu, Basile Van Hoorick, Pavel Tokmakov, Sergey Zakharov, and Carl Vondrick. Zero-1-to-3: Zero-shot one image to 3d object, 2023. 2
224
+ [41] Shilong Liu, Zhaoyang Zeng, Tianhe Ren, Feng Li, Hao Zhang, Jie Yang, Chunyuan Li, Jianwei Yang, Hang Su, Jun Zhu, et al. Grounding dino: Marrying dino with grounded pre-training for open-set object detection. arXiv preprint arXiv:2303.05499, 2023. 2, 5
225
+ [42] Xingyu Liu, Shun Iwase, and Kris M. Kitani. Stereobj-1m: Large-scale stereo image dataset for 6d object pose estimation, 2022. 4
226
+ [43] Yuan Liu, Sida Peng, Lingjie Liu, Qianqian Wang, Peng Wang, Christian Theobalt, Xiaowei Zhou, and Wenping Wang. Neural rays for occlusion-aware image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7824-7833, 2022. 3
227
+ [44] Bruce D Lucas and Takeo Kanade. An iterative image registration technique with an application to stereo vision. In IJCAI'81: 7th international joint conference on Artificial intelligence, pages 674-679, 1981. 3
228
+ [45] Fabian Manhardt, Gu Wang, Benjamin Busam, Manuel Nickel, Sven Meier, Luca Minciullo, Xiangyang Ji, and Nassir Navab. Cps++: Improving class-level 6d pose and shape estimation from monocular images with self-supervised learning. arXiv preprint arXiv:2003.05848, 2020. 4
229
+ [46] Iaroslav Melekhov, Juha Ylioinas, Juho Kannala, and Esa Rahtu. Relative camera pose estimation using convolutional neural networks. In Advanced Concepts for Intelligent Vision Systems: 18th International Conference, ACIVS 2017, Antwerp, Belgium, September 18-21, 2017, Proceedings 18, pages 675-687. Springer, 2017. 3
230
+ [47] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 2, 3, 5, 6
231
+ [48] Ben Mildenhall, Peter Hedman, Ricardo Martin-Brualla, Pratul P Srinivasan, and Jonathan T Barron. Nerf in the dark: High dynamic range view synthesis from noisy raw images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16190-16199, 2022. 3
232
+ [49] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM TOG, 2022. 3, 5, 6, 7, 8
233
+ [50] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In Proceedings of the
234
+
235
+ IEEE/CVF International Conference on Computer Vision, pages 5589-5599, 2021. 3
236
+ [51] Wanli Peng, Jianhang Yan, Hongtao Wen, and Yi Sun. Self-supervised category-level 6d object pose estimation with deep implicit shape representation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 2082-2090, 2022. 4
237
+ [52] Charles R. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. Pointnet: Deep learning on point sets for 3d classification and segmentation, 2017. 2
238
+ [53] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision, 2021. 1
239
+ [54] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10901-10911, 2021. 2, 3
240
+ [55] Chris Rockwell, Justin Johnson, and David F Fouhey. The 8-point algorithm as an inductive bias for relative pose prediction by vits. In 2022 International Conference on 3D Vision (3DV), pages 1-11. IEEE, 2022. 3
241
+ [56] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 2
242
+ [57] Johannes L Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4104-4113, 2016. 3, 4
243
+ [58] Thomas Schops, Torsten Sattler, and Marc Pollefeys. Bad slam: Bundle adjusted direct rgb-d slam. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 134-144, 2019. 2, 4
244
+ [59] Samarth Sinha, Jason Y Zhang, Andrea Tagliasacchi, Igor Gilitschenski, and David B Lindell. Sparsepose: Sparse-view camera pose regression and refinement. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21349-21359, 2023. 4
245
+ [60] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5459-5469, 2022. 3
246
+ [61] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. Advances in neural information processing systems, 34:16558-16569, 2021. 3
247
+ [62] Meng Tian, Marcelo H Ang, and Gim Hee Lee. Shape prior deformation for categorical 6d object pose and size estimation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXI 16, pages 530-546. Springer, 2020. 4
248
+ [63] Bill Triggs, Philip F McLauchlan, Richard I Hartley, and Andrew W Fitzgibbon. Bundle adjustment—a modern synthesis. In Vision Algorithms: Theory and Practice: International Workshop on Vision Algorithms Corfu,
249
+
250
+ Greece, September 21-22, 1999 Proceedings, pages 298-372. Springer, 2000. 3
251
+ [64] Shubham Tulsiani, Abhishek Kar, Joao Carreira, and Jitendra Malik. Learning category-specific deformable 3d models for object reconstruction. IEEE transactions on pattern analysis and machine intelligence, 39(4):719-731, 2016. 3
252
+ [65] Shinji Umeyama. Least-squares estimation of transformation parameters between two point patterns. IEEE Transactions on Pattern Analysis & Machine Intelligence, 13(04): 376-380, 1991. 4
253
+ [66] Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T Barron, and Pratul P Srinivasan. Ref-nerf: Structured view-dependent appearance for neural radiance fields. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5481–5490. IEEE, 2022. 3
254
+ [67] He Wang, Srinath Sridhar, Jingwei Huang, Julien Valentin, Shuran Song, and Leonidas J Guibas. Normalized object coordinate space for category-level 6d object pose and size estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2642-2651, 2019. 4
255
+ [68] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021. 3
256
+ [69] Pengyuan Wang, HyunJun Jung, Yitong Li, Siyuan Shen, Rahul Parthasarathy Srikanth, Lorenzo Garattoni, Sven Meier, Nassir Navab, and Benjamin Busam. Phocal: A multi-modal dataset for category-level object pose estimation with photometrically challenging objects, 2022. 4
257
+ [70] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2021. 3, 5, 6
258
+ [71] Sen Wang, Ronald Clark, Hongkai Wen, and Niki Trigoni. Deepvo: Towards end-to-end visual odometry with deep recurrent convolutional neural networks. In 2017 IEEE international conference on robotics and automation (ICRA), pages 2043-2050. IEEE, 2017. 3
259
+ [72] Yiming Wang, Qin Han, Marc Habermann, Kostas Dani-ilidis, Christian Theobalt, and Lingjie Liu. Neus2: Fast learning of neural implicit surfaces for multi-view reconstruction, 2023. 3
260
+ [73] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. 5
261
+ [74] Tong Wu, Jiaqi Wang, Xingang Pan, Xudong Xu, Christian Theobalt, Ziwei Liu, and Dahua Lin. Voxurf: Voxel-based efficient and accurate neural surface reconstruction. arXiv preprint arXiv:2208.12697, 2022.3
262
+ [75] Tong Wu, Jiarui Zhang, Xiao Fu, Yuxin Wang, Jiawei Ren, Liang Pan, Wayne Wu, Lei Yang, Jiaqi Wang, Chen Qian, et al. Omniobject3d: Large-vocabulary 3d object dataset for realistic perception, reconstruction and generation. In Pro
263
+
264
+ ceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 803-814, 2023. 2, 3
265
+ [76] Zhirong Wu, Shuran Song, Aditya Khosla, Fisher Yu, Linguang Zhang, Xiaoou Tang, and Jianxiong Xiao. 3d shapenets: A deep representation for volumetric shapes. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1912-1920, 2015. 2, 3
266
+ [77] Yu Xiang, Roozbeh Mottaghi, and Silvio Savarese. Beyond Pascal: A benchmark for 3d object detection in the wild. In IEEE winter conference on applications of computer vision, pages 75-82. IEEE, 2014. 3
267
+ [78] Jinyu Yang, Mingqi Gao, Zhe Li, Shang Gao, Fangjing Wang, and Feng Zheng. Track anything: Segment anything meets videos, 2023. 2, 5
268
+ [79] Nan Yang, Lukas von Stumberg, Rui Wang, and Daniel Cremers. D3vo: Deep depth, deep pose and deep uncertainty for monocular visual odometry. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1281-1292, 2020. 3
269
+ [80] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. Blendedmvs: A large-scale dataset for generalized multi-view stereo networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1790-1799, 2020. 3
270
+ [81] Lior Yariv, Jiatao Gu, Yoni Kasten, and Yaron Lipman. Volume rendering of neural implicit surfaces. Advances in Neural Information Processing Systems, 34:4805-4815, 2021. 3
271
+ [82] Yang You, Ruoxi Shi, Weiming Wang, and Cewu Lu. CPPf: Towards robust category-level 9d pose estimation in the wild. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6866-6875, 2022. 4
272
+ [83] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelnerf: Neural radiance fields from one or few images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4578-4587, 2021. 3, 5, 6
273
+ [84] Xianggang Yu, Mutian Xu, Yidan Zhang, Haolin Liu, Chongjie Ye, Yushuang Wu, Zizheng Yan, Chenming Zhu, Zhangyang Xiong, Tianyou Liang, Guanying Chen, Shuguang Cui, and Xiaoguang Han. Mvimgnet: A large-scale dataset of multi-view images, 2023. 3
274
+ [85] Zehao Yu, Anpei Chen, Bozidar Antic, Songyou Peng, Apratim Bhattacharyya, Michael Niemeyer, Siyu Tang, Torsten Sattler, and Andreas Geiger. Sdfstudio: A unified framework for surface reconstruction, 2022. 3, 7, 8
275
+ [86] Jason Y Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose: Predicting probabilistic relative rotation for single objects in the wild. In European Conference on Computer Vision, pages 592-611. Springer, 2022. 2, 3, 6, 8
276
+ [87] Kaifeng Zhang, Yang Fu, Shubhankar Borse, Hong Cai, Fatih Porikli, and Xiaolong Wang. Self-supervised geometric correspondence for category-level 6d object pose estimation in the wild. arXiv preprint arXiv:2210.07199, 2022. 3, 4, 7
277
+ [88] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the
278
+
279
+ IEEE conference on computer vision and pattern recognition, pages 586-595, 2018. 5
280
+ [89] Qian-Yi Zhou, Jaesik Park, and Vladlen Koltun. Open3D: A modern library for 3D data processing. arXiv:1801.09847, 2018. 4
2401.12xxx/2401.12592/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e21aefc21838b3021b5a848872a4da11b802652f105d55a8df78921bd4fc9a99
3
+ size 1009568
2401.12xxx/2401.12592/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12599/1b5a3a10-2f46-443f-9cb0-ad6eb32b9945_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12599/1b5a3a10-2f46-443f-9cb0-ad6eb32b9945_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12599/1b5a3a10-2f46-443f-9cb0-ad6eb32b9945_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98d60c899db9e0fdde6926a5fec94ea5ffc29826448f735a658147dd86f2b31c
3
+ size 4873264
2401.12xxx/2401.12599/full.md ADDED
@@ -0,0 +1,906 @@
1
+ # Revolutionizing Retrieval-Augmented Generation with Enhanced PDF Structure Recognition
2
+
3
+ Demiao LIN chatdoc.com
4
+
5
+ # Abstract
6
+
7
+ With the rapid development of Large Language Models (LLMs), Retrieval-Augmented Generation (RAG) has become a predominant method in the field of professional knowledge-based question answering. Presently, major foundation model companies have opened up Embedding and Chat API interfaces, and frameworks like LangChain have already integrated the RAG process. It appears that the key models and steps in RAG have been resolved, leading to the question: are professional knowledge QA systems now approaching perfection? This article finds that current mainstream methods depend on the premise of access to high-quality text corpora. However, since professional documents are mainly stored in PDFs, the low accuracy of PDF parsing significantly impacts the effectiveness of professional knowledge-based QA. We conducted an empirical RAG experiment across hundreds of questions from the corresponding real-world professional documents. The results show that ChatDOC (chatdoc.com), a RAG system equipped with a panoptic and pinpoint PDF parser, retrieves more accurate and complete segments, and thus produces better answers. Empirical experiments show that ChatDOC is superior to the baseline on nearly $47\%$ of questions, ties on $38\%$ of cases, and falls short on only $15\%$ of cases. This shows that we may revolutionize RAG with enhanced PDF structure recognition.
8
+
9
+ # 1 Introduction
10
+
11
+ Large language models (LLMs) are trained on data that predominantly come from publicly available internet sources, including web pages, books, news, and dialogue texts. This means that LLMs primarily rely on internet sources as their training data, which are vast, diverse, and easily accessible, allowing them to scale up their capabilities. However, in vertical applications, professional tasks require LLMs to utilize domain knowledge, which is unfortunately private and not part of their pre-training data.
12
+
13
+ A popular approach to equip LLMs with domain knowledge is Retrieval-Augmented Generation (RAG). The RAG framework answers a question in four steps: the user proposes a query, the system retrieves relevant content from private knowledge bases, combines it with the user query as context, and finally asks the LLM to generate an answer. This is illustrated in Figure 1 with a simple example. This process mirrors the typical cognitive process of encountering a problem: consulting relevant references and subsequently deriving an answer. In this framework, the pivotal component is the accurate retrieval of pertinent information, which is critical for the efficacy of the RAG model.
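To make the four steps concrete, here is a minimal sketch of the retrieve-then-generate loop. `embed` and `llm` are placeholders for an embedding model and a chat model; the sketch is illustrative and not tied to any particular vendor API.

```python
import numpy as np

def embed(text: str) -> np.ndarray:
    """Placeholder: call an embedding model/API and return a vector."""
    raise NotImplementedError

def retrieve(query: str, chunks: list[str], chunk_vectors: np.ndarray, top_k: int = 3) -> list[str]:
    """Step 2: return the top-k chunks most similar to the query (cosine similarity)."""
    q = embed(query)
    sims = chunk_vectors @ q / (np.linalg.norm(chunk_vectors, axis=1) * np.linalg.norm(q))
    return [chunks[i] for i in np.argsort(-sims)[:top_k]]

def answer(query: str, chunks: list[str], chunk_vectors: np.ndarray, llm) -> str:
    """Steps 3-4: combine retrieved context with the user query and ask the LLM."""
    context = "\n\n".join(retrieve(query, chunks, chunk_vectors))
    prompt = (
        "Answer the question using only the context below.\n\n"
        f"Context:\n{context}\n\nQuestion: {query}"
    )
    return llm(prompt)
```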
14
+
15
+ However, the process of retrieval from PDF files is fraught with challenges. Common issues include inaccuracies in text extraction and disarray in the row-column relationships of tables inside PDF files. Thus, before RAG, we need to convert large documents into retrievable content. The conversion involves several steps, as shown in Figure 2:
16
+
17
+ ![](images/6c40d26bc9b041d01a2b4f7feb1f96637f7ad01996c731732471fd90850ba0de.jpg)
18
+ Figure 1. The workflow of Retrieval-Augmented Generation (RAG).
19
+
20
+ ![](images/f0ec14dd819a609858a79d4aaf673f15275a7e2d15025851640868e9712ad201.jpg)
21
+ Figure 2. The process of converting PDFs into retrievable contents.
22
+
23
+ - Document Parsing & Chunking. It involves extracting paragraphs, tables, and other content blocks, then dividing the extracted content into chunks for subsequent retrieval.
24
+ - Embedding. It transforms text chunks into real-valued vectors and stores them in a database.
25
+
26
+ Since each of these steps can lead to information loss, the compounded losses can significantly impact the effectiveness of RAG's responses.
27
+
28
+ This paper primarily addresses the question of whether the quality of PDF parsing and chunking affects the outcomes of RAG. We will explore the challenges, methodologies, and real-world case studies pertaining to this issue. It will include an examination of two types of methods in this field, namely rule-based and deep learning-based methods, followed by empirical evaluations of their efficacy through practical examples.
29
+
30
+ # 2 PDF Parsing & Chunking
31
+
32
+ # 2.1 Challenges and Methods Overview
33
+
34
+ To humans, the cognitive process of perusing any document page is similar. When we read a page, characters are captured by our retinas. Then, in our brains, these characters are organized into
35
+
36
+ ![](images/940fee321ced48e98420b2308b7a19d314deb3aaf263d3be5d391bb6e60c56cb.jpg)
37
+ Figure 3. Two types of documents in the view of computers.
38
+
39
+ ![](images/302f96f5696527ea1734cd4642925ade372e0bfcb400a1948c61b3c840771374.jpg)
40
+
41
+ paragraphs, tables, and charts, and then understood or memorized. However, computers perceive information as binary codes. From their perspective, as illustrated in Figure 3, documents can be categorized into two distinct types:
42
+
43
+ - Tagged Documents: Examples include Microsoft Word and HTML documents, which contain special tags like `<p>` and `<table>` to organize the text into paragraphs, cells, and tables.
44
+ - Untagged Documents: Examples include PDFs, which store instructions on the placement of characters, lines, and other content elements on each document page. They focus on 'drawing' these basic content elements in a way that makes the document legible to human readers. They do not store any structural information of the document, like tables or paragraphs. Thus, untagged documents are only for human e-reading, but are unreadable by machines. This becomes evident when attempting to copy a table from a PDF into MS Word, where the original structure of the table is often completely lost.
45
+
46
+ However, Large Language Models (LLMs) exhibit proficiency in processing serialized text. Consequently, to enable LLMs to effectively manage untagged documents, a parser that organizes scattered characters into coherent texts with their structures is necessary. Ideally, a PDF Parser should exhibit the following key features:
47
+
48
+ - Document Structure Recognition: It should adeptly divide pages into different types of content blocks like paragraphs, tables, and charts. This ensures that the divided text blocks are complete and independent semantic units.
49
+ - Robustness in Complex Document Layout: It should work well even for document pages with complex layouts, such as multi-column pages, border-less tables, and even tables with merged cells.
50
+
51
+ Currently, there are two main types of PDF parsing methods: rule-based approaches and deep learning-based approaches. Among them, PyPDF, a widely-used rule-based parser, is a standard method in LangChain for PDF parsing. Conversely, our approach, ChatDOC PDF Parser (https://pdfparser.io/), is grounded in deep learning models. Next, we illustrate the difference between them by introducing the methods and delving into some real-world cases.
52
+
53
+ # 2.2 Rule-based Method: PyPDF
54
+
55
+ We first introduce the parsing & chunking workflow based on PyPDF. First, PyPDF serializes the characters in a PDF into one long sequence without document structure information. Then, this sequence is segmented into discrete chunks using a segmentation rule, such as the "RecursiveCharacterTextSplitter" function in LangChain. Specifically, this function divides the document based on a predefined list of separators, such as the newline character "\n". After this initial segmentation, adjacent chunks are merged only if the length of the combined chunks is not bigger than a predetermined limit of $N$ characters. Hereafter, we use "PyPDF" to refer to the method of document parsing and chunking using PyPDF+RecursiveCharacterTextSplitter, provided there is no contextual ambiguity. The maximum length of a chunk is set to 300 tokens in the following. Next, we use a case to observe the inherent behavior of PyPDF; a minimal sketch of the workflow is shown below.
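The sketch below illustrates this PyPDF+RecursiveCharacterTextSplitter workflow under a few assumptions: the file name is a placeholder, the splitter's import path may differ across LangChain versions, and `chunk_size` here counts characters, whereas the 300-token limit above would require passing a token-based length function.

```python
from pypdf import PdfReader
from langchain.text_splitter import RecursiveCharacterTextSplitter

# 1. Serialize the PDF's characters into one long sequence (no structure kept).
reader = PdfReader("report.pdf")  # placeholder file name
text = "\n".join(page.extract_text() for page in reader.pages)

# 2. Split on separators such as "\n", then merge adjacent pieces up to the limit.
splitter = RecursiveCharacterTextSplitter(
    chunk_size=300,      # maximum chunk length (characters in this sketch)
    chunk_overlap=0,
    separators=["\n\n", "\n", " ", ""],
)
chunks = splitter.split_text(text)
```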
56
+
57
+ Management Discussion and Analysis
58
+
59
60
+
61
62
+
63
+ Non-GAAP Measures
64
+
65
66
+
67
+ ![](images/2002a4392da420a79dc6111e9e1ada2a7ca5a1113867d77d09099409528b43f7.jpg)
68
+ Visualization of Chunking Result:
69
+
70
+ ![](images/8eed22375c98af2e2872f3a025dfdcb7c869afdbb1448c5986555af4f9eaf171.jpg)
71
+ Text Chunk
72
+ Figure 4. Parsing and chunking results of PyPDF on Case 1 (original document: [1]). Zoom in to see the details.
73
+
74
+ ![](images/b67dc75d29be589998471f35625c59e8f3936527ab0687a97c7e38bcf3e018c8.jpg)
75
+
76
+ # Chunking Result:
77
+
78
+ # [Chunk 1]
79
+
80
+ Year ended March 31, 2021\n
81
+
82
+ China\n
83
+
84
+ commerce(1)n
85
+
86
+ International\n
87
+
88
+ commerce
89
+
90
+ Local consumer n
91
+
92
+ services(1) Cainiao Cloud\n
93
+
94
+ Digital\n
95
+
96
+ media and
97
+
98
+ entertainment\nt
99
+
100
+ Innovation\nt
101
+
102
+ 1
103
+
104
+ others Unallocated(2) Consolidated in
105
+
106
+ RMB RMB RMB RMB RMB RMB RMB RMB n
107
+
108
+ (in millions, except percentages)
109
+
110
+ (in millions, except percentages) in 100s:
111
+
112
+ Revenue 501,379 48,831 33,746
113
+
114
+ 717,289n 1.()34107232(2361)(20107)(2064)
115
+
116
+ Income (Loss) from operations 197,232 (9,361) (29,197) (3,964)
117
+
118
+ [12,479)(10,321)(7,802)(34,430)89,678n
119
+
120
+ Add: Share-based compensation\n
121
+
122
+ # [Chunk 2]
123
+
124
+ expense 14,505 4,223 4,972 1,956 10,205 3,281 2,518 8,460 50,120n
125
+
126
+ Add: Amortization of intangible assets 1,922 206 7,852 1,195 23 922 83 224 12,427 n
127
+
128
+ Add: Anti-monopoly Fine(3) 18,228 18,228n Adjusted EBITA 213,659 (4,932) (16,373) (813) (2,251) (6,118) (5,201) (7,518) 170,453n
129
+
130
+ Adjusted EBITA margin 43% (10)% (46)% (2)% (4)% (20)% (22%) N/A/24%n
131
+
132
+ (1) Beginning on October 1, 2022, we reclassified the results of our Instant Supermarket Delivery (全能超市) business, which was \n previously reported under China commerce segment, to Local consumer services segment following the strategy refinement \n of Instant Supermarket Delivery business to focus on building customer mindshare for grocery delivery services through Ele.me \n platform. This reclassification conforms to the way that we manage and monitor segment performance. Comparative figures were \n reclassified to conform to this presentation.
133
+
134
+ # [Chunk 3]
135
+
136
+ (2) Unallocated expenses primarily relate to corporate administrative costs and other miscellaneous items that are not allocated to \n
137
+
138
+ individual segments. The goodwill impairment, and the equity-settled donation expense related to the allotment of shares to a \n charitable trust, are presented as unallocated items in the segment information because our management does not consider these \n as part of the segment operating performance measure.
139
+
140
+ (3) For a description of the relevant PRC Anti-monopoly investigation and administrative penalty decision, see "Business Risk" section.
141
+
142
+ Legal and Administrative Proceedings - PRC Anti-monopoly Investigation and Administrative Penalty Decision "m
143
+
144
+ Non-GAAP Measures\n
145
+
146
+ We use adjusted EBITDA (including adjusted EBITDA) in
147
+
148
+ Incorporated by reference to Exhibit 10.2 of the Registrant's Annual Report on Form 10-K for the year ended December 31, 2009 (the "Annual Report on Form 10-K" or "Report on Form 10-K").
149
+
150
+ mairn) n GAAPrtncoe n GAAPdhtd)
151
+
152
+ mth),non-GAAP net income, non-GAAP diluted
153
+
154
+ e
155
+
156
+ a non-GAAP financial measure, in evaluating our h
157
+
158
+ operating results and for financial and operational n
159
+
160
+ decision-making purposes. n
161
+
162
+ # [Chunk 4]
163
+
164
+ We believe that adjusted EBITDA, adjusted EBITA, non-GAAP net income and non-GAAP diluted earnings per share/ADS help identify underlying trends in our business that could otherwise be distorted by the \n effect of certain income or expenses that we include in income from operations, net income and diluted earnings per share/ADS. We believe that these non-GAAP measures provide useful information about our core operating results, enhance the overall understanding of our past performance and future prospects and allow for greater visibility with respect to key metrics used by our management in its financial and operational decision-making. We present three different income measures, namely adjusted EBITDA, adjusted EBITA and non-GAAP net income in order to provide more information and greater transparency to \n investors about our operating results. We consider free cash flow to be a liquidity measure that provides useful information to management and investors about the amount of cash generated by our business that can be used for strategic corporate transactions, including investing in our new business initiatives, making strategic investments and acquisitions and strengthening our balance sheet.
165
+
166
+ # [Chunk 5]
167
+
168
+ Adjusted EBITDA, adjusted EBITA, non-GAAP net \n income, non-GAAP diluted earnings per share/ADS \n and free cash flow should not be considered in \n isolation or construed as an alternative to income \n from operations, net income, diluted earnings per \n share/ADS, cash flows or any other measure of performance or as an indicator of our operating performance. These non-GAAP financial measures \n presented here do not have standardized meanings \n prescribed by U.S. GAAP and may not be comparable \n to similarly titled measures presented by other \n companies. Other companies may calculate similarly \n titled measures differently, limiting their usefulness as \n comparative measures to our data. \n 112 Alibaba Group Holding Limited \n Management Discussion and Analysis \n
169
+
170
+ Case 1 in Figure 4 is a page that mixes a table with double-column text, and the boundary between the two is difficult to distinguish. Rows in the middle of the table have no horizontal rules, making them hard to recognize. In addition, the paragraphs use both a single-column layout (for the notes below the table) and a double-column layout (for the paragraphs in the lower part of the page).
171
+
172
+ The parsing and chunking result of PyPDF is shown in Figure 4. In the "3 Visualization" part, we can see that PyPDF correctly recognizes the one-column and two-column layout parts of the page. However, PyPDF has three shortcomings:
173
+
174
+ 1. It cannot recognize the boundaries of paragraphs and tables. It wrongly splits the table into two parts and merges the second part with the subsequent paragraph into one chunk.
175
+
176
+ PyPDF seems to be good at detecting paragraph boundaries, as it does not split one paragraph into multiple chunks. But it does not actually parse paragraph boundaries. In the “2 Chunking Result” part we can see that each visual text line in the page is parsed as a line ending with “\n”, and there is no special marker at the end of a paragraph. Paragraphs are chunked correctly only because we use the special separator “.\n”, which treats a line ending with a period as the likely end of a paragraph (a minimal sketch of this separator-based chunking appears after this list). However, this heuristic does not hold in many cases.
177
+
178
+ 2. It cannot recognize the structure within a table. In the "2 Chunking Result" part, in Chunk 1, the upper part of the table is represented as a sequence of short phrases, where a cell may be split
179
+
180
+ into multiple lines (e.g., the cell "China commerce(1)") and some adjacent cells may be arranged in one line (e.g., the third to the fifth cells in the second line, "services(1) Cainiao Cloud"). So, the structure of the table is completely destroyed. If this chunk is retrieved for RAG, the LLM is unable to extract any meaningful information from it. The situation is similar for Chunk 2. Moreover, the table headers exist only in Chunk 1, so the lower part of the table in Chunk 2 becomes meaningless.
181
+
182
+ 3. It cannot recognize the reading order of the content. The last line of Chunk 5, "Management Discussion and Analysis", is actually located at the top of the page, but it is parsed as the last sentence in the result. This is because PyPDF parses the document by the storage order of the characters rather than by their reading order, which can produce chaotic results for complex layouts.
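+ The following is a minimal sketch of the separator-based chunking mentioned in point 1, assuming LangChain's `RecursiveCharacterTextSplitter`; the sample text, chunk size, and separator list are illustrative rather than the exact configuration used in the experiments of Section 3 (see Table 1).

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

# PyPDF-style output: one "\n"-terminated string per visual text line,
# with no explicit paragraph or table boundaries.
parsed_text = (
    "Revenue 501,379 48,851 35,746 37,258 60,558 31,186 2,311 717,289\n"
    "Income (Loss) from operations 197,232 (9,361) (29,197) (3,964)\n"
    "We use adjusted EBITDA, adjusted EBITA and non-GAAP net income.\n"
    "We believe that these non-GAAP measures provide useful information.\n"
)

# ".\n" is tried first, so a line ending with a period is treated as the
# likely end of a paragraph; plain "\n" and whitespace are weaker fallbacks.
# Table rows rarely end with a period, so a table can be split arbitrarily.
splitter = RecursiveCharacterTextSplitter(
    separators=[".\n", "\n", " ", ""],
    chunk_size=300,        # measured in characters here; tokens in Table 1
    chunk_overlap=0,
    length_function=len,
)

for i, chunk in enumerate(splitter.split_text(parsed_text), start=1):
    print(f"[Chunk {i}]\n{chunk}\n")
```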
183
+
184
+ The result on another case, Case 2, which features a complex cross-page table, is shown in Figure 15 in the Appendix.
185
+
186
+ # 2.3 Deep Learning-based Method: ChatDOC PDF Parser
187
+
188
+ Next, we turn our attention to the method of deep learning-based parsing, exemplified by our ChatDOC PDF Parser. The ChatDOC PDF Parser (https://pdfparser.io/) has been trained on a corpus of over ten million document pages. Following the method in [2], it incorporates a sequence of sophisticated steps, including:
189
+
190
+ 1. OCR for text positioning and recognition;
191
+ 2. Physical document object detection;
192
+ 3. Cross-column and cross-page trimming;
193
+ 4. Reading order determination;
194
+ 5. Table structure recognition;
195
+ 6. Document logical structure recognition.
196
+
197
+ Readers might refer to [2] for the details of these steps. After parsing, we use the paragraphs and tables as basic blocks, and merge adjacent blocks until reaching the token limit to form a chunk.
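+ A minimal sketch of this block-merging step is shown below, under the assumption that the parser returns an ordered list of paragraph and table blocks with a token count per block; the `Block` type and helper are illustrative, not ChatDOC's actual API.

```python
from dataclasses import dataclass

@dataclass
class Block:
    kind: str    # "paragraph", "table", "chart", ...
    text: str
    tokens: int  # token count from any tokenizer, e.g. tiktoken

def merge_blocks(blocks: list[Block], limit: int = 300) -> list[str]:
    """Greedily merge adjacent blocks into chunks of at most `limit` tokens."""
    chunks: list[list[Block]] = []
    current: list[Block] = []
    used = 0
    for block in blocks:
        # Close the current chunk if adding this block would exceed the budget.
        if current and used + block.tokens > limit:
            chunks.append(current)
            current, used = [], 0
        current.append(block)
        used += block.tokens
    if current:
        chunks.append(current)
    return ["\n".join(b.text for b in chunk) for chunk in chunks]
```

+ Because merging only happens at block boundaries, a table larger than the budget still ends up in a chunk of its own rather than being cut in the middle, which matches how the whole table is kept together in Chunk 1 of Figure 6.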
198
+
199
+ ChatDOC PDF Parser is designed to consistently deliver parsing results in JSON or HTML formats, even for challenging PDF files. It parses a document into content blocks, where each block is a table, paragraph, chart, or another type. For tables, it outputs the text in each table cell and also indicates which cells are merged into one. Moreover, for documents with hierarchical headings, it outputs the hierarchical structure of the document. In summary, the parsed result is like a well-organized Word file. Figure 5 shows a scanned page and its parsing result. The left side displays the document and the recognized content blocks (with different colored rectangles). The right side shows the parsing result in JSON or HTML format. Readers might refer to [3] for a live demo of this parsing result.
200
+
201
+ Then, we check the result of ChatDOC PDF Parser on Case 1 in Figure 6. It successfully addresses the three shortcomings of PyPDF.
202
+
203
+ 1. As shown in the "3 Visualization" part, it recognizes the mixed layout and correctly sets the whole table as a separate chunk. For paragraphs, as shown in Chunk 2 in the "2 Chunking Result" part, text lines in the same paragraph are merged together, making the text easier to understand.
204
+ 2. In the "2 Chunking Result" part, in Chunk 1, we can see the table is represented in the markdown format, which preserves the table's internal structure. Additionally, ChatDOC PDF Parser can recognize the merged cells inside a table. Since the markdown format cannot represent merged cells, we copy the full text of a merged cell into each of the original cells it covers (a short sketch of this step follows this list). As you can see, in Chunk 1 the text "Year ended March 31, 2021" repeats 9 times, which stands for one merged cell spanning the original 9 cells.
205
+ 3. Moreover, "Management Discussion and Analysis" and "112 Alibaba Group Holding Limited" are recognized as the page header and footer, respectively, and they are placed at the top and bottom of the parsing result, which is consistent with the reading order.
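+ A short sketch of the cell-duplication step from point 2, assuming a simple grid representation of table cells with row and column spans (this representation is illustrative, not ChatDOC's actual output schema):

```python
def table_to_markdown(cells: list[dict], n_rows: int, n_cols: int) -> str:
    """Render a table as markdown, copying the text of each merged cell into
    every grid position it covers, since markdown cannot express merged cells.

    Each cell is assumed to look like:
        {"row": 0, "col": 0, "row_span": 1, "col_span": 9, "text": "..."}
    """
    grid = [["" for _ in range(n_cols)] for _ in range(n_rows)]
    for cell in cells:
        for r in range(cell["row"], cell["row"] + cell["row_span"]):
            for c in range(cell["col"], cell["col"] + cell["col_span"]):
                grid[r][c] = cell["text"]
    rows = ["|" + "|".join(row) + "|" for row in grid]
    rows.insert(1, "|" + "|".join(["---"] * n_cols) + "|")  # header separator
    return "\n".join(rows)

# A header cell spanning 9 columns is emitted 9 times, as in Chunk 1 of Figure 6.
cells = [
    {"row": 0, "col": 0, "row_span": 1, "col_span": 9,
     "text": "Year ended March 31, 2021"},
    {"row": 1, "col": 0, "row_span": 1, "col_span": 1, "text": "Revenue"},
]
print(table_to_markdown(cells, n_rows=2, n_cols=9))
```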
206
+
207
+ The result on another case, Case 2, which features a complex cross-page table, is shown in Figure 16 in the Appendix.
208
+
209
+ ![](images/39719cebb6c6c9bd8729fcbd7bb316e26ad80adc9d982a254fa17029b2f995ea.jpg)
210
+ Figure 5. An example illustrating the results of the ChatDOC PDF Parser. Zoom in to see the details.
211
+
212
+ # 3 Experiments on the Impact of PDF Recognition on RAG
213
+
214
+ Back to the main topic of this paper: does the way a document is parsed and chunked affect the quality of answers provided by a RAG system? To answer this, we carried out a systematic experiment to assess the impact.
215
+
216
+ # 3.1 Quantitative Evaluation of RAG Answer Quality
217
+
218
+ # 3.1.1 Settings
219
+
220
+ We compared two RAG systems as listed in Table 1:
221
+
222
+ - ChatDOC: uses ChatDOC PDF Parser to parse the document and leverage the structure information for chunking.
223
+ - Baseline: uses PyPDF to parse the document and the RecursiveCharacterTextSplitter function for chunking.
224
+
225
+ Other components, like embedding, retrieval, and QA, are the same for both systems.
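+ For concreteness, a minimal sketch of the Baseline pipeline under the Table 1 settings is shown below. It uses the classic LangChain interfaces (`PyPDFLoader`, `RecursiveCharacterTextSplitter`, `OpenAIEmbeddings`, `ChatOpenAI`); the file name, separator list, and the retriever's `k` are illustrative, with `k` chosen so that roughly 3,000 tokens of context are retrieved.

```python
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS

# 1. Parse with PyPDF, the rule-based default in LangChain.
pages = PyPDFLoader("annual_report.pdf").load()

# 2. Chunk with a separator-based splitter (about 300 tokens per chunk;
#    chunk_size counts characters unless a token length_function is given).
splitter = RecursiveCharacterTextSplitter(
    chunk_size=300, chunk_overlap=0, separators=[".\n", "\n", " ", ""]
)
chunks = splitter.split_documents(pages)

# 3. Embed and index with text-embedding-ada-002.
index = FAISS.from_documents(
    chunks, OpenAIEmbeddings(model="text-embedding-ada-002")
)

# 4. Retrieve a handful of chunks (~3,000 tokens) and answer with GPT-3.5-Turbo.
qa = RetrievalQA.from_chain_type(
    llm=ChatOpenAI(model_name="gpt-3.5-turbo"),
    retriever=index.as_retriever(search_kwargs={"k": 10}),
)
print(qa.run("How much cargo can I carry at most in terms of size?"))
```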
226
+
227
+ # 3.1.2 Data Preparation
228
+
229
+ For our experiment, we assembled a dataset that closely mirrors real-world conditions, comprising 188 documents from various fields. Specifically, this collection includes 100 academic papers, 28 financial reports, and 60 documents from other categories such as textbooks, courseware, and legislative materials.
230
+
231
+ We then gathered 800 manually generated questions via crowd-sourcing. After careful screening, we removed low-quality questions and got 302 questions for evaluation. These questions were divided into two categories (as shown in Table 2):
232
+
233
+ - Extractive questions are those that can be answered with direct excerpts from the documents. Usually, they require pinpoint answers because they seek specific information. We found when
234
+
235
+ Management Discussion and Analysis
236
+
237
+ <table><tr><td rowspan="3"></td><td colspan="8">Systolic BP</td></tr><tr><td>One hand</td><td>Second hand</td><td>Oxfordshire</td><td>Cockerside</td><td>Glasgow</td><td>Total</td><td>Indirectly measured BP</td><td>Indirectly measured CVD</td></tr><tr><td>mmHg</td><td>mmHg</td><td>mmHg</td><td>mmHg</td><td>mmHg</td><td>mmHg</td><td>mmHg</td><td>mmHg</td></tr><tr><td colspan="9">Cardiovascular disease</td></tr><tr><td>Normal</td><td>32.19</td><td>40.61</td><td>52.76</td><td>52.08</td><td>63.08</td><td>51.96</td><td>237</td><td>−17.38</td></tr><tr><td>Ischemic heart disease</td><td>19.21</td><td>23.91</td><td>20.47</td><td>20.44</td><td>12.91</td><td>22.25</td><td>39.83</td><td>24.68</td></tr><tr><td>Ischaemic heart disease and type 2 diabetes</td><td>1.42</td><td>4.23</td><td>1.92</td><td>1.95</td><td>1.02</td><td>1.29</td><td>2.59</td><td>6.40</td></tr><tr><td>Ischaemic heart disease and type 2 diabetes</td><td>1.02</td><td>1.96</td><td>1.02</td><td>1.95</td><td>1.02</td><td>1.29</td><td>1.29</td><td>1.29</td></tr><tr><td>Diabetes</td><td>23.89</td><td>14.02</td><td>14.75</td><td>8.71</td><td>2.21</td><td>21.47</td><td>12.76</td><td>12.76</td></tr><tr><td>Diabetic Heart Disease</td><td>4.5%</td><td>10%</td><td>4.8%</td><td>2%</td><td>4%</td><td>10%</td><td>2.5%</td><td>1%</td></tr></table>
238
+
239
240
+
241
242
+
243
+ # Non-GAAP Measures
244
+
245
246
+
247
+ Alibaba Group Holding Limited
248
+
249
250
+
251
+ ![](images/99d70ea0304eb79fbbe2bb3f550ff32ede63093771916b32b1f780e96ac720af.jpg)
252
+
253
+ # 3 Visualization of Chunking Result:
254
+
255
+ Management Discussion and Analysis
256
+
257
+ <table><tr><td rowspan="3"></td><td colspan="9">Nystatin/5-HT2R</td></tr><tr><td>Dra-1 (mmol/L)</td><td>Venlafaxine (mmol/L)</td><td>Amlodipine (mg/dL)</td><td>Glut.</td><td>Dra</td><td>Time of remission</td><td>Mean dose (mg/dL)</td><td>Oxaliplatin</td><td>Oxaliplatin</td></tr><tr><td>RR</td><td>RR</td><td>RR</td><td>RR</td><td>RR</td><td>RR</td><td>RR</td><td>RR</td><td>RR</td></tr><tr><td>Metformin</td><td>30.79</td><td>40.0</td><td>32.76</td><td>32.08</td><td>32.09</td><td>31.28</td><td>–</td><td>–</td><td>77.26</td></tr><tr><td>Levothyroxine</td><td>19.25</td><td>18.81</td><td>(17.47)</td><td>(18.44)</td><td>(18.25)</td><td>(19.03)</td><td>79.00</td><td>(40.6)</td><td>(69.8)</td></tr><tr><td>d-d-3-tert-Caprolactone</td><td>14.55</td><td>12.5</td><td>(10.5)</td><td>19.5</td><td>(2.5)</td><td>12.9</td><td>23.9</td><td>6.60</td><td>(6.10)</td></tr><tr><td>d-d-3-tert-Caprolactone</td><td>13.82</td><td>19.36</td><td>–</td><td>(19.7)</td><td>–</td><td>4.0</td><td>8.9</td><td>2.47</td><td>(2.47)</td></tr><tr><td>α-Lipoic acid*</td><td>–</td><td>–</td><td>–</td><td>–</td><td>–</td><td>–</td><td>–</td><td>0.23</td><td>(0.3)</td></tr><tr><td>Cephalosporin</td><td>2.689</td><td>13.69</td><td>13.64</td><td>8.75</td><td>12.77</td><td>9.14</td><td>5.24</td><td>1.67</td><td>(1.67)</td></tr><tr><td>Abacavirine (IU/day)</td><td>16%</td><td>20%</td><td>20%</td><td>20%</td><td>10%</td><td>12.5%</td><td>12.5%</td><td>14%</td><td>24%</td></tr></table>
258
+
259
260
+
261
+ Paragraph
262
+ Table
263
+ #
264
+ Header & Footer
265
+
266
+ # 2 Chunking Result:
267
+
268
+ # [Chunk 1]
269
+
270
+ <Page Header> Management Discussion and Analysis\n
271
+ |Year ended March 31, 2021 |Year ended March 31, 2021 |Year ended March 31, 2021 |Year ended March 31, 2021 |Year ended March 31, 2021 |Year ended March 31, 2021 |Year ended March 31, 2021 |Year ended March 31, 2021 |Year ended March 31, 2021 |
272
+ | | China commerce(1) | International commerce | Local consumer services(1) | Cainiao | Cloud | Digital media and entertainment | Innovation initiatives and others | Unallocated(2) | Consolidated | n | RMB | RMB | RMB | RMB | RMB | RMB | RMB | n
273
+ | (in millions, except percentages) | (in millions, except percentages) | (in millions, except percentages) | (in millions, except percentages) | (in millions, except percentages) | (in millions, except percentages) | (in millions, except percentages) | (in millions, except percentages) | n
274
+ | Revenue | 501,379 | 48,851 | 35,746 | 37,258 | 60,558 | 31,186 | 2,311 | — | 717,289 | n |
275
+ | --- | --- | --- | --- | --- | --- | --- | --- |
276
+ | Income (Loss) from operations | 197,232 | (9,361) | (29,197) | (3,964) | (12,479) | (10,321) | (7,802) | (34,430) | 89,678 | n |
277
+ | Add: Share-based compensation expense | 14,505 | 4,223 | 4,972 | 1,956 | 10,205 | 3,281 | 2,518 | 8,460 | 50,120 | n |
278
+ | Add: Amortization of intangible assets | 1,922 | 206 | 7,852 | 1,195 | 23 | 922 | 83 | (224) | 12,427 | n |
279
+ | Add: Anti-monopoly Fine(3) | —— | —— | —— | —— | —— | 18,228 | 18,228 | n |
280
+ | --- | --- | --- | --- | --- | --- | --- | --- |
281
+ | Adjusted EBITA | 213,659 | (4,932) | (16,373) | (813) | (2,251) | (6,118) | (5,201) | (7,518) | 170,453 | n |
282
+ | Adjusted EBITA margin | 43% | (10)% | (46)% | (2)% | (4)% | (20)% | (225)% | N/A | 24% | n
283
+
284
+ # [Chunk 2]
285
+
286
+ (1) Beginning on October 1, 2022, we reclassified the results of our Instant Supermarket Delivery (全能超市) business, which was previously reported under China commerce segment, to Local consumer services segment following the strategy refinement of Instant Supermarket Delivery business to focus on building customer mindshare for grocery delivery services through Ele.me platform. This reclassification conforms to the way that we manage and monitor segment performance. Comparative figures were reclassified to conform to this presentation.
287
+
288
+ (2) Unallocated expenses primarily relate to corporate administrative costs and other miscellaneous items that are not allocated to individual segments. The goodwill impairment, and the equity-settled donation expense related to the allotment of shares to a charitable trust, are presented as unallocated items in the segment information because our management does not consider these as part of the segment operating performance measure.\n
289
+
290
+ (3) For a description of the relevant PRC Anti-monopoly investigation and administrative penalty decision, see "Business Overview — Legal and Administrative Proceedings — PRC Anti-monopoly Investigation and Administrative Penalty Decision."
291
+
292
+ Non-GAAP Measures\n
293
+
294
+ We use adjusted EBITDA (including adjusted EBITDA margin), adjusted EBITA (including adjusted EBITA margin), non-GAAP net income, non-GAAP diluted earnings per share/ADS and free cash flow, each a non-GAAP financial measure, in evaluating our operating results and for financial and operational decision-making purposes.
295
+
296
+ # [Chunk 3]
297
+
298
+ We believe that adjusted EBITDA, adjusted EBITA, non-GAAP net income and non-GAAP diluted earnings per share/ADS help identify underlying trends in our business that could otherwise be distorted by the effect of certain income or expenses that we include in income from operations, net income and diluted earnings per share/ADS. We believe that these non-GAAP measures provide useful information about our core operating results, enhance the overall understanding of our past performance and future prospects and allow for greater visibility with respect to key metrics used by our management in its financial and operational decision-making. We present three different income measures, namely adjusted EBITDA, adjusted EBITA and non-GAAP net income in order to provide more information and greater transparency to investors about our operating results.
299
+ We consider free cash flow to be a liquidity measure that provides useful information to management and investors about the amount of cash generated by our business that can be used for strategic corporate transactions, including investing in our new business initiatives, making strategic investments and acquisitions and strengthening our balance sheet.
300
+
301
+ # [Chunk 4]
302
+
303
+ Adjusted EBITDA, adjusted EBITA, non-GAAP net income, non-GAAP diluted earnings per share/ADS and free cash flow should not be considered in isolation or construed as an alternative to income from operations, net income, diluted earnings per share/ADS, cash flows or any other measure of performance or as an indicator of our operating performance. These non-GAAP financial measures presented here do not have standardized meanings prescribed by U.S. GAAP and may not be comparable to similarly titled measures presented by other companies. Other companies may calculate similarly titled measures differently, limiting their usefulness as comparative measures to our data. \n <Page Footer> 112 Alibaba Group Holding Limited
304
+
305
+ Figure 6. Parsing and chunking results of ChatDOC PDF Parser on Case 1 (original document: [4]). Zoom in to see the details.
306
+
307
+ using an LLM for evaluation, it may fail to distinguish the subtle but important differences between answers, so we relied on human assessment. We used a 0-10 scale to rate the results. An annotator is given the retrieved content and answer of both methods and rates the two methods at the same time. We show the retrieved content because the answer usually cannot be evaluated without the document content, and we show the two methods together to promote detailed comparison, especially on partially correct results.
308
+
309
+ - Comprehensive analysis questions necessitate synthesizing information from multiple sources and aspects and making a summary. Since the answers are lengthy and require a comprehensive understanding of the given document contents, we found them difficult and time-consuming for humans to evaluate. Hence, we used GPT-4 to evaluate the answer quality, scoring from 1 to 10. We also rate the results of the two methods in one request, but we only provide the retrieved content, without an answer, because the answers are lengthy (and thus costly) compared with extractive questions, and better retrieved content implies a better answer (since the same LLM is used). Each pair of results is scored 4 times to avoid bias [5], and the average value is used. Specifically, for a pair of contents $(A, B)$ to be compared for the same question, we feed both
310
+
311
+ <table><tr><td>Steps ↓</td><td>ChatDOC
312
+ (PDFlux-LLM)</td><td>Baseline
313
+ (PyPDF-LLM)</td></tr><tr><td>PDF Parsing</td><td>PDFlux
314
+ (Deep Learning-based)</td><td>PyPDF
315
+ (Rule-based, default method in LangChain)</td></tr><tr><td>Chunking</td><td>≈300 tokens per chunk + chunking via paragraphs, tables etc.</td><td>≈300 tokens per chunk + separator</td></tr><tr><td>Embedding</td><td colspan="2">text-embedding-ada-002</td></tr><tr><td>Retrieval</td><td colspan="2">≤3000 tokens</td></tr><tr><td>QA</td><td colspan="2">GPT3.5-Turbo</td></tr></table>
316
+
317
+ Table 1. Settings of two RAG systems: ChatDOC and Baseline.
318
+
319
+ <table><tr><td></td><td>Extractive Questions</td><td>Comprehensive Analysis Questions</td></tr><tr><td>Number</td><td>86</td><td>216</td></tr><tr><td rowspan="5">Question Examples</td><td>1. Locate the content of section ten, what is the merged operating cost in the income statement?</td><td>1. Summarize and analyze the profit forecast and valuation in the research report.</td></tr><tr><td>2. What is the specific content of table 1.</td><td>2. Fully report the research approach of this text.</td></tr><tr><td>3. Extract financial data and profit forecast tables.</td><td>3. Analyze the long-term debt-paying ability based on this report.</td></tr><tr><td>4. Find the long-term loan table.</td><td>4. How is the feasibility analysis done in this article?</td></tr><tr><td></td><td>5. Give a simple example to explain the encoding steps and algorithm in the paper.</td></tr><tr><td>Evaluation</td><td>Human Evaluation</td><td>GPT 4 evaluation</td></tr></table>
320
+
321
+ Table 2. The questions in the dataset are categorized into extractive questions and comprehensive analysis questions.
322
+
323
+ $A$ and $B$ to GPT-4 to compare and score them twice. We also flip their order, feed $B$ and $A$ to GPT-4, and repeat the request twice.
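+ A sketch of this order-flipped, repeated scoring is given below; the prompt wording and the response format are illustrative assumptions, and only the protocol (two orders, each requested twice, four scores per side averaged) follows the description above.

```python
from statistics import mean
from openai import OpenAI

client = OpenAI()

def score_pair(question: str, first: str, second: str) -> tuple[float, float]:
    """Ask GPT-4 to rate two retrieved contents for the same question (1-10)."""
    prompt = (
        f"Question: {question}\n\n"
        f"Content 1:\n{first}\n\nContent 2:\n{second}\n\n"
        "Rate how well each content supports answering the question on a 1-10 "
        "scale. Reply with exactly two numbers separated by a comma."
    )
    reply = client.chat.completions.create(
        model="gpt-4", messages=[{"role": "user", "content": prompt}]
    ).choices[0].message.content
    s1, s2 = (float(x) for x in reply.split(","))
    return s1, s2

def evaluate(question: str, content_a: str, content_b: str) -> tuple[float, float]:
    scores_a, scores_b = [], []
    for _ in range(2):                                      # repeat twice
        a, b = score_pair(question, content_a, content_b)   # order (A, B)
        scores_a.append(a); scores_b.append(b)
        b, a = score_pair(question, content_b, content_a)   # flipped (B, A)
        scores_a.append(a); scores_b.append(b)
    return mean(scores_a), mean(scores_b)                   # 4 scores per side
```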
324
+
325
+ # 3.1.3 Results
326
+
327
+ # Results of Extractive Questions
328
+
329
+ The results of extractive questions are shown in Table 3. Out of the 86 extractive questions, ChatDOC performed better than the baseline on 42 cases, tied on 36 cases, and was inferior to Baseline on only 8 cases.
330
+
331
+ The distribution of rating scores is further detailed in Figure 7. In the distribution table, $T_{ij} = k$ means there are $k$ questions whose answer by ChatDOC is rated as $i$ and the answer by Baseline is rated as $j$ . Cases where ChatDOC scores higher than the baseline (ChatDOC wins) are represented in the lower-left half, while cases where the baseline scores higher are in the upper-right. Notably, most samples with a clear winner are in the lower-left half, indicating ChatDOC's superiority. Impressively, ChatDOC achieved full marks (10) in nearly half of these cases, amounting to a total of 40.
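+ The per-category tallies in Table 3 and the score matrix in Figure 7 follow directly from the paired ratings; a minimal sketch, assuming the ratings are available as (ChatDOC score, Baseline score) pairs:

```python
from collections import Counter

def summarize(pairs: list[tuple[float, float]]) -> dict:
    """pairs holds one (ChatDOC score, Baseline score) tuple per question."""
    wins = sum(c > b for c, b in pairs)     # ChatDOC wins
    ties = sum(c == b for c, b in pairs)
    losses = sum(c < b for c, b in pairs)   # Baseline wins
    # matrix[(i, j)] = number of questions rated i for ChatDOC and j for Baseline.
    matrix = Counter(pairs)
    return {"wins": wins, "ties": ties, "losses": losses, "matrix": matrix}

print(summarize([(10, 7), (8, 8), (6, 9)]))  # -> 1 win, 1 tie, 1 loss
```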
332
+
333
+ Results of Comprehensive Analysis Questions
334
+
335
+ <table><tr><td></td><td>Total</td><td>ChatDOC wins</td><td>Tie</td><td>Baseline wins</td></tr><tr><td rowspan="2">Extractive Questions</td><td rowspan="2">86</td><td>42</td><td>36</td><td>8</td></tr><tr><td>(49%)</td><td>(42%)</td><td>(9%)</td></tr><tr><td rowspan="2">Comprehensive Questions</td><td rowspan="2">216</td><td>101</td><td>79</td><td>36</td></tr><tr><td>(47%)</td><td>(37%)</td><td>(17%)</td></tr><tr><td rowspan="2">Summary</td><td rowspan="2">302</td><td>143</td><td>115</td><td>44</td></tr><tr><td>(47%)</td><td>(38%)</td><td>(15%)</td></tr></table>
336
+
337
+ Table 3. The comparison result between ChatDOC and Baseline.
338
+
339
+ ![](images/8b9818d75390da99f446a045ecbfe16d45b024490acf8fefac0105087024f93f.jpg)
340
+ Figure 7. Distribution of rating scores of extractive questions.
341
+
342
+ ![](images/21bc28b66a569287ef0e0731291c92735dd9ba700601ff87b82567b3137952e0.jpg)
343
+ Figure 8. Distribution of rating scores of comprehensive analysis questions.
344
+
345
+ The results of comprehensive analysis questions are shown in Table 3. Out of the 216 comprehensive analysis questions, ChatDOC performed better than the baseline on 101 cases, tied on 79 cases, and was inferior to Baseline on only 36 cases.
346
+
347
+ In Figure 8, the score distribution chart for these questions shows a denser concentration in the lower-left half. This indicates that ChatDOC frequently outperforms the baseline. Notably, the majority of ChatDOC's retrievals scored between 8.5 and 9.5, demonstrating a high level of retrieval quality.
348
+
349
+ In summary, ChatDOC significantly outperforms the Baseline, which is primarily attributable to its superior PDF parsing method.
350
+
351
+ # 3.2 Case Studies of RAG
352
+
353
+ To make the comparison more tangible, we show some cases where ChatDOC demonstrates its superiority.
354
+
355
+ Case A: ChatDOC
356
+
357
+ # Query: How much cargo can I carry at most in terms of size?
358
+
359
+ # Retrieved Text Chunk:
360
+
361
+ [Chunk 1]
362
+
363
+ <table><tr><td>Cargo Volume | \n</td></tr><tr><td>| |-|n</td></tr><tr><td>| Front Trunk | 3.1 cu ft (88 L) |n</td></tr><tr><td>| Behind 2nd row | 19.8 cu ft (561 L) |n</td></tr><tr><td>| Maximum total cargo volume with 5 passengers</td></tr><tr><td>| 22.9 cu ft (649 L) |n</td></tr></table>
364
+
365
+ # Original Page Containing the Retrieved Chunk:
366
+
367
+ ![](images/a6ef295bbb218877e2f64d1c36062bb9fcec909c97c4184e8262f32e6bc99847.jpg)
368
+ Figure 9. ChatDOC's result on finding info in Tesla's Manual (original document: [6]).
369
+
370
+ # Answer:
371
+
372
+ Based on the document, the maximum cargo capacity in terms of size for the Model 3 is as follows:
373
+
374
+ - Front Trunk: 3.1 cu ft (88 L)
375
+ - Behind 2nd row: 19.8 cu ft (561 L)
376
+ - Maximum total cargo volume with 5 passengers: 22.9 cu ft (649 L)
377
+
378
+ # Retrieved Chunk Highlighted:
379
+
380
+ ![](images/0da734da9f4566a39b13792512efef16ce7b01772ece7ccde4eb02940b776b3e.jpg)
381
+
382
+ # Case A: Baseline (PyPDF + LangChain)
383
+
384
+ # Query: How much cargo can I carry at most in terms of size?
385
+
386
+ # Retrieved Text Chunk: [Chunk 1]
387
+
388
+ Hip Room Front Rear 53.4 in 52.4 in 1,356 mm 1,331 mm Cargo Volume Front Trunk 3.1 cu ft (88 L) Behind 2nd row 19.8 cu ft (561 L) Maximum total cargo volume with 5 passengers 22.9 cu ft (649 L)
389
+
390
+ # Original Page Containing the Retrieved Chunk:
391
+
392
+ ![](images/ed8a45b2e048bd15086d4d535368767db0c8df1d1c20b704b7eba3963375749e.jpg)
393
+ Figure 10. Baseline's result on finding info in Tesla's Manual (original document: [6]).
394
+
395
+ # Answer:
396
+
397
+ The maximum cargo size capacity for your Model 3 is not explicitly mentioned in the provided document snippets.
398
+
399
+ # Retrieved Chunk Highlighted:
400
+
401
+ ![](images/6dba696208627c9e8ad6202536fe07aeb0656ec6851d424d0c16110ab6b6705f.jpg)
402
+
403
+ # Query: What is the dependent variable in table 8?
404
+
405
+ # Retrieved Text Chunks:
406
+
407
+ [Chunk 1]
408
+
409
+ TABLE ESTIMATES OF RETURNS TO COGNITIVE ABILITIES, NONCOGNITIVE ABILITIES, AND FACTOR LOADINGS OF UNOBSERVEDHETEROGENEITY IN HIGH SCHOOL AND COLLEGE
410
+
411
+ [Chunk 2]
412
+
413
+ |Cognitive ability| Cognitive ability| Cognitive ability| Noncognitive ability| Noncognitive ability| Unobs. heterogeneity| Unobs. heterogeneity $\mathbf{\mu}_{\mathrm{N}}$
414
+
415
+ - - - - - - - - - - - - - - - - -
416
+
417
+ |Year|Estimate|Std. error|Estimate|Std. error|Estimate|Std. error\nHigh school|High school|High school|High school|High school|High school|High school|n1968|0.038|0.021|0.064|0.024|0.000|0.030|n
418
+
419
+ | 2000 | 0.047 | 0.025 | 0.091 | 0.028 | 0.225 | 0.023 | n
420
+
421
+ | College| College| College| College| College| College| College|n | 1968|0.085|0.038|0.082|0.045|-0.129|0.054|n
422
+
423
+ | 2000 | 0.151 | 0.039 | 0.100 | 0.037 | 0.079 | 0.037 | n
424
+
425
+ # Answer:
426
+
427
+ The dependent variable in Table 8 is the estimate of returns to cognitive ability, noncognitive ability, and factor loadings of unobserved heterogeneity in high school and college.
428
+
429
+ # Original Page Containing the Retrieved Chunks (Correct):
430
+
431
+ FLAVIO CUNHA, FATIH KARAHAN, AND ILTON SOARES : 71
432
+
433
+ TABLE 8
434
+
435
+ ESTIMATES OF RETURNS TO COGNITIVE ABILITIES, NONCOGNITIVE ABILITIES, AND FACTOR LOADINGS OF UNOBSERVED HETEROGENEITY IN HIGH SCHOOL AND COLLEGE
436
+
437
+ <table><tr><td rowspan="2">Year</td><td colspan="2">Cognitive ability</td><td colspan="2">Noncognitive ability</td><td colspan="2">Uses: heterogeneity</td></tr><tr><td>Estimate</td><td>Std. error</td><td>Estimate</td><td>Std. error</td><td>Estimate</td><td>Std. error</td></tr><tr><td>High school</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>1968</td><td>0.038</td><td>0.021</td><td>0.064</td><td>0.024</td><td>0.000</td><td>0.030</td></tr><tr><td>1970</td><td>0.028</td><td>0.015</td><td>0.081</td><td>0.018</td><td>0.025</td><td>0.024</td></tr><tr><td>1972</td><td>0.061</td><td>0.015</td><td>0.073</td><td>0.018</td><td>0.018</td><td>0.024</td></tr><tr><td>1974</td><td>0.056</td><td>0.016</td><td>0.073</td><td>0.019</td><td>0.117</td><td>0.024</td></tr><tr><td>1976</td><td>0.028</td><td>0.016</td><td>0.075</td><td>0.019</td><td>0.173</td><td>0.024</td></tr><tr><td>1978</td><td>0.041</td><td>0.017</td><td>0.061</td><td>0.019</td><td>0.134</td><td>0.022</td></tr><tr><td>1980</td><td>0.037</td><td>0.015</td><td>0.057</td><td>0.018</td><td>0.196</td><td>0.020</td></tr><tr><td>1982</td><td>0.047</td><td>0.023</td><td>0.059</td><td>0.022</td><td>0.390</td><td>0.021</td></tr><tr><td>1984</td><td>0.048</td><td>0.022</td><td>0.078</td><td>0.023</td><td>0.283</td><td>0.020</td></tr><tr><td>1986</td><td>0.037</td><td>0.021</td><td>0.099</td><td>0.023</td><td>0.267</td><td>0.020</td></tr><tr><td>1988</td><td>0.039</td><td>0.021</td><td>0.088</td><td>0.024</td><td>0.217</td><td>0.020</td></tr><tr><td>1990</td><td>0.056</td><td>0.023</td><td>0.070</td><td>0.027</td><td>0.236</td><td>0.022</td></tr><tr><td>1992</td><td>0.077</td><td>0.023</td><td>0.057</td><td>0.027</td><td>0.213</td><td>0.022</td></tr><tr><td>1996</td><td>0.051</td><td>0.025</td><td>0.052</td><td>0.028</td><td>0.206</td><td>0.023</td></tr><tr><td>2000</td><td>0.047</td><td>0.025</td><td>0.091</td><td>0.028</td><td>0.225</td><td>0.023</td></tr><tr><td>College</td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>1968</td><td>0.085</td><td>0.038</td><td>0.082</td><td>0.045</td><td>−0.129</td><td>0.054</td></tr><tr><td>1970</td><td>0.047</td><td>0.027</td><td>0.089</td><td>0.032</td><td>−0.094</td><td>0.041</td></tr><tr><td>1972</td><td>0.018</td><td>0.025</td><td>0.069</td><td>0.026</td><td>−0.030</td><td>0.030</td></tr><tr><td>1974</td><td>0.051</td><td>0.025</td><td>0.101</td><td>0.026</td><td>−0.045</td><td>0.033</td></tr><tr><td>1976</td><td>0.041</td><td>0.026</td><td>0.062</td><td>0.027</td><td>0.024</td><td>0.034</td></tr><tr><td>1978</td><td>0.069</td><td>0.026</td><td>0.111</td><td>0.026</td><td>−0.060</td><td>0.031</td></tr><tr><td>1980</td><td>0.031</td><td>0.026</td><td>0.068</td><td>0.025</td><td>0.015</td><td>0.029</td></tr><tr><td>1982</td><td>0.052</td><td>0.029</td><td>0.067</td><td>0.025</td><td>−0.044</td><td>0.028</td></tr><tr><td>1984</td><td>0.076</td><td>0.033</td><td>0.107</td><td>0.029</td><td>−0.138</td><td>0.030</td></tr><tr><td>1986</td><td>0.080</td><td>0.032</td><td>0.138</td><td>0.025</td><td>−0.064</td><td>0.031</td></tr><tr><td>1988</td><td>0.105</td><td>0.031</td><td>0.112</td><td>0.028</td><td>0.137</td><td>0.032</td></tr><tr><td>1990</td><td>0.118</td><td>0.034</td><td>0.112</td><td>0.031</td><td>0.106</td><td>0.034</td></tr><tr><td>1992</td><td>0.128</td><td>0.037</td><td>0.133</td><td>0.034</td><td>0.082</td><td>0.031</td></tr><tr><td>1996</td><td>0.131</td><td>0.039</td><td>0.065</td><td>0.037</td><td>0.089</td><td>0.037</td></tr><tr><td>2000</td><td>0.151</td><
td>0.039</td><td>0.100</td><td>0.037</td><td>0.079</td><td>0.037</td></tr></table>
438
+
439
+ 4.4 The Evolution of the College Premium and the Importance of Compositional Changes
440
+
441
+ In this section, we quantify the importance of compositional changes in measuring the college premium. The literature has focused on the observed wage differentials between college and high school graduates. However, such differences are confounded by the fact that college graduates are a selected sample of the population. Depending on the market structure, one would expect individuals with higher expected returns to end up with college degrees. This creates a wedge between the return of obtaining a college degree and the observed wage differences among these groups. Moreover, as the importance of skills changes over time, the composition of college graduates may change, too. Therefore, the analysis would not only be biased in the level of the returns to college but also in its evolution over time. The benefit of our approach is the ability to correct for such compositional changes.
442
+
443
+ # Retrieved Chunks Highlighted:
444
+
445
+ FLAVIO CUNHA, FATIH KARAHAN, AND ILTON SOARES : 71
446
+
447
+ TABLE8
448
+
449
+ ESTIMATES OF RETURNS TO COGNITIVE ABILITIES, NONCOGNITIVE ABILITIES, AND FACTOR LOADINGS OF UNOBSERVED HETEROGENEITY IN HIGH SCHOOL AND COLLEGE
450
+
451
+ <table><tr><td colspan="3">Cognitive ability</td><td colspan="3">Noncognitive ability</td><td colspan="2">Urobis heterogeneity</td></tr><tr><td>Year</td><td>Estimate</td><td>Std error</td><td>Estimate</td><td>Std error</td><td>Estimate</td><td>Std error</td><td></td></tr><tr><td colspan="8">High school</td></tr><tr><td>1968</td><td>0.038</td><td>0.021</td><td>0.064</td><td>0.024</td><td>0.000</td><td>0.030</td><td></td></tr><tr><td>1970</td><td>0.028</td><td>0.015</td><td>0.081</td><td>0.018</td><td>0.025</td><td>0.024</td><td></td></tr><tr><td>1972</td><td>0.061</td><td>0.015</td><td>0.073</td><td>0.018</td><td>0.018</td><td>0.024</td><td></td></tr><tr><td>1974</td><td>0.056</td><td>0.016</td><td>0.073</td><td>0.019</td><td>0.117</td><td>0.024</td><td></td></tr><tr><td>1976</td><td>0.044</td><td>0.016</td><td>0.085</td><td>0.019</td><td>0.173</td><td>0.024</td><td></td></tr><tr><td>1978</td><td>0.041</td><td>0.017</td><td>0.061</td><td>0.019</td><td>0.134</td><td>0.022</td><td></td></tr><tr><td>1980</td><td>0.037</td><td>0.015</td><td>0.057</td><td>0.018</td><td>0.196</td><td>0.020</td><td></td></tr><tr><td>1982</td><td>0.027</td><td>0.015</td><td>0.069</td><td>0.022</td><td>0.309</td><td>0.020</td><td></td></tr><tr><td>1984</td><td>0.048</td><td>0.022</td><td>0.078</td><td>0.023</td><td>0.283</td><td>0.020</td><td></td></tr><tr><td>1987</td><td>0.037</td><td>0.021</td><td>0.099</td><td>0.023</td><td>0.267</td><td>0.020</td><td></td></tr><tr><td>1988</td><td>0.039</td><td>0.021</td><td>0.108</td><td>0.024</td><td>0.217</td><td>0.020</td><td></td></tr><tr><td>1990</td><td>0.056</td><td>0.023</td><td>0.070</td><td>0.027</td><td>0.236</td><td>0.022</td><td></td></tr><tr><td>1992</td><td>0.077</td><td>0.023</td><td>0.057</td><td>0.027</td><td>0.213</td><td>0.022</td><td></td></tr><tr><td>1996</td><td>0.051</td><td>0.025</td><td>0.092</td><td>0.028</td><td>0.206</td><td>0.023</td><td></td></tr><tr><td>2000</td><td>0.047</td><td>0.025</td><td>0.091</td><td>0.028</td><td>0.225</td><td>0.023</td><td></td></tr><tr><td 
colspan="8">College</td></tr><tr><td>1968</td><td>0.085</td><td>0.038</td><td>0.082</td><td>0.045</td><td>−0.129</td><td>0.054</td><td></td></tr><tr><td>1970</td><td>0.047</td><td>0.027</td><td>0.089</td><td>0.032</td><td>−0.094</td><td>0.041</td><td></td></tr><tr><td>1972</td><td>0.048</td><td>0.025</td><td>0.089</td><td>0.026</td><td>−0.030</td><td>0.033</td><td></td></tr><tr><td>1974</td><td>0.051</td><td>0.025</td><td>0.101</td><td>0.026</td><td>−0.045</td><td>0.033</td><td></td></tr><tr><td>1976</td><td>0.041</td><td>0.026</td><td>0.062</td><td>0.027</td><td>0.024</td><td>0.034</td><td></td></tr><tr><td>1978</td><td>0.049</td><td>0.026</td><td>0.101</td><td>0.026</td><td>−0.060</td><td>0.031</td><td></td></tr><tr><td>1980</td><td>0.031</td><td>0.026</td><td>0.068</td><td>0.025</td><td>0.015</td><td>0.029</td><td></td></tr><tr><td>1982</td><td>0.052</td><td>0.029</td><td>0.067</td><td>0.025</td><td>−0.044</td><td>0.028</td><td></td></tr><tr><td>1984</td><td>0.076</td><td>0.033</td><td>0.107</td><td>0.029</td><td>−0.138</td><td>0.030</td><td></td></tr><tr><td>1986</td><td>0.080</td><td>0.032</td><td>0.138</td><td>0.025</td><td>−0.064</td><td>0.031</td><td></td></tr><tr><td>1988</td><td>0.105</td><td>0.031</td><td>0.112</td><td>0.028</td><td>0.137</td><td>0.032</td><td></td></tr><tr><td>1990</td><td>0.118</td><td>0.034</td><td>0.112</td><td>0.031</td><td>0.106</td><td>0.034</td><td></td></tr><tr><td>1992</td><td>0.114</td><td>0.037</td><td>0.113</td><td>0.034</td><td>0.082</td><td>0.034</td><td></td></tr><tr><td>1996</td><td>0.131</td><td>0.039</td><td>0.065</td><td>0.037</td><td>0.089</td><td>0.037</td><td></td></tr><tr><td>2000</td><td>0.151</td><td>0.039</td><td>0.100</td><td>0.037</td><td>0.079</td><td>0.037</td><td></td></tr></table>
452
+
453
+ 4.4 The Evolution of the College Premium and the Importance of Compositional Changes
454
+
455
+ In this section, we quantify the importance of compositional changes in measuring the college premium. The literature has focused on the observed wage differentials between college and high school graduates. However, such differences are confounded by the fact that college graduates are a selected sample of the population. Depending on the market structure, one would expect individuals with higher expected returns to end up with college degrees. This creates a wedge between the return of obtaining a college degree and the observed wage differences among these groups. Moreover, as the importance of skills changes over time, the composition of college graduates may change, too. Therefore, the analysis would not only be biased in the level of the returns to college but also in its evolution over time. The benefit of our approach is the ability to correct for such compositional changes.
456
+
457
+ ![](images/ae8970ea5d8fc440148da5b8e5273fe20b31922d844919bb0a69637a3a406fb0.jpg)
458
+ Figure 11. ChatDOC's result on locating a specific table in a research paper (original document: [7]).
459
+
460
+ # Paragraph
461
+
462
+ ![](images/c91380cb6796834b6a62e7b542f90fd9e3da1f97cae57a00b681b2caf5c05c31.jpg)
463
+
464
+ # Table
465
+
466
+ # 3.2.1 Case A - Find specific information in the Tesla manual
467
+
468
+ Case A involves a query about Tesla's user manual, specifically targeting cargo volume information. For this query, ChatDOC and Baseline perform differently, as shown in Figure 9 and Figure 10. The figures show the most relevant chunk(s) retrieved and the LLM's answer. They also show the document page where the relevant chunk(s) are located, with those chunks highlighted. In this case, both systems locate the table, but the text they feed to the LLM differs, so the answers differ. Specifically,
469
+
470
+ - ChatDOC recognizes the table structure and interprets the text in markdown format (as shown in the "Retrieved Text Chunks" part), which makes it easier for the language model to comprehend; a small serialization sketch follows at the end of this case.
471
+
472
+ # Query: What is the dependent variable in table 8?
473
+
474
+ # Retrieved Text Chunk:
475
+
476
+ # [Chunk 1]
477
+
478
+ The estimates and standard errors can be obtained upon request. coefficients on a quartic in periods, where a period is defined to be calendar year minus 1968. From this figure, we can see that the year dummies in the two different sectors have followed roughly a parallel pattern, except during the late 1960s and early 1970s when they go in opposite directions, increasing in the college sector and declining in the high school sector. In Table 8, we display the point estimates and standard errors of the returns to ability and the factor loadings associated with the unobserved heterogeneity factor. Because
479
+
480
+ # Answer:
481
+
482
+ The dependent variable in Table 8 is not explicitly mentioned in the provided document snippets.
483
+
484
+ # Original Page Containing the Retrieved Chunk (Wrong):
485
+
486
+ ![](images/c5fe907a5de01adc0956c8b5ca7647e902082660d3a6b5618249915636322a8c.jpg)
487
+ Figure 12. Baseline's result on locating a specific table in a research paper (original document: [7]).
488
+
489
+ # Retrieved Chunk Highlighted:
490
+
491
+ ![](images/1e3a99a5bb973ff4cb24c4b622cb6a21c3c1bc8196643592bae6845081884282.jpg)
492
+
493
+ - Baseline erroneously merges the target table and the table above it into one chunk and does not preserve the table structure. Hence, the text in the chunk is hard to understand (as shown in the "Retrieved Text Chunks" part), and the LLM can only answer with "not explicitly mentioned".
494
+
495
+ This case underscores the effectiveness of ChatDOC's parsing method, particularly in handling tables and presenting them in an LLM-friendly format.
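+
+ To make the contrast concrete, the snippet below sketches the difference between feeding the LLM a flattened table (roughly what the baseline's chunk looks like) and the same cells serialized as a markdown table. The row data, helper names, and placeholder values are illustrative assumptions only; this is not ChatDOC's actual code and not the manual's actual figures.
+
+ ```python
+ # Minimal sketch: the same table cells, flattened vs. serialized as markdown.
+ # Data, helper names, and values are placeholders, not taken from ChatDOC
+ # or from the Tesla manual.
+ rows = [
+     ["Specification", "Value"],
+     ["Cargo volume (rear)", "xx.x cu ft"],
+     ["Cargo volume (front trunk)", "x.x cu ft"],
+ ]
+
+ # Baseline-style chunk: cells run together as plain text, structure lost.
+ flattened = " ".join(cell for row in rows for cell in row)
+
+ # Structure-aware chunk: header row, separator, and body rows in markdown.
+ def to_markdown(table):
+     header, *body = table
+     lines = ["| " + " | ".join(header) + " |",
+              "| " + " | ".join("---" for _ in header) + " |"]
+     lines += ["| " + " | ".join(r) + " |" for r in body]
+     return "\n".join(lines)
+
+ def build_prompt(chunk, question):
+     return f"Context:\n{chunk}\n\nQuestion: {question}\nAnswer:"
+
+ question = "What is the rear cargo volume?"
+ print(build_prompt(flattened, question))
+ print(build_prompt(to_markdown(rows), question))
+ ```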
496
+
497
+ # 3.2.2 Case B - Research paper
498
+
499
+ In Case B, the user's query concerns a specific research paper. It asks the system to identify "Table 8" in the paper and enumerate all the dependent variables it lists. Both the title and the content of the table are needed to identify these variables. Figure 11 and Figure 12 show how ChatDOC and Baseline perform in this case.
500
+
501
+ - ChatDOC effectively retrieves the entire table, encompassing both its title and content. This comprehensive retrieval allows for an accurate response to the query.
502
+
503
+ ![](images/29317406e62ec75957585c50b9f269f977d292b1b7e59201b58d1a9c4c5196c5.jpg)
504
+ Figure 13. An example in which ChatDOC encounters the ranking and token limit issues.
505
+
506
+ ![](images/48bda043344498bf7e9598b43770e2e5823a6f6a46ed829320160bc4c2178495.jpg)
507
+ Figure 14. An example in which ChatDOC fails to retrieve the relevant table (original document: [8]).
508
+
509
+ - Baseline does not retrieve the true "Table 8", but only a text chunk below "Table 7" (since that chunk contains the text "Table 8"). Due to the baseline's segmentation strategy, the content of "Table 8" and other content on the same page are combined into one large chunk. This chunk, containing a mix of unrelated content, has a low similarity score and consequently does not show up in the retrieval results; the similarity sketch at the end of this case illustrates the effect.
510
+
511
+ This case highlights ChatDOC's superior ability to handle complex document structures and its impact on retrieving specific segments for accurate responses.
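+
+ The ranking failure in this case comes down to embedding similarity: a chunk that buries the target table inside unrelated page content tends to score lower against the query than a focused, table-only chunk. The sketch below illustrates that scoring step; sentence-transformers is used purely as a stand-in open-source embedding model, and the chunk texts are placeholders, so neither the model nor the texts reflect the evaluated systems.
+
+ ```python
+ # Toy illustration of retrieval ranking: embed the query and candidate
+ # chunks, then pick the chunk with the highest cosine similarity.
+ # sentence-transformers is only a stand-in embedding model here.
+ import numpy as np
+ from sentence_transformers import SentenceTransformer
+
+ model = SentenceTransformer("all-MiniLM-L6-v2")
+
+ query = "What is the dependent variable in Table 8?"
+ chunks = [
+     "Table 8 with its title and rows (placeholder for a focused, table-only chunk).",
+     "Several unrelated paragraphs from the same page with the Table 8 text "
+     "buried in the middle (placeholder for the baseline's oversized chunk).",
+ ]
+
+ embeddings = model.encode([query] + chunks)
+ q, c = embeddings[0], embeddings[1:]
+ scores = c @ q / (np.linalg.norm(c, axis=1) * np.linalg.norm(q))
+ print("similarities:", scores.round(3), "retrieved chunk index:", int(np.argmax(scores)))
+ ```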
512
+
513
+ # 3.3 Discussion on Limitations
514
+
515
+ While ChatDOC generally performs well, there are instances where its retrieval quality is not as good as Baseline's. We observe two patterns in these cases.
516
+
517
+ Ranking and Token Limit Issue. If ChatDOC retrieves a large but irrelevant table first, it uses up the context window and prevents access to the relevant information, as the example in Figure 13 shows. This happens mainly because the embedding model does not rank the relevant chunk as the top result. It may be addressed by a better embedding model, or by a more sophisticated way of handling large tables and paragraphs, such as retaining only the relevant part of a table for the LLM.
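+
+ One mitigation mentioned above is to budget tokens when assembling the prompt, so a single oversized chunk cannot crowd out everything else. Below is a minimal sketch of such packing; it approximates tokens by word counts and uses placeholder chunks, whereas a real system would use the target model's tokenizer and smarter table truncation.
+
+ ```python
+ # Minimal sketch of token-budget-aware context packing. Word counts stand
+ # in for real token counts, and the per-chunk cap is an arbitrary choice.
+ def n_tokens(text: str) -> int:
+     return len(text.split())  # crude stand-in for a real tokenizer
+
+ def pack_context(ranked_chunks, budget=3000, per_chunk_cap=1200):
+     """Greedily add ranked chunks, truncating any single oversized chunk
+     so one huge table cannot consume the whole context window."""
+     packed, used = [], 0
+     for chunk in ranked_chunks:
+         words = chunk.split()
+         if len(words) > per_chunk_cap:      # e.g. a very large table
+             chunk = " ".join(words[:per_chunk_cap]) + " ..."
+         cost = n_tokens(chunk)
+         if used + cost > budget:
+             continue                        # skip and try smaller chunks
+         packed.append(chunk)
+         used += cost
+     return "\n\n".join(packed)
+
+ ranked = ["row " * 5000, "relevant paragraph " * 40, "another paragraph " * 30]
+ print(n_tokens(pack_context(ranked)))
+ ```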
518
+
519
+ Fine Segmentation Drawback. Figure 14 shows a case that requires retrieving the whole table together with its title. However, ChatDOC wrongly recognizes the title as a regular paragraph, so the title and the table are stored in different chunks. This leads to retrieving only part of the required information, namely the table's title and footnotes, but not the key content within the table. Improving table title recognition could address this issue.
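+
+ The failure described here is ultimately a chunking decision: a caption line such as "Table 5. ..." is classified as an ordinary paragraph, so it lands in a different chunk from the table body. A small heuristic pass that attaches a caption-like line to the table that follows it is sketched below; the block structure and the caption regex are assumptions for illustration, not ChatDOC's parser internals.
+
+ ```python
+ import re
+
+ # Sketch of a title-attachment pass over parsed blocks. Each block is a
+ # (kind, text) pair from an upstream layout parser; this data model and
+ # the caption pattern are illustrative assumptions.
+ CAPTION_RE = re.compile(r"^(table|figure)\s+\d+[.:]", re.IGNORECASE)
+
+ def attach_table_titles(blocks):
+     merged, i = [], 0
+     while i < len(blocks):
+         kind, text = blocks[i]
+         nxt = blocks[i + 1] if i + 1 < len(blocks) else None
+         # A caption-like paragraph right before a table becomes part of
+         # that table's chunk, so title and content are retrieved together.
+         if kind == "paragraph" and CAPTION_RE.match(text) and nxt and nxt[0] == "table":
+             merged.append(("table", text + "\n" + nxt[1]))
+             i += 2
+         else:
+             merged.append((kind, text))
+             i += 1
+     return merged
+
+ blocks = [
+     ("paragraph", "Table 8. Placeholder title of the table."),
+     ("table", "| Year | Estimate | Std error |\n| --- | --- | --- |"),
+ ]
+ print(attach_table_titles(blocks))
+ ```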
520
+
521
+ # 4 Applications in ChatDOC
522
+
523
+ We apply the enhanced PDF structure recognition framework in ChatDOC (chatdoc.com), an AI file-reading assistant that helps users summarize long documents, explain complex concepts, and find key information in seconds.
524
+
525
+ In terms of reliability and accuracy, it ranks at the top among ChatPDF products. Here is what makes ChatDOC special:
526
+
527
+ - Mastery over tables: Simply select any table or text, and dive right into the details.
528
+ - Multi-file conversation: Talk about lots of documents at the same time, without worrying about how many pages each one has.
529
+ - Citation-backed responses: All answers are supported by direct quotes pulled from the source documents.
530
+ - Many file types: Works seamlessly with scanned files, ePub, HTML, and docx formats.
531
+
532
+ We are still working on publishing the API of the ChatDOC PDF Parser. Please subscribe to the waitlist via pdfparser.io.
533
+
534
+ # 5 Conclusion
535
+
536
+ Large Language Models (LLMs) are capable of producing more accurate responses when assisted by a PDF parser that effectively extracts and integrates structured information from documents into the prompts. This process enhances the quality and relevance of the data fed into the models, thereby improving their output.
537
+
538
+ In the future, we will compare more deep-learning-based document parsing methods to build a more comprehensive understanding of the relationship between RAG quality and document parsing quality. Initial experiments show that some open-source PDF parsing methods cannot meet the bar for high-quality RAG.
539
+
540
+ # References
541
+
542
+ [1] Alibaba Group Holding Limited. Fiscal year annual report 2023. https://static.alibabagroup.com/reports/fy2023/ar/ebook/en/index.html, 2023.
543
+ [2] Rongyu Cao, Hongwei Li, Ganbin Zhou, and Ping Luo. Towards document panoptic segmentation with pinpoint accuracy: Method and evaluation. In 16th International Conference on Document Analysis and Recognition, pages 3-18, 2021.
544
+
545
+ [3] ChatDOC Team. https://pdfparser.io/.
546
+ [4] Daisho Microline Holdings Limited. Fiscal year annual report 2022. https://www1.hkexnews.hk/listedco/listconews/sehk/2022/0626/2022062600094.pdf, 2022.
547
+ [5] Peiyi Wang, Lei Li, Liang Chen, Dawei Zhu, Binghuai Lin, Yunbo Cao, Qi Liu, Tianyu Liu, and Zhifang Sui. Large language models are not fair evaluators, 2023.
548
+ [6] Tesla Inc. Model 3 owner's manual. https://manual-directory.com/manual/2023-tesla-model-3-owners-manual/, 2023.
549
+ [7] Flavio Cunha, Fatih Karahan, and Ilton Soares. Returns to skills and the college premium. Journal of Money, Credit and Banking, 43:39-86, 2011. https://doi.org/10.1111/j.1538-4616.2011.00410.x.
550
+ [8] Tom S. Vogl. Height, skills, and labor market outcomes in mexico. NBER Working Paper Series, 2012. https://www.nber.org/system/files/working_papers/w18318/w18318.pdf.
551
+
552
+ # A More Cases on PDF Parsing & Chunking
553
+
554
+ Case 2 in Figure 15 features a large borderless table that spans two pages. Figure 15 shows PyPDF's result. A close inspection reveals that tables are represented merely as sequences of text, making them challenging to interpret, and that the table is scattered across three chunks. The results on these two cases demonstrate that a rule-based method like PyPDF tends to dissect a document without a true understanding of its content structure. As a result, tables are often torn apart and paragraphs become jumbled, leading to a disjointed and confusing representation of the original document.
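+
+ For reference, the kind of extract-then-split pipeline this baseline represents can be reproduced in a few lines: pull plain text per page with pypdf, concatenate, and cut fixed-size character windows. Any table in the text is inevitably flattened and may be split mid-row. The file path below is a placeholder and the window sizes are arbitrary choices, not the exact settings used in the experiments.
+
+ ```python
+ # Sketch of a rule-based extract-then-split pipeline in the spirit of the
+ # PyPDF baseline: plain-text extraction followed by fixed-size chunks.
+ # The path is a placeholder; chunk sizes are arbitrary.
+ from pypdf import PdfReader
+
+ def naive_chunks(pdf_path, chunk_chars=1000, overlap=100):
+     reader = PdfReader(pdf_path)
+     text = "\n".join(page.extract_text() or "" for page in reader.pages)
+     step = chunk_chars - overlap
+     # Fixed-size windows ignore layout, so tables and paragraphs can be
+     # cut anywhere, which is exactly the failure mode shown in Figure 15.
+     return [text[i:i + chunk_chars] for i in range(0, len(text), step)]
+
+ chunks = naive_chunks("announcement.pdf")  # placeholder path
+ print(len(chunks), repr(chunks[0][:200]) if chunks else "")
+ ```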
555
+
556
+ For the ChatDOC PDF Parser, shown in Figure 16, the parsing outcome is notably different. It not only preserves the document structure but also segments the document in a way that maintains its inherent meaning. In this case, the table that spans two pages is merged into one chunk, with its title at the beginning, so the information in this chunk is self-contained. If this chunk is retrieved for RAG, the LLM can digest the useful information within it.
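+
+ A structure-aware chunker can instead keep the two-page table together: when a table block at the top of one page continues a table block at the bottom of the previous page, their rows are concatenated into a single chunk (with the title attached as sketched earlier). The block dictionaries below are an assumed data model for illustration, not ChatDOC's implementation, and the continuation check is deliberately simplified.
+
+ ```python
+ # Sketch of cross-page table merging during chunking. Blocks are assumed
+ # to be dicts with "kind", "page", and "text" from a layout parser; the
+ # continuation heuristic here is intentionally minimal.
+ def merge_cross_page_tables(blocks):
+     chunks = []
+     for block in blocks:
+         prev = chunks[-1] if chunks else None
+         # A table starting a new page directly after a table on the
+         # previous page is treated as a continuation and merged.
+         if (prev and block["kind"] == "table" and prev["kind"] == "table"
+                 and block["page"] == prev["page"] + 1):
+             prev["text"] += "\n" + block["text"]
+         else:
+             chunks.append(dict(block))
+     return chunks
+
+ blocks = [
+     {"kind": "paragraph", "page": 1, "text": "CONSOLIDATED STATEMENT OF PROFIT OR LOSS"},
+     {"kind": "table", "page": 1, "text": "| Revenue | 106,471 | 67,886 |"},
+     {"kind": "table", "page": 2, "text": "| Finance costs | (2,244) | (7,655) |"},
+ ]
+ print(len(merge_cross_page_tables(blocks)))  # the two table parts merge into one chunk
+ ```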
557
+
558
+ # Case 2: PyPDF
559
+
560
+ # 1 Original Pages:
561
+
562
+ Hong Kong Exchange and Clearing Limited and The Stock Exchange of Hong Kong Limited take no responsibility for the contents of this announcement, make no representation as to its accuracy or completeness and expressly disclaim any liability arising directly or indirectly in respect from or in reliance upon the whole or part of the contents of this announcement.
563
+
564
+ ![](images/ba2cdbd729dab00fd49594a4c097e5632158b89acdd211bce3828b73794e2d61.jpg)
565
+ Figure 15. Parsing and chunking results of PyPDF on Case 2 (original document: [4]).
566
+
567
+ DAISHO MICROLINE HOLDINGS LIMITED
+
+ (Stock Code: 0567)
+
+ ANNOUNCEMENT OF ANNUAL RESULTS
+
+ CONSOLIDATED STATEMENT OF PROFIT OR LOSS, Year ended 31 March 2022
+
+ (The remaining line items of the consolidated statement are shown in the page image above.)
624
+
625
+ # 2 Chunking Result:
626
+
627
+ # [Chunk 1]
628
+
629
+ 1\n
630
+ Hong Kong Exchanges and Clearing Limited and The Stock Exchange of Hong Kong n
631
+ Limited take no responsibility for the contents of this announcement, make no in
632
+ representation as to its accuracy or completeness and expressly disclaim any liability in
633
+ whatsoever for any loss howsoever arising from or in reliance upon the whole or any \n
634
+ part of the contents of this announcement\n
635
+
636
+ # [Chunk 2]
637
+
638
+ DAISHO MICROLINE HOLDINGS LIMITED\n
639
+ (Incorporated in Bermuda with limited liability)
640
+ (Stock Code: 0567)\n\n
641
+ ANNUANCEMENT OF ANNUAL RESULTSn FOR THE YEAR ENDED 31 MARCH 2022s
642
+ The Board of Directors (the "Board") of Daisho Microline Holdings Limited (the n
643
+ "Company") announces the preliminary consolidated results of the Company and its
644
+ subsidiaries (the "Group") for the year ended 31 March 2022 together with the in
645
+ comparative figures of the previous corresponding year as follows:in
646
+ CONSOLIDATED STATEMENT OF PROFIT OR LOSS $\mathbf{\Pi}_{\mathrm{n}}$
647
+ Year ended 31 March 2022\n
648
+ 2022.2021n
649
+ Note HK\ $000 HK\$ 000\n
650
+ Continuing operation
651
+ Revenue 3 106,471 6
652
+ Cost of sales (98,670) (55,605)n
653
+ Gross profit 7,801,12,281\n
654
+ Other income 57,341,4616\n
655
+ Selling and distribution expenses (5,083) (3,401)n
656
+ Administrative expenses (31,157) (35,422)\n
657
+ Other operating expenses (480) (527)r
658
+ Fair value gain on derivative financial instruments - 101\n
659
+ Reversal of (Provision for) impairment loss on\n
660
+
661
+ # [Chunk 3]
662
+
663
+ trade receivables, net 10(b) 1.808 (2.859)n
664
+ Impairment loss on other receivables - (1,780)
665
+ Impairment loss on property, plant and equipment 15 (5,010)
666
+ (2,314)n
667
+ Change in fair value of contingent consideration $\backslash$
668
+ receivable-3.311\n
669
+ Gain on bargain purchase arising from the \n
670
+ acquisition of subsidiaries - 1.197\n
671
+ Loss on early redemption of a promissory note - (4.512)n
672
+ Finance costs 6 (2,244) (7,655)
673
+
674
+ # [Chunk 4]
675
+
676
+ 2n
677
+ 2022.2021n
678
+ Note HK$'000 HK$'000\n
679
+ Loss before taxation from continuing operations 6 (27.024)
680
+ (36.964)n
681
+ Income tax expense 7 (444) (532)n
682
+ Loss for the year from continuing operations (27,468)
683
+ (37,496)n
684
+ Discontinued operation\n
685
+ Loss for the year from discontinued operation 11 (1,660)
686
+ (29.480)n
687
+ Loss for the year (29,128) (66,976)n
688
+ From continuing and discontinued operations\n
689
+ Loss per share\n
690
+ Basic (Hong Kong cents) 8 (2.80) (10.38)\n
691
+ Diluted (Hong Kong cents) 8 (2.80) (10.38)n
692
+ From continuing operations\n
693
+ Loss per share\n
694
+ Basic (Hong Kong cents) 8 (2.64) (5.81)\n
695
+ Diluted (Hong Kong cents) 8 (2.64) (5.81)n
696
+
697
+ # 3 Visualization of Chunking Result:
698
+
699
+ Hong Kong Exchange and Clearing Limited and The Stock Exchange of Hong Kong Limited take no responsibility for the contents of this announcement, make no representation as to its accuracy or completeness and expressly disclaim any liability arising directly or indirectly in respect from or in reliance upon the whole or part of the contents of this announcement.
700
+
701
+ ![](images/f4143e8d2b6294a866b21affb62e9bcc2cde0abe1a0bf71af409d1b9d5ccd4f4.jpg)
702
+ Zoom in to see the details.
703
+
704
+ (Screenshots of the announcement pages from [4], with the text chunks produced by PyPDF highlighted; the line items correspond to the consolidated statement of profit or loss shown above.)
+
+ # Text Chunk
767
+
768
+ # Case 2: ChatDOC PDF Parser
769
+
770
+ # 1 Original Pages:
771
+
772
+ Hong Kong Exchanges and Clearing Limited and The Stock Exchange of Hong Kong Limited take no responsibility for the contents of this announcement, make no representation as to its accuracy or completeness and expressly disclaim any liability whatsoever for any loss whatsoever arising from or in reliance upon the whole or any part of the contents of this announcement.
+
+ # DAISHO MICROLINE HOLDINGS LIMITED
+
+ # ANNOUNCEMENT OF ANNUAL RESULTS FOR THE YEAR ENDED 31 MARCH 2022
+
+ CONSOLIDATED STATEMENT OF PROFIT OR LOSS, Year ended 31 March 2022
784
+
785
+ <table><tr><td></td><td>Note</td><td>2022/ HKS 2009</td><td>2021/ HKS 2008</td></tr><tr><td colspan="4">Continuing operations</td></tr><tr><td>Revenue</td><td>3</td><td>106,471</td><td>67,886</td></tr><tr><td>Cost of sales</td><td></td><td>(98,670)</td><td>(55,605)</td></tr><tr><td>Gross profit</td><td></td><td>7,801</td><td>12,281</td></tr><tr><td>Other income</td><td>5</td><td>7,341</td><td>4,816</td></tr><tr><td>Selling and distribution expenses</td><td></td><td>(1,600)</td><td>(1,401)</td></tr><tr><td>Administrative expenses</td><td></td><td>(31,157)</td><td>(35,422)</td></tr><tr><td>Other operating income</td><td></td><td>(40)</td><td>(5)</td></tr><tr><td>Fair value gain on derivative financial instruments</td><td></td><td>—</td><td>101</td></tr><tr><td>Impairment loss on first impairment loss on trade receivables, net, net</td><td></td><td>1,808</td><td>(2,859)</td></tr><tr><td>Losses on other receivables</td><td></td><td>(1,170)</td><td>(1,790)</td></tr><tr><td>Impairment loss on property, plant and equipment</td><td></td><td>15</td><td>(5,010)</td></tr><tr><td>Change in fair value of contingent consideration receivable</td><td></td><td>—</td><td>3,311</td></tr><tr><td>Gain on a bargain purchase arising from the acquisition of subsidiaries</td><td></td><td>—</td><td>1,197</td></tr><tr><td>Loss on the redemption of a promissory note</td><td></td><td>(4,752)</td><td>(4,523)</td></tr><tr><td>Finance costs</td><td>6</td><td>(2,244)</td><td>(7,655)</td></tr></table>
786
+
787
+ ![](images/8ff88c87179ba0a1566d16c5a79c15b8906aa1b4ae9481f78922733706dd6477.jpg)
788
+ Figure 16. Parsing and chunking result of ChatDOC PDF Parser on Case 2 (original document: [4]). Zoom in to see the details.
789
+
790
+ ![](images/cbe3f0c54179f05af04f45b913d8e3d3d8c2666250d7453d4cb1f34943a47ce4.jpg)
791
+
792
+ # 2 Chunking Result:
793
+
794
+ # [Chunk 1]
795
+
796
+ Hong Kong Exchanges and Clearing Limited and The Stock Exchange of Hong Kong Limited take no responsibility for the contents of this announcement, make no representation as to its accuracy or completeness and expressly disclaim any liability whatsoever for any loss whatsoever arising from or in reliance upon the whole or any part of the contents of this announcement. In
797
+ DAISHO MICROLINE HOLDINGS LIMITED\n
798
+ (Incorporated in Bermuda with limited liability)in (Stock Code: 0567)
799
+ ANNUANCE OF ANNUAL RESULTS
800
+ FOR THE YEAR ENDED 31 MARCH 2022n
801
+ The Board of Directors (the "Board") of Daisho Microline Holdings Limited (the "Company") announces the preliminary consolidated results of the Company and its subsidiaries (the "Group") for the year ended 31 March 2022 together with the comparative figures of the previous corresponding year as follows:
802
+
803
+ # [Chunk 2]
804
+
805
+ # CONSOLIDATED STATEMENT OF PROFIT OR LOSSIn
806
+
807
+ Year ended 31 March 2022\n
808
+
809
+ Note 2022 2021 n
810
+
811
+ |---|---|---|n
812
+
813
+ Note HK$'000 HK$'000/n
814
+
815
+ |Continuing operations|||n
816
+
817
+ Revenue 3106,471 67,886 n
818
+
819
+ Cost of sales | (98,670) | (55,605) |n
820
+
821
+ | Gross profit | 7,801 | 12,281 |
822
+
823
+ Other income 57,341 4,616n
824
+
825
+ Selling and distribution expenses (5,083) (3,401)
826
+
827
+ Administrative expenses | (31,157) | (35,422) | n
828
+
829
+ |Other operating expenses | | (480) | (527) |n
830
+
831
+ Fair value gain on derivative financial instruments | - | 101 |n| P#e#f (P#e#i#f) is in##tively t#d.
832
+
833
+ Reversal of (Provision for) impairment loss on trade receivables, net $\left\lbrack {{10}\left( \mathrm{\;b}\right) ,{1.808}\left( {2,{859}}\right) }\right\rbrack /\mathrm{n}$
834
+
835
+ Impairment loss on other receivables | -| (1,780) n
836
+
837
+ Impairment loss on property, plant and equipment | 15 (5,010)
838
+
839
+ (2,314) n
840
+
841
+ | Change in fair value of contingent consideration receivable | -1
842
+
843
+ 3,311\n
844
+
845
+ Gain on bargain purchase arising from the acquisition of
846
+
847
+ subsidiaries | -1,197 |n
848
+
849
+ Loss on early redemption of a promissory note (4,512)
850
+
851
+ |Finance costs |6|(2,244)7,655)n
852
+
853
+ Loss before taxation from continuing operations 6 (27,024)
854
+
855
+ (36,964) n
856
+
857
+ Income tax expense | 7 | (444) | (532) | n
858
+
859
+ Loss for the year from continuing operations | (27,468)
860
+
861
+ (37,496) n
862
+
863
+ Discontinued operation | | \n
864
+
865
+ Loss for the year from discontinued operation | 11 (1,660)
866
+
867
+ (29,480)
868
+
869
+ Loss for the year 29,12866,976n
870
+ From continuing and discontinued operations | | | \n
871
+ Loss per share | | |n
872
+ Basic (Hong Kong cents) | 8 | (2.80) | (10.38) |n
873
+ | Diluted (Hong Kong cents) | 8 | (2.80) | (10.38) |n
874
+ From continuing operations | | | n
875
+ Loss per share | | | n
876
+ Basic (Hong Kong cents) | 8 | (2.64) | (5.81) | n
877
+ Diluted (Hong Kong cents) | 8 | (2.64) | (5.81) | n
878
+
879
+ ![](images/08b4281c263185b7351236c635ff1ecd25f37ec9fdd8440bab928034e4a5a1e7.jpg)
880
+
881
+ # 3 Visualization of Chunking Result:
882
+
883
+ # DAISHO MICROLINE HOLDINGS LIMITED
+
+ (Stock Code: 0567)
+
+ # ANNOUNCEMENT OF ANNUAL RESULTS FOR THE YEAR ENDED 31 MARCH 2022
896
+
897
+ CONSOLIDATED STATEMENT OF PROFIT OR LOSS
898
+
899
+ <table><tr><td colspan="4">Year ended 31 March 2024</td></tr><tr><td rowspan="2"></td><td rowspan="2">Note</td><td>2023</td><td>2022</td></tr><tr><td>HK$&#x27;000</td><td>HK$&#x27;000</td></tr><tr><td colspan="4">Continuing operations</td></tr><tr><td>Revenue</td><td>3</td><td>106,471</td><td>67,886</td></tr><tr><td>Cost of sales</td><td></td><td>(98,670)</td><td>(55,605)</td></tr><tr><td>Gross profit</td><td></td><td>7,601</td><td>12,281</td></tr><tr><td>Other income</td><td>5</td><td>7,341</td><td>4,616</td></tr><tr><td>Selling and distribution expenses</td><td></td><td>(31,603)</td><td>(3,491)</td></tr><tr><td>Administrative expenses</td><td></td><td>(31,187)</td><td>(35,422)</td></tr><tr><td>Other operating income</td><td></td><td>168</td><td>101</td></tr><tr><td>Fair value gain on derivative financial instruments (reversal of provisions for impairment loss on trade receivables, net)</td><td>10(b)</td><td>1,080</td><td>101</td></tr><tr><td>Impairment loss on other receivables</td><td></td><td>(1,514)</td><td>(1,796)</td></tr><tr><td>Impairment loss on property, plant and equipment</td><td>15</td><td>(5,010)</td><td>(3,114)</td></tr><tr><td>Change in fair value of contingent consideration receivable</td><td></td><td>-</td><td>3,311</td></tr><tr><td>Gain on bargain purchase arising from the contingent subsidies</td><td></td><td>-</td><td>1,197</td></tr><tr><td>Loss on early redemption of a promissory note</td><td></td><td>-</td><td>(4,512)</td></tr><tr><td>Finance costs</td><td>6</td><td>(2,244)</td><td>(7,655)</td></tr><tr><td></td><td rowspan="2">Note</td><td>2022</td><td>2021</td></tr><tr><td></td><td>HK$&#x27;000</td><td>HK$&#x27;000</td></tr><tr><td>Loss before taxation from continuing operations</td><td>6</td><td>(27,024)</td><td>(36,964)</td></tr><tr><td>Income tax expense</td><td>7</td><td>(444)</td><td>(532)</td></tr><tr><td>Loss for the year from continuing operations</td><td></td><td>(27,406)</td><td>(37,496)</td></tr><tr><td>Discontinued operation</td><td></td><td></td><td></td></tr><tr><td>Loss for the year from discontinued operation</td><td>11</td><td>(1,660)</td><td>(29,480)</td></tr><tr><td>Loss for the year</td><td></td><td>(29,128)</td><td>(66,076)</td></tr><tr><td colspan="4">From continuing and discontinued operations</td></tr><tr><td colspan="4">Loss per share</td></tr><tr><td>Base (Hong Kong cents)</td><td>8</td><td>(2,88)</td><td>(10,38)</td></tr><tr><td>Diluted (Hong Kong cents)</td><td>8</td><td>(2,80)</td><td>(10,38)</td></tr><tr><td colspan="4">From continuing operations</td></tr><tr><td colspan="4">Loss per share</td></tr><tr><td>Base (Hong Kong cents)</td><td>8</td><td>(2,64)</td><td>(5,81)</td></tr><tr><td>Diluted (Hong Kong cents)</td><td>8</td><td>(2,64)</td><td>(5,81)</td></tr></table>
900
+
901
+ # Paragraph
+
+ # Table
2401.12xxx/2401.12599/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:66e805a4ecaa92d92a2bd22fd9a5422326ec3bb4fd71b0879a2a052e903f454e
3
+ size 942764
2401.12xxx/2401.12599/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12603/4f4ba12d-6258-4c7f-8b4d-1da802d6095f_content_list.json ADDED
@@ -0,0 +1,1306 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "ASAP (Automatic Software for ASL Processing): A toolbox for processing Arterial Spin Labeling images",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 114,
8
+ 162,
9
+ 883,
10
+ 200
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Virginia Mato Abad<sup>1</sup>, Pablo García-Polo<sup>2</sup>, Owen O'Daly<sup>3</sup>, Juan Antonio Hernández-Tamames<sup>1</sup>, Fernando Zelaya<sup>3</sup>",
17
+ "bbox": [
18
+ 148,
19
+ 205,
20
+ 875,
21
+ 244
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ Laboratorio de Analisis de Imagen Médica y Biometría (LAIMBIO), Universidad Rey Juan Carlos, Mostoles, Madrid, Spain",
28
+ "bbox": [
29
+ 145,
30
+ 247,
31
+ 880,
32
+ 286
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "$^{2}$ M+Visión Advanced Fellowship, Medical Imaging Lab., Hospital Universitario de Fuenlabrada, Fuenlabrada, Madrid, Spain",
39
+ "bbox": [
40
+ 158,
41
+ 290,
42
+ 867,
43
+ 329
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "$^{3}$ Department of Neuroimaging, Institute of Psychiatry, King's College London, London, United Kingdom",
50
+ "bbox": [
51
+ 174,
52
+ 333,
53
+ 849,
54
+ 372
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "Corresponding Author:",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 112,
64
+ 602,
65
+ 334,
66
+ 619
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Virginia Mato Abad",
73
+ "bbox": [
74
+ 112,
75
+ 627,
76
+ 285,
77
+ 645
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Universidad Rey Juan Carlos",
84
+ "bbox": [
85
+ 112,
86
+ 645,
87
+ 372,
88
+ 662
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Departamental II. Despacho 157.",
95
+ "bbox": [
96
+ 112,
97
+ 662,
98
+ 405,
99
+ 679
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "Campus de Móstoles, C/Tulipán s/n",
106
+ "bbox": [
107
+ 112,
108
+ 679,
109
+ 428,
110
+ 696
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "text",
116
+ "text": "28933, Móstoles, Madrid (Spain)",
117
+ "bbox": [
118
+ 112,
119
+ 696,
120
+ 401,
121
+ 715
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "text",
127
+ "text": "Telephone: +34 914888522",
128
+ "bbox": [
129
+ 112,
130
+ 715,
131
+ 359,
132
+ 732
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "text",
138
+ "text": "virginia.mato@urjc.es",
139
+ "bbox": [
140
+ 112,
141
+ 732,
142
+ 305,
143
+ 750
144
+ ],
145
+ "page_idx": 0
146
+ },
147
+ {
148
+ "type": "page_number",
149
+ "text": "1",
150
+ "bbox": [
151
+ 869,
152
+ 90,
153
+ 880,
154
+ 106
155
+ ],
156
+ "page_idx": 0
157
+ },
158
+ {
159
+ "type": "text",
160
+ "text": "Abstract",
161
+ "text_level": 1,
162
+ "bbox": [
163
+ 116,
164
+ 156,
165
+ 200,
166
+ 175
167
+ ],
168
+ "page_idx": 1
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "The method of Arterial Spin Labeling (ASL) has experienced a significant rise in its application to functional imaging, since it is the only technique capable of measuring blood perfusion in a truly non-invasive manner. Currently, there are no commercial packages for processing ASL data and there is no recognised standard for normalising ASL data to a common frame of reference. This work describes a new Automated Software for ASL Processing (ASAP) that can automatically process several ASL datasets. ASAP includes functions for all stages of image pre-processing: quantification, skull-stripping, co-registration, partial volume correction and normalization. To assess the applicability and validity of the toolbox, this work shows its application in the study of hypoperfusion in a sample of healthy subjects at risk of progressing to Alzheimer's Disease. ASAP requires limited user intervention, minimising the possibility of random and systematic errors, and produces cerebral blood flow maps that are ready for statistical group analysis. The software is easy to operate and results in excellent quality of spatial normalisation. The results found in this evaluation study are consistent with previous studies that find decreased perfusion in Alzheimer's patients in similar regions and demonstrate the applicability of ASAP.",
173
+ "bbox": [
174
+ 114,
175
+ 183,
176
+ 883,
177
+ 463
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "Keywords: Arterial Spin Labeling, Cerebral Blood Flow, Automatic Processing, Partial volume effect, Alzheimer's Disease",
184
+ "bbox": [
185
+ 116,
186
+ 492,
187
+ 867,
188
+ 529
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "page_number",
194
+ "text": "2",
195
+ "bbox": [
196
+ 867,
197
+ 90,
198
+ 880,
199
+ 106
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "1. Introduction",
206
+ "text_level": 1,
207
+ "bbox": [
208
+ 116,
209
+ 156,
210
+ 254,
211
+ 174
212
+ ],
213
+ "page_idx": 2
214
+ },
215
+ {
216
+ "type": "text",
217
+ "text": "Arterial Spin Labelling (ASL) has become a popular magnetic resonance technique for imaging brain function. It is entirely non-invasive and capable of quantitatively determining regional blood perfusion; providing therefore a significant advantage over contrast agent based methods like $^{15}\\mathrm{O}$ enriched $\\mathrm{H}_2\\mathrm{O}$ Positron Emission Tomography (PET) or Gadolinium-based Dynamic Susceptibility Contrast Magnetic Resonance Imaging (DSC-MRI). The basic principle of ASL is to employ arterial blood water itself as contrast agent to measure perfusion. For cerebral blood flow (CBF) this is obtained by tagging a bolus of arterial blood in the region of the carotid arteries. The magnetization of inflowing blood water protons is inverted in that region by means of an external radiofrequency pulse, which is applied either as a short pulse (10-20ms) or as a continuous or pseudo-continuous burst of radiofrequency (1-2s) in the presence of a gradient. After a period of time (post-labelling delay), blood labelled with inverted signal is delivered to the entire brain through the smaller arteries and capillaries. This labelled arterial blood signal gives rise to a reduction in the image intensity when compared to a non-labelled (control) image. The control and labelled images are subtracted to generate a 'perfusion weighted' image. The intensity of each voxel will reflect the amount of arterial blood delivered in the inversion time; and through the use of a suitable model, the difference image is transformed to a map of CBF in conventional physiological units of ml blood/100g tissue/min.",
218
+ "bbox": [
219
+ 114,
220
+ 181,
221
+ 874,
222
+ 513
223
+ ],
224
+ "page_idx": 2
225
+ },
226
+ {
227
+ "type": "text",
228
+ "text": "The availability of ASL as a routine method for assessment of basal CBF data has provided the possibility to examine brain physiology and generate a marker to probe functional differences between groups. ASL is increasingly used in clinical studies of cerebral perfusion and has shown its validity in measuring perfusion changes in several neurodegenerative diseases including Alzheimer Disease (AD) [1,2]; as well as in psychiatric studies [3], pharmacology [4] and pain [5]. However, to perform this type of analysis, multiple image processing steps are required: quantification, registration, normalization to a standard space, partial volume correction, etc.",
229
+ "bbox": [
230
+ 114,
231
+ 520,
232
+ 875,
233
+ 660
234
+ ],
235
+ "page_idx": 2
236
+ },
237
+ {
238
+ "type": "text",
239
+ "text": "Partial volume effects (PVE) are a consequence of limited spatial resolution in imaging and especially in ASL, where the low signal-to-noise (SNR) ratio leads to the need to employ larger voxels. In an effort to increase SNR, tissue specific saturation pulses are applied to the volume of interest to suppress the static tissue signal. This is known as 'background suppression' and it is now used extensively in ASL [6]. Nevertheless, the change in the received signal due to blood water proton relaxation remains very small, such that voxels are typically of the order of $3 \\times 3 \\times 6 \\mathrm{~mm}$ , generating the need to employ some form of PVE correction as each voxel is likely to contain signal mixing from different tissue types. Normal grey matter (GM) perfusion values are around $60 \\mathrm{ml} / 100 \\mathrm{g} / \\mathrm{min}$ while white matter (WM) values are significantly lower ( $20 \\mathrm{ml} / 100 \\mathrm{g} / \\mathrm{min}$ ) [7]. Due to the relative insensitivity of ASL in white matter, the prime interest when using this technique is the study of pure GM perfusion. However, in voxels containing (for",
240
+ "bbox": [
241
+ 114,
242
+ 667,
243
+ 880,
244
+ 878
245
+ ],
246
+ "page_idx": 2
247
+ },
248
+ {
249
+ "type": "page_number",
250
+ "text": "3",
251
+ "bbox": [
252
+ 867,
253
+ 90,
254
+ 880,
255
+ 106
256
+ ],
257
+ "page_idx": 2
258
+ },
259
+ {
260
+ "type": "text",
261
+ "text": "example) $50\\%$ GM and $50\\%$ WM, the CBF values could be underestimated by up to one-third. PVE is of paramount importance in the study of neurodegenerative diseases where GM atrophy significantly affects CBF quantification and therefore the comparison of patient data with control populations.",
262
+ "bbox": [
263
+ 116,
264
+ 131,
265
+ 879,
266
+ 202
267
+ ],
268
+ "page_idx": 3
269
+ },
270
+ {
271
+ "type": "text",
272
+ "text": "The absence of a standard approach for data processing has been partly driven by the fact that several ASL methodologies have evolved independently [8]. Therefore, there is no recognised standard for normalising ASL data to a common frame of reference. This lack of a harmonised processing pipeline contributes to the potential discrepancies in studies of brain perfusion across different laboratories [9].",
273
+ "bbox": [
274
+ 116,
275
+ 209,
276
+ 880,
277
+ 297
278
+ ],
279
+ "page_idx": 3
280
+ },
281
+ {
282
+ "type": "text",
283
+ "text": "A number of packages, such as BASIL [10] and ASLTbx [11] provide a set of functions for pre-processing of ASL data and they both are free for academic use. BASIL consists of a collection of tools from the Functional Software Library (FSL) suite [12] that aid in the quantification and subsequent spatial processing of CBF images acquired with ASL. BASIL is based on Bayesian inference principles and was originally developed for ASL data acquired with several post-labelling delays (known as 'multi-TI' data). ASLTbx is a MATLAB [13] and SPM [14] based toolkit for processing ASL data, which requires basic MATLAB script programming.",
284
+ "bbox": [
285
+ 114,
286
+ 303,
287
+ 880,
288
+ 445
289
+ ],
290
+ "page_idx": 3
291
+ },
292
+ {
293
+ "type": "text",
294
+ "text": "These packages typically perform a step-by-step and subject-by-subject processing and require a large amount of manual operation. To date, a toolbox supporting a fully automated processing of raw ASL data, with minimum user intervention that can be used for effective comparison of group data, is not yet available.",
295
+ "bbox": [
296
+ 116,
297
+ 450,
298
+ 880,
299
+ 521
300
+ ],
301
+ "page_idx": 3
302
+ },
303
+ {
304
+ "type": "text",
305
+ "text": "In this article, we describe the development, implementation and test of an ASL processing toolbox (ASAP) that can automatically process several ASL datasets, from their raw image format to a spatially normalised, smoothed (if desired) version, with minimal user intervention. Ease of operation has been facilitated by a graphical user interface (GUI) whose operation is entirely intuitive. After the user sets the input/output and processing parameters using the GUI, the toolbox fully executes all processing steps for datasets of any number of subjects and results in data ready for second level statistical analysis. The data can be written in a variety of formats to facilitate its inclusion in several software packages for group analysis. The toolbox also has a facility to display the spatially normalised data in a manner that facilitates quality control by the user.",
306
+ "bbox": [
307
+ 114,
308
+ 527,
309
+ 883,
310
+ 719
311
+ ],
312
+ "page_idx": 3
313
+ },
314
+ {
315
+ "type": "text",
316
+ "text": "To assess the applicability and validity of the toolbox, we demonstrate its use in the study of hypoperfusion in a sample of healthy subjects at risk of progressing to Alzheimer's Disease (AD).",
317
+ "bbox": [
318
+ 116,
319
+ 727,
320
+ 843,
321
+ 780
322
+ ],
323
+ "page_idx": 3
324
+ },
325
+ {
326
+ "type": "text",
327
+ "text": "2. Methods",
328
+ "text_level": 1,
329
+ "bbox": [
330
+ 116,
331
+ 787,
332
+ 222,
333
+ 804
334
+ ],
335
+ "page_idx": 3
336
+ },
337
+ {
338
+ "type": "text",
339
+ "text": "2.1. Toolbox processing procedures",
340
+ "text_level": 1,
341
+ "bbox": [
342
+ 127,
343
+ 816,
344
+ 470,
345
+ 835
346
+ ],
347
+ "page_idx": 3
348
+ },
349
+ {
350
+ "type": "text",
351
+ "text": "ASAP has been developed in MATLAB with the goal of simplifying the process of quantification and pre-processing of ASL studies. It includes functions like CBF",
352
+ "bbox": [
353
+ 116,
354
+ 842,
355
+ 823,
356
+ 877
357
+ ],
358
+ "page_idx": 3
359
+ },
360
+ {
361
+ "type": "page_number",
362
+ "text": "4",
363
+ "bbox": [
364
+ 867,
365
+ 90,
366
+ 880,
367
+ 104
368
+ ],
369
+ "page_idx": 3
370
+ },
371
+ {
372
+ "type": "text",
373
+ "text": "quantification, skull stripping, co-registration, partial volume correction and normalisation. Different processing strategies have been made available depending on user requirements:",
374
+ "bbox": [
375
+ 109,
376
+ 132,
377
+ 870,
378
+ 185
379
+ ],
380
+ "page_idx": 4
381
+ },
382
+ {
383
+ "type": "list",
384
+ "sub_type": "text",
385
+ "list_items": [
386
+ "- System requirements: ASAP is written in MATLAB under a Unix system (Linux or Mac OS) but it is not entirely a stand-alone utility. It accesses both FSL software and SPM libraries, which are two of the most widely available image processing platforms for MRI. These are invoked by the toolbox and are transparent to the user, but they must be installed independently by each user and added to the MATLAB path (including the FSLDIR environment variable). The software works equally well with earlier version of SPM or with the latest release (SPM-12).",
387
+ "- Input data: The ASL input data can be the raw difference image (control image – labelled image) or the perfusion image (CBF map). Regardless of the input or the ASL modality used, computation of the CBF map is made according to the formula proposed in the recent article “Recommended implementation of arterial spin-labeled perfusion MRI for clinical applications” published by Alsop et al [15]. For subsequent spatial co-registration and normalisation, the user is able to choose between providing a high-resolution T1-weighted or T2-weighted structural scan. DICOM, NIfTI or ANALYZE formats are accepted.",
388
+ "- Resolution: The user can select between two different execution methods regarding the resolution of the images: the low-resolution native space of ASL or up-sampling the ASL images to the structural image high-resolution grid, typically of the order of $1 \\times 1 \\times 1 \\mathrm{~mm}$ voxel size (acquisition matrix of $288 \\times 288$ or $512 \\times 512$ voxels with full brain coverage. The up-sampling is made by means of the spatial interpolation 'Nearest Neighbour', which preserves the grey values of the original voxel and ensures the consistency of CBF values. After the spatial normalization, the ASL voxel size is $2 \\times 2 \\times 2 \\mathrm{~mm}$ , the resolution of the MNI template.",
389
+ "- Cerebral blood flow quantification: Due to the fact that most multi-TI ASL sequences are currently only available as experimental or prototype versions, the toolbox only includes CBF quantification for single inversion time data. In that case, the ASL difference image should be provided as input. The CBF quantification map is calculated using the formula currently recommended method [15]. In addition to the difference image, a reference proton density image and the post labelling delay time employed are also required.",
390
+ "- Partial volume correction (PVC): ASAP provides the option of PVC of the ASL data. In its current version, two different methods are provided: 1) the method described by Asllani [16] and 2) a method based on a previous approached developed for PET (from here referred to as 'tghe PET method') that assumes perfusion of WM is globally $40\\%$ of that of GM for correction of resting CBF [17]. Although the later is a more simplistic approach and has been largely superseded by the methods introduced by Asllani and Chappell, this method (hereafter referred to as the PET correction) is available in our toolbox because it has been applied historically in earlier ASL studies [18-20]. Asllani's algorithm is based on linear regression and represents the voxel intensity as a weighted sum of pure tissue contribution, where"
391
+ ],
392
+ "bbox": [
393
+ 112,
394
+ 191,
395
+ 883,
396
+ 891
397
+ ],
398
+ "page_idx": 4
399
+ },
400
+ {
401
+ "type": "page_number",
402
+ "text": "5",
403
+ "bbox": [
404
+ 867,
405
+ 90,
406
+ 880,
407
+ 104
408
+ ],
409
+ "page_idx": 4
410
+ },
411
+ {
412
+ "type": "text",
413
+ "text": "the weighting coefficients are the tissue's fractional volume in the voxel. This algorithm is able to estimate the CBF for grey matter (GM) and white matter (WM) independently. The PET correction assumed that all contributions to perfusion are from brain tissue and that cerebrospinal fluid has no contribution. In that case, ASL intensities are corrected according to the following equation:",
414
+ "bbox": [
415
+ 138,
416
+ 132,
417
+ 864,
418
+ 220
419
+ ],
420
+ "page_idx": 5
421
+ },
422
+ {
423
+ "type": "equation",
424
+ "text": "\n$$\nI _ {\\text {c o r r}} = I _ {\\text {u n c o r r}} / \\left(P _ {G M} + 0. 4 ^ {*} P _ {W M}\\right)\n$$\n",
425
+ "text_format": "latex",
426
+ "bbox": [
427
+ 343,
428
+ 220,
429
+ 566,
430
+ 238
431
+ ],
432
+ "page_idx": 5
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "where $I_{\\text{corr}}$ and $I_{\\text{uncorr}}$ are the corrected and uncorrected intensities, the 0.4 factor is the global ratio between WM and GM and $P_{\\text{GM}}$ and $P_{\\text{WM}}$ are the probabilities of GM and WM, respectively. The PVC option is only available when working in the low-resolution ASL space, thus having co-registered the high-resolution structural image to the ASL image.",
437
+ "bbox": [
438
+ 138,
439
+ 238,
440
+ 874,
441
+ 325
442
+ ],
443
+ "page_idx": 5
444
+ },
445
+ {
446
+ "type": "text",
447
+ "text": "- Execution mode: The toolbox includes a Graphical User Interface (GUI) where all the input data can be setup manually. Also, it has a batch mode for advanced users.",
448
+ "bbox": [
449
+ 112,
450
+ 324,
451
+ 875,
452
+ 359
453
+ ],
454
+ "page_idx": 5
455
+ },
456
+ {
457
+ "type": "text",
458
+ "text": "The main procedure of ASAP is shown in Figure 1 and includes the following steps:",
459
+ "bbox": [
460
+ 112,
461
+ 366,
462
+ 841,
463
+ 385
464
+ ],
465
+ "page_idx": 5
466
+ },
467
+ {
468
+ "type": "list",
469
+ "sub_type": "text",
470
+ "list_items": [
471
+ "1. Optional CBF quantification for pCASL and PASL sequences.",
472
+ "2. Reorient the images. Structural and ASL images are reoriented to the AC-PC plane (Anterior Commissure - Posterior Commissure) and their origins are set to the AC. Setting of a common origin is advisable for superior performance of the subsequent processing steps. If the PD image is available, the PD image is reoriented to the AC-PC plane, applying the same transformation to the ASL image.",
473
+ "3. Rough skull-stripping of the initial resting state ASL map using the FSL Brain Extraction Tool (bet) using a conservative threshold. This step is useful for noisy ASL maps, in order to increase the quality of the rigid co-registration with the structural scan.",
474
+ "4. Estimation of the brain mask. Brain mask from the structural volume can be calculated by two different options: the FSL bet tool (recommended for T2-weighted high-resolution scan) or the SPM segmentation task (recommended for T1-weighted high resolution scan). The brain mask is required for excluding out-of-brain voxels, often encountered in subtraction techniques such as ASL. Segmentation of GM and WM probability maps is also required for the partial volume correction step.",
475
+ "5. Rigid co-registration between ASL and structural images using SPM function. ASL images are normally co-registered to anatomical images so they can be later normalized to the MNI space (or any other standard space) for group analysis. Also, the co-registration is required for the partial volume correction. T1-weighted or T2-weighted images can be used for co-registration. If direct co-registration of ASL and structural images is not reliable because of the poor signal-to-noise ratio and the limited structural features of perfusion images, the proton density (PD) image can also be used for co-registration, moving the ASL data in the process. Depending on the selected resolution, the co-registration will be made in the native space of the ASL data (down-sampling the resolution of the structural scan) or up-sampling the ASL to the high-resolution of the structural volume by interpolation.",
476
+ "6. Partial Volume Correction of the ASL maps using the methods available. Information"
477
+ ],
478
+ "bbox": [
479
+ 111,
480
+ 391,
481
+ 880,
482
+ 880
483
+ ],
484
+ "page_idx": 5
485
+ },
486
+ {
487
+ "type": "page_number",
488
+ "text": "6",
489
+ "bbox": [
490
+ 867,
491
+ 90,
492
+ 880,
493
+ 104
494
+ ],
495
+ "page_idx": 5
496
+ },
497
+ {
498
+ "type": "text",
499
+ "text": "about the proportion of each tissue type (grey matter, white matter, and cerebrospinal fluid) is used to correct perfusion data. The method described by Asllani estimates both, partial GM and partial WM ASL maps. The PET correction method only estimates the partial GM ASL map. This option is only available if the structural scan has been down-sampled by means of the rigid co-registration step to the ASL image.",
500
+ "bbox": [
501
+ 140,
502
+ 132,
503
+ 872,
504
+ 236
505
+ ],
506
+ "page_idx": 6
507
+ },
508
+ {
509
+ "type": "list",
510
+ "sub_type": "text",
511
+ "list_items": [
512
+ "7. Skull-stripping of the ASL data. Apply the brain mask previously calculated to the coregistered and partial volume corrected ASL maps in order to exclude artefactual, finite 'perfusion' values in the extra-cerebral space (These arise in all ASL modalities because of the subtraction of control and labelled images).",
513
+ "8. Spatial normalization. For comparison across subjects, location correspondence has to be established, so registration of all the individual images to a standardized space is required. Here, the images (both ASL and structural) are normalized to the MNI standard space using: 1) a MNI template selected by the user or 2) the transformation matrix earlier calculated by SPM during the segmentation process.",
514
+ "9. Smoothing. The resultant images in the standard space are ready for voxel-based statistical analysis. However, these images are commonly multiplied by a smoothing kernel larger than the voxel dimension to satisfy the random-field approximation employed in parametric statistics. The SPM Gaussian smoothing kernel is applied to the final ASL maps, the size of the kernel (in mm) is selectable by the user.",
515
+ "10. The resultant images can be directly used for statistical analysis. This procedure is very flexible, as most of the steps are optional. Thus, users can freely design the pipeline that best fit their needs."
516
+ ],
517
+ "bbox": [
518
+ 112,
519
+ 237,
520
+ 879,
521
+ 532
522
+ ],
523
+ "page_idx": 6
524
+ },
525
+ {
526
+ "type": "text",
527
+ "text": "2.2. Testing the hypoperfusion in healthy subjects in risk of developing Alzheimer's disease by using the toolbox",
528
+ "text_level": 1,
529
+ "bbox": [
530
+ 124,
531
+ 545,
532
+ 794,
533
+ 580
534
+ ],
535
+ "page_idx": 6
536
+ },
537
+ {
538
+ "type": "text",
539
+ "text": "Several studies [1,21-25] have shown that Alzheimer's patients suffer from decreased perfusion in specific cortical and sub-cortical areas that may be associated to the subsequent cognitive and structural degeneration. A subgroup of the \"Proyecto Vallecas\" study, a 4-year longitudinal study over 1,000 subjects to assess normal healthy ageing and the appearance of neurodegenerative diseases, in particular AD; was selected to validate this hypothesis and demonstrate ASAP.",
540
+ "bbox": [
541
+ 109,
542
+ 588,
543
+ 859,
544
+ 695
545
+ ],
546
+ "page_idx": 6
547
+ },
548
+ {
549
+ "type": "text",
550
+ "text": "2.2.1. Subjects",
551
+ "text_level": 1,
552
+ "bbox": [
553
+ 112,
554
+ 705,
555
+ 246,
556
+ 724
557
+ ],
558
+ "page_idx": 6
559
+ },
560
+ {
561
+ "type": "text",
562
+ "text": "A two-group study comparing 25 healthy elderly subjects (7 men and 18 women, mean age $75 \\pm 3.6$ years) and 25 elderly subjects at risk of developing Alzheimer's disease (8 men and 17 women, mean age $77 \\pm 4.5$ years) was performed. All subjects were first included in the study as healthy subjects based on several psychological and neurological tests, including the Geriatric Depression Scale [26], a Mini-Mental State Examination (MMSE) [27] above 24 and Functional Activities Questionnaire (FAQ) [28] scores above 6 at the baseline assessment. All subjects included in the study show no signs of dementia or severe cognitive deterioration and they are able to manage and independent life without any mental disorder (cognitive or psychiatric) impeding daily",
563
+ "bbox": [
564
+ 109,
565
+ 729,
566
+ 870,
567
+ 891
568
+ ],
569
+ "page_idx": 6
570
+ },
571
+ {
572
+ "type": "page_number",
573
+ "text": "7",
574
+ "bbox": [
575
+ 867,
576
+ 90,
577
+ 880,
578
+ 104
579
+ ],
580
+ "page_idx": 6
581
+ },
582
+ {
583
+ "type": "text",
584
+ "text": "functioning. All subjects underwent MRI examination as well as psychological and neurological assessment every 6-12 months. Informed consent was obtained from all participants prior to evaluation. The subjects selected as subjects at risk of developing AD were those whose left and right hippocampi suffered from a volume loss greater than 2 standard deviations from the sample mean.",
585
+ "bbox": [
586
+ 114,
587
+ 131,
588
+ 866,
589
+ 220
590
+ ],
591
+ "page_idx": 7
592
+ },
593
+ {
594
+ "type": "text",
595
+ "text": "2.2.2. Acquisition",
596
+ "text_level": 1,
597
+ "bbox": [
598
+ 116,
599
+ 231,
600
+ 267,
601
+ 250
602
+ ],
603
+ "page_idx": 7
604
+ },
605
+ {
606
+ "type": "text",
607
+ "text": "All subjects underwent MRI examination in a 3T Signa HDx MR scanner (GE Healthcare, Waukesha, WI) using an eight-channel phased array coil. The first sequence was a 3D T1 weighted SPGR with a TR=10.024ms, TE=4.56ms, TI=600ms, NEX=1, acquisition matrix=288x288, full brain coverage, resolution=1x1x1mm, flip angle=12. The second sequence was a 3D pCASL pulse sequence with full brain coverage, matrix size=128x128, resolution=1.875x1.875x4mm, flip angle = 155, labelling time 1.5s, post-labelling delay=2.025s, TR=4.733s, TE=9.812ms, NEX=3, acquisition time ~6min and was used to generate the regional cerebral blood flow (rCBF) maps. Both the perfusion difference image and the proton density image produced by this sequence were available for the study.",
608
+ "bbox": [
609
+ 114,
610
+ 256,
611
+ 867,
612
+ 431
613
+ ],
614
+ "page_idx": 7
615
+ },
616
+ {
617
+ "type": "text",
618
+ "text": "2.2.3. Image processing",
619
+ "text_level": 1,
620
+ "bbox": [
621
+ 116,
622
+ 444,
623
+ 328,
624
+ 462
625
+ ],
626
+ "page_idx": 7
627
+ },
628
+ {
629
+ "type": "text",
630
+ "text": "All 3D T1 weighted images were processed with Freesurfer [29] in order to obtain the cortical and subcortical volumes for each subject. The left and right hippocampi volume (LHV, RHV) were normalised by the total GM volume. This normalised measure allowed us to divide the sample into three groups: Control group ([LHV, RHV])(mean hippocampus $(\\mathsf{MH}) + 1$ std.), mean group (MH-2std.<[LHV, RHV]<MH+1std.) and probable AD group (PAD) ([LHV, RHV]<(MH-2std.). A selection of 25 PAD subjects and 25 age and gender matched controls was the final subset used to validate ASAP.",
631
+ "bbox": [
632
+ 114,
633
+ 468,
634
+ 885,
635
+ 590
636
+ ],
637
+ "page_idx": 7
638
+ },
639
+ {
640
+ "type": "text",
641
+ "text": "Thus, the input images for each subject for ASAP were two DICOM series: a 3D T1 weighted image and a raw ASL sequence (control-labelled subtraction and proton density images). The resulting processing pipeline (as described above) is shown in Figure 1. To evaluate the effect of PVE correction, prior to MNI normalization two different options were applied to the perfusion maps: no PVE correction and the Asllani's PVE correction with a regression-kernel of size $5 \\times 5 \\times 1$ voxels. Therefore, for each subject, original CBF maps and Asllani's PVE-corrected CBF maps were obtained.",
642
+ "bbox": [
643
+ 114,
644
+ 597,
645
+ 883,
646
+ 720
647
+ ],
648
+ "page_idx": 7
649
+ },
650
+ {
651
+ "type": "text",
652
+ "text": "2.2.4. Statistical analysis",
653
+ "text_level": 1,
654
+ "bbox": [
655
+ 116,
656
+ 732,
657
+ 333,
658
+ 750
659
+ ],
660
+ "page_idx": 7
661
+ },
662
+ {
663
+ "type": "text",
664
+ "text": "Normalized and smoothed (6mm Gaussian kernel) CBF maps produced by ASAP (both PVE corrected and uncorrected) were employed for the voxel-based statistical analysis. Statistical maps for rejecting the null hypothesis of equal perfusion between healthy and subjects at risk of developing AD were generated by means of a two sample t-test analysis within the General Linear Model (GLM) (with gender and age as covariates and mean CBF value of each subject as a regressor) as implemented in the SPM software suite.",
665
+ "bbox": [
666
+ 114,
667
+ 757,
668
+ 885,
669
+ 878
670
+ ],
671
+ "page_idx": 7
672
+ },
673
+ {
674
+ "type": "page_number",
675
+ "text": "8",
676
+ "bbox": [
677
+ 867,
678
+ 90,
679
+ 880,
680
+ 104
681
+ ],
682
+ "page_idx": 7
683
+ },
684
+ {
685
+ "type": "text",
686
+ "text": "3. Results",
687
+ "text_level": 1,
688
+ "bbox": [
689
+ 116,
690
+ 132,
691
+ 212,
692
+ 148
693
+ ],
694
+ "page_idx": 8
695
+ },
696
+ {
697
+ "type": "text",
698
+ "text": "3.1. A MATLAB Toolbox for processing ASL images: ASAP",
699
+ "text_level": 1,
700
+ "bbox": [
701
+ 116,
702
+ 156,
703
+ 666,
704
+ 175
705
+ ],
706
+ "page_idx": 8
707
+ },
708
+ {
709
+ "type": "text",
710
+ "text": "ASAP has been developed for fully automated processing of ASL data. It is an open-source package and is freely available (sites.google.com/site/asltoolbox). ASAP provides a user friendly Graphical User Interface GUI (Figure 2). Users can perform several interactions with the embedded functions, e.g., setting inputs, outputs or different processing parameters. In the \"Input Files\" panel (see Figure 2) users can select all the input data while the \"Output Directories\" panel is used to designate the directory where the output files will be saved. In the \"Options\" panel, users can select the different processing parameters.",
711
+ "bbox": [
712
+ 114,
713
+ 181,
714
+ 859,
715
+ 321
716
+ ],
717
+ "page_idx": 8
718
+ },
719
+ {
720
+ "type": "text",
721
+ "text": "In addition, the advanced mode includes a \"load batch files\" option for loading the input files from text files to avoid having to select the input data individually through the GUI. With this option, a large number of datasets can be loaded into the toolbox for subsequent processing using the \"Options\" set in the panel and the same options will apply throughout for all subjects. In addition, the advanced mode contains the 'ROI Statistics' GUI (Figure 3) that offers the option to extract CBF values from anatomically or functionally defined Regions of Interest (ROI). This facility can simultaneously extract mean, median and maximum values from several ROIs in several CBF maps. Users only have to select the input files, ASL data and ROI masks (NiftI or .mat files are accepted), through the GUI (\"Select files\" action) or in batch mode (\"Load files\" action) from text files. Output results are saved in text files that can easily be incorporated into statistical analysis packages such as SPSS, etc.",
722
+ "bbox": [
723
+ 114,
724
+ 329,
725
+ 879,
726
+ 539
727
+ ],
728
+ "page_idx": 8
729
+ },
730
+ {
731
+ "type": "text",
732
+ "text": "Resultant files are stored in the directories specified by the user. Each procedure of the pipeline produces a new file, every step is recorded and files are not overwritten. The MNI normalized images can be directly used for statistical analysis, however, users can also use the intermediary results. As stated before, most of the steps described above are optional, so the procedure is very flexible and users can freely design the most appropriate pipeline. Also, the toolbox is designed to aid in reproducing some analysis by avoiding some processing steps. This feature is useful, for example, for applying different methods for partial volume correction on the same input data: if there are GM, WM and CSF maps in the same directory as the input structural scan, the toolbox does not apply the SPM segmentation step, using these files. We have performed additional extensive validation (as well as the one reported in this article) to ensure that the toolbox works correctly for both absolute perfusion images (CBF) and for perfusion-weighted difference images in which CBF computation is required.",
733
+ "bbox": [
734
+ 114,
735
+ 545,
736
+ 877,
737
+ 773
738
+ ],
739
+ "page_idx": 8
740
+ },
741
+ {
742
+ "type": "text",
743
+ "text": "3.2. Evaluation of hypoperfusion differences in healthy subjects at risk of developing Alzheimer's disease using the toolbox",
744
+ "text_level": 1,
745
+ "bbox": [
746
+ 127,
747
+ 784,
748
+ 818,
749
+ 820
750
+ ],
751
+ "page_idx": 8
752
+ },
753
+ {
754
+ "type": "text",
755
+ "text": "Figure 4 shows the result of the rigid co-registration step between the 3D T1 weighted images and the CBF maps. Both images match the same anatomical space; the 3D T1 structural image has been re-sampled (as well as the tissue probability maps) to the",
756
+ "bbox": [
757
+ 116,
758
+ 827,
759
+ 870,
760
+ 880
761
+ ],
762
+ "page_idx": 8
763
+ },
764
+ {
765
+ "type": "page_number",
766
+ "text": "9",
767
+ "bbox": [
768
+ 867,
769
+ 90,
770
+ 880,
771
+ 104
772
+ ],
773
+ "page_idx": 8
774
+ },
775
+ {
776
+ "type": "text",
777
+ "text": "low-resolution of the CBF map with a 'b-spline' interpolation method. Figure 5 shows the tissue probability maps of GM and WM for one subject.",
778
+ "bbox": [
779
+ 114,
780
+ 131,
781
+ 880,
782
+ 167
783
+ ],
784
+ "page_idx": 9
785
+ },
786
+ {
787
+ "type": "text",
788
+ "text": "The partial volume effect in ASL data is shown in Figure 6. First and second rows show the 3D T1 weighted axial, sagittal and coronal planes for one patient and a detail of the left hippocampus and parahippocampal gyrus in the same planes respectively. Partial volume effect in the CBF map is shown in the third row and fourth row shows the result of Asllani's correction with a $5 \\times 5 \\times 1$ low-resolution kernel. Figure 6 shows how the blood flow is increased in the whole region after PVC with Asllani's method. Table 1 shows quantitatively how the perfusion values change after the PVC in the whole region. Also, Table 2 shows the comparative results of CBF perfusion values for one subject after the PVC in the different subcortical brain structures for both hemispheres, showing the increase of perfusion for all ROIs, obtained after the PVC correction with Asllani's method.",
789
+ "bbox": [
790
+ 114,
791
+ 174,
792
+ 880,
793
+ 364
794
+ ],
795
+ "page_idx": 9
796
+ },
797
+ {
798
+ "type": "text",
799
+ "text": "The tissue probability maps (shown in Figure 5), the PVC method and the normalisation maps obtained from the anatomical images can be applied to the perfusion maps in order to obtain PVE corrected CBF maps in MNI space (Figure 7).",
800
+ "bbox": [
801
+ 114,
802
+ 373,
803
+ 880,
804
+ 426
805
+ ],
806
+ "page_idx": 9
807
+ },
808
+ {
809
+ "type": "text",
810
+ "text": "After performing each of the steps previously shown, in our cohort of 50 elderly subjects, the statistical group analysis was performed by means of a two-sample t-test in both cases: CBF maps with Asllani's PVE correction and the original CBF maps without PVE correction. Age and gender were introduced as confounding variables in the model. Figure 8 shows the results of these two analysis (Figure 8a.- PVE corrected, and 8b.- PVE uncorrected) for a family-wise error (FWE) corrected $p_{\\text{FWE}} < 0.05$ (cluster region of 300 voxels). Table 3 shows the T score and p values for the two analysis.",
811
+ "bbox": [
812
+ 114,
813
+ 433,
814
+ 875,
815
+ 556
816
+ ],
817
+ "page_idx": 9
818
+ },
819
+ {
820
+ "type": "text",
821
+ "text": "These results indicate decreased perfusion in healthy subjects at risk of developing Alzheimer's disease. Areas of significant hypoperfusion in Figure 8a (PVE-corrected) correspond to: caudate, hippocampi, thalamus, parahippocampal gyrus, amygdala, cingulate gyrus, precuneus, left and right insula, superior temporal lobe, uncus and choroid plexus. Results from the statistical analysis without PVE correction (Figure 8b) show regions which appeared previously in the PVE corrected version (caudate, left hippocampus, right thalamus, anterior cingulate, right insula and choroid plexus). In both analyses, part of the perfusion deficit appears displaced into the region of the ventricular space, probably because of the inherent blurring of the 3D FSE stack-of-spiral readout of the ASL pulse sequence employed in this study. In a separate analyses, we confirmed that in fact the 'at-risk' cohort exhibits ventricular enlargement and reduction of grey matter volume in the vicinity of these areas. The combination of these results with the inclusion of PV correction in ASL studies, forms part of a larger separate investigation which is beyond the scope of this paper. These results are consistent with regions found by our studies and those of other authors [18,24,30-32].",
822
+ "bbox": [
823
+ 114,
824
+ 561,
825
+ 872,
826
+ 825
827
+ ],
828
+ "page_idx": 9
829
+ },
830
+ {
831
+ "type": "text",
832
+ "text": "4. Discussion",
833
+ "text_level": 1,
834
+ "bbox": [
835
+ 114,
836
+ 832,
837
+ 243,
838
+ 849
839
+ ],
840
+ "page_idx": 9
841
+ },
842
+ {
843
+ "type": "text",
844
+ "text": "In this work, we have developed a MATLAB toolbox (ASAP) for systematically and",
845
+ "bbox": [
846
+ 114,
847
+ 856,
848
+ 830,
849
+ 875
850
+ ],
851
+ "page_idx": 9
852
+ },
853
+ {
854
+ "type": "page_number",
855
+ "text": "10",
856
+ "bbox": [
857
+ 859,
858
+ 90,
859
+ 880,
860
+ 106
861
+ ],
862
+ "page_idx": 9
863
+ },
864
+ {
865
+ "type": "text",
866
+ "text": "automatically processing ASL datasets with minimal user intervention. The key advantage of ASAP is that it automates all the processing steps of ASL datasets for any number of subjects and the ability to work with reduced user input minimises the possibility of random and systematic errors. ASAP offers easily selectable option for almost all the stages of that process. The toolbox can produce perfusion data that is ready for statistical group analysis. A fully automated pipeline makes the data processing efficient and reduces potential mistakes by avoiding manual processing of individual steps. Besides, ASAP has a very friendly and easy to use GUI (Figure 2), allowing users to select the preferred options for each case. Depending on the datasets, users may change the options of some processing steps to optimize the processing quality. Prior programming knowledge is not required. One limitation of other existing toolboxes lies in the requirement of programming knowledge, which limits their accessibility to users with programming skills.",
867
+ "bbox": [
868
+ 116,
869
+ 131,
870
+ 883,
871
+ 359
872
+ ],
873
+ "page_idx": 10
874
+ },
875
+ {
876
+ "type": "text",
877
+ "text": "In the present study, we applied ASAP to study possible changes in perfusion in a sample of healthy subjects in risk of developing AD. The analyses were run on a Macintosh OS X (10.6 Snow Leopard) computer with 8 GB of RAM and a 3.06 GHz Intel Core 2 Duo processor. The total running time was 3.44 hours (4.13 minutes per subject). The automatization of the whole post-processing pipeline minimises the variability introduced by human errors and decreases enormously the time needed to manually process all subjects. ASAP provides the images ready to perform statistical assessment. We have presented an evaluation of hypoperfusion in healthy subjects at risk of developing Alzheimer's disease and the results are consistent with those of previous studies that find decreased perfusion in Alzheimer's patients in similar regions. As an example, figure 8a and 8b show how the absence of PVE correction can lead to false negative findings. Regions affected by hypoperfusion have higher statistical significative and thus regions are more extensive due to the PVE correction. These results also highlight that PVE correction is required to maximise the predictive value of ASL in this field of research. Hypoperfusion in the right inferior insula and superior temporal lobe region can be detected with PVE corrected images whereas PVE uncorrected images cannot. This region in particular, is affected by atrophy in those subjects at the very early stages of Alzheimer's disease, but thanks to this PVE technique, it can be shown that a hypoperfusion pattern is prior to GM atrophy.",
878
+ "bbox": [
879
+ 114,
880
+ 364,
881
+ 883,
882
+ 699
883
+ ],
884
+ "page_idx": 10
885
+ },
886
+ {
887
+ "type": "text",
888
+ "text": "We envisage the intuitive and user friendly nature of the ASAP toolbox will help to facilitate the application of ASL in the clinical environment, where the method can be easily employed by clinicians and technicians without the need of intensive training or knowledge of image processing techniques. As mentioned earlier, processing data with ASAP can be very flexible and users have the freedom to design the most appropriate pipeline targeted to their data. In order to know which is the best pipeline for the user's data, there are important recommendations in the user's manual for a proper use of the different options available in the toolbox. There is a description of which is the best choice for each stage, depending on the input data, such as T1-weighted or T2-weighted structural scan, or in which cases is useful to apply a specific option, like the",
889
+ "bbox": [
890
+ 116,
891
+ 704,
892
+ 875,
893
+ 878
894
+ ],
895
+ "page_idx": 10
896
+ },
897
+ {
898
+ "type": "page_number",
899
+ "text": "11",
900
+ "bbox": [
901
+ 859,
902
+ 90,
903
+ 880,
904
+ 106
905
+ ],
906
+ "page_idx": 10
907
+ },
908
+ {
909
+ "type": "text",
910
+ "text": "rough skull-stripping or the PVE correction.",
911
+ "bbox": [
912
+ 116,
913
+ 132,
914
+ 488,
915
+ 151
916
+ ],
917
+ "page_idx": 11
918
+ },
919
+ {
920
+ "type": "text",
921
+ "text": "High quality of co-registration between anatomical and perfusion images are key for optimal partial volume effect correction and normalisation steps. The results of employing our toolbox in this sample study, demonstrate that the software can produce a high level of accuracy of spatial normalisation, paying no penalty in quality as a result of fully automated operation. The assessment of the normalisation quality has been made qualitatively by comparing a minimum of 4 external cortical landmarks on the CBF maps and ensuring that they correspond to the same landmarks on the chosen template within a $\\pm 3\\mathrm{mm}$ range. The same assessment has been made with at least 2 other sub-cortical landmarks. Nevertheless, further improvements such as the use of higher field warping options as those of the DARTEL library of SPM [33] are currently being incorporated for a subsequent version. Although this option will require the selection of a subgroup of images to generate an intermediate group specific template and computational time is likely to increase, the method may be more accurate and precise for group comparisons.",
922
+ "bbox": [
923
+ 114,
924
+ 156,
925
+ 880,
926
+ 404
927
+ ],
928
+ "page_idx": 11
929
+ },
930
+ {
931
+ "type": "text",
932
+ "text": "One of the disadvantages of automatization is that some mistakes might go undetected. To ameliorate this problem, ASAP's interface includes a \"Quick check\" option for displaying the resultant MNI-normalized ASL images in a web browser once the processing is completed. This option allows an convenient quality assurance method, making it possible to check the normalization quality or whether any intermediate step has failed. Many functionalities and features are open for improvement in future versions of the software. Other labelling schemes as well as multi-TI ASL sequences, will be included in further versions of ASAP.",
933
+ "bbox": [
934
+ 114,
935
+ 411,
936
+ 879,
937
+ 550
938
+ ],
939
+ "page_idx": 11
940
+ },
941
+ {
942
+ "type": "text",
943
+ "text": "5. Conclusion",
944
+ "text_level": 1,
945
+ "bbox": [
946
+ 116,
947
+ 558,
948
+ 246,
949
+ 574
950
+ ],
951
+ "page_idx": 11
952
+ },
953
+ {
954
+ "type": "text",
955
+ "text": "In conclusion, the results for this specific study show the applicability of ASAP in the study of perfusion changes in elder people at risk of developing AD. Furthermore, these clinical results are consistent with previous AD studies. In summary, our toolbox provides a simple, flexible and reliable solution for ASL-related studies. It has an extendable design, and new functions or utilities can and will be added in the future.",
956
+ "bbox": [
957
+ 114,
958
+ 583,
959
+ 879,
960
+ 670
961
+ ],
962
+ "page_idx": 11
963
+ },
964
+ {
965
+ "type": "text",
966
+ "text": "The ASAP manual and software can be obtained freely at sites.google.com/site/asl toolbox. Feedback from users will be encouraged to ensure the updated of the ASAP toolbox, in order to include future improvements in image processing methodology. We hope for rich participation from the ASL community.",
967
+ "bbox": [
968
+ 114,
969
+ 671,
970
+ 879,
971
+ 739
972
+ ],
973
+ "page_idx": 11
974
+ },
975
+ {
976
+ "type": "text",
977
+ "text": "Acknowledgments: ASAP was partially supported by the COST Action \"Arterial Spin Labelling Initiative in Dementia\" (BMBS COST Action BM1103).",
978
+ "bbox": [
979
+ 116,
980
+ 747,
981
+ 880,
982
+ 782
983
+ ],
984
+ "page_idx": 11
985
+ },
986
+ {
987
+ "type": "page_number",
988
+ "text": "12",
989
+ "bbox": [
990
+ 859,
991
+ 90,
992
+ 880,
993
+ 106
994
+ ],
995
+ "page_idx": 11
996
+ },
997
+ {
998
+ "type": "text",
999
+ "text": "References",
1000
+ "text_level": 1,
1001
+ "bbox": [
1002
+ 112,
1003
+ 132,
1004
+ 223,
1005
+ 150
1006
+ ],
1007
+ "page_idx": 12
1008
+ },
1009
+ {
1010
+ "type": "list",
1011
+ "sub_type": "ref_text",
1012
+ "list_items": [
1013
+ "[1] Alsop DC, Dai W, Grossman M, Detre JA. Arterial spin labeling blood flow MRI: its role in the early characterization of Alzheimer's disease. J.Alzheimers Dis. 2010;20(3):871-80.",
1014
+ "[2] Schuff N, Matsumoto S, Kmiecik J, Studholme C, Du A, Ezekiel F, et al. Cerebral blood flow in ischemic vascular dementia and Alzheimer's disease, measured by arterial spin-labeling magnetic resonance imaging. Alzheimers Dement. 2009;5(6):454-62.",
1015
+ "[3] Mikita N, Mehta MA, Zelaya FO, Stringaris A. Using arterial spin labeling to examine mood states in youth. Brain and Behavior. 2015;5(6):e00339",
1016
+ "[4] Pollak TA, De Simoni S, Barimani B, Zelaya FO, Stone JM, Mehta MA. Phenomenologically distinct psychotomimetic effects of ketamine are associated with cerebral blood flow changes in functionally relevant cerebral foci: a continuous arterial spin labelling study. Psychopharmacology. 2015; (Epub ahead of print)",
1017
+ "[5] Hodkinson DJ, Khawaja N, O'Daly O, Thacker MA, Zelaya FO, Wooldridge CL, et al. Cerebral analgesic response to nonsteroidal anti-inflammatory drug ibuprofen. Pain. 2015;156(7):1301-10",
1018
+ "[6] Ye F, Frank JA, Weinberger DR, McLaughlin AC. Noise reduction in 3D perfusion imaging by attenuating the static signal in arterial spin tagging (ASSIST) Magn. Reson Med (2000) 44: 92-100",
1019
+ "[7] Parkes LM, Rashid W, Chard DT, Tofts PS. Normal cerebral perfusion measurements using arterial spin labeling: Reproducibility, stability, and age and gender effects. Magnetic Resonance in Medicine 2004;51(4):736-43.",
1020
+ "[8] Petersen ET, Zimine I, Ho YL, Golay X. Non-invasive measurement of perfusion: a critical review of arterial spin labelling techniques. Br.J.Radiol. 2006;79(944):688-701.",
1021
+ "[9] Mutsaerts HJMM, Steketee RME, Heijtel DFR, Kuijer JPA, Osch MJPv, Majoie CBLM, et al. Inter-Vendor Reproducibility of Pseudo-Continuous Arterial Spin Labeling at 3 Tesla. PLoS One. 2014;9(8), e104108.",
1022
+ "[10] Chappell MA, Groves AR, Whitcher B, Woolrich MW. Variational Bayesian Inference for a Nonlinear Forward Model. Trans.Sig.Proc. 2009;57(1):223-36.",
1023
+ "[11] Wang Z, Aguirre GK, Rao H, Wang J, Fernandez-Seara MA, Childress AR, et al. Empirical optimization of ASL data analysis using an ASL data processing toolbox: ASLtbx. Magn.Reson.Imaging 2008;26(2):261-9.",
1024
+ "[12] Jenkinson M, Beckmann CF, Behrens TE, Woolrich MW, Smith SM. Fsl. Neuroimage 2012;62(2):782-90.",
1025
+ "[13] The MathWorks, Inc. 2012; Available at: http://www.mathworks.com/. Accessed 12/03, 2015."
1026
+ ],
1027
+ "bbox": [
1028
+ 114,
1029
+ 174,
1030
+ 880,
1031
+ 876
1032
+ ],
1033
+ "page_idx": 12
1034
+ },
1035
+ {
1036
+ "type": "page_number",
1037
+ "text": "13",
1038
+ "bbox": [
1039
+ 859,
1040
+ 90,
1041
+ 880,
1042
+ 106
1043
+ ],
1044
+ "page_idx": 12
1045
+ },
1046
+ {
1047
+ "type": "list",
1048
+ "sub_type": "ref_text",
1049
+ "list_items": [
1050
+ "[14] SPM. Statistical Parametric Mapping. The Wellcome Trust Centre for neuroimaging at University College of London. 2015; Available at: http://www.fil.ion.ucl.ac.uk/spm/. Accessed 12/03, 2015.",
1051
+ "[15] Alsop DC, Detre JA, Golay X, Gunther M, Hendrikse J, Hernandez-Garcia L, et al. Recommended implementation of arterial spin-labeled perfusion MRI for clinical applications: A consensus of the ISMRM perfusion study group and the European consortium for ASL in dementia. Magn.Reson.Med. 2014; 73:102-116",
1052
+ "[16] Asllani I, Borogovac A, Brown TR. Regression algorithm correcting for partial volume effects in arterial spin labeling MRI. Magn.Reson.Med. 2008;60(6):1362-71.",
1053
+ "[17] Meltzer CC, Leal JP, Mayberg HS, Wagner HNJ, Frost JJ. Correction of PET Data for Partial Volume Effects in Human Cerebral Cortex by MR Imaging. J.Comput.Assist.Tomogr. 1990;14(4).",
1054
+ "[18] Johnson NA, Jahng GH, Weiner MW, Miller BL, Chui HC, Jagust WJ, et al. Pattern of cerebral hypoperfusion in Alzheimer disease and mild cognitive impairment measured with arterial spin-labeling MR imaging: initial experience. Radiology 2005;234(3):851-9.",
1055
+ "[19] Du AT, Jahng GH, Hayasaka S, Kramer JH, Rosen HJ, Gorno-Tempini ML, et al. Hypoperfusion in frontotemporal dementia and Alzheimer disease by arterial spin labeling MRI. Neurology 2006;67(7):1215-20.",
1056
+ "[20] Chen Y, Wolk DA, Reddin JS, Korczykowski M, Martinez PM, Musiek ES, et al. Voxel-level comparison of arterial spin-labeled perfusion MRI and FDG-PET in Alzheimer disease. Neurology 2011;77(22):1977-85.",
1057
+ "[21] Mazza M, Marano G, Traversi G, Bria P, Mazza S. Primary cerebral blood flow deficiency and Alzheimer's disease: shadows and lights. J.Alzheimers Dis. 2011;23(3):375-89.",
1058
+ "[22] Johnson NA, Jahng GH, Weiner MW, Miller BL, Chui HC, Jagust WJ, et al. Pattern of cerebral hypoperfusion in Alzheimer disease and mild cognitive impairment measured with arterial spin-labeling MR imaging: initial experience. Radiology 2005;234(3):851-9.",
1059
+ "[23] Xu G, Antuono PG, Jones J, Xu Y, Wu G, Ward D, et al. Perfusion fMRI detects deficits in regional CBF during memory-encoding tasks in MCI subjects. Neurology 2007;69(17):1650-6.",
1060
+ "[24] Asllani I, Habeck C, Scarmeas N, Borogovac A, Brown TR, Stern Y. Multivariate and univariate analysis of continuous arterial spin labeling perfusion MRI in Alzheimer's disease. J.Cereb.Blood Flow Metab. 2008;28(4):725-36.",
1061
+ "[25] Austin BP, Nair VA, Meier TB, Xu G, Rowley HA, Carlsson CM, et al. Effects of hypoperfusion in Alzheimer's disease. J.Alzheimers Dis. 2011;26 Suppl 3:123-33.",
1062
+ "[26] Yesavage JA, Brink TL, Rose TL, Lum O, Huang V, Adey M, et al. Development"
1063
+ ],
1064
+ "bbox": [
1065
+ 114,
1066
+ 131,
1067
+ 879,
1068
+ 887
1069
+ ],
1070
+ "page_idx": 13
1071
+ },
1072
+ {
1073
+ "type": "page_number",
1074
+ "text": "14",
1075
+ "bbox": [
1076
+ 859,
1077
+ 90,
1078
+ 880,
1079
+ 104
1080
+ ],
1081
+ "page_idx": 13
1082
+ },
1083
+ {
1084
+ "type": "list",
1085
+ "sub_type": "ref_text",
1086
+ "list_items": [
1087
+ "and validation of a geriatric depression screening scale: A preliminary report. J.Psychiatr.Res. 1982-1983;17(1):37-49.",
1088
+ "[27] Folstein MF, Folstein SE, McHugh PR. \"Mini-mental state\". A practical method for grading the cognitive state of patients for the clinician. J.Psychiatr.Res. 1975;12(3):189-98.",
1089
+ "[28] Pfeffer RI, Kurosaki TT, Harrah CH, Chance JM, Filos S. Measurement of functional activities in older adults in the community. J Gerontol 1982;37(3):323-9.",
1090
+ "[29] Fischl B. FreeSurfer. Neuroimage 2012;62(2):774-81.",
1091
+ "[30] Alsop DC, Detre JA, Grossman M. Assessment of cerebral blood flow in Alzheimer's disease by spin-labeled magnetic resonance imaging. Ann.Neurol. 2000;47(1):93-100.",
1092
+ "[31] Du AT, Jahng GH, Hayasaka S, Kramer JH, Rosen HJ, Gorno-Tempini ML, et al. Hypoperfusion in frontotemporal dementia and Alzheimer disease by arterial spin labeling MRI. Neurology 2006;67(7):1215-20.",
1093
+ "[32] Xu G, Antuono PG, Jones J, Xu Y, Wu G, Ward D, et al. Perfusion fMRI detects deficits in regional CBF during memory-encoding tasks in MCI subjects. Neurology 2007;69(17):1650-6.",
1094
+ "[33] Ashburner J. A fast diffeomorphic image registration algorithm. Neuroimage 2007;38(1):95-113."
1095
+ ],
1096
+ "bbox": [
1097
+ 114,
1098
+ 132,
1099
+ 875,
1100
+ 517
1101
+ ],
1102
+ "page_idx": 14
1103
+ },
1104
+ {
1105
+ "type": "page_number",
1106
+ "text": "15",
1107
+ "bbox": [
1108
+ 859,
1109
+ 90,
1110
+ 880,
1111
+ 106
1112
+ ],
1113
+ "page_idx": 14
1114
+ },
1115
+ {
1116
+ "type": "text",
1117
+ "text": "Tables",
1118
+ "text_level": 1,
1119
+ "bbox": [
1120
+ 112,
1121
+ 140,
1122
+ 189,
1123
+ 159
1124
+ ],
1125
+ "page_idx": 15
1126
+ },
1127
+ {
1128
+ "type": "table",
1129
+ "img_path": "images/aa378da737fb7ff7db3aacf4367329f26741001e30b685f30422454141f57cac.jpg",
1130
+ "table_caption": [],
1131
+ "table_footnote": [],
1132
+ "table_body": "<table><tr><td></td><td>Original CBF</td><td>PVC CBF</td></tr><tr><td>Left Hippocampus</td><td>40±10</td><td>46±9</td></tr><tr><td>Right Hippocampus</td><td>42±11</td><td>42±11</td></tr><tr><td>Left Parahippocampal Gyrus</td><td>40±10</td><td>48±10</td></tr><tr><td>Right Parahippocampal Gyrus</td><td>35±8</td><td>44±10</td></tr></table>",
1133
+ "bbox": [
1134
+ 256,
1135
+ 165,
1136
+ 741,
1137
+ 271
1138
+ ],
1139
+ "page_idx": 15
1140
+ },
1141
+ {
1142
+ "type": "table",
1143
+ "img_path": "images/8c5ee546ef349a028524247ebaa6a71ea89dc344b3680c671b7e0ed87b922249.jpg",
1144
+ "table_caption": [
1145
+ "Table 1. CBF perfusion values (ml/100g/min) in the left and right hippocampus and parahippocampal gyrus (same regions and patient that Figure 6). Left column shows the original CBF values (mean±std) and right column shows the CBF values (mean±std) after the PVC using the Asllani's method with a 5x5x1 low-resolution kernel."
1146
+ ],
1147
+ "table_footnote": [],
1148
+ "table_body": "<table><tr><td></td><td></td><td>Original CBF</td><td>PVC CBF</td></tr><tr><td rowspan=\"6\">Left Hemisphere</td><td>Amygdala</td><td>35±10</td><td>37±7</td></tr><tr><td>Caudate</td><td>35±13</td><td>38±10</td></tr><tr><td>Hippocampus</td><td>34±10</td><td>38±9</td></tr><tr><td>Pallidum</td><td>38±11</td><td>49±12</td></tr><tr><td>Putamen</td><td>42±8</td><td>44±6</td></tr><tr><td>Thalamus</td><td>44±17</td><td>54±16</td></tr><tr><td rowspan=\"6\">Right Hemisphere</td><td>Amygdala</td><td>34±7</td><td>38±4</td></tr><tr><td>Caudate</td><td>31±15</td><td>35±11</td></tr><tr><td>Hippocampus</td><td>34±12</td><td>38±8</td></tr><tr><td>Pallidum</td><td>27±6</td><td>42±11</td></tr><tr><td>Putamen</td><td>41±8</td><td>45±5</td></tr><tr><td>Thalamus</td><td>44±19</td><td>54±18</td></tr></table>",
1149
+ "bbox": [
1150
+ 302,
1151
+ 369,
1152
+ 697,
1153
+ 636
1154
+ ],
1155
+ "page_idx": 15
1156
+ },
1157
+ {
1158
+ "type": "table",
1159
+ "img_path": "images/74c65b80393583723e4780b9cecf01d067dfedd508d5ee6e41027300ffeea85f.jpg",
1160
+ "table_caption": [
1161
+ "Table 2. Example of CBF values (ml/100g/min) in the different subcortical brain structures for both hemispheres. Left column shows the original CBF values (mean±std) while right column shows the CBF values (mean±std) after the PVC using the Asllani's method with a 5x5x1 low-resolution kernel."
1162
+ ],
1163
+ "table_footnote": [],
1164
+ "table_body": "<table><tr><td></td><td>T value (p-value) Non-PVC</td><td>T value (p-value) PVC</td></tr><tr><td>Left Hippocampus</td><td>2.58 (0.006465)</td><td>3.28 (0.000958)</td></tr><tr><td>Right Hippocampus</td><td>2.70 (0.004746)</td><td>3.25 (0.001045)</td></tr><tr><td>Left Parahippocampal Gyrus</td><td>1.76 (0.042325)</td><td>2.24 (0.01483)</td></tr><tr><td>Right Parahippocampal Gyrus</td><td>1.66 (0.051651)</td><td>2.52 (0.007522)</td></tr></table>",
1165
+ "bbox": [
1166
+ 114,
1167
+ 710,
1168
+ 818,
1169
+ 816
1170
+ ],
1171
+ "page_idx": 15
1172
+ },
1173
+ {
1174
+ "type": "text",
1175
+ "text": "Table 3. T scores and p-values (in brackets) of the statistical group analysis ( $p_{FWE}<0.05$ , cluster region of 300 voxels) in the same regions that Table 1 and Figure 6. Left column shows the results for the original CBF maps (non-PVC corrected) and right column shows the results for the final CBF after the PVC using the Asllani's method with a 5x5x1 low-resolution kernel.",
1176
+ "bbox": [
1177
+ 109,
1178
+ 821,
1179
+ 874,
1180
+ 887
1181
+ ],
1182
+ "page_idx": 15
1183
+ },
1184
+ {
1185
+ "type": "page_number",
1186
+ "text": "16",
1187
+ "bbox": [
1188
+ 859,
1189
+ 90,
1190
+ 880,
1191
+ 104
1192
+ ],
1193
+ "page_idx": 15
1194
+ },
1195
+ {
1196
+ "type": "text",
1197
+ "text": "Figure Legends",
1198
+ "text_level": 1,
1199
+ "bbox": [
1200
+ 116,
1201
+ 160,
1202
+ 287,
1203
+ 181
1204
+ ],
1205
+ "page_idx": 16
1206
+ },
1207
+ {
1208
+ "type": "text",
1209
+ "text": "Figure 1. Pipeline for processing ASL datasets in ASLToolbox. Each box represents a main step in ASLToolbox's procedure and top dotted line boxes represent the input data.",
1210
+ "bbox": [
1211
+ 114,
1212
+ 205,
1213
+ 851,
1214
+ 257
1215
+ ],
1216
+ "page_idx": 16
1217
+ },
1218
+ {
1219
+ "type": "text",
1220
+ "text": "Figure 2. Graphical User Interface of the ASLToolbox for loading dataset. It consists of three main sections namely \"Input Files\", \"Output Directories\" and \"Options\".",
1221
+ "bbox": [
1222
+ 116,
1223
+ 265,
1224
+ 867,
1225
+ 300
1226
+ ],
1227
+ "page_idx": 16
1228
+ },
1229
+ {
1230
+ "type": "text",
1231
+ "text": "Figure 3. Graphical User Interface for ROI Statistics analysis in ASLToolbox.",
1232
+ "bbox": [
1233
+ 116,
1234
+ 308,
1235
+ 779,
1236
+ 325
1237
+ ],
1238
+ "page_idx": 16
1239
+ },
1240
+ {
1241
+ "type": "text",
1242
+ "text": "Figure 4. Example of the result of the co-registration step between a 3DT1 weighted image (background) and a CBF map (overlay). The 3DT1 structural image has been resampled to the resolution of the ASL data.",
1243
+ "bbox": [
1244
+ 116,
1245
+ 333,
1246
+ 874,
1247
+ 385
1248
+ ],
1249
+ "page_idx": 16
1250
+ },
1251
+ {
1252
+ "type": "text",
1253
+ "text": "Figure 5. Tissue probability maps of GM (rows 1,3,5) and WM (rows 2,4,6) of a subject, registered onto the 3DT1 structural scan on a sagittal (rows 1,2), coronal (rows 3,4) and axial planes (rows 5,6). These maps were used for the PVC of the CBF maps.",
1254
+ "bbox": [
1255
+ 116,
1256
+ 392,
1257
+ 872,
1258
+ 445
1259
+ ],
1260
+ "page_idx": 16
1261
+ },
1262
+ {
1263
+ "type": "text",
1264
+ "text": "Figure 6. 3D T1 weighted axial, sagittal and coronal planes for one patient (first row), detail of the left hippocampus and parahippocampal gyrus in the same planes (second, third and fourth rows). Color overlay in third and fourth row correspond to the CBF map. Third row shows the partial volume effect in the original CBF and fourth row shows the result of Asllani's PVC with a $5 \\times 5 \\times 1$ low-resolution kernel.",
1265
+ "bbox": [
1266
+ 116,
1267
+ 452,
1268
+ 874,
1269
+ 539
1270
+ ],
1271
+ "page_idx": 16
1272
+ },
1273
+ {
1274
+ "type": "text",
1275
+ "text": "Figure 7. Example of a final smoothed (6mm Gaussian kernel), MNI normalised and PVE corrected CBF map as an overlay onto the 3DT1 MNI template.",
1276
+ "bbox": [
1277
+ 116,
1278
+ 547,
1279
+ 846,
1280
+ 582
1281
+ ],
1282
+ "page_idx": 16
1283
+ },
1284
+ {
1285
+ "type": "text",
1286
+ "text": "Figure 8. Results of the statistical group comparison: significative hypoperfusion regions for healthy subjects in risk of developing AD pFWE<0.05 (minimum cluster 300 voxels) for: (a) PVE corrected CBF maps and (b) CBF maps with no PVE correction",
1287
+ "bbox": [
1288
+ 116,
1289
+ 590,
1290
+ 879,
1291
+ 642
1292
+ ],
1293
+ "page_idx": 16
1294
+ },
1295
+ {
1296
+ "type": "page_number",
1297
+ "text": "17",
1298
+ "bbox": [
1299
+ 859,
1300
+ 90,
1301
+ 880,
1302
+ 104
1303
+ ],
1304
+ "page_idx": 16
1305
+ }
1306
+ ]
2401.12xxx/2401.12603/4f4ba12d-6258-4c7f-8b4d-1da802d6095f_model.json ADDED
@@ -0,0 +1,1807 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "page_number",
5
+ "bbox": [
6
+ 0.87,
7
+ 0.092,
8
+ 0.882,
9
+ 0.107
10
+ ],
11
+ "angle": 0,
12
+ "content": "1"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.115,
18
+ 0.164,
19
+ 0.884,
20
+ 0.202
21
+ ],
22
+ "angle": 0,
23
+ "content": "ASAP (Automatic Software for ASL Processing): A toolbox for processing Arterial Spin Labeling images"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.15,
29
+ 0.207,
30
+ 0.877,
31
+ 0.245
32
+ ],
33
+ "angle": 0,
34
+ "content": "Virginia Mato Abad<sup>1</sup>, Pablo García-Polo<sup>2</sup>, Owen O'Daly<sup>3</sup>, Juan Antonio Hernández-Tamames<sup>1</sup>, Fernando Zelaya<sup>3</sup>"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.147,
40
+ 0.248,
41
+ 0.882,
42
+ 0.287
43
+ ],
44
+ "angle": 0,
45
+ "content": "\\(^{1}\\)Laboratorio de Analisis de Imagen Médica y Biometría (LAIMBIO), Universidad Rey Juan Carlos, Mostoles, Madrid, Spain"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.16,
51
+ 0.291,
52
+ 0.868,
53
+ 0.33
54
+ ],
55
+ "angle": 0,
56
+ "content": "\\(^{2}\\)M+Visión Advanced Fellowship, Medical Imaging Lab., Hospital Universitario de Fuenlabrada, Fuenlabrada, Madrid, Spain"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.175,
62
+ 0.334,
63
+ 0.851,
64
+ 0.373
65
+ ],
66
+ "angle": 0,
67
+ "content": "\\(^{3}\\)Department of Neuroimaging, Institute of Psychiatry, King's College London, London, United Kingdom"
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.114,
73
+ 0.603,
74
+ 0.336,
75
+ 0.621
76
+ ],
77
+ "angle": 0,
78
+ "content": "Corresponding Author:"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.114,
84
+ 0.628,
85
+ 0.286,
86
+ 0.646
87
+ ],
88
+ "angle": 0,
89
+ "content": "Virginia Mato Abad"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.114,
95
+ 0.646,
96
+ 0.373,
97
+ 0.663
98
+ ],
99
+ "angle": 0,
100
+ "content": "Universidad Rey Juan Carlos"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.114,
106
+ 0.663,
107
+ 0.406,
108
+ 0.68
109
+ ],
110
+ "angle": 0,
111
+ "content": "Departamental II. Despacho 157."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.114,
117
+ 0.68,
118
+ 0.429,
119
+ 0.698
120
+ ],
121
+ "angle": 0,
122
+ "content": "Campus de Móstoles, C/Tulipán s/n"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.114,
128
+ 0.698,
129
+ 0.402,
130
+ 0.716
131
+ ],
132
+ "angle": 0,
133
+ "content": "28933, Móstoles, Madrid (Spain)"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.114,
139
+ 0.716,
140
+ 0.36,
141
+ 0.733
142
+ ],
143
+ "angle": 0,
144
+ "content": "Telephone: +34 914888522"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.114,
150
+ 0.733,
151
+ 0.307,
152
+ 0.751
153
+ ],
154
+ "angle": 0,
155
+ "content": "virginia.mato@urjc.es"
156
+ }
157
+ ],
158
+ [
159
+ {
160
+ "type": "page_number",
161
+ "bbox": [
162
+ 0.869,
163
+ 0.092,
164
+ 0.882,
165
+ 0.107
166
+ ],
167
+ "angle": 0,
168
+ "content": "2"
169
+ },
170
+ {
171
+ "type": "title",
172
+ "bbox": [
173
+ 0.117,
174
+ 0.157,
175
+ 0.202,
176
+ 0.176
177
+ ],
178
+ "angle": 0,
179
+ "content": "Abstract"
180
+ },
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.116,
185
+ 0.184,
186
+ 0.885,
187
+ 0.464
188
+ ],
189
+ "angle": 0,
190
+ "content": "The method of Arterial Spin Labeling (ASL) has experienced a significant rise in its application to functional imaging, since it is the only technique capable of measuring blood perfusion in a truly non-invasive manner. Currently, there are no commercial packages for processing ASL data and there is no recognised standard for normalising ASL data to a common frame of reference. This work describes a new Automated Software for ASL Processing (ASAP) that can automatically process several ASL datasets. ASAP includes functions for all stages of image pre-processing: quantification, skull-stripping, co-registration, partial volume correction and normalization. To assess the applicability and validity of the toolbox, this work shows its application in the study of hypoperfusion in a sample of healthy subjects at risk of progressing to Alzheimer's Disease. ASAP requires limited user intervention, minimising the possibility of random and systematic errors, and produces cerebral blood flow maps that are ready for statistical group analysis. The software is easy to operate and results in excellent quality of spatial normalisation. The results found in this evaluation study are consistent with previous studies that find decreased perfusion in Alzheimer's patients in similar regions and demonstrate the applicability of ASAP."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.117,
196
+ 0.493,
197
+ 0.869,
198
+ 0.53
199
+ ],
200
+ "angle": 0,
201
+ "content": "Keywords: Arterial Spin Labeling, Cerebral Blood Flow, Automatic Processing, Partial volume effect, Alzheimer's Disease"
202
+ }
203
+ ],
204
+ [
205
+ {
206
+ "type": "page_number",
207
+ "bbox": [
208
+ 0.869,
209
+ 0.092,
210
+ 0.882,
211
+ 0.107
212
+ ],
213
+ "angle": 0,
214
+ "content": "3"
215
+ },
216
+ {
217
+ "type": "title",
218
+ "bbox": [
219
+ 0.117,
220
+ 0.157,
221
+ 0.255,
222
+ 0.175
223
+ ],
224
+ "angle": 0,
225
+ "content": "1. Introduction"
226
+ },
227
+ {
228
+ "type": "text",
229
+ "bbox": [
230
+ 0.115,
231
+ 0.183,
232
+ 0.875,
233
+ 0.514
234
+ ],
235
+ "angle": 0,
236
+ "content": "Arterial Spin Labelling (ASL) has become a popular magnetic resonance technique for imaging brain function. It is entirely non-invasive and capable of quantitatively determining regional blood perfusion; providing therefore a significant advantage over contrast agent based methods like \\(^{15}\\mathrm{O}\\) enriched \\(\\mathrm{H}_2\\mathrm{O}\\) Positron Emission Tomography (PET) or Gadolinium-based Dynamic Susceptibility Contrast Magnetic Resonance Imaging (DSC-MRI). The basic principle of ASL is to employ arterial blood water itself as contrast agent to measure perfusion. For cerebral blood flow (CBF) this is obtained by tagging a bolus of arterial blood in the region of the carotid arteries. The magnetization of inflowing blood water protons is inverted in that region by means of an external radiofrequency pulse, which is applied either as a short pulse (10-20ms) or as a continuous or pseudo-continuous burst of radiofrequency (1-2s) in the presence of a gradient. After a period of time (post-labelling delay), blood labelled with inverted signal is delivered to the entire brain through the smaller arteries and capillaries. This labelled arterial blood signal gives rise to a reduction in the image intensity when compared to a non-labelled (control) image. The control and labelled images are subtracted to generate a 'perfusion weighted' image. The intensity of each voxel will reflect the amount of arterial blood delivered in the inversion time; and through the use of a suitable model, the difference image is transformed to a map of CBF in conventional physiological units of ml blood/100g tissue/min."
237
+ },
238
+ {
239
+ "type": "text",
240
+ "bbox": [
241
+ 0.116,
242
+ 0.521,
243
+ 0.877,
244
+ 0.661
245
+ ],
246
+ "angle": 0,
247
+ "content": "The availability of ASL as a routine method for assessment of basal CBF data has provided the possibility to examine brain physiology and generate a marker to probe functional differences between groups. ASL is increasingly used in clinical studies of cerebral perfusion and has shown its validity in measuring perfusion changes in several neurodegenerative diseases including Alzheimer Disease (AD) [1,2]; as well as in psychiatric studies [3], pharmacology [4] and pain [5]. However, to perform this type of analysis, multiple image processing steps are required: quantification, registration, normalization to a standard space, partial volume correction, etc."
248
+ },
249
+ {
250
+ "type": "text",
251
+ "bbox": [
252
+ 0.116,
253
+ 0.668,
254
+ 0.882,
255
+ 0.879
256
+ ],
257
+ "angle": 0,
258
+ "content": "Partial volume effects (PVE) are a consequence of limited spatial resolution in imaging and especially in ASL, where the low signal-to-noise (SNR) ratio leads to the need to employ larger voxels. In an effort to increase SNR, tissue specific saturation pulses are applied to the volume of interest to suppress the static tissue signal. This is known as 'background suppression' and it is now used extensively in ASL [6]. Nevertheless, the change in the received signal due to blood water proton relaxation remains very small, such that voxels are typically of the order of \\(3 \\times 3 \\times 6 \\mathrm{~mm}\\), generating the need to employ some form of PVE correction as each voxel is likely to contain signal mixing from different tissue types. Normal grey matter (GM) perfusion values are around \\(60 \\mathrm{ml} / 100 \\mathrm{g} / \\mathrm{min}\\) while white matter (WM) values are significantly lower (\\(20 \\mathrm{ml} / 100 \\mathrm{g} / \\mathrm{min}\\)) [7]. Due to the relative insensitivity of ASL in white matter, the prime interest when using this technique is the study of pure GM perfusion. However, in voxels containing (for"
259
+ }
260
+ ],
261
+ [
262
+ {
263
+ "type": "page_number",
264
+ "bbox": [
265
+ 0.869,
266
+ 0.092,
267
+ 0.882,
268
+ 0.105
269
+ ],
270
+ "angle": 0,
271
+ "content": "4"
272
+ },
273
+ {
274
+ "type": "text",
275
+ "bbox": [
276
+ 0.117,
277
+ 0.132,
278
+ 0.88,
279
+ 0.203
280
+ ],
281
+ "angle": 0,
282
+ "content": "example) \\(50\\%\\) GM and \\(50\\%\\) WM, the CBF values could be underestimated by up to one-third. PVE is of paramount importance in the study of neurodegenerative diseases where GM atrophy significantly affects CBF quantification and therefore the comparison of patient data with control populations."
283
+ },
284
+ {
285
+ "type": "text",
286
+ "bbox": [
287
+ 0.117,
288
+ 0.21,
289
+ 0.882,
290
+ 0.298
291
+ ],
292
+ "angle": 0,
293
+ "content": "The absence of a standard approach for data processing has been partly driven by the fact that several ASL methodologies have evolved independently [8]. Therefore, there is no recognised standard for normalising ASL data to a common frame of reference. This lack of a harmonised processing pipeline contributes to the potential discrepancies in studies of brain perfusion across different laboratories [9]."
294
+ },
295
+ {
296
+ "type": "text",
297
+ "bbox": [
298
+ 0.116,
299
+ 0.304,
300
+ 0.882,
301
+ 0.446
302
+ ],
303
+ "angle": 0,
304
+ "content": "A number of packages, such as BASIL [10] and ASLTbx [11] provide a set of functions for pre-processing of ASL data and they both are free for academic use. BASIL consists of a collection of tools from the Functional Software Library (FSL) suite [12] that aid in the quantification and subsequent spatial processing of CBF images acquired with ASL. BASIL is based on Bayesian inference principles and was originally developed for ASL data acquired with several post-labelling delays (known as 'multi-TI' data). ASLTbx is a MATLAB [13] and SPM [14] based toolkit for processing ASL data, which requires basic MATLAB script programming."
305
+ },
306
+ {
307
+ "type": "text",
308
+ "bbox": [
309
+ 0.117,
310
+ 0.451,
311
+ 0.882,
312
+ 0.522
313
+ ],
314
+ "angle": 0,
315
+ "content": "These packages typically perform a step-by-step and subject-by-subject processing and require a large amount of manual operation. To date, a toolbox supporting a fully automated processing of raw ASL data, with minimum user intervention that can be used for effective comparison of group data, is not yet available."
316
+ },
317
+ {
318
+ "type": "text",
319
+ "bbox": [
320
+ 0.116,
321
+ 0.529,
322
+ 0.884,
323
+ 0.72
324
+ ],
325
+ "angle": 0,
326
+ "content": "In this article, we describe the development, implementation and test of an ASL processing toolbox (ASAP) that can automatically process several ASL datasets, from their raw image format to a spatially normalised, smoothed (if desired) version, with minimal user intervention. Ease of operation has been facilitated by a graphical user interface (GUI) whose operation is entirely intuitive. After the user sets the input/output and processing parameters using the GUI, the toolbox fully executes all processing steps for datasets of any number of subjects and results in data ready for second level statistical analysis. The data can be written in a variety of formats to facilitate its inclusion in several software packages for group analysis. The toolbox also has a facility to display the spatially normalised data in a manner that facilitates quality control by the user."
327
+ },
328
+ {
329
+ "type": "text",
330
+ "bbox": [
331
+ 0.117,
332
+ 0.728,
333
+ 0.844,
334
+ 0.781
335
+ ],
336
+ "angle": 0,
337
+ "content": "To assess the applicability and validity of the toolbox, we demonstrate its use in the study of hypoperfusion in a sample of healthy subjects at risk of progressing to Alzheimer's Disease (AD)."
338
+ },
339
+ {
340
+ "type": "title",
341
+ "bbox": [
342
+ 0.117,
343
+ 0.788,
344
+ 0.223,
345
+ 0.805
346
+ ],
347
+ "angle": 0,
348
+ "content": "2. Methods"
349
+ },
350
+ {
351
+ "type": "title",
352
+ "bbox": [
353
+ 0.129,
354
+ 0.818,
355
+ 0.472,
356
+ 0.836
357
+ ],
358
+ "angle": 0,
359
+ "content": "2.1. Toolbox processing procedures"
360
+ },
361
+ {
362
+ "type": "text",
363
+ "bbox": [
364
+ 0.117,
365
+ 0.843,
366
+ 0.825,
367
+ 0.878
368
+ ],
369
+ "angle": 0,
370
+ "content": "ASAP has been developed in MATLAB with the goal of simplifying the process of quantification and pre-processing of ASL studies. It includes functions like CBF"
371
+ }
372
+ ],
373
+ [
374
+ {
375
+ "type": "page_number",
376
+ "bbox": [
377
+ 0.869,
378
+ 0.092,
379
+ 0.882,
380
+ 0.106
381
+ ],
382
+ "angle": 0,
383
+ "content": "5"
384
+ },
385
+ {
386
+ "type": "text",
387
+ "bbox": [
388
+ 0.111,
389
+ 0.133,
390
+ 0.871,
391
+ 0.186
392
+ ],
393
+ "angle": 0,
394
+ "content": "quantification, skull stripping, co-registration, partial volume correction and normalisation. Different processing strategies have been made available depending on user requirements:"
395
+ },
396
+ {
397
+ "type": "text",
398
+ "bbox": [
399
+ 0.114,
400
+ 0.193,
401
+ 0.881,
402
+ 0.315
403
+ ],
404
+ "angle": 0,
405
+ "content": "- System requirements: ASAP is written in MATLAB under a Unix system (Linux or Mac OS) but it is not entirely a stand-alone utility. It accesses both FSL software and SPM libraries, which are two of the most widely available image processing platforms for MRI. These are invoked by the toolbox and are transparent to the user, but they must be installed independently by each user and added to the MATLAB path (including the FSLDIR environment variable). The software works equally well with earlier version of SPM or with the latest release (SPM-12)."
406
+ },
407
+ {
408
+ "type": "text",
409
+ "bbox": [
410
+ 0.114,
411
+ 0.316,
412
+ 0.885,
413
+ 0.455
414
+ ],
415
+ "angle": 0,
416
+ "content": "- Input data: The ASL input data can be the raw difference image (control image – labelled image) or the perfusion image (CBF map). Regardless of the input or the ASL modality used, computation of the CBF map is made according to the formula proposed in the recent article “Recommended implementation of arterial spin-labeled perfusion MRI for clinical applications” published by Alsop et al [15]. For subsequent spatial co-registration and normalisation, the user is able to choose between providing a high-resolution T1-weighted or T2-weighted structural scan. DICOM, NIfTI or ANALYZE formats are accepted."
417
+ },
418
+ {
419
+ "type": "text",
420
+ "bbox": [
421
+ 0.114,
422
+ 0.455,
423
+ 0.882,
424
+ 0.594
425
+ ],
426
+ "angle": 0,
427
+ "content": "- Resolution: The user can select between two different execution methods regarding the resolution of the images: the low-resolution native space of ASL or up-sampling the ASL images to the structural image high-resolution grid, typically of the order of \\(1 \\times 1 \\times 1 \\mathrm{~mm}\\) voxel size (acquisition matrix of \\(288 \\times 288\\) or \\(512 \\times 512\\) voxels with full brain coverage. The up-sampling is made by means of the spatial interpolation 'Nearest Neighbour', which preserves the grey values of the original voxel and ensures the consistency of CBF values. After the spatial normalization, the ASL voxel size is \\(2 \\times 2 \\times 2 \\mathrm{~mm}\\), the resolution of the MNI template."
428
+ },
429
+ {
430
+ "type": "text",
431
+ "bbox": [
432
+ 0.114,
433
+ 0.594,
434
+ 0.88,
435
+ 0.716
436
+ ],
437
+ "angle": 0,
438
+ "content": "- Cerebral blood flow quantification: Due to the fact that most multi-TI ASL sequences are currently only available as experimental or prototype versions, the toolbox only includes CBF quantification for single inversion time data. In that case, the ASL difference image should be provided as input. The CBF quantification map is calculated using the formula currently recommended method [15]. In addition to the difference image, a reference proton density image and the post labelling delay time employed are also required."
439
+ },
440
+ {
441
+ "type": "text",
442
+ "bbox": [
443
+ 0.114,
444
+ 0.716,
445
+ 0.88,
446
+ 0.892
447
+ ],
448
+ "angle": 0,
449
+ "content": "- Partial volume correction (PVC): ASAP provides the option of PVC of the ASL data. In its current version, two different methods are provided: 1) the method described by Asllani [16] and 2) a method based on a previous approached developed for PET (from here referred to as 'tghe PET method') that assumes perfusion of WM is globally \\(40\\%\\) of that of GM for correction of resting CBF [17]. Although the later is a more simplistic approach and has been largely superseded by the methods introduced by Asllani and Chappell, this method (hereafter referred to as the PET correction) is available in our toolbox because it has been applied historically in earlier ASL studies [18-20]. Asllani's algorithm is based on linear regression and represents the voxel intensity as a weighted sum of pure tissue contribution, where"
450
+ },
451
+ {
452
+ "type": "list",
453
+ "bbox": [
454
+ 0.114,
455
+ 0.193,
456
+ 0.885,
457
+ 0.892
458
+ ],
459
+ "angle": 0,
460
+ "content": null
461
+ }
462
+ ],
463
+ [
464
+ {
465
+ "type": "page_number",
466
+ "bbox": [
467
+ 0.869,
468
+ 0.092,
469
+ 0.882,
470
+ 0.105
471
+ ],
472
+ "angle": 0,
473
+ "content": "6"
474
+ },
475
+ {
476
+ "type": "text",
477
+ "bbox": [
478
+ 0.14,
479
+ 0.133,
480
+ 0.865,
481
+ 0.221
482
+ ],
483
+ "angle": 0,
484
+ "content": "the weighting coefficients are the tissue's fractional volume in the voxel. This algorithm is able to estimate the CBF for grey matter (GM) and white matter (WM) independently. The PET correction assumed that all contributions to perfusion are from brain tissue and that cerebrospinal fluid has no contribution. In that case, ASL intensities are corrected according to the following equation:"
485
+ },
486
+ {
487
+ "type": "equation",
488
+ "bbox": [
489
+ 0.344,
490
+ 0.221,
491
+ 0.568,
492
+ 0.239
493
+ ],
494
+ "angle": 0,
495
+ "content": "\\[\nI _ {\\text {c o r r}} = I _ {\\text {u n c o r r}} / \\left(P _ {G M} + 0. 4 ^ {*} P _ {W M}\\right)\n\\]"
496
+ },
497
+ {
498
+ "type": "text",
499
+ "bbox": [
500
+ 0.14,
501
+ 0.239,
502
+ 0.875,
503
+ 0.326
504
+ ],
505
+ "angle": 0,
506
+ "content": "where \\( I_{\\text{corr}} \\) and \\( I_{\\text{uncorr}} \\) are the corrected and uncorrected intensities, the 0.4 factor is the global ratio between WM and GM and \\( P_{\\text{GM}} \\) and \\( P_{\\text{WM}} \\) are the probabilities of GM and WM, respectively. The PVC option is only available when working in the low-resolution ASL space, thus having co-registered the high-resolution structural image to the ASL image."
507
+ },
508
+ {
509
+ "type": "text",
510
+ "bbox": [
511
+ 0.113,
512
+ 0.325,
513
+ 0.876,
514
+ 0.361
515
+ ],
516
+ "angle": 0,
517
+ "content": "- Execution mode: The toolbox includes a Graphical User Interface (GUI) where all the input data can be setup manually. Also, it has a batch mode for advanced users."
518
+ },
519
+ {
520
+ "type": "text",
521
+ "bbox": [
522
+ 0.113,
523
+ 0.367,
524
+ 0.842,
525
+ 0.386
526
+ ],
527
+ "angle": 0,
528
+ "content": "The main procedure of ASAP is shown in Figure 1 and includes the following steps:"
529
+ },
530
+ {
531
+ "type": "text",
532
+ "bbox": [
533
+ 0.114,
534
+ 0.392,
535
+ 0.682,
536
+ 0.409
537
+ ],
538
+ "angle": 0,
539
+ "content": "1. Optional CBF quantification for pCASL and PASL sequences."
540
+ },
541
+ {
542
+ "type": "text",
543
+ "bbox": [
544
+ 0.112,
545
+ 0.41,
546
+ 0.881,
547
+ 0.497
548
+ ],
549
+ "angle": 0,
550
+ "content": "2. Reorient the images. Structural and ASL images are reoriented to the AC-PC plane (Anterior Commissure - Posterior Commissure) and their origins are set to the AC. Setting of a common origin is advisable for superior performance of the subsequent processing steps. If the PD image is available, the PD image is reoriented to the AC-PC plane, applying the same transformation to the ASL image."
551
+ },
552
+ {
553
+ "type": "text",
554
+ "bbox": [
555
+ 0.113,
556
+ 0.497,
557
+ 0.844,
558
+ 0.564
559
+ ],
560
+ "angle": 0,
561
+ "content": "3. Rough skull-stripping of the initial resting state ASL map using the FSL Brain Extraction Tool (bet) using a conservative threshold. This step is useful for noisy ASL maps, in order to increase the quality of the rigid co-registration with the structural scan."
562
+ },
563
+ {
564
+ "type": "text",
565
+ "bbox": [
566
+ 0.112,
567
+ 0.567,
568
+ 0.877,
569
+ 0.67
570
+ ],
571
+ "angle": 0,
572
+ "content": "4. Estimation of the brain mask. Brain mask from the structural volume can be calculated by two different options: the FSL bet tool (recommended for T2-weighted high-resolution scan) or the SPM segmentation task (recommended for T1-weighted high resolution scan). The brain mask is required for excluding out-of-brain voxels, often encountered in subtraction techniques such as ASL. Segmentation of GM and WM probability maps is also required for the partial volume correction step."
573
+ },
574
+ {
575
+ "type": "text",
576
+ "bbox": [
577
+ 0.112,
578
+ 0.671,
579
+ 0.877,
580
+ 0.862
581
+ ],
582
+ "angle": 0,
583
+ "content": "5. Rigid co-registration between ASL and structural images using SPM function. ASL images are normally co-registered to anatomical images so they can be later normalized to the MNI space (or any other standard space) for group analysis. Also, the co-registration is required for the partial volume correction. T1-weighted or T2-weighted images can be used for co-registration. If direct co-registration of ASL and structural images is not reliable because of the poor signal-to-noise ratio and the limited structural features of perfusion images, the proton density (PD) image can also be used for co-registration, moving the ASL data in the process. Depending on the selected resolution, the co-registration will be made in the native space of the ASL data (down-sampling the resolution of the structural scan) or up-sampling the ASL to the high-resolution of the structural volume by interpolation."
584
+ },
585
+ {
586
+ "type": "text",
587
+ "bbox": [
588
+ 0.112,
589
+ 0.863,
590
+ 0.879,
591
+ 0.881
592
+ ],
593
+ "angle": 0,
594
+ "content": "6. Partial Volume Correction of the ASL maps using the methods available. Information"
595
+ },
596
+ {
597
+ "type": "list",
598
+ "bbox": [
599
+ 0.112,
600
+ 0.392,
601
+ 0.881,
602
+ 0.881
603
+ ],
604
+ "angle": 0,
605
+ "content": null
606
+ }
607
+ ],
608
+ [
609
+ {
610
+ "type": "page_number",
611
+ "bbox": [
612
+ 0.869,
613
+ 0.092,
614
+ 0.882,
615
+ 0.105
616
+ ],
617
+ "angle": 0,
618
+ "content": "7"
619
+ },
620
+ {
621
+ "type": "text",
622
+ "bbox": [
623
+ 0.141,
624
+ 0.133,
625
+ 0.874,
626
+ 0.237
627
+ ],
628
+ "angle": 0,
629
+ "content": "about the proportion of each tissue type (grey matter, white matter, and cerebrospinal fluid) is used to correct perfusion data. The method described by Asllani estimates both, partial GM and partial WM ASL maps. The PET correction method only estimates the partial GM ASL map. This option is only available if the structural scan has been down-sampled by means of the rigid co-registration step to the ASL image."
630
+ },
631
+ {
632
+ "type": "text",
633
+ "bbox": [
634
+ 0.113,
635
+ 0.238,
636
+ 0.88,
637
+ 0.307
638
+ ],
639
+ "angle": 0,
640
+ "content": "7. Skull-stripping of the ASL data. Apply the brain mask previously calculated to the coregistered and partial volume corrected ASL maps in order to exclude artefactual, finite 'perfusion' values in the extra-cerebral space (These arise in all ASL modalities because of the subtraction of control and labelled images)."
641
+ },
642
+ {
643
+ "type": "text",
644
+ "bbox": [
645
+ 0.113,
646
+ 0.308,
647
+ 0.88,
648
+ 0.393
649
+ ],
650
+ "angle": 0,
651
+ "content": "8. Spatial normalization. For comparison across subjects, location correspondence has to be established, so registration of all the individual images to a standardized space is required. Here, the images (both ASL and structural) are normalized to the MNI standard space using: 1) a MNI template selected by the user or 2) the transformation matrix earlier calculated by SPM during the segmentation process."
652
+ },
653
+ {
654
+ "type": "text",
655
+ "bbox": [
656
+ 0.113,
657
+ 0.395,
658
+ 0.876,
659
+ 0.481
660
+ ],
661
+ "angle": 0,
662
+ "content": "9. Smoothing. The resultant images in the standard space are ready for voxel-based statistical analysis. However, these images are commonly multiplied by a smoothing kernel larger than the voxel dimension to satisfy the random-field approximation employed in parametric statistics. The SPM Gaussian smoothing kernel is applied to the final ASL maps, the size of the kernel (in mm) is selectable by the user."
663
+ },
664
+ {
665
+ "type": "text",
666
+ "bbox": [
667
+ 0.114,
668
+ 0.482,
669
+ 0.862,
670
+ 0.533
671
+ ],
672
+ "angle": 0,
673
+ "content": "10. The resultant images can be directly used for statistical analysis. This procedure is very flexible, as most of the steps are optional. Thus, users can freely design the pipeline that best fit their needs."
674
+ },
675
+ {
676
+ "type": "list",
677
+ "bbox": [
678
+ 0.113,
679
+ 0.238,
680
+ 0.88,
681
+ 0.533
682
+ ],
683
+ "angle": 0,
684
+ "content": null
685
+ },
686
+ {
687
+ "type": "title",
688
+ "bbox": [
689
+ 0.125,
690
+ 0.546,
691
+ 0.795,
692
+ 0.582
693
+ ],
694
+ "angle": 0,
695
+ "content": "2.2. Testing the hypoperfusion in healthy subjects in risk of developing Alzheimer's disease by using the toolbox"
696
+ },
697
+ {
698
+ "type": "text",
699
+ "bbox": [
700
+ 0.111,
701
+ 0.589,
702
+ 0.861,
703
+ 0.696
704
+ ],
705
+ "angle": 0,
706
+ "content": "Several studies [1,21-25] have shown that Alzheimer's patients suffer from decreased perfusion in specific cortical and sub-cortical areas that may be associated to the subsequent cognitive and structural degeneration. A subgroup of the \"Proyecto Vallecas\" study, a 4-year longitudinal study over 1,000 subjects to assess normal healthy ageing and the appearance of neurodegenerative diseases, in particular AD; was selected to validate this hypothesis and demonstrate ASAP."
707
+ },
708
+ {
709
+ "type": "title",
710
+ "bbox": [
711
+ 0.113,
712
+ 0.706,
713
+ 0.248,
714
+ 0.725
715
+ ],
716
+ "angle": 0,
717
+ "content": "2.2.1. Subjects"
718
+ },
719
+ {
720
+ "type": "text",
721
+ "bbox": [
722
+ 0.111,
723
+ 0.731,
724
+ 0.871,
725
+ 0.892
726
+ ],
727
+ "angle": 0,
728
+ "content": "A two-group study comparing 25 healthy elderly subjects (7 men and 18 women, mean age \\(75 \\pm 3.6\\) years) and 25 elderly subjects at risk of developing Alzheimer's disease (8 men and 17 women, mean age \\(77 \\pm 4.5\\) years) was performed. All subjects were first included in the study as healthy subjects based on several psychological and neurological tests, including the Geriatric Depression Scale [26], a Mini-Mental State Examination (MMSE) [27] above 24 and Functional Activities Questionnaire (FAQ) [28] scores above 6 at the baseline assessment. All subjects included in the study show no signs of dementia or severe cognitive deterioration and they are able to manage and independent life without any mental disorder (cognitive or psychiatric) impeding daily"
729
+ }
730
+ ],
731
+ [
732
+ {
733
+ "type": "page_number",
734
+ "bbox": [
735
+ 0.869,
736
+ 0.092,
737
+ 0.882,
738
+ 0.106
739
+ ],
740
+ "angle": 0,
741
+ "content": "8"
742
+ },
743
+ {
744
+ "type": "text",
745
+ "bbox": [
746
+ 0.115,
747
+ 0.132,
748
+ 0.867,
749
+ 0.221
750
+ ],
751
+ "angle": 0,
752
+ "content": "functioning. All subjects underwent MRI examination as well as psychological and neurological assessment every 6-12 months. Informed consent was obtained from all participants prior to evaluation. The subjects selected as subjects at risk of developing AD were those whose left and right hippocampi suffered from a volume loss greater than 2 standard deviations from the sample mean."
753
+ },
754
+ {
755
+ "type": "title",
756
+ "bbox": [
757
+ 0.117,
758
+ 0.232,
759
+ 0.268,
760
+ 0.251
761
+ ],
762
+ "angle": 0,
763
+ "content": "2.2.2. Acquisition"
764
+ },
765
+ {
766
+ "type": "text",
767
+ "bbox": [
768
+ 0.115,
769
+ 0.257,
770
+ 0.868,
771
+ 0.433
772
+ ],
773
+ "angle": 0,
774
+ "content": "All subjects underwent MRI examination in a 3T Signa HDx MR scanner (GE Healthcare, Waukesha, WI) using an eight-channel phased array coil. The first sequence was a 3D T1 weighted SPGR with a TR=10.024ms, TE=4.56ms, TI=600ms, NEX=1, acquisition matrix=288x288, full brain coverage, resolution=1x1x1mm, flip angle=12. The second sequence was a 3D pCASL pulse sequence with full brain coverage, matrix size=128x128, resolution=1.875x1.875x4mm, flip angle = 155, labelling time 1.5s, post-labelling delay=2.025s, TR=4.733s, TE=9.812ms, NEX=3, acquisition time ~6min and was used to generate the regional cerebral blood flow (rCBF) maps. Both the perfusion difference image and the proton density image produced by this sequence were available for the study."
775
+ },
776
+ {
777
+ "type": "title",
778
+ "bbox": [
779
+ 0.117,
780
+ 0.445,
781
+ 0.329,
782
+ 0.463
783
+ ],
784
+ "angle": 0,
785
+ "content": "2.2.3. Image processing"
786
+ },
787
+ {
788
+ "type": "text",
789
+ "bbox": [
790
+ 0.115,
791
+ 0.469,
792
+ 0.887,
793
+ 0.592
794
+ ],
795
+ "angle": 0,
796
+ "content": "All 3D T1 weighted images were processed with Freesurfer [29] in order to obtain the cortical and subcortical volumes for each subject. The left and right hippocampi volume (LHV, RHV) were normalised by the total GM volume. This normalised measure allowed us to divide the sample into three groups: Control group ([LHV, RHV])(mean hippocampus \\((\\mathsf{MH}) + 1\\) std.), mean group (MH-2std.<[LHV, RHV]<MH+1std.) and probable AD group (PAD) ([LHV, RHV]<(MH-2std.). A selection of 25 PAD subjects and 25 age and gender matched controls was the final subset used to validate ASAP."
797
+ },
798
+ {
799
+ "type": "text",
800
+ "bbox": [
801
+ 0.115,
802
+ 0.598,
803
+ 0.884,
804
+ 0.722
805
+ ],
806
+ "angle": 0,
807
+ "content": "Thus, the input images for each subject for ASAP were two DICOM series: a 3D T1 weighted image and a raw ASL sequence (control-labelled subtraction and proton density images). The resulting processing pipeline (as described above) is shown in Figure 1. To evaluate the effect of PVE correction, prior to MNI normalization two different options were applied to the perfusion maps: no PVE correction and the Asllani's PVE correction with a regression-kernel of size \\(5 \\times 5 \\times 1\\) voxels. Therefore, for each subject, original CBF maps and Asllani's PVE-corrected CBF maps were obtained."
808
+ },
809
+ {
810
+ "type": "title",
811
+ "bbox": [
812
+ 0.117,
813
+ 0.733,
814
+ 0.334,
815
+ 0.751
816
+ ],
817
+ "angle": 0,
818
+ "content": "2.2.4. Statistical analysis"
819
+ },
820
+ {
821
+ "type": "text",
822
+ "bbox": [
823
+ 0.115,
824
+ 0.758,
825
+ 0.887,
826
+ 0.88
827
+ ],
828
+ "angle": 0,
829
+ "content": "Normalized and smoothed (6mm Gaussian kernel) CBF maps produced by ASAP (both PVE corrected and uncorrected) were employed for the voxel-based statistical analysis. Statistical maps for rejecting the null hypothesis of equal perfusion between healthy and subjects at risk of developing AD were generated by means of a two sample t-test analysis within the General Linear Model (GLM) (with gender and age as covariates and mean CBF value of each subject as a regressor) as implemented in the SPM software suite."
830
+ }
831
+ ],
832
+ [
833
+ {
834
+ "type": "page_number",
835
+ "bbox": [
836
+ 0.869,
837
+ 0.092,
838
+ 0.882,
839
+ 0.106
840
+ ],
841
+ "angle": 0,
842
+ "content": "9"
843
+ },
844
+ {
845
+ "type": "title",
846
+ "bbox": [
847
+ 0.117,
848
+ 0.133,
849
+ 0.214,
850
+ 0.15
851
+ ],
852
+ "angle": 0,
853
+ "content": "3. Results"
854
+ },
855
+ {
856
+ "type": "title",
857
+ "bbox": [
858
+ 0.117,
859
+ 0.157,
860
+ 0.668,
861
+ 0.176
862
+ ],
863
+ "angle": 0,
864
+ "content": "3.1. A MATLAB Toolbox for processing ASL images: ASAP"
865
+ },
866
+ {
867
+ "type": "text",
868
+ "bbox": [
869
+ 0.116,
870
+ 0.183,
871
+ 0.86,
872
+ 0.323
873
+ ],
874
+ "angle": 0,
875
+ "content": "ASAP has been developed for fully automated processing of ASL data. It is an open-source package and is freely available (sites.google.com/site/asltoolbox). ASAP provides a user friendly Graphical User Interface GUI (Figure 2). Users can perform several interactions with the embedded functions, e.g., setting inputs, outputs or different processing parameters. In the \"Input Files\" panel (see Figure 2) users can select all the input data while the \"Output Directories\" panel is used to designate the directory where the output files will be saved. In the \"Options\" panel, users can select the different processing parameters."
876
+ },
877
+ {
878
+ "type": "text",
879
+ "bbox": [
880
+ 0.116,
881
+ 0.33,
882
+ 0.88,
883
+ 0.54
884
+ ],
885
+ "angle": 0,
886
+ "content": "In addition, the advanced mode includes a \"load batch files\" option for loading the input files from text files to avoid having to select the input data individually through the GUI. With this option, a large number of datasets can be loaded into the toolbox for subsequent processing using the \"Options\" set in the panel and the same options will apply throughout for all subjects. In addition, the advanced mode contains the 'ROI Statistics' GUI (Figure 3) that offers the option to extract CBF values from anatomically or functionally defined Regions of Interest (ROI). This facility can simultaneously extract mean, median and maximum values from several ROIs in several CBF maps. Users only have to select the input files, ASL data and ROI masks (NiftI or .mat files are accepted), through the GUI (\"Select files\" action) or in batch mode (\"Load files\" action) from text files. Output results are saved in text files that can easily be incorporated into statistical analysis packages such as SPSS, etc."
887
+ },
888
+ {
889
+ "type": "text",
890
+ "bbox": [
891
+ 0.116,
892
+ 0.546,
893
+ 0.879,
894
+ 0.774
895
+ ],
896
+ "angle": 0,
897
+ "content": "Resultant files are stored in the directories specified by the user. Each procedure of the pipeline produces a new file, every step is recorded and files are not overwritten. The MNI normalized images can be directly used for statistical analysis, however, users can also use the intermediary results. As stated before, most of the steps described above are optional, so the procedure is very flexible and users can freely design the most appropriate pipeline. Also, the toolbox is designed to aid in reproducing some analysis by avoiding some processing steps. This feature is useful, for example, for applying different methods for partial volume correction on the same input data: if there are GM, WM and CSF maps in the same directory as the input structural scan, the toolbox does not apply the SPM segmentation step, using these files. We have performed additional extensive validation (as well as the one reported in this article) to ensure that the toolbox works correctly for both absolute perfusion images (CBF) and for perfusion-weighted difference images in which CBF computation is required."
898
+ },
899
+ {
900
+ "type": "title",
901
+ "bbox": [
902
+ 0.129,
903
+ 0.785,
904
+ 0.819,
905
+ 0.821
906
+ ],
907
+ "angle": 0,
908
+ "content": "3.2. Evaluation of hypoperfusion differences in healthy subjects at risk of developing Alzheimer's disease using the toolbox"
909
+ },
910
+ {
911
+ "type": "text",
912
+ "bbox": [
913
+ 0.117,
914
+ 0.828,
915
+ 0.872,
916
+ 0.881
917
+ ],
918
+ "angle": 0,
919
+ "content": "Figure 4 shows the result of the rigid co-registration step between the 3D T1 weighted images and the CBF maps. Both images match the same anatomical space; the 3D T1 structural image has been re-sampled (as well as the tissue probability maps) to the"
920
+ }
921
+ ],
922
+ [
923
+ {
924
+ "type": "page_number",
925
+ "bbox": [
926
+ 0.86,
927
+ 0.092,
928
+ 0.882,
929
+ 0.107
930
+ ],
931
+ "angle": 0,
932
+ "content": "10"
933
+ },
934
+ {
935
+ "type": "text",
936
+ "bbox": [
937
+ 0.115,
938
+ 0.132,
939
+ 0.882,
940
+ 0.168
941
+ ],
942
+ "angle": 0,
943
+ "content": "low-resolution of the CBF map with a 'b-spline' interpolation method. Figure 5 shows the tissue probability maps of GM and WM for one subject."
944
+ },
945
+ {
946
+ "type": "text",
947
+ "bbox": [
948
+ 0.115,
949
+ 0.175,
950
+ 0.882,
951
+ 0.366
952
+ ],
953
+ "angle": 0,
954
+ "content": "The partial volume effect in ASL data is shown in Figure 6. First and second rows show the 3D T1 weighted axial, sagittal and coronal planes for one patient and a detail of the left hippocampus and parahippocampal gyrus in the same planes respectively. Partial volume effect in the CBF map is shown in the third row and fourth row shows the result of Asllani's correction with a \\(5 \\times 5 \\times 1\\) low-resolution kernel. Figure 6 shows how the blood flow is increased in the whole region after PVC with Asllani's method. Table 1 shows quantitatively how the perfusion values change after the PVC in the whole region. Also, Table 2 shows the comparative results of CBF perfusion values for one subject after the PVC in the different subcortical brain structures for both hemispheres, showing the increase of perfusion for all ROIs, obtained after the PVC correction with Asllani's method."
955
+ },
956
+ {
957
+ "type": "text",
958
+ "bbox": [
959
+ 0.115,
960
+ 0.374,
961
+ 0.882,
962
+ 0.428
963
+ ],
964
+ "angle": 0,
965
+ "content": "The tissue probability maps (shown in Figure 5), the PVC method and the normalisation maps obtained from the anatomical images can be applied to the perfusion maps in order to obtain PVE corrected CBF maps in MNI space (Figure 7)."
966
+ },
967
+ {
968
+ "type": "text",
969
+ "bbox": [
970
+ 0.115,
971
+ 0.434,
972
+ 0.877,
973
+ 0.558
974
+ ],
975
+ "angle": 0,
976
+ "content": "After performing each of the steps previously shown, in our cohort of 50 elderly subjects, the statistical group analysis was performed by means of a two-sample t-test in both cases: CBF maps with Asllani's PVE correction and the original CBF maps without PVE correction. Age and gender were introduced as confounding variables in the model. Figure 8 shows the results of these two analysis (Figure 8a.- PVE corrected, and 8b.- PVE uncorrected) for a family-wise error (FWE) corrected \\( p_{\\text{FWE}} < 0.05 \\) (cluster region of 300 voxels). Table 3 shows the T score and p values for the two analysis."
977
+ },
978
+ {
979
+ "type": "text",
980
+ "bbox": [
981
+ 0.115,
982
+ 0.563,
983
+ 0.874,
984
+ 0.826
985
+ ],
986
+ "angle": 0,
987
+ "content": "These results indicate decreased perfusion in healthy subjects at risk of developing Alzheimer's disease. Areas of significant hypoperfusion in Figure 8a (PVE-corrected) correspond to: caudate, hippocampi, thalamus, parahippocampal gyrus, amygdala, cingulate gyrus, precuneus, left and right insula, superior temporal lobe, uncus and choroid plexus. Results from the statistical analysis without PVE correction (Figure 8b) show regions which appeared previously in the PVE corrected version (caudate, left hippocampus, right thalamus, anterior cingulate, right insula and choroid plexus). In both analyses, part of the perfusion deficit appears displaced into the region of the ventricular space, probably because of the inherent blurring of the 3D FSE stack-of-spiral readout of the ASL pulse sequence employed in this study. In a separate analyses, we confirmed that in fact the 'at-risk' cohort exhibits ventricular enlargement and reduction of grey matter volume in the vicinity of these areas. The combination of these results with the inclusion of PV correction in ASL studies, forms part of a larger separate investigation which is beyond the scope of this paper. These results are consistent with regions found by our studies and those of other authors [18,24,30-32]."
988
+ },
989
+ {
990
+ "type": "title",
991
+ "bbox": [
992
+ 0.115,
993
+ 0.833,
994
+ 0.245,
995
+ 0.851
996
+ ],
997
+ "angle": 0,
998
+ "content": "4. Discussion"
999
+ },
1000
+ {
1001
+ "type": "text",
1002
+ "bbox": [
1003
+ 0.115,
1004
+ 0.857,
1005
+ 0.831,
1006
+ 0.876
1007
+ ],
1008
+ "angle": 0,
1009
+ "content": "In this work, we have developed a MATLAB toolbox (ASAP) for systematically and"
1010
+ }
1011
+ ],
1012
+ [
1013
+ {
1014
+ "type": "page_number",
1015
+ "bbox": [
1016
+ 0.86,
1017
+ 0.092,
1018
+ 0.882,
1019
+ 0.107
1020
+ ],
1021
+ "angle": 0,
1022
+ "content": "11"
1023
+ },
1024
+ {
1025
+ "type": "text",
1026
+ "bbox": [
1027
+ 0.117,
1028
+ 0.132,
1029
+ 0.885,
1030
+ 0.361
1031
+ ],
1032
+ "angle": 0,
1033
+ "content": "automatically processing ASL datasets with minimal user intervention. The key advantage of ASAP is that it automates all the processing steps of ASL datasets for any number of subjects and the ability to work with reduced user input minimises the possibility of random and systematic errors. ASAP offers easily selectable option for almost all the stages of that process. The toolbox can produce perfusion data that is ready for statistical group analysis. A fully automated pipeline makes the data processing efficient and reduces potential mistakes by avoiding manual processing of individual steps. Besides, ASAP has a very friendly and easy to use GUI (Figure 2), allowing users to select the preferred options for each case. Depending on the datasets, users may change the options of some processing steps to optimize the processing quality. Prior programming knowledge is not required. One limitation of other existing toolboxes lies in the requirement of programming knowledge, which limits their accessibility to users with programming skills."
1034
+ },
1035
+ {
1036
+ "type": "text",
1037
+ "bbox": [
1038
+ 0.116,
1039
+ 0.366,
1040
+ 0.884,
1041
+ 0.7
1042
+ ],
1043
+ "angle": 0,
1044
+ "content": "In the present study, we applied ASAP to study possible changes in perfusion in a sample of healthy subjects in risk of developing AD. The analyses were run on a Macintosh OS X (10.6 Snow Leopard) computer with 8 GB of RAM and a 3.06 GHz Intel Core 2 Duo processor. The total running time was 3.44 hours (4.13 minutes per subject). The automatization of the whole post-processing pipeline minimises the variability introduced by human errors and decreases enormously the time needed to manually process all subjects. ASAP provides the images ready to perform statistical assessment. We have presented an evaluation of hypoperfusion in healthy subjects at risk of developing Alzheimer's disease and the results are consistent with those of previous studies that find decreased perfusion in Alzheimer's patients in similar regions. As an example, figure 8a and 8b show how the absence of PVE correction can lead to false negative findings. Regions affected by hypoperfusion have higher statistical significative and thus regions are more extensive due to the PVE correction. These results also highlight that PVE correction is required to maximise the predictive value of ASL in this field of research. Hypoperfusion in the right inferior insula and superior temporal lobe region can be detected with PVE corrected images whereas PVE uncorrected images cannot. This region in particular, is affected by atrophy in those subjects at the very early stages of Alzheimer's disease, but thanks to this PVE technique, it can be shown that a hypoperfusion pattern is prior to GM atrophy."
1045
+ },
1046
+ {
1047
+ "type": "text",
1048
+ "bbox": [
1049
+ 0.117,
1050
+ 0.705,
1051
+ 0.877,
1052
+ 0.88
1053
+ ],
1054
+ "angle": 0,
1055
+ "content": "We envisage the intuitive and user friendly nature of the ASAP toolbox will help to facilitate the application of ASL in the clinical environment, where the method can be easily employed by clinicians and technicians without the need of intensive training or knowledge of image processing techniques. As mentioned earlier, processing data with ASAP can be very flexible and users have the freedom to design the most appropriate pipeline targeted to their data. In order to know which is the best pipeline for the user's data, there are important recommendations in the user's manual for a proper use of the different options available in the toolbox. There is a description of which is the best choice for each stage, depending on the input data, such as T1-weighted or T2-weighted structural scan, or in which cases is useful to apply a specific option, like the"
1056
+ }
1057
+ ],
1058
+ [
1059
+ {
1060
+ "type": "page_number",
1061
+ "bbox": [
1062
+ 0.86,
1063
+ 0.092,
1064
+ 0.882,
1065
+ 0.107
1066
+ ],
1067
+ "angle": 0,
1068
+ "content": "12"
1069
+ },
1070
+ {
1071
+ "type": "text",
1072
+ "bbox": [
1073
+ 0.117,
1074
+ 0.133,
1075
+ 0.49,
1076
+ 0.152
1077
+ ],
1078
+ "angle": 0,
1079
+ "content": "rough skull-stripping or the PVE correction."
1080
+ },
1081
+ {
1082
+ "type": "text",
1083
+ "bbox": [
1084
+ 0.116,
1085
+ 0.157,
1086
+ 0.882,
1087
+ 0.405
1088
+ ],
1089
+ "angle": 0,
1090
+ "content": "High quality of co-registration between anatomical and perfusion images are key for optimal partial volume effect correction and normalisation steps. The results of employing our toolbox in this sample study, demonstrate that the software can produce a high level of accuracy of spatial normalisation, paying no penalty in quality as a result of fully automated operation. The assessment of the normalisation quality has been made qualitatively by comparing a minimum of 4 external cortical landmarks on the CBF maps and ensuring that they correspond to the same landmarks on the chosen template within a \\(\\pm 3\\mathrm{mm}\\) range. The same assessment has been made with at least 2 other sub-cortical landmarks. Nevertheless, further improvements such as the use of higher field warping options as those of the DARTEL library of SPM [33] are currently being incorporated for a subsequent version. Although this option will require the selection of a subgroup of images to generate an intermediate group specific template and computational time is likely to increase, the method may be more accurate and precise for group comparisons."
1091
+ },
1092
+ {
1093
+ "type": "text",
1094
+ "bbox": [
1095
+ 0.115,
1096
+ 0.412,
1097
+ 0.88,
1098
+ 0.551
1099
+ ],
1100
+ "angle": 0,
1101
+ "content": "One of the disadvantages of automatization is that some mistakes might go undetected. To ameliorate this problem, ASAP's interface includes a \"Quick check\" option for displaying the resultant MNI-normalized ASL images in a web browser once the processing is completed. This option allows an convenient quality assurance method, making it possible to check the normalization quality or whether any intermediate step has failed. Many functionalities and features are open for improvement in future versions of the software. Other labelling schemes as well as multi-TI ASL sequences, will be included in further versions of ASAP."
1102
+ },
1103
+ {
1104
+ "type": "title",
1105
+ "bbox": [
1106
+ 0.117,
1107
+ 0.559,
1108
+ 0.248,
1109
+ 0.575
1110
+ ],
1111
+ "angle": 0,
1112
+ "content": "5. Conclusion"
1113
+ },
1114
+ {
1115
+ "type": "text",
1116
+ "bbox": [
1117
+ 0.116,
1118
+ 0.584,
1119
+ 0.88,
1120
+ 0.671
1121
+ ],
1122
+ "angle": 0,
1123
+ "content": "In conclusion, the results for this specific study show the applicability of ASAP in the study of perfusion changes in elder people at risk of developing AD. Furthermore, these clinical results are consistent with previous AD studies. In summary, our toolbox provides a simple, flexible and reliable solution for ASL-related studies. It has an extendable design, and new functions or utilities can and will be added in the future."
1124
+ },
1125
+ {
1126
+ "type": "text",
1127
+ "bbox": [
1128
+ 0.116,
1129
+ 0.672,
1130
+ 0.88,
1131
+ 0.741
1132
+ ],
1133
+ "angle": 0,
1134
+ "content": "The ASAP manual and software can be obtained freely at sites.google.com/site/asl toolbox. Feedback from users will be encouraged to ensure the updated of the ASAP toolbox, in order to include future improvements in image processing methodology. We hope for rich participation from the ASL community."
1135
+ },
1136
+ {
1137
+ "type": "text",
1138
+ "bbox": [
1139
+ 0.117,
1140
+ 0.748,
1141
+ 0.882,
1142
+ 0.784
1143
+ ],
1144
+ "angle": 0,
1145
+ "content": "Acknowledgments: ASAP was partially supported by the COST Action \"Arterial Spin Labelling Initiative in Dementia\" (BMBS COST Action BM1103)."
1146
+ }
1147
+ ],
1148
+ [
1149
+ {
1150
+ "type": "page_number",
1151
+ "bbox": [
1152
+ 0.86,
1153
+ 0.092,
1154
+ 0.882,
1155
+ 0.107
1156
+ ],
1157
+ "angle": 0,
1158
+ "content": "13"
1159
+ },
1160
+ {
1161
+ "type": "title",
1162
+ "bbox": [
1163
+ 0.114,
1164
+ 0.133,
1165
+ 0.225,
1166
+ 0.151
1167
+ ],
1168
+ "angle": 0,
1169
+ "content": "References"
1170
+ },
1171
+ {
1172
+ "type": "ref_text",
1173
+ "bbox": [
1174
+ 0.115,
1175
+ 0.175,
1176
+ 0.857,
1177
+ 0.229
1178
+ ],
1179
+ "angle": 0,
1180
+ "content": "[1] Alsop DC, Dai W, Grossman M, Detre JA. Arterial spin labeling blood flow MRI: its role in the early characterization of Alzheimer's disease. J.Alzheimers Dis. 2010;20(3):871-80."
1181
+ },
1182
+ {
1183
+ "type": "ref_text",
1184
+ "bbox": [
1185
+ 0.115,
1186
+ 0.235,
1187
+ 0.882,
1188
+ 0.289
1189
+ ],
1190
+ "angle": 0,
1191
+ "content": "[2] Schuff N, Matsumoto S, Kmiecik J, Studholme C, Du A, Ezekiel F, et al. Cerebral blood flow in ischemic vascular dementia and Alzheimer's disease, measured by arterial spin-labeling magnetic resonance imaging. Alzheimers Dement. 2009;5(6):454-62."
1192
+ },
1193
+ {
1194
+ "type": "ref_text",
1195
+ "bbox": [
1196
+ 0.115,
1197
+ 0.295,
1198
+ 0.875,
1199
+ 0.331
1200
+ ],
1201
+ "angle": 0,
1202
+ "content": "[3] Mikita N, Mehta MA, Zelaya FO, Stringaris A. Using arterial spin labeling to examine mood states in youth. Brain and Behavior. 2015;5(6):e00339"
1203
+ },
1204
+ {
1205
+ "type": "ref_text",
1206
+ "bbox": [
1207
+ 0.115,
1208
+ 0.337,
1209
+ 0.863,
1210
+ 0.409
1211
+ ],
1212
+ "angle": 0,
1213
+ "content": "[4] Pollak TA, De Simoni S, Barimani B, Zelaya FO, Stone JM, Mehta MA. Phenomenologically distinct psychotomimetic effects of ketamine are associated with cerebral blood flow changes in functionally relevant cerebral foci: a continuous arterial spin labelling study. Psychopharmacology. 2015; (Epub ahead of print)"
1214
+ },
1215
+ {
1216
+ "type": "ref_text",
1217
+ "bbox": [
1218
+ 0.115,
1219
+ 0.415,
1220
+ 0.875,
1221
+ 0.468
1222
+ ],
1223
+ "angle": 0,
1224
+ "content": "[5] Hodkinson DJ, Khawaja N, O'Daly O, Thacker MA, Zelaya FO, Wooldridge CL, et al. Cerebral analgesic response to nonsteroidal anti-inflammatory drug ibuprofen. Pain. 2015;156(7):1301-10"
1225
+ },
1226
+ {
1227
+ "type": "ref_text",
1228
+ "bbox": [
1229
+ 0.115,
1230
+ 0.474,
1231
+ 0.866,
1232
+ 0.528
1233
+ ],
1234
+ "angle": 0,
1235
+ "content": "[6] Ye F, Frank JA, Weinberger DR, McLaughlin AC. Noise reduction in 3D perfusion imaging by attenuating the static signal in arterial spin tagging (ASSIST) Magn. Reson Med (2000) 44: 92-100"
1236
+ },
1237
+ {
1238
+ "type": "ref_text",
1239
+ "bbox": [
1240
+ 0.115,
1241
+ 0.534,
1242
+ 0.882,
1243
+ 0.588
1244
+ ],
1245
+ "angle": 0,
1246
+ "content": "[7] Parkes LM, Rashid W, Chard DT, Tofts PS. Normal cerebral perfusion measurements using arterial spin labeling: Reproducibility, stability, and age and gender effects. Magnetic Resonance in Medicine 2004;51(4):736-43."
1247
+ },
1248
+ {
1249
+ "type": "ref_text",
1250
+ "bbox": [
1251
+ 0.117,
1252
+ 0.594,
1253
+ 0.864,
1254
+ 0.631
1255
+ ],
1256
+ "angle": 0,
1257
+ "content": "[8] Petersen ET, Zimine I, Ho YL, Golay X. Non-invasive measurement of perfusion: a critical review of arterial spin labelling techniques. Br.J.Radiol. 2006;79(944):688-701."
1258
+ },
1259
+ {
1260
+ "type": "ref_text",
1261
+ "bbox": [
1262
+ 0.115,
1263
+ 0.637,
1264
+ 0.866,
1265
+ 0.689
1266
+ ],
1267
+ "angle": 0,
1268
+ "content": "[9] Mutsaerts HJMM, Steketee RME, Heijtel DFR, Kuijer JPA, Osch MJPv, Majoie CBLM, et al. Inter-Vendor Reproducibility of Pseudo-Continuous Arterial Spin Labeling at 3 Tesla. PLoS One. 2014;9(8), e104108."
1269
+ },
1270
+ {
1271
+ "type": "ref_text",
1272
+ "bbox": [
1273
+ 0.117,
1274
+ 0.696,
1275
+ 0.803,
1276
+ 0.733
1277
+ ],
1278
+ "angle": 0,
1279
+ "content": "[10] Chappell MA, Groves AR, Whitcher B, Woolrich MW. Variational Bayesian Inference for a Nonlinear Forward Model. Trans.Sig.Proc. 2009;57(1):223-36."
1280
+ },
1281
+ {
1282
+ "type": "ref_text",
1283
+ "bbox": [
1284
+ 0.115,
1285
+ 0.739,
1286
+ 0.853,
1287
+ 0.793
1288
+ ],
1289
+ "angle": 0,
1290
+ "content": "[11] Wang Z, Aguirre GK, Rao H, Wang J, Fernandez-Seara MA, Childress AR, et al. Empirical optimization of ASL data analysis using an ASL data processing toolbox: ASLtbx. Magn.Reson.Imaging 2008;26(2):261-9."
1291
+ },
1292
+ {
1293
+ "type": "ref_text",
1294
+ "bbox": [
1295
+ 0.117,
1296
+ 0.799,
1297
+ 0.783,
1298
+ 0.835
1299
+ ],
1300
+ "angle": 0,
1301
+ "content": "[12] Jenkinson M, Beckmann CF, Behrens TE, Woolrich MW, Smith SM. Fsl. Neuroimage 2012;62(2):782-90."
1302
+ },
1303
+ {
1304
+ "type": "ref_text",
1305
+ "bbox": [
1306
+ 0.115,
1307
+ 0.841,
1308
+ 0.847,
1309
+ 0.877
1310
+ ],
1311
+ "angle": 0,
1312
+ "content": "[13] The MathWorks, Inc. 2012; Available at: http://www.mathworks.com/. Accessed 12/03, 2015."
1313
+ },
1314
+ {
1315
+ "type": "list",
1316
+ "bbox": [
1317
+ 0.115,
1318
+ 0.175,
1319
+ 0.882,
1320
+ 0.877
1321
+ ],
1322
+ "angle": 0,
1323
+ "content": null
1324
+ }
1325
+ ],
1326
+ [
1327
+ {
1328
+ "type": "page_number",
1329
+ "bbox": [
1330
+ 0.86,
1331
+ 0.092,
1332
+ 0.882,
1333
+ 0.106
1334
+ ],
1335
+ "angle": 0,
1336
+ "content": "14"
1337
+ },
1338
+ {
1339
+ "type": "ref_text",
1340
+ "bbox": [
1341
+ 0.115,
1342
+ 0.132,
1343
+ 0.88,
1344
+ 0.186
1345
+ ],
1346
+ "angle": 0,
1347
+ "content": "[14] SPM. Statistical Parametric Mapping. The Wellcome Trust Centre for neuroimaging at University College of London. 2015; Available at: http://www.fil.ion.ucl.ac.uk/spm/. Accessed 12/03, 2015."
1348
+ },
1349
+ {
1350
+ "type": "ref_text",
1351
+ "bbox": [
1352
+ 0.115,
1353
+ 0.192,
1354
+ 0.867,
1355
+ 0.263
1356
+ ],
1357
+ "angle": 0,
1358
+ "content": "[15] Alsop DC, Detre JA, Golay X, Gunther M, Hendrikse J, Hernandez-Garcia L, et al. Recommended implementation of arterial spin-labeled perfusion MRI for clinical applications: A consensus of the ISMRM perfusion study group and the European consortium for ASL in dementia. Magn.Reson.Med. 2014; 73:102-116"
1359
+ },
1360
+ {
1361
+ "type": "ref_text",
1362
+ "bbox": [
1363
+ 0.115,
1364
+ 0.269,
1365
+ 0.842,
1366
+ 0.307
1367
+ ],
1368
+ "angle": 0,
1369
+ "content": "[16] Asllani I, Borogovac A, Brown TR. Regression algorithm correcting for partial volume effects in arterial spin labeling MRI. Magn.Reson.Med. 2008;60(6):1362-71."
1370
+ },
1371
+ {
1372
+ "type": "ref_text",
1373
+ "bbox": [
1374
+ 0.115,
1375
+ 0.311,
1376
+ 0.87,
1377
+ 0.366
1378
+ ],
1379
+ "angle": 0,
1380
+ "content": "[17] Meltzer CC, Leal JP, Mayberg HS, Wagner HNJ, Frost JJ. Correction of PET Data for Partial Volume Effects in Human Cerebral Cortex by MR Imaging. J.Comput.Assist.Tomogr. 1990;14(4)."
1381
+ },
1382
+ {
1383
+ "type": "ref_text",
1384
+ "bbox": [
1385
+ 0.115,
1386
+ 0.371,
1387
+ 0.872,
1388
+ 0.442
1389
+ ],
1390
+ "angle": 0,
1391
+ "content": "[18] Johnson NA, Jahng GH, Weiner MW, Miller BL, Chui HC, Jagust WJ, et al. Pattern of cerebral hypoperfusion in Alzheimer disease and mild cognitive impairment measured with arterial spin-labeling MR imaging: initial experience. Radiology 2005;234(3):851-9."
1392
+ },
1393
+ {
1394
+ "type": "ref_text",
1395
+ "bbox": [
1396
+ 0.115,
1397
+ 0.448,
1398
+ 0.858,
1399
+ 0.504
1400
+ ],
1401
+ "angle": 0,
1402
+ "content": "[19] Du AT, Jahng GH, Hayasaka S, Kramer JH, Rosen HJ, Gorno-Tempini ML, et al. Hypoperfusion in frontotemporal dementia and Alzheimer disease by arterial spin labeling MRI. Neurology 2006;67(7):1215-20."
1403
+ },
1404
+ {
1405
+ "type": "ref_text",
1406
+ "bbox": [
1407
+ 0.115,
1408
+ 0.509,
1409
+ 0.842,
1410
+ 0.563
1411
+ ],
1412
+ "angle": 0,
1413
+ "content": "[20] Chen Y, Wolk DA, Reddin JS, Korczykowski M, Martinez PM, Musiek ES, et al. Voxel-level comparison of arterial spin-labeled perfusion MRI and FDG-PET in Alzheimer disease. Neurology 2011;77(22):1977-85."
1414
+ },
1415
+ {
1416
+ "type": "ref_text",
1417
+ "bbox": [
1418
+ 0.115,
1419
+ 0.569,
1420
+ 0.841,
1421
+ 0.622
1422
+ ],
1423
+ "angle": 0,
1424
+ "content": "[21] Mazza M, Marano G, Traversi G, Bria P, Mazza S. Primary cerebral blood flow deficiency and Alzheimer's disease: shadows and lights. J.Alzheimers Dis. 2011;23(3):375-89."
1425
+ },
1426
+ {
1427
+ "type": "ref_text",
1428
+ "bbox": [
1429
+ 0.115,
1430
+ 0.628,
1431
+ 0.872,
1432
+ 0.699
1433
+ ],
1434
+ "angle": 0,
1435
+ "content": "[22] Johnson NA, Jahng GH, Weiner MW, Miller BL, Chui HC, Jagust WJ, et al. Pattern of cerebral hypoperfusion in Alzheimer disease and mild cognitive impairment measured with arterial spin-labeling MR imaging: initial experience. Radiology 2005;234(3):851-9."
1436
+ },
1437
+ {
1438
+ "type": "ref_text",
1439
+ "bbox": [
1440
+ 0.115,
1441
+ 0.705,
1442
+ 0.849,
1443
+ 0.761
1444
+ ],
1445
+ "angle": 0,
1446
+ "content": "[23] Xu G, Antuono PG, Jones J, Xu Y, Wu G, Ward D, et al. Perfusion fMRI detects deficits in regional CBF during memory-encoding tasks in MCI subjects. Neurology 2007;69(17):1650-6."
1447
+ },
1448
+ {
1449
+ "type": "ref_text",
1450
+ "bbox": [
1451
+ 0.115,
1452
+ 0.766,
1453
+ 0.875,
1454
+ 0.82
1455
+ ],
1456
+ "angle": 0,
1457
+ "content": "[24] Asllani I, Habeck C, Scarmeas N, Borogovac A, Brown TR, Stern Y. Multivariate and univariate analysis of continuous arterial spin labeling perfusion MRI in Alzheimer's disease. J.Cereb.Blood Flow Metab. 2008;28(4):725-36."
1458
+ },
1459
+ {
1460
+ "type": "ref_text",
1461
+ "bbox": [
1462
+ 0.117,
1463
+ 0.825,
1464
+ 0.849,
1465
+ 0.863
1466
+ ],
1467
+ "angle": 0,
1468
+ "content": "[25] Austin BP, Nair VA, Meier TB, Xu G, Rowley HA, Carlsson CM, et al. Effects of hypoperfusion in Alzheimer's disease. J.Alzheimers Dis. 2011;26 Suppl 3:123-33."
1469
+ },
1470
+ {
1471
+ "type": "ref_text",
1472
+ "bbox": [
1473
+ 0.115,
1474
+ 0.868,
1475
+ 0.851,
1476
+ 0.888
1477
+ ],
1478
+ "angle": 0,
1479
+ "content": "[26] Yesavage JA, Brink TL, Rose TL, Lum O, Huang V, Adey M, et al. Development"
1480
+ },
1481
+ {
1482
+ "type": "list",
1483
+ "bbox": [
1484
+ 0.115,
1485
+ 0.132,
1486
+ 0.88,
1487
+ 0.888
1488
+ ],
1489
+ "angle": 0,
1490
+ "content": null
1491
+ }
1492
+ ],
1493
+ [
1494
+ {
1495
+ "type": "page_number",
1496
+ "bbox": [
1497
+ 0.86,
1498
+ 0.092,
1499
+ 0.882,
1500
+ 0.107
1501
+ ],
1502
+ "angle": 0,
1503
+ "content": "15"
1504
+ },
1505
+ {
1506
+ "type": "ref_text",
1507
+ "bbox": [
1508
+ 0.115,
1509
+ 0.133,
1510
+ 0.783,
1511
+ 0.169
1512
+ ],
1513
+ "angle": 0,
1514
+ "content": "and validation of a geriatric depression screening scale: A preliminary report. J.Psychiatr.Res. 1982-1983;17(1):37-49."
1515
+ },
1516
+ {
1517
+ "type": "ref_text",
1518
+ "bbox": [
1519
+ 0.115,
1520
+ 0.175,
1521
+ 0.877,
1522
+ 0.228
1523
+ ],
1524
+ "angle": 0,
1525
+ "content": "[27] Folstein MF, Folstein SE, McHugh PR. \"Mini-mental state\". A practical method for grading the cognitive state of patients for the clinician. J.Psychiatr.Res. 1975;12(3):189-98."
1526
+ },
1527
+ {
1528
+ "type": "ref_text",
1529
+ "bbox": [
1530
+ 0.115,
1531
+ 0.234,
1532
+ 0.83,
1533
+ 0.272
1534
+ ],
1535
+ "angle": 0,
1536
+ "content": "[28] Pfeffer RI, Kurosaki TT, Harrah CH, Chance JM, Filos S. Measurement of functional activities in older adults in the community. J Gerontol 1982;37(3):323-9."
1537
+ },
1538
+ {
1539
+ "type": "ref_text",
1540
+ "bbox": [
1541
+ 0.116,
1542
+ 0.277,
1543
+ 0.621,
1544
+ 0.297
1545
+ ],
1546
+ "angle": 0,
1547
+ "content": "[29] Fischl B. FreeSurfer. Neuroimage 2012;62(2):774-81."
1548
+ },
1549
+ {
1550
+ "type": "ref_text",
1551
+ "bbox": [
1552
+ 0.115,
1553
+ 0.302,
1554
+ 0.798,
1555
+ 0.356
1556
+ ],
1557
+ "angle": 0,
1558
+ "content": "[30] Alsop DC, Detre JA, Grossman M. Assessment of cerebral blood flow in Alzheimer's disease by spin-labeled magnetic resonance imaging. Ann.Neurol. 2000;47(1):93-100."
1559
+ },
1560
+ {
1561
+ "type": "ref_text",
1562
+ "bbox": [
1563
+ 0.115,
1564
+ 0.362,
1565
+ 0.856,
1566
+ 0.415
1567
+ ],
1568
+ "angle": 0,
1569
+ "content": "[31] Du AT, Jahng GH, Hayasaka S, Kramer JH, Rosen HJ, Gorno-Tempini ML, et al. Hypoperfusion in frontotemporal dementia and Alzheimer disease by arterial spin labeling MRI. Neurology 2006;67(7):1215-20."
1570
+ },
1571
+ {
1572
+ "type": "ref_text",
1573
+ "bbox": [
1574
+ 0.115,
1575
+ 0.422,
1576
+ 0.846,
1577
+ 0.476
1578
+ ],
1579
+ "angle": 0,
1580
+ "content": "[32] Xu G, Antuono PG, Jones J, Xu Y, Wu G, Ward D, et al. Perfusion fMRI detects deficits in regional CBF during memory-encoding tasks in MCI subjects. Neurology 2007;69(17):1650-6."
1581
+ },
1582
+ {
1583
+ "type": "ref_text",
1584
+ "bbox": [
1585
+ 0.115,
1586
+ 0.482,
1587
+ 0.811,
1588
+ 0.518
1589
+ ],
1590
+ "angle": 0,
1591
+ "content": "[33] Ashburner J. A fast diffeomorphic image registration algorithm. Neuroimage 2007;38(1):95-113."
1592
+ },
1593
+ {
1594
+ "type": "list",
1595
+ "bbox": [
1596
+ 0.115,
1597
+ 0.133,
1598
+ 0.877,
1599
+ 0.518
1600
+ ],
1601
+ "angle": 0,
1602
+ "content": null
1603
+ }
1604
+ ],
1605
+ [
1606
+ {
1607
+ "type": "page_number",
1608
+ "bbox": [
1609
+ 0.86,
1610
+ 0.092,
1611
+ 0.882,
1612
+ 0.106
1613
+ ],
1614
+ "angle": 0,
1615
+ "content": "16"
1616
+ },
1617
+ {
1618
+ "type": "title",
1619
+ "bbox": [
1620
+ 0.114,
1621
+ 0.141,
1622
+ 0.191,
1623
+ 0.16
1624
+ ],
1625
+ "angle": 0,
1626
+ "content": "Tables"
1627
+ },
1628
+ {
1629
+ "type": "table",
1630
+ "bbox": [
1631
+ 0.258,
1632
+ 0.166,
1633
+ 0.742,
1634
+ 0.272
1635
+ ],
1636
+ "angle": 0,
1637
+ "content": "<table><tr><td></td><td>Original CBF</td><td>PVC CBF</td></tr><tr><td>Left Hippocampus</td><td>40±10</td><td>46±9</td></tr><tr><td>Right Hippocampus</td><td>42±11</td><td>42±11</td></tr><tr><td>Left Parahippocampal Gyrus</td><td>40±10</td><td>48±10</td></tr><tr><td>Right Parahippocampal Gyrus</td><td>35±8</td><td>44±10</td></tr></table>"
1638
+ },
1639
+ {
1640
+ "type": "table_caption",
1641
+ "bbox": [
1642
+ 0.111,
1643
+ 0.277,
1644
+ 0.882,
1645
+ 0.343
1646
+ ],
1647
+ "angle": 0,
1648
+ "content": "Table 1. CBF perfusion values (ml/100g/min) in the left and right hippocampus and parahippocampal gyrus (same regions and patient that Figure 6). Left column shows the original CBF values (mean±std) and right column shows the CBF values (mean±std) after the PVC using the Asllani's method with a 5x5x1 low-resolution kernel."
1649
+ },
1650
+ {
1651
+ "type": "table",
1652
+ "bbox": [
1653
+ 0.303,
1654
+ 0.37,
1655
+ 0.699,
1656
+ 0.637
1657
+ ],
1658
+ "angle": 0,
1659
+ "content": "<table><tr><td></td><td></td><td>Original CBF</td><td>PVC CBF</td></tr><tr><td rowspan=\"6\">Left Hemisphere</td><td>Amygdala</td><td>35±10</td><td>37±7</td></tr><tr><td>Caudate</td><td>35±13</td><td>38±10</td></tr><tr><td>Hippocampus</td><td>34±10</td><td>38±9</td></tr><tr><td>Pallidum</td><td>38±11</td><td>49±12</td></tr><tr><td>Putamen</td><td>42±8</td><td>44±6</td></tr><tr><td>Thalamus</td><td>44±17</td><td>54±16</td></tr><tr><td rowspan=\"6\">Right Hemisphere</td><td>Amygdala</td><td>34±7</td><td>38±4</td></tr><tr><td>Caudate</td><td>31±15</td><td>35±11</td></tr><tr><td>Hippocampus</td><td>34±12</td><td>38±8</td></tr><tr><td>Pallidum</td><td>27±6</td><td>42±11</td></tr><tr><td>Putamen</td><td>41±8</td><td>45±5</td></tr><tr><td>Thalamus</td><td>44±19</td><td>54±18</td></tr></table>"
1660
+ },
1661
+ {
1662
+ "type": "table_caption",
1663
+ "bbox": [
1664
+ 0.111,
1665
+ 0.642,
1666
+ 0.865,
1667
+ 0.707
1668
+ ],
1669
+ "angle": 0,
1670
+ "content": "Table 2. Example of CBF values (ml/100g/min) in the different subcortical brain structures for both hemispheres. Left column shows the original CBF values (mean±std) while right column shows the CBF values (mean±std) after the PVC using the Asllani's method with a 5x5x1 low-resolution kernel."
1671
+ },
1672
+ {
1673
+ "type": "table",
1674
+ "bbox": [
1675
+ 0.115,
1676
+ 0.712,
1677
+ 0.819,
1678
+ 0.817
1679
+ ],
1680
+ "angle": 0,
1681
+ "content": "<table><tr><td></td><td>T value (p-value) Non-PVC</td><td>T value (p-value) PVC</td></tr><tr><td>Left Hippocampus</td><td>2.58 (0.006465)</td><td>3.28 (0.000958)</td></tr><tr><td>Right Hippocampus</td><td>2.70 (0.004746)</td><td>3.25 (0.001045)</td></tr><tr><td>Left Parahippocampal Gyrus</td><td>1.76 (0.042325)</td><td>2.24 (0.01483)</td></tr><tr><td>Right Parahippocampal Gyrus</td><td>1.66 (0.051651)</td><td>2.52 (0.007522)</td></tr></table>"
1682
+ },
1683
+ {
1684
+ "type": "table_caption",
1685
+ "bbox": [
1686
+ 0.111,
1687
+ 0.822,
1688
+ 0.875,
1689
+ 0.888
1690
+ ],
1691
+ "angle": 0,
1692
+ "content": "Table 3. T scores and p-values (in brackets) of the statistical group analysis (\\(p_{FWE}<0.05\\), cluster region of 300 voxels) in the same regions that Table 1 and Figure 6. Left column shows the results for the original CBF maps (non-PVC corrected) and right column shows the results for the final CBF after the PVC using the Asllani's method with a 5x5x1 low-resolution kernel."
1693
+ }
1694
+ ],
1695
+ [
1696
+ {
1697
+ "type": "page_number",
1698
+ "bbox": [
1699
+ 0.86,
1700
+ 0.092,
1701
+ 0.882,
1702
+ 0.106
1703
+ ],
1704
+ "angle": 0,
1705
+ "content": "17"
1706
+ },
1707
+ {
1708
+ "type": "title",
1709
+ "bbox": [
1710
+ 0.117,
1711
+ 0.161,
1712
+ 0.289,
1713
+ 0.182
1714
+ ],
1715
+ "angle": 0,
1716
+ "content": "Figure Legends"
1717
+ },
1718
+ {
1719
+ "type": "text",
1720
+ "bbox": [
1721
+ 0.116,
1722
+ 0.206,
1723
+ 0.852,
1724
+ 0.258
1725
+ ],
1726
+ "angle": 0,
1727
+ "content": "Figure 1. Pipeline for processing ASL datasets in ASLToolbox. Each box represents a main step in ASLToolbox's procedure and top dotted line boxes represent the input data."
1728
+ },
1729
+ {
1730
+ "type": "text",
1731
+ "bbox": [
1732
+ 0.117,
1733
+ 0.266,
1734
+ 0.868,
1735
+ 0.301
1736
+ ],
1737
+ "angle": 0,
1738
+ "content": "Figure 2. Graphical User Interface of the ASLToolbox for loading dataset. It consists of three main sections namely \"Input Files\", \"Output Directories\" and \"Options\"."
1739
+ },
1740
+ {
1741
+ "type": "text",
1742
+ "bbox": [
1743
+ 0.117,
1744
+ 0.309,
1745
+ 0.78,
1746
+ 0.326
1747
+ ],
1748
+ "angle": 0,
1749
+ "content": "Figure 3. Graphical User Interface for ROI Statistics analysis in ASLToolbox."
1750
+ },
1751
+ {
1752
+ "type": "text",
1753
+ "bbox": [
1754
+ 0.117,
1755
+ 0.334,
1756
+ 0.875,
1757
+ 0.386
1758
+ ],
1759
+ "angle": 0,
1760
+ "content": "Figure 4. Example of the result of the co-registration step between a 3DT1 weighted image (background) and a CBF map (overlay). The 3DT1 structural image has been resampled to the resolution of the ASL data."
1761
+ },
1762
+ {
1763
+ "type": "text",
1764
+ "bbox": [
1765
+ 0.117,
1766
+ 0.393,
1767
+ 0.874,
1768
+ 0.446
1769
+ ],
1770
+ "angle": 0,
1771
+ "content": "Figure 5. Tissue probability maps of GM (rows 1,3,5) and WM (rows 2,4,6) of a subject, registered onto the 3DT1 structural scan on a sagittal (rows 1,2), coronal (rows 3,4) and axial planes (rows 5,6). These maps were used for the PVC of the CBF maps."
1772
+ },
1773
+ {
1774
+ "type": "text",
1775
+ "bbox": [
1776
+ 0.117,
1777
+ 0.453,
1778
+ 0.875,
1779
+ 0.54
1780
+ ],
1781
+ "angle": 0,
1782
+ "content": "Figure 6. 3D T1 weighted axial, sagittal and coronal planes for one patient (first row), detail of the left hippocampus and parahippocampal gyrus in the same planes (second, third and fourth rows). Color overlay in third and fourth row correspond to the CBF map. Third row shows the partial volume effect in the original CBF and fourth row shows the result of Asllani's PVC with a \\(5 \\times 5 \\times 1\\) low-resolution kernel."
1783
+ },
1784
+ {
1785
+ "type": "text",
1786
+ "bbox": [
1787
+ 0.117,
1788
+ 0.548,
1789
+ 0.847,
1790
+ 0.583
1791
+ ],
1792
+ "angle": 0,
1793
+ "content": "Figure 7. Example of a final smoothed (6mm Gaussian kernel), MNI normalised and PVE corrected CBF map as an overlay onto the 3DT1 MNI template."
1794
+ },
1795
+ {
1796
+ "type": "text",
1797
+ "bbox": [
1798
+ 0.117,
1799
+ 0.591,
1800
+ 0.88,
1801
+ 0.643
1802
+ ],
1803
+ "angle": 0,
1804
+ "content": "Figure 8. Results of the statistical group comparison: significative hypoperfusion regions for healthy subjects in risk of developing AD pFWE<0.05 (minimum cluster 300 voxels) for: (a) PVE corrected CBF maps and (b) CBF maps with no PVE correction"
1805
+ }
1806
+ ]
1807
+ ]
2401.12xxx/2401.12603/4f4ba12d-6258-4c7f-8b4d-1da802d6095f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d15bc7ec170a465a995bbae2cc63311c41f8f047068a9954cd2b851fb0fbc7a8
3
+ size 134791
2401.12xxx/2401.12603/full.md ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ASAP (Automatic Software for ASL Processing): A toolbox for processing Arterial Spin Labeling images
2
+
3
+ Virginia Mato Abad<sup>1</sup>, Pablo García-Polo<sup>2</sup>, Owen O'Daly<sup>3</sup>, Juan Antonio Hernández-Tamames<sup>1</sup>, Fernando Zelaya<sup>3</sup>
4
+
5
+ $^{1}$ Laboratorio de Analisis de Imagen Médica y Biometría (LAIMBIO), Universidad Rey Juan Carlos, Mostoles, Madrid, Spain
6
+
7
+ $^{2}$ M+Visión Advanced Fellowship, Medical Imaging Lab., Hospital Universitario de Fuenlabrada, Fuenlabrada, Madrid, Spain
8
+
9
+ $^{3}$ Department of Neuroimaging, Institute of Psychiatry, King's College London, London, United Kingdom
10
+
11
+ # Corresponding Author:
12
+
13
+ Virginia Mato Abad
14
+
15
+ Universidad Rey Juan Carlos
16
+
17
+ Departamental II. Despacho 157.
18
+
19
+ Campus de Móstoles, C/Tulipán s/n
20
+
21
+ 28933, Móstoles, Madrid (Spain)
22
+
23
+ Telephone: +34 914888522
24
+
25
+ virginia.mato@urjc.es
26
+
27
+ # Abstract
28
+
29
+ The method of Arterial Spin Labeling (ASL) has experienced a significant rise in its application to functional imaging, since it is the only technique capable of measuring blood perfusion in a truly non-invasive manner. Currently, there are no commercial packages for processing ASL data and there is no recognised standard for normalising ASL data to a common frame of reference. This work describes a new Automatic Software for ASL Processing (ASAP) that can automatically process several ASL datasets. ASAP includes functions for all stages of image pre-processing: quantification, skull-stripping, co-registration, partial volume correction and normalization. To assess the applicability and validity of the toolbox, this work shows its application in the study of hypoperfusion in a sample of healthy subjects at risk of progressing to Alzheimer's Disease. ASAP requires limited user intervention, minimising the possibility of random and systematic errors, and produces cerebral blood flow maps that are ready for statistical group analysis. The software is easy to operate and results in excellent quality of spatial normalisation. The results found in this evaluation study are consistent with previous studies that find decreased perfusion in Alzheimer's patients in similar regions and demonstrate the applicability of ASAP.
30
+
31
+ Keywords: Arterial Spin Labeling, Cerebral Blood Flow, Automatic Processing, Partial volume effect, Alzheimer's Disease
32
+
33
+ # 1. Introduction
34
+
35
+ Arterial Spin Labelling (ASL) has become a popular magnetic resonance technique for imaging brain function. It is entirely non-invasive and capable of quantitatively determining regional blood perfusion; providing therefore a significant advantage over contrast agent based methods like $^{15}\mathrm{O}$ enriched $\mathrm{H}_2\mathrm{O}$ Positron Emission Tomography (PET) or Gadolinium-based Dynamic Susceptibility Contrast Magnetic Resonance Imaging (DSC-MRI). The basic principle of ASL is to employ arterial blood water itself as contrast agent to measure perfusion. For cerebral blood flow (CBF) this is obtained by tagging a bolus of arterial blood in the region of the carotid arteries. The magnetization of inflowing blood water protons is inverted in that region by means of an external radiofrequency pulse, which is applied either as a short pulse (10-20ms) or as a continuous or pseudo-continuous burst of radiofrequency (1-2s) in the presence of a gradient. After a period of time (post-labelling delay), blood labelled with inverted signal is delivered to the entire brain through the smaller arteries and capillaries. This labelled arterial blood signal gives rise to a reduction in the image intensity when compared to a non-labelled (control) image. The control and labelled images are subtracted to generate a 'perfusion weighted' image. The intensity of each voxel will reflect the amount of arterial blood delivered in the inversion time; and through the use of a suitable model, the difference image is transformed to a map of CBF in conventional physiological units of ml blood/100g tissue/min.
36
+
37
+ The availability of ASL as a routine method for assessment of basal CBF data has provided the possibility to examine brain physiology and generate a marker to probe functional differences between groups. ASL is increasingly used in clinical studies of cerebral perfusion and has shown its validity in measuring perfusion changes in several neurodegenerative diseases including Alzheimer Disease (AD) [1,2]; as well as in psychiatric studies [3], pharmacology [4] and pain [5]. However, to perform this type of analysis, multiple image processing steps are required: quantification, registration, normalization to a standard space, partial volume correction, etc.
38
+
39
+ Partial volume effects (PVE) are a consequence of limited spatial resolution in imaging and especially in ASL, where the low signal-to-noise (SNR) ratio leads to the need to employ larger voxels. In an effort to increase SNR, tissue specific saturation pulses are applied to the volume of interest to suppress the static tissue signal. This is known as 'background suppression' and it is now used extensively in ASL [6]. Nevertheless, the change in the received signal due to blood water proton relaxation remains very small, such that voxels are typically of the order of $3 \times 3 \times 6 \mathrm{~mm}$ , generating the need to employ some form of PVE correction as each voxel is likely to contain signal mixing from different tissue types. Normal grey matter (GM) perfusion values are around $60 \mathrm{ml} / 100 \mathrm{g} / \mathrm{min}$ while white matter (WM) values are significantly lower ( $20 \mathrm{ml} / 100 \mathrm{g} / \mathrm{min}$ ) [7]. Due to the relative insensitivity of ASL in white matter, the prime interest when using this technique is the study of pure GM perfusion. However, in voxels containing (for
40
+
41
+ example) $50\%$ GM and $50\%$ WM, the CBF values could be underestimated by up to one-third. PVE is of paramount importance in the study of neurodegenerative diseases where GM atrophy significantly affects CBF quantification and therefore the comparison of patient data with control populations.
42
+
43
+ The absence of a standard approach for data processing has been partly driven by the fact that several ASL methodologies have evolved independently [8]. Therefore, there is no recognised standard for normalising ASL data to a common frame of reference. This lack of a harmonised processing pipeline contributes to the potential discrepancies in studies of brain perfusion across different laboratories [9].
44
+
45
+ A number of packages, such as BASIL [10] and ASLTbx [11], provide a set of functions for pre-processing ASL data, and both are free for academic use. BASIL consists of a collection of tools from the FMRIB Software Library (FSL) suite [12] that aid in the quantification and subsequent spatial processing of CBF images acquired with ASL. BASIL is based on Bayesian inference principles and was originally developed for ASL data acquired with several post-labelling delays (known as 'multi-TI' data). ASLTbx is a MATLAB [13] and SPM [14] based toolkit for processing ASL data, which requires basic MATLAB script programming.
46
+
47
+ These packages typically perform step-by-step, subject-by-subject processing and require a large amount of manual operation. To date, a toolbox supporting fully automated processing of raw ASL data with minimal user intervention, which can be used for effective comparison of group data, is not yet available.
48
+
49
+ In this article, we describe the development, implementation and test of an ASL processing toolbox (ASAP) that can automatically process several ASL datasets, from their raw image format to a spatially normalised, smoothed (if desired) version, with minimal user intervention. Ease of operation has been facilitated by a graphical user interface (GUI) whose operation is entirely intuitive. After the user sets the input/output and processing parameters using the GUI, the toolbox fully executes all processing steps for datasets of any number of subjects and results in data ready for second level statistical analysis. The data can be written in a variety of formats to facilitate its inclusion in several software packages for group analysis. The toolbox also has a facility to display the spatially normalised data in a manner that facilitates quality control by the user.
50
+
51
+ To assess the applicability and validity of the toolbox, we demonstrate its use in the study of hypoperfusion in a sample of healthy subjects at risk of progressing to Alzheimer's Disease (AD).
52
+
53
+ # 2. Methods
54
+
55
+ # 2.1. Toolbox processing procedures
56
+
57
+ ASAP has been developed in MATLAB with the goal of simplifying the process of quantification and pre-processing of ASL studies. It includes functions like CBF
58
+
59
+ quantification, skull stripping, co-registration, partial volume correction and normalisation. Different processing strategies have been made available depending on user requirements:
60
+
61
+ - System requirements: ASAP is written in MATLAB under a Unix system (Linux or Mac OS) but it is not entirely a stand-alone utility. It accesses both FSL software and SPM libraries, which are two of the most widely available image processing platforms for MRI. These are invoked by the toolbox and are transparent to the user, but they must be installed independently by each user and added to the MATLAB path (including the FSLDIR environment variable). The software works equally well with earlier versions of SPM or with the latest release (SPM-12).
62
+ - Input data: The ASL input data can be the raw difference image (control image – labelled image) or the perfusion image (CBF map). Regardless of the input or the ASL modality used, computation of the CBF map is made according to the formula proposed in the recent article “Recommended implementation of arterial spin-labeled perfusion MRI for clinical applications” published by Alsop et al [15]. For subsequent spatial co-registration and normalisation, the user is able to choose between providing a high-resolution T1-weighted or T2-weighted structural scan. DICOM, NIfTI or ANALYZE formats are accepted.
63
+ - Resolution: The user can select between two different execution methods regarding the resolution of the images: the low-resolution native space of ASL, or up-sampling the ASL images to the high-resolution grid of the structural image, typically of the order of $1 \times 1 \times 1 \mathrm{~mm}$ voxel size (acquisition matrix of $288 \times 288$ or $512 \times 512$ voxels with full brain coverage). The up-sampling is made by means of 'nearest neighbour' spatial interpolation, which preserves the grey values of the original voxels and ensures the consistency of CBF values. After the spatial normalization, the ASL voxel size is $2 \times 2 \times 2 \mathrm{~mm}$, the resolution of the MNI template.
64
+ - Cerebral blood flow quantification: Because most multi-TI ASL sequences are currently only available as experimental or prototype versions, the toolbox only includes CBF quantification for single inversion time data. In that case, the ASL difference image should be provided as input. The CBF map is calculated using the currently recommended formula [15], reproduced after this list. In addition to the difference image, a reference proton density image and the post-labelling delay time employed are also required.
65
+ - Partial volume correction (PVC): ASAP provides the option of PVC of the ASL data. In its current version, two different methods are provided: 1) the method described by Asllani [16] and 2) a method based on a previous approach developed for PET (hereafter referred to as the 'PET correction') that assumes WM perfusion is globally $40\%$ of GM perfusion for correction of resting CBF [17]. Although the latter is a more simplistic approach and has been largely superseded by the methods introduced by Asllani and Chappell, it is available in our toolbox because it has been applied historically in earlier ASL studies [18-20]. Asllani's algorithm is based on linear regression and represents the voxel intensity as a weighted sum of pure tissue contributions, where
66
+
67
+ the weighting coefficients are the tissue's fractional volumes in the voxel. This algorithm is able to estimate the CBF for grey matter (GM) and white matter (WM) independently. The PET correction assumes that all contributions to perfusion arise from brain tissue and that cerebrospinal fluid contributes nothing. In that case, ASL intensities are corrected according to the following equation:
68
+
69
+ $$
70
+ I_{\text{corr}} = I_{\text{uncorr}} / \left( P_{GM} + 0.4 \cdot P_{WM} \right)
71
+ $$
72
+
73
+ where $I_{\text{corr}}$ and $I_{\text{uncorr}}$ are the corrected and uncorrected intensities, the factor 0.4 is the assumed global WM/GM perfusion ratio, and $P_{\text{GM}}$ and $P_{\text{WM}}$ are the GM and WM probabilities, respectively; a minimal numerical sketch of this correction is given after this list. The PVC option is only available when working in the low-resolution ASL space, i.e. after the high-resolution structural image has been co-registered to the ASL image.
74
+
75
+ - Execution mode: The toolbox includes a Graphical User Interface (GUI) where all the input data can be set up manually. Also, it has a batch mode for advanced users.
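+
+ For reference, the single post-labelling-delay quantification model recommended in [15] has the following form (reproduced here only as a reading aid; constants such as the labelling efficiency depend on the labelling scheme used):
+
+ $$
+ \mathrm{CBF} = \frac{6000 \, \lambda \, \Delta M \, e^{\mathrm{PLD}/T_{1,\mathrm{blood}}}}{2 \, \alpha \, T_{1,\mathrm{blood}} \, \mathrm{PD} \, \left(1 - e^{-\tau / T_{1,\mathrm{blood}}}\right)} \quad [\mathrm{ml}/100\mathrm{g}/\mathrm{min}]
+ $$
+
+ where $\Delta M$ is the control-label difference image, $\mathrm{PD}$ the proton density reference image, $\mathrm{PLD}$ the post-labelling delay, $\tau$ the labelling duration, $\alpha$ the labelling efficiency and $\lambda$ the brain-blood partition coefficient.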
76
+
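+ As an illustration of the PET-style correction above, a minimal NumPy sketch is given below (illustrative only, not ASAP's MATLAB implementation; the array names are hypothetical and the maps are assumed to be co-registered):
+
+ ```python
+ import numpy as np
+
+ def pet_pvc(cbf, p_gm, p_wm, min_tissue=0.1):
+     """PET-style PVC: I_corr = I_uncorr / (P_GM + 0.4 * P_WM).
+
+     cbf, p_gm, p_wm : co-registered 3D arrays (CBF map, GM and WM probabilities).
+     Voxels with almost no brain tissue are zeroed to avoid dividing by ~0.
+     """
+     denom = p_gm + 0.4 * p_wm
+     safe = np.maximum(denom, min_tissue)
+     return np.where(denom > min_tissue, cbf / safe, 0.0)
+ ```
+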
77
+ The main procedure of ASAP is shown in Figure 1 and includes the following steps:
78
+
79
+ 1. Optional CBF quantification for pCASL and PASL sequences.
80
+ 2. Reorient the images. Structural and ASL images are reoriented to the AC-PC plane (Anterior Commissure - Posterior Commissure) and their origins are set to the AC. Setting a common origin is advisable for superior performance of the subsequent processing steps. If the PD image is available, it is reoriented to the AC-PC plane and the same transformation is applied to the ASL image.
81
+ 3. Rough skull-stripping of the initial resting state ASL map using the FSL Brain Extraction Tool (bet) with a conservative threshold. This step is useful for noisy ASL maps, in order to increase the quality of the rigid co-registration with the structural scan.
82
+ 4. Estimation of the brain mask. Brain mask from the structural volume can be calculated by two different options: the FSL bet tool (recommended for T2-weighted high-resolution scan) or the SPM segmentation task (recommended for T1-weighted high resolution scan). The brain mask is required for excluding out-of-brain voxels, often encountered in subtraction techniques such as ASL. Segmentation of GM and WM probability maps is also required for the partial volume correction step.
83
+ 5. Rigid co-registration between ASL and structural images using SPM's co-registration function. ASL images are normally co-registered to anatomical images so that they can later be normalized to the MNI space (or any other standard space) for group analysis. Also, the co-registration is required for the partial volume correction. T1-weighted or T2-weighted images can be used for co-registration. If direct co-registration of ASL and structural images is not reliable because of the poor signal-to-noise ratio and the limited structural features of perfusion images, the proton density (PD) image can also be used for co-registration, moving the ASL data in the process. Depending on the selected resolution, the co-registration will be made in the native space of the ASL data (down-sampling the resolution of the structural scan) or up-sampling the ASL to the high-resolution of the structural volume by interpolation.
84
+ 6. Partial Volume Correction of the ASL maps using the methods available. Information
85
+
86
+ about the proportion of each tissue type (grey matter, white matter, and cerebrospinal fluid) is used to correct perfusion data. The method described by Asllani estimates both partial GM and partial WM ASL maps. The PET correction method only estimates the partial GM ASL map. This option is only available if the structural scan has been down-sampled to the ASL image by means of the rigid co-registration step.
87
+
88
+ 7. Skull-stripping of the ASL data. Apply the brain mask previously calculated to the coregistered and partial volume corrected ASL maps in order to exclude artefactual, finite 'perfusion' values in the extra-cerebral space (These arise in all ASL modalities because of the subtraction of control and labelled images).
89
+ 8. Spatial normalization. For comparison across subjects, location correspondence has to be established, so registration of all the individual images to a standardized space is required. Here, the images (both ASL and structural) are normalized to the MNI standard space using: 1) a MNI template selected by the user or 2) the transformation matrix earlier calculated by SPM during the segmentation process.
90
+ 9. Smoothing. The resultant images in the standard space are ready for voxel-based statistical analysis. However, these images are commonly convolved with a smoothing kernel larger than the voxel dimension to satisfy the random-field approximation employed in parametric statistics. The SPM Gaussian smoothing kernel is applied to the final ASL maps; the size of the kernel (in mm) is selectable by the user (see the sketch after this list).
91
+ 10. The resultant images can be directly used for statistical analysis. This procedure is very flexible, as most of the steps are optional. Thus, users can freely design the pipeline that best fits their needs.
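+
+ As an illustration of step 9, the following sketch shows how a smoothing kernel specified as a FWHM in mm is converted to a per-axis sigma in voxels (ASAP calls SPM's smoothing; this SciPy version is only a minimal stand-in with hypothetical names):
+
+ ```python
+ import numpy as np
+ from scipy.ndimage import gaussian_filter
+
+ def smooth_cbf(cbf, fwhm_mm=6.0, voxel_size_mm=(2.0, 2.0, 2.0)):
+     """Gaussian smoothing of a 3D CBF map with the kernel size given as FWHM in mm."""
+     factor = 2.0 * np.sqrt(2.0 * np.log(2.0))  # FWHM = 2*sqrt(2*ln 2)*sigma, i.e. ~2.355*sigma
+     sigmas = [fwhm_mm / (factor * vs) for vs in voxel_size_mm]  # per-axis sigma in voxel units
+     return gaussian_filter(cbf, sigma=sigmas)
+ ```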
92
+
93
+ # 2.2. Testing the hypoperfusion in healthy subjects in risk of developing Alzheimer's disease by using the toolbox
94
+
95
+ Several studies [1,21-25] have shown that Alzheimer's patients suffer from decreased perfusion in specific cortical and sub-cortical areas that may be associated with the subsequent cognitive and structural degeneration. A subgroup of the "Proyecto Vallecas" study, a 4-year longitudinal study following over 1,000 subjects to assess normal healthy ageing and the appearance of neurodegenerative diseases, in particular AD, was selected to validate this hypothesis and demonstrate ASAP.
96
+
97
+ # 2.2.1. Subjects
98
+
99
+ A two-group study comparing 25 healthy elderly subjects (7 men and 18 women, mean age $75 \pm 3.6$ years) and 25 elderly subjects at risk of developing Alzheimer's disease (8 men and 17 women, mean age $77 \pm 4.5$ years) was performed. All subjects were first included in the study as healthy subjects based on several psychological and neurological tests, including the Geriatric Depression Scale [26], a Mini-Mental State Examination (MMSE) [27] above 24 and Functional Activities Questionnaire (FAQ) [28] scores above 6 at the baseline assessment. All subjects included in the study showed no signs of dementia or severe cognitive deterioration and were able to manage an independent life without any mental disorder (cognitive or psychiatric) impeding daily
100
+
101
+ functioning. All subjects underwent MRI examination as well as psychological and neurological assessment every 6-12 months. Informed consent was obtained from all participants prior to evaluation. The subjects classified as at risk of developing AD were those whose left and right hippocampal volumes were more than 2 standard deviations below the sample mean.
102
+
103
+ # 2.2.2. Acquisition
104
+
105
+ All subjects underwent MRI examination in a 3T Signa HDx MR scanner (GE Healthcare, Waukesha, WI) using an eight-channel phased array coil. The first sequence was a 3D T1 weighted SPGR with a TR=10.024ms, TE=4.56ms, TI=600ms, NEX=1, acquisition matrix=288x288, full brain coverage, resolution=1x1x1mm, flip angle=12. The second sequence was a 3D pCASL pulse sequence with full brain coverage, matrix size=128x128, resolution=1.875x1.875x4mm, flip angle = 155, labelling time 1.5s, post-labelling delay=2.025s, TR=4.733s, TE=9.812ms, NEX=3, acquisition time ~6min and was used to generate the regional cerebral blood flow (rCBF) maps. Both the perfusion difference image and the proton density image produced by this sequence were available for the study.
106
+
107
+ # 2.2.3. Image processing
108
+
109
+ All 3D T1 weighted images were processed with Freesurfer [29] in order to obtain the cortical and subcortical volumes for each subject. The left and right hippocampal volumes (LHV, RHV) were normalised by the total GM volume. This normalised measure allowed us to divide the sample into three groups: control group ([LHV, RHV] > MH + 1 std., where MH denotes the mean hippocampal volume of the sample), mean group (MH - 2 std. < [LHV, RHV] < MH + 1 std.) and probable AD group (PAD) ([LHV, RHV] < MH - 2 std.). A selection of 25 PAD subjects and 25 age and gender matched controls was the final subset used to validate ASAP.
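+
+ The grouping rule can be summarised in a short sketch (one possible reading of the thresholds above; the variable names, and the pooling of left and right volumes into a single mean and standard deviation, are assumptions):
+
+ ```python
+ import numpy as np
+
+ def classify_subjects(lhv, rhv, gm_volume):
+     """Group subjects by GM-normalised hippocampal volume (illustrative only)."""
+     norm = np.stack([lhv, rhv], axis=1) / gm_volume[:, None]  # normalised [LHV, RHV] per subject
+     mh, sd = norm.mean(), norm.std()                          # mean hippocampus (MH) and std
+     labels = []
+     for left, right in norm:
+         if left < mh - 2 * sd and right < mh - 2 * sd:
+             labels.append("PAD")       # probable AD group
+         elif left > mh + sd and right > mh + sd:
+             labels.append("control")   # control group
+         else:
+             labels.append("mean")      # intermediate group
+     return labels
+ ```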
110
+
111
+ Thus, the input images for each subject for ASAP were two DICOM series: a 3D T1 weighted image and a raw ASL sequence (control-labelled subtraction and proton density images). The resulting processing pipeline (as described above) is shown in Figure 1. To evaluate the effect of PVE correction, prior to MNI normalization two different options were applied to the perfusion maps: no PVE correction and the Asllani's PVE correction with a regression-kernel of size $5 \times 5 \times 1$ voxels. Therefore, for each subject, original CBF maps and Asllani's PVE-corrected CBF maps were obtained.
112
+
113
+ # 2.2.4. Statistical analysis
114
+
115
+ Normalized and smoothed (6mm Gaussian kernel) CBF maps produced by ASAP (both PVE corrected and uncorrected) were employed for the voxel-based statistical analysis. Statistical maps for rejecting the null hypothesis of equal perfusion between healthy and subjects at risk of developing AD were generated by means of a two sample t-test analysis within the General Linear Model (GLM) (with gender and age as covariates and mean CBF value of each subject as a regressor) as implemented in the SPM software suite.
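+
+ For clarity, the voxel-wise two-sample GLM contrast can be written compactly as below (a minimal NumPy sketch with hypothetical variable names; the actual analysis was performed in SPM, with FWE correction and cluster thresholding applied afterwards):
+
+ ```python
+ import numpy as np
+
+ def voxelwise_group_ttest(cbf, group, age, gender, mean_cbf):
+     """t-map for 'control > at-risk' from a GLM with nuisance covariates.
+
+     cbf   : (n_subjects, n_voxels) smoothed, MNI-space CBF values
+     group : (n_subjects,) 0 = control, 1 = at-risk; age, gender, mean_cbf : (n_subjects,)
+     """
+     n = len(group)
+     X = np.column_stack([group == 0, group == 1, age, gender, mean_cbf]).astype(float)
+     c = np.array([1.0, -1.0, 0.0, 0.0, 0.0])       # contrast: control minus at-risk
+     beta = np.linalg.pinv(X) @ cbf                  # (n_regressors, n_voxels)
+     resid = cbf - X @ beta
+     dof = n - np.linalg.matrix_rank(X)
+     sigma2 = (resid ** 2).sum(axis=0) / dof         # residual variance per voxel
+     var_c = c @ np.linalg.pinv(X.T @ X) @ c         # contrast variance factor
+     return (c @ beta) / np.sqrt(sigma2 * var_c + 1e-12)
+ ```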
116
+
117
+ # 3. Results
118
+
119
+ # 3.1. A MATLAB Toolbox for processing ASL images: ASAP
120
+
121
+ ASAP has been developed for fully automated processing of ASL data. It is an open-source package and is freely available (sites.google.com/site/asltoolbox). ASAP provides a user-friendly Graphical User Interface (GUI) (Figure 2). Users can perform several interactions with the embedded functions, e.g., setting inputs, outputs or different processing parameters. In the "Input Files" panel (see Figure 2) users can select all the input data while the "Output Directories" panel is used to designate the directory where the output files will be saved. In the "Options" panel, users can select the different processing parameters.
122
+
123
+ In addition, the advanced mode includes a "load batch files" option for loading the input files from text files to avoid having to select the input data individually through the GUI. With this option, a large number of datasets can be loaded into the toolbox for subsequent processing using the "Options" set in the panel and the same options will apply throughout for all subjects. The advanced mode also contains the 'ROI Statistics' GUI (Figure 3), which offers the option to extract CBF values from anatomically or functionally defined Regions of Interest (ROIs). This facility can simultaneously extract mean, median and maximum values from several ROIs in several CBF maps. Users only have to select the input files, ASL data and ROI masks (NIfTI or .mat files are accepted), through the GUI ("Select files" action) or in batch mode ("Load files" action) from text files. Output results are saved in text files that can easily be incorporated into statistical analysis packages such as SPSS.
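+
+ The ROI extraction itself amounts to masking the CBF map with each ROI, as in the following sketch (an illustrative Python/nibabel version, not ASAP's MATLAB implementation; file paths and names are hypothetical):
+
+ ```python
+ import numpy as np
+ import nibabel as nib
+
+ def roi_statistics(cbf_path, roi_paths):
+     """Extract mean / median / max CBF within each binary ROI mask.
+
+     cbf_path  : path to a NIfTI CBF map
+     roi_paths : paths to binary NIfTI ROI masks in the same space as the CBF map
+     """
+     cbf = nib.load(cbf_path).get_fdata()
+     rows = []
+     for path in roi_paths:
+         mask = nib.load(path).get_fdata() > 0.5
+         values = cbf[mask]
+         rows.append((path, values.mean(), np.median(values), values.max()))
+     return rows
+ ```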
124
+
125
+ Resultant files are stored in the directories specified by the user. Each procedure of the pipeline produces a new file; every step is recorded and files are not overwritten. The MNI normalized images can be directly used for statistical analysis; however, users can also use the intermediate results. As stated before, most of the steps described above are optional, so the procedure is very flexible and users can freely design the most appropriate pipeline. Also, the toolbox is designed to aid in reproducing analyses by skipping processing steps whose outputs already exist. This feature is useful, for example, for applying different partial volume correction methods to the same input data: if GM, WM and CSF maps are already present in the same directory as the input structural scan, the toolbox skips the SPM segmentation step and uses these files instead. We have performed additional extensive validation (beyond that reported in this article) to ensure that the toolbox works correctly for both absolute perfusion images (CBF) and for perfusion-weighted difference images in which CBF computation is required.
126
+
127
+ # 3.2. Evaluation of hypoperfusion differences in healthy subjects at risk of developing Alzheimer's disease using the toolbox
128
+
129
+ Figure 4 shows the result of the rigid co-registration step between the 3D T1 weighted images and the CBF maps. Both images match the same anatomical space; the 3D T1 structural image has been re-sampled (as well as the tissue probability maps) to the
130
+
131
+ low-resolution of the CBF map with a 'b-spline' interpolation method. Figure 5 shows the tissue probability maps of GM and WM for one subject.
132
+
133
+ The partial volume effect in ASL data is shown in Figure 6. The first and second rows show the 3D T1 weighted axial, sagittal and coronal planes for one patient and a detail of the left hippocampus and parahippocampal gyrus in the same planes, respectively. The partial volume effect in the CBF map is shown in the third row, and the fourth row shows the result of Asllani's correction with a $5 \times 5 \times 1$ low-resolution kernel. Figure 6 shows how the blood flow is increased in the whole region after PVC with Asllani's method. Table 1 shows quantitatively how the perfusion values change after PVC in the whole region. Also, Table 2 shows the CBF values for one subject in the different subcortical brain structures of both hemispheres, showing an increase in perfusion for all ROIs after correction with Asllani's method.
134
+
135
+ The tissue probability maps (shown in Figure 5), the PVC method and the normalisation maps obtained from the anatomical images can be applied to the perfusion maps in order to obtain PVE corrected CBF maps in MNI space (Figure 7).
136
+
137
+ After performing each of the steps previously shown, in our cohort of 50 elderly subjects, the statistical group analysis was performed by means of a two-sample t-test in both cases: CBF maps with Asllani's PVE correction and the original CBF maps without PVE correction. Age and gender were introduced as confounding variables in the model. Figure 8 shows the results of these two analyses (Figure 8a, PVE corrected; Figure 8b, PVE uncorrected) for a family-wise error (FWE) corrected $p_{\text{FWE}} < 0.05$ (minimum cluster size of 300 voxels). Table 3 shows the T scores and p-values for the two analyses.
138
+
139
+ These results indicate decreased perfusion in healthy subjects at risk of developing Alzheimer's disease. Areas of significant hypoperfusion in Figure 8a (PVE-corrected) correspond to: caudate, hippocampi, thalamus, parahippocampal gyrus, amygdala, cingulate gyrus, precuneus, left and right insula, superior temporal lobe, uncus and choroid plexus. Results from the statistical analysis without PVE correction (Figure 8b) show regions which appeared previously in the PVE corrected version (caudate, left hippocampus, right thalamus, anterior cingulate, right insula and choroid plexus). In both analyses, part of the perfusion deficit appears displaced into the region of the ventricular space, probably because of the inherent blurring of the 3D FSE stack-of-spiral readout of the ASL pulse sequence employed in this study. In a separate analysis, we confirmed that the 'at-risk' cohort does in fact exhibit ventricular enlargement and reduction of grey matter volume in the vicinity of these areas. The combination of these results with the inclusion of PV correction in ASL studies forms part of a larger separate investigation which is beyond the scope of this paper. These results are consistent with the regions found in our own studies and those of other authors [18,24,30-32].
140
+
141
+ # 4. Discussion
142
+
143
+ In this work, we have developed a MATLAB toolbox (ASAP) for systematically and
144
+
145
+ automatically processing ASL datasets with minimal user intervention. The key advantage of ASAP is that it automates all the processing steps of ASL datasets for any number of subjects, and the ability to work with reduced user input minimises the possibility of random and systematic errors. ASAP offers easily selectable options for almost all the stages of that process. The toolbox can produce perfusion data that is ready for statistical group analysis. A fully automated pipeline makes the data processing efficient and reduces potential mistakes by avoiding manual processing of individual steps. In addition, ASAP has a friendly and easy-to-use GUI (Figure 2), allowing users to select the preferred options for each case. Depending on the datasets, users may change the options of some processing steps to optimize the processing quality. Prior programming knowledge is not required. In contrast, a limitation of other existing toolboxes is their requirement of programming knowledge, which restricts their use to researchers with programming skills.
146
+
147
+ In the present study, we applied ASAP to study possible changes in perfusion in a sample of healthy subjects at risk of developing AD. The analyses were run on a Macintosh OS X (10.6 Snow Leopard) computer with 8 GB of RAM and a 3.06 GHz Intel Core 2 Duo processor. The total running time was 3.44 hours (4.13 minutes per subject). The automation of the whole post-processing pipeline minimises the variability introduced by human error and greatly decreases the time needed to process all subjects manually. ASAP provides the images ready for statistical assessment. We have presented an evaluation of hypoperfusion in healthy subjects at risk of developing Alzheimer's disease, and the results are consistent with those of previous studies that find decreased perfusion in Alzheimer's patients in similar regions. As an example, Figures 8a and 8b show how the absence of PVE correction can lead to false negative findings. Regions affected by hypoperfusion reach higher statistical significance and are more extensive after PVE correction. These results also highlight that PVE correction is required to maximise the predictive value of ASL in this field of research. Hypoperfusion in the right inferior insula and superior temporal lobe region can be detected with PVE-corrected images but not with uncorrected images. This region in particular is affected by atrophy in subjects at the very early stages of Alzheimer's disease, but the PVE correction makes it possible to show that the hypoperfusion pattern precedes GM atrophy.
148
+
149
+ We envisage that the intuitive and user-friendly nature of the ASAP toolbox will help to facilitate the application of ASL in the clinical environment, where the method can be easily employed by clinicians and technicians without the need for intensive training or knowledge of image processing techniques. As mentioned earlier, processing data with ASAP can be very flexible and users have the freedom to design the most appropriate pipeline for their data. To help users choose the best pipeline for their data, the user's manual contains recommendations for the proper use of the different options available in the toolbox. It describes the best choice for each stage depending on the input data, such as a T1-weighted or T2-weighted structural scan, and the cases in which it is useful to apply a specific option, such as the
150
+
151
+ rough skull-stripping or the PVE correction.
152
+
153
+ High-quality co-registration between anatomical and perfusion images is key for optimal partial volume effect correction and normalisation. The results of employing our toolbox in this sample study demonstrate that the software can produce highly accurate spatial normalisation, with no penalty in quality as a result of fully automated operation. The assessment of the normalisation quality has been made qualitatively by comparing a minimum of 4 external cortical landmarks on the CBF maps and ensuring that they correspond to the same landmarks on the chosen template within a $\pm 3\mathrm{mm}$ range. The same assessment has been made with at least 2 other sub-cortical landmarks. Nevertheless, further improvements, such as the more advanced warping options of the DARTEL library of SPM [33], are currently being incorporated for a subsequent version. Although this option will require the selection of a subgroup of images to generate an intermediate group-specific template, and computational time is likely to increase, the method may be more accurate and precise for group comparisons.
154
+
155
+ One of the disadvantages of automation is that some mistakes might go undetected. To ameliorate this problem, ASAP's interface includes a "Quick check" option for displaying the resultant MNI-normalized ASL images in a web browser once the processing is completed. This option provides a convenient quality assurance method, making it possible to check the normalization quality or whether any intermediate step has failed. Many functionalities and features are open for improvement in future versions of the software. Other labelling schemes, as well as multi-TI ASL sequences, will be included in further versions of ASAP.
156
+
157
+ # 5. Conclusion
158
+
159
+ In conclusion, the results for this specific study show the applicability of ASAP in the study of perfusion changes in elderly people at risk of developing AD. Furthermore, these clinical results are consistent with previous AD studies. In summary, our toolbox provides a simple, flexible and reliable solution for ASL-related studies. It has an extendable design, and new functions or utilities can and will be added in the future.
160
+
161
+ The ASAP manual and software can be obtained freely at sites.google.com/site/asltoolbox. Feedback from users is encouraged to keep the ASAP toolbox updated and to incorporate future improvements in image processing methodology. We hope for rich participation from the ASL community.
162
+
163
+ Acknowledgments: ASAP was partially supported by the COST Action "Arterial Spin Labelling Initiative in Dementia" (BMBS COST Action BM1103).
164
+
165
+ # References
166
+
167
+ [1] Alsop DC, Dai W, Grossman M, Detre JA. Arterial spin labeling blood flow MRI: its role in the early characterization of Alzheimer's disease. J.Alzheimers Dis. 2010;20(3):871-80.
168
+ [2] Schuff N, Matsumoto S, Kmiecik J, Studholme C, Du A, Ezekiel F, et al. Cerebral blood flow in ischemic vascular dementia and Alzheimer's disease, measured by arterial spin-labeling magnetic resonance imaging. Alzheimers Dement. 2009;5(6):454-62.
169
+ [3] Mikita N, Mehta MA, Zelaya FO, Stringaris A. Using arterial spin labeling to examine mood states in youth. Brain and Behavior. 2015;5(6):e00339
170
+ [4] Pollak TA, De Simoni S, Barimani B, Zelaya FO, Stone JM, Mehta MA. Phenomenologically distinct psychotomimetic effects of ketamine are associated with cerebral blood flow changes in functionally relevant cerebral foci: a continuous arterial spin labelling study. Psychopharmacology. 2015; (Epub ahead of print)
171
+ [5] Hodkinson DJ, Khawaja N, O'Daly O, Thacker MA, Zelaya FO, Wooldridge CL, et al. Cerebral analgesic response to nonsteroidal anti-inflammatory drug ibuprofen. Pain. 2015;156(7):1301-10
172
+ [6] Ye F, Frank JA, Weinberger DR, McLaughlin AC. Noise reduction in 3D perfusion imaging by attenuating the static signal in arterial spin tagging (ASSIST). Magn.Reson.Med. 2000;44:92-100.
173
+ [7] Parkes LM, Rashid W, Chard DT, Tofts PS. Normal cerebral perfusion measurements using arterial spin labeling: Reproducibility, stability, and age and gender effects. Magnetic Resonance in Medicine 2004;51(4):736-43.
174
+ [8] Petersen ET, Zimine I, Ho YL, Golay X. Non-invasive measurement of perfusion: a critical review of arterial spin labelling techniques. Br.J.Radiol. 2006;79(944):688-701.
175
+ [9] Mutsaerts HJMM, Steketee RME, Heijtel DFR, Kuijer JPA, Osch MJPv, Majoie CBLM, et al. Inter-Vendor Reproducibility of Pseudo-Continuous Arterial Spin Labeling at 3 Tesla. PLoS One. 2014;9(8), e104108.
176
+ [10] Chappell MA, Groves AR, Whitcher B, Woolrich MW. Variational Bayesian Inference for a Nonlinear Forward Model. Trans.Sig.Proc. 2009;57(1):223-36.
177
+ [11] Wang Z, Aguirre GK, Rao H, Wang J, Fernandez-Seara MA, Childress AR, et al. Empirical optimization of ASL data analysis using an ASL data processing toolbox: ASLtbx. Magn.Reson.Imaging 2008;26(2):261-9.
178
+ [12] Jenkinson M, Beckmann CF, Behrens TE, Woolrich MW, Smith SM. Fsl. Neuroimage 2012;62(2):782-90.
179
+ [13] The MathWorks, Inc. 2012; Available at: http://www.mathworks.com/. Accessed 12/03, 2015.
180
+
181
+ [14] SPM. Statistical Parametric Mapping. The Wellcome Trust Centre for neuroimaging at University College of London. 2015; Available at: http://www.fil.ion.ucl.ac.uk/spm/. Accessed 12/03, 2015.
182
+ [15] Alsop DC, Detre JA, Golay X, Gunther M, Hendrikse J, Hernandez-Garcia L, et al. Recommended implementation of arterial spin-labeled perfusion MRI for clinical applications: A consensus of the ISMRM perfusion study group and the European consortium for ASL in dementia. Magn.Reson.Med. 2014; 73:102-116
183
+ [16] Asllani I, Borogovac A, Brown TR. Regression algorithm correcting for partial volume effects in arterial spin labeling MRI. Magn.Reson.Med. 2008;60(6):1362-71.
184
+ [17] Meltzer CC, Leal JP, Mayberg HS, Wagner HNJ, Frost JJ. Correction of PET Data for Partial Volume Effects in Human Cerebral Cortex by MR Imaging. J.Comput.Assist.Tomogr. 1990;14(4).
185
+ [18] Johnson NA, Jahng GH, Weiner MW, Miller BL, Chui HC, Jagust WJ, et al. Pattern of cerebral hypoperfusion in Alzheimer disease and mild cognitive impairment measured with arterial spin-labeling MR imaging: initial experience. Radiology 2005;234(3):851-9.
186
+ [19] Du AT, Jahng GH, Hayasaka S, Kramer JH, Rosen HJ, Gorno-Tempini ML, et al. Hypoperfusion in frontotemporal dementia and Alzheimer disease by arterial spin labeling MRI. Neurology 2006;67(7):1215-20.
187
+ [20] Chen Y, Wolk DA, Reddin JS, Korczykowski M, Martinez PM, Musiek ES, et al. Voxel-level comparison of arterial spin-labeled perfusion MRI and FDG-PET in Alzheimer disease. Neurology 2011;77(22):1977-85.
188
+ [21] Mazza M, Marano G, Traversi G, Bria P, Mazza S. Primary cerebral blood flow deficiency and Alzheimer's disease: shadows and lights. J.Alzheimers Dis. 2011;23(3):375-89.
189
+ [22] Johnson NA, Jahng GH, Weiner MW, Miller BL, Chui HC, Jagust WJ, et al. Pattern of cerebral hypoperfusion in Alzheimer disease and mild cognitive impairment measured with arterial spin-labeling MR imaging: initial experience. Radiology 2005;234(3):851-9.
190
+ [23] Xu G, Antuono PG, Jones J, Xu Y, Wu G, Ward D, et al. Perfusion fMRI detects deficits in regional CBF during memory-encoding tasks in MCI subjects. Neurology 2007;69(17):1650-6.
191
+ [24] Asllani I, Habeck C, Scarmeas N, Borogovac A, Brown TR, Stern Y. Multivariate and univariate analysis of continuous arterial spin labeling perfusion MRI in Alzheimer's disease. J.Cereb.Blood Flow Metab. 2008;28(4):725-36.
192
+ [25] Austin BP, Nair VA, Meier TB, Xu G, Rowley HA, Carlsson CM, et al. Effects of hypoperfusion in Alzheimer's disease. J.Alzheimers Dis. 2011;26 Suppl 3:123-33.
193
+ [26] Yesavage JA, Brink TL, Rose TL, Lum O, Huang V, Adey M, et al. Development
194
+
195
+ and validation of a geriatric depression screening scale: A preliminary report. J.Psychiatr.Res. 1982-1983;17(1):37-49.
196
+ [27] Folstein MF, Folstein SE, McHugh PR. "Mini-mental state". A practical method for grading the cognitive state of patients for the clinician. J.Psychiatr.Res. 1975;12(3):189-98.
197
+ [28] Pfeffer RI, Kurosaki TT, Harrah CH, Chance JM, Filos S. Measurement of functional activities in older adults in the community. J Gerontol 1982;37(3):323-9.
198
+ [29] Fischl B. FreeSurfer. Neuroimage 2012;62(2):774-81.
199
+ [30] Alsop DC, Detre JA, Grossman M. Assessment of cerebral blood flow in Alzheimer's disease by spin-labeled magnetic resonance imaging. Ann.Neurol. 2000;47(1):93-100.
200
+ [31] Du AT, Jahng GH, Hayasaka S, Kramer JH, Rosen HJ, Gorno-Tempini ML, et al. Hypoperfusion in frontotemporal dementia and Alzheimer disease by arterial spin labeling MRI. Neurology 2006;67(7):1215-20.
201
+ [32] Xu G, Antuono PG, Jones J, Xu Y, Wu G, Ward D, et al. Perfusion fMRI detects deficits in regional CBF during memory-encoding tasks in MCI subjects. Neurology 2007;69(17):1650-6.
202
+ [33] Ashburner J. A fast diffeomorphic image registration algorithm. Neuroimage 2007;38(1):95-113.
203
+
204
+ # Tables
205
+
206
+ <table><tr><td></td><td>Original CBF</td><td>PVC CBF</td></tr><tr><td>Left Hippocampus</td><td>40±10</td><td>46±9</td></tr><tr><td>Right Hippocampus</td><td>42±11</td><td>42±11</td></tr><tr><td>Left Parahippocampal Gyrus</td><td>40±10</td><td>48±10</td></tr><tr><td>Right Parahippocampal Gyrus</td><td>35±8</td><td>44±10</td></tr></table>
207
+
208
+ Table 1. CBF perfusion values (ml/100g/min) in the left and right hippocampus and parahippocampal gyrus (same regions and patient as Figure 6). Left column shows the original CBF values (mean±std) and right column shows the CBF values (mean±std) after PVC using Asllani's method with a 5x5x1 low-resolution kernel.
209
+
210
+ <table><tr><td></td><td></td><td>Original CBF</td><td>PVC CBF</td></tr><tr><td rowspan="6">Left Hemisphere</td><td>Amygdala</td><td>35±10</td><td>37±7</td></tr><tr><td>Caudate</td><td>35±13</td><td>38±10</td></tr><tr><td>Hippocampus</td><td>34±10</td><td>38±9</td></tr><tr><td>Pallidum</td><td>38±11</td><td>49±12</td></tr><tr><td>Putamen</td><td>42±8</td><td>44±6</td></tr><tr><td>Thalamus</td><td>44±17</td><td>54±16</td></tr><tr><td rowspan="6">Right Hemisphere</td><td>Amygdala</td><td>34±7</td><td>38±4</td></tr><tr><td>Caudate</td><td>31±15</td><td>35±11</td></tr><tr><td>Hippocampus</td><td>34±12</td><td>38±8</td></tr><tr><td>Pallidum</td><td>27±6</td><td>42±11</td></tr><tr><td>Putamen</td><td>41±8</td><td>45±5</td></tr><tr><td>Thalamus</td><td>44±19</td><td>54±18</td></tr></table>
211
+
212
+ Table 2. Example of CBF values (ml/100g/min) in the different subcortical brain structures for both hemispheres. Left column shows the original CBF values (mean±std) while right column shows the CBF values (mean±std) after PVC using Asllani's method with a 5x5x1 low-resolution kernel.
213
+
214
+ <table><tr><td></td><td>T value (p-value) Non-PVC</td><td>T value (p-value) PVC</td></tr><tr><td>Left Hippocampus</td><td>2.58 (0.006465)</td><td>3.28 (0.000958)</td></tr><tr><td>Right Hippocampus</td><td>2.70 (0.004746)</td><td>3.25 (0.001045)</td></tr><tr><td>Left Parahippocampal Gyrus</td><td>1.76 (0.042325)</td><td>2.24 (0.01483)</td></tr><tr><td>Right Parahippocampal Gyrus</td><td>1.66 (0.051651)</td><td>2.52 (0.007522)</td></tr></table>
215
+
216
+ Table 3. T scores and p-values (in brackets) of the statistical group analysis ( $p_{FWE}<0.05$ , minimum cluster size of 300 voxels) in the same regions as Table 1 and Figure 6. Left column shows the results for the original CBF maps (non-PVC) and right column shows the results for the final CBF maps after PVC using Asllani's method with a 5x5x1 low-resolution kernel.
217
+
218
+ # Figure Legends
219
+
220
+ Figure 1. Pipeline for processing ASL datasets in ASLToolbox. Each box represents a main step in ASLToolbox's procedure and top dotted line boxes represent the input data.
221
+
222
+ Figure 2. Graphical User Interface of the ASLToolbox for loading dataset. It consists of three main sections namely "Input Files", "Output Directories" and "Options".
223
+
224
+ Figure 3. Graphical User Interface for ROI Statistics analysis in ASLToolbox.
225
+
226
+ Figure 4. Example of the result of the co-registration step between a 3DT1 weighted image (background) and a CBF map (overlay). The 3DT1 structural image has been resampled to the resolution of the ASL data.
227
+
228
+ Figure 5. Tissue probability maps of GM (rows 1,3,5) and WM (rows 2,4,6) of a subject, registered onto the 3DT1 structural scan on a sagittal (rows 1,2), coronal (rows 3,4) and axial planes (rows 5,6). These maps were used for the PVC of the CBF maps.
229
+
230
+ Figure 6. 3D T1 weighted axial, sagittal and coronal planes for one patient (first row), and detail of the left hippocampus and parahippocampal gyrus in the same planes (second, third and fourth rows). The color overlays in the third and fourth rows correspond to the CBF map. The third row shows the partial volume effect in the original CBF map and the fourth row shows the result of Asllani's PVC with a $5 \times 5 \times 1$ low-resolution kernel.
231
+
232
+ Figure 7. Example of a final smoothed (6mm Gaussian kernel), MNI normalised and PVE corrected CBF map as an overlay onto the 3DT1 MNI template.
233
+
234
+ Figure 8. Results of the statistical group comparison: significant hypoperfusion regions in healthy subjects at risk of developing AD, $p_{FWE}<0.05$ (minimum cluster size 300 voxels), for: (a) PVE corrected CBF maps and (b) CBF maps with no PVE correction.
2401.12xxx/2401.12603/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f42d1167a25623da4c1f614b465a8c9064e9e855a146606f27a4619c636682ce
3
+ size 134371
2401.12xxx/2401.12603/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12665/0c8eb889-4021-4ec5-b2eb-27542fa21f64_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12665/0c8eb889-4021-4ec5-b2eb-27542fa21f64_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12665/0c8eb889-4021-4ec5-b2eb-27542fa21f64_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a261ed79fc358c0800fdd6f41d26b2b02ff2f30a6109dd8afd60ce46851f2721
3
+ size 12491583
2401.12xxx/2401.12665/full.md ADDED
@@ -0,0 +1,611 @@
1
+ # ClipSAM: CLIP and SAM Collaboration for Zero-Shot Anomaly Segmentation
2
+
3
+ Shengze Li $^{1}$ , Jianjian Cao $^{1}$ , Peng Ye $^{1}$ , Yuhan Ding $^{1}$ , Chongjun Tu $^{1}$ and Tao Chen $^{1*}$
4
+ $^{1}$ School of Information Science and Technology, Fudan University
5
+
6
+ # Abstract
7
+
8
+ Recently, foundational models such as CLIP and SAM have shown promising performance for the task of Zero-Shot Anomaly Segmentation (ZSAS). However, either CLIP-based or SAM-based ZSAS methods still suffer from non-negligible key drawbacks: 1) CLIP primarily focuses on global feature alignment across different inputs, leading to imprecise segmentation of local anomalous parts; 2) SAM tends to generate numerous redundant masks without proper prompt constraints, resulting in complex post-processing requirements. In this work, we innovatively propose a CLIP and SAM collaboration framework called ClipSAM for ZSAS. The insight behind ClipSAM is to employ CLIP's semantic understanding capability for anomaly localization and rough segmentation, which is further used as the prompt constraints for SAM to refine the anomaly segmentation results. In details, we introduce a crucial Unified Multiscale Cross-modal Interaction (UMCI) module for interacting language with visual features at multiple scales of CLIP to reason anomaly positions. Then, we design a novel Multi-level Mask Refinement (MMR) module, which utilizes the positional information as multi-level prompts for SAM to acquire hierarchical levels of masks and merges them. Extensive experiments validate the effectiveness of our approach, achieving the optimal segmentation performance on the MVTec-AD and VisA datasets. Our code is public.<sup>1</sup>
9
+
10
+ # 1 Introduction
11
+
12
+ Zero-Shot Anomaly Segmentation (ZSAS) is a critical task in fields such as image analysis [Fomalont, 1999] and industrial quality inspection [Mishra et al., 2021; Bergmann et al., 2022; Bergmann et al., 2019]. Its objective is to accurately localize and segment anomalous regions within images, without relying on prior class-specific training samples. As a result, the diversity of industrial products and the uncertainty in anomaly types pose significant challenges for the ZSAS task.
13
+
14
+ ![](images/b406d968581fa52260351b7f55ee763a9be7aa63014323f40a3cc472770533c2.jpg)
15
+ Figure 1: Structural comparisons among different approaches for Zero-Shot Anomaly Segmentation. Top: CLIP-based approaches. Middle: SAM-based approaches. Bottom: Our ClipSAM approach that leverages the strengths of both CLIP and SAM methods.
16
+
17
+ With the emergence of foundational models such as CLIP [Radford et al., 2021] and SAM [Kirillov et al., 2023], notable advancements have been achieved in Zero-Shot Anomaly Segmentation. As depicted in Figure 1, CLIP-based approaches, like WinCLIP [Jeong et al., 2023] and APRIL-GAN [Chen et al., 2023a], determine the anomaly classification of each patch by comparing the similarity between image patch tokens and text tokens. While CLIP exhibits a strong semantic understanding capability, this is achieved by aligning the global features of language and vision, making it less suitable for fine-grained segmentation tasks [Wang et al., 2022]. Because anomalies consistently manifest in specific regions of objects, the global semantic consistency inherent in CLIP cannot achieve precise identification of the edges of local anomalies. On the other hand, researchers have explored SAM-based approaches to assist the ZSAS task. SAM has superior segmentation capabilities and can accept diverse prompts, including points, boxes, and textual prompts, to guide the segmentation process. To this end, SAA [Cao et al., 2023], as shown in Figure 1, utilizes SAM with textual prompts to generate a vast number of candidate masks and applies filters for postprocessing. However, simple textual prompts may be insufficient for accurately describing anomalous regions, resulting in subpar anomaly localization performance and
18
+
19
+ underutilization of SAM's capabilities. Meanwhile, ambiguous prompts lead to the generation of redundant masks, which requires further selection of the correct masks.
20
+
21
+ Based on these observations, we innovatively propose a CLIP and SAM collaboration framework, first employing CLIP for anomaly localization and rough segmentation and then utilizing SAM and the localization information to refine the anomaly segmentation results. For the stage of CLIP, it is crucial to incorporate the fusion of language tokens and image patch tokens and model their dependencies to strengthen CLIP's ability to segment anomaly parts, since cross-modal interaction and fusion have been proven to be beneficial for localization and object segmentation in several studies [Jing et al., 2021; Xu et al., 2023; Feng et al., 2021]. Further, several notable works [Ding et al., 2019; Huang et al., 2019; Hou et al., 2020] have focused on enhancing the model's local semantic comprehension by paying attention to row and column features. Motivated by these studies, we have developed a novel cross-modal interaction strategy that facilitates the interaction of text and visual features at both row-column and multi-scale levels, adequately enhancing CLIP's capabilities for positioning and segmenting anomaly parts. For the stage of SAM, in order to fully harness its fine-grained segmentation capability, we exploit the localization abilities of CLIP to provide more explicit prompts in the form of both points and bounding boxes. This approach greatly enhances SAM's ability to segment anomaly regions accurately. Further, we have noticed that SAM's segmentation results often display masks with different levels of granularity, even when provided with the same prompt. To avoid the inefficient postprocessing caused by further mask filtering, we propose a more efficient mask refinement strategy that seamlessly integrates different levels of masks, leading to enhanced anomaly segmentation results.
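+
+ The exact prompt-extraction rule belongs to the MMR module described later; as a rough illustration of the idea, point and box prompts can be derived from a thresholded anomaly map as in the sketch below (the threshold and the connected-component rule are assumptions for illustration, not the authors' method):
+
+ ```python
+ import numpy as np
+ from scipy import ndimage
+
+ def prompts_from_anomaly_map(anomaly_map, threshold=0.5):
+     """Derive SAM point and box prompts from a rough anomaly map (illustrative).
+
+     anomaly_map : (H, W) array of anomaly scores in [0, 1] from the CLIP stage.
+     Returns one centre point and one bounding box per connected anomalous region.
+     """
+     labeled, n = ndimage.label(anomaly_map > threshold)
+     points, boxes = [], []
+     for region in range(1, n + 1):
+         ys, xs = np.nonzero(labeled == region)
+         points.append((int(xs.mean()), int(ys.mean())))        # (x, y) centre point
+         boxes.append((xs.min(), ys.min(), xs.max(), ys.max())) # (x0, y0, x1, y1) box
+     return points, boxes
+ ```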
22
+
23
+ As a conclusion, we propose a novel two-stage framework named CLIP and SAM Collaboration (ClipSAM) for ZSAS. The structural comparisons with previous works are illustrated in Figure 1. In the first stage, we employ CLIP for localization and rough segmentation. To achieve the fusion of multi-modal features at different levels, we design the Unified Multi-scale Cross-modal Interaction (UMCI) module. UMCI aggregates image patch tokens from both horizontal and vertical directions and utilizes the corresponding row and column features to interact with language features to perceive local anomalies in different directions. UMCI also considers the interaction of language and multi-scale visual features. In the second stage, we exploit the CLIP's localization information to guide SAM for segmentation refinement. Specifically, we propose the Multi-level Mask Refinement (MMR) module, which first extracts diverse point and bounding box prompts from the CLIP's anomaly localization results and then uses these prompts to guide SAM to generate precise masks. Finally, we fuse these masks with the results obtained from CLIP based on different mask confidences. Our main contributions can be summarized as follows:
24
+
25
+ - We propose a novel framework named CLIP and SAM Collaboration (ClipSAM) to fully leverage the characteristics of different large models for ZSAS. Specifically, we first use CLIP to locate and roughly segment the
26
+
27
+ anomaly objects, and then refine the segmentation results with SAM and the positioning information.
28
+
29
+ - To better assist CLIP in realizing desired localization and rough segmentation, we propose the Unified Multiscale Cross-modal Interaction (UMCI) module, which learns local and global semantics about anomalous parts by interacting language features with visual features at both row-column and multi-scale levels.
30
+ - To refine the segmentation results with SAM adequately, we designed the Multi-level Mask Refinement (MMR) module. It extracts point and bounding box prompts from the CLIP's localization information to guide SAM in generating accurate masks, and fuse them with the results of CLIP to achieve fine-grained segmentation.
31
+ - Extensive experiments on various datasets consistently validate that our approach can achieve new state-of-the-art zero-shot anomaly segmentation results. Particularly on the MVTec-AD dataset, our method outperforms the SAM-based method by $+19.1\uparrow$ in pixel-level AUROC, $+10.0\uparrow$ in $F_{1}$ -max and $+45.5\uparrow$ in Pro metrics.
32
+
33
+ # 2 Related Work
34
+
35
+ # 2.1 Zero-shot Anomaly Segmentation
36
+
37
+ The methods for Zero-Shot Anomaly Segmentation (ZSAS) can be mainly divided into two categories. The first category is based on CLIP. As the pioneering work, WinCLIP [Jeong et al., 2023] calculates the similarity between image patch tokens and textual features for ZSAS. Further, APRIL-GAN [Chen et al., 2023a] employs linear layers to better align features of different modalities. AnoVL [Deng et al., 2023] and ANOMALYCLIP [Zhou et al., 2023] propose to enhance the generalization of text. SDP [Chen et al., 2023b] proposes to address noise in the encoding process of the CLIP image encoder. The second category is based on SAM. Specifically, SAA [Cao et al., 2023] utilizes text prompts for SAM to generate a vast number of candidate masks and uses a complex evaluation mechanism to filter out irrelevant masks.
38
+
39
+ However, relying solely on CLIP or SAM may lead to certain limitations. For instance, CLIP-based methods struggle with precise segmentation of local anomalies, while SAM-based methods heavily rely on specific prompts. To address these drawbacks, we explore the collaboration mechanism of CLIP and SAM and propose a novel framework to leverage their strengths. Besides, we further design the Unified Multiscale Cross-modal Interaction (UMCI) module and Multi-level Mask Refinement module to better exploit the specific ability of CLIP and SAM, respectively.
40
+
41
+ # 2.2 Foundation Models
42
+
43
+ Recently, there has been an increasing focus and attention on foundation models. Various foundation models have achieved satisfactory performance on kinds of downstream tasks [Devlin et al., 2018; Brown et al., 2020]. Notably, CLIP [Radford et al., 2021] and SAM [Kirillov et al., 2023] have emerged as two representative models with impressive zero-shot reasoning capabilities in classification and segmentation tasks, respectively. More specifically, CLIP focuses on aligning
44
+
45
+ ![](images/62a3286eace6b6a20e6b9e065bb0d1837ffd9f0b81311954a57e7aa42fd5451a.jpg)
46
+ ClipSAM: CLIP and SAM Collaboration
47
+ CLIP for Localization and Rough Segmentation
48
+
49
+ ![](images/85e1f9182cff89394a1166141661938a507ab4f397d81810c1241e41e879ab1f.jpg)
50
+ SAM for Multi-level Mask Refinement
51
+
52
+ Unified Multi-scale Cross-modal Interaction (UMCI) module
53
+ ![](images/2290b3880efeae8724666cb8bf3dd06df62f3d64cb47d80f41b9362957e3f896.jpg)
54
+ $P_{i}$ :Patch Tokens
55
+ L:Language Tokens
56
+ AP:AveragePooling
57
+
58
+ ![](images/25cdc1a8ae92883eeadc79d821e27c0da9d5dbc6b80aaef074ff6a30efae06df.jpg)
59
+ Scale Path
60
+
61
+ ![](images/f4ad2ad237abfb3117600a4492cef23ec91b096db2bda0adb926940848f1498d.jpg)
62
+ Strip Path
63
+
64
+ Figure 2: Overview of the proposed ClipSAM framework. ClipSAM includes two main processes: using CLIP for localization and rough segmentation, and using the localization information to prompt SAM to refine the segmentation results. These processes contain two important components: the Unified Multi-scale Cross-modal Interaction (UMCI) module and the Multi-level Mask Refinement (MMR) module. The UMCI module is employed for the interaction of language features with visual features of different directions and scales, facilitating CLIP's ability to locate and segment anomalous objects. Meanwhile, the MMR module incorporates SAM: it uses point and box prompts extracted from the localization information to guide SAM to output the desired masks and fuses them with the rough segmentation result obtained by CLIP.
65
+ ![](images/67a94e72f4dcdda6418e8698ddfe24870523db82c18fff95096a42592bbf9e60.jpg)
66
+ Frozen
67
+
68
+ ![](images/3723b6e0b7a29fcb1bfa59461a982e96a4bc0e98b45b14b8f4a0084cc3e99bca.jpg)
69
+ Trainable
70
+
71
+ ![](images/2956e64aea81d1ec504fa18d18fb0e678318035f9a5e3b50aa05e796b9fed12c.jpg)
72
+ Matrix Addition
73
+
74
+ multi-modal features and possesses robust semantic understanding abilities for both language and vision, while SAM excels in achieving fine-grained segmentation based on different prompts. Recently, [Yue et al., 2023] attempts to establish a connection between SAM's image encoder and CLIP's text encoder for surgical instrument segmentation, and [Wang et al., 2023] attempts to merge SAM and CLIP to facilitate downstream tasks. These works highlight the critical importance of exploring collaboration among foundation models.
75
+
76
+ # 2.3 Cross-modal Interaction
77
+
78
+ In the field of multi-modal learning, cross-modal interaction is becoming increasingly important. Specifically, [Hu et al., 2016] concatenates features from different modalities and uses convolutions for multi-modal information fusion. Further, STEP [Chen et al., 2019] establishes correlations between important areas in the image and relevant keywords in the text to enhance the fusion of cross-modal information. BRINet [Feng et al., 2021] exchanges cross-modal information between different blocks of the encoder to facilitate image segmentation. The success of cross-modal interaction in diverse domains has motivated us to explore it in the context of zero-shot anomaly segmentation. To effectively address the challenge of localizing abnormal regions within an object, we introduce the Unified Multi-scale Cross-modal Interaction module, taking into account the interaction between text and both row-column and multi-scaled visual features.
81
+
82
+ # 3 Methodology
83
+
84
+ # 3.1 CLIP and SAM Collaboration
85
+
86
+ CLIP has a strong semantic understanding of different modalities, while SAM can easily detect edges of fine-grained objects, both of which are important for anomaly segmentation. In this paper, we present a novel CLIP and SAM collaboration framework called ClipSAM, which aims to boost the performance of ZSAS. The overall architecture is illustrated in Fig. 2. Specifically, we leverage CLIP for initial rough segmentation and utilize it as a constraint to refine the segmentation results with SAM. In Sec. 3.2, we introduce the Unified Multi-scale Cross-modal Interaction (UMCI) module within the CLIP stage to achieve accurate rough segmentation and anomaly localization. In Sec. 3.3, we design the Multi-level Mask Refinement (MMR) module, which incorporates the guidance from CLIP to facilitate SAM in generating more precise masks for achieving fine-grained segmentation. In addition, the optimization function of the overall framework is discussed in Sec. 3.4.
89
+
90
+ # 3.2 Unified Multi-scale Cross-modal Interaction
91
+
92
+ In our ClipSAM framework, the CLIP encoder is employed to process both text and image inputs. For a specific pair of text and image, the encoder generates two outputs: $L \in \mathbb{R}^{C_t \times 2}$ and $P_i \in \mathbb{R}^{H \times W \times C}$ . Here, $L$ represents the textual feature vector, in line with WinCLIP [Jeong et al., 2023], reflecting two categories of normal and abnormal. $P_i$ denotes the patch tokens derived from the i-th stage of the encoder. For more details of the textual feature $L$ , please refer to Appendix A.
93
+
94
+ As discussed in Sec. 1, cross-modal fusion has been proven to be beneficial for object segmentation. To achieve this, the Unified Multi-scale Cross-modal Interaction (UMCI) module is designed. Specifically, the UMCI model consists of two parallel paths: the Strip Path and the Scale Path. The Strip Path captures both row- and column-level features of the patch tokens to precisely pinpoint the location. The Scale Path focuses on grasping the image's global features of various scales, enabling a comprehensive understanding of the anomaly. More details are described below.
95
+
96
+ (1) Strip Path. Denote the inputs of a specific UMCI module as textual feature vector $L$ and patch tokens $P$ . We first process the patch tokens to grasp the visual features. The image features are projected to align with the text features in dimension, resulting in $\hat{P} \in \mathbb{R}^{H \times W \times C_t}$ . To extract row- and column-level features from $\hat{P}$ , we apply two average pooling layers, with kernel sizes of $1 \times W$ and $H \times 1$ respectively:
97
+
98
+ $$
+ v_{row} = \mathrm{conv}_{1 \times 3}(\mathrm{Avg\_Pool}_{1 \times W}(\hat{P})), \tag{1}
+ $$
+
+ $$
+ v_{col} = \mathrm{conv}_{3 \times 1}(\mathrm{Avg\_Pool}_{H \times 1}(\hat{P})),
+ $$
105
+
106
+ where $v_{row} \in \mathbb{R}^{H \times c_h}$ and $v_{col} \in \mathbb{R}^{W \times c_h}$ are the row-level and column-level features. $H$ and $W$ denote the height of the vertical feature and the width of the horizontal feature.
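+ To make this pooling step concrete, the following is a minimal PyTorch sketch of Eq. (1). It is illustrative only: the module name `StripPooling`, the channels-first layout, and the use of `Conv1d` over each pooled strip (one reading of $\mathrm{conv}_{1\times 3}$ / $\mathrm{conv}_{3\times 1}$) are our assumptions rather than the released implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class StripPooling(nn.Module):
+     """Sketch of Eq. (1): pool projected patch tokens into row/column strips."""
+     def __init__(self, c_t: int, c_h: int):
+         super().__init__()
+         # each pooled strip is treated as a 1-D sequence; kernel size 3 mirrors conv_{1x3} / conv_{3x1}
+         self.conv_row = nn.Conv1d(c_t, c_h, kernel_size=3, padding=1)
+         self.conv_col = nn.Conv1d(c_t, c_h, kernel_size=3, padding=1)
+
+     def forward(self, p_hat: torch.Tensor):
+         # p_hat: (B, C_t, H, W) projected patch tokens
+         row = p_hat.mean(dim=3)        # Avg_Pool_{1xW}: (B, C_t, H)
+         col = p_hat.mean(dim=2)        # Avg_Pool_{Hx1}: (B, C_t, W)
+         v_row = self.conv_row(row)     # (B, c_h, H) row-level features
+         v_col = self.conv_col(col)     # (B, c_h, W) column-level features
+         return v_row, v_col
+ ```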
107
+
108
+ We then focus on the internal process of the Strip Path. Take $v_{row}$ for example: we apply convolution layers to the text features $L$, obtaining $t_{row}^{1}, t_{row}^{2} \in \mathbb{R}^{c_{h} \times 2}$. $t_{row}^{1}$ and $t_{row}^{2}$ serve as the text feature input of the subsequent Scaled Dot-Product Attention mechanism [Vaswani et al., 2017], a crucial component for the interaction between the language and visual domains. Specifically, we implement a two-step attention mechanism to efficiently predict the language perception of the pixels in $v_{row}$ (normal or abnormal), denoted as $M_{row} \in \mathbb{R}^{H \times c_{h}}$. More details are provided in Appendix B. In parallel, $v_{col}$ is similarly processed to obtain $M_{col} \in \mathbb{R}^{W \times c_{h}}$.
109
+
110
+ We then utilize the bilinear interpolation to expand $M_{row}$ and $M_{col}$ to their original scales and combine the results:
111
+
112
+ $$
113
+ M_{row,col} = \mathrm{conv}_{3 \times 3}(B(M_{row}) + B(M_{col})), \tag{2}
114
+ $$
115
+
116
+ where $B$ symbolizes the bilinear interpolation layer. This process results in $M_{row, col} \in \mathbb{R}^{H \times W \times c_h}$ .
117
+
118
+ Only Points | Only Boxes | Points & Boxes | GT
126
+ ![](images/15149aa71392017431f178a2e6208de2ba153937d42cf5a65eb7970b211247dc.jpg)
127
+ Figure 3: The results produced by SAM with different spatial prompts. As we can see, constraining SAM with the spatial prompt that represents points and boxes as a whole leads to better results.
128
+
129
+ ![](images/c8d05e5534fe5750c7f35c2dd23966921d7ddf1f0da3cf39c867c374986b8420.jpg)
130
+
131
+ ![](images/24e8cf4ee473677ede37fc258229314bf4510615b3ccad60c069bc86fbad98d0.jpg)
132
+
133
+ ![](images/14b46925dd3954f9fbe76db465fe0c7417564c76abc38d46013eb3026e769f9f.jpg)
134
+
135
+ (2) Scale Path. In this path, the image features are also projected to $\hat{P} \in \mathbb{R}^{H \times W \times C_t}$ . Then we apply two average pooling layers with kernel sizes of $s_1$ and $s_2$ to grasp the visual features of different scales:
136
+
137
+ $$
+ v_{g_{1}} = \mathrm{conv}_{3 \times 3}^{g_{1}}(\mathrm{Avg\_Pool}_{s_{1} \times s_{1}}(\hat{P})), \tag{3}
+ $$
+
+ $$
+ v_{g_{2}} = \mathrm{conv}_{3 \times 3}^{g_{2}}(\mathrm{Avg\_Pool}_{s_{2} \times s_{2}}(\hat{P})),
+ $$
144
+
145
+ where $v_{g_1} \in \mathbb{R}^{h_{g_1} \times w_{g_1} \times c_h}$ and $v_{g_2} \in \mathbb{R}^{h_{g_2} \times w_{g_2} \times c_h}$ represent visual features at different scales.
146
+
147
+ For the internal process of Scale Path, we consider $v_{g_1}$ for example. The text features are processed by convolution layers and yield $t_{g_1}^k, t_{g_1}^v \in \mathbb{R}^{c_{g_1} \times 2}$ . We then obtain the language perception of the pixels $M_{g_1} \in \mathbb{R}^{h_{g_1} \times w_{g_1} \times c_h}$ by the attention with $v_{g_1}$ as the query, $t_{g_1}^k$ as the key and $t_{g_1}^v$ as the value. More details are provided in Appendix B. Similar to the Strip Path, we utilize the bilinear interpolation to resize and combine $M_{g_1}$ and $M_{g_2}$ :
148
+
149
+ $$
150
+ M_{g_{1},g_{2}} = \mathrm{conv}_{3 \times 3}^{g_{1},g_{2}}\left(B(M_{g_{1}}) + B(M_{g_{2}})\right), \tag{4}
151
+ $$
152
+
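+ A minimal sketch of the Scale Path plumbing in Eqs. (3)-(4) is given below; the cross-modal attention itself (Appendix B) is omitted, the default kernel sizes $s_1 = 3$ and $s_2 = 9$ follow Table 3, and all layer names are hypothetical.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class ScalePooling(nn.Module):
+     """Sketch of Eqs. (3)-(4): multi-scale pooling, per-scale 3x3 convs, upsample and fuse."""
+     def __init__(self, c_t: int, c_h: int, s1: int = 3, s2: int = 9):
+         super().__init__()
+         self.s1, self.s2 = s1, s2
+         self.conv_g1 = nn.Conv2d(c_t, c_h, 3, padding=1)
+         self.conv_g2 = nn.Conv2d(c_t, c_h, 3, padding=1)
+         self.fuse = nn.Conv2d(c_h, c_h, 3, padding=1)
+
+     def forward(self, p_hat: torch.Tensor):
+         h, w = p_hat.shape[-2:]
+         v_g1 = self.conv_g1(F.avg_pool2d(p_hat, self.s1))   # coarse scale s1
+         v_g2 = self.conv_g2(F.avg_pool2d(p_hat, self.s2))   # coarser scale s2
+         # the cross-modal attention with the text features would be applied here (Appendix B);
+         # this sketch only shows the pooling / resize / fusion steps.
+         m_g1 = F.interpolate(v_g1, size=(h, w), mode="bilinear", align_corners=False)
+         m_g2 = F.interpolate(v_g2, size=(h, w), mode="bilinear", align_corners=False)
+         return self.fuse(m_g1 + m_g2)                        # Eq. (4)
+ ```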
153
+ (3) Dual-path Fusion. After the Strip Path and Scale Path, we have obtained the pixel-wise predictions $M_{row,col}$ and $M_{g_1,g_2}$ , providing comprehensive location and semantic information about the anomaly. The last step of the UMCI module involves fusing these results to get the rough segmentation of the anomalous region. Specifically, we introduce a residual connection from the input patch token $\hat{P}$ , and fuse it with the pixel-wise predictions by a convolution layer:
154
+
155
+ $$
+ v_{ori} = \mathrm{conv}_{3 \times 3}^{ori}(\hat{P}), \tag{5}
+ $$
+
+ $$
+ M_{all} = \mathrm{conv}_{3 \times 3}^{all}(\mathrm{concat}(v_{ori}, M_{row,col}, M_{g_{1},g_{2}})).
+ $$
162
+
163
+ We employ a Multi-Layer Perceptron as the segmentation head, and the rough segmentation of the anomalous regions is mathematically described as:
164
+
165
+ $$
166
+ O = \mathrm{MLP}\left(\mathrm{ReLU}\left(M_{all} + \hat{P}\right)\right). \tag{6}
167
+ $$
168
+
169
+ where $O \in \mathbb{R}^{H \times W \times 2}$ denotes the segmentation output of a specific UMCI module, and dimension 2 represents the classification of the foreground anomalous parts and the background. Assume there are $n$ stages in the encoder, and denote $O_{i}$ as the segmentation output of stage $i$ . Then the final segmentation results can be calculated as $O = \frac{1}{n}\sum_{i=1}^{n}O_{i}$ .
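+ The fusion and prediction steps in Eqs. (5)-(6) can be sketched as follows. Note that, for shape compatibility, this sketch adds the residual with the projected tokens $v_{ori}$ instead of $\hat{P}$ itself, and the two-layer MLP head is an assumption about the unspecified segmentation head.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class DualPathFusion(nn.Module):
+     """Sketch of Eqs. (5)-(6): fuse the residual branch with both path outputs."""
+     def __init__(self, c_t: int, c_h: int, n_classes: int = 2):
+         super().__init__()
+         self.conv_ori = nn.Conv2d(c_t, c_h, 3, padding=1)
+         self.conv_all = nn.Conv2d(3 * c_h, c_h, 3, padding=1)
+         self.head = nn.Sequential(nn.Linear(c_h, c_h), nn.ReLU(), nn.Linear(c_h, n_classes))
+
+     def forward(self, p_hat, m_row_col, m_g1_g2):
+         # p_hat: (B, C_t, H, W); m_row_col, m_g1_g2: (B, c_h, H, W)
+         v_ori = self.conv_ori(p_hat)                                      # Eq. (5)
+         m_all = self.conv_all(torch.cat([v_ori, m_row_col, m_g1_g2], 1))
+         # Eq. (6); the residual here uses the projected v_ori for shape compatibility
+         x = torch.relu(m_all + v_ori).permute(0, 2, 3, 1)
+         return self.head(x)                                               # (B, H, W, 2)
+
+ # The final prediction averages the per-stage outputs: O = torch.stack(stage_outputs, 0).mean(0)
+ ```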
170
+
171
+ # 3.3 Multi-level Mask Refinement
172
+
173
+ With the rough segmentation $O$ from the CLIP phase, we propose the Multi-level Mask Refinement (MMR) module to extract point and box prompts that guide SAM to generate accurate masks. In the MMR module, the foreground of the rough segmentation, denoted as $O_{f}\in \mathbb{R}^{H\times W}$ , is first post-processed with a binarization step to obtain a binary mask $O_{b}(x,y)$ . Denote $v(x,y)_f, x\in H, y\in W$ as the value of a specific pixel in $O_{f}$ ; then the value of each pixel $v(x,y)_b$ in $O_{b}(x,y)$ can be calculated as:
176
+
177
+ $$
178
+ v(x, y)_{b} = \begin{cases} 1, & \text{if } v(x, y)_{f} > threshold \\ 0, & \text{otherwise} \end{cases} \tag{7}
179
+ $$
180
+
181
+ where threshold represents the binary threshold, and the value 1 corresponds to the anomalous pixels. Within the connected areas of this binary mask, we identify some boxes and points to provide spatial prompts for SAM. For point selection, $m$ random points are chosen, represented as $S_{p} = [(x_{p_{1}},y_{p_{1}}),\ldots ,(x_{p_{m}},y_{p_{m}})]$ , where $(x_{p_i},y_{p_i})$ represents the position of the i-th point. Boxes are generated based on the size of connected regions in the binary mask, with the i-th box denoted by $S_{b_i} = [(x_{b_i},y_{b_i},h_{b_i},w_{b_i})]$ . The complete set with $q$ boxes is represented as $S_{b} = [S_{b_{1}},\dots,S_{b_{q}}]$ .
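+ A possible implementation of the binarization in Eq. (7) and of the prompt extraction is sketched below with OpenCV; the threshold 0.47 follows Table 3, while the number of points $m$, the xyxy box format, and the helper name are assumptions.
+
+ ```python
+ import numpy as np
+ import cv2
+
+ def extract_prompts(o_f: np.ndarray, threshold: float = 0.47, m: int = 5, seed: int = 0):
+     """Binarize the rough foreground map (Eq. 7) and derive point/box prompts."""
+     rng = np.random.default_rng(seed)
+     o_b = (o_f > threshold).astype(np.uint8)                  # Eq. (7): binary anomaly mask
+     num, _, stats, _ = cv2.connectedComponentsWithStats(o_b)
+     boxes = []
+     for i in range(1, num):                                   # label 0 is the background
+         x, y, w, h = stats[i, :4]
+         boxes.append([x, y, x + w, y + h])                    # one xyxy box per connected region
+     ys, xs = np.nonzero(o_b)
+     points = np.zeros((0, 2), dtype=int)
+     if len(xs) > 0:
+         idx = rng.choice(len(xs), size=min(m, len(xs)), replace=False)
+         points = np.stack([xs[idx], ys[idx]], axis=1)         # m random (x, y) foreground points
+     return points, np.array(boxes)
+ ```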
182
+
183
+ With the point prompts $S_{p}$ and box prompts $S_{b}$ , we explore the optimal prompt sets for SAM. As can be seen in Figure 3, employing either points or boxes alone leads to biased results. In contrast, the combined application of both points and boxes can yield more precise and detailed segmentation. Therefore, in our ClipSAM framework, we use $S = S_{b} \cup S_{p}$ as the prompt set for SAM. With the original image $I$ and spatial prompts $S$ as inputs, SAM generates encoded features $z_{i}$ and $z_{s}$ . The decoder within SAM then outputs the refined masks and corresponding confidence scores:
184
+
185
+ $$
186
+ (masks, scores) = \mathbb{D}^{sam}\left(z_{i} \mid z_{s}\right). \tag{8}
187
+ $$
188
+
189
+ Each box shares the same point constraints, resulting in $q$ distinct segmentation masks. In our ClipSAM framework, SAM is configured to produce three masks with varying confidence scores for each box, represented as $masks = [(m_{1}^{1}, m_{1}^{2}, m_{1}^{3}); \ldots; (m_{q}^{1}, m_{q}^{2}, m_{q}^{3})]$ and $scores = [(s_{1}^{1}, s_{1}^{2}, s_{1}^{3}); \ldots; (s_{q}^{1}, s_{q}^{2}, s_{q}^{3})]$ . The final fine-grained segmentation result $O_{final}$ is obtained by normalizing the fusion of the rough segmentation and the refined masks:
190
+
191
+ $$
192
+ O_{final} = \mathrm{Norm}\left(O + \sum_{i=1}^{q} \sum_{j=1}^{3} m_{i}^{j} \times s_{i}^{j}\right). \tag{9}
193
+ $$
194
+
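+ Below is a minimal sketch of this SAM stage, assuming the official `segment_anything` package; the checkpoint path, the input names (`image_rgb`, `o_rough`, `points`, `boxes` produced by the previous step), and the min-max normalization used for $\mathrm{Norm}(\cdot)$ are assumptions.
+
+ ```python
+ import numpy as np
+ from segment_anything import sam_model_registry, SamPredictor
+
+ sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h.pth")   # checkpoint path is an assumption
+ predictor = SamPredictor(sam)
+ predictor.set_image(image_rgb)                                  # H x W x 3 uint8 RGB image
+
+ refined = np.zeros_like(o_rough, dtype=np.float32)
+ for box in boxes:                                               # every box shares the same points
+     masks, scores, _ = predictor.predict(
+         point_coords=points.astype(np.float32),
+         point_labels=np.ones(len(points)),                      # 1 marks foreground points
+         box=box.astype(np.float32),
+         multimask_output=True,                                  # three candidate masks per box
+     )
+     for mask, score in zip(masks, scores):                      # Eq. (9): confidence-weighted sum
+         refined += mask.astype(np.float32) * float(score)
+
+ o_final = o_rough + refined
+ o_final = (o_final - o_final.min()) / (o_final.max() - o_final.min() + 1e-8)   # Norm(.)
+ ```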
195
+ # 3.4 Objective Function
196
+
197
+ In our ClipSAM framework, the only part involving training is the UMCI module. To effectively optimize this module, we employ the Focal Loss [Lin et al., 2017] and the Dice Loss [Milletari et al., 2016], both of which are well-suited for segmentation tasks.
198
+
199
+ Focal Loss. Focal loss is primarily applied to address the class imbalance problem, a common challenge in segmentation tasks. It is appropriate for anomaly segmentation because the anomaly usually occupies only a small fraction of the entire object. The expression of Focal loss is:
200
+
201
+ $$
202
+ l_{focal} = -\frac{1}{H \times W} \sum_{i=0}^{H \times W} (1 - p_{i})^{\gamma} \log(p_{i}), \tag{10}
203
+ $$
204
+
205
+ where $p_i$ is the predicted probability for a pixel being abnormal, and $\gamma$ is a tunable parameter and set to 2 in our paper.
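+ A compact PyTorch sketch of Eq. (10) might look as follows; it applies the focal term to both classes (the usual symmetric form), whereas Eq. (10) writes out the term for a single predicted probability $p_i$.
+
+ ```python
+ import torch
+
+ def focal_loss(prob: torch.Tensor, target: torch.Tensor, gamma: float = 2.0) -> torch.Tensor:
+     """prob: predicted anomaly probability per pixel in [0, 1]; target: binary ground truth."""
+     p_t = torch.where(target > 0.5, prob, 1.0 - prob).clamp_min(1e-6)   # probability of the true class
+     return (-(1.0 - p_t) ** gamma * torch.log(p_t)).mean()              # averaged over the pixels
+ ```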
206
+
207
+ Dice Loss. Dice Loss calculates a score based on the overlap between the target area and the model's output. This metric is also effective for the class imbalance issue. Dice Loss can be calculated as:
208
+
209
+ $$
210
+ l_{dice} = 1 - \frac{1}{N} \frac{2 \times \sum_{i=1}^{N} y_{i} \hat{y}_{i}}{\sum_{i=1}^{N} y_{i}^{2} + \sum_{i=1}^{N} \hat{y}_{i}^{2}}, \tag{11}
211
+ $$
212
+
213
+ where $N = H\times W$ is the total number of pixels in the features.
+
+ Total Loss. We set separate loss weights for each stage, and the total loss can be expressed as:
214
+
215
+ $$
216
+ l_{all} = \sum_{i=1}^{n} \lambda_{i}\left(l_{focal}^{i} + l_{dice}^{i}\right), \tag{12}
217
+ $$
218
+
219
+ where $i$ denotes the index of stages, $\lambda_{i}$ is the loss weight of the i-th stage. The CLIP encoder in our implementation consists of 4 stages in total, and we set the loss weights for these stages at 0.1, 0.1, 0.1, and 0.7 respectively.
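+ The Dice term of Eq. (11) and the stage-weighted total of Eq. (12) can be sketched as below, reusing `focal_loss` from the previous sketch; the standard Dice form and the batch handling are assumptions.
+
+ ```python
+ import torch
+
+ def dice_loss(prob: torch.Tensor, target: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
+     """Eq. (11) over the N = H*W pixels of each sample, averaged over the batch."""
+     p, y = prob.flatten(1), target.flatten(1)
+     dice = (2.0 * (p * y).sum(1)) / (p.pow(2).sum(1) + y.pow(2).sum(1) + eps)
+     return (1.0 - dice).mean()
+
+ def total_loss(stage_probs, target, weights=(0.1, 0.1, 0.1, 0.7)):
+     """Eq. (12): weighted focal + dice losses over the n = 4 encoder stages."""
+     return sum(w * (focal_loss(p, target) + dice_loss(p, target))
+                for w, p in zip(weights, stage_probs))
+ ```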
220
+
221
+ # 4 Experiments
222
+
223
+ # 4.1 Experimental Setup
224
+
225
+ Datasets. In this study, we conduct experiments on two commonly-used datasets of industrial anomaly detection, namely VisA [Zou et al., 2022] and MVTec-AD [Bergmann et al., 2019], which encompass a diverse range of industrial objects categorized as normal or abnormal. We follow the same training setup as existing zero-shot anomaly segmentation studies [Jeong et al., 2023; Chen et al., 2023a] to evaluate the performance of our method. Specifically, the model is first trained on the MVTec-AD dataset and then tested on the VisA dataset, and vice versa. Additional experimental results on other datasets are provided in Appendix C.
226
+
227
+ Metrics. Following [Jeong et al., 2023], we employ widely-used metrics, i.e., AUROC, AP, $F_{1}$ -max, and PRO [Bergmann et al., 2020], to provide a fair and comprehensive comparison with existing ZSAS methods. Specifically, AUROC reflects the model's ability to distinguish between classes at various threshold levels. AP quantifies the model's accuracy across different levels of recall. $F_{1}$ -max is the harmonic mean of precision and recall at the optimal threshold, implying the accuracy and coverage of the model. PRO assesses the proportion of correctly predicted pixels within each connected anomalous region, offering insights into the model's local prediction accuracy. Higher values of these metrics mean better performance of the evaluated method.
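+ For reference, the three pixel-level metrics that operate on flattened scores can be computed with scikit-learn as sketched below; PRO requires per-region overlap handling and is omitted here, and the function name is hypothetical.
+
+ ```python
+ import numpy as np
+ from sklearn.metrics import roc_auc_score, average_precision_score, precision_recall_curve
+
+ def pixel_metrics(pred: np.ndarray, gt: np.ndarray):
+     """AUROC, AP and F1-max over flattened pixel scores (PRO omitted)."""
+     y, s = gt.ravel().astype(int), pred.ravel()
+     auroc = roc_auc_score(y, s)
+     ap = average_precision_score(y, s)
+     prec, rec, _ = precision_recall_curve(y, s)
+     f1_max = np.max(2 * prec * rec / np.clip(prec + rec, 1e-8, None))
+     return {"AUROC": auroc, "AP": ap, "F1-max": f1_max}
+ ```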
228
+
229
+ Implementation details. In the experiments, the pre-trained ViT-L-14-336 model released by OpenAI, which consists of 24 Transformer layers, is utilized for CLIP encoders. We extracted the image patch tokens after each stage of the image encoder (i.e., layers 6, 12, 18, and 24) for the training of our proposed UMCI module, respectively. The optimization process is conducted on a single NVIDIA 3090 GPU using AdamW optimizer with the learning rate of $1 \times 10^{-4}$ and the batch size of 8 for 6 epochs. For SAM, we use the ViT-H pre-trained model.
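+ A hypothetical sketch of this optimization setup (only the UMCI modules are trainable; CLIP and SAM stay frozen) is:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # Stand-in for the four trainable UMCI modules attached to CLIP stages (layers 6/12/18/24);
+ # the layer widths below are placeholders, not the actual module definition.
+ umci_modules = nn.ModuleList([nn.Conv2d(1024, 384, 3, padding=1) for _ in range(4)])
+ optimizer = torch.optim.AdamW(umci_modules.parameters(), lr=1e-4)
+ epochs, batch_size = 6, 8
+ ```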
230
+
231
+ # 4.2 Experiments on MVTec-AD and VisA
232
+
233
+ Comparison with state-of-the-art approaches. In this section, we evaluate the effectiveness of our proposed ClipSAM
234
+
235
+ | Base model | Method | MVTec-AD AUROC | F1-max | AP | PRO | VisA AUROC | F1-max | AP | PRO |
+ |---|---|---|---|---|---|---|---|---|---|
+ | CLIP-based | WinCLIP | 85.1 | 31.7 | - | 64.6 | 79.6 | 14.8 | - | 56.8 |
+ | CLIP-based | APRIL-GAN | 87.6 | 43.3 | 40.8 | 44.0 | 94.2 | 32.3 | 25.7 | 86.8 |
+ | CLIP-based | SDP | 88.7 | 35.3 | 28.5 | 79.1 | 84.1 | 16.0 | 9.6 | 63.4 |
+ | CLIP-based | SDP+ | 91.2 | 41.9 | 39.4 | 85.6 | 94.8 | 26.5 | 20.3 | 85.3 |
+ | SAM-based | SAA | 67.7 | 23.8 | 15.2 | 31.9 | 83.7 | 12.8 | 5.5 | 41.9 |
+ | SAM-based | SAA+ | 73.2 | 37.8 | 28.8 | 42.8 | 74.0 | 27.1 | 22.4 | 36.8 |
+ | CLIP & SAM | ClipSAM (Ours) | **92.3** | **47.8** | **45.9** | **88.3** | **95.6** | **33.1** | **26.0** | **87.5** |
236
+
237
+ Table 1: Performance comparison of different kinds of ZSAS approaches on the MVTec-AD and VisA datasets. Evaluation metrics include AUROC, $F_{1}$ -max, AP, and PRO. Bold indicates the best results.
238
+
239
+ ![](images/6fca1d78996dfb2534a4484f63d19f98108ae62f5d616c893d32b019d15cf47b.jpg)
240
+ Figure 4: Comparison of visualization results among ClipSAM, CLIP-based, and SAM-based methods on the MVTec-AD dataset. Our ClipSAM performs much better on the location and boundary of the anomaly segmentation.
241
+
242
+ ![](images/66595108247b27ebade619d4cb5611b1f8c393d9cf6111e32cd11d9fcf73d29f.jpg)
243
+ Figure 5: Visualization of the results of each step of our ClipSAM collaboration framework. ClipSAM first uses CLIP for rough segmentation and then uses SAM for refinement.
244
+
245
+ framework for ZSAS on the MVTec-AD and VisA datasets. Table 1 shows the comprehensive comparison between our proposed ClipSAM and the state-of-the-art ZSAS methods [Jeong et al., 2023; Chen et al., 2023a; Cao et al., 2023; Chen et al., 2023b] on different datasets and various metrics. It can be concluded that our proposed ClipSAM outperforms existing state-of-the-art methods in all four metrics. Taking the MVTec-AD dataset as an example, our proposed ClipSAM outperforms the advanced CLIP-based method SDP+ by $1.1\%$ , $5.9\%$ , $6.5\%$ and $2.7\%$ on the AUROC, $F_{1}$ -max, AP and PRO metrics, respectively. Compared to the SAM-based approach, our method exhibits superior performance benefits, i.e., improvements of $19.1\%$ , $10.0\%$ , $17.1\%$ , and $45.5\%$ for the metrics. On the VisA dataset, our proposed method similarly shows an overall performance enhancement, demonstrating the effectiveness and generalization of our ClipSAM.
248
+
249
+ Qualitative comparisons. We provide some visualization of ZSAS results in Figure 4 to further demonstrate the effectiveness of the proposed method. For comparison, we also show the segmentation visualization of APRIL-GAN (CLIP-based method) and SAA (SAM-based method). It can be observed that APRIL-GAN can roughly locate the anomalies but fails to provide excellent segmentation results. In contrast, SAA can perform the segmentation well but cannot
250
+
251
+ <table><tr><td colspan="2">Components of ClipSAM</td><td>AUROC</td><td>F1-max</td><td>AP</td><td>PRO</td></tr><tr><td rowspan="2">UMCI</td><td>only w/strip</td><td>90.8</td><td>44.3</td><td>34.9</td><td>79.6</td></tr><tr><td>only w/scale</td><td>90.9</td><td>44.4</td><td>42.7</td><td>81.9</td></tr><tr><td rowspan="2">Module</td><td>w/o UMCI</td><td>60.4</td><td>19.7</td><td>25.0</td><td>34.7</td></tr><tr><td>w/o MMR</td><td>91.8</td><td>46.7</td><td>44.7</td><td>84.8</td></tr><tr><td colspan="2">ClipSAM(Ours)</td><td>92.3</td><td>47.8</td><td>45.9</td><td>88.3</td></tr></table>
252
+
253
+ Table 2: Ablation study of different components in our ClipSAM framework on MVtec-AD dataset. Bold indicates the best results.
254
+
255
+ <table><tr><td colspan="2">Hyperparameters</td><td>AUROC</td><td>F1-max</td><td>AP</td><td>PRO</td></tr><tr><td rowspan="3">Hidden dim (ch)</td><td>194</td><td>91.8</td><td>45.6</td><td>43.8</td><td>83.1</td></tr><tr><td>256</td><td>91.7</td><td>46.4</td><td>44.6</td><td>83.6</td></tr><tr><td>384</td><td>92.3</td><td>47.8</td><td>45.9</td><td>88.3</td></tr><tr><td rowspan="3">Kernel size (s1 &amp; s2)</td><td>2 &amp; 4</td><td>91.9</td><td>45.7</td><td>45.3</td><td>85.1</td></tr><tr><td>3 &amp; 9</td><td>92.3</td><td>47.8</td><td>45.9</td><td>88.3</td></tr><tr><td>6 &amp; 10</td><td>91.8</td><td>46.8</td><td>44.7</td><td>84.9</td></tr><tr><td rowspan="3">Threshold (thr)</td><td>0.45</td><td>91.5</td><td>43.3</td><td>44.9</td><td>82.5</td></tr><tr><td>0.47</td><td>92.3</td><td>47.8</td><td>45.9</td><td>88.3</td></tr><tr><td>0.50</td><td>91.7</td><td>44.6</td><td>43.4</td><td>83.2</td></tr></table>
256
+
257
+ Table 3: Ablation study of different hyperparameters used in our ClipSAM framework on MVTec-AD dataset. Bold indicates the best results in the UMCI module. $c_{h}$ represents the hidden dimension of the convolutional layer. $s_{i}$ denotes the kernel size of the average pooling layer used in the scale path. $thr$ means the threshold for binarization.
258
+
259
+ cover the anomalous region accurately. Compared with these methods, our proposed ClipSAM provides accurate localization as well as good segmentation results. More visualization results are provided in Appendix D. To better understand the role of each module in the ClipSAM framework, we also visualize the rough segmentation results of the CLIP phase and the processed prompts fed into the SAM phase in Figure 5. It shows that CLIP performs a rough segmentation of abnormal parts and generates corresponding prompts based on their locations to complete SAM's further refinement of the results. Please refer to Appendix E for more details.
260
+
261
+ # 4.3 Ablation Studies
262
+
263
+ In this section, we conduct several ablation studies on the MVtec-AD dataset to further explore the effect of different components and the experiment settings on the results in the proposed ClipSAM framework.
264
+
265
+ Effect of components. Table 2 shows the results of the ablation study of different components in ClipSAM. Specifically, we first explore the impact of preserving only the strip path or the scale path in the UMCI module. Subsequently, the performance of removing either the UMCI or MMR module from the framework is tested. It can be found that removing a path in the UMCI module can lead to performance degradation, and removing the scale path has a greater impact, reflecting the necessity of combining the two paths in the UMCI module. At the module level, removing the MMR module will slightly lower performance. Comparing this result with SDP+, we can surprisingly find that the rough segmentation results of the CLIP phase yield even better performance than CLIP-based methods. Figure 6 shows the visualization comparison
266
+
267
+ ![](images/e823c31ba347708a11e100193f64f9f11921ed8753fa37c086e737858c89bb16.jpg)
268
+ Figure 6: Visualization of anomaly localization and rough segmentation by CLIP with the UMCI module and CLIP with similarity calculation.
269
+
270
+ between the rough segmentation and APRIL-GAN (since SDP is not open source). The first two columns of Figure 6 indicate that UMCI can locate the anomalies more accurately, and the last two columns show that UMCI provides better segmentation. Compared with removing the MMR module, removing the UMCI module means regarding the similarity-based segmentation as the rough segmentation result. However, as shown in Figure 4, the similarity-based segmentation cannot provide text-aligned patch tokens and accurate local spatial prompts for the subsequent MMR module. This results in a performance collapse, which demonstrates the important role of the UMCI module in the ClipSAM framework.
271
+
272
+ Effect of hyperparameters. We explore the effects of various hyperparameters on our ClipSAM framework and record the results in Table 3. An analysis of the roles of each hyperparameter in the ClipSAM framework is provided based on the results. Hidden Dimension $(c_h)$ determines the output feature dimension of the convolution layer. Larger $c_h$ values contribute to the effective interpretation of visual features by the model. Kernel size $(s_i)$ affects the size of multi-scale visual features, which should be moderate to provide easy-to-understand visual context. Threshold Value (thr) primarily impacts the initial binarized segmentation of the SAM phase. The setting of thr should also be moderate: a small value may cause the non-anomalous regions to be misclassified and thus unable to generate accurate masks for SAM; a large value may cause some anomalies to be ignored and not detected.
273
+
274
+ # 5 Conclusion
275
+
276
+ We propose the CLIP and SAM Collaboration (ClipSAM) framework to solve zero-shot anomaly segmentation for the first time. To cascade the two foundation models effectively, we introduce two modules. One is the UMCI module, which explicitly reasons about anomaly locations and achieves rough segmentation. The other is the MMR module, which refines the rough segmentation results by employing SAM with precise spatial prompts. Extensive experiments show that ClipSAM provides a new direction for improving ZSAS by leveraging the characteristics of different foundation models. In future work, we will further investigate how to integrate knowledge from different models to enhance the performance of zero-shot anomaly segmentation.
277
+
278
+ # A Text design
279
+
280
+ As shown in Figure 7, the prompt design in ClipSAM follows the same approach as WinCLIP [Jeong et al., 2023]. Specifically, for a given category, such as 'bottle' in the MVTec-AD dataset, phrases describing the normal state, like "perfect bottle", are formed by combining state descriptions with the category name. Subsequently, these phrases describing the normal state are integrated separately with the prompt templates. This process can yield multiple descriptive statements about a normal bottle, such as "a photo of a perfect bottle." Assuming we have $m$ prompt templates and $n$ phrases describing normal states, we can generate a total of $m \times n$ sentences to describe a normal 'bottle'. To obtain text features corresponding to each sentence, we utilize the text encoder of CLIP and then compute the average of all text features that describe normality. This yields $L_{\text{normal}} \in \mathbb{R}^{c_t \times 1}$ . Here, $c_t$ represents the feature dimension, and 1 denotes the text category, namely normal. Similarly, we can calculate the averaged feature $L_{\text{anomaly}} \in \mathbb{R}^{c_t \times 1}$ for sentences describing anomalies. Finally, we concatenate $L_{\text{normal}}$ and $L_{\text{anomaly}}$ along the category dimension to obtain $L \in \mathbb{R}^{c_t \times 2}$ , where 2 represents the two categories, normal and abnormal.
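+ A sketch of this text-feature construction with the OpenAI `clip` package follows; the template and state lists are abbreviated subsets of Figure 7, and the function name is hypothetical.
+
+ ```python
+ import torch
+ import clip  # OpenAI CLIP package; the model tag below matches the ViT-L/14@336px backbone
+
+ templates = ["a photo of a {}.", "a cropped photo of the {}.", "a close-up photo of a {}."]
+ normal_states = ["perfect {}", "flawless {}", "{} without defect"]
+ anomaly_states = ["damaged {}", "broken {}", "{} with defect"]
+
+ def class_text_features(model, category: str, device: str = "cpu") -> torch.Tensor:
+     """Average CLIP text features over the m x n compositional prompts, one column per state."""
+     feats = []
+     for states in (normal_states, anomaly_states):
+         sents = [t.format(s.format(category)) for t in templates for s in states]
+         with torch.no_grad():
+             emb = model.encode_text(clip.tokenize(sents).to(device))
+         emb = emb / emb.norm(dim=-1, keepdim=True)
+         feats.append(emb.mean(dim=0))          # L_normal / L_anomaly
+     return torch.stack(feats, dim=1)           # (c_t, 2)
+
+ model, _ = clip.load("ViT-L/14@336px", device="cpu")
+ L = class_text_features(model, "bottle")
+ ```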
281
+
282
+ During the training and testing processes, it is assumed that the object categories are known, while the specific anomaly categories are unknown. Consequently, for a batch of data, text features corresponding to their respective categories can be generated based on the known object categories. In situations where the categories are unknown, the placeholder 'object' can be used to substitute for specific object categories, which has been proven effective in experiments [Zhou et al., 2023].
283
+
284
+ # B Strip path and Scale path
285
+
286
+ The UMCI module consists of two parallel paths: the Strip Path and the Scale Path. The Strip Path employs interactions between language features and row- and column-level visual features to identify the visually salient pixels with the strongest language perception in different directions. The Scale Path utilizes interactions between language features and global visual features at different scales to comprehend what is considered an anomaly.
287
+
288
+ (1) Strip Path. The interaction conducted by the strip path involves the fusion of text features and row-and-column visual features. Specifically, visual features are aggregated into row-level and column-level features from horizontal and vertical directions through average pooling layers. Taking row-level features $v_{row}$ as an example, text features $L$ are processed by convolutional layers to obtain language features $t_{row}^{1}$ , $t_{row}^{2}$ with dimensions matching the visual features. Subsequently, a two-stage attention mechanism is employed to perceive relevant language features at each pixel of the row-level features. In the attention computation process, we employ the Scaled Dot-Product Attention mechanism [Vaswani et al., 2017]:
289
+
290
+ $$
291
+ \mathrm{Attention}(Q, K, V) = \mathrm{softmax}\left(\frac{QK^{T}}{\sqrt{d_{k}}}\right)V. \tag{13}
292
+ $$
293
+
294
+ As shown in Figure 8, the first attention step is designed to capture the correlated visual features corresponding to each
295
+
296
+ # Algorithm 1 Strip Path
297
+
298
+ Input: Image patch token $P$ , Language feature $L$ ; Output: strip path output $M_{row,col}$
299
+
300
+ 1: $\hat{P} = \operatorname{Linear}(P)$ .
301
+ 2: $v_{row} = \mathrm{conv}_{1 \times 3}(\mathrm{Avg\_Pool}_{1 \times W}(\hat{P}))$, $v_{col} = \mathrm{conv}_{3 \times 1}(\mathrm{Avg\_Pool}_{H \times 1}(\hat{P}))$,
306
+
307
+ 3: for $i$ in (row, col) do
308
+
309
+ 4: $t_i^1 = \mathrm{conv}_{1\times 1}^1(L)$, $t_i^2 = \mathrm{conv}_{1\times 1}^2(L)$,
+    $t_{new} = \mathrm{GRU}(\mathrm{Attention}(t_i^1, v_i, v_i))$,
+    $v_i^{att} = \mathrm{Attention}(v_i, t_{new}, t_i^2)$,
+    $M_i = \mathrm{Norm}(v_i^{att} + v_i)$,
314
+
315
+ 5: end for
316
+
317
+ 6: $M_{row, col} = \text{conv}_{3 \times 3}(B(M_{row}) + B(M_{col}))$
318
+ 7: return $M_{row,col}$
319
+
320
+ # Algorithm 2 Scale Path
321
+
322
+ Input: Image patch token $P$ , Language feature $L$ ;
323
+
324
+ Output: scale path output $M_{g_1,g_2}$
325
+
326
+ 1: $\hat{P} = \operatorname{Linear}(P)$ .
327
+ 2: $v_{g_1} = \mathrm{conv}_{3 \times 3}^{g_1}(\mathrm{Avg\_Pool}_{s_1 \times s_1}(\hat{P}))$, $v_{g_2} = \mathrm{conv}_{3 \times 3}^{g_2}(\mathrm{Avg\_Pool}_{s_2 \times s_2}(\hat{P}))$,
332
+
333
+ 3: for $i$ in $(g_{1}, g_{2})$ do
334
+
335
+ 4: $t_i^k = \mathrm{conv}_{1 \times 1}^k(L)$, $t_i^v = \mathrm{conv}_{1 \times 1}^v(L)$,
+    $v_i^{att} = \mathrm{Attention}(v_i, t_i^k, t_i^v)$,
+    $M_i = \mathrm{Norm}(v_i^{att} + v_i)$,
348
+
349
+ 5: end for
350
+
351
+ 6: $M_{g_1,g_2} = \operatorname{conv}_{3\times 3}^{g_1,g_2}\left(B(M_{g_1}) + B(M_{g_2})\right)$
352
+ 7: return $M_{g_1, g_2}$
353
+
354
+ language feature. Then we use a GRU [Cho et al., 2014] to merge the learned visual features with the original language features, so that we obtain language features enriched with visual information. Taking this new language feature as K, and the original visual and language features as Q and V respectively, the attention computation can effectively aggregate the features. Finally, we use a residual connection to add $v_{row}$ to the result and get $M_{row} \in \mathbb{R}^{H \times c_h}$:
355
+
356
+ $$
+ t_{new} = \mathrm{GRU}\left(\mathrm{Attention}\left(t_{row}^{1}, v_{row}, v_{row}\right)\right),
+ $$
+
+ $$
+ v_{row}^{att} = \mathrm{Attention}\left(v_{row}, t_{new}, t_{row}^{2}\right), \tag{14}
+ $$
+
+ $$
+ M_{row} = \mathrm{Norm}\left(v_{row}^{att} + v_{row}\right),
+ $$
367
+
368
+ where $Norm$ denotes L2 normalization. Please refer to Algorithm 1 for specific content.
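+ A minimal sketch of Eq. (14) is given below; the use of `nn.GRUCell` with the original text tokens as the hidden state is one possible reading of the GRU merge, and the class name is hypothetical.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ def attention(q, k, v):
+     """Scaled dot-product attention, Eq. (13). q: (Nq, d), k/v: (Nk, d)."""
+     w = torch.softmax(q @ k.transpose(-2, -1) / (q.shape[-1] ** 0.5), dim=-1)
+     return w @ v
+
+ class StripInteraction(nn.Module):
+     """Sketch of the two-step text-vision interaction in Eq. (14)."""
+     def __init__(self, c_h: int):
+         super().__init__()
+         self.gru = nn.GRUCell(c_h, c_h)   # merges attended visual context into the text tokens
+
+     def forward(self, v, t1, t2):
+         # v: (H, c_h) row-level visual features; t1, t2: (2, c_h) projected text features
+         t_new = self.gru(attention(t1, v, v), t1)      # step 1: text queries vision, GRU update
+         v_att = attention(v, t_new, t2)                # step 2: vision queries the enriched text
+         return F.normalize(v_att + v, dim=-1)          # M_row, L2-normalized residual
+ ```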
369
+
370
+ (2) Scale Path. The scale path is utilized for the interaction between text and multi-scaled visual features. Initially, visual features at different scales are obtained through average pooling layers with different kernel sizes. Taking one of these scales as an example, convolutional layers are employed to process the text features to match the feature dimensions. Subsequently, a dedicated attention mechanism is employed for cross-modal interaction to obtain $M_{g_1} \in \mathbb{R}^{h_{g_1} \times w_{g_1} \times c_h}$ . The residual connection is also used here to add the original visual features
371
+
372
+ # (a) State-level (normal)
373
+
374
+ c := "[o]"
+ c := "flawless [o]"
+ c := "perfect [o]"
+ c := "unblemished [o]"
+ c := "[o] without flaw"
+ c := "[o] without defect"
+ c := "[o] without damage"
381
+
382
+ # (b) State-level (anomaly)
383
+
384
+ c := "damaged [o]"
385
+ c $\coloneqq$ "broken [o]"
386
+ c $\coloneqq$ ["o] with flaw"
387
+ c $\coloneqq$ " [o] with defect
388
+ c $\coloneqq$ ["o] with damage"
389
+
390
+ # (c) Template-level
391
+
392
+ "a bad photo of a/the [c].",
393
+ "a low resolution photo of a/the [c]."
394
+ "a cropped photo of a/the [c]."
395
+ "abright photo of a/the [c]."
396
+ "a dark photo of a/the [c]."
397
+ "aphotofa/thecool[c]."
398
+ "aback and white photo of a/the [c]."
399
+ "ajpegcorrupted photo of a/the [c]."
400
+ "ablurphy photo of a/the [c]."
401
+ "aphotofa/the [c]."
402
+ "aphotofa/the large [c]."
403
+ "aphotofa/the small [c]."
404
+ "There is a/the [c] in the scene."
405
+ "this is a/the [c] in the scene."
406
+ "this is one [c] in the scene."
407
+
408
+ "a close-up photo of the [c]."
409
+ "aphotofmy[c]."
410
+ "a photo of one [c]."
411
+ "a good photo of the [c]."
412
+ "a close-up photo of a [c]."
413
+
414
+ # (d) Object class (MVTec-AD)
415
+
416
+ o $\coloneqq$ "bottle"
417
+ o $\coloneqq$ "cable"
418
+ $\mathrm{o}\coloneqq$ "capsule"
419
+ o $\coloneqq$ "carpet"
420
+ $\mathrm{o}\coloneqq \mathrm{"grid"}$
421
+ o $\coloneqq$ "hazelnut"
422
+ o: $\equiv$ "leather"
423
+ o: $\equiv$ "metal_nut"
424
+
425
+ o : "pill"
426
+ o :="screw"
427
+ o := "tile"
428
+ o :="toothbrush"
429
+ o := "transistor"
430
+ o $\coloneqq$ "wood"
431
+ $\mathrm{o}\coloneqq$ "zipper"
432
+
433
+ ![](images/7f6a1e418cb628ea3e276efc64c62a906b0448da5716d2b5f390eeed7df36fc3.jpg)
434
+ Figure 7: Lists of multi-level prompts considered in this paper to construct the compositional prompt ensemble. The integrated descriptive statements are primarily categorized into phrases that describe normal and abnormal states and prompt templates. The category names are illustrated using the MVTec-AD dataset as an example.
435
+
436
+ ![](images/a5356ded3d37a2ed42632a362e75f1c3dc1b96972e10d6d10a954d3574355837.jpg)
437
+ Figure 8: Diagram depicting the attention calculation process in the strip path and scale path.
438
+
439
+ and the result. The specific formula is as follows:
440
+
441
+ $$
+ v_{g_{1}}^{att} = \mathrm{Attention}\left(v_{g_{1}}, t_{g_{1}}^{k}, t_{g_{1}}^{v}\right),
+ $$
+
+ $$
+ M_{g_{1}} = \mathrm{Norm}\left(v_{g_{1}}^{att} + v_{g_{1}}\right),
+ $$
448
+
449
+ Please refer to Algorithm 2 for specific content.
450
+
451
+ It is worth noting that all convolutional layers in the UMCI module are independent of each other.
452
+
453
+ # C Additional experiments on more datasets
454
+
455
+ We validate the effectiveness of ClipSAM on two commonly used datasets for zero-shot anomaly segmentation, namely MVTec-AD [Bergmann et al., 2019] and VisA [Zou et al., 2022]. Additionally, we conduct relevant experiments on the MTD [Huang et al., 2020] and KSDD2 [Božić et al., 2021] datasets to further confirm the generalizability of our approach.
456
+
457
+ # C.1 MVTec-AD
458
+
459
+ The MVTec-AD dataset serves as an unsupervised anomaly detection dataset, comprising a total of 3466 unlabeled images and 1888 annotated images with pixel-level segmentation annotations. The image sizes are either $700 \times 700$ or $1024 \times 1024$ pixels. The training dataset consists of 3629 images, all depicting defect-free instances. The test dataset comprises 1725 images, including both defective and defect-free samples. The dataset encompasses 15 categories, comprising 5 texture categories such as carpets and leather, and 10 object categories including bottles, cables, capsules, hazelnuts, and others. In total, the dataset contains 73 types of anomalies, such as scratches, dents, and missing parts. Standard evaluation metrics, including AUROC, AP, $F_{1}$ -max and PRO, are commonly employed for assessment.
462
+
463
+ # C.2 VisA
464
+
465
+ The VisA dataset is also one of the commonly used datasets for zero-shot anomaly segmentation. It consists of 10,821 images, comprising 9,621 normal samples and 1,200 anomaly samples. The dataset is organized into 12 subsets, each corresponding to a different object category. Among them, four subsets represent different types of printed circuit boards (PCBs) with relatively complex structures, including transistors, capacitors, chips, and other components. Additionally, four subsets (Capsules, Candles, Macaroni1, and Macaroni2) contain multiple instances in their views. Instances in Capsules and Macaroni2 exhibit significant variations in both position and orientation. The performance of the model on this dataset can also be evaluated using AUROC, AP, $F_{1}$ -max and PRO.
466
+
467
+ # C.3 MTD
468
+
469
+ The Magnetic Tile Defect (MTD) dataset is a more specialized dataset that consists of images related to a single object category but with various defect types. It encompasses six common defect types in magnetic tiles, such as blowhole, break, crack, and others. The dataset comprises a total of 925 images without defects and 392 images with anomalies, each with corresponding image annotations. Performance evaluation can be conducted using the AUROC, AP, $F_{1}$ -max and PRO metrics in a similar manner.
470
+
471
+ | Base model | Method | MTD AUROC | F1-max | AP | PRO | KSDD2 AUROC | F1-max | AP | PRO |
+ |---|---|---|---|---|---|---|---|---|---|
+ | CLIP-based | APRIL-GAN | 51.1 | 16.6 | 9.8 | 17.2 | 52.1 | 10.7 | 9.3 | 13.3 |
+ | SAM-based | SAA+ | 69.4 | 37.3 | 28.8 | - | 77.5 | 61.6 | 49.6 | - |
+ | CLIP & SAM | ClipSAM (Ours) | **88.0** | **55.2** | **51.9** | **71.3** | **90.7** | **67.2** | **67.9** | **88.8** |
472
+
473
+ Table 4: Performance comparison of different kinds of ZSAS approaches on the MTD and KSDD2 datasets. Evaluation metrics include AUROC, $F_{1}$ -max, AP, and PRO. Bold indicates the best results.
474
+
475
+ ![](images/0801c3e45e79e7e0799bae61f263f0b199d25df6532851f2757947cf096214c0.jpg)
476
+ Figure 9: Comparison of visualization results among ClipSAM, CLIP-based, and SAM-based methods on the MTD dataset. Our ClipSAM performs much better on the location and boundary of the anomaly segmentation.
477
+
478
+ # C.4 KSDD2
479
+
480
+ The Kolektor Surface-Defect Dataset 2 (KSDD2) is relevant to industrial quality inspection and can also be used for anomaly segmentation. It comprises 356 images with obvious defects and 2979 images without any defects. The dimensions of each image in the dataset are approximately $230 \times 630$ pixels. The dataset is divided into a training set and a test set, with the training set consisting of 246 positive images and 2085 negative images, while the test set includes 110 positive images and 894 negative images. The dataset encompasses various types of defects, including scratches, small spots, surface defects, and others. The performance of the model in zero-shot anomaly segmentation on this dataset can also be assessed using the AUROC, $F_{1}$ -max, AP, and PRO metrics.
481
+
482
+ # C.5 Experimental results
483
+
484
+ Implementation details. Our experiments adhere to the settings defined in AnomalyCLIP [Zhou et al., 2023]. Specifically, the model is trained on the MVTec-AD dataset and tested on the MTD and KSDD2 datasets. For the MTD dataset, all data samples are considered as the test set. In the case of the KSDD2 dataset, we directly utilize its test set.
487
+
488
+ In the experiments, the pre-trained ViT-L-14-336 model released by OpenAI, which consists of 24 Transformer layers, is utilized for CLIP encoders. We extracted the image patch tokens after each stage of the image encoder (i.e., layers 6, 12, 18, and 24) for the training of our proposed UMCI module, respectively. The optimization process is conducted on a single NVIDIA 3090 GPU using AdamW optimizer with the learning rate of $1 \times 10^{-4}$ and the batch size of 8 for 6 epochs. For SAM, we use the ViT-H pre-trained model.
489
+
490
+ Comparison with state-of-the-art approaches. Since CLIP-based methods such as SDP [Chen et al., 2023b] are not open source and have not been experimentally evaluated on the MTD and KSDD2 datasets, they are not included in this comparison. We therefore evaluate the performance of the CLIP-based method APRIL-GAN [Chen et al., 2023a], the SAM-based method SAA [Cao et al., 2023], and our proposed ClipSAM on the zero-shot anomaly segmentation task. As shown in Table 4, our method demonstrates significant improvements over the other two methods across different metrics. In particular, taking the MTD dataset as an
491
+
492
+ ![](images/3afc3ab98452c2c7bb4f250f924734831585b93193d06cc980482bc4c2a33520.jpg)
493
+ Figure 10: Comparison of visualization results among ClipSAM, CLIP-based, and SAM-based methods on the KSDD2 dataset. Our ClipSAM performs much better on the location and boundary of the anomaly segmentation.
494
+
495
+ example, when compared to APRIL-GAN, ClipSAM demonstrates improvements of $36.9\%$ in AUROC, $38.6\%$ in $F_{1}$ -max, $42.1\%$ in AP, and $54.1\%$ in PRO. When contrasted with $\mathrm{SAA+}$ , ClipSAM shows enhancements of $18.6\%$ in AUROC, $17.9\%$ in $F_{1}$ -max, and $23.1\%$ in AP. Additionally, ClipSAM also demonstrates remarkable performance advantages on the KSDD2 dataset.
496
+
497
+ Qualitative comparisons. We further visualize the experimental results on the MTD dataset and KSDD2 dataset in Figure 9 and Figure 10. The compared methods include APRIL-GAN [Chen et al., 2023a] (CLIP-based method), SAA [Cao et al., 2023] (SAM-based method), and ClipSAM. Similar conclusions can be observed from both figures, indicating that ClipSAM outperforms the other two methods in terms of localization and segmentation capabilities. Meanwhile, APRIL-GAN and SAA exhibit similar disadvantages on both datasets. Specifically, the CLIP-based method, although capable of roughly locating the position of defects guided by language, suffers from inaccurate segmentation areas. Additionally, unexpected high predictions occur outside the real masks, as shown in the third and fifth columns of Figure 9. This is attributed to misalignment between image patch tokens and text tokens, resulting in erroneous predictions and a decrease in performance.
498
+
499
+ Furthermore, while the SAM-based method demonstrates good segmentation performance, it generates incorrect segmentation masks due to the use of ambiguous positional information words as prompts. In particular, in the third column of Figure 9, SAA produces large-area erroneous masks, and in the eighth column of Figure 10, SAA misses parts of the masks. In contrast, ClipSAM performs well on the entire dataset, accurately locating anomalous positions and generating precise segmentation masks without exhibiting errors in unexpected locations, as seen in APRIL-GAN.
500
+
501
+ # D Additional visualization
502
+
503
+ In this section, we visualize the anomaly segmentation results of the proposed ClipSAM framework on the MVTec-AD dataset and the VisA dataset. Figure 11, 12, 13, 14 illustrate the visual comparisons between APRIL-GAN [Chen et al., 2023a] (CLIP-based method), SAA [Cao et al., 2023] (SAM-based method), and our ClipSAM method. Clearly, our ClipSAM demonstrates stronger understanding of anomalous regions, benefiting from the designed UMCI and MMR modules. The process of rough segmentation by CLIP followed by refinement using SAM successfully mitigates misdetections and omissions of certain anomalies.
504
+
505
+ Specifically, CLIP-based methods tend to incorrectly classify regions outside actual anomaly areas as anomalies. Even when identifying anomaly locations, their segmentation results often deviate significantly from the ground truth labels. On the other hand, SAM-based methods heavily rely on post-processing steps for masks. While the initial candidate masks may contain correct masks, complex filtering can introduce substantial biases. Additionally, due to the vague semantic descriptions guiding the model's attention, SAM might focus on parts outside the anomaly regions and segment them entirely, as shown in the fourth column of Figure 11. In reality, anomalies usually constitute a small portion of the entire object, and such errors can significantly impact the results.
506
+
507
+ Observing the figures, APRIL-GAN tends to identify anomaly locations, though less accurately. SAM provides accurate segmentation of components but lacks precise constraints, leading to significant deviations in results. In contrast, ClipSAM's two-stage strategy effectively combines the strengths of CLIP and SAM, resulting in better performance in zero-shot anomaly segmentation.
508
+
509
+ ![](images/4b471c73745d94de78ef93bf4df158f71429b488c6fc3736a24a215220cfc7fc.jpg)
510
+ Figure 11: Comparison of visualization results among ClipSAM, CLIP-based, and SAM-based methods on the MVTec-AD dataset. Our ClipSAM performs much better on the location and boundary of the anomaly segmentation.
511
+
512
+ ![](images/c3bd87dc012a5678cd834e2acae141969ffd69a4ae3a7f177957e0877cd757d3.jpg)
513
+ Figure 12: Comparison of visualization results among ClipSAM, CLIP-based, and SAM-based methods on the MVTec-AD dataset. Our ClipSAM performs much better on the location and boundary of the anomaly segmentation.
514
+
515
+ # E Two-stage visualization
516
+
517
+ The ClipSAM framework consists of two stages. In the first stage, CLIP is employed for localization and rough segmentation, while the second stage utilizes SAM for refining the results. During the process, we binarize the rough segmentation output of CLIP to generate spatial prompts as constraints in connected regions, namely points and boxes. We visualize each of these steps separately to qualitatively observe the output of each stage of the model.
520
+
521
+ As illustrated in Figure 15 and 16, with the assistance
522
+
523
+ ![](images/0f88738a2ef4fbe7b444f18827e57fa7dda0acc1428628b3d47dbc8f3e50f5d9.jpg)
524
+ Figure 13: Comparison of visualization results among ClipSAM, CLIP-based, and SAM-based methods on the MVTec-AD dataset. Our ClipSAM performs much better on the location and boundary of the anomaly segmentation.
525
+
526
+ ![](images/abf122f58b57e16e91773e153d9831239cde17341130f4bde4086192d8f52d45.jpg)
527
+ Figure 14: Comparison of visualization results among ClipSAM, CLIP-based, and SAM-based methods on the VisA dataset. Our ClipSAM performs much better on the location and boundary of the anomaly segmentation.
528
+
529
+ of the Unified Multi-scale Cross-modal Interaction (UMCI) module, CLIP achieves rough segmentation of abnormal regions. However, due to CLIP learning multi-modal semantics tailored for classification tasks, it has limitations in fine-grained segmentation tasks. This is typically manifested as CLIP correctly predicting only parts of a given real anomalous region. With the aid of SAM, it is possible to further refine the abnormal regions predicted by CLIP, obtaining abnormal masks that are closer to ground truth values. It is noteworthy that the binarization of results is based on a certain threshold, which usually does not result in a one-to-one correspondence between the red regions in the rough segmentation
532
+
533
+ ![](images/0a1484025100fd5254ff3b95c2c24762c609e18a42061e5156e8cf3b2233d0df.jpg)
534
+ Figure 15: Visualization of different stages (the process of localization and rough segmentation by CLIP followed by result refinement through SAM) under the ClipSAM framework on the MVTec-AD dataset.
535
+
536
+ ![](images/6e9f3a4f8ea2a66c9bf0c950477397f2b01921d4b55b7a64a49a7bdb506ba7ee.jpg)
537
+ Figure 16: Visualization of different stages (the process of localization and rough segmentation by CLIP followed by result refinement through SAM) under the ClipSAM framework on the MVTec-AD dataset.
538
+
539
+ output and the connected regions in the binarized mask.
540
+
541
+ Additionally, as shown in Figure 17, due to the typically small size of anomalies in the VisA dataset, the designed UMCI module has already assisted CLIP in achieving accurate anomaly segmentation. However, these results often extend beyond the ground truth regions, as seen in the second column of the figure. SAM aids in further focusing on these small anomalous areas. After merging the masks generated by SAM with the rough segmentation results, a certain value suppression is applied to regions beyond the ground truth, which is advantageous when computing metrics.
544
+
545
+ ![](images/6cd127a22756b938da588c32482b6a5e3b31848b9861e337e33efa2fe37da82d.jpg)
546
+ Figure 17: Visualization of different stages (the process of localization and rough segmentation by CLIP followed by result refinement through SAM) under the ClipSAM framework on the VisA dataset.
547
+
548
+ | Base model | Method | MVTec-AD AUROC | F1-max | AP | PRO | VisA AUROC | F1-max | AP | PRO |
+ |---|---|---|---|---|---|---|---|---|---|
+ | CLIP-based | WinCLIP | 85.1 | 31.7 | - | 64.6 | 79.6 | 14.8 | - | 56.8 |
+ | CLIP-based | APRIL-GAN | 87.6 | 43.3 | 40.8 | 44.0 | 94.2 | 32.3 | 25.7 | 86.8 |
+ | CLIP-based | AnoVL | 90.6 | 36.5 | - | 77.8 | 91.4 | 17.4 | - | 75.0 |
+ | CLIP-based | AnomalyCLIP | 91.1 | - | - | 81.4 | 95.5 | - | - | 87.0 |
+ | CLIP-based | SDP | 88.7 | 35.3 | 28.5 | 79.1 | 84.1 | 16.0 | 9.6 | 63.4 |
+ | CLIP-based | SDP+ | 91.2 | 41.9 | 39.4 | 85.6 | 94.8 | 26.5 | 20.3 | 85.3 |
+ | SAM-based | SAA | 67.7 | 23.8 | 15.2 | 31.9 | 83.7 | 12.8 | 5.5 | 41.9 |
+ | SAM-based | SAA+ | 73.2 | 37.8 | 28.8 | 42.8 | 74.0 | 27.1 | 22.4 | 36.8 |
+ | CLIP & SAM | ClipSAM (Ours) | **92.3** | **47.8** | **45.9** | **88.3** | **95.6** | **33.1** | **26.0** | **87.5** |
549
+
550
+ Table 5: Performance comparison of different kinds of ZSAS approaches on the MVTec-AD and VisA datasets. Evaluation metrics include AUROC, $F_{1}$ -max, AP, and PRO. Bold indicates the best results.
551
+
552
+ # F Comparing with more methods.
553
+
554
+ We additionally compare the model's performance on zero-shot anomaly segmentation with two other works, AnoVL [Deng et al., 2023] and AnomalyCLIP [Zhou et al., 2023]. AnoVL enhances the prompt templates with domain-specific textual designs such as "industrial" and "manufacturing." AnomalyCLIP introduces the concept of prompt learning to the text encoder part of CLIP. As shown in Table 5, our approach still achieves the best performance on the zero-shot anomaly segmentation task. However, it is noteworthy that designing more general text prompts is an interesting direction for addressing zero-shot anomaly segmentation tasks. ClipSAM adopts the text strategy from WinCLIP [Jeong et al., 2023] without further modifications, presenting a potential challenge for future exploration.
557
+
558
+ # References
559
+
560
+ [Bergmann et al., 2019] Paul Bergmann, Michael Fauser, David Sattlegger, and Carsten Steger. Mvtec ad-a comprehensive real-world dataset for unsupervised anomaly detection. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9592-9600, 2019.
561
+ [Bergmann et al., 2020] Paul Bergmann, Michael Fauser, David Sattlegger, and Carsten Steger. Uninformed students: Student-teacher anomaly detection with discriminative latent embeddings. In Proceedings of the IEEE/CVF
562
+
563
+ conference on computer vision and pattern recognition, pages 4183-4192, 2020.
564
+ [Bergmann et al., 2022] Paul Bergmann, Kilian Batzner, Michael Fauser, David Sattlegger, and Carsten Steger. Beyond dents and scratches: Logical constraints in unsupervised anomaly detection and localization. International Journal of Computer Vision, 130(4):947-969, 2022.
565
+ [Božić et al., 2021] Jakob Božić, Domen Tabernik, and Danijel Skočaj. Mixed supervision for surface-defect detection: from weakly to fully supervised learning. Computers in Industry, 2021.
566
+ [Brown et al., 2020] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.
567
+ [Cao et al., 2023] Yunkang Cao, Xiaohao Xu, Chen Sun, Yuqi Cheng, Zongwei Du, Liang Gao, and Weiming Shen. Segment any anomaly without training via hybrid prompt regularization. arXiv preprint arXiv:2305.10724, 2023.
568
+ [Chen et al., 2019] Ding-Jie Chen, Songhao Jia, Yi-Chen Lo, Hwann-Tzong Chen, and Tyng-Luh Liu. See-through-text grouping for referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7454-7463, 2019.
569
+ [Chen et al., 2023a] Xuhai Chen, Yue Han, and Jiangning Zhang. A zero-/few-shot anomaly classification and segmentation method for cvpr 2023 vand workshop challenge tracks 1&2: 1st place on zero-shot ad and 4th place on few-shot ad. arXiv preprint arXiv:2305.17382, 2023.
570
+ [Chen et al., 2023b] Xuhai Chen, Jiangning Zhang, Guanzhong Tian, Haoyang He, Wuhao Zhang, Yabiao Wang, Chengjie Wang, Yunsheng Wu, and Yong Liu. Clip-ad: A language-guided staged dual-path model for zero-shot anomaly detection. arXiv preprint arXiv:2311.00453, 2023.
571
+ [Chen et al., 2023c] Yuxiao Chen, Jianbo Yuan, Yu Tian, Shijie Geng, Xinyu Li, Ding Zhou, Dimitris N Metaxas, and Hongxia Yang. Revisiting multimodal representation in contrastive learning: from patch and token embeddings to finite discrete tokens. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15095-15104, 2023.
572
+ [Cho et al., 2014] Kyunghyun Cho, Bart Van Merrienboer, Dzmitry Bahdanau, and Yoshua Bengio. On the properties of neural machine translation: Encoder-decoder approaches. arXiv preprint arXiv:1409.1259, 2014.
573
+ [Deng et al., 2023] Hanqiu Deng, Zhaoxiang Zhang, Jinan Bao, and Xingyu Li. Anovl: Adapting vision-language models for unified zero-shot anomaly localization. arXiv preprint arXiv:2308.15939, 2023.
574
+ [Devlin et al., 2018] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
575
+
576
+ [Ding et al., 2019] Xiaohan Ding, Yuchen Guo, Guiguang Ding, and Jungong Han. Acnet: Strengthening the kernel skeletons for powerful cnn via asymmetric convolution blocks. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2019.
577
+ [Feng et al., 2021] Guang Feng, Zhiwei Hu, Lihe Zhang, Jiayu Sun, and Huchuan Lu. Bidirectional relationship inferring network for referring image localization and segmentation. IEEE Transactions on Neural Networks and Learning Systems, 2021.
578
+ [Fomalont, 1999] Ed B Fomalont. Image analysis. In Synthesis Imaging in Radio Astronomy II, volume 180, page 301, 1999.
579
+ [Gu et al., 2023] Zhaopeng Gu, Bingke Zhu, Guibo Zhu, Yingying Chen, Ming Tang, and Jinqiao Wang. Anomalygpt: Detecting industrial anomalies using large vision-language models. arXiv preprint arXiv:2308.15366, 2023.
580
+ [He et al., 2016] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016.
581
+ [Hou et al., 2020] Qibin Hou, Li Zhang, Ming-Ming Cheng, and Jiashi Feng. Strip pooling: Rethinking spatial pooling for scene parsing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020.
582
+ [Hu et al., 2016] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 108-124. Springer, 2016.
583
+ [Huang et al., 2019] Zilong Huang, Xinggang Wang, Lichao Huang, Chang Huang, Yunchao Wei, and Wenyu Liu. Ccnet: Criss-cross attention for semantic segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), October 2019.
584
+ [Huang et al., 2020] Yibin Huang, Congying Qiu, and Kui Yuan. Surface defect saliency of magnetic tile. The Visual Computer, 36:85-96, 2020.
585
+ [Jeong et al., 2023] Jongheon Jeong, Yang Zou, Taewan Kim, Dongqing Zhang, Avinash Ravichandran, and Onkar Dabeer. Winclip: Zero-/few-shot anomaly classification and segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19606-19616, 2023.
586
+ [Jing et al., 2021] Ya Jing, Tao Kong, Wei Wang, Liang Wang, Lei Li, and Tieniu Tan. Locate then segment: A strong pipeline for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9858-9867, 2021.
587
+ [Khattak et al., 2023] Muhammad Uzair Khattak, Syed Talal Wasim, Muzammal Naseer, Salman Khan, Ming-Hsuan Yang, and Fahad Shahbaz Khan. Self-regulating prompts:
588
+
589
+ Foundational model adaptation without forgetting. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15190-15200, 2023.
590
+ [Kirillov et al., 2023] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollár, and Ross Girshick. Segment anything. arXiv:2304.02643, 2023.
591
+ [Lin et al., 2017] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dollár. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017.
592
+ [Liu et al., 2023] Tongkun Liu, Bing Li, Xiao Du, Bingke Jiang, Xiao Jin, Liuyi Jin, and Zhuo Zhao. Component-aware anomaly detection framework for adjustable and logical industrial visual inspection. arXiv preprint arXiv:2305.08509, 2023.
593
+ [Milletari et al., 2016] Fausto Milletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016.
594
+ [Mishra et al., 2021] Pankaj Mishra, Riccardo Verk, Daniele Fornasier, Claudio Piciarelli, and Gian Luca Foresti. Vt-adl: A vision transformer network for image anomaly detection and localization. In 2021 IEEE 30th International Symposium on Industrial Electronics (ISIE), pages 01-06. IEEE, 2021.
595
+ [Radford et al., 2021] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021.
596
+ [Rafiei et al., 2023] Mehdi Rafiei, Toby P Breckon, and Alexandros Iosifidis. On pixel-level performance assessment in anomaly detection. arXiv preprint arXiv:2310.16435, 2023.
597
+ [Vaswani et al., 2017] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017.
598
+ [Wang et al., 2022] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 11686-11695, June 2022.
599
+ [Wang et al., 2023] Haoxiang Wang, Pavan Kumar Anasosalu Vasu, Fartash Faghri, Raviteja Vemulapalli, Mehrdad Farajtabar, Sachin Mehta, Mohammad Rastegari, Oncel Tuzel, and Hadi Pouransari. Sam-clip: Merging vision foundation models towards semantic and spatial understanding. arXiv preprint arXiv:2310.15308, 2023.
600
+
601
+ [Xu et al., 2023] Zunnan Xu, Zhihong Chen, Yong Zhang, Yibing Song, Xiang Wan, and Guanbin Li. Bridging vision and language encoders: Parameter-efficient tuning for referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17503-17512, 2023.
602
+ [Yang and Gong, 2024] Xiaobo Yang and Xiaojin Gong. Foundation model assisted weakly supervised semantic segmentation. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 523-532, 2024.
603
+ [Yang et al., 2023] Minghui Yang, Peng Wu, and Hui Feng. Memseg: A semi-supervised method for image surface defect detection using differences and commonalities. Engineering Applications of Artificial Intelligence, 119:105835, 2023.
604
+ [Ye et al., 2022a] Peng Ye, Baopu Li, Tao Chen, Jiayuan Fan, Zhen Mei, Chen Lin, Chongyan Zuo, Qinghua Chi, and Wanli Ouyang. Efficient joint-dimensional search with solution space regularization for real-time semantic segmentation. International Journal of Computer Vision, 130(11):2674-2694, 2022.
605
+ [Ye et al., 2022b] Peng Ye, Baopu Li, Yikang Li, Tao Chen, Jiayuan Fan, and Wanli Ouyang. β-darts: Beta-decay regularization for differentiable architecture search. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10874-10883, 2022.
606
+ [Ye et al., 2022c] Peng Ye, Shengji Tang, Baopu Li, Tao Chen, and Wanli Ouyang. Stimulative training of residual networks: A social psychology perspective of loafing. Advances in Neural Information Processing Systems, 35:3596-3608, 2022.
607
+ [Yue et al., 2023] Wenxi Yue, Jing Zhang, Kun Hu, Qiuxia Wu, Zongyuan Ge, Yong Xia, Jiebo Luo, and Zhiyong Wang. Part to whole: Collaborative prompting for surgical instrument segmentation, 2023.
608
+ [Zhang et al., 2023a] Chaoning Zhang, Fachrina Dewi Puspitasari, Sheng Zheng, Chenghao Li, Yu Qiao, Taegoo Kang, Xinru Shan, Chenshuang Zhang, Caiyan Qin, Francois Rameau, et al. A survey on segment anything model (sam): Vision foundation model meets prompt engineering. arXiv preprint arXiv:2306.06211, 2023.
609
+ [Zhang et al., 2023b] Chunhui Zhang, Li Liu, Yawen Cui, Guanjie Huang, Weilin Lin, Yiqian Yang, and Yuehong Hu. A comprehensive survey on segment anything model for vision and beyond. arXiv preprint arXiv:2305.08196, 2023.
610
+ [Zhou et al., 2023] Qihang Zhou, Guansong Pang, Yu Tian, Shibo He, and Jiming Chen. Anomalyclip: Object-agnostic prompt learning for zero-shot anomaly detection. arXiv preprint arXiv:2310.18961, 2023.
611
+ [Zou et al., 2022] Yang Zou, Jongheon Jeong, Latha Pemula, Dongqing Zhang, and Onkar Dabeer. Spot-the-difference self-supervised pre-training for anomaly detection and segmentation. arXiv preprint arXiv:2207.14315, 2022.
2401.12xxx/2401.12665/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d55481e49a664b6b444cf4ae6836ed81c1349ec478cf7c91d141c6bbee083032
3
+ size 1715009
2401.12xxx/2401.12665/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12690/144bbb49-024f-4544-960f-9726a73d392b_content_list.json ADDED
@@ -0,0 +1,1633 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Availability-aware Service Placement Policy in Fog Computing Based on Graph Partitions",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 99,
8
+ 65,
9
+ 897,
10
+ 137
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Isaac Lera, Carlos Guerrero, and Carlos Juiz, Senior Member, IEEE",
17
+ "bbox": [
18
+ 223,
19
+ 151,
20
+ 771,
21
+ 167
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—This paper presents a policy for service placement of fog applications inspired on complex networks and graph theory. We propose a twofold partition process based on communities for the partition of the fog devices and based on transitive closures for the application services partition. The allocation of the services is performed sequentially by, firstly, mapping applications to device communities and, secondly, mapping service transitive closures to fog devices in the community. The underlying idea is to place as many inter-related services as possible in the most nearby devices to the users. The optimization objectives are the availability of the applications and the Quality of Service (QoS) of the system, measured as the number of requests that are executed before the application deadlines. We compared our solution with an Integer Linear Programming approach, and the simulation results showed that our proposal obtains higher QoS and availability when fails in the nodes are considered.",
28
+ "bbox": [
29
+ 104,
30
+ 190,
31
+ 890,
32
+ 297
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Index Terms—Fog computing, Service placement, Service availability, Performance optimization, Complex network communities, Graph transitive closures.",
39
+ "bbox": [
40
+ 104,
41
+ 308,
42
+ 859,
43
+ 335
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "1 INTRODUCTION",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 73,
53
+ 375,
54
+ 230,
55
+ 390
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Fog computing has emerged as a suitable solution for the increase of application execution time and network usage that Internet of Things applications based on cloud services generate. This paradigm establishes that the in-network devices are provided with computational and storage capacities, and it enables them to allocate or execute services of the IoT applications that are commonly executed in the cloud provider [1]. By this, the application services are placed closer to the users (or IoT) devices and, consequently, the network latency between users and services and the network usage are reduced. Nevertheless, the limited capacities of the in-network devices, also known as fog devices in this domain, make the definition of management policies even more necessary than in other distributed systems such as cloud computing.",
62
+ "bbox": [
63
+ 73,
64
+ 402,
65
+ 491,
66
+ 621
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "The objective of our work is to study an application service placement policy to maximize service availability in case of failures. The placement consists on the selection of the most suitable fog devices to map service instances. We consider that the IoT applications are defined as a set of interrelated services that are initially and permanently deployed on the cloud provider, but that they can be horizontally scaled by creating new stateless instances in the fog devices. We also consider that the users of our domain are unalterable connected to a same gateway or access point, i.e., we consider that our users are IoT devices such as sensors or actuators, instead of considering mobility patterns, as for example in the case of mobile users.",
73
+ "bbox": [
74
+ 71,
75
+ 621,
76
+ 490,
77
+ 811
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "We propose a two phases policy that is addressed to optimize the service availability, in terms of reachability of the services from the IoT devices, and the deadline satisfaction ratios, in terms of the percentage of requests that obtain the application responses before their deadlines.",
84
+ "bbox": [
85
+ 71,
86
+ 811,
87
+ 491,
88
+ 885
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "In the first phase, the policy maps applications (the complete set of interrelated services) to a set of well-connected devices to guarantee the availability of the application for the users connected to that set. We propose to use the community structure of the fog devices for the generation of the partitions of those devices. Once that an application is mapped to a fog community, a second allocation process is performed, by mapping the services of the application to the fog devices in the community. This second phase addresses the optimization of the response time by prioritizing the allocation of interrelated services in the same fog device. We propose to partition the services of an application by using the transitive closure of a service to determine the services to be placed together in the same device.",
95
+ "bbox": [
96
+ 501,
97
+ 402,
98
+ 924,
99
+ 606
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "text",
105
+ "text": "Fog service placement problem has been addressed in previous researches, even considering community-based approaches [2], but we address some features that have not been previously considered, and the novel contributions of our approach are:",
106
+ "bbox": [
107
+ 503,
108
+ 607,
109
+ 924,
110
+ 680
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "list",
116
+ "sub_type": "text",
117
+ "list_items": [
118
+ "- The combination of the use of complex network communities for the device partition and service transitive closures for the application partition, that has not been used simultaneously in previous studies.",
119
+ "- The optimization of both the application deadline satisfaction, considered in some previous studies, and the application availability, not included in previous studies, and their evolution along the simulation.",
120
+ "- An experimental validation that includes dynamic fails of the infrastructure along the simulation."
121
+ ],
122
+ "bbox": [
123
+ 519,
124
+ 681,
125
+ 921,
126
+ 828
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "2 RELATED WORK",
133
+ "text_level": 1,
134
+ "bbox": [
135
+ 504,
136
+ 849,
137
+ 671,
138
+ 864
139
+ ],
140
+ "page_idx": 0
141
+ },
142
+ {
143
+ "type": "text",
144
+ "text": "The problem of the optimization of service placement in a fog architecture has been previously addressed from several different perspectives, by considering algorithm proposals such as genetic algorithms [3], [4], Montecarlo methods [5], distributed solutions [6], Petri Nets [7], Markov",
145
+ "bbox": [
146
+ 503,
147
+ 869,
148
+ 923,
149
+ 944
150
+ ],
151
+ "page_idx": 0
152
+ },
153
+ {
154
+ "type": "header",
155
+ "text": "IEEE",
156
+ "bbox": [
157
+ 75,
158
+ 32,
159
+ 104,
160
+ 42
161
+ ],
162
+ "page_idx": 0
163
+ },
164
+ {
165
+ "type": "page_number",
166
+ "text": "1",
167
+ "bbox": [
168
+ 911,
169
+ 32,
170
+ 921,
171
+ 42
172
+ ],
173
+ "page_idx": 0
174
+ },
175
+ {
176
+ "type": "aside_text",
177
+ "text": "arXiv:2401.12690v1 [cs.NI] 23 Jan 2024",
178
+ "bbox": [
179
+ 22,
180
+ 276,
181
+ 57,
182
+ 707
183
+ ],
184
+ "page_idx": 0
185
+ },
186
+ {
187
+ "type": "page_footnote",
188
+ "text": "The authors are with the Computer Science Department, Balearic Islands University, Palma, SPAIN, E07122.",
189
+ "bbox": [
190
+ 71,
191
+ 900,
192
+ 491,
193
+ 924
194
+ ],
195
+ "page_idx": 0
196
+ },
197
+ {
198
+ "type": "page_footnote",
199
+ "text": "Corresponding author: Carlos Guerrero E-mail: carlos.guerrero@uib.es",
200
+ "bbox": [
201
+ 96,
202
+ 924,
203
+ 475,
204
+ 936
205
+ ],
206
+ "page_idx": 0
207
+ },
208
+ {
209
+ "type": "text",
210
+ "text": "processes [8], and being linear programming one of the most common solutions [9], [10], [11], [12], [13], [14].",
211
+ "bbox": [
212
+ 71,
213
+ 53,
214
+ 491,
215
+ 82
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "Nevertheless, there is still room for improvement and some research challenges have not been still covered. For example, most of the previous solutions have included the optimization of response time, power consumption, cost, or network usage. But to the best of our knowledge, they have not studied the availability and the influence of failures in the infrastructure.",
222
+ "bbox": [
223
+ 71,
224
+ 83,
225
+ 490,
226
+ 183
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "text",
232
+ "text": "The use of the community relationship of the devices of a distributed system for the optimization of the resource management was initially proposed by Filiposka et al. [15], and they applied it in the optimization of the allocation of virtual machines in a datacenter to optimize the hop distances between related virtual machines. In the field of fog computing, the use of other topological features of graphs and complex network was proposed at a later stage, such as centrality indexes for the static definition of fog colonies [16] or the placement of data in fog devices [17].",
233
+ "bbox": [
234
+ 71,
235
+ 184,
236
+ 491,
237
+ 330
238
+ ],
239
+ "page_idx": 1
240
+ },
241
+ {
242
+ "type": "text",
243
+ "text": "The idea of organizing the complex structure of a fog architecture have been applied in several studies, where the authors defined these static infrastructure organizations as fog colonies [4], micro-clouds [18], Foglets [19], or fog domains [20]. For example, Skarlat et al. [4] defined a twofold distributed placement policy that first considered if a service should be allocated in a fog colony or migrated to the neighbor colony. Once that the colony was chosen, the control node of the colony decided the device that allocated the service. In all those studies, the partition of the fog devices was static and unique for all the applications.",
244
+ "bbox": [
245
+ 71,
246
+ 330,
247
+ 490,
248
+ 489
249
+ ],
250
+ "page_idx": 1
251
+ },
252
+ {
253
+ "type": "text",
254
+ "text": "On the contrary, Filiposka, Mishev and Gilly proposed a virtual partition of the devices that is specific for each application and it is dynamically established by the conditions of the system. They implemented an evolution of the proposal in [15] for the case of allocation of virtual machines (VM) into fog devices [2]. They considered that the fog services where encapsulated in one VM and they proposed a two phases optimization process, where in the first step the VM is mapped to a device community, and in the second step, the VM is allocated in any of the devices in the community with a traditional optimization technique. This is probably the most similar work to our proposal in terms of the optimization algorithm, but with a different optimization objective. Their objective was to propose a runtime algorithm for the migration of the VM as mobile user of the applications move through different access points to reduce the average service delay.",
255
+ "bbox": [
256
+ 71,
257
+ 489,
258
+ 491,
259
+ 736
260
+ ],
261
+ "page_idx": 1
262
+ },
263
+ {
264
+ "type": "text",
265
+ "text": "The main differences of the work of Filiposka et al. with our proposal are: first, we study the suitability of the community relationships to improve service availability instead of the migration of VMs due to the user mobility; second, we consider a more complex structure of the applications because we defined them as a set of interrelated services that can be allocated in different devices, while they defined the applications as a single encapsulating element, the VM; third, we also study the use of a graph partitioning approach, the transitive closure of the services, for the allocation of the services inside the communities to also benefit the placement of the most interrelated services in the same devices to reduce the network delays between interrelated services.",
266
+ "bbox": [
267
+ 71,
268
+ 738,
269
+ 491,
270
+ 941
271
+ ],
272
+ "page_idx": 1
273
+ },
274
+ {
275
+ "type": "image",
276
+ "img_path": "images/4ee8894b7bd3407721aab9be19c14d6a1d1ab2e29b18a19a6655c6adebd4c954.jpg",
277
+ "image_caption": [
278
+ "Fig. 1. Fog computing architecture."
279
+ ],
280
+ "image_footnote": [],
281
+ "bbox": [
282
+ 563,
283
+ 53,
284
+ 866,
285
+ 186
286
+ ],
287
+ "page_idx": 1
288
+ },
289
+ {
290
+ "type": "text",
291
+ "text": "3 PROBLEM STATEMENT",
292
+ "text_level": 1,
293
+ "bbox": [
294
+ 504,
295
+ 234,
296
+ 718,
297
+ 250
298
+ ],
299
+ "page_idx": 1
300
+ },
301
+ {
302
+ "type": "text",
303
+ "text": "A general fog computing architecture is represented in Fig. 1 where three layers can be identified: cloud layer, fog layer and client layer. Three types of devices can be differentiated: a device for the cloud provider of the cloud layer; the gateways, that are the access points for the clients; the fog devices, the network devices between the cloud provider and the gateways. All the devices have resources to allocate and execute services.",
304
+ "bbox": [
305
+ 501,
306
+ 253,
307
+ 923,
308
+ 371
309
+ ],
310
+ "page_idx": 1
311
+ },
312
+ {
313
+ "type": "text",
314
+ "text": "The fog infrastructure can be modeled as a graph where the nodes are the devices and the edges the direct network links between devices. We identify those devices as $D_{i}$ , considering two special cases for the cloud provider ( $D_{i}^{cloud}$ ) and the gateways ( $D_{i}^{gw}$ ). The devices are defined by the available capacity of their resources $AR_{D_i}$ , that is a vector which contains the capacities of each physical component. For the sake of simplicity, we have considered a scalar value, but it could easily be extended by including as many elements as necessary. We suppose unlimited resources for the specific case of the cloud provider, $AR_{D_i^{cloud}} = \\infty$ . The devices are also defined by the processing speed $IPT_{D_i}$ measured in terms of instructions per unit of time. The network links are identified by the two connected nodes $NL_{D_i,D_j}$ , and we consider that it is a bidirectional communication, $NL_{D_i,D_j} = NL_{D_j,D_i}$ . The network links are defined by the propagation delay, $PR_{NL_{D_i},D_j}$ , and the network bandwidth, $BW_{NL_{D_i},D_j}$ . Thus, the network delay, $ND_{NL_{D_i},D_j}$ , for the transmission of a packet between two connected devices is calculated as:",
315
+ "bbox": [
316
+ 501,
317
+ 371,
318
+ 923,
319
+ 664
320
+ ],
321
+ "page_idx": 1
322
+ },
323
+ {
324
+ "type": "equation",
325
+ "text": "\n$$\nN D _ {N L _ {D _ {i}, D _ {j}}} = P R _ {N L _ {D _ {i}, D _ {j}}} + \\frac {\\text {s i z e}}{B W _ {N L _ {D _ {i} , D _ {j}}}} \\tag {1}\n$$\n",
326
+ "text_format": "latex",
327
+ "bbox": [
328
+ 563,
329
+ 669,
330
+ 921,
331
+ 704
332
+ ],
333
+ "page_idx": 1
334
+ },
335
+ {
336
+ "type": "text",
337
+ "text": "where size is the size of the packet to be transmitted.",
338
+ "bbox": [
339
+ 503,
340
+ 708,
341
+ 877,
342
+ 723
343
+ ],
344
+ "page_idx": 1
345
+ },
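For illustration only, here is a minimal Python sketch of Eq. (1); the function name and the numeric values are assumptions, chosen in the same order of magnitude as the experiment parameters listed later in Table 1:

```python
def network_delay(pr_ms: float, bw_bytes_per_ms: float, size_bytes: float) -> float:
    # Eq. (1): propagation delay plus transmission time of a single packet
    return pr_ms + size_bytes / bw_bytes_per_ms

# Illustrative values: PR = 5 ms, BW = 75000 bytes/ms, size = 1,500,000 bytes
print(network_delay(5, 75_000, 1_500_000))  # 5 + 20 = 25.0 ms
```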
346
+ {
347
+ "type": "text",
348
+ "text": "The applications in our problem domain follow a microservice based development pattern, that is increasingly being used in IoT applications [21], [22], [23]. This type of applications are modeled as a set of small and stateless services that interoperate between them to accomplish a complex task [24]. Thus, the services can be easily scale up, by downloading the encapsulating element and executing it, or scale down, by just stopping and removing instances of the service. We assume that there is at least one instance of each service running in the cloud provider $(D_{i}^{cloud})$ .",
349
+ "bbox": [
350
+ 501,
351
+ 724,
352
+ 921,
353
+ 869
354
+ ],
355
+ "page_idx": 1
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "We model each application $APP_{x}$ as a directed graph, where the nodes are the services and the edges are the request messages between the services. We identify the services as $S_{u}$ and they are defined by the resource consumption generated in the device that allocates the service,",
360
+ "bbox": [
361
+ 501,
362
+ 869,
363
+ 923,
364
+ 944
365
+ ],
366
+ "page_idx": 1
367
+ },
368
+ {
369
+ "type": "header",
370
+ "text": "IEEE",
371
+ "bbox": [
372
+ 73,
373
+ 32,
374
+ 104,
375
+ 42
376
+ ],
377
+ "page_idx": 1
378
+ },
379
+ {
380
+ "type": "page_number",
381
+ "text": "2",
382
+ "bbox": [
383
+ 911,
384
+ 32,
385
+ 921,
386
+ 42
387
+ ],
388
+ "page_idx": 1
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "$CR_{S_u}$ . As in the case of the available resources in a device, the resource consumption is generally defined as a vector which measures the consumption of each physical component, but we have considered a scalar value for a simpler definition of the problem. Services are executed when a request message is received. We classify the services in two types depending on the origin of the service request: the entry-point service $S_u^{sep}$ , the origins of the request messages that arrive to those services are users $US_a$ or IoT devices (sensors typically) $ID_b$ ; the intra-services $S_u^{intra}$ , that are only requested by other services. An intra-service can be requested for several different services and the entry-point service can be requested for several users or IoT devices. But, we suppose that there is only one entry-point service for each application.",
393
+ "bbox": [
394
+ 71,
395
+ 53,
396
+ 491,
397
+ 272
398
+ ],
399
+ "page_idx": 2
400
+ },
401
+ {
402
+ "type": "text",
403
+ "text": "The task performed by a service is different depending on the requester, so the execution generated by a request not only depends on the service but also on the requester, i.e. the request message. The request messages are identified by the origin and target services, $MS_{S_u,S_v}$ , and they are modeled as unidirectional edges, $MS_{S_u,S_v} \\neq MS_{S_v,S_u}$ . The requests generated by the users or the IoT services, i.e. the requests to the entry-point services, are only identified by the target entry-point service $MS_{\\emptyset,S_u}$ .",
404
+ "bbox": [
405
+ 71,
406
+ 273,
407
+ 491,
408
+ 406
409
+ ],
410
+ "page_idx": 2
411
+ },
412
+ {
413
+ "type": "text",
414
+ "text": "The request messages are defined by the size of the request message $S Z_{MS_{S_u,S_v}}$ , that determines the transmission time of the service request, and the execution load that the target service will generate in the device, defined by the number of instructions to be executed, $EI_{MS_{S_u,S_v}}$ .",
415
+ "bbox": [
416
+ 71,
417
+ 407,
418
+ 491,
419
+ 482
420
+ ],
421
+ "page_idx": 2
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "We assume that there is at least one instance of each service in the cloud provider. But those services can be horizontally scaled by deploying new instances in the fog devices. By this, the workload can be distributed between instances and the network delay from the user to te service is reduced. We define a placement matrix, $P$ , of size $|S_u| \\times |D_i|$ , number of services per number of fog devices, where a element $p_{ui}$ is equal 1 if service $S_u$ is deployed in device $D_i$ , and 0 otherwise.",
426
+ "bbox": [
427
+ 71,
428
+ 483,
429
+ 491,
430
+ 613
431
+ ],
432
+ "page_idx": 2
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "The placement of the services are constrained by the device resource capacity. The resources consumed by the allocated services should not exceed the available resources in the device:",
437
+ "bbox": [
438
+ 71,
439
+ 616,
440
+ 491,
441
+ 674
442
+ ],
443
+ "page_idx": 2
444
+ },
445
+ {
446
+ "type": "equation",
447
+ "text": "\n$$\n\\sum_ {u = 1} ^ {| S _ {u} |} \\left(p _ {u i} \\times C R _ {S _ {u}}\\right) \\leq A R _ {D _ {i}}, \\forall D _ {i} \\tag {2}\n$$\n",
448
+ "text_format": "latex",
449
+ "bbox": [
450
+ 163,
451
+ 689,
452
+ 488,
453
+ 731
454
+ ],
455
+ "page_idx": 2
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "Our optimization objectives are to increase the application deadline satisfaction ratio, and the application availability as the devices or the network links fail.",
460
+ "bbox": [
461
+ 71,
462
+ 750,
463
+ 488,
464
+ 792
465
+ ],
466
+ "page_idx": 2
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "We define the deadline satisfaction ratio as the percentage of application requests that are processed before the application deadline. Consequently, the applications in the system, $APP_{x}$ , need to be defined by their deadlines, $DL_{APP_{x}}$ . The user perceived response time, $RT_{RQ_{US_{a},APP_{x}}^{n}}$ , is the metric that measures the time between a specific application request is sent by the user $(RQ_{US_{a},APP_{x}}^{n})$ and all the application services finish their execution. It includes the network delay of the request between services and the response times (execution and waiting time) of the services.",
471
+ "bbox": [
472
+ 71,
473
+ 796,
474
+ 491,
475
+ 944
476
+ ],
477
+ "page_idx": 2
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "The equation for the deadline satisfaction ratio is:",
482
+ "bbox": [
483
+ 527,
484
+ 53,
485
+ 877,
486
+ 69
487
+ ],
488
+ "page_idx": 2
489
+ },
490
+ {
491
+ "type": "equation",
492
+ "text": "\n$$\n\\operatorname {d e a d l i n e} \\left(U S _ {a}, A P P _ {x}\\right) = \\frac {\\left| R T _ {R Q _ {U S _ {a} , A P P _ {x}} ^ {n}} < D L _ {A P P _ {x}} \\right|}{\\left| R Q _ {U S _ {a} , A P P _ {x}} ^ {n} \\right|} \\tag {3}\n$$\n",
493
+ "text_format": "latex",
494
+ "bbox": [
495
+ 513,
496
+ 74,
497
+ 921,
498
+ 112
499
+ ],
500
+ "page_idx": 2
501
+ },
502
+ {
503
+ "type": "text",
504
+ "text": "where $|RQ_{US_a,APP_x}^n|$ is the number of times that a request for $APP_x$ is sent from user $US_a$ , and $|RT_{RQ_{US_a,APP_x}}^n| < DL_{APP_x}|$ is the number of those requests that satisfied the application deadline. This metric can be generalized by considering the request to an application from any user, deadline $(APP_x)$ , or the ratio for all the applications and users in the system, deadline(system).",
505
+ "bbox": [
506
+ 503,
507
+ 118,
508
+ 921,
509
+ 220
510
+ ],
511
+ "page_idx": 2
512
+ },
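A minimal sketch of how Eq. (3) could be evaluated over observed simulation traces; the input format (a plain list of response times for one user and application) is an assumption for illustration, not the paper's data model:

```python
def deadline_satisfaction(response_times_ms, deadline_ms):
    # Eq. (3): fraction of requests answered before the application deadline
    if not response_times_ms:
        return 0.0
    met = sum(1 for rt in response_times_ms if rt < deadline_ms)
    return met / len(response_times_ms)

# e.g. three requests of APP_x issued by US_a, with a 300 ms deadline
print(deadline_satisfaction([120, 280, 450], 300))  # 2/3 ~= 0.67
```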
513
+ {
514
+ "type": "text",
515
+ "text": "Our second objective, the application availability, is defined as the ratio of users that are able to reach all the services of the applications they request for a given point in time. In a hypothetical case, where any of the elements in the system fails, the service availability would be 1.0. But the devices or the network links can fall down, breaking the shortest paths between the users and the application services. At best, this only would generate an increase in the network delay due to the requests would be routed by a longer path, damaging the deadline satisfaction ratios. But it could even result in making the user impossible to reach all the application services, damaging the service availability ratio. The equation for the service availability ratios is:",
516
+ "bbox": [
517
+ 501,
518
+ 220,
519
+ 923,
520
+ 412
521
+ ],
522
+ "page_idx": 2
523
+ },
524
+ {
525
+ "type": "equation",
526
+ "text": "\n$$\n\\text {a v a i l a b i l i t y} \\left(\\mathrm {A P P} _ {x}\\right) = \\frac {\\left| U S _ {a} , g . t . \\exists \\text {p a t h} U S _ {a} \\text {t o} A P P _ {x} \\right|}{\\left| U S _ {a} , g . t . U S _ {a} \\text {r e q u e s t s} A P P _ {x} \\right|} \\tag {4}\n$$\n",
527
+ "text_format": "latex",
528
+ "bbox": [
529
+ 514,
530
+ 417,
531
+ 921,
532
+ 460
533
+ ],
534
+ "page_idx": 2
535
+ },
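A rough sketch of how Eq. (4) could be checked on the infrastructure graph once failed devices and links have been removed; `gateway_of`, `hosts_of`, and `requesters` are assumed helper structures for the example, not names used in the paper:

```python
import networkx as nx

def app_availability(infra: nx.Graph, gateway_of: dict, hosts_of: set, requesters: list) -> float:
    # Eq. (4): share of requesting users whose gateway can still reach every
    # device that hosts one of the application's services in graph `infra`.
    if not requesters:
        return 1.0
    ok = 0
    for user in requesters:
        gw = gateway_of[user]
        if gw in infra and all(d in infra and nx.has_path(infra, gw, d) for d in hosts_of):
            ok += 1
    return ok / len(requesters)
```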
536
+ {
537
+ "type": "text",
538
+ "text": "In summary, our domain problem is addressed to find $P$ , $p_{ui} \\forall S_u, D_i$ by minimizing deadline $(US_a, APP_x) \\wedge (1 - availability(APP_x)) \\forall US_a, APP_x$ subject to the constraint in Eq.(2).",
539
+ "bbox": [
540
+ 503,
541
+ 462,
542
+ 921,
543
+ 522
544
+ ],
545
+ "page_idx": 2
546
+ },
547
+ {
548
+ "type": "text",
549
+ "text": "4 TWO PHASES PARTITION-BASED OPTIMIZATION PROPOSAL",
550
+ "text_level": 1,
551
+ "bbox": [
552
+ 503,
553
+ 542,
554
+ 921,
555
+ 575
556
+ ],
557
+ "page_idx": 2
558
+ },
559
+ {
560
+ "type": "text",
561
+ "text": "Our optimization algorithm is based on a two phases placement process with a first mapping of applications in fog communities and a second phase which allocates the services of an application in the devices of a fog community. We partition the fog devices using the community relationship of the complex network that models the network infrastructure of the system. The application services are partitioned considering the transitive closures of the nodes that represent the services in the application graph.",
562
+ "bbox": [
563
+ 501,
564
+ 580,
565
+ 921,
566
+ 712
567
+ ],
568
+ "page_idx": 2
569
+ },
570
+ {
571
+ "type": "text",
572
+ "text": "We study if the community relationships of the fog devices is a good indicator to detect device sets that guarantee the availability of the services and the reachability of the devices when device and network links failures are considered. Additionally, we also study if the transitive closure of a service is a good indicator to decide the services that are allocated in the same device to avoid network communications overheads.",
573
+ "bbox": [
574
+ 501,
575
+ 712,
576
+ 921,
577
+ 829
578
+ ],
579
+ "page_idx": 2
580
+ },
581
+ {
582
+ "type": "text",
583
+ "text": "4.1 Community-based Fog Devices Partition",
584
+ "text_level": 1,
585
+ "bbox": [
586
+ 503,
587
+ 849,
588
+ 846,
589
+ 864
590
+ ],
591
+ "page_idx": 2
592
+ },
593
+ {
594
+ "type": "text",
595
+ "text": "The first phase of our optimization algorithm deals with the mapping between applications (a set of interrelated services) and a device partitioning. We propose to partition the devices with the use of the community relationship between them. This phase of our optimization algorithm is based",
596
+ "bbox": [
597
+ 501,
598
+ 869,
599
+ 921,
600
+ 944
601
+ ],
602
+ "page_idx": 2
603
+ },
604
+ {
605
+ "type": "header",
606
+ "text": "IEEE",
607
+ "bbox": [
608
+ 73,
609
+ 32,
610
+ 104,
611
+ 42
612
+ ],
613
+ "page_idx": 2
614
+ },
615
+ {
616
+ "type": "page_number",
617
+ "text": "3",
618
+ "bbox": [
619
+ 911,
620
+ 32,
621
+ 921,
622
+ 42
623
+ ],
624
+ "page_idx": 2
625
+ },
626
+ {
627
+ "type": "text",
628
+ "text": "on the previous work of Filiposka, Mishev and Gilly, where they studied and validated community-based algorithms for placement optimization in cloud computing [15] and in fog computing [2].",
629
+ "bbox": [
630
+ 71,
631
+ 53,
632
+ 491,
633
+ 112
634
+ ],
635
+ "page_idx": 3
636
+ },
637
+ {
638
+ "type": "text",
639
+ "text": "The community structure is a topological feature of graphs that determines the sets of nodes which are better connected between them than with the rest of the network. The most popular community detection method is the one proposed by Girvan and Newman [25], which detects communities by progressively removing edges from the original graph. The algorithm removes the edges with the highest betweenness centrality, at each step. Betweenness centrality of an edge is the sum of the fraction of the shortest paths that pass through the edge. Therefore, a community, that is organized with two regions that are mainly communicated by only one edge, is split into two new communities in each algorithm iteration.",
640
+ "bbox": [
641
+ 71,
642
+ 111,
643
+ 491,
644
+ 300
645
+ ],
646
+ "page_idx": 3
647
+ },
648
+ {
649
+ "type": "text",
650
+ "text": "Under the conditions of our domain problem, a device community can be understood as a set of devices that are well connected between them, with alternatives communication paths, and that the shortest paths between devices are evenly distributed between the topology. Consequently, a fail in an edge inside the community will have a lower influence in the communication paths between devices than a fail in the edges that connect the communities. This lower influence means that the fails inside the communities will not generate isolated regions in the topology neither an important increase in the communication delays.",
651
+ "bbox": [
652
+ 71,
653
+ 301,
654
+ 491,
655
+ 460
656
+ ],
657
+ "page_idx": 3
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "The Girvan-Newman method iteratively determines the communities and the dendrogram, the tree structure of the communities, can be built. We characterized those communities with its depth in the dendrogram. We define this depth as the iteration in which the community was obtained. The higher the depth value is, the better communicated the device community is. Consequently, from the point of view of the availability, it is better to place the applications in device communities with higher depth values, since the devices inside those communities are better communicated between them than the devices in communities with lower depths values [26].",
662
+ "bbox": [
663
+ 71,
664
+ 460,
665
+ 491,
666
+ 635
667
+ ],
668
+ "page_idx": 3
669
+ },
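As an illustration only, the community/depth computation described above can be sketched with networkx's Girvan-Newman generator; `max_depth` and the tagging scheme are our own simplifications, not the paper's implementation:

```python
import networkx as nx
from itertools import islice
from networkx.algorithms.community import girvan_newman

def communities_with_depth(infra: nx.Graph, max_depth: int = 10):
    # Tag every community with the iteration (dendrogram depth) at which it
    # first appears; a higher depth means a more tightly connected device set.
    seen, tagged = set(), []
    for depth, level in enumerate(islice(girvan_newman(infra), max_depth), start=1):
        for community in map(frozenset, level):
            if community not in seen:
                seen.add(community)
                tagged.append((depth, community))
    # deepest (best connected) communities first, as the placement phase expects
    return sorted(tagged, key=lambda t: t[0], reverse=True)
```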
670
+ {
671
+ "type": "text",
672
+ "text": "For example, consider the fog infrastructure in Fig. 2. The network link $NL_{D_c,D_f}$ is the one with the highest edge betweenness centrality since it is passed through the highest number of shortest paths. If we iterate the Girvan-Newman method over this example, communities 2 and 3 have higher depth values than community 1 since they are obtained when $NL_{D_c,D_f}$ is removed in the next iteration of the community generation algorithm. Consider also that we deploy an application with services $S_i$ and $S_j$ in community 1, allocating $S_i$ in $D_a$ and $S_j$ in $D_h$ , and that the user that requests the application is connected to device $D_b$ . Under those conditions, a fail in $NL_{D_c,D_f}$ would make impossible to finish the execution of the application since their services are unreachable. On the contrary, if we deploy the application in community 2, any fail in a edge would not make impossible to execute the application. Finally consider that a second user is connected to device $D_h$ . The best alternative, from the point of view of the availability, would be to horizontally scale up by deploying the same application twice in both communities 2 and 3, than only once in any of them.",
673
+ "bbox": [
674
+ 71,
675
+ 636,
676
+ 491,
677
+ 941
678
+ ],
679
+ "page_idx": 3
680
+ },
681
+ {
682
+ "type": "image",
683
+ "img_path": "images/9d008cea65a83e40768a77b353aca966dc1ff0a2ea645aa46009132a440b965f.jpg",
684
+ "image_caption": [
685
+ "Fig. 2. Example of fog device communities."
686
+ ],
687
+ "image_footnote": [],
688
+ "bbox": [
689
+ 568,
690
+ 53,
691
+ 862,
692
+ 164
693
+ ],
694
+ "page_idx": 3
695
+ },
696
+ {
697
+ "type": "text",
698
+ "text": "This example shows that, in an unrealistic situation with unlimited resources in all the devices, the best option would be to deploy an instance of the application for each client that requests it and this deployment would be placed in the community with the highest depth value that includes the device where the client is connected to. But this cannot be performed due to the limited resources in the devices of a community. Moreover, if we note that the higher the depth value of the community, the smaller the number of devices in the community, i.e., the communities with the highest values are the ones formed by only one device. Consequently, it is necessary to prioritize the allocation of the applications in the communities. We propose to use a greedy algorithm for this prioritization, more concretely, the First-Fit Decreasing algorithm [27].",
699
+ "bbox": [
700
+ 501,
701
+ 222,
702
+ 924,
703
+ 441
704
+ ],
705
+ "page_idx": 3
706
+ },
707
+ {
708
+ "type": "text",
709
+ "text": "Our optimization algorithm deals, in this first step, with the placement of applications in device communities using a First-Fit Decreasing approach. The priority criteria for ordering the applications is their execution deadlines, by prioritizing the applications with shortest deadlines. The algorithm starts checking the allocation of the application from the device communities with highest depth to the ones with the lowest, and the application is allocated in the first community with enough resources to allocate all the services of the application. If after checking all the communities, the application has not been allocated, this will be available only in the cloud provider. The process for the same application is repeated as many times as the number of users in the system that request this application. Algorithm 1 shows the pseudo-code of our proposal. The algorithm goes through the applications (in ascending deadline order), the users that request them and the communities (in descending depth order), trying to allocate the services of the application in the devices in the community.",
710
+ "bbox": [
711
+ 501,
712
+ 441,
713
+ 924,
714
+ 719
715
+ ],
716
+ "page_idx": 3
717
+ },
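A compact sketch of this First-Fit Decreasing loop; the attribute names (`deadline`, `apps`, `gateway`) and the `fits` callback standing in for the second phase are assumptions made for illustration:

```python
def map_apps_to_communities(apps, users, communities, fits):
    # `communities` is assumed to be a list of (depth, frozenset_of_devices)
    # pairs already sorted by descending depth; `fits` delegates to phase two.
    placement = {}  # application -> list of communities serving it
    for app in sorted(apps, key=lambda a: a.deadline):          # ascending deadline
        for user in (u for u in users if app in u.apps):        # one attempt per requester
            for depth, community in communities:
                if user.gateway not in community:
                    continue
                if community in placement.get(app, []):
                    break                                        # already served from here
                if fits(app, community):                         # phase two succeeded
                    placement.setdefault(app, []).append(community)
                    break
    return placement
```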
718
+ {
719
+ "type": "text",
720
+ "text": "In this first step, we map the applications in communities, but the map of services remains to be defined. We separate the process in two steps because we mainly focus the first one (mapping applications to communities) on increasing the application availability, and the second one (mapping services of an application to devices in a device community) on the application deadlines. This second step is performed by the function placeServicesInDevices(), in line 15, and its details are explained in Section 4.2 and Algorithm 2.",
721
+ "bbox": [
722
+ 503,
723
+ 720,
724
+ 924,
725
+ 867
726
+ ],
727
+ "page_idx": 3
728
+ },
729
+ {
730
+ "type": "text",
731
+ "text": "Our algorithm checks if an application has been previously placed in a community (line 11), and if not, it delegates the decision to place the application to the community to the algorithm which checks if the application services fit into the device community (Algorithm 2).",
732
+ "bbox": [
733
+ 503,
734
+ 869,
735
+ 924,
736
+ 941
737
+ ],
738
+ "page_idx": 3
739
+ },
740
+ {
741
+ "type": "header",
742
+ "text": "IEEE",
743
+ "bbox": [
744
+ 73,
745
+ 32,
746
+ 104,
747
+ 42
748
+ ],
749
+ "page_idx": 3
750
+ },
751
+ {
752
+ "type": "page_number",
753
+ "text": "4",
754
+ "bbox": [
755
+ 911,
756
+ 32,
757
+ 921,
758
+ 42
759
+ ],
760
+ "page_idx": 3
761
+ },
762
+ {
763
+ "type": "code",
764
+ "sub_type": "algorithm",
765
+ "code_caption": [
766
+ "Algorithm 1 Device community-based application allocation"
767
+ ],
768
+ "code_body": "1: $\\mathbb{C}\\gets$ calculate device communities \n2: $\\mathbb{C}$ order communities C by descending depth \n3: A $\\leftarrow$ order applications by ascending deadline \n4: appPlacement $\\leftarrow$ 0 \n5: for app in A do \n6: U $\\leftarrow$ get users requesting application app \n7: for user in U do \n8: dev $\\leftarrow$ get device where user is connected \n9: for infCom in IC do \n10: if dev $\\in$ infCom then \n11: if infCom $\\in$ appPlacement[app] then \n12: \"application app already placed in community infCom\"\" \n13: break \n14: else \n15: if placeServicesInDevices(app,infCom) then \n16: appPlacement[app].append(infCom) \n17: update resource usages in infCom \n18: \"placed application app in community infCom\"\" \n19: break",
769
+ "bbox": [
770
+ 73,
771
+ 80,
772
+ 488,
773
+ 275
774
+ ],
775
+ "page_idx": 4
776
+ },
777
+ {
778
+ "type": "text",
779
+ "text": "4.2 Transitive Closure-based Application Partition",
780
+ "text_level": 1,
781
+ "bbox": [
782
+ 73,
783
+ 304,
784
+ 457,
785
+ 319
786
+ ],
787
+ "page_idx": 4
788
+ },
789
+ {
790
+ "type": "text",
791
+ "text": "Once that the mapping of a given application into a candidate community of devices is performed by the first phase of the optimization algorithm, the second phase deals with the allocation of the services of the application into the devices in the community. We first partition the applications into sets of services, and it is checked if each of those service sets can be placed in just one device. If not, smaller sets are considered. The partition of the service into sets is based on our previous work [6], where we studied and validated the use of a distributed placement algorithm where the service sets are created by considering the transitive closure of the services in the application graph.",
792
+ "bbox": [
793
+ 71,
794
+ 327,
795
+ 490,
796
+ 501
797
+ ],
798
+ "page_idx": 4
799
+ },
800
+ {
801
+ "type": "text",
802
+ "text": "The transitive closure of a directed graph indicates the nodes that are reachable for each of the nodes in the graph. If a vertex $j$ is reachable by a vertex $i$ means that there is a path from $i$ to $j$ . The reachability matrix of a graph is called the transitive closure of the graph, and the set of reachable nodes for a given node is called the transitive closure of a node [28].",
803
+ "bbox": [
804
+ 71,
805
+ 503,
806
+ 491,
807
+ 604
808
+ ],
809
+ "page_idx": 4
810
+ },
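For a directed application graph, this set is directly available in networkx; below is a tiny sketch with a made-up four-service application (the service names are purely illustrative):

```python
import networkx as nx

def service_closure(app_graph: nx.DiGraph, service):
    # The service itself plus every service reachable through request edges.
    return {service} | nx.descendants(app_graph, service)

# toy application: s0 -> s1 -> s2 and s0 -> s3
g = nx.DiGraph([("s0", "s1"), ("s1", "s2"), ("s0", "s3")])
print(service_closure(g, "s1"))  # {'s1', 's2'}
print(service_closure(g, "s0"))  # all four services
```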
811
+ {
812
+ "type": "text",
813
+ "text": "Under the conditions of our domain problem, the transitive closure of a node can be understood as the set of services that are requested for the execution of the given service, i.e., the outgoing requests generated by a service when it receives an incoming request. If we are interested in reducing the response time of the application execution, the services of the transitive closure should be allocated in the same device to reduce the communication delays between them, since the network delay is 0.0 for request messages inside the same device. Moreover, the best case is when all the services of an application are allocated in the same device, but this is limited by the resource constraint (Equation 2).",
814
+ "bbox": [
815
+ 71,
816
+ 606,
817
+ 491,
818
+ 795
819
+ ],
820
+ "page_idx": 4
821
+ },
822
+ {
823
+ "type": "text",
824
+ "text": "We also propose a First-Fit algorithm for this second phase (Algorithm 2), which orders the sets of services from the ones with the biggest sizes (only one transitive closure with all the services) to the smallest sets of services (the transitive closures with only one node or with the loops in the service flow), and tries to place those sets of services into a same device. The devices are ordered by a fitness value which is the theoretical user perceived response time. This value is obtained by adding the network latency between the device and the user and the execution time of all the",
825
+ "bbox": [
826
+ 71,
827
+ 796,
828
+ 491,
829
+ 941
830
+ ],
831
+ "page_idx": 4
832
+ },
833
+ {
834
+ "type": "image",
835
+ "img_path": "images/f0da09f8b81d1b3384485f0faa16d87af12aca32bd47b350a951825911ed060b.jpg",
836
+ "image_caption": [
837
+ "Iter. 1",
838
+ "Fig. 3. Example of service transitive closures."
839
+ ],
840
+ "image_footnote": [],
841
+ "bbox": [
842
+ 522,
843
+ 51,
844
+ 612,
845
+ 119
846
+ ],
847
+ "page_idx": 4
848
+ },
849
+ {
850
+ "type": "image",
851
+ "img_path": "images/6a6c64fccc964409e836ca2dbb792652b9617033c3e80781bdea05f1ec0e3d72.jpg",
852
+ "image_caption": [
853
+ "Iter. 2"
854
+ ],
855
+ "image_footnote": [],
856
+ "bbox": [
857
+ 617,
858
+ 53,
859
+ 707,
860
+ 119
861
+ ],
862
+ "page_idx": 4
863
+ },
864
+ {
865
+ "type": "image",
866
+ "img_path": "images/d6bdc432e54c1dc6763a03abc153d17c247f5231474752aa6eb5b88aa2b972a8.jpg",
867
+ "image_caption": [
868
+ "Iter. 3"
869
+ ],
870
+ "image_footnote": [],
871
+ "bbox": [
872
+ 720,
873
+ 53,
874
+ 810,
875
+ 119
876
+ ],
877
+ "page_idx": 4
878
+ },
879
+ {
880
+ "type": "image",
881
+ "img_path": "images/392749778008f722a0585f5e5b1b33e743bc12f8f1387326d2e45711a8e35ebb.jpg",
882
+ "image_caption": [
883
+ "Iter. 4"
884
+ ],
885
+ "image_footnote": [],
886
+ "bbox": [
887
+ 818,
888
+ 53,
889
+ 906,
890
+ 119
891
+ ],
892
+ "page_idx": 4
893
+ },
894
+ {
895
+ "type": "code",
896
+ "sub_type": "algorithm",
897
+ "code_caption": [
898
+ "Algorithm 2 Transitive closure-based service allocation"
899
+ ],
900
+ "code_body": "1: function PLACESERVICESINDEVICES \n2: TC $\\leftarrow$ generate transitive closure partitions for app \n3: D $\\leftarrow$ order devices in infCom by reponse time \n4: SP $\\leftarrow$ $\\emptyset$ /*Services already placed*/ \n5: servPlacement $\\leftarrow$ $\\emptyset$ \n6: for dev in D do \n7: for appPartition in TC do \n8: for closure in appPartition do \n9: if (closure not in SP) and (closure fits in dev) then \n10: SP = SP $\\cup$ closure \n11: for service in closure do \n12: servPlacement[service] = dev \n13: update resource usages in dev \n14: if SP == app then \n15: return True, servPlacement \n16: return False, $\\emptyset$",
901
+ "bbox": [
902
+ 506,
903
+ 188,
904
+ 849,
905
+ 356
906
+ ],
907
+ "page_idx": 4
908
+ },
909
+ {
910
+ "type": "text",
911
+ "text": "services in the device. This prioritize the devices that are both closer to the users and faster in the execution. By this, the second step of the algorithm optimizes the user perceived response time, and, consequently, improves the deadline satisfaction ratio.",
912
+ "bbox": [
913
+ 501,
914
+ 385,
915
+ 923,
916
+ 457
917
+ ],
918
+ "page_idx": 4
919
+ },
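The device fitness value described above can be sketched as follows; the argument names and the example numbers are illustrative assumptions, not values taken from the paper:

```python
def device_fitness(latency_ms: float, ipt_instr_per_ms: float, request_loads: list) -> float:
    # Theoretical user-perceived response time used to order devices:
    # network latency from the user's gateway to the device plus the execution
    # time of the application's requests on that device (instructions / speed).
    return latency_ms + sum(ei / ipt_instr_per_ms for ei in request_loads)

# e.g. a device 25 ms away running at 500 instructions/ms,
# with two requests of 40000 and 20000 instructions
print(device_fitness(25, 500, [40_000, 20_000]))  # 25 + 80 + 40 = 145.0 ms
```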
920
+ {
921
+ "type": "text",
922
+ "text": "Initially, Algorithm 2 goes through the devices ordered by the fitness value, and tries to allocate as much services as possible in the devices with the highest values. For the first device, it first tries to allocate all the services of the application. If they do not fit, the service set is split in several sets, one for each entry-point service and one additional set for the transitive closures of each of its neighbor services of the entry-point one, and it checks if any of those new sets fits in the first device. This is recursively repeated for each transitive closure set that contains services not previously allocated. Fig. 3 shows an example of how the transitive closure of the services is generated along the iterations of the algorithm that partition the services of the application.",
923
+ "bbox": [
924
+ 501,
925
+ 458,
926
+ 921,
927
+ 648
928
+ ],
929
+ "page_idx": 4
930
+ },
931
+ {
932
+ "type": "text",
933
+ "text": "Once that all the service sets have been evaluated to be placed in the first device, this process is sequentially repeated for all the devices for the unallocated services. If after considering all the devices, there are still unallocated services, the mapping of the application in the current device community is rejected. Consequently, the first phase of the algorithm has to consider a greater community for the placement.",
934
+ "bbox": [
935
+ 501,
936
+ 648,
937
+ 921,
938
+ 765
939
+ ],
940
+ "page_idx": 4
941
+ },
942
+ {
943
+ "type": "text",
944
+ "text": "5 EXPERIMENTAL EVALUATION",
945
+ "text_level": 1,
946
+ "bbox": [
947
+ 504,
948
+ 789,
949
+ 769,
950
+ 804
951
+ ],
952
+ "page_idx": 4
953
+ },
954
+ {
955
+ "type": "text",
956
+ "text": "We defined random characteristics for the elements of our simulation experiments. We modeled the parameters of the elements in the domain with uniform distributions and the minimum and maximum values are shown in Table 1.",
957
+ "bbox": [
958
+ 501,
959
+ 810,
960
+ 921,
961
+ 868
962
+ ],
963
+ "page_idx": 4
964
+ },
965
+ {
966
+ "type": "text",
967
+ "text": "The service applications were generated randomly following a growing network (GN) graph structure. GN graphs are built by adding nodes one at a time with a link to one previously added node. The network infrastructure was created as a random Barabasi-Albert network with 100 fog",
968
+ "bbox": [
969
+ 501,
970
+ 869,
971
+ 921,
972
+ 943
973
+ ],
974
+ "page_idx": 4
975
+ },
976
+ {
977
+ "type": "header",
978
+ "text": "IEEE",
979
+ "bbox": [
980
+ 75,
981
+ 32,
982
+ 104,
983
+ 42
984
+ ],
985
+ "page_idx": 4
986
+ },
987
+ {
988
+ "type": "page_number",
989
+ "text": "5",
990
+ "bbox": [
991
+ 911,
992
+ 32,
993
+ 921,
994
+ 42
995
+ ],
996
+ "page_idx": 4
997
+ },
998
+ {
999
+ "type": "table",
1000
+ "img_path": "images/73fe16f3bbbf2972c976b5b9c6835bbbc0e3a9e7a0214766632f1d1033c14f8e.jpg",
1001
+ "table_caption": [
1002
+ "TABLE 1 Values of the parameters for the experiment characterization"
1003
+ ],
1004
+ "table_footnote": [],
1005
+ "table_body": "<table><tr><td>Parameter</td><td></td><td>min.-max.</td></tr><tr><td>Network</td><td></td><td></td></tr><tr><td>Propagation time (ms)</td><td>PRNLDi,Dj</td><td>5</td></tr><tr><td>Bandwidth (bytes/ms)</td><td>BWNLDi,Dj</td><td>75000</td></tr><tr><td>Fog device</td><td></td><td></td></tr><tr><td>Resources (res. units)</td><td>ARDSi</td><td>10-25</td></tr><tr><td>Speed (Intrs/ms)</td><td>IPTDi</td><td>100-1000</td></tr><tr><td>Application</td><td></td><td></td></tr><tr><td>Deadline (ms)</td><td>DLAPPx</td><td>300-50000</td></tr><tr><td>Services (number)</td><td></td><td>2-10</td></tr><tr><td>Resources (res. units)</td><td>CRSu</td><td>1-6</td></tr><tr><td>Execution (Intrs/req)</td><td>EIMSSu,Sv</td><td>20000-60000</td></tr><tr><td>Message size (bytes)</td><td>SZMSSu,Sv</td><td>1500000-4500000</td></tr><tr><td>IoT device</td><td></td><td></td></tr><tr><td>Request rate (1/ms)</td><td></td><td>1/1000-1/200</td></tr><tr><td>Popularity (prob.)</td><td></td><td>0.25</td></tr></table>",
1006
+ "bbox": [
1007
+ 94,
1008
+ 92,
1009
+ 470,
1010
+ 347
1011
+ ],
1012
+ "page_idx": 5
1013
+ },
1014
+ {
1015
+ "type": "text",
1016
+ "text": "devices. Betweenness centrality index is a topological metric that measures the number of shortest path that goes through a device. The gateway devices were selected from the nodes placed in the edges of the network, i.e., the nodes with the smallest betweenness centrality indices. Betweenness centrality index is a topological metric that measures the number of shortest path that goes through a device. We selected the $25\\%$ of devices with the lowest centrality value to behave as gateways (25 gateways). The number and the applications requested from the IoT devices connected to the gateways were determined with a popularity distribution modeled with an uniform distribution.",
1017
+ "bbox": [
1018
+ 71,
1019
+ 372,
1020
+ 491,
1021
+ 546
1022
+ ],
1023
+ "page_idx": 5
1024
+ },
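A rough sketch of how such a scenario can be generated with `networkx` is shown below. Only the counts and the ranges come from the text and Table 1; the Barabási-Albert attachment parameter (`m=2`) and the absence of seeding are assumptions made for illustration.

```python
import random
import networkx as nx

N_FOG, N_APPS = 100, 20

# fog infrastructure as a random Barabasi-Albert graph
infra = nx.barabasi_albert_graph(N_FOG, 2)

# gateways: the 25% of devices with the lowest betweenness centrality
centrality = nx.betweenness_centrality(infra)
gateways = sorted(centrality, key=centrality.get)[: N_FOG // 4]

# per-device characteristics drawn from the uniform ranges of Table 1
resources = {d: random.uniform(10, 25) for d in infra.nodes}   # res. units
speed = {d: random.uniform(100, 1000) for d in infra.nodes}    # Intrs/ms

# applications as growing-network (GN) graphs with 2-10 services each
apps = [nx.gn_graph(random.randint(2, 10)) for _ in range(N_APPS)]
```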
1025
+ {
1026
+ "type": "text",
1027
+ "text": "The random experimental scenario finally resulted on 20 applications with 106 services, that totally needed 360 resource units and the fog devices were able to offer up to 1874 resources units. 70 IoT devices (or users) were deployed and they generated an application request each $1/557$ ms in average.",
1028
+ "bbox": [
1029
+ 71,
1030
+ 547,
1031
+ 491,
1032
+ 635
1033
+ ],
1034
+ "page_idx": 5
1035
+ },
1036
+ {
1037
+ "type": "text",
1038
+ "text": "We compared the results of our proposal with the ones obtained from the implementation of an integer linear programming (ILP) service allocation optimizer. As we mention in Section 2, ILP solutions are the most numerous in fog service placement optimization.",
1039
+ "bbox": [
1040
+ 71,
1041
+ 635,
1042
+ 490,
1043
+ 709
1044
+ ],
1045
+ "page_idx": 5
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "text": "The experiments were executed using the YAFS simulator that we had previously developed for other research works. This simulator is able to include graph-based network topologies and pluggable fog service placement policies, apart from other features that, to the best of our knowledge, are not provided by other fog simulators, such as node failures, or dynamic service placement and routing. The simulator is open source and it can be downloaded from its code repository [29].",
1050
+ "bbox": [
1051
+ 71,
1052
+ 709,
1053
+ 491,
1054
+ 840
1055
+ ],
1056
+ "page_idx": 5
1057
+ },
1058
+ {
1059
+ "type": "text",
1060
+ "text": "The experiment results are presented and analyzed in two separated sections. Section 5.1 includes the analysis of the results obtained with the YAFS simulator. Those results compare the user perceived response time and the availability of the applications for the IoT devices. In Section 5.2, it is presented an analysis of the service placement obtained with both optimization policies (our proposal and the ILP",
1061
+ "bbox": [
1062
+ 71,
1063
+ 840,
1064
+ 491,
1065
+ 944
1066
+ ],
1067
+ "page_idx": 5
1068
+ },
1069
+ {
1070
+ "type": "text",
1071
+ "text": "one).",
1072
+ "bbox": [
1073
+ 504,
1074
+ 54,
1075
+ 542,
1076
+ 66
1077
+ ],
1078
+ "page_idx": 5
1079
+ },
1080
+ {
1081
+ "type": "text",
1082
+ "text": "5.1 Simulation Results",
1083
+ "text_level": 1,
1084
+ "bbox": [
1085
+ 504,
1086
+ 90,
1087
+ 687,
1088
+ 104
1089
+ ],
1090
+ "page_idx": 5
1091
+ },
1092
+ {
1093
+ "type": "text",
1094
+ "text": "A first simulation scenario included fails in the fog devices to study the availability of the services when the nodes are getting down. The simulation included random and permanent fails in the nodes, starting with all the devices (100 nodes) alive, and finishing the simulation with fails in all of them. The fails were generated uniformly along the simulation. The results of this simulation are presented in Fig. 4 and shows the QoS in terms of the total number of requests that are executed satisfying the application deadline. The reason because a request does not satisfy the deadline can be both due to the response time is higher than the deadline or due to none device with the services of the requested application are reachable from the IoT device due to all the paths between them have failed devices. Three data series are represented in Fig. 4: one for the total number of requests that are sent from the IoT devices (labeled with Total num. of requests), one for the number of requests that are executed before the deadline when the placement of our solution is considered (labeled with Partition); and the number of requests that satisfied the deadline with the ILP policy (labeled with ILP).",
1095
+ "bbox": [
1096
+ 501,
1097
+ 109,
1098
+ 924,
1099
+ 416
1100
+ ],
1101
+ "page_idx": 5
1102
+ },
1103
+ {
1104
+ "type": "text",
1105
+ "text": "It is observed that our approach results in a higher number of satisfied requests, mainly during the first half of the simulation (up to 50 failed devices). In the second part of the simulation, improvements in the QoS are also observed but these are less significant in regard with the ILP.",
1106
+ "bbox": [
1107
+ 503,
1108
+ 416,
1109
+ 924,
1110
+ 489
1111
+ ],
1112
+ "page_idx": 5
1113
+ },
1114
+ {
1115
+ "type": "text",
1116
+ "text": "For the sake of a deeper analysis of the availability, it has been also measured in terms of the number of IoT devices that are able to request their applications thank to that all the services they need are reachable with network paths without failed devices. This is represented in Fig. 5, where the y-axis are the number of IoT devices that are able to request their applications, and the x-axis the number of devices that have failed. The figure also includes the hypothetical and impossible case, due to the resource limit constraint, of allocating all the services in the gateways (labeled as All in gtws.). This is the best case and is useful to compare the solutions with the best upper bound. These results confirm that our proposal is able to increase the availability of the system when fails happen in the fog devices.",
1117
+ "bbox": [
1118
+ 501,
1119
+ 489,
1120
+ 924,
1121
+ 708
1122
+ ],
1123
+ "page_idx": 5
1124
+ },
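A minimal sketch of how this availability metric can be recomputed as devices fail is given below, assuming the fog topology is a `networkx` graph; the `placement` (service to list of hosting devices), `gateway_of` and `services_of` mappings are hypothetical names, not part of the simulator's API.

```python
import networkx as nx

def availability(infra, alive, gateway_of, services_of, placement):
    """Fraction of IoT devices that can still reach, over non-failed
    devices only, every service of the application they request."""
    G = infra.subgraph(alive)
    served = 0
    for user, gw in gateway_of.items():
        served += gw in G and all(
            any(dev in G and nx.has_path(G, gw, dev)
                for dev in placement.get(s, []))
            for s in services_of[user]
        )
    return served / len(gateway_of)
```

Calling this function after each batch of random node removals yields curves of the kind plotted in Fig. 5.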
1125
+ {
1126
+ "type": "text",
1127
+ "text": "A second simulation scenario did not include fails in the fog devices and was used to study the user perceived response time of the applications. These response times were measured as the time between the user request was generated in the IoT device and all the application services finished. The results were measured independently for each pair application-IoT device. They are summarized in Fig. 6. Each plot in the figure represents the response times of an application, an each item in the x-axis corresponds to one gateway that has an IoT device (or user) that request the application. The results of our solution are labeled as Partition and the results of the ILP approach are labeled as ILP.",
1128
+ "bbox": [
1129
+ 503,
1130
+ 708,
1131
+ 924,
1132
+ 897
1133
+ ],
1134
+ "page_idx": 5
1135
+ },
1136
+ {
1137
+ "type": "text",
1138
+ "text": "It is observed that the placement obtained with our proposal does not reduce the response time for all the applications, but it is shorter for 13 of the 20 applications.",
1139
+ "bbox": [
1140
+ 503,
1141
+ 898,
1142
+ 924,
1143
+ 944
1144
+ ],
1145
+ "page_idx": 5
1146
+ },
1147
+ {
1148
+ "type": "header",
1149
+ "text": "IEEE",
1150
+ "bbox": [
1151
+ 75,
1152
+ 32,
1153
+ 104,
1154
+ 42
1155
+ ],
1156
+ "page_idx": 5
1157
+ },
1158
+ {
1159
+ "type": "page_number",
1160
+ "text": "6",
1161
+ "bbox": [
1162
+ 911,
1163
+ 32,
1164
+ 921,
1165
+ 42
1166
+ ],
1167
+ "page_idx": 5
1168
+ },
1169
+ {
1170
+ "type": "image",
1171
+ "img_path": "images/1d291efd4cb94d7aecc2b27bd5237b70b1b449ca7b7d407ce67a9d60c8814e4b.jpg",
1172
+ "image_caption": [
1173
+ "Fig. 4. Evolution of the QoS with regard to the fail of fog devices, in terms of the number of requests which satisfy application deadlines $\\left(|RT_{RQ_{US_a,APP_x}^n} < DL_{APP_x}|\\right)$ compared with the total number of requests $\\left(|RQ_{US_a,APP_x}^n|\\right)$ ."
1174
+ ],
1175
+ "image_footnote": [],
1176
+ "bbox": [
1177
+ 161,
1178
+ 55,
1179
+ 834,
1180
+ 224
1181
+ ],
1182
+ "page_idx": 6
1183
+ },
1184
+ {
1185
+ "type": "image",
1186
+ "img_path": "images/ef8958092fb54eff69d9e2a20b951d9dcd2959f382473650aa1df00bbe0fc9d1.jpg",
1187
+ "image_caption": [
1188
+ "Fig. 5. Number of IoT devices that get services in regard with the number of failed fog devices (availability $(APP_{x})$ )."
1189
+ ],
1190
+ "image_footnote": [],
1191
+ "bbox": [
1192
+ 138,
1193
+ 300,
1194
+ 428,
1195
+ 438
1196
+ ],
1197
+ "page_idx": 6
1198
+ },
1199
+ {
1200
+ "type": "text",
1201
+ "text": "Additionally, we can observed that in some applications an important damage of the response time is obtained. This is explained because both policies prioritize applications with shorter deadlines in front of the ones with longer deadlines. Nevertheless, there are less of these extreme cases, and with shorter times, when our policy is used: our policy only damages application 15 with a time of around 1000 ms, in front of four applications up to 400 s with the ILP policy (around 400000 ms for application 1, 300000 ms for application 8, 200000 ms for application 12, and 70000 for application 2).",
1202
+ "bbox": [
1203
+ 71,
1204
+ 506,
1205
+ 491,
1206
+ 667
1207
+ ],
1208
+ "page_idx": 6
1209
+ },
1210
+ {
1211
+ "type": "text",
1212
+ "text": "In summary, our service placement policy shows a better behavior in terms of availability of the services that also results on a better QoS in the system. On the contrary, the response time of some applications results damaged but this behavior is also observed with the ILP policy, generating even worse response times.",
1213
+ "bbox": [
1214
+ 71,
1215
+ 667,
1216
+ 491,
1217
+ 755
1218
+ ],
1219
+ "page_idx": 6
1220
+ },
1221
+ {
1222
+ "type": "text",
1223
+ "text": "5.2 Placement Results",
1224
+ "text_level": 1,
1225
+ "bbox": [
1226
+ 73,
1227
+ 776,
1228
+ 254,
1229
+ 789
1230
+ ],
1231
+ "page_idx": 6
1232
+ },
1233
+ {
1234
+ "type": "text",
1235
+ "text": "This section is devoted to compare the placement of the services obtained from the execution of our algorithm with regard to the ILP one. This analysis is included to give a brief idea of how the services are spread across the fog devices.",
1236
+ "bbox": [
1237
+ 71,
1238
+ 795,
1239
+ 488,
1240
+ 854
1241
+ ],
1242
+ "page_idx": 6
1243
+ },
1244
+ {
1245
+ "type": "text",
1246
+ "text": "Firstly, Fig. 7a shows that the placement of the services differs a lot between both placement policies. A mark in the plot of the figure indicates that a given service (y-axes) is placed in a given device (x-axes). Taking into account that the services of the same application have consecutive identifiers, it is also observed that in the case of our policy",
1247
+ "bbox": [
1248
+ 71,
1249
+ 854,
1250
+ 491,
1251
+ 944
1252
+ ],
1253
+ "page_idx": 6
1254
+ },
1255
+ {
1256
+ "type": "text",
1257
+ "text": "(Partition), there are more cases of devices that allocate several services of the same application (consecutive marks in the same device).",
1258
+ "bbox": [
1259
+ 503,
1260
+ 296,
1261
+ 923,
1262
+ 340
1263
+ ],
1264
+ "page_idx": 6
1265
+ },
1266
+ {
1267
+ "type": "text",
1268
+ "text": "Fig. 7b represent the resource usage of the fog devices. The y-axis represents the percentage of resources that are used by the services allocated in a given device and the x-axis are the devices ordered by these percentages in ascending order. By the analysis of the figure, we can observe that in the placement of our solution, there are almost the double of nodes that do not allocate any service (the resource usage is 0.0), and there is not any device that is fully used (resource usage of 1.0), with regard to the case of the ILP where almost 40 devices have a $100\\%$ usage of the resources. The first interpretation of these results is that the scale level of our solution is smaller than the ILP one, in fact, we calculated that our policy deployed 357 (and 1161 resource units) instances of the services and the ILP deployed 374 (and 1203 resource units), around $5\\%$ more services ( $3.6\\%$ more resources). Consequently, our solution is able to obtain better QoS and availability with a lower use of the fog resources (smaller number of instances). The second interpretation is that the services are more evenly distributed, since the workload of the devices is smaller, avoiding the saturation of the devices and keeping the system in a more flexible state in order to allocate new service instances.",
1269
+ "bbox": [
1270
+ 503,
1271
+ 364,
1272
+ 923,
1273
+ 700
1274
+ ],
1275
+ "page_idx": 6
1276
+ },
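The relative figures quoted above can be checked with a couple of lines of Python:

```python
services_ours, services_ilp = 357, 374
units_ours, units_ilp = 1161, 1203
print(round((services_ilp / services_ours - 1) * 100, 1))  # 4.8 -> "around 5% more services"
print(round((units_ilp / units_ours - 1) * 100, 1))        # 3.6 -> "3.6% more resources"
```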
1277
+ {
1278
+ "type": "text",
1279
+ "text": "Finally, Fig. 7c shows the relationship between the service placement and the hop distance between the allocated service and the IoT device that requests it. A point in the scatter plot indicates how many IoT devices has a given distance with a service of the application they request. For example, in the case of our policy, there are around 100 services that are allocated in the gateways where the IoT devices are connected (a hop distance of 0.0). On the contrary, the ILP policy allocates more than 160 services in the gateways, the point (0,160) in the plot. We observe that the services are distributed more evenly and placed further from the gateways (higher distances) for the case our policy. Consequently, the ILP is able to place the services closer to the IoT devices. Despite this, our policy shows a better general behavior also in terms of application response time.",
1280
+ "bbox": [
1281
+ 501,
1282
+ 722,
1283
+ 923,
1284
+ 944
1285
+ ],
1286
+ "page_idx": 6
1287
+ },
1288
+ {
1289
+ "type": "header",
1290
+ "text": "IEEE",
1291
+ "bbox": [
1292
+ 75,
1293
+ 32,
1294
+ 104,
1295
+ 42
1296
+ ],
1297
+ "page_idx": 6
1298
+ },
1299
+ {
1300
+ "type": "page_number",
1301
+ "text": "7",
1302
+ "bbox": [
1303
+ 911,
1304
+ 32,
1305
+ 921,
1306
+ 42
1307
+ ],
1308
+ "page_idx": 6
1309
+ },
1310
+ {
1311
+ "type": "image",
1312
+ "img_path": "images/7dde587e6e3a1ef8227348ba9ec53164170addba766968a8274a959cb322fe72.jpg",
1313
+ "image_caption": [
1314
+ "Fig. 6. User perceived response times of the applications for each user (or IoT device) in the system $(RT_{RQ_{U S_{a},A P P_{x}}^{n}})$ ."
1315
+ ],
1316
+ "image_footnote": [],
1317
+ "bbox": [
1318
+ 133,
1319
+ 84,
1320
+ 808,
1321
+ 473
1322
+ ],
1323
+ "page_idx": 7
1324
+ },
1325
+ {
1326
+ "type": "image",
1327
+ "img_path": "images/7061f4795f3d236abf5caacb3b908fcd825dab9f10a3fb0d65648a87c087f5e1.jpg",
1328
+ "image_caption": [
1329
+ "(a) Allocation of the services in the fog devices $(P, p_{ui} \\forall S_u, D_i)$ ."
1330
+ ],
1331
+ "image_footnote": [],
1332
+ "bbox": [
1333
+ 98,
1334
+ 535,
1335
+ 352,
1336
+ 654
1337
+ ],
1338
+ "page_idx": 7
1339
+ },
1340
+ {
1341
+ "type": "image",
1342
+ "img_path": "images/5a4c4ea2cfbe257089515f2ba900beb04edff453e025e4fe46ec6e9a4c818a4d.jpg",
1343
+ "image_caption": [
1344
+ "(b) Resource usage of the fog devices $\\begin{array}{r}\\sum_{u = 1}^{|S_u|}\\left(p_{ui}\\times CR_{S_u}\\right),\\forall D_i) \\end{array}$",
1345
+ "Fig. 7. Comparison of the services placement between our partition-based algorithm and the ILP optimizer."
1346
+ ],
1347
+ "image_footnote": [],
1348
+ "bbox": [
1349
+ 372,
1350
+ 535,
1351
+ 624,
1352
+ 654
1353
+ ],
1354
+ "page_idx": 7
1355
+ },
1356
+ {
1357
+ "type": "image",
1358
+ "img_path": "images/880d4454a148a72613fd918941578e188755339fcd9116e5ff8af2833c8e7fbd.jpg",
1359
+ "image_caption": [
1360
+ "(c) Service allocation in terms of hop distance with the IoT devices."
1361
+ ],
1362
+ "image_footnote": [],
1363
+ "bbox": [
1364
+ 645,
1365
+ 536,
1366
+ 895,
1367
+ 654
1368
+ ],
1369
+ "page_idx": 7
1370
+ },
1371
+ {
1372
+ "type": "text",
1373
+ "text": "6 CONCLUSION",
1374
+ "text_level": 1,
1375
+ "bbox": [
1376
+ 73,
1377
+ 731,
1378
+ 215,
1379
+ 744
1380
+ ],
1381
+ "page_idx": 7
1382
+ },
1383
+ {
1384
+ "type": "text",
1385
+ "text": "We have proposed an algorithm for service placement in fog devices based on the partition of the fog devices (into communities) and the services of the applications (into transitive closures) for the optimization of the QoS of the system and the service availability for the users (or IoT devices).",
1386
+ "bbox": [
1387
+ 71,
1388
+ 752,
1389
+ 490,
1390
+ 839
1391
+ ],
1392
+ "page_idx": 7
1393
+ },
1394
+ {
1395
+ "type": "text",
1396
+ "text": "Two simulation scenarios have been executed, one including fails in the fog devices and another one without fails, to measure the response time of the applications, the service availability and the number of request that were served satisfying the application deadlines. The service placement obtained with our policy resulted in a higher QoS and service availability, with regard to the placement",
1397
+ "bbox": [
1398
+ 71,
1399
+ 840,
1400
+ 491,
1401
+ 944
1402
+ ],
1403
+ "page_idx": 7
1404
+ },
1405
+ {
1406
+ "type": "text",
1407
+ "text": "of an ILP-based algorithm. In the case of the user perceived response time, our policy obtained better times for 13 of the total 20 applications. Both policies showed a high degradation of service for some applications, but in the case of the ILP, this degradation happened in more applications and resulting in longer response times.",
1408
+ "bbox": [
1409
+ 503,
1410
+ 732,
1411
+ 921,
1412
+ 821
1413
+ ],
1414
+ "page_idx": 7
1415
+ },
1416
+ {
1417
+ "type": "text",
1418
+ "text": "As future works, the use of complex networks and graph theory for the optimization of other parameters of the systems, such as service cost, network usage, migration cost, and service provider cost could be studied. By the own nature of the proposed policy, the optimization of these other metrics probably would need to be combined with other type of heuristics to obtain suitable results, and consequently, further research is necessary.",
1419
+ "bbox": [
1420
+ 501,
1421
+ 825,
1422
+ 923,
1423
+ 943
1424
+ ],
1425
+ "page_idx": 7
1426
+ },
1427
+ {
1428
+ "type": "header",
1429
+ "text": "IEEE",
1430
+ "bbox": [
1431
+ 75,
1432
+ 32,
1433
+ 104,
1434
+ 42
1435
+ ],
1436
+ "page_idx": 7
1437
+ },
1438
+ {
1439
+ "type": "page_number",
1440
+ "text": "8",
1441
+ "bbox": [
1442
+ 911,
1443
+ 32,
1444
+ 921,
1445
+ 42
1446
+ ],
1447
+ "page_idx": 7
1448
+ },
1449
+ {
1450
+ "type": "text",
1451
+ "text": "ACKNOWLEDGMENTS",
1452
+ "text_level": 1,
1453
+ "bbox": [
1454
+ 73,
1455
+ 51,
1456
+ 250,
1457
+ 66
1458
+ ],
1459
+ "page_idx": 8
1460
+ },
1461
+ {
1462
+ "type": "text",
1463
+ "text": "This research was supported by the Spanish Government (Agencia Estatal de Investigación) and the European Commission (Fondo Europeo de Desarrollo Regional) through grant number TIN2017-88547-P (MINECO/AEI/FEDER, UE).",
1464
+ "bbox": [
1465
+ 71,
1466
+ 71,
1467
+ 491,
1468
+ 145
1469
+ ],
1470
+ "page_idx": 8
1471
+ },
1472
+ {
1473
+ "type": "text",
1474
+ "text": "REFERENCES",
1475
+ "text_level": 1,
1476
+ "bbox": [
1477
+ 73,
1478
+ 167,
1479
+ 189,
1480
+ 181
1481
+ ],
1482
+ "page_idx": 8
1483
+ },
1484
+ {
1485
+ "type": "list",
1486
+ "sub_type": "ref_text",
1487
+ "list_items": [
1488
+ "[1] O. Consortium et al., \"Openfog reference architecture for fog computing,\" Tech. Rep., February, Tech. Rep., 2017.",
1489
+ "[2] S. Filiposka, A. Mishev, and K. Gilly, \"Community-based allocation and migration strategies for fog computing,\" in 2018 IEEE Wireless Communications and Networking Conference (WCNC), April 2018, pp. 1-6.",
1490
+ "[3] Z. Wen, R. Yang, P. Garraghan, T. Lin, J. Xu, and M. Rovatsos, \"Fog orchestration for internet of things services,\" IEEE Internet Computing, vol. 21, no. 2, pp. 16-24, Mar 2017.",
1491
+ "[4] O. Skarlat, M. Nardelli, S. Schulte, M. Borkowski, and P. Leitner, \"Optimized IoT service placement in the fog,\" Service Oriented Computing and Applications, Oct 2017. [Online]. Available: https://doi.org/10.1007/s11761-017-0219-8",
1492
+ "[5] A. Brogi and S. Forti, \"Qos-aware deployment of IoT applications through the fog,\" IEEE Internet of Things Journal, vol. 4, no. 5, pp. 1185-1192, Oct 2017.",
1493
+ "[6] C. Guerrero, I. Lera, and C. Juiz, \"A lightweight decentralized service placement policy for performance optimization in fog computing,\" Journal of Ambient Intelligence and Humanized Computing, Jun 2018. [Online]. Available: https://doi.org/10.1007/s12652-018-0914-0",
1494
+ "[7] L. Ni, J. Zhang, C. Jiang, C. Yan, and K. Yu, \"Resource allocation strategy in fog computing based on priced timed petri nets,\" IEEE Internet of Things Journal, vol. 4, no. 5, pp. 1216-1228, Oct 2017.",
1495
+ "[8] R. Urgaonkar, S. Wang, T. He, M. Zafer, K. Chan, and K. K. Leung, \"Dynamic service migration and workload scheduling in edge-clouds,\" Performance Evaluation, vol. 91, no. Supplement C, pp. 205 - 228, 2015, special Issue: Performance 2015. [Online]. Available: http://www.sciencedirect.com/science/article/pii/S0166531615000619",
1496
+ "[9] L. Gu, D. Zeng, S. Guo, A. Barnawi, and Y. Xiang, \"Cost efficient resource management in fog computing supported medical cyberphysical system,\" IEEE Transactions on Emerging Topics in Computing, vol. 5, no. 1, pp. 108-119, Jan 2017.",
1497
+ "[10] K. Velasquez, D. P. Abreu, M. Curado, and E. Monteiro, \"Service placement for latency reduction in the internet of things,\" Annals of Telecommunications, vol. 72, no. 1, pp. 105-115, Feb 2017. [Online]. Available: https://doi.org/10.1007/s12243-016-0524-9",
1498
+ "[11] Z. Huang, K.-J. Lin, S.-Y. Yu, and J. Y. Jen Hsu, \"Co-locating services in IoT systems to minimize the communication energy cost,\" Journal of Innovation in Digital Ecosystems, vol. 1, no. 1, pp. 47 - 57, 2014. [Online]. Available: http://www.sciencedirect.com/science/article/pii/S2352664515000061",
1499
+ "[12] L. Yang, J. Cao, G. Liang, and X. Han, \"Cost aware service placement and load dispatching in mobile cloud systems,\" IEEE Transactions on Computers, vol. 65, no. 5, pp. 1440-1452, May 2016.",
1500
+ "[13] V. B. C. Souza, W. Ramírez, X. Masip-Bruin, E. Marín-Tordera, G. Ren, and G. Tashakor, \"Handling service allocation in combined fog-cloud scenarios,\" in 2016 IEEE International Conference on Communications (ICC), May 2016, pp. 1-5.",
1501
+ "[14] D. Zeng, L. Gu, S. Guo, Z. Cheng, and S. Yu, \"Joint optimization of task scheduling and image placement in fog computing supported software-defined embedded system,\" IEEE Transactions on Computers, vol. 65, no. 12, pp. 3702-3712, Dec 2016.",
1502
+ "[15] S. Filiposka, A. Mishev, and C. Juiz, \"Community-based vm placement framework,\" The Journal of Supercomputing, vol. 71, no. 12, pp. 4504-4528, Dec 2015. [Online]. Available: https://doi.org/10.1007/s11227-015-1546-1",
1503
+ "[16] C. Guerrero, I. Lera, and C. Juiz, \"On the influence of fog colonies partitioning in fog application makespan,\" in 2019 IEEE 6th International Conference on Future Internet of Things and Cloud (FiCloud), August 2018.",
1504
+ "[17] I. Lera, C. Guerrero, and C. Juiz, \"Comparing centrality indices for network usage optimization of data placement policies in fog devices,\" in 2018 Third International Conference on Fog and Mobile Edge Computing (FMEC), April 2018, pp. 115-122."
1505
+ ],
1506
+ "bbox": [
1507
+ 73,
1508
+ 188,
1509
+ 491,
1510
+ 941
1511
+ ],
1512
+ "page_idx": 8
1513
+ },
1514
+ {
1515
+ "type": "list",
1516
+ "sub_type": "ref_text",
1517
+ "list_items": [
1518
+ "[18] Y. Elkhatib, B. Porter, H. B. Ribeiro, M. F. Zhani, J. Qadir, and E. Riviere, \"On using micro-clouds to deliver the fog,\" IEEE Internet Computing, vol. 21, no. 2, pp. 8-15, Mar 2017.",
1519
+ "[19] F. Bonomi, R. Milito, P. Natarajan, and J. Zhu, Fog Computing: A Platform for Internet of Things and Analytics. Cham: Springer International Publishing, 2014, pp. 169-186.",
1520
+ "[20] A. Yousefpour, G. Ishigaki, R. Gour, and J. P. Jue, \"On reducing iot service delay via fog offloading,\" IEEE Internet of Things Journal, vol. PP, no. 99, pp. 1-1, 2018.",
1521
+ "[21] M. Vogler, J. M. Schleicher, C. Inzinger, and S. Dustdar, \"A scalable framework for provisioning large-scale IoT deployments,\" ACM Trans. Internet Technol., vol. 16, no. 2, pp. 11:1-11:20, Mar. 2016. [Online]. Available: http://doi.acm.org/10.1145/2850416",
1522
+ "[22] A. Krylovskiy, M. Jahn, and E. Patti, \"Designing a smart city internet of things platform with microservice architecture,\" in 2015 3rd International Conference on Future Internet of Things and Cloud, Aug 2015, pp. 25-30.",
1523
+ "[23] E. Saurez, K. Hong, D. Lillethun, U. Ramachandran, and B. Ottenwalder, \"Incremental deployment and migration of geo-distributed situation awareness applications in the fog,\" in Proceedings of the 10th ACM International Conference on Distributed and Event-based Systems, ser. DEBS '16. New York, NY, USA: ACM, 2016, pp. 258-269. [Online]. Available: http://doi.acm.org/10.1145/2933267.2933317",
1524
+ "[24] A. Balalaie, A. Heydarnoori, and P. Jamshidi, \"Microservices architecture enables devops: Migration to a cloud-native architecture,\" IEEE Software, vol. 33, no. 3, pp. 42-52, May 2016.",
1525
+ "[25] M. E. J. Newman and M. Girvan, \"Finding and evaluating community structure in networks,\" Phys. Rev. E, vol. 69, no. 2, p. 026113, Feb. 2004. [Online]. Available: http://link.aps.org/doi/10.1103/PhysRevE.69.026113",
1526
+ "[26] S. Fortunato, V. Latora, and M. Marchiori, \"Method to find community structures based on information centrality,\" Phys. Rev. E, vol. 70, p. 056104, Nov 2004. [Online]. Available: https://link.aps.org/doi/10.1103/PhysRevE.70.056104",
1527
+ "[27] A. Alahmadi, A. Alnowiser, M. M. Zhu, D. Che, and P. Ghodous, \"Enhanced first-fit decreasing algorithm for energy-aware job scheduling in cloud,\" in 2014 International Conference on Computational Science and Computational Intelligence, vol. 2, March 2014, pp. 69-74.",
1528
+ "[28] H. S. Warren Jr, \"A modification of warshall's algorithm for the transitive closure of binary relations,\" Communications of the ACM, vol. 18, no. 4, pp. 218-220, 1975.",
1529
+ "[29] I. Lera and C. Guerrero, \"Yafs, yet another fog simulator,\" https: //github.com/acsicuib/YAFS, accessed: 2018-02-03."
1530
+ ],
1531
+ "bbox": [
1532
+ 506,
1533
+ 54,
1534
+ 921,
1535
+ 566
1536
+ ],
1537
+ "page_idx": 8
1538
+ },
1539
+ {
1540
+ "type": "image",
1541
+ "img_path": "images/d06f4273a2fb8d6577d64fec81d97ae2b6d367555ef5dc862426499cb1bfe7e4.jpg",
1542
+ "image_caption": [],
1543
+ "image_footnote": [],
1544
+ "bbox": [
1545
+ 506,
1546
+ 579,
1547
+ 627,
1548
+ 696
1549
+ ],
1550
+ "page_idx": 8
1551
+ },
1552
+ {
1553
+ "type": "text",
1554
+ "text": "Isaac Lera received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2012. He is an assistant professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research lines are semantic web, open data, system performance, educational innovation and human mobility. He has authored in several journals and international conferences.",
1555
+ "bbox": [
1556
+ 638,
1557
+ 579,
1558
+ 921,
1559
+ 693
1560
+ ],
1561
+ "page_idx": 8
1562
+ },
1563
+ {
1564
+ "type": "image",
1565
+ "img_path": "images/d8d825a07523da247e5fbbc78fe46d563cd15790c3ad9ba2187f0954f1dc9ff2.jpg",
1566
+ "image_caption": [],
1567
+ "image_footnote": [],
1568
+ "bbox": [
1569
+ 506,
1570
+ 696,
1571
+ 627,
1572
+ 814
1573
+ ],
1574
+ "page_idx": 8
1575
+ },
1576
+ {
1577
+ "type": "text",
1578
+ "text": "Carlos Guerrero received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2012. He is an assistant professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research interests include web performance, resource management, web engineering, and cloud computing. He has authored around 40 papers in international conferences and journals.",
1579
+ "bbox": [
1580
+ 638,
1581
+ 696,
1582
+ 921,
1583
+ 811
1584
+ ],
1585
+ "page_idx": 8
1586
+ },
1587
+ {
1588
+ "type": "image",
1589
+ "img_path": "images/e87d17a2916bfdfa643a61628ccf93722b8668ac63f425b092c8fb00bc67b95e.jpg",
1590
+ "image_caption": [],
1591
+ "image_footnote": [],
1592
+ "bbox": [
1593
+ 506,
1594
+ 816,
1595
+ 627,
1596
+ 931
1597
+ ],
1598
+ "page_idx": 8
1599
+ },
1600
+ {
1601
+ "type": "text",
1602
+ "text": "Carlos Juiz received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2001. He is an associate professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research interests include performance engineering, cloud computing and IT governance. He has authored around 150 papers in different international conferences and journals.",
1603
+ "bbox": [
1604
+ 638,
1605
+ 814,
1606
+ 921,
1607
+ 929
1608
+ ],
1609
+ "page_idx": 8
1610
+ },
1611
+ {
1612
+ "type": "header",
1613
+ "text": "IEEE",
1614
+ "bbox": [
1615
+ 73,
1616
+ 32,
1617
+ 104,
1618
+ 42
1619
+ ],
1620
+ "page_idx": 8
1621
+ },
1622
+ {
1623
+ "type": "page_number",
1624
+ "text": "9",
1625
+ "bbox": [
1626
+ 911,
1627
+ 32,
1628
+ 921,
1629
+ 42
1630
+ ],
1631
+ "page_idx": 8
1632
+ }
1633
+ ]
2401.12xxx/2401.12690/144bbb49-024f-4544-960f-9726a73d392b_model.json ADDED
@@ -0,0 +1,2077 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "header",
5
+ "bbox": [
6
+ 0.076,
7
+ 0.033,
8
+ 0.106,
9
+ 0.044
10
+ ],
11
+ "angle": 0,
12
+ "content": "IEEE"
13
+ },
14
+ {
15
+ "type": "page_number",
16
+ "bbox": [
17
+ 0.913,
18
+ 0.034,
19
+ 0.922,
20
+ 0.043
21
+ ],
22
+ "angle": 0,
23
+ "content": "1"
24
+ },
25
+ {
26
+ "type": "aside_text",
27
+ "bbox": [
28
+ 0.023,
29
+ 0.277,
30
+ 0.058,
31
+ 0.708
32
+ ],
33
+ "angle": 270,
34
+ "content": "arXiv:2401.12690v1 [cs.NI] 23 Jan 2024"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.1,
40
+ 0.066,
41
+ 0.898,
42
+ 0.138
43
+ ],
44
+ "angle": 0,
45
+ "content": "Availability-aware Service Placement Policy in Fog Computing Based on Graph Partitions"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.225,
51
+ 0.152,
52
+ 0.772,
53
+ 0.168
54
+ ],
55
+ "angle": 0,
56
+ "content": "Isaac Lera, Carlos Guerrero, and Carlos Juiz, Senior Member, IEEE"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.105,
62
+ 0.191,
63
+ 0.892,
64
+ 0.298
65
+ ],
66
+ "angle": 0,
67
+ "content": "Abstract—This paper presents a policy for service placement of fog applications inspired on complex networks and graph theory. We propose a twofold partition process based on communities for the partition of the fog devices and based on transitive closures for the application services partition. The allocation of the services is performed sequentially by, firstly, mapping applications to device communities and, secondly, mapping service transitive closures to fog devices in the community. The underlying idea is to place as many inter-related services as possible in the most nearby devices to the users. The optimization objectives are the availability of the applications and the Quality of Service (QoS) of the system, measured as the number of requests that are executed before the application deadlines. We compared our solution with an Integer Linear Programming approach, and the simulation results showed that our proposal obtains higher QoS and availability when fails in the nodes are considered."
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.105,
73
+ 0.309,
74
+ 0.861,
75
+ 0.336
76
+ ],
77
+ "angle": 0,
78
+ "content": "Index Terms—Fog computing, Service placement, Service availability, Performance optimization, Complex network communities, Graph transitive closures."
79
+ },
80
+ {
81
+ "type": "title",
82
+ "bbox": [
83
+ 0.075,
84
+ 0.376,
85
+ 0.231,
86
+ 0.391
87
+ ],
88
+ "angle": 0,
89
+ "content": "1 INTRODUCTION"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.074,
95
+ 0.403,
96
+ 0.492,
97
+ 0.622
98
+ ],
99
+ "angle": 0,
100
+ "content": "Fog computing has emerged as a suitable solution for the increase of application execution time and network usage that Internet of Things applications based on cloud services generate. This paradigm establishes that the in-network devices are provided with computational and storage capacities, and it enables them to allocate or execute services of the IoT applications that are commonly executed in the cloud provider [1]. By this, the application services are placed closer to the users (or IoT) devices and, consequently, the network latency between users and services and the network usage are reduced. Nevertheless, the limited capacities of the in-network devices, also known as fog devices in this domain, make the definition of management policies even more necessary than in other distributed systems such as cloud computing."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.073,
106
+ 0.622,
107
+ 0.491,
108
+ 0.812
109
+ ],
110
+ "angle": 0,
111
+ "content": "The objective of our work is to study an application service placement policy to maximize service availability in case of failures. The placement consists on the selection of the most suitable fog devices to map service instances. We consider that the IoT applications are defined as a set of interrelated services that are initially and permanently deployed on the cloud provider, but that they can be horizontally scaled by creating new stateless instances in the fog devices. We also consider that the users of our domain are unalterable connected to a same gateway or access point, i.e., we consider that our users are IoT devices such as sensors or actuators, instead of considering mobility patterns, as for example in the case of mobile users."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.073,
117
+ 0.812,
118
+ 0.492,
119
+ 0.886
120
+ ],
121
+ "angle": 0,
122
+ "content": "We propose a two phases policy that is addressed to optimize the service availability, in terms of reachability of the services from the IoT devices, and the deadline satisfaction ratios, in terms of the percentage of requests that obtain the application responses before their deadlines."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.503,
128
+ 0.403,
129
+ 0.925,
130
+ 0.607
131
+ ],
132
+ "angle": 0,
133
+ "content": "In the first phase, the policy maps applications (the complete set of interrelated services) to a set of well-connected devices to guarantee the availability of the application for the users connected to that set. We propose to use the community structure of the fog devices for the generation of the partitions of those devices. Once that an application is mapped to a fog community, a second allocation process is performed, by mapping the services of the application to the fog devices in the community. This second phase addresses the optimization of the response time by prioritizing the allocation of interrelated services in the same fog device. We propose to partition the services of an application by using the transitive closure of a service to determine the services to be placed together in the same device."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.504,
139
+ 0.608,
140
+ 0.925,
141
+ 0.681
142
+ ],
143
+ "angle": 0,
144
+ "content": "Fog service placement problem has been addressed in previous researches, even considering community-based approaches [2], but we address some features that have not been previously considered, and the novel contributions of our approach are:"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.521,
150
+ 0.683,
151
+ 0.923,
152
+ 0.741
153
+ ],
154
+ "angle": 0,
155
+ "content": "- The combination of the use of complex network communities for the device partition and service transitive closures for the application partition, that has not been used simultaneously in previous studies."
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.522,
161
+ 0.741,
162
+ 0.923,
163
+ 0.799
164
+ ],
165
+ "angle": 0,
166
+ "content": "- The optimization of both the application deadline satisfaction, considered in some previous studies, and the application availability, not included in previous studies, and their evolution along the simulation."
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.522,
172
+ 0.8,
173
+ 0.923,
174
+ 0.829
175
+ ],
176
+ "angle": 0,
177
+ "content": "- An experimental validation that includes dynamic fails of the infrastructure along the simulation."
178
+ },
179
+ {
180
+ "type": "list",
181
+ "bbox": [
182
+ 0.521,
183
+ 0.683,
184
+ 0.923,
185
+ 0.829
186
+ ],
187
+ "angle": 0,
188
+ "content": null
189
+ },
190
+ {
191
+ "type": "title",
192
+ "bbox": [
193
+ 0.505,
194
+ 0.85,
195
+ 0.673,
196
+ 0.865
197
+ ],
198
+ "angle": 0,
199
+ "content": "2 RELATED WORK"
200
+ },
201
+ {
202
+ "type": "text",
203
+ "bbox": [
204
+ 0.504,
205
+ 0.87,
206
+ 0.924,
207
+ 0.945
208
+ ],
209
+ "angle": 0,
210
+ "content": "The problem of the optimization of service placement in a fog architecture has been previously addressed from several different perspectives, by considering algorithm proposals such as genetic algorithms [3], [4], Montecarlo methods [5], distributed solutions [6], Petri Nets [7], Markov"
211
+ },
212
+ {
213
+ "type": "page_footnote",
214
+ "bbox": [
215
+ 0.073,
216
+ 0.901,
217
+ 0.492,
218
+ 0.925
219
+ ],
220
+ "angle": 0,
221
+ "content": "The authors are with the Computer Science Department, Balearic Islands University, Palma, SPAIN, E07122."
222
+ },
223
+ {
224
+ "type": "page_footnote",
225
+ "bbox": [
226
+ 0.098,
227
+ 0.925,
228
+ 0.476,
229
+ 0.937
230
+ ],
231
+ "angle": 0,
232
+ "content": "Corresponding author: Carlos Guerrero E-mail: carlos.guerrero@uib.es"
233
+ },
234
+ {
235
+ "type": "list",
236
+ "bbox": [
237
+ 0.073,
238
+ 0.901,
239
+ 0.492,
240
+ 0.937
241
+ ],
242
+ "angle": 0,
243
+ "content": null
244
+ }
245
+ ],
246
+ [
247
+ {
248
+ "type": "header",
249
+ "bbox": [
250
+ 0.075,
251
+ 0.033,
252
+ 0.106,
253
+ 0.044
254
+ ],
255
+ "angle": 0,
256
+ "content": "IEEE"
257
+ },
258
+ {
259
+ "type": "page_number",
260
+ "bbox": [
261
+ 0.913,
262
+ 0.034,
263
+ 0.923,
264
+ 0.043
265
+ ],
266
+ "angle": 0,
267
+ "content": "2"
268
+ },
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.073,
273
+ 0.054,
274
+ 0.492,
275
+ 0.083
276
+ ],
277
+ "angle": 0,
278
+ "content": "processes [8], and being linear programming one of the most common solutions [9], [10], [11], [12], [13], [14]."
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.072,
284
+ 0.084,
285
+ 0.491,
286
+ 0.184
287
+ ],
288
+ "angle": 0,
289
+ "content": "Nevertheless, there is still room for improvement and some research challenges have not been still covered. For example, most of the previous solutions have included the optimization of response time, power consumption, cost, or network usage. But to the best of our knowledge, they have not studied the availability and the influence of failures in the infrastructure."
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.072,
295
+ 0.185,
296
+ 0.492,
297
+ 0.331
298
+ ],
299
+ "angle": 0,
300
+ "content": "The use of the community relationship of the devices of a distributed system for the optimization of the resource management was initially proposed by Filiposka et al. [15], and they applied it in the optimization of the allocation of virtual machines in a datacenter to optimize the hop distances between related virtual machines. In the field of fog computing, the use of other topological features of graphs and complex network was proposed at a later stage, such as centrality indexes for the static definition of fog colonies [16] or the placement of data in fog devices [17]."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.072,
306
+ 0.332,
307
+ 0.491,
308
+ 0.491
309
+ ],
310
+ "angle": 0,
311
+ "content": "The idea of organizing the complex structure of a fog architecture have been applied in several studies, where the authors defined these static infrastructure organizations as fog colonies [4], micro-clouds [18], Foglets [19], or fog domains [20]. For example, Skarlat et al. [4] defined a twofold distributed placement policy that first considered if a service should be allocated in a fog colony or migrated to the neighbor colony. Once that the colony was chosen, the control node of the colony decided the device that allocated the service. In all those studies, the partition of the fog devices was static and unique for all the applications."
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.072,
317
+ 0.491,
318
+ 0.492,
319
+ 0.737
320
+ ],
321
+ "angle": 0,
322
+ "content": "On the contrary, Filiposka, Mishev and Gilly proposed a virtual partition of the devices that is specific for each application and it is dynamically established by the conditions of the system. They implemented an evolution of the proposal in [15] for the case of allocation of virtual machines (VM) into fog devices [2]. They considered that the fog services where encapsulated in one VM and they proposed a two phases optimization process, where in the first step the VM is mapped to a device community, and in the second step, the VM is allocated in any of the devices in the community with a traditional optimization technique. This is probably the most similar work to our proposal in terms of the optimization algorithm, but with a different optimization objective. Their objective was to propose a runtime algorithm for the migration of the VM as mobile user of the applications move through different access points to reduce the average service delay."
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.072,
328
+ 0.739,
329
+ 0.492,
330
+ 0.942
331
+ ],
332
+ "angle": 0,
333
+ "content": "The main differences of the work of Filiposka et al. with our proposal are: first, we study the suitability of the community relationships to improve service availability instead of the migration of VMs due to the user mobility; second, we consider a more complex structure of the applications because we defined them as a set of interrelated services that can be allocated in different devices, while they defined the applications as a single encapsulating element, the VM; third, we also study the use of a graph partitioning approach, the transitive closure of the services, for the allocation of the services inside the communities to also benefit the placement of the most interrelated services in the same devices to reduce the network delays between interrelated services."
334
+ },
335
+ {
336
+ "type": "image",
337
+ "bbox": [
338
+ 0.565,
339
+ 0.054,
340
+ 0.867,
341
+ 0.187
342
+ ],
343
+ "angle": 0,
344
+ "content": null
345
+ },
346
+ {
347
+ "type": "image_caption",
348
+ "bbox": [
349
+ 0.505,
350
+ 0.198,
351
+ 0.714,
352
+ 0.213
353
+ ],
354
+ "angle": 0,
355
+ "content": "Fig. 1. Fog computing architecture."
356
+ },
357
+ {
358
+ "type": "title",
359
+ "bbox": [
360
+ 0.505,
361
+ 0.235,
362
+ 0.72,
363
+ 0.25
364
+ ],
365
+ "angle": 0,
366
+ "content": "3 PROBLEM STATEMENT"
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.503,
372
+ 0.255,
373
+ 0.924,
374
+ 0.372
375
+ ],
376
+ "angle": 0,
377
+ "content": "A general fog computing architecture is represented in Fig. 1 where three layers can be identified: cloud layer, fog layer and client layer. Three types of devices can be differentiated: a device for the cloud provider of the cloud layer; the gateways, that are the access points for the clients; the fog devices, the network devices between the cloud provider and the gateways. All the devices have resources to allocate and execute services."
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.503,
383
+ 0.372,
384
+ 0.924,
385
+ 0.665
386
+ ],
387
+ "angle": 0,
388
+ "content": "The fog infrastructure can be modeled as a graph where the nodes are the devices and the edges the direct network links between devices. We identify those devices as \\( D_{i} \\), considering two special cases for the cloud provider (\\( D_{i}^{cloud} \\)) and the gateways (\\( D_{i}^{gw} \\)). The devices are defined by the available capacity of their resources \\( AR_{D_i} \\), that is a vector which contains the capacities of each physical component. For the sake of simplicity, we have considered a scalar value, but it could easily be extended by including as many elements as necessary. We suppose unlimited resources for the specific case of the cloud provider, \\( AR_{D_i^{cloud}} = \\infty \\). The devices are also defined by the processing speed \\( IPT_{D_i} \\) measured in terms of instructions per unit of time. The network links are identified by the two connected nodes \\( NL_{D_i,D_j} \\), and we consider that it is a bidirectional communication, \\( NL_{D_i,D_j} = NL_{D_j,D_i} \\). The network links are defined by the propagation delay, \\( PR_{NL_{D_i},D_j} \\), and the network bandwidth, \\( BW_{NL_{D_i},D_j} \\). Thus, the network delay, \\( ND_{NL_{D_i},D_j} \\), for the transmission of a packet between two connected devices is calculated as:"
389
+ },
390
+ {
391
+ "type": "equation",
392
+ "bbox": [
393
+ 0.565,
394
+ 0.67,
395
+ 0.923,
396
+ 0.705
397
+ ],
398
+ "angle": 0,
399
+ "content": "\\[\nN D _ {N L _ {D _ {i}, D _ {j}}} = P R _ {N L _ {D _ {i}, D _ {j}}} + \\frac {\\text {s i z e}}{B W _ {N L _ {D _ {i} , D _ {j}}}} \\tag {1}\n\\]"
400
+ },
401
+ {
402
+ "type": "text",
403
+ "bbox": [
404
+ 0.504,
405
+ 0.709,
406
+ 0.878,
407
+ 0.724
408
+ ],
409
+ "angle": 0,
410
+ "content": "where size is the size of the packet to be transmitted."
411
+ },
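As an illustration of Eq. (1), the snippet below computes the per-hop delay for one packet; the propagation time and bandwidth match the values later listed in Table 1, while the 3,000,000-byte size is only an assumed example within the message-size range used in the experiments.

```python
def network_delay(pr_ms, bw_bytes_per_ms, size_bytes):
    # Eq. (1): propagation delay plus transmission time of the packet
    return pr_ms + size_bytes / bw_bytes_per_ms

# PR = 5 ms and BW = 75000 bytes/ms give 5 + 3000000 / 75000 = 45 ms per hop
print(network_delay(5, 75000, 3_000_000))  # 45.0
```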
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.503,
416
+ 0.725,
417
+ 0.923,
418
+ 0.87
419
+ ],
420
+ "angle": 0,
421
+ "content": "The applications in our problem domain follow a microservice based development pattern, that is increasingly being used in IoT applications [21], [22], [23]. This type of applications are modeled as a set of small and stateless services that interoperate between them to accomplish a complex task [24]. Thus, the services can be easily scale up, by downloading the encapsulating element and executing it, or scale down, by just stopping and removing instances of the service. We assume that there is at least one instance of each service running in the cloud provider \\((D_{i}^{cloud})\\)."
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.503,
427
+ 0.87,
428
+ 0.924,
429
+ 0.945
430
+ ],
431
+ "angle": 0,
432
+ "content": "We model each application \\(APP_{x}\\) as a directed graph, where the nodes are the services and the edges are the request messages between the services. We identify the services as \\(S_{u}\\) and they are defined by the resource consumption generated in the device that allocates the service,"
433
+ }
434
+ ],
435
+ [
436
+ {
437
+ "type": "header",
438
+ "bbox": [
439
+ 0.075,
440
+ 0.033,
441
+ 0.106,
442
+ 0.044
443
+ ],
444
+ "angle": 0,
445
+ "content": "IEEE"
446
+ },
447
+ {
448
+ "type": "page_number",
449
+ "bbox": [
450
+ 0.913,
451
+ 0.034,
452
+ 0.923,
453
+ 0.043
454
+ ],
455
+ "angle": 0,
456
+ "content": "3"
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.072,
462
+ 0.054,
463
+ 0.493,
464
+ 0.273
465
+ ],
466
+ "angle": 0,
467
+ "content": "\\(CR_{S_u}\\). As in the case of the available resources in a device, the resource consumption is generally defined as a vector which measures the consumption of each physical component, but we have considered a scalar value for a simpler definition of the problem. Services are executed when a request message is received. We classify the services in two types depending on the origin of the service request: the entry-point service \\(S_u^{sep}\\), the origins of the request messages that arrive to those services are users \\(US_a\\) or IoT devices (sensors typically) \\(ID_b\\); the intra-services \\(S_u^{intra}\\), that are only requested by other services. An intra-service can be requested for several different services and the entry-point service can be requested for several users or IoT devices. But, we suppose that there is only one entry-point service for each application."
468
+ },
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.072,
473
+ 0.274,
474
+ 0.492,
475
+ 0.407
476
+ ],
477
+ "angle": 0,
478
+ "content": "The task performed by a service is different depending on the requester, so the execution generated by a request not only depends on the service but also on the requester, i.e. the request message. The request messages are identified by the origin and target services, \\(MS_{S_u,S_v}\\), and they are modeled as unidirectional edges, \\(MS_{S_u,S_v} \\neq MS_{S_v,S_u}\\). The requests generated by the users or the IoT services, i.e. the requests to the entry-point services, are only identified by the target entry-point service \\(MS_{\\emptyset,S_u}\\)."
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.073,
484
+ 0.408,
485
+ 0.492,
486
+ 0.483
487
+ ],
488
+ "angle": 0,
489
+ "content": "The request messages are defined by the size of the request message \\( S Z_{MS_{S_u,S_v}} \\), that determines the transmission time of the service request, and the execution load that the target service will generate in the device, defined by the number of instructions to be executed, \\( EI_{MS_{S_u,S_v}} \\)."
490
+ },
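The service and message model described above can be summarized with a few plain data containers. This is only an illustrative sketch under the stated assumptions; the field names are not taken from the paper or from YAFS.

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Message:                 # MS_{Su,Sv}
    source: Optional[str]      # None for requests coming from users / IoT devices
    target: str
    size_bytes: float          # SZ_{MS}
    instructions: float        # EI_{MS}

@dataclass
class Service:                 # S_u
    name: str
    consumption: float         # CR_{S_u}
    entry_point: bool = False

@dataclass
class Application:             # APP_x
    name: str
    deadline_ms: float         # DL_{APP_x}
    services: List[Service] = field(default_factory=list)
    messages: List[Message] = field(default_factory=list)
```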
491
+ {
492
+ "type": "text",
493
+ "bbox": [
494
+ 0.072,
495
+ 0.484,
496
+ 0.492,
497
+ 0.614
498
+ ],
499
+ "angle": 0,
500
+ "content": "We assume that there is at least one instance of each service in the cloud provider. But those services can be horizontally scaled by deploying new instances in the fog devices. By this, the workload can be distributed between instances and the network delay from the user to te service is reduced. We define a placement matrix, \\( P \\), of size \\( |S_u| \\times |D_i| \\), number of services per number of fog devices, where a element \\( p_{ui} \\) is equal 1 if service \\( S_u \\) is deployed in device \\( D_i \\), and 0 otherwise."
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.072,
506
+ 0.617,
507
+ 0.493,
508
+ 0.675
509
+ ],
510
+ "angle": 0,
511
+ "content": "The placement of the services are constrained by the device resource capacity. The resources consumed by the allocated services should not exceed the available resources in the device:"
512
+ },
513
+ {
514
+ "type": "equation",
515
+ "bbox": [
516
+ 0.164,
517
+ 0.69,
518
+ 0.49,
519
+ 0.732
520
+ ],
521
+ "angle": 0,
522
+ "content": "\\[\n\\sum_ {u = 1} ^ {| S _ {u} |} \\left(p _ {u i} \\times C R _ {S _ {u}}\\right) \\leq A R _ {D _ {i}}, \\forall D _ {i} \\tag {2}\n\\]"
523
+ },
524
+ {
525
+ "type": "text",
526
+ "bbox": [
527
+ 0.073,
528
+ 0.751,
529
+ 0.49,
530
+ 0.794
531
+ ],
532
+ "angle": 0,
533
+ "content": "Our optimization objectives are to increase the application deadline satisfaction ratio, and the application availability as the devices or the network links fail."
534
+ },
535
+ {
536
+ "type": "text",
537
+ "bbox": [
538
+ 0.072,
539
+ 0.797,
540
+ 0.493,
541
+ 0.945
542
+ ],
543
+ "angle": 0,
544
+ "content": "We define the deadline satisfaction ratio as the percentage of application requests that are processed before the application deadline. Consequently, the applications in the system, \\(APP_{x}\\), need to be defined by their deadlines, \\(DL_{APP_{x}}\\). The user perceived response time, \\(RT_{RQ_{US_{a},APP_{x}}^{n}}\\), is the metric that measures the time between a specific application request is sent by the user \\((RQ_{US_{a},APP_{x}}^{n})\\) and all the application services finish their execution. It includes the network delay of the request between services and the response times (execution and waiting time) of the services."
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.528,
550
+ 0.054,
551
+ 0.878,
552
+ 0.07
553
+ ],
554
+ "angle": 0,
555
+ "content": "The equation for the deadline satisfaction ratio is:"
556
+ },
557
+ {
558
+ "type": "equation",
559
+ "bbox": [
560
+ 0.514,
561
+ 0.075,
562
+ 0.923,
563
+ 0.113
564
+ ],
565
+ "angle": 0,
566
+ "content": "\\[\n\\operatorname {d e a d l i n e} \\left(U S _ {a}, A P P _ {x}\\right) = \\frac {\\left| R T _ {R Q _ {U S _ {a} , A P P _ {x}} ^ {n}} < D L _ {A P P _ {x}} \\right|}{\\left| R Q _ {U S _ {a} , A P P _ {x}} ^ {n} \\right|} \\tag {3}\n\\]"
567
+ },
568
+ {
569
+ "type": "text",
570
+ "bbox": [
571
+ 0.504,
572
+ 0.119,
573
+ 0.923,
574
+ 0.222
575
+ ],
576
+ "angle": 0,
577
+ "content": "where \\( |RQ_{US_a,APP_x}^n| \\) is the number of times that a request for \\( APP_x \\) is sent from user \\( US_a \\), and \\( |RT_{RQ_{US_a,APP_x}}^n| < DL_{APP_x}| \\) is the number of those requests that satisfied the application deadline. This metric can be generalized by considering the request to an application from any user, deadline \\( (APP_x) \\), or the ratio for all the applications and users in the system, deadline(system)."
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.503,
583
+ 0.222,
584
+ 0.924,
585
+ 0.413
586
+ ],
587
+ "angle": 0,
588
+ "content": "Our second objective, the application availability, is defined as the ratio of users that are able to reach all the services of the applications they request for a given point in time. In a hypothetical case, where any of the elements in the system fails, the service availability would be 1.0. But the devices or the network links can fall down, breaking the shortest paths between the users and the application services. At best, this only would generate an increase in the network delay due to the requests would be routed by a longer path, damaging the deadline satisfaction ratios. But it could even result in making the user impossible to reach all the application services, damaging the service availability ratio. The equation for the service availability ratios is:"
589
+ },
590
+ {
591
+ "type": "equation",
592
+ "bbox": [
593
+ 0.515,
594
+ 0.418,
595
+ 0.922,
596
+ 0.462
597
+ ],
598
+ "angle": 0,
599
+ "content": "\\[\n\\text {a v a i l a b i l i t y} \\left(\\mathrm {A P P} _ {x}\\right) = \\frac {\\left| U S _ {a} , g . t . \\exists \\text {p a t h} U S _ {a} \\text {t o} A P P _ {x} \\right|}{\\left| U S _ {a} , g . t . U S _ {a} \\text {r e q u e s t s} A P P _ {x} \\right|} \\tag {4}\n\\]"
600
+ },
601
+ {
602
+ "type": "text",
603
+ "bbox": [
604
+ 0.504,
605
+ 0.463,
606
+ 0.923,
607
+ 0.523
608
+ ],
609
+ "angle": 0,
610
+ "content": "In summary, our domain problem is addressed to find \\( P \\), \\( p_{ui} \\forall S_u, D_i \\) by minimizing deadline \\( (US_a, APP_x) \\wedge (1 - availability(APP_x)) \\forall US_a, APP_x \\) subject to the constraint in Eq.(2)."
611
+ },
612
+ {
613
+ "type": "title",
614
+ "bbox": [
615
+ 0.504,
616
+ 0.544,
617
+ 0.922,
618
+ 0.576
619
+ ],
620
+ "angle": 0,
621
+ "content": "4 TWO PHASES PARTITION-BASED OPTIMIZATION PROPOSAL"
622
+ },
623
+ {
624
+ "type": "text",
625
+ "bbox": [
626
+ 0.503,
627
+ 0.581,
628
+ 0.923,
629
+ 0.713
630
+ ],
631
+ "angle": 0,
632
+ "content": "Our optimization algorithm is based on a two phases placement process with a first mapping of applications in fog communities and a second phase which allocates the services of an application in the devices of a fog community. We partition the fog devices using the community relationship of the complex network that models the network infrastructure of the system. The application services are partitioned considering the transitive closures of the nodes that represent the services in the application graph."
633
+ },
634
+ {
635
+ "type": "text",
636
+ "bbox": [
637
+ 0.503,
638
+ 0.713,
639
+ 0.923,
640
+ 0.83
641
+ ],
642
+ "angle": 0,
643
+ "content": "We study if the community relationships of the fog devices is a good indicator to detect device sets that guarantee the availability of the services and the reachability of the devices when device and network links failures are considered. Additionally, we also study if the transitive closure of a service is a good indicator to decide the services that are allocated in the same device to avoid network communications overheads."
644
+ },
645
+ {
646
+ "type": "title",
647
+ "bbox": [
648
+ 0.504,
649
+ 0.85,
650
+ 0.848,
651
+ 0.866
652
+ ],
653
+ "angle": 0,
654
+ "content": "4.1 Community-based Fog Devices Partition"
655
+ },
656
+ {
657
+ "type": "text",
658
+ "bbox": [
659
+ 0.503,
660
+ 0.87,
661
+ 0.923,
662
+ 0.945
663
+ ],
664
+ "angle": 0,
665
+ "content": "The first phase of our optimization algorithm deals with the mapping between applications (a set of interrelated services) and a device partitioning. We propose to partition the devices with the use of the community relationship between them. This phase of our optimization algorithm is based"
666
+ }
667
+ ],
668
+ [
669
+ {
670
+ "type": "header",
671
+ "bbox": [
672
+ 0.075,
673
+ 0.033,
674
+ 0.106,
675
+ 0.044
676
+ ],
677
+ "angle": 0,
678
+ "content": "IEEE"
679
+ },
680
+ {
681
+ "type": "page_number",
682
+ "bbox": [
683
+ 0.913,
684
+ 0.034,
685
+ 0.923,
686
+ 0.043
687
+ ],
688
+ "angle": 0,
689
+ "content": "4"
690
+ },
691
+ {
692
+ "type": "text",
693
+ "bbox": [
694
+ 0.073,
695
+ 0.054,
696
+ 0.493,
697
+ 0.113
698
+ ],
699
+ "angle": 0,
700
+ "content": "on the previous work of Filiposka, Mishev and Gilly, where they studied and validated community-based algorithms for placement optimization in cloud computing [15] and in fog computing [2]."
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.072,
706
+ 0.112,
707
+ 0.493,
708
+ 0.301
709
+ ],
710
+ "angle": 0,
711
+ "content": "The community structure is a topological feature of graphs that determines the sets of nodes which are better connected between them than with the rest of the network. The most popular community detection method is the one proposed by Girvan and Newman [25], which detects communities by progressively removing edges from the original graph. The algorithm removes the edges with the highest betweenness centrality, at each step. Betweenness centrality of an edge is the sum of the fraction of the shortest paths that pass through the edge. Therefore, a community, that is organized with two regions that are mainly communicated by only one edge, is split into two new communities in each algorithm iteration."
712
+ },
713
+ {
714
+ "type": "text",
715
+ "bbox": [
716
+ 0.073,
717
+ 0.302,
718
+ 0.492,
719
+ 0.462
720
+ ],
721
+ "angle": 0,
722
+ "content": "Under the conditions of our domain problem, a device community can be understood as a set of devices that are well connected between them, with alternatives communication paths, and that the shortest paths between devices are evenly distributed between the topology. Consequently, a fail in an edge inside the community will have a lower influence in the communication paths between devices than a fail in the edges that connect the communities. This lower influence means that the fails inside the communities will not generate isolated regions in the topology neither an important increase in the communication delays."
723
+ },
724
+ {
725
+ "type": "text",
726
+ "bbox": [
727
+ 0.073,
728
+ 0.462,
729
+ 0.492,
730
+ 0.636
731
+ ],
732
+ "angle": 0,
733
+ "content": "The Girvan-Newman method iteratively determines the communities and the dendrogram, the tree structure of the communities, can be built. We characterized those communities with its depth in the dendrogram. We define this depth as the iteration in which the community was obtained. The higher the depth value is, the better communicated the device community is. Consequently, from the point of view of the availability, it is better to place the applications in device communities with higher depth values, since the devices inside those communities are better communicated between them than the devices in communities with lower depths values [26]."
734
+ },
735
+ {
736
+ "type": "text",
737
+ "bbox": [
738
+ 0.073,
739
+ 0.637,
740
+ 0.493,
741
+ 0.943
742
+ ],
743
+ "angle": 0,
744
+ "content": "For example, consider the fog infrastructure in Fig. 2. The network link \\(NL_{D_c,D_f}\\) is the one with the highest edge betweenness centrality since it is passed through the highest number of shortest paths. If we iterate the Girvan-Newman method over this example, communities 2 and 3 have higher depth values than community 1 since they are obtained when \\(NL_{D_c,D_f}\\) is removed in the next iteration of the community generation algorithm. Consider also that we deploy an application with services \\(S_i\\) and \\(S_j\\) in community 1, allocating \\(S_i\\) in \\(D_a\\) and \\(S_j\\) in \\(D_h\\), and that the user that requests the application is connected to device \\(D_b\\). Under those conditions, a fail in \\(NL_{D_c,D_f}\\) would make impossible to finish the execution of the application since their services are unreachable. On the contrary, if we deploy the application in community 2, any fail in a edge would not make impossible to execute the application. Finally consider that a second user is connected to device \\(D_h\\). The best alternative, from the point of view of the availability, would be to horizontally scale up by deploying the same application twice in both communities 2 and 3, than only once in any of them."
745
+ },
746
+ {
747
+ "type": "image",
748
+ "bbox": [
749
+ 0.569,
750
+ 0.054,
751
+ 0.864,
752
+ 0.165
753
+ ],
754
+ "angle": 0,
755
+ "content": null
756
+ },
757
+ {
758
+ "type": "image_caption",
759
+ "bbox": [
760
+ 0.505,
761
+ 0.18,
762
+ 0.759,
763
+ 0.194
764
+ ],
765
+ "angle": 0,
766
+ "content": "Fig. 2. Example of fog device communities."
767
+ },
768
+ {
769
+ "type": "text",
770
+ "bbox": [
771
+ 0.503,
772
+ 0.223,
773
+ 0.925,
774
+ 0.442
775
+ ],
776
+ "angle": 0,
777
+ "content": "This example shows that, in an unrealistic situation with unlimited resources in all the devices, the best option would be to deploy an instance of the application for each client that requests it and this deployment would be placed in the community with the highest depth value that includes the device where the client is connected to. But this cannot be performed due to the limited resources in the devices of a community. Moreover, if we note that the higher the depth value of the community, the smaller the number of devices in the community, i.e., the communities with the highest values are the ones formed by only one device. Consequently, it is necessary to prioritize the allocation of the applications in the communities. We propose to use a greedy algorithm for this prioritization, more concretely, the First-Fit Decreasing algorithm [27]."
778
+ },
779
+ {
780
+ "type": "text",
781
+ "bbox": [
782
+ 0.503,
783
+ 0.443,
784
+ 0.925,
785
+ 0.72
786
+ ],
787
+ "angle": 0,
788
+ "content": "Our optimization algorithm deals, in this first step, with the placement of applications in device communities using a First-Fit Decreasing approach. The priority criteria for ordering the applications is their execution deadlines, by prioritizing the applications with shortest deadlines. The algorithm starts checking the allocation of the application from the device communities with highest depth to the ones with the lowest, and the application is allocated in the first community with enough resources to allocate all the services of the application. If after checking all the communities, the application has not been allocated, this will be available only in the cloud provider. The process for the same application is repeated as many times as the number of users in the system that request this application. Algorithm 1 shows the pseudo-code of our proposal. The algorithm goes through the applications (in ascending deadline order), the users that request them and the communities (in descending depth order), trying to allocate the services of the application in the devices in the community."
789
+ },
790
+ {
791
+ "type": "text",
792
+ "bbox": [
793
+ 0.504,
794
+ 0.722,
795
+ 0.925,
796
+ 0.868
797
+ ],
798
+ "angle": 0,
799
+ "content": "In this first step, we map the applications in communities, but the map of services remains to be defined. We separate the process in two steps because we mainly focus the first one (mapping applications to communities) on increasing the application availability, and the second one (mapping services of an application to devices in a device community) on the application deadlines. This second step is performed by the function placeServicesInDevices(), in line 15, and its details are explained in Section 4.2 and Algorithm 2."
800
+ },
801
+ {
802
+ "type": "text",
803
+ "bbox": [
804
+ 0.504,
805
+ 0.87,
806
+ 0.925,
807
+ 0.943
808
+ ],
809
+ "angle": 0,
810
+ "content": "Our algorithm checks if an application has been previously placed in a community (line 11), and if not, it delegates the decision to place the application to the community to the algorithm which checks if the application services fit into the device community (Algorithm 2)."
811
+ }
812
+ ],
813
+ [
814
+ {
815
+ "type": "header",
816
+ "bbox": [
817
+ 0.076,
818
+ 0.033,
819
+ 0.106,
820
+ 0.044
821
+ ],
822
+ "angle": 0,
823
+ "content": "IEEE"
824
+ },
825
+ {
826
+ "type": "page_number",
827
+ "bbox": [
828
+ 0.913,
829
+ 0.034,
830
+ 0.922,
831
+ 0.043
832
+ ],
833
+ "angle": 0,
834
+ "content": "5"
835
+ },
836
+ {
837
+ "type": "code_caption",
838
+ "bbox": [
839
+ 0.075,
840
+ 0.051,
841
+ 0.49,
842
+ 0.079
843
+ ],
844
+ "angle": 0,
845
+ "content": "Algorithm 1 Device community-based application allocation"
846
+ },
847
+ {
848
+ "type": "algorithm",
849
+ "bbox": [
850
+ 0.075,
851
+ 0.081,
852
+ 0.49,
853
+ 0.276
854
+ ],
855
+ "angle": 0,
856
+ "content": "1: \\(\\mathbb{C}\\gets\\) calculate device communities \n2: \\(\\mathbb{C}\\) order communities C by descending depth \n3: A \\(\\leftarrow\\) order applications by ascending deadline \n4: appPlacement \\(\\leftarrow\\) 0 \n5: for app in A do \n6: U \\(\\leftarrow\\) get users requesting application app \n7: for user in U do \n8: dev \\(\\leftarrow\\) get device where user is connected \n9: for infCom in IC do \n10: if dev \\(\\in\\) infCom then \n11: if infCom \\(\\in\\) appPlacement[app] then \n12: \"application app already placed in community infCom\"\" \n13: break \n14: else \n15: if placeServicesInDevices(app,infCom) then \n16: appPlacement[app].append(infCom) \n17: update resource usages in infCom \n18: \"placed application app in community infCom\"\" \n19: break"
857
+ },
858
+ {
859
+ "type": "title",
860
+ "bbox": [
861
+ 0.074,
862
+ 0.305,
863
+ 0.458,
864
+ 0.32
865
+ ],
866
+ "angle": 0,
867
+ "content": "4.2 Transitive Closure-based Application Partition"
868
+ },
869
+ {
870
+ "type": "text",
871
+ "bbox": [
872
+ 0.072,
873
+ 0.328,
874
+ 0.491,
875
+ 0.502
876
+ ],
877
+ "angle": 0,
878
+ "content": "Once that the mapping of a given application into a candidate community of devices is performed by the first phase of the optimization algorithm, the second phase deals with the allocation of the services of the application into the devices in the community. We first partition the applications into sets of services, and it is checked if each of those service sets can be placed in just one device. If not, smaller sets are considered. The partition of the service into sets is based on our previous work [6], where we studied and validated the use of a distributed placement algorithm where the service sets are created by considering the transitive closure of the services in the application graph."
879
+ },
880
+ {
881
+ "type": "text",
882
+ "bbox": [
883
+ 0.073,
884
+ 0.504,
885
+ 0.492,
886
+ 0.605
887
+ ],
888
+ "angle": 0,
889
+ "content": "The transitive closure of a directed graph indicates the nodes that are reachable for each of the nodes in the graph. If a vertex \\( j \\) is reachable by a vertex \\( i \\) means that there is a path from \\( i \\) to \\( j \\). The reachability matrix of a graph is called the transitive closure of the graph, and the set of reachable nodes for a given node is called the transitive closure of a node [28]."
890
+ },
891
+ {
892
+ "type": "text",
893
+ "bbox": [
894
+ 0.072,
895
+ 0.607,
896
+ 0.492,
897
+ 0.796
898
+ ],
899
+ "angle": 0,
900
+ "content": "Under the conditions of our domain problem, the transitive closure of a node can be understood as the set of services that are requested for the execution of the given service, i.e., the outgoing requests generated by a service when it receives an incoming request. If we are interested in reducing the response time of the application execution, the services of the transitive closure should be allocated in the same device to reduce the communication delays between them, since the network delay is 0.0 for request messages inside the same device. Moreover, the best case is when all the services of an application are allocated in the same device, but this is limited by the resource constraint (Equation 2)."
901
+ },
902
+ {
903
+ "type": "text",
904
+ "bbox": [
905
+ 0.073,
906
+ 0.797,
907
+ 0.492,
908
+ 0.942
909
+ ],
910
+ "angle": 0,
911
+ "content": "We also propose a First-Fit algorithm for this second phase (Algorithm 2), which orders the sets of services from the ones with the biggest sizes (only one transitive closure with all the services) to the smallest sets of services (the transitive closures with only one node or with the loops in the service flow), and tries to place those sets of services into a same device. The devices are ordered by a fitness value which is the theoretical user perceived response time. This value is obtained by adding the network latency between the device and the user and the execution time of all the"
912
+ },
913
+ {
914
+ "type": "image",
915
+ "bbox": [
916
+ 0.523,
917
+ 0.053,
918
+ 0.614,
919
+ 0.12
920
+ ],
921
+ "angle": 0,
922
+ "content": null
923
+ },
924
+ {
925
+ "type": "image_caption",
926
+ "bbox": [
927
+ 0.551,
928
+ 0.12,
929
+ 0.577,
930
+ 0.127
931
+ ],
932
+ "angle": 0,
933
+ "content": "Iter. 1"
934
+ },
935
+ {
936
+ "type": "image",
937
+ "bbox": [
938
+ 0.619,
939
+ 0.054,
940
+ 0.709,
941
+ 0.12
942
+ ],
943
+ "angle": 0,
944
+ "content": null
945
+ },
946
+ {
947
+ "type": "image_caption",
948
+ "bbox": [
949
+ 0.658,
950
+ 0.12,
951
+ 0.684,
952
+ 0.127
953
+ ],
954
+ "angle": 0,
955
+ "content": "Iter. 2"
956
+ },
957
+ {
958
+ "type": "image",
959
+ "bbox": [
960
+ 0.722,
961
+ 0.054,
962
+ 0.811,
963
+ 0.12
964
+ ],
965
+ "angle": 0,
966
+ "content": null
967
+ },
968
+ {
969
+ "type": "image_caption",
970
+ "bbox": [
971
+ 0.761,
972
+ 0.12,
973
+ 0.786,
974
+ 0.127
975
+ ],
976
+ "angle": 0,
977
+ "content": "Iter. 3"
978
+ },
979
+ {
980
+ "type": "image",
981
+ "bbox": [
982
+ 0.82,
983
+ 0.054,
984
+ 0.907,
985
+ 0.12
986
+ ],
987
+ "angle": 0,
988
+ "content": null
989
+ },
990
+ {
991
+ "type": "image_caption",
992
+ "bbox": [
993
+ 0.859,
994
+ 0.12,
995
+ 0.886,
996
+ 0.127
997
+ ],
998
+ "angle": 0,
999
+ "content": "Iter. 4"
1000
+ },
1001
+ {
1002
+ "type": "image_caption",
1003
+ "bbox": [
1004
+ 0.506,
1005
+ 0.145,
1006
+ 0.772,
1007
+ 0.159
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": "Fig. 3. Example of service transitive closures."
1011
+ },
1012
+ {
1013
+ "type": "code_caption",
1014
+ "bbox": [
1015
+ 0.507,
1016
+ 0.172,
1017
+ 0.892,
1018
+ 0.187
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": "Algorithm 2 Transitive closure-based service allocation"
1022
+ },
1023
+ {
1024
+ "type": "algorithm",
1025
+ "bbox": [
1026
+ 0.508,
1027
+ 0.189,
1028
+ 0.851,
1029
+ 0.357
1030
+ ],
1031
+ "angle": 0,
1032
+ "content": "1: function PLACESERVICESINDEVICES \n2: TC \\(\\leftarrow\\) generate transitive closure partitions for app \n3: D \\(\\leftarrow\\) order devices in infCom by reponse time \n4: SP \\(\\leftarrow\\) \\(\\emptyset\\) /*Services already placed*/ \n5: servPlacement \\(\\leftarrow\\) \\(\\emptyset\\) \n6: for dev in D do \n7: for appPartition in TC do \n8: for closure in appPartition do \n9: if (closure not in SP) and (closure fits in dev) then \n10: SP = SP \\(\\cup\\) closure \n11: for service in closure do \n12: servPlacement[service] = dev \n13: update resource usages in dev \n14: if SP == app then \n15: return True, servPlacement \n16: return False, \\(\\emptyset\\)"
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "bbox": [
1037
+ 0.503,
1038
+ 0.386,
1039
+ 0.924,
1040
+ 0.458
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "services in the device. This prioritize the devices that are both closer to the users and faster in the execution. By this, the second step of the algorithm optimizes the user perceived response time, and, consequently, improves the deadline satisfaction ratio."
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "bbox": [
1048
+ 0.503,
1049
+ 0.459,
1050
+ 0.923,
1051
+ 0.649
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "Initially, Algorithm 2 goes through the devices ordered by the fitness value, and tries to allocate as much services as possible in the devices with the highest values. For the first device, it first tries to allocate all the services of the application. If they do not fit, the service set is split in several sets, one for each entry-point service and one additional set for the transitive closures of each of its neighbor services of the entry-point one, and it checks if any of those new sets fits in the first device. This is recursively repeated for each transitive closure set that contains services not previously allocated. Fig. 3 shows an example of how the transitive closure of the services is generated along the iterations of the algorithm that partition the services of the application."
1055
+ },
1056
+ {
1057
+ "type": "text",
1058
+ "bbox": [
1059
+ 0.503,
1060
+ 0.65,
1061
+ 0.922,
1062
+ 0.766
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "Once that all the service sets have been evaluated to be placed in the first device, this process is sequentially repeated for all the devices for the unallocated services. If after considering all the devices, there are still unallocated services, the mapping of the application in the current device community is rejected. Consequently, the first phase of the algorithm has to consider a greater community for the placement."
1066
+ },
1067
+ {
1068
+ "type": "title",
1069
+ "bbox": [
1070
+ 0.505,
1071
+ 0.79,
1072
+ 0.771,
1073
+ 0.805
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "5 EXPERIMENTAL EVALUATION"
1077
+ },
1078
+ {
1079
+ "type": "text",
1080
+ "bbox": [
1081
+ 0.503,
1082
+ 0.811,
1083
+ 0.922,
1084
+ 0.869
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "We defined random characteristics for the elements of our simulation experiments. We modeled the parameters of the elements in the domain with uniform distributions and the minimum and maximum values are shown in Table 1."
1088
+ },
1089
+ {
1090
+ "type": "text",
1091
+ "bbox": [
1092
+ 0.503,
1093
+ 0.87,
1094
+ 0.923,
1095
+ 0.944
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "The service applications were generated randomly following a growing network (GN) graph structure. GN graphs are built by adding nodes one at a time with a link to one previously added node. The network infrastructure was created as a random Barabasi-Albert network with 100 fog"
1099
+ }
1100
+ ],
1101
+ [
1102
+ {
1103
+ "type": "header",
1104
+ "bbox": [
1105
+ 0.076,
1106
+ 0.034,
1107
+ 0.106,
1108
+ 0.043
1109
+ ],
1110
+ "angle": 0,
1111
+ "content": "IEEE"
1112
+ },
1113
+ {
1114
+ "type": "page_number",
1115
+ "bbox": [
1116
+ 0.913,
1117
+ 0.034,
1118
+ 0.923,
1119
+ 0.043
1120
+ ],
1121
+ "angle": 0,
1122
+ "content": "6"
1123
+ },
1124
+ {
1125
+ "type": "table_caption",
1126
+ "bbox": [
1127
+ 0.107,
1128
+ 0.055,
1129
+ 0.46,
1130
+ 0.08
1131
+ ],
1132
+ "angle": 0,
1133
+ "content": "TABLE 1 Values of the parameters for the experiment characterization"
1134
+ },
1135
+ {
1136
+ "type": "table",
1137
+ "bbox": [
1138
+ 0.096,
1139
+ 0.093,
1140
+ 0.471,
1141
+ 0.348
1142
+ ],
1143
+ "angle": 0,
1144
+ "content": "<table><tr><td>Parameter</td><td></td><td>min.-max.</td></tr><tr><td>Network</td><td></td><td></td></tr><tr><td>Propagation time (ms)</td><td>PRNLDi,Dj</td><td>5</td></tr><tr><td>Bandwidth (bytes/ms)</td><td>BWNLDi,Dj</td><td>75000</td></tr><tr><td>Fog device</td><td></td><td></td></tr><tr><td>Resources (res. units)</td><td>ARDSi</td><td>10-25</td></tr><tr><td>Speed (Intrs/ms)</td><td>IPTDi</td><td>100-1000</td></tr><tr><td>Application</td><td></td><td></td></tr><tr><td>Deadline (ms)</td><td>DLAPPx</td><td>300-50000</td></tr><tr><td>Services (number)</td><td></td><td>2-10</td></tr><tr><td>Resources (res. units)</td><td>CRSu</td><td>1-6</td></tr><tr><td>Execution (Intrs/req)</td><td>EIMSSu,Sv</td><td>20000-60000</td></tr><tr><td>Message size (bytes)</td><td>SZMSSu,Sv</td><td>1500000-4500000</td></tr><tr><td>IoT device</td><td></td><td></td></tr><tr><td>Request rate (1/ms)</td><td></td><td>1/1000-1/200</td></tr><tr><td>Popularity (prob.)</td><td></td><td>0.25</td></tr></table>"
1145
+ },
1146
+ {
1147
+ "type": "text",
1148
+ "bbox": [
1149
+ 0.072,
1150
+ 0.373,
1151
+ 0.493,
1152
+ 0.547
1153
+ ],
1154
+ "angle": 0,
1155
+ "content": "devices. Betweenness centrality index is a topological metric that measures the number of shortest path that goes through a device. The gateway devices were selected from the nodes placed in the edges of the network, i.e., the nodes with the smallest betweenness centrality indices. Betweenness centrality index is a topological metric that measures the number of shortest path that goes through a device. We selected the \\(25\\%\\) of devices with the lowest centrality value to behave as gateways (25 gateways). The number and the applications requested from the IoT devices connected to the gateways were determined with a popularity distribution modeled with an uniform distribution."
1156
+ },
1157
+ {
1158
+ "type": "text",
1159
+ "bbox": [
1160
+ 0.073,
1161
+ 0.548,
1162
+ 0.492,
1163
+ 0.636
1164
+ ],
1165
+ "angle": 0,
1166
+ "content": "The random experimental scenario finally resulted on 20 applications with 106 services, that totally needed 360 resource units and the fog devices were able to offer up to 1874 resources units. 70 IoT devices (or users) were deployed and they generated an application request each \\(1/557\\) ms in average."
1167
+ },
1168
+ {
1169
+ "type": "text",
1170
+ "bbox": [
1171
+ 0.073,
1172
+ 0.636,
1173
+ 0.491,
1174
+ 0.71
1175
+ ],
1176
+ "angle": 0,
1177
+ "content": "We compared the results of our proposal with the ones obtained from the implementation of an integer linear programming (ILP) service allocation optimizer. As we mention in Section 2, ILP solutions are the most numerous in fog service placement optimization."
1178
+ },
1179
+ {
1180
+ "type": "text",
1181
+ "bbox": [
1182
+ 0.072,
1183
+ 0.71,
1184
+ 0.492,
1185
+ 0.841
1186
+ ],
1187
+ "angle": 0,
1188
+ "content": "The experiments were executed using the YAFS simulator that we had previously developed for other research works. This simulator is able to include graph-based network topologies and pluggable fog service placement policies, apart from other features that, to the best of our knowledge, are not provided by other fog simulators, such as node failures, or dynamic service placement and routing. The simulator is open source and it can be downloaded from its code repository [29]."
1189
+ },
1190
+ {
1191
+ "type": "text",
1192
+ "bbox": [
1193
+ 0.072,
1194
+ 0.841,
1195
+ 0.493,
1196
+ 0.945
1197
+ ],
1198
+ "angle": 0,
1199
+ "content": "The experiment results are presented and analyzed in two separated sections. Section 5.1 includes the analysis of the results obtained with the YAFS simulator. Those results compare the user perceived response time and the availability of the applications for the IoT devices. In Section 5.2, it is presented an analysis of the service placement obtained with both optimization policies (our proposal and the ILP"
1200
+ },
1201
+ {
1202
+ "type": "text",
1203
+ "bbox": [
1204
+ 0.505,
1205
+ 0.055,
1206
+ 0.544,
1207
+ 0.068
1208
+ ],
1209
+ "angle": 0,
1210
+ "content": "one)."
1211
+ },
1212
+ {
1213
+ "type": "title",
1214
+ "bbox": [
1215
+ 0.505,
1216
+ 0.091,
1217
+ 0.688,
1218
+ 0.105
1219
+ ],
1220
+ "angle": 0,
1221
+ "content": "5.1 Simulation Results"
1222
+ },
1223
+ {
1224
+ "type": "text",
1225
+ "bbox": [
1226
+ 0.503,
1227
+ 0.11,
1228
+ 0.925,
1229
+ 0.417
1230
+ ],
1231
+ "angle": 0,
1232
+ "content": "A first simulation scenario included fails in the fog devices to study the availability of the services when the nodes are getting down. The simulation included random and permanent fails in the nodes, starting with all the devices (100 nodes) alive, and finishing the simulation with fails in all of them. The fails were generated uniformly along the simulation. The results of this simulation are presented in Fig. 4 and shows the QoS in terms of the total number of requests that are executed satisfying the application deadline. The reason because a request does not satisfy the deadline can be both due to the response time is higher than the deadline or due to none device with the services of the requested application are reachable from the IoT device due to all the paths between them have failed devices. Three data series are represented in Fig. 4: one for the total number of requests that are sent from the IoT devices (labeled with Total num. of requests), one for the number of requests that are executed before the deadline when the placement of our solution is considered (labeled with Partition); and the number of requests that satisfied the deadline with the ILP policy (labeled with ILP)."
1233
+ },
1234
+ {
1235
+ "type": "text",
1236
+ "bbox": [
1237
+ 0.504,
1238
+ 0.417,
1239
+ 0.925,
1240
+ 0.49
1241
+ ],
1242
+ "angle": 0,
1243
+ "content": "It is observed that our approach results in a higher number of satisfied requests, mainly during the first half of the simulation (up to 50 failed devices). In the second part of the simulation, improvements in the QoS are also observed but these are less significant in regard with the ILP."
1244
+ },
1245
+ {
1246
+ "type": "text",
1247
+ "bbox": [
1248
+ 0.503,
1249
+ 0.49,
1250
+ 0.925,
1251
+ 0.709
1252
+ ],
1253
+ "angle": 0,
1254
+ "content": "For the sake of a deeper analysis of the availability, it has been also measured in terms of the number of IoT devices that are able to request their applications thank to that all the services they need are reachable with network paths without failed devices. This is represented in Fig. 5, where the y-axis are the number of IoT devices that are able to request their applications, and the x-axis the number of devices that have failed. The figure also includes the hypothetical and impossible case, due to the resource limit constraint, of allocating all the services in the gateways (labeled as All in gtws.). This is the best case and is useful to compare the solutions with the best upper bound. These results confirm that our proposal is able to increase the availability of the system when fails happen in the fog devices."
1255
+ },
1256
+ {
1257
+ "type": "text",
1258
+ "bbox": [
1259
+ 0.504,
1260
+ 0.709,
1261
+ 0.925,
1262
+ 0.898
1263
+ ],
1264
+ "angle": 0,
1265
+ "content": "A second simulation scenario did not include fails in the fog devices and was used to study the user perceived response time of the applications. These response times were measured as the time between the user request was generated in the IoT device and all the application services finished. The results were measured independently for each pair application-IoT device. They are summarized in Fig. 6. Each plot in the figure represents the response times of an application, an each item in the x-axis corresponds to one gateway that has an IoT device (or user) that request the application. The results of our solution are labeled as Partition and the results of the ILP approach are labeled as ILP."
1266
+ },
1267
+ {
1268
+ "type": "text",
1269
+ "bbox": [
1270
+ 0.504,
1271
+ 0.899,
1272
+ 0.925,
1273
+ 0.945
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": "It is observed that the placement obtained with our proposal does not reduce the response time for all the applications, but it is shorter for 13 of the 20 applications."
1277
+ }
1278
+ ],
1279
+ [
1280
+ {
1281
+ "type": "header",
1282
+ "bbox": [
1283
+ 0.076,
1284
+ 0.033,
1285
+ 0.106,
1286
+ 0.044
1287
+ ],
1288
+ "angle": 0,
1289
+ "content": "IEEE"
1290
+ },
1291
+ {
1292
+ "type": "page_number",
1293
+ "bbox": [
1294
+ 0.913,
1295
+ 0.034,
1296
+ 0.922,
1297
+ 0.043
1298
+ ],
1299
+ "angle": 0,
1300
+ "content": "7"
1301
+ },
1302
+ {
1303
+ "type": "image",
1304
+ "bbox": [
1305
+ 0.162,
1306
+ 0.056,
1307
+ 0.835,
1308
+ 0.226
1309
+ ],
1310
+ "angle": 0,
1311
+ "content": null
1312
+ },
1313
+ {
1314
+ "type": "image_caption",
1315
+ "bbox": [
1316
+ 0.073,
1317
+ 0.243,
1318
+ 0.925,
1319
+ 0.273
1320
+ ],
1321
+ "angle": 0,
1322
+ "content": "Fig. 4. Evolution of the QoS with regard to the fail of fog devices, in terms of the number of requests which satisfy application deadlines \\(\\left(|RT_{RQ_{US_a,APP_x}^n} < DL_{APP_x}|\\right)\\) compared with the total number of requests \\(\\left(|RQ_{US_a,APP_x}^n|\\right)\\)."
1323
+ },
1324
+ {
1325
+ "type": "image",
1326
+ "bbox": [
1327
+ 0.139,
1328
+ 0.301,
1329
+ 0.429,
1330
+ 0.439
1331
+ ],
1332
+ "angle": 0,
1333
+ "content": null
1334
+ },
1335
+ {
1336
+ "type": "image_caption",
1337
+ "bbox": [
1338
+ 0.073,
1339
+ 0.456,
1340
+ 0.493,
1341
+ 0.482
1342
+ ],
1343
+ "angle": 0,
1344
+ "content": "Fig. 5. Number of IoT devices that get services in regard with the number of failed fog devices (availability \\((APP_{x})\\))."
1345
+ },
1346
+ {
1347
+ "type": "text",
1348
+ "bbox": [
1349
+ 0.072,
1350
+ 0.507,
1351
+ 0.493,
1352
+ 0.668
1353
+ ],
1354
+ "angle": 0,
1355
+ "content": "Additionally, we can observed that in some applications an important damage of the response time is obtained. This is explained because both policies prioritize applications with shorter deadlines in front of the ones with longer deadlines. Nevertheless, there are less of these extreme cases, and with shorter times, when our policy is used: our policy only damages application 15 with a time of around 1000 ms, in front of four applications up to 400 s with the ILP policy (around 400000 ms for application 1, 300000 ms for application 8, 200000 ms for application 12, and 70000 for application 2)."
1356
+ },
1357
+ {
1358
+ "type": "text",
1359
+ "bbox": [
1360
+ 0.072,
1361
+ 0.668,
1362
+ 0.493,
1363
+ 0.756
1364
+ ],
1365
+ "angle": 0,
1366
+ "content": "In summary, our service placement policy shows a better behavior in terms of availability of the services that also results on a better QoS in the system. On the contrary, the response time of some applications results damaged but this behavior is also observed with the ILP policy, generating even worse response times."
1367
+ },
1368
+ {
1369
+ "type": "title",
1370
+ "bbox": [
1371
+ 0.074,
1372
+ 0.777,
1373
+ 0.255,
1374
+ 0.79
1375
+ ],
1376
+ "angle": 0,
1377
+ "content": "5.2 Placement Results"
1378
+ },
1379
+ {
1380
+ "type": "text",
1381
+ "bbox": [
1382
+ 0.073,
1383
+ 0.796,
1384
+ 0.49,
1385
+ 0.855
1386
+ ],
1387
+ "angle": 0,
1388
+ "content": "This section is devoted to compare the placement of the services obtained from the execution of our algorithm with regard to the ILP one. This analysis is included to give a brief idea of how the services are spread across the fog devices."
1389
+ },
1390
+ {
1391
+ "type": "text",
1392
+ "bbox": [
1393
+ 0.072,
1394
+ 0.856,
1395
+ 0.492,
1396
+ 0.945
1397
+ ],
1398
+ "angle": 0,
1399
+ "content": "Firstly, Fig. 7a shows that the placement of the services differs a lot between both placement policies. A mark in the plot of the figure indicates that a given service (y-axes) is placed in a given device (x-axes). Taking into account that the services of the same application have consecutive identifiers, it is also observed that in the case of our policy"
1400
+ },
1401
+ {
1402
+ "type": "text",
1403
+ "bbox": [
1404
+ 0.504,
1405
+ 0.297,
1406
+ 0.924,
1407
+ 0.342
1408
+ ],
1409
+ "angle": 0,
1410
+ "content": "(Partition), there are more cases of devices that allocate several services of the same application (consecutive marks in the same device)."
1411
+ },
1412
+ {
1413
+ "type": "text",
1414
+ "bbox": [
1415
+ 0.504,
1416
+ 0.365,
1417
+ 0.924,
1418
+ 0.701
1419
+ ],
1420
+ "angle": 0,
1421
+ "content": "Fig. 7b represent the resource usage of the fog devices. The y-axis represents the percentage of resources that are used by the services allocated in a given device and the x-axis are the devices ordered by these percentages in ascending order. By the analysis of the figure, we can observe that in the placement of our solution, there are almost the double of nodes that do not allocate any service (the resource usage is 0.0), and there is not any device that is fully used (resource usage of 1.0), with regard to the case of the ILP where almost 40 devices have a \\(100\\%\\) usage of the resources. The first interpretation of these results is that the scale level of our solution is smaller than the ILP one, in fact, we calculated that our policy deployed 357 (and 1161 resource units) instances of the services and the ILP deployed 374 (and 1203 resource units), around \\(5\\%\\) more services (\\(3.6\\%\\) more resources). Consequently, our solution is able to obtain better QoS and availability with a lower use of the fog resources (smaller number of instances). The second interpretation is that the services are more evenly distributed, since the workload of the devices is smaller, avoiding the saturation of the devices and keeping the system in a more flexible state in order to allocate new service instances."
1422
+ },
1423
+ {
1424
+ "type": "text",
1425
+ "bbox": [
1426
+ 0.503,
1427
+ 0.723,
1428
+ 0.924,
1429
+ 0.945
1430
+ ],
1431
+ "angle": 0,
1432
+ "content": "Finally, Fig. 7c shows the relationship between the service placement and the hop distance between the allocated service and the IoT device that requests it. A point in the scatter plot indicates how many IoT devices has a given distance with a service of the application they request. For example, in the case of our policy, there are around 100 services that are allocated in the gateways where the IoT devices are connected (a hop distance of 0.0). On the contrary, the ILP policy allocates more than 160 services in the gateways, the point (0,160) in the plot. We observe that the services are distributed more evenly and placed further from the gateways (higher distances) for the case our policy. Consequently, the ILP is able to place the services closer to the IoT devices. Despite this, our policy shows a better general behavior also in terms of application response time."
1433
+ }
1434
+ ],
1435
+ [
1436
+ {
1437
+ "type": "header",
1438
+ "bbox": [
1439
+ 0.076,
1440
+ 0.033,
1441
+ 0.106,
1442
+ 0.044
1443
+ ],
1444
+ "angle": 0,
1445
+ "content": "IEEE"
1446
+ },
1447
+ {
1448
+ "type": "page_number",
1449
+ "bbox": [
1450
+ 0.913,
1451
+ 0.034,
1452
+ 0.923,
1453
+ 0.043
1454
+ ],
1455
+ "angle": 0,
1456
+ "content": "8"
1457
+ },
1458
+ {
1459
+ "type": "image",
1460
+ "bbox": [
1461
+ 0.135,
1462
+ 0.085,
1463
+ 0.81,
1464
+ 0.474
1465
+ ],
1466
+ "angle": 0,
1467
+ "content": null
1468
+ },
1469
+ {
1470
+ "type": "image_caption",
1471
+ "bbox": [
1472
+ 0.073,
1473
+ 0.489,
1474
+ 0.768,
1475
+ 0.509
1476
+ ],
1477
+ "angle": 0,
1478
+ "content": "Fig. 6. User perceived response times of the applications for each user (or IoT device) in the system \\((RT_{RQ_{U S_{a},A P P_{x}}^{n}})\\)."
1479
+ },
1480
+ {
1481
+ "type": "image",
1482
+ "bbox": [
1483
+ 0.099,
1484
+ 0.536,
1485
+ 0.353,
1486
+ 0.655
1487
+ ],
1488
+ "angle": 0,
1489
+ "content": null
1490
+ },
1491
+ {
1492
+ "type": "image_caption",
1493
+ "bbox": [
1494
+ 0.096,
1495
+ 0.659,
1496
+ 0.354,
1497
+ 0.686
1498
+ ],
1499
+ "angle": 0,
1500
+ "content": "(a) Allocation of the services in the fog devices \\((P, p_{ui} \\forall S_u, D_i)\\)."
1501
+ },
1502
+ {
1503
+ "type": "image",
1504
+ "bbox": [
1505
+ 0.373,
1506
+ 0.536,
1507
+ 0.625,
1508
+ 0.655
1509
+ ],
1510
+ "angle": 0,
1511
+ "content": null
1512
+ },
1513
+ {
1514
+ "type": "image_caption",
1515
+ "bbox": [
1516
+ 0.371,
1517
+ 0.659,
1518
+ 0.627,
1519
+ 0.688
1520
+ ],
1521
+ "angle": 0,
1522
+ "content": "(b) Resource usage of the fog devices \\(\\begin{array}{r}\\sum_{u = 1}^{|S_u|}\\left(p_{ui}\\times CR_{S_u}\\right),\\forall D_i) \\end{array}\\)"
1523
+ },
1524
+ {
1525
+ "type": "image",
1526
+ "bbox": [
1527
+ 0.647,
1528
+ 0.537,
1529
+ 0.897,
1530
+ 0.655
1531
+ ],
1532
+ "angle": 0,
1533
+ "content": null
1534
+ },
1535
+ {
1536
+ "type": "image_caption",
1537
+ "bbox": [
1538
+ 0.644,
1539
+ 0.659,
1540
+ 0.902,
1541
+ 0.684
1542
+ ],
1543
+ "angle": 0,
1544
+ "content": "(c) Service allocation in terms of hop distance with the IoT devices."
1545
+ },
1546
+ {
1547
+ "type": "image_caption",
1548
+ "bbox": [
1549
+ 0.073,
1550
+ 0.694,
1551
+ 0.695,
1552
+ 0.709
1553
+ ],
1554
+ "angle": 0,
1555
+ "content": "Fig. 7. Comparison of the services placement between our partition-based algorithm and the ILP optimizer."
1556
+ },
1557
+ {
1558
+ "type": "title",
1559
+ "bbox": [
1560
+ 0.074,
1561
+ 0.732,
1562
+ 0.217,
1563
+ 0.746
1564
+ ],
1565
+ "angle": 0,
1566
+ "content": "6 CONCLUSION"
1567
+ },
1568
+ {
1569
+ "type": "text",
1570
+ "bbox": [
1571
+ 0.073,
1572
+ 0.753,
1573
+ 0.491,
1574
+ 0.84
1575
+ ],
1576
+ "angle": 0,
1577
+ "content": "We have proposed an algorithm for service placement in fog devices based on the partition of the fog devices (into communities) and the services of the applications (into transitive closures) for the optimization of the QoS of the system and the service availability for the users (or IoT devices)."
1578
+ },
1579
+ {
1580
+ "type": "text",
1581
+ "bbox": [
1582
+ 0.072,
1583
+ 0.841,
1584
+ 0.492,
1585
+ 0.945
1586
+ ],
1587
+ "angle": 0,
1588
+ "content": "Two simulation scenarios have been executed, one including fails in the fog devices and another one without fails, to measure the response time of the applications, the service availability and the number of request that were served satisfying the application deadlines. The service placement obtained with our policy resulted in a higher QoS and service availability, with regard to the placement"
1589
+ },
1590
+ {
1591
+ "type": "text",
1592
+ "bbox": [
1593
+ 0.504,
1594
+ 0.733,
1595
+ 0.923,
1596
+ 0.822
1597
+ ],
1598
+ "angle": 0,
1599
+ "content": "of an ILP-based algorithm. In the case of the user perceived response time, our policy obtained better times for 13 of the total 20 applications. Both policies showed a high degradation of service for some applications, but in the case of the ILP, this degradation happened in more applications and resulting in longer response times."
1600
+ },
1601
+ {
1602
+ "type": "text",
1603
+ "bbox": [
1604
+ 0.503,
1605
+ 0.826,
1606
+ 0.924,
1607
+ 0.944
1608
+ ],
1609
+ "angle": 0,
1610
+ "content": "As future works, the use of complex networks and graph theory for the optimization of other parameters of the systems, such as service cost, network usage, migration cost, and service provider cost could be studied. By the own nature of the proposed policy, the optimization of these other metrics probably would need to be combined with other type of heuristics to obtain suitable results, and consequently, further research is necessary."
1611
+ }
1612
+ ],
1613
+ [
1614
+ {
1615
+ "type": "header",
1616
+ "bbox": [
1617
+ 0.075,
1618
+ 0.033,
1619
+ 0.106,
1620
+ 0.044
1621
+ ],
1622
+ "angle": 0,
1623
+ "content": "IEEE"
1624
+ },
1625
+ {
1626
+ "type": "page_number",
1627
+ "bbox": [
1628
+ 0.913,
1629
+ 0.034,
1630
+ 0.923,
1631
+ 0.044
1632
+ ],
1633
+ "angle": 0,
1634
+ "content": "9"
1635
+ },
1636
+ {
1637
+ "type": "title",
1638
+ "bbox": [
1639
+ 0.075,
1640
+ 0.053,
1641
+ 0.251,
1642
+ 0.067
1643
+ ],
1644
+ "angle": 0,
1645
+ "content": "ACKNOWLEDGMENTS"
1646
+ },
1647
+ {
1648
+ "type": "text",
1649
+ "bbox": [
1650
+ 0.073,
1651
+ 0.073,
1652
+ 0.493,
1653
+ 0.146
1654
+ ],
1655
+ "angle": 0,
1656
+ "content": "This research was supported by the Spanish Government (Agencia Estatal de Investigación) and the European Commission (Fondo Europeo de Desarrollo Regional) through grant number TIN2017-88547-P (MINECO/AEI/FEDER, UE)."
1657
+ },
1658
+ {
1659
+ "type": "title",
1660
+ "bbox": [
1661
+ 0.075,
1662
+ 0.168,
1663
+ 0.19,
1664
+ 0.183
1665
+ ],
1666
+ "angle": 0,
1667
+ "content": "REFERENCES"
1668
+ },
1669
+ {
1670
+ "type": "ref_text",
1671
+ "bbox": [
1672
+ 0.074,
1673
+ 0.189,
1674
+ 0.492,
1675
+ 0.214
1676
+ ],
1677
+ "angle": 0,
1678
+ "content": "[1] O. Consortium et al., \"Openfog reference architecture for fog computing,\" Tech. Rep., February, Tech. Rep., 2017."
1679
+ },
1680
+ {
1681
+ "type": "ref_text",
1682
+ "bbox": [
1683
+ 0.074,
1684
+ 0.214,
1685
+ 0.492,
1686
+ 0.259
1687
+ ],
1688
+ "angle": 0,
1689
+ "content": "[2] S. Filiposka, A. Mishev, and K. Gilly, \"Community-based allocation and migration strategies for fog computing,\" in 2018 IEEE Wireless Communications and Networking Conference (WCNC), April 2018, pp. 1-6."
1690
+ },
1691
+ {
1692
+ "type": "ref_text",
1693
+ "bbox": [
1694
+ 0.074,
1695
+ 0.259,
1696
+ 0.492,
1697
+ 0.293
1698
+ ],
1699
+ "angle": 0,
1700
+ "content": "[3] Z. Wen, R. Yang, P. Garraghan, T. Lin, J. Xu, and M. Rovatsos, \"Fog orchestration for internet of things services,\" IEEE Internet Computing, vol. 21, no. 2, pp. 16-24, Mar 2017."
1701
+ },
1702
+ {
1703
+ "type": "ref_text",
1704
+ "bbox": [
1705
+ 0.074,
1706
+ 0.293,
1707
+ 0.492,
1708
+ 0.338
1709
+ ],
1710
+ "angle": 0,
1711
+ "content": "[4] O. Skarlat, M. Nardelli, S. Schulte, M. Borkowski, and P. Leitner, \"Optimized IoT service placement in the fog,\" Service Oriented Computing and Applications, Oct 2017. [Online]. Available: https://doi.org/10.1007/s11761-017-0219-8"
1712
+ },
1713
+ {
1714
+ "type": "ref_text",
1715
+ "bbox": [
1716
+ 0.074,
1717
+ 0.338,
1718
+ 0.492,
1719
+ 0.373
1720
+ ],
1721
+ "angle": 0,
1722
+ "content": "[5] A. Brogi and S. Forti, \"Qos-aware deployment of IoT applications through the fog,\" IEEE Internet of Things Journal, vol. 4, no. 5, pp. 1185-1192, Oct 2017."
1723
+ },
1724
+ {
1725
+ "type": "ref_text",
1726
+ "bbox": [
1727
+ 0.074,
1728
+ 0.373,
1729
+ 0.492,
1730
+ 0.429
1731
+ ],
1732
+ "angle": 0,
1733
+ "content": "[6] C. Guerrero, I. Lera, and C. Juiz, \"A lightweight decentralized service placement policy for performance optimization in fog computing,\" Journal of Ambient Intelligence and Humanized Computing, Jun 2018. [Online]. Available: https://doi.org/10.1007/s12652-018-0914-0"
1734
+ },
1735
+ {
1736
+ "type": "ref_text",
1737
+ "bbox": [
1738
+ 0.074,
1739
+ 0.429,
1740
+ 0.492,
1741
+ 0.464
1742
+ ],
1743
+ "angle": 0,
1744
+ "content": "[7] L. Ni, J. Zhang, C. Jiang, C. Yan, and K. Yu, \"Resource allocation strategy in fog computing based on priced timed petri nets,\" IEEE Internet of Things Journal, vol. 4, no. 5, pp. 1216-1228, Oct 2017."
1745
+ },
1746
+ {
1747
+ "type": "ref_text",
1748
+ "bbox": [
1749
+ 0.074,
1750
+ 0.464,
1751
+ 0.492,
1752
+ 0.532
1753
+ ],
1754
+ "angle": 0,
1755
+ "content": "[8] R. Urgaonkar, S. Wang, T. He, M. Zafer, K. Chan, and K. K. Leung, \"Dynamic service migration and workload scheduling in edge-clouds,\" Performance Evaluation, vol. 91, no. Supplement C, pp. 205 - 228, 2015, special Issue: Performance 2015. [Online]. Available: http://www.sciencedirect.com/science/article/pii/S0166531615000619"
1756
+ },
1757
+ {
1758
+ "type": "ref_text",
1759
+ "bbox": [
1760
+ 0.074,
1761
+ 0.532,
1762
+ 0.492,
1763
+ 0.578
1764
+ ],
1765
+ "angle": 0,
1766
+ "content": "[9] L. Gu, D. Zeng, S. Guo, A. Barnawi, and Y. Xiang, \"Cost efficient resource management in fog computing supported medical cyberphysical system,\" IEEE Transactions on Emerging Topics in Computing, vol. 5, no. 1, pp. 108-119, Jan 2017."
1767
+ },
1768
+ {
1769
+ "type": "ref_text",
1770
+ "bbox": [
1771
+ 0.074,
1772
+ 0.578,
1773
+ 0.492,
1774
+ 0.624
1775
+ ],
1776
+ "angle": 0,
1777
+ "content": "[10] K. Velasquez, D. P. Abreu, M. Curado, and E. Monteiro, \"Service placement for latency reduction in the internet of things,\" Annals of Telecommunications, vol. 72, no. 1, pp. 105-115, Feb 2017. [Online]. Available: https://doi.org/10.1007/s12243-016-0524-9"
1778
+ },
1779
+ {
1780
+ "type": "ref_text",
1781
+ "bbox": [
1782
+ 0.074,
1783
+ 0.623,
1784
+ 0.492,
1785
+ 0.68
1786
+ ],
1787
+ "angle": 0,
1788
+ "content": "[11] Z. Huang, K.-J. Lin, S.-Y. Yu, and J. Y. Jen Hsu, \"Co-locating services in IoT systems to minimize the communication energy cost,\" Journal of Innovation in Digital Ecosystems, vol. 1, no. 1, pp. 47 - 57, 2014. [Online]. Available: http://www.sciencedirect.com/science/article/pii/S2352664515000061"
1789
+ },
1790
+ {
1791
+ "type": "ref_text",
1792
+ "bbox": [
1793
+ 0.074,
1794
+ 0.68,
1795
+ 0.492,
1796
+ 0.715
1797
+ ],
1798
+ "angle": 0,
1799
+ "content": "[12] L. Yang, J. Cao, G. Liang, and X. Han, \"Cost aware service placement and load dispatching in mobile cloud systems,\" IEEE Transactions on Computers, vol. 65, no. 5, pp. 1440-1452, May 2016."
1800
+ },
1801
+ {
1802
+ "type": "ref_text",
1803
+ "bbox": [
1804
+ 0.074,
1805
+ 0.714,
1806
+ 0.492,
1807
+ 0.76
1808
+ ],
1809
+ "angle": 0,
1810
+ "content": "[13] V. B. C. Souza, W. Ramírez, X. Masip-Bruin, E. Marín-Tordera, G. Ren, and G. Tashakor, \"Handling service allocation in combined fog-cloud scenarios,\" in 2016 IEEE International Conference on Communications (ICC), May 2016, pp. 1-5."
1811
+ },
1812
+ {
1813
+ "type": "ref_text",
1814
+ "bbox": [
1815
+ 0.074,
1816
+ 0.76,
1817
+ 0.492,
1818
+ 0.806
1819
+ ],
1820
+ "angle": 0,
1821
+ "content": "[14] D. Zeng, L. Gu, S. Guo, Z. Cheng, and S. Yu, \"Joint optimization of task scheduling and image placement in fog computing supported software-defined embedded system,\" IEEE Transactions on Computers, vol. 65, no. 12, pp. 3702-3712, Dec 2016."
1822
+ },
1823
+ {
1824
+ "type": "ref_text",
1825
+ "bbox": [
1826
+ 0.074,
1827
+ 0.806,
1828
+ 0.492,
1829
+ 0.851
1830
+ ],
1831
+ "angle": 0,
1832
+ "content": "[15] S. Filiposka, A. Mishev, and C. Juiz, \"Community-based vm placement framework,\" The Journal of Supercomputing, vol. 71, no. 12, pp. 4504-4528, Dec 2015. [Online]. Available: https://doi.org/10.1007/s11227-015-1546-1"
1833
+ },
1834
+ {
1835
+ "type": "ref_text",
1836
+ "bbox": [
1837
+ 0.074,
1838
+ 0.851,
1839
+ 0.492,
1840
+ 0.896
1841
+ ],
1842
+ "angle": 0,
1843
+ "content": "[16] C. Guerrero, I. Lera, and C. Juiz, \"On the influence of fog colonies partitioning in fog application makespan,\" in 2019 IEEE 6th International Conference on Future Internet of Things and Cloud (FiCloud), August 2018."
1844
+ },
1845
+ {
1846
+ "type": "ref_text",
1847
+ "bbox": [
1848
+ 0.074,
1849
+ 0.896,
1850
+ 0.492,
1851
+ 0.943
1852
+ ],
1853
+ "angle": 0,
1854
+ "content": "[17] I. Lera, C. Guerrero, and C. Juiz, \"Comparing centrality indices for network usage optimization of data placement policies in fog devices,\" in 2018 Third International Conference on Fog and Mobile Edge Computing (FMEC), April 2018, pp. 115-122."
1855
+ },
1856
+ {
1857
+ "type": "list",
1858
+ "bbox": [
1859
+ 0.074,
1860
+ 0.189,
1861
+ 0.492,
1862
+ 0.943
1863
+ ],
1864
+ "angle": 0,
1865
+ "content": null
1866
+ },
1867
+ {
1868
+ "type": "ref_text",
1869
+ "bbox": [
1870
+ 0.507,
1871
+ 0.055,
1872
+ 0.923,
1873
+ 0.09
1874
+ ],
1875
+ "angle": 0,
1876
+ "content": "[18] Y. Elkhatib, B. Porter, H. B. Ribeiro, M. F. Zhani, J. Qadir, and E. Riviere, \"On using micro-clouds to deliver the fog,\" IEEE Internet Computing, vol. 21, no. 2, pp. 8-15, Mar 2017."
1877
+ },
1878
+ {
1879
+ "type": "ref_text",
1880
+ "bbox": [
1881
+ 0.507,
1882
+ 0.091,
1883
+ 0.923,
1884
+ 0.124
1885
+ ],
1886
+ "angle": 0,
1887
+ "content": "[19] F. Bonomi, R. Milito, P. Natarajan, and J. Zhu, Fog Computing: A Platform for Internet of Things and Analytics. Cham: Springer International Publishing, 2014, pp. 169-186."
1888
+ },
1889
+ {
1890
+ "type": "ref_text",
1891
+ "bbox": [
1892
+ 0.507,
1893
+ 0.124,
1894
+ 0.923,
1895
+ 0.159
1896
+ ],
1897
+ "angle": 0,
1898
+ "content": "[20] A. Yousefpour, G. Ishigaki, R. Gour, and J. P. Jue, \"On reducing iot service delay via fog offloading,\" IEEE Internet of Things Journal, vol. PP, no. 99, pp. 1-1, 2018."
1899
+ },
1900
+ {
1901
+ "type": "ref_text",
1902
+ "bbox": [
1903
+ 0.507,
1904
+ 0.159,
1905
+ 0.923,
1906
+ 0.204
1907
+ ],
1908
+ "angle": 0,
1909
+ "content": "[21] M. Vogler, J. M. Schleicher, C. Inzinger, and S. Dustdar, \"A scalable framework for provisioning large-scale IoT deployments,\" ACM Trans. Internet Technol., vol. 16, no. 2, pp. 11:1-11:20, Mar. 2016. [Online]. Available: http://doi.acm.org/10.1145/2850416"
1910
+ },
1911
+ {
1912
+ "type": "ref_text",
1913
+ "bbox": [
1914
+ 0.507,
1915
+ 0.204,
1916
+ 0.923,
1917
+ 0.25
1918
+ ],
1919
+ "angle": 0,
1920
+ "content": "[22] A. Krylovskiy, M. Jahn, and E. Patti, \"Designing a smart city internet of things platform with microservice architecture,\" in 2015 3rd International Conference on Future Internet of Things and Cloud, Aug 2015, pp. 25-30."
1921
+ },
1922
+ {
1923
+ "type": "ref_text",
1924
+ "bbox": [
1925
+ 0.507,
1926
+ 0.25,
1927
+ 0.923,
1928
+ 0.328
1929
+ ],
1930
+ "angle": 0,
1931
+ "content": "[23] E. Saurez, K. Hong, D. Lillethun, U. Ramachandran, and B. Ottenwalder, \"Incremental deployment and migration of geo-distributed situation awareness applications in the fog,\" in Proceedings of the 10th ACM International Conference on Distributed and Event-based Systems, ser. DEBS '16. New York, NY, USA: ACM, 2016, pp. 258-269. [Online]. Available: http://doi.acm.org/10.1145/2933267.2933317"
1932
+ },
1933
+ {
1934
+ "type": "ref_text",
1935
+ "bbox": [
1936
+ 0.507,
1937
+ 0.328,
1938
+ 0.923,
1939
+ 0.363
1940
+ ],
1941
+ "angle": 0,
1942
+ "content": "[24] A. Balalaie, A. Heydarnoori, and P. Jamshidi, \"Microservices architecture enables devops: Migration to a cloud-native architecture,\" IEEE Software, vol. 33, no. 3, pp. 42-52, May 2016."
1943
+ },
1944
+ {
1945
+ "type": "ref_text",
1946
+ "bbox": [
1947
+ 0.507,
1948
+ 0.363,
1949
+ 0.923,
1950
+ 0.408
1951
+ ],
1952
+ "angle": 0,
1953
+ "content": "[25] M. E. J. Newman and M. Girvan, \"Finding and evaluating community structure in networks,\" Phys. Rev. E, vol. 69, no. 2, p. 026113, Feb. 2004. [Online]. Available: http://link.aps.org/doi/10.1103/PhysRevE.69.026113"
1954
+ },
1955
+ {
1956
+ "type": "ref_text",
1957
+ "bbox": [
1958
+ 0.507,
1959
+ 0.408,
1960
+ 0.923,
1961
+ 0.455
1962
+ ],
1963
+ "angle": 0,
1964
+ "content": "[26] S. Fortunato, V. Latora, and M. Marchiori, \"Method to find community structures based on information centrality,\" Phys. Rev. E, vol. 70, p. 056104, Nov 2004. [Online]. Available: https://link.aps.org/doi/10.1103/PhysRevE.70.056104"
1965
+ },
1966
+ {
1967
+ "type": "ref_text",
1968
+ "bbox": [
1969
+ 0.507,
1970
+ 0.454,
1971
+ 0.923,
1972
+ 0.511
1973
+ ],
1974
+ "angle": 0,
1975
+ "content": "[27] A. Alahmadi, A. Alnowiser, M. M. Zhu, D. Che, and P. Ghodous, \"Enhanced first-fit decreasing algorithm for energy-aware job scheduling in cloud,\" in 2014 International Conference on Computational Science and Computational Intelligence, vol. 2, March 2014, pp. 69-74."
1976
+ },
1977
+ {
1978
+ "type": "ref_text",
1979
+ "bbox": [
1980
+ 0.507,
1981
+ 0.511,
1982
+ 0.923,
1983
+ 0.545
1984
+ ],
1985
+ "angle": 0,
1986
+ "content": "[28] H. S. Warren Jr, \"A modification of warshall's algorithm for the transitive closure of binary relations,\" Communications of the ACM, vol. 18, no. 4, pp. 218-220, 1975."
1987
+ },
1988
+ {
1989
+ "type": "ref_text",
1990
+ "bbox": [
1991
+ 0.507,
1992
+ 0.545,
1993
+ 0.923,
1994
+ 0.568
1995
+ ],
1996
+ "angle": 0,
1997
+ "content": "[29] I. Lera and C. Guerrero, \"Yafs, yet another fog simulator,\" https: //github.com/acsicuib/YAFS, accessed: 2018-02-03."
1998
+ },
1999
+ {
2000
+ "type": "list",
2001
+ "bbox": [
2002
+ 0.507,
2003
+ 0.055,
2004
+ 0.923,
2005
+ 0.568
2006
+ ],
2007
+ "angle": 0,
2008
+ "content": null
2009
+ },
2010
+ {
2011
+ "type": "image",
2012
+ "bbox": [
2013
+ 0.507,
2014
+ 0.58,
2015
+ 0.628,
2016
+ 0.697
2017
+ ],
2018
+ "angle": 0,
2019
+ "content": null
2020
+ },
2021
+ {
2022
+ "type": "text",
2023
+ "bbox": [
2024
+ 0.639,
2025
+ 0.58,
2026
+ 0.923,
2027
+ 0.694
2028
+ ],
2029
+ "angle": 0,
2030
+ "content": "Isaac Lera received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2012. He is an assistant professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research lines are semantic web, open data, system performance, educational innovation and human mobility. He has authored in several journals and international conferences."
2031
+ },
2032
+ {
2033
+ "type": "image",
2034
+ "bbox": [
2035
+ 0.508,
2036
+ 0.698,
2037
+ 0.628,
2038
+ 0.815
2039
+ ],
2040
+ "angle": 0,
2041
+ "content": null
2042
+ },
2043
+ {
2044
+ "type": "text",
2045
+ "bbox": [
2046
+ 0.639,
2047
+ 0.697,
2048
+ 0.923,
2049
+ 0.812
2050
+ ],
2051
+ "angle": 0,
2052
+ "content": "Carlos Guerrero received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2012. He is an assistant professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research interests include web performance, resource management, web engineering, and cloud computing. He has authored around 40 papers in international conferences and journals."
2053
+ },
2054
+ {
2055
+ "type": "image",
2056
+ "bbox": [
2057
+ 0.508,
2058
+ 0.817,
2059
+ 0.628,
2060
+ 0.932
2061
+ ],
2062
+ "angle": 0,
2063
+ "content": null
2064
+ },
2065
+ {
2066
+ "type": "text",
2067
+ "bbox": [
2068
+ 0.639,
2069
+ 0.815,
2070
+ 0.923,
2071
+ 0.93
2072
+ ],
2073
+ "angle": 0,
2074
+ "content": "Carlos Juiz received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2001. He is an associate professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research interests include performance engineering, cloud computing and IT governance. He has authored around 150 papers in different international conferences and journals."
2075
+ }
2076
+ ]
2077
+ ]
2401.12xxx/2401.12690/144bbb49-024f-4544-960f-9726a73d392b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33712636aea8e65150a085dd63d4f029f3d27bb8613c5d8edb54b7d6fb887ffa
3
+ size 4452688
2401.12xxx/2401.12690/full.md ADDED
@@ -0,0 +1,319 @@
1
+ # Availability-aware Service Placement Policy in Fog Computing Based on Graph Partitions
2
+
3
+ Isaac Lera, Carlos Guerrero, and Carlos Juiz, Senior Member, IEEE
4
+
5
+ Abstract—This paper presents a policy for the service placement of fog applications inspired by complex networks and graph theory. We propose a twofold partition process: a community-based partition of the fog devices and a transitive-closure-based partition of the application services. The allocation of the services is performed sequentially by, firstly, mapping applications to device communities and, secondly, mapping service transitive closures to fog devices in the community. The underlying idea is to place as many inter-related services as possible in the devices closest to the users. The optimization objectives are the availability of the applications and the Quality of Service (QoS) of the system, measured as the number of requests that are executed before the application deadlines. We compared our solution with an Integer Linear Programming approach, and the simulation results showed that our proposal obtains higher QoS and availability when failures in the nodes are considered.
6
+
7
+ Index Terms—Fog computing, Service placement, Service availability, Performance optimization, Complex network communities, Graph transitive closures.
8
+
9
+ # 1 INTRODUCTION
10
+
11
+ Fog computing has emerged as a suitable solution to the increase in application execution time and network usage generated by Internet of Things applications based on cloud services. This paradigm establishes that the in-network devices are provided with computational and storage capacities, enabling them to allocate or execute services of the IoT applications that are commonly executed in the cloud provider [1]. By this, the application services are placed closer to the user (or IoT) devices and, consequently, the network latency between users and services and the network usage are reduced. Nevertheless, the limited capacities of the in-network devices, also known as fog devices in this domain, make the definition of management policies even more necessary than in other distributed systems such as cloud computing.
12
+
13
+ The objective of our work is to study an application service placement policy that maximizes service availability in case of failures. The placement consists of the selection of the most suitable fog devices to map service instances. We consider that the IoT applications are defined as a set of interrelated services that are initially and permanently deployed on the cloud provider, but that can be horizontally scaled by creating new stateless instances in the fog devices. We also consider that the users of our domain are permanently connected to the same gateway or access point, i.e., we consider that our users are IoT devices such as sensors or actuators, instead of considering mobility patterns, as, for example, in the case of mobile users.
14
+
15
+ We propose a two-phase policy that aims to optimize the service availability, in terms of the reachability of the services from the IoT devices, and the deadline satisfaction ratios, in terms of the percentage of requests that obtain the application responses before their deadlines.
16
+
17
+ In the first phase, the policy maps applications (the complete set of interrelated services) to a set of well-connected devices to guarantee the availability of the application for the users connected to that set. We propose to use the community structure of the fog devices for the generation of the partitions of those devices. Once an application is mapped to a fog community, a second allocation process is performed, mapping the services of the application to the fog devices in the community. This second phase addresses the optimization of the response time by prioritizing the allocation of interrelated services in the same fog device. We propose to partition the services of an application by using the transitive closure of a service to determine the services to be placed together in the same device.
18
+
19
+ The fog service placement problem has been addressed in previous research, even considering community-based approaches [2], but we address some features that have not been previously considered. The novel contributions of our approach are:
20
+
21
+ - The combination of the use of complex network communities for the device partition and service transitive closures for the application partition, which has not been used in previous studies.
22
+ - The optimization of both the application deadline satisfaction, considered in some previous studies, and the application availability, not included in previous studies, and their evolution along the simulation.
23
+ - An experimental validation that includes dynamic fails of the infrastructure along the simulation.
24
+
25
+ # 2 RELATED WORK
26
+
27
+ The problem of the optimization of service placement in a fog architecture has been previously addressed from several different perspectives, by considering algorithm proposals such as genetic algorithms [3], [4], Montecarlo methods [5], distributed solutions [6], Petri Nets [7], Markov
28
+
29
+ processes [8], and being linear programming one of the most common solutions [9], [10], [11], [12], [13], [14].
30
+
31
+ Nevertheless, there is still room for improvement and some research challenges have still not been covered. For example, most of the previous solutions have included the optimization of response time, power consumption, cost, or network usage. But, to the best of our knowledge, they have not studied the availability and the influence of failures in the infrastructure.
32
+
33
+ The use of the community relationship of the devices of a distributed system for the optimization of resource management was initially proposed by Filiposka et al. [15], who applied it to the allocation of virtual machines in a datacenter in order to optimize the hop distances between related virtual machines. In the field of fog computing, the use of other topological features of graphs and complex networks was proposed at a later stage, such as centrality indexes for the static definition of fog colonies [16] or the placement of data in fog devices [17].
34
+
35
+ The idea of organizing the complex structure of a fog architecture has been applied in several studies, where the authors defined these static infrastructure organizations as fog colonies [4], micro-clouds [18], Foglets [19], or fog domains [20]. For example, Skarlat et al. [4] defined a twofold distributed placement policy that first considered whether a service should be allocated in a fog colony or migrated to the neighbor colony. Once the colony was chosen, the control node of the colony decided the device that allocated the service. In all those studies, the partition of the fog devices was static and unique for all the applications.
36
+
37
+ On the contrary, Filiposka, Mishev and Gilly proposed a virtual partition of the devices that is specific to each application and dynamically established by the conditions of the system. They implemented an evolution of the proposal in [15] for the case of the allocation of virtual machines (VM) into fog devices [2]. They considered that the fog services were encapsulated in one VM, and they proposed a two-phase optimization process, where in the first step the VM is mapped to a device community and, in the second step, the VM is allocated in any of the devices of the community with a traditional optimization technique. This is probably the most similar work to our proposal in terms of the optimization algorithm, but with a different optimization objective. Their objective was to propose a runtime algorithm for the migration of the VM as the mobile users of the applications move through different access points, in order to reduce the average service delay.
38
+
39
+ The main differences between the work of Filiposka et al. and our proposal are: first, we study the suitability of the community relationships to improve service availability instead of the migration of VMs due to user mobility; second, we consider a more complex structure of the applications, because we define them as a set of interrelated services that can be allocated in different devices, while they defined the applications as a single encapsulating element, the VM; third, we also study the use of a graph partitioning approach, the transitive closure of the services, for the allocation of the services inside the communities, in order to also benefit the placement of the most interrelated services in the same devices and reduce the network delays between interrelated services.
40
+
41
+ ![](images/4ee8894b7bd3407721aab9be19c14d6a1d1ab2e29b18a19a6655c6adebd4c954.jpg)
42
+ Fig. 1. Fog computing architecture.
43
+
44
+ # 3 PROBLEM STATEMENT
45
+
46
+ A general fog computing architecture is represented in Fig. 1 where three layers can be identified: cloud layer, fog layer and client layer. Three types of devices can be differentiated: a device for the cloud provider of the cloud layer; the gateways, that are the access points for the clients; the fog devices, the network devices between the cloud provider and the gateways. All the devices have resources to allocate and execute services.
47
+
48
+ The fog infrastructure can be modeled as a graph where the nodes are the devices and the edges the direct network links between devices. We identify those devices as $D_{i}$ , considering two special cases for the cloud provider ( $D_{i}^{cloud}$ ) and the gateways ( $D_{i}^{gw}$ ). The devices are defined by the available capacity of their resources $AR_{D_i}$ , that is a vector which contains the capacities of each physical component. For the sake of simplicity, we have considered a scalar value, but it could easily be extended by including as many elements as necessary. We suppose unlimited resources for the specific case of the cloud provider, $AR_{D_i^{cloud}} = \infty$ . The devices are also defined by the processing speed $IPT_{D_i}$ measured in terms of instructions per unit of time. The network links are identified by the two connected nodes $NL_{D_i,D_j}$ , and we consider that it is a bidirectional communication, $NL_{D_i,D_j} = NL_{D_j,D_i}$ . The network links are defined by the propagation delay, $PR_{NL_{D_i,D_j}}$ , and the network bandwidth, $BW_{NL_{D_i,D_j}}$ . Thus, the network delay, $ND_{NL_{D_i,D_j}}$ , for the transmission of a packet between two connected devices is calculated as:
49
+
50
+ $$
51
+ ND_{NL_{D_i,D_j}} = PR_{NL_{D_i,D_j}} + \frac{\text{size}}{BW_{NL_{D_i,D_j}}} \tag{1}
52
+ $$
53
+
54
+ where size is the size of the packet to be transmitted.
55
+
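+ To make Eq. (1) concrete, the following minimal Python sketch (ours, not part of the paper) computes the per-hop delay; with the experiment values later listed in Table 1 (propagation 5 ms, bandwidth 75000 bytes/ms), a 1.5 MB request message takes 25 ms per hop.
+
+ ```python
+ def network_delay(pr_ms: float, bw_bytes_per_ms: float, size_bytes: float) -> float:
+     """Eq. (1): propagation delay plus the transmission time of one packet."""
+     return pr_ms + size_bytes / bw_bytes_per_ms
+
+ print(network_delay(pr_ms=5, bw_bytes_per_ms=75_000, size_bytes=1_500_000))  # 25.0
+ ```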
56
+ The applications in our problem domain follow a microservice-based development pattern, which is increasingly being used in IoT applications [21], [22], [23]. This type of application is modeled as a set of small, stateless services that interoperate to accomplish a complex task [24]. Thus, the services can easily be scaled up, by downloading the encapsulating element and executing it, or scaled down, by simply stopping and removing instances of the service. We assume that there is at least one instance of each service running in the cloud provider $(D_{i}^{cloud})$ .
57
+
58
+ We model each application $APP_{x}$ as a directed graph, where the nodes are the services and the edges are the request messages between the services. We identify the services as $S_{u}$ and they are defined by the resource consumption generated in the device that allocates the service,
59
+
60
+ $CR_{S_u}$. As in the case of the available resources in a device, the resource consumption is generally defined as a vector which measures the consumption of each physical component, but we have considered a scalar value for a simpler definition of the problem. Services are executed when a request message is received. We classify the services into two types depending on the origin of the service request: the entry-point service $S_u^{sep}$, where the origins of the request messages that arrive at the service are users $US_a$ or IoT devices (typically sensors) $ID_b$; and the intra-services $S_u^{intra}$, which are only requested by other services. An intra-service can be requested by several different services, and the entry-point service can be requested by several users or IoT devices. But we suppose that there is only one entry-point service for each application.
61
+
62
+ The task performed by a service is different depending on the requester, so the execution generated by a request not only depends on the service but also on the requester, i.e. the request message. The request messages are identified by the origin and target services, $MS_{S_u,S_v}$ , and they are modeled as unidirectional edges, $MS_{S_u,S_v} \neq MS_{S_v,S_u}$ . The requests generated by the users or the IoT services, i.e. the requests to the entry-point services, are only identified by the target entry-point service $MS_{\emptyset,S_u}$ .
63
+
64
+ The request messages are defined by the size of the request message, $SZ_{MS_{S_u,S_v}}$, which determines the transmission time of the service request, and the execution load that the target service will generate in the device, defined by the number of instructions to be executed, $EI_{MS_{S_u,S_v}}$.
65
+
66
+ We assume that there is at least one instance of each service in the cloud provider. But those services can be horizontally scaled by deploying new instances in the fog devices. By this, the workload can be distributed between instances and the network delay from the user to the service is reduced. We define a placement matrix, $P$, of size $|S_u| \times |D_i|$, number of services per number of fog devices, where an element $p_{ui}$ is equal to 1 if service $S_u$ is deployed in device $D_i$, and 0 otherwise.
67
+
68
+ The placement of the services is constrained by the device resource capacity: the resources consumed by the allocated services should not exceed the available resources in the device:
69
+
70
+ $$
71
+ \sum_{u=1}^{|S_u|} \left( p_{ui} \times CR_{S_u} \right) \leq AR_{D_i}, \quad \forall D_i \tag{2}
72
+ $$
73
+
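+ As an illustration (our own sketch, with hypothetical variable names), the constraint of Eq. (2) can be checked for a candidate placement matrix with a few lines of NumPy:
+
+ ```python
+ import numpy as np
+
+ def placement_is_feasible(P: np.ndarray, CR: np.ndarray, AR: np.ndarray) -> bool:
+     """Eq. (2): the resources consumed by the services placed on each device
+     (column-wise sum of p_ui * CR_Su) must not exceed its available resources."""
+     used_per_device = P.T @ CR
+     return bool(np.all(used_per_device <= AR))
+
+ # toy example: 3 services, 2 devices
+ P = np.array([[1, 0],
+               [1, 0],
+               [0, 1]])
+ print(placement_is_feasible(P, CR=np.array([2, 3, 4]), AR=np.array([6, 4])))  # True
+ ```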
74
+ Our optimization objectives are to increase the application deadline satisfaction ratio, and the application availability as the devices or the network links fail.
75
+
76
+ We define the deadline satisfaction ratio as the percentage of application requests that are processed before the application deadline. Consequently, the applications in the system, $APP_{x}$, need to be defined by their deadlines, $DL_{APP_{x}}$. The user perceived response time, $RT_{RQ_{US_{a},APP_{x}}^{n}}$, is the metric that measures the time from the moment a specific application request $(RQ_{US_{a},APP_{x}}^{n})$ is sent by the user until all the application services finish their execution. It includes the network delay of the requests between services and the response times (execution and waiting time) of the services.
77
+
78
+ The equation for the deadline satisfaction ratio is:
79
+
80
+ $$
81
+ \text{deadline}(US_a, APP_x) = \frac{\left| RT_{RQ_{US_a,APP_x}^{n}} < DL_{APP_x} \right|}{\left| RQ_{US_a,APP_x}^{n} \right|} \tag{3}
82
+ $$
83
+
84
+ where $|RQ_{US_a,APP_x}^n|$ is the number of times that a request for $APP_x$ is sent from user $US_a$, and $|RT_{RQ_{US_a,APP_x}^n} < DL_{APP_x}|$ is the number of those requests that satisfied the application deadline. This metric can be generalized by considering the requests to an application from any user, deadline$(APP_x)$, or the ratio for all the applications and users in the system, deadline(system).
85
+
86
+ Our second objective, the application availability, is defined as the ratio of users that are able to reach all the services of the applications they request at a given point in time. In a hypothetical case where none of the elements in the system fails, the service availability would be 1.0. But the devices or the network links can fall down, breaking the shortest paths between the users and the application services. At best, this would only increase the network delay, since the requests would be routed over a longer path, damaging the deadline satisfaction ratios. But it could even make it impossible for the user to reach all the application services, damaging the service availability ratio. The equation for the service availability ratio is:
87
+
88
+ $$
89
+ \text{availability}(APP_x) = \frac{\left| US_a \text{ s.t. } \exists \text{ a path from } US_a \text{ to } APP_x \right|}{\left| US_a \text{ s.t. } US_a \text{ requests } APP_x \right|} \tag{4}
90
+ $$
91
+
92
+ In summary, our domain problem consists of finding $P$, i.e. $p_{ui}\ \forall S_u, D_i$, by minimizing $(1 - \text{deadline}(US_a, APP_x)) \wedge (1 - \text{availability}(APP_x))\ \forall US_a, APP_x$, subject to the constraint in Eq. (2).
93
+
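+ A small sketch of how these two metrics could be evaluated offline follows; this is our own illustration, and the helper names and data structures are assumptions rather than the paper's or the simulator's API.
+
+ ```python
+ import networkx as nx
+
+ def deadline_ratio(response_times_ms, deadline_ms):
+     """Eq. (3): fraction of requests answered before the application deadline."""
+     return sum(rt < deadline_ms for rt in response_times_ms) / len(response_times_ms)
+
+ def availability(topology, user_gateways, service_replicas):
+     """Eq. (4): fraction of users whose gateway can still reach at least one
+     replica of every service of the application in the (possibly degraded) topology."""
+     served = 0
+     for gw in user_gateways:
+         if gw in topology and all(
+             any(dev in topology and nx.has_path(topology, gw, dev) for dev in replicas)
+             for replicas in service_replicas
+         ):
+             served += 1
+     return served / len(user_gateways)
+ ```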
94
+ # 4 TWO PHASES PARTITION-BASED OPTIMIZATION PROPOSAL
95
+
96
+ Our optimization algorithm is based on a two-phase placement process: a first phase that maps applications to fog communities and a second phase that allocates the services of an application to the devices of a fog community. We partition the fog devices using the community relationship of the complex network that models the network infrastructure of the system. The application services are partitioned considering the transitive closures of the nodes that represent the services in the application graph.
97
+
98
+ We study whether the community relationship of the fog devices is a good indicator for detecting device sets that guarantee the availability of the services and the reachability of the devices when device and network link failures are considered. Additionally, we also study whether the transitive closure of a service is a good indicator for deciding which services are allocated in the same device to avoid network communication overheads.
99
+
100
+ # 4.1 Community-based Fog Devices Partition
101
+
102
+ The first phase of our optimization algorithm deals with the mapping between applications (a set of interrelated services) and a device partitioning. We propose to partition the devices with the use of the community relationship between them. This phase of our optimization algorithm is based
103
+
104
+ on the previous work of Filiposka, Mishev and Gilly, where they studied and validated community-based algorithms for placement optimization in cloud computing [15] and in fog computing [2].
105
+
106
+ The community structure is a topological feature of graphs that determines the sets of nodes which are better connected between them than with the rest of the network. The most popular community detection method is the one proposed by Girvan and Newman [25], which detects communities by progressively removing edges from the original graph. At each step, the algorithm removes the edges with the highest betweenness centrality. The betweenness centrality of an edge is the sum of the fractions of the shortest paths that pass through the edge. Therefore, a community that is organized as two regions mainly connected by only one edge is split into two new communities in each algorithm iteration.
107
+
108
+ Under the conditions of our domain problem, a device community can be understood as a set of devices that are well connected among them, with alternative communication paths, and whose shortest paths between devices are evenly distributed across the topology. Consequently, a failure in an edge inside the community will have a lower influence on the communication paths between devices than a failure in the edges that connect the communities. This lower influence means that failures inside the communities will neither generate isolated regions in the topology nor cause an important increase in the communication delays.
109
+
110
+ The Girvan-Newman method iteratively determines the communities, and the dendrogram, the tree structure of the communities, can be built. We characterize each community by its depth in the dendrogram, defined as the iteration in which the community was obtained. The higher the depth value, the better communicated the device community is. Consequently, from the point of view of availability, it is better to place the applications in device communities with higher depth values, since the devices inside those communities are better communicated among them than the devices in communities with lower depth values [26].
111
+
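+ The following sketch (ours, using networkx; the Barabási-Albert attachment parameter used to build the example graph is an illustrative assumption) shows how the dendrogram depth of each community could be obtained from successive Girvan-Newman iterations:
+
+ ```python
+ import networkx as nx
+ from networkx.algorithms.community import girvan_newman
+
+ def communities_by_depth(G):
+     """Tag every community (frozenset of devices) with the Girvan-Newman
+     iteration (depth) at which it first appears; deeper communities are
+     internally better connected."""
+     depth_of = {frozenset(G.nodes): 0}          # the whole graph has depth 0
+     for depth, partition in enumerate(girvan_newman(G), start=1):
+         for community in partition:
+             depth_of.setdefault(frozenset(community), depth)
+     return depth_of
+
+ infra = nx.barabasi_albert_graph(100, 2)        # 100 fog devices (m=2 is our assumption)
+ # order used by the first phase: deepest (best connected) communities first
+ ordered = sorted(communities_by_depth(infra).items(), key=lambda kv: kv[1], reverse=True)
+ ```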
112
+ For example, consider the fog infrastructure in Fig. 2. The network link $NL_{D_c,D_f}$ is the one with the highest edge betweenness centrality since the highest number of shortest paths pass through it. If we iterate the Girvan-Newman method over this example, communities 2 and 3 have higher depth values than community 1, since they are obtained when $NL_{D_c,D_f}$ is removed in the next iteration of the community generation algorithm. Consider also that we deploy an application with services $S_i$ and $S_j$ in community 1, allocating $S_i$ in $D_a$ and $S_j$ in $D_h$ , and that the user that requests the application is connected to device $D_b$ . Under those conditions, a failure in $NL_{D_c,D_f}$ would make it impossible to finish the execution of the application, since its services would be unreachable. On the contrary, if we deploy the application in community 2, a failure in an edge would not make it impossible to execute the application. Finally, consider that a second user is connected to device $D_h$ . The best alternative, from the point of view of availability, would be to horizontally scale up by deploying the same application twice, in both communities 2 and 3, rather than only once in either of them.
113
+
114
+ ![](images/9d008cea65a83e40768a77b353aca966dc1ff0a2ea645aa46009132a440b965f.jpg)
115
+ Fig. 2. Example of fog device communities.
116
+
117
+ This example shows that, in an unrealistic situation with unlimited resources in all the devices, the best option would be to deploy an instance of the application for each client that requests it, placing this deployment in the community with the highest depth value that includes the device the client is connected to. But this cannot be done due to the limited resources of the devices in a community. Moreover, note that the higher the depth value of a community, the smaller the number of devices in it; the communities with the highest values are the ones formed by only one device. Consequently, it is necessary to prioritize the allocation of the applications in the communities. We propose to use a greedy algorithm for this prioritization, more concretely, the First-Fit Decreasing algorithm [27].
118
+
119
+ Our optimization algorithm deals, in this first step, with the placement of applications in device communities using a First-Fit Decreasing approach. The priority criterion for ordering the applications is their execution deadline, prioritizing the applications with the shortest deadlines. The algorithm starts checking the allocation of the application from the device communities with the highest depth to the ones with the lowest, and the application is allocated in the first community with enough resources to allocate all the services of the application. If, after checking all the communities, the application has not been allocated, it will be available only in the cloud provider. The process for the same application is repeated as many times as the number of users in the system that request this application. Algorithm 1 shows the pseudo-code of our proposal. The algorithm goes through the applications (in ascending deadline order), the users that request them and the communities (in descending depth order), trying to allocate the services of the application in the devices of the community.
120
+
121
+ In this first step, we map the applications to communities, but the mapping of services remains to be defined. We separate the process into two steps because we mainly focus the first one (mapping applications to communities) on increasing the application availability, and the second one (mapping services of an application to devices in a device community) on the application deadlines. This second step is performed by the function placeServicesInDevices(), in line 15, and its details are explained in Section 4.2 and Algorithm 2.
122
+
123
+ Our algorithm checks if an application has been previously placed in a community (line 11) and, if not, it delegates the decision of placing the application in the community to the algorithm that checks whether the application services fit into the device community (Algorithm 2).
124
+
125
+ Algorithm 1 Device community-based application allocation
126
+ 1: $\mathbb{C}\gets$ calculate device communities
127
+ 2: IC $\leftarrow$ order communities $\mathbb{C}$ by descending depth
128
+ 3: A $\leftarrow$ order applications by ascending deadline
129
+ 4: appPlacement $\leftarrow$ $\emptyset$
130
+ 5: for app in A do
131
+ 6: U $\leftarrow$ get users requesting application app
132
+ 7: for user in U do
133
+ 8: dev $\leftarrow$ get device where user is connected
134
+ 9: for infCom in IC do
135
+ 10: if dev $\in$ infCom then
136
+ 11: if infCom $\in$ appPlacement[app] then
137
+ 12: "application app already placed in community infCom"
138
+ 13: break
139
+ 14: else
140
+ 15: if placeServicesInDevices(app,infCom) then
141
+ 16: appPlacement[app].append(infCom)
142
+ 17: update resource usages in infCom
143
+ 18: "placed application app in community infCom"
144
+ 19: break
145
+
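+ A compact Python rendering of Algorithm 1 follows as our own sketch: the application objects (with a deadline attribute), the lookup dictionaries, and the place_services_in_devices callable (which stands for Algorithm 2 and is assumed to reserve resources on success) are hypothetical names, not part of the paper.
+
+ ```python
+ def place_applications(apps, users_of, gateway_of, communities, place_services_in_devices):
+     """First-Fit Decreasing mapping of applications to device communities.
+     apps are processed by ascending deadline; communities must already be
+     ordered by descending dendrogram depth."""
+     app_placement = {app: [] for app in apps}
+     for app in sorted(apps, key=lambda a: a.deadline):
+         for user in users_of[app]:
+             dev = gateway_of[user]                      # gateway the user is attached to
+             for community in communities:
+                 if dev not in community:
+                     continue
+                 if community in app_placement[app]:
+                     break                               # app already deployed in this community
+                 placed, _ = place_services_in_devices(app, community)  # Algorithm 2
+                 if placed:                              # resource usage updated inside
+                     app_placement[app].append(community)
+                     break
+     return app_placement
+ ```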
146
+ # 4.2 Transitive Closure-based Application Partition
147
+
148
+ Once the mapping of a given application to a candidate community of devices has been performed by the first phase of the optimization algorithm, the second phase deals with the allocation of the services of the application to the devices in the community. We first partition the application into sets of services, and we check whether each of those service sets can be placed in just one device. If not, smaller sets are considered. The partition of the services into sets is based on our previous work [6], where we studied and validated the use of a distributed placement algorithm in which the service sets are created by considering the transitive closure of the services in the application graph.
149
+
150
+ The transitive closure of a directed graph indicates the nodes that are reachable from each of the nodes in the graph. If a vertex $j$ is reachable from a vertex $i$, it means that there is a path from $i$ to $j$. The reachability matrix of a graph is called the transitive closure of the graph, and the set of reachable nodes for a given node is called the transitive closure of that node [28].
151
+
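+ For illustration (our own toy example, using networkx), the transitive closure of every service of a small application graph can be obtained as follows:
+
+ ```python
+ import networkx as nx
+
+ # toy application graph: S0 is the entry point and requests S1 and S2; S2 requests S3
+ app = nx.DiGraph([("S0", "S1"), ("S0", "S2"), ("S2", "S3")])
+ tc = nx.transitive_closure(app)
+
+ # transitive closure of a service: itself plus every service it directly or indirectly requests
+ closure_of = {s: {s} | set(tc.successors(s)) for s in app.nodes}
+ print(closure_of["S0"])   # {'S0', 'S1', 'S2', 'S3'} -> candidates to co-locate with S0
+ ```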
152
+ Under the conditions of our domain problem, the transitive closure of a node can be understood as the set of services that are requested for the execution of the given service, i.e., the outgoing requests generated by a service when it receives an incoming request. If we are interested in reducing the response time of the application execution, the services of the transitive closure should be allocated in the same device to reduce the communication delays between them, since the network delay is 0.0 for request messages inside the same device. Moreover, the best case is when all the services of an application are allocated in the same device, but this is limited by the resource constraint (Equation 2).
153
+
154
+ We also propose a First-Fit algorithm for this second phase (Algorithm 2), which orders the sets of services from the ones with the biggest sizes (a single transitive closure with all the services) to the smallest ones (the transitive closures with only one node or with the loops in the service flow), and tries to place each of those sets of services in the same device. The devices are ordered by a fitness value, which is the theoretical user perceived response time. This value is obtained by adding the network latency between the device and the user and the execution time of all the
155
+
156
+ ![](images/f0da09f8b81d1b3384485f0faa16d87af12aca32bd47b350a951825911ed060b.jpg)
157
+ Iter. 1
158
+ Fig. 3. Example of service transitive closures.
159
+
160
+ ![](images/6a6c64fccc964409e836ca2dbb792652b9617033c3e80781bdea05f1ec0e3d72.jpg)
161
+ Iter. 2
162
+
163
+ ![](images/d6bdc432e54c1dc6763a03abc153d17c247f5231474752aa6eb5b88aa2b972a8.jpg)
164
+ Iter. 3
165
+
166
+ ![](images/392749778008f722a0585f5e5b1b33e743bc12f8f1387326d2e45711a8e35ebb.jpg)
167
+ Iter. 4
168
+
169
+ Algorithm 2 Transitive closure-based service allocation
170
+ 1: function PLACESERVICESINDEVICES(app, infCom)
171
+ 2: TC $\leftarrow$ generate transitive closure partitions for app
172
+ 3: D $\leftarrow$ order devices in infCom by response time
173
+ 4: SP $\leftarrow$ $\emptyset$ /*Services already placed*/
174
+ 5: servPlacement $\leftarrow$ $\emptyset$
175
+ 6: for dev in D do
176
+ 7: for appPartition in TC do
177
+ 8: for closure in appPartition do
178
+ 9: if (closure not in SP) and (closure fits in dev) then
179
+ 10: SP = SP $\cup$ closure
180
+ 11: for service in closure do
181
+ 12: servPlacement[service] = dev
182
+ 13: update resource usages in dev
183
+ 14: if SP == app then
184
+ 15: return True, servPlacement
185
+ 16: return False, $\emptyset$
186
+
187
+ services in the device. This prioritizes the devices that are both closer to the users and faster in execution. By this, the second step of the algorithm optimizes the user perceived response time and, consequently, improves the deadline satisfaction ratio.
188
+
189
+ Initially, Algorithm 2 goes through the devices ordered by the fitness value and tries to allocate as many services as possible in the devices with the highest values. For the first device, it first tries to allocate all the services of the application. If they do not fit, the service set is split into several sets: one for the entry-point service and one additional set for the transitive closure of each of the neighbor services of the entry-point one; it then checks whether any of those new sets fits in the first device. This is recursively repeated for each transitive closure set that contains services not previously allocated. Fig. 3 shows an example of how the transitive closures of the services are generated along the iterations of the algorithm that partitions the services of the application.
190
+
191
+ Once all the service sets have been evaluated for placement in the first device, this process is sequentially repeated over the rest of the devices for the unallocated services. If, after considering all the devices, there are still unallocated services, the mapping of the application to the current device community is rejected. Consequently, the first phase of the algorithm has to consider a larger community for the placement.
192
+
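+ The progressive splitting of the service sets described above (and illustrated in Fig. 3) could be sketched as follows; this is our own reading of the procedure, with hypothetical helper names, and it reuses the closure_of mapping from the earlier transitive closure snippet.
+
+ ```python
+ def closure_partitions(app, entry_point, closure_of):
+     """Candidate service sets per iteration: first the closure of the entry-point
+     service (the whole application); then, at each step, the already split-off
+     services as singletons plus the closures of the next frontier of neighbours."""
+     partitions = [[closure_of[entry_point]]]          # Iter. 1: everything together
+     visited, frontier = set(), [entry_point]
+     while frontier:
+         visited |= set(frontier)
+         next_frontier = list(dict.fromkeys(
+             n for s in frontier for n in app.successors(s) if n not in visited))
+         if not next_frontier:
+             break
+         partitions.append([{s} for s in visited] +
+                           [closure_of[n] for n in next_frontier])
+         frontier = next_frontier
+     return partitions
+ ```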
193
+ # 5 EXPERIMENTAL EVALUATION
194
+
195
+ We defined random characteristics for the elements of our simulation experiments. We modeled the parameters of the elements in the domain with uniform distributions and the minimum and maximum values are shown in Table 1.
196
+
197
+ The service applications were generated randomly following a growing network (GN) graph structure. GN graphs are built by adding nodes one at a time with a link to one previously added node. The network infrastructure was created as a random Barabasi-Albert network with 100 fog
198
+
199
+ TABLE 1 Values of the parameters for the experiment characterization
200
+
201
+ | Parameter | Symbol | min.-max. |
+ | --- | --- | --- |
+ | **Network** | | |
+ | Propagation time (ms) | $PR_{NL_{D_i,D_j}}$ | 5 |
+ | Bandwidth (bytes/ms) | $BW_{NL_{D_i,D_j}}$ | 75000 |
+ | **Fog device** | | |
+ | Resources (res. units) | $AR_{D_i}$ | 10-25 |
+ | Speed (Instrs/ms) | $IPT_{D_i}$ | 100-1000 |
+ | **Application** | | |
+ | Deadline (ms) | $DL_{APP_x}$ | 300-50000 |
+ | Services (number) | | 2-10 |
+ | Resources (res. units) | $CR_{S_u}$ | 1-6 |
+ | Execution (Instrs/req) | $EI_{MS_{S_u,S_v}}$ | 20000-60000 |
+ | Message size (bytes) | $SZ_{MS_{S_u,S_v}}$ | 1500000-4500000 |
+ | **IoT device** | | |
+ | Request rate (1/ms) | | 1/1000-1/200 |
+ | Popularity (prob.) | | 0.25 |
202
+
203
+ devices. The betweenness centrality index is a topological metric that measures the number of shortest paths that go through a device. The gateway devices were selected from the nodes placed at the edge of the network, i.e., the nodes with the smallest betweenness centrality indices. We selected the $25\%$ of devices with the lowest centrality value to behave as gateways (25 gateways). The number and the specific applications requested from the IoT devices connected to the gateways were determined with a popularity distribution modeled with a uniform distribution.
204
+
205
+ The random experimental scenario finally resulted in 20 applications with 106 services, which needed a total of 360 resource units, while the fog devices were able to offer up to 1874 resource units. 70 IoT devices (or users) were deployed, and they generated, on average, one application request every 557 ms (a rate of $1/557$ requests/ms).
206
+
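+ An approximate reproduction of the random scenario with networkx is sketched below; it is our own illustration, and the Barabási-Albert attachment parameter and the random seed are assumptions not stated in the text.
+
+ ```python
+ import random
+ import networkx as nx
+
+ random.seed(0)
+ infra = nx.barabasi_albert_graph(100, 2)                     # 100 fog devices
+ centrality = nx.betweenness_centrality(infra)
+ gateways = sorted(infra.nodes, key=centrality.get)[:25]      # 25% lowest-centrality nodes
+
+ # one growing-network (GN) graph per application, 2-10 services each;
+ # nx.gn_graph directs edges towards earlier nodes, so they may need reversing
+ # depending on the request-flow convention used
+ apps = [nx.gn_graph(random.randint(2, 10)) for _ in range(20)]
+ ```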
207
+ We compared the results of our proposal with the ones obtained from the implementation of an integer linear programming (ILP) service allocation optimizer. As we mentioned in Section 2, ILP solutions are the most numerous in fog service placement optimization.
208
+
209
+ The experiments were executed using the YAFS simulator that we had previously developed for other research works. This simulator is able to include graph-based network topologies and pluggable fog service placement policies, apart from other features that, to the best of our knowledge, are not provided by other fog simulators, such as node failures, or dynamic service placement and routing. The simulator is open source and it can be downloaded from its code repository [29].
210
+
211
+ The experiment results are presented and analyzed in two separate sections. Section 5.1 includes the analysis of the results obtained with the YAFS simulator. Those results compare the user perceived response time and the availability of the applications for the IoT devices. Section 5.2 presents an analysis of the service placement obtained with both optimization policies (our proposal and the ILP
212
+
213
+ one).
214
+
215
+ # 5.1 Simulation Results
216
+
217
+ A first simulation scenario included failures in the fog devices to study the availability of the services as the nodes go down. The simulation included random and permanent failures in the nodes, starting with all the devices (100 nodes) alive and finishing the simulation with failures in all of them. The failures were generated uniformly along the simulation. The results of this simulation are presented in Fig. 4 and show the QoS in terms of the total number of requests that are executed satisfying the application deadline. A request may fail to satisfy the deadline either because its response time is higher than the deadline or because no device with the services of the requested application is reachable from the IoT device, since all the paths between them contain failed devices. Three data series are represented in Fig. 4: one for the total number of requests that are sent from the IoT devices (labeled with Total num. of requests), one for the number of requests that are executed before the deadline when the placement of our solution is considered (labeled with Partition), and one for the number of requests that satisfied the deadline with the ILP policy (labeled with ILP).
218
+
219
+ It is observed that our approach results in a higher number of satisfied requests, mainly during the first half of the simulation (up to 50 failed devices). In the second part of the simulation, improvements in the QoS are also observed, but they are less significant with regard to the ILP.
220
+
221
+ For the sake of a deeper analysis of the availability, it has also been measured in terms of the number of IoT devices that are able to request their applications, thanks to the fact that all the services they need are reachable through network paths without failed devices. This is represented in Fig. 5, where the y-axis is the number of IoT devices that are able to request their applications, and the x-axis is the number of devices that have failed. The figure also includes the hypothetical and impossible case, due to the resource limit constraint, of allocating all the services in the gateways (labeled as All in gtws.). This is the best case and is useful to compare the solutions with the best upper bound. These results confirm that our proposal is able to increase the availability of the system when failures happen in the fog devices.
222
+
223
+ A second simulation scenario did not include failures in the fog devices and was used to study the user perceived response time of the applications. These response times were measured as the time between the generation of the user request in the IoT device and the completion of all the application services. The results were measured independently for each application-IoT device pair. They are summarized in Fig. 6. Each plot in the figure represents the response times of an application, and each item on the x-axis corresponds to one gateway that has an IoT device (or user) that requests the application. The results of our solution are labeled as Partition and the results of the ILP approach are labeled as ILP.
224
+
225
+ It is observed that the placement obtained with our proposal does not reduce the response time for all the applications, but it is shorter for 13 of the 20 applications.
226
+
227
+ ![](images/1d291efd4cb94d7aecc2b27bd5237b70b1b449ca7b7d407ce67a9d60c8814e4b.jpg)
228
+ Fig. 4. Evolution of the QoS with regard to the fail of fog devices, in terms of the number of requests which satisfy application deadlines $\left(|RT_{RQ_{US_a,APP_x}^n} < DL_{APP_x}|\right)$ compared with the total number of requests $\left(|RQ_{US_a,APP_x}^n|\right)$ .
229
+
230
+ ![](images/ef8958092fb54eff69d9e2a20b951d9dcd2959f382473650aa1df00bbe0fc9d1.jpg)
231
+ Fig. 5. Number of IoT devices that get services in regard with the number of failed fog devices (availability $(APP_{x})$ ).
232
+
233
+ Additionally, we can observe that for some applications the response time is significantly degraded. This is explained because both policies prioritize applications with shorter deadlines over the ones with longer deadlines. Nevertheless, there are fewer of these extreme cases, and with shorter times, when our policy is used: our policy only damages application 15, with a time of around 1000 ms, compared with four applications of up to 400 s with the ILP policy (around 400000 ms for application 1, 300000 ms for application 8, 200000 ms for application 12, and 70000 ms for application 2).
234
+
235
+ In summary, our service placement policy shows a better behavior in terms of availability of the services, which also results in a better QoS in the system. On the contrary, the response time of some applications is degraded, but this behavior is also observed with the ILP policy, which generates even worse response times.
236
+
237
+ # 5.2 Placement Results
238
+
239
+ This section is devoted to comparing the placement of the services obtained from the execution of our algorithm with that obtained from the ILP one. This analysis is included to give a brief idea of how the services are spread across the fog devices.
240
+
241
+ Firstly, Fig. 7a shows that the placement of the services differs considerably between both placement policies. A mark in the plot indicates that a given service (y-axis) is placed in a given device (x-axis). Taking into account that the services of the same application have consecutive identifiers, it is also observed that in the case of our policy
242
+
243
+ (Partition), there are more cases of devices that allocate several services of the same application (consecutive marks in the same device).
244
+
245
+ Fig. 7b represents the resource usage of the fog devices. The y-axis represents the percentage of resources that are used by the services allocated in a given device, and the x-axis shows the devices ordered by these percentages in ascending order. From the figure, we can observe that in the placement of our solution there are almost double the number of nodes that do not allocate any service (a resource usage of 0.0), and there is no device that is fully used (a resource usage of 1.0), whereas in the case of the ILP almost 40 devices have a $100\%$ usage of their resources. The first interpretation of these results is that the scale level of our solution is smaller than that of the ILP: we calculated that our policy deployed 357 service instances (1161 resource units) and the ILP deployed 374 (1203 resource units), around $5\%$ more services ($3.6\%$ more resources). Consequently, our solution is able to obtain better QoS and availability with a lower use of the fog resources (a smaller number of instances). The second interpretation is that the services are more evenly distributed, since the workload of the devices is smaller, avoiding the saturation of the devices and keeping the system in a more flexible state for allocating new service instances.
246
+
247
+ Finally, Fig. 7c shows the relationship between the service placement and the hop distance between the allocated service and the IoT device that requests it. A point in the scatter plot indicates how many IoT devices are at a given distance from a service of the application they request. For example, in the case of our policy, there are around 100 services that are allocated in the gateways where the IoT devices are connected (a hop distance of 0.0). On the contrary, the ILP policy allocates more than 160 services in the gateways, the point (0, 160) in the plot. We observe that the services are distributed more evenly and placed further from the gateways (higher distances) in the case of our policy. Consequently, the ILP is able to place the services closer to the IoT devices. Despite this, our policy shows a better general behavior also in terms of application response time.
248
+
249
+ ![](images/7dde587e6e3a1ef8227348ba9ec53164170addba766968a8274a959cb322fe72.jpg)
250
+ Fig. 6. User perceived response times of the applications for each user (or IoT device) in the system $(RT_{RQ_{U S_{a},A P P_{x}}^{n}})$ .
251
+
252
+ ![](images/7061f4795f3d236abf5caacb3b908fcd825dab9f10a3fb0d65648a87c087f5e1.jpg)
253
+ (a) Allocation of the services in the fog devices $(P, p_{ui} \forall S_u, D_i)$ .
254
+
255
+ ![](images/5a4c4ea2cfbe257089515f2ba900beb04edff453e025e4fe46ec6e9a4c818a4d.jpg)
256
+ (b) Resource usage of the fog devices $\begin{array}{r}\sum_{u = 1}^{|S_u|}\left(p_{ui}\times CR_{S_u}\right),\forall D_i) \end{array}$
257
+ Fig. 7. Comparison of the services placement between our partition-based algorithm and the ILP optimizer.
258
+
259
+ ![](images/880d4454a148a72613fd918941578e188755339fcd9116e5ff8af2833c8e7fbd.jpg)
260
+ (c) Service allocation in terms of hop distance with the IoT devices.
261
+
262
+ # 6 CONCLUSION
263
+
264
+ We have proposed an algorithm for service placement in fog devices based on the partition of the fog devices (into communities) and the services of the applications (into transitive closures) for the optimization of the QoS of the system and the service availability for the users (or IoT devices).
265
+
266
+ Two simulation scenarios have been executed, one including failures in the fog devices and another one without failures, to measure the response time of the applications, the service availability, and the number of requests that were served satisfying the application deadlines. The service placement obtained with our policy resulted in a higher QoS and service availability, with regard to the placement
267
+
268
+ of an ILP-based algorithm. In the case of the user perceived response time, our policy obtained better times for 13 of the total 20 applications. Both policies showed a high degradation of service for some applications, but in the case of the ILP, this degradation happened in more applications and resulted in longer response times.
269
+
270
+ As future work, the use of complex networks and graph theory for the optimization of other parameters of the system, such as service cost, network usage, migration cost, and service provider cost, could be studied. Due to the nature of the proposed policy, the optimization of these other metrics would probably need to be combined with other types of heuristics to obtain suitable results; consequently, further research is necessary.
271
+
272
+ # ACKNOWLEDGMENTS
273
+
274
+ This research was supported by the Spanish Government (Agencia Estatal de Investigación) and the European Commission (Fondo Europeo de Desarrollo Regional) through grant number TIN2017-88547-P (MINECO/AEI/FEDER, UE).
275
+
276
+ # REFERENCES
277
+
278
+ [1] OpenFog Consortium, "OpenFog reference architecture for fog computing," Tech. Rep., February 2017.
279
+ [2] S. Filiposka, A. Mishev, and K. Gilly, "Community-based allocation and migration strategies for fog computing," in 2018 IEEE Wireless Communications and Networking Conference (WCNC), April 2018, pp. 1-6.
280
+ [3] Z. Wen, R. Yang, P. Garraghan, T. Lin, J. Xu, and M. Rovatsos, "Fog orchestration for internet of things services," IEEE Internet Computing, vol. 21, no. 2, pp. 16-24, Mar 2017.
281
+ [4] O. Skarlat, M. Nardelli, S. Schulte, M. Borkowski, and P. Leitner, "Optimized IoT service placement in the fog," Service Oriented Computing and Applications, Oct 2017. [Online]. Available: https://doi.org/10.1007/s11761-017-0219-8
282
+ [5] A. Brogi and S. Forti, "Qos-aware deployment of IoT applications through the fog," IEEE Internet of Things Journal, vol. 4, no. 5, pp. 1185-1192, Oct 2017.
283
+ [6] C. Guerrero, I. Lera, and C. Juiz, "A lightweight decentralized service placement policy for performance optimization in fog computing," Journal of Ambient Intelligence and Humanized Computing, Jun 2018. [Online]. Available: https://doi.org/10.1007/s12652-018-0914-0
284
+ [7] L. Ni, J. Zhang, C. Jiang, C. Yan, and K. Yu, "Resource allocation strategy in fog computing based on priced timed petri nets," IEEE Internet of Things Journal, vol. 4, no. 5, pp. 1216-1228, Oct 2017.
285
+ [8] R. Urgaonkar, S. Wang, T. He, M. Zafer, K. Chan, and K. K. Leung, "Dynamic service migration and workload scheduling in edge-clouds," Performance Evaluation, vol. 91, no. Supplement C, pp. 205 - 228, 2015, special Issue: Performance 2015. [Online]. Available: http://www.sciencedirect.com/science/article/pii/S0166531615000619
286
+ [9] L. Gu, D. Zeng, S. Guo, A. Barnawi, and Y. Xiang, "Cost efficient resource management in fog computing supported medical cyberphysical system," IEEE Transactions on Emerging Topics in Computing, vol. 5, no. 1, pp. 108-119, Jan 2017.
287
+ [10] K. Velasquez, D. P. Abreu, M. Curado, and E. Monteiro, "Service placement for latency reduction in the internet of things," Annals of Telecommunications, vol. 72, no. 1, pp. 105-115, Feb 2017. [Online]. Available: https://doi.org/10.1007/s12243-016-0524-9
288
+ [11] Z. Huang, K.-J. Lin, S.-Y. Yu, and J. Y. Jen Hsu, "Co-locating services in IoT systems to minimize the communication energy cost," Journal of Innovation in Digital Ecosystems, vol. 1, no. 1, pp. 47 - 57, 2014. [Online]. Available: http://www.sciencedirect.com/science/article/pii/S2352664515000061
289
+ [12] L. Yang, J. Cao, G. Liang, and X. Han, "Cost aware service placement and load dispatching in mobile cloud systems," IEEE Transactions on Computers, vol. 65, no. 5, pp. 1440-1452, May 2016.
290
+ [13] V. B. C. Souza, W. Ramírez, X. Masip-Bruin, E. Marín-Tordera, G. Ren, and G. Tashakor, "Handling service allocation in combined fog-cloud scenarios," in 2016 IEEE International Conference on Communications (ICC), May 2016, pp. 1-5.
291
+ [14] D. Zeng, L. Gu, S. Guo, Z. Cheng, and S. Yu, "Joint optimization of task scheduling and image placement in fog computing supported software-defined embedded system," IEEE Transactions on Computers, vol. 65, no. 12, pp. 3702-3712, Dec 2016.
292
+ [15] S. Filiposka, A. Mishev, and C. Juiz, "Community-based vm placement framework," The Journal of Supercomputing, vol. 71, no. 12, pp. 4504-4528, Dec 2015. [Online]. Available: https://doi.org/10.1007/s11227-015-1546-1
293
+ [16] C. Guerrero, I. Lera, and C. Juiz, "On the influence of fog colonies partitioning in fog application makespan," in 2018 IEEE 6th International Conference on Future Internet of Things and Cloud (FiCloud), August 2018.
294
+ [17] I. Lera, C. Guerrero, and C. Juiz, "Comparing centrality indices for network usage optimization of data placement policies in fog devices," in 2018 Third International Conference on Fog and Mobile Edge Computing (FMEC), April 2018, pp. 115-122.
295
+
296
+ [18] Y. Elkhatib, B. Porter, H. B. Ribeiro, M. F. Zhani, J. Qadir, and E. Riviere, "On using micro-clouds to deliver the fog," IEEE Internet Computing, vol. 21, no. 2, pp. 8-15, Mar 2017.
297
+ [19] F. Bonomi, R. Milito, P. Natarajan, and J. Zhu, Fog Computing: A Platform for Internet of Things and Analytics. Cham: Springer International Publishing, 2014, pp. 169-186.
298
+ [20] A. Yousefpour, G. Ishigaki, R. Gour, and J. P. Jue, "On reducing iot service delay via fog offloading," IEEE Internet of Things Journal, vol. PP, no. 99, pp. 1-1, 2018.
299
+ [21] M. Vogler, J. M. Schleicher, C. Inzinger, and S. Dustdar, "A scalable framework for provisioning large-scale IoT deployments," ACM Trans. Internet Technol., vol. 16, no. 2, pp. 11:1-11:20, Mar. 2016. [Online]. Available: http://doi.acm.org/10.1145/2850416
300
+ [22] A. Krylovskiy, M. Jahn, and E. Patti, "Designing a smart city internet of things platform with microservice architecture," in 2015 3rd International Conference on Future Internet of Things and Cloud, Aug 2015, pp. 25-30.
301
+ [23] E. Saurez, K. Hong, D. Lillethun, U. Ramachandran, and B. Ottenwalder, "Incremental deployment and migration of geo-distributed situation awareness applications in the fog," in Proceedings of the 10th ACM International Conference on Distributed and Event-based Systems, ser. DEBS '16. New York, NY, USA: ACM, 2016, pp. 258-269. [Online]. Available: http://doi.acm.org/10.1145/2933267.2933317
302
+ [24] A. Balalaie, A. Heydarnoori, and P. Jamshidi, "Microservices architecture enables devops: Migration to a cloud-native architecture," IEEE Software, vol. 33, no. 3, pp. 42-52, May 2016.
303
+ [25] M. E. J. Newman and M. Girvan, "Finding and evaluating community structure in networks," Phys. Rev. E, vol. 69, no. 2, p. 026113, Feb. 2004. [Online]. Available: http://link.aps.org/doi/10.1103/PhysRevE.69.026113
304
+ [26] S. Fortunato, V. Latora, and M. Marchiori, "Method to find community structures based on information centrality," Phys. Rev. E, vol. 70, p. 056104, Nov 2004. [Online]. Available: https://link.aps.org/doi/10.1103/PhysRevE.70.056104
305
+ [27] A. Alahmadi, A. Alnowiser, M. M. Zhu, D. Che, and P. Ghodous, "Enhanced first-fit decreasing algorithm for energy-aware job scheduling in cloud," in 2014 International Conference on Computational Science and Computational Intelligence, vol. 2, March 2014, pp. 69-74.
306
+ [28] H. S. Warren Jr, "A modification of warshall's algorithm for the transitive closure of binary relations," Communications of the ACM, vol. 18, no. 4, pp. 218-220, 1975.
307
+ [29] I. Lera and C. Guerrero, "Yafs, yet another fog simulator," https://github.com/acsicuib/YAFS, accessed: 2018-02-03.
308
+
309
+ ![](images/d06f4273a2fb8d6577d64fec81d97ae2b6d367555ef5dc862426499cb1bfe7e4.jpg)
310
+
311
+ Isaac Lera received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2012. He is an assistant professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research lines are semantic web, open data, system performance, educational innovation and human mobility. He has authored papers in several journals and international conferences.
312
+
313
+ ![](images/d8d825a07523da247e5fbbc78fe46d563cd15790c3ad9ba2187f0954f1dc9ff2.jpg)
314
+
315
+ Carlos Guerrero received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2012. He is an assistant professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research interests include web performance, resource management, web engineering, and cloud computing. He has authored around 40 papers in international conferences and journals.
316
+
317
+ ![](images/e87d17a2916bfdfa643a61628ccf93722b8668ac63f425b092c8fb00bc67b95e.jpg)
318
+
319
+ Carlos Juiz received his Ph.D. degree in Computer Engineering at the Balearic Islands University in 2001. He is an associate professor of Computer Architecture and Technology at the Computer Science Department of the University of the Balearic Islands. His research interests include performance engineering, cloud computing and IT governance. He has authored around 150 papers in different international conferences and journals.
2401.12xxx/2401.12690/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9702ccf951d49ab6680d36a3168c8fc6413193b1f50812560ea4937b6e2e4979
3
+ size 390325
2401.12xxx/2401.12690/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.12xxx/2401.12698/5b3c828e-248a-44b0-ba84-ef9eb969c11d_content_list.json ADDED
The diff for this file is too large to render. See raw diff