SlowGuess committed on
Commit 7e6449b · verified · 1 Parent(s): a17b212

Add Batch 93a3a9ac-8263-4260-bcae-7224aa0a5438

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .gitattributes +64 -0
  2. 2202.01xxx/2202.01727/b96cd2bf-bb11-4433-9686-55bf4d6acf3a_content_list.json +2077 -0
  3. 2202.01xxx/2202.01727/b96cd2bf-bb11-4433-9686-55bf4d6acf3a_model.json +0 -0
  4. 2202.01xxx/2202.01727/b96cd2bf-bb11-4433-9686-55bf4d6acf3a_origin.pdf +3 -0
  5. 2202.01xxx/2202.01727/full.md +423 -0
  6. 2202.01xxx/2202.01727/images.zip +3 -0
  7. 2202.01xxx/2202.01727/layout.json +0 -0
  8. 2202.01xxx/2202.01741/b628ffb0-e0c1-447e-8e39-ad7ab7216401_content_list.json +0 -0
  9. 2202.01xxx/2202.01741/b628ffb0-e0c1-447e-8e39-ad7ab7216401_model.json +0 -0
  10. 2202.01xxx/2202.01741/b628ffb0-e0c1-447e-8e39-ad7ab7216401_origin.pdf +3 -0
  11. 2202.01xxx/2202.01741/full.md +0 -0
  12. 2202.01xxx/2202.01741/images.zip +3 -0
  13. 2202.01xxx/2202.01741/layout.json +0 -0
  14. 2202.01xxx/2202.01747/5c8d712c-7a56-4df1-8e5c-845b72d69caf_content_list.json +0 -0
  15. 2202.01xxx/2202.01747/5c8d712c-7a56-4df1-8e5c-845b72d69caf_model.json +0 -0
  16. 2202.01xxx/2202.01747/5c8d712c-7a56-4df1-8e5c-845b72d69caf_origin.pdf +3 -0
  17. 2202.01xxx/2202.01747/full.md +865 -0
  18. 2202.01xxx/2202.01747/images.zip +3 -0
  19. 2202.01xxx/2202.01747/layout.json +0 -0
  20. 2202.01xxx/2202.01771/e395822f-9d01-412f-9b34-5760d560fda9_content_list.json +0 -0
  21. 2202.01xxx/2202.01771/e395822f-9d01-412f-9b34-5760d560fda9_model.json +0 -0
  22. 2202.01xxx/2202.01771/e395822f-9d01-412f-9b34-5760d560fda9_origin.pdf +3 -0
  23. 2202.01xxx/2202.01771/full.md +668 -0
  24. 2202.01xxx/2202.01771/images.zip +3 -0
  25. 2202.01xxx/2202.01771/layout.json +0 -0
  26. 2202.01xxx/2202.01855/5166ae8b-4e4d-4f8b-a350-11682cc56b73_content_list.json +1361 -0
  27. 2202.01xxx/2202.01855/5166ae8b-4e4d-4f8b-a350-11682cc56b73_model.json +1683 -0
  28. 2202.01xxx/2202.01855/5166ae8b-4e4d-4f8b-a350-11682cc56b73_origin.pdf +3 -0
  29. 2202.01xxx/2202.01855/full.md +249 -0
  30. 2202.01xxx/2202.01855/images.zip +3 -0
  31. 2202.01xxx/2202.01855/layout.json +0 -0
  32. 2202.01xxx/2202.01875/70c382aa-6520-4228-8f67-cbc134991973_content_list.json +0 -0
  33. 2202.01xxx/2202.01875/70c382aa-6520-4228-8f67-cbc134991973_model.json +0 -0
  34. 2202.01xxx/2202.01875/70c382aa-6520-4228-8f67-cbc134991973_origin.pdf +3 -0
  35. 2202.01xxx/2202.01875/full.md +379 -0
  36. 2202.01xxx/2202.01875/images.zip +3 -0
  37. 2202.01xxx/2202.01875/layout.json +0 -0
  38. 2202.01xxx/2202.01938/0bdc1864-0877-4963-a9de-68c2b5f8ab9e_content_list.json +1574 -0
  39. 2202.01xxx/2202.01938/0bdc1864-0877-4963-a9de-68c2b5f8ab9e_model.json +2086 -0
  40. 2202.01xxx/2202.01938/0bdc1864-0877-4963-a9de-68c2b5f8ab9e_origin.pdf +3 -0
  41. 2202.01xxx/2202.01938/full.md +330 -0
  42. 2202.01xxx/2202.01938/images.zip +3 -0
  43. 2202.01xxx/2202.01938/layout.json +0 -0
  44. 2202.01xxx/2202.01971/dd56d8c1-727f-4bf1-ae09-4c34d53467db_content_list.json +0 -0
  45. 2202.01xxx/2202.01971/dd56d8c1-727f-4bf1-ae09-4c34d53467db_model.json +0 -0
  46. 2202.01xxx/2202.01971/dd56d8c1-727f-4bf1-ae09-4c34d53467db_origin.pdf +3 -0
  47. 2202.01xxx/2202.01971/full.md +468 -0
  48. 2202.01xxx/2202.01971/images.zip +3 -0
  49. 2202.01xxx/2202.01971/layout.json +0 -0
  50. 2202.01xxx/2202.01993/731e1a69-ebc4-471a-a367-2cb8de686eb2_content_list.json +0 -0
.gitattributes CHANGED
@@ -8183,3 +8183,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
8183
  2202.05xxx/2202.05146/637c471c-e0aa-400c-967e-126f59e423f7_origin.pdf filter=lfs diff=lfs merge=lfs -text
8184
  2202.05xxx/2202.05679/57f2869f-03fe-4463-9808-96425847bc7f_origin.pdf filter=lfs diff=lfs merge=lfs -text
8185
  2203.07xxx/2203.07814/d7795d68-0200-452e-9f54-cf5731f27dc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
8186
+ 2202.01xxx/2202.01727/b96cd2bf-bb11-4433-9686-55bf4d6acf3a_origin.pdf filter=lfs diff=lfs merge=lfs -text
8187
+ 2202.01xxx/2202.01741/b628ffb0-e0c1-447e-8e39-ad7ab7216401_origin.pdf filter=lfs diff=lfs merge=lfs -text
8188
+ 2202.01xxx/2202.01747/5c8d712c-7a56-4df1-8e5c-845b72d69caf_origin.pdf filter=lfs diff=lfs merge=lfs -text
8189
+ 2202.01xxx/2202.01771/e395822f-9d01-412f-9b34-5760d560fda9_origin.pdf filter=lfs diff=lfs merge=lfs -text
8190
+ 2202.01xxx/2202.01855/5166ae8b-4e4d-4f8b-a350-11682cc56b73_origin.pdf filter=lfs diff=lfs merge=lfs -text
8191
+ 2202.01xxx/2202.01875/70c382aa-6520-4228-8f67-cbc134991973_origin.pdf filter=lfs diff=lfs merge=lfs -text
8192
+ 2202.01xxx/2202.01938/0bdc1864-0877-4963-a9de-68c2b5f8ab9e_origin.pdf filter=lfs diff=lfs merge=lfs -text
8193
+ 2202.01xxx/2202.01971/dd56d8c1-727f-4bf1-ae09-4c34d53467db_origin.pdf filter=lfs diff=lfs merge=lfs -text
8194
+ 2202.01xxx/2202.01993/731e1a69-ebc4-471a-a367-2cb8de686eb2_origin.pdf filter=lfs diff=lfs merge=lfs -text
8195
+ 2202.01xxx/2202.01994/882a4a89-770c-4f3e-af5f-d7693705f515_origin.pdf filter=lfs diff=lfs merge=lfs -text
8196
+ 2202.01xxx/2202.01999/74152c93-d52f-4bd8-992f-374373fe25c8_origin.pdf filter=lfs diff=lfs merge=lfs -text
8197
+ 2202.02xxx/2202.02005/f9317c8f-074a-4b12-b05b-a1a0affe154a_origin.pdf filter=lfs diff=lfs merge=lfs -text
8198
+ 2202.02xxx/2202.02016/ffd3d9cc-3876-4711-9e0c-89ac840859f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
8199
+ 2202.02xxx/2202.02098/6816fc6f-fd8b-48bc-a739-d7a158b74ec6_origin.pdf filter=lfs diff=lfs merge=lfs -text
8200
+ 2202.02xxx/2202.02113/e2d6dc34-9d1d-474b-aef8-f76019c9de57_origin.pdf filter=lfs diff=lfs merge=lfs -text
8201
+ 2202.02xxx/2202.02179/42989202-a44b-48b5-af78-3009f9da04e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
8202
+ 2202.02xxx/2202.02190/f027b5d1-6175-4712-bec6-1966d3b35e9b_origin.pdf filter=lfs diff=lfs merge=lfs -text
8203
+ 2202.02xxx/2202.02195/6a8636f1-cc24-4b95-b0f2-1b6722f87351_origin.pdf filter=lfs diff=lfs merge=lfs -text
8204
+ 2202.02xxx/2202.02200/93a3c83f-412f-4ab5-b4ac-2b76e28653be_origin.pdf filter=lfs diff=lfs merge=lfs -text
8205
+ 2202.02xxx/2202.02215/6eb7ff28-e605-4983-9609-1a3dc0051189_origin.pdf filter=lfs diff=lfs merge=lfs -text
8206
+ 2202.02xxx/2202.02296/65073cac-69d2-4498-99ad-ee2bfce61c03_origin.pdf filter=lfs diff=lfs merge=lfs -text
8207
+ 2202.02xxx/2202.02299/aa55be64-8ec1-4a92-80af-5424630a4d59_origin.pdf filter=lfs diff=lfs merge=lfs -text
8208
+ 2202.02xxx/2202.02305/dbc995a3-5d4b-462e-a79e-ac128b7b54b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
8209
+ 2202.02xxx/2202.02306/17af0a16-7432-47d8-9ca6-6f2b0702c3e6_origin.pdf filter=lfs diff=lfs merge=lfs -text
8210
+ 2202.02xxx/2202.02312/2a371881-e622-4b5d-b561-b451adf664b2_origin.pdf filter=lfs diff=lfs merge=lfs -text
8211
+ 2202.02xxx/2202.02317/6adc26ee-e948-4d97-8e17-fa87d63f61cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
8212
+ 2202.02xxx/2202.02340/6ff75886-7041-4839-b994-0aebf7b87ab9_origin.pdf filter=lfs diff=lfs merge=lfs -text
8213
+ 2202.02xxx/2202.02389/cd261a59-0ab8-4ad1-8a54-349ae66bb668_origin.pdf filter=lfs diff=lfs merge=lfs -text
8214
+ 2202.02xxx/2202.02397/0801120b-43b4-4630-aa93-098b1b05514a_origin.pdf filter=lfs diff=lfs merge=lfs -text
8215
+ 2202.02xxx/2202.02414/abafd505-0004-49ce-a09d-29dcdcd3fd7f_origin.pdf filter=lfs diff=lfs merge=lfs -text
8216
+ 2202.02xxx/2202.02435/08a29f0a-d9f6-4136-8229-7228e97dc2a0_origin.pdf filter=lfs diff=lfs merge=lfs -text
8217
+ 2202.02xxx/2202.02440/2ea6edd3-7f7c-4156-9cc2-6ddabffe4982_origin.pdf filter=lfs diff=lfs merge=lfs -text
8218
+ 2202.02xxx/2202.02446/323ce066-fc38-4ac3-9f0c-08ed5404299c_origin.pdf filter=lfs diff=lfs merge=lfs -text
8219
+ 2202.02xxx/2202.02466/3d68bce7-e574-4e0e-ae3e-a1382fc1b360_origin.pdf filter=lfs diff=lfs merge=lfs -text
8220
+ 2202.02xxx/2202.02472/49a680c1-57ca-4fdb-83c7-8dc220d47d1f_origin.pdf filter=lfs diff=lfs merge=lfs -text
8221
+ 2202.02xxx/2202.02514/c03ff0b4-3d9c-468e-acb6-835d2ed85d33_origin.pdf filter=lfs diff=lfs merge=lfs -text
8222
+ 2202.02xxx/2202.02519/61c512ff-80b2-4326-9aaf-889e6289d4c5_origin.pdf filter=lfs diff=lfs merge=lfs -text
8223
+ 2202.02xxx/2202.02526/e23c9d91-e41d-44fd-9548-2263183a8e83_origin.pdf filter=lfs diff=lfs merge=lfs -text
8224
+ 2202.02xxx/2202.02541/1a13f788-25d1-4c5b-b3c4-1b536d9eb45e_origin.pdf filter=lfs diff=lfs merge=lfs -text
8225
+ 2202.02xxx/2202.02556/ced985a7-af95-4137-b073-34f3521415b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
8226
+ 2202.02xxx/2202.02559/61f1610d-5609-4519-931c-213f0c87f1b5_origin.pdf filter=lfs diff=lfs merge=lfs -text
8227
+ 2202.02xxx/2202.02628/3b92ccd4-4445-4b57-97b7-403b9511bbd7_origin.pdf filter=lfs diff=lfs merge=lfs -text
8228
+ 2202.02xxx/2202.02643/53c54c6a-23c5-414d-9472-999184f69183_origin.pdf filter=lfs diff=lfs merge=lfs -text
8229
+ 2202.02xxx/2202.02673/ef0679d3-27e0-4d38-b2d2-9982a78c9614_origin.pdf filter=lfs diff=lfs merge=lfs -text
8230
+ 2202.02xxx/2202.02688/ef1577bd-ecfb-40da-9dd9-29dca2a94bbc_origin.pdf filter=lfs diff=lfs merge=lfs -text
8231
+ 2202.02xxx/2202.02691/65cb38ad-fd28-41f7-a525-896c2abffb73_origin.pdf filter=lfs diff=lfs merge=lfs -text
8232
+ 2202.02xxx/2202.02703/677f2b9b-a92e-4e13-b9fc-e973ee20288c_origin.pdf filter=lfs diff=lfs merge=lfs -text
8233
+ 2202.02xxx/2202.02757/633e5385-942b-4101-a107-384936753af7_origin.pdf filter=lfs diff=lfs merge=lfs -text
8234
+ 2202.02xxx/2202.02763/6db9d7c3-d10e-480e-bea2-39744720d14d_origin.pdf filter=lfs diff=lfs merge=lfs -text
8235
+ 2202.02xxx/2202.02794/13248924-eb1a-4f12-927b-45533cf3862d_origin.pdf filter=lfs diff=lfs merge=lfs -text
8236
+ 2202.02xxx/2202.02831/20ddfbf1-9dd6-4e4f-9df6-ac259c2acf88_origin.pdf filter=lfs diff=lfs merge=lfs -text
8237
+ 2202.02xxx/2202.02896/1bbb9728-28a1-4028-9bba-bd71bd10e5b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
8238
+ 2202.02xxx/2202.02916/58a8eefa-971d-4882-b95f-9d9f5dfe80f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
8239
+ 2202.02xxx/2202.02931/11e08444-36bf-4157-83ad-da53fefd18a5_origin.pdf filter=lfs diff=lfs merge=lfs -text
8240
+ 2202.02xxx/2202.02950/396d59d4-0a1b-4c29-80bd-ed9d9b93f7cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
8241
+ 2202.02xxx/2202.02965/bd61b9e4-799b-4e6a-817d-33f68c8988be_origin.pdf filter=lfs diff=lfs merge=lfs -text
8242
+ 2202.02xxx/2202.02974/d8114b8e-ac64-46ff-9211-a0920249a0ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
8243
+ 2202.02xxx/2202.02980/560a8d70-4234-498a-b2c9-b6bebd703d3c_origin.pdf filter=lfs diff=lfs merge=lfs -text
8244
+ 2202.03xxx/2202.03013/b027470b-bc01-4290-9c9c-2c1b4fdcd5d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
8245
+ 2202.03xxx/2202.03423/529e9903-1173-4072-9316-e84282a9489b_origin.pdf filter=lfs diff=lfs merge=lfs -text
8246
+ 2202.03xxx/2202.03580/81026203-1454-47b6-8ac0-7ea8828c6d6c_origin.pdf filter=lfs diff=lfs merge=lfs -text
8247
+ 2202.03xxx/2202.03866/ae4fb52f-169c-4e6e-8194-2ae56c89e086_origin.pdf filter=lfs diff=lfs merge=lfs -text
8248
+ 2202.03xxx/2202.03896/193a36c5-4117-4f97-a608-5881bf8cf42c_origin.pdf filter=lfs diff=lfs merge=lfs -text
8249
+ 2202.08xxx/2202.08959/50784fd7-3850-4a6c-971f-ac596cec3463_origin.pdf filter=lfs diff=lfs merge=lfs -text
2202.01xxx/2202.01727/b96cd2bf-bb11-4433-9686-55bf4d6acf3a_content_list.json ADDED
@@ -0,0 +1,2077 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Skeleton-Based Action Segmentation with Multi-Stage Spatial-Temporal Graph Convolutional Neural Networks",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 133,
8
+ 65,
9
+ 867,
10
+ 167
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Benjamin Filtjens, Bart Vanrumste, Peter Slaets",
17
+ "bbox": [
18
+ 300,
19
+ 186,
20
+ 684,
21
+ 204
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—The ability to identify and temporally segment fine-grained actions in motion capture sequences is crucial for applications in human movement analysis. Motion capture is typically performed with optical or inertial measurement systems, which encode human movement as a time series of human joint locations and orientations or their higher-order representations. State-of-the-art action segmentation approaches use multiple stages of temporal convolutions. The main idea is to generate an initial prediction with several layers of temporal convolutions and refine these predictions over multiple stages, also with temporal convolutions. Although these approaches capture long-term temporal patterns, the initial predictions do not adequately consider the spatial hierarchy among the human joints. To address this limitation, we recently introduced multi-stage spatial-temporal graph convolutional neural networks (MS-GCN). Our framework replaces the initial stage of temporal convolutions with spatial graph convolutions and dilated temporal convolutions, which better exploit the spatial configuration of the joints and their long-term temporal dynamics. Our framework was compared to four strong baselines on five tasks. Experimental results demonstrate that our framework is a strong baseline for skeleton-based action segmentation.",
28
+ "bbox": [
29
+ 71,
30
+ 301,
31
+ 491,
32
+ 564
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Index Terms—activity segmentation, activity detection, dense labelling, freezing of gait, graph convolutional, MS-GCN, multi-stage, spatial-temporal",
39
+ "bbox": [
40
+ 71,
41
+ 575,
42
+ 488,
43
+ 617
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "1 INTRODUCTION",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 73,
53
+ 642,
54
+ 230,
55
+ 657
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "The automatic identification and localisation of events and actions in long untrimmed motion capture (MoCap) sequences are crucial for various use-cases in human movement analysis. Typically, MoCap is performed with optical or inertial measurement systems, which encode human movement as a time series of human joint locations and orientations or their higher-order representations [1], [2]. The high-dimensional time series registers the articulated motion as a high degree of freedom human skeleton. Therefore, MoCap sequences can be generically regarded as skeleton-like inputs. Given an untrimmed skeleton sequence, we aim to segment every event and action in time. In the literature, this task falls under the domain of skeleton-based action",
62
+ "bbox": [
63
+ 71,
64
+ 662,
65
+ 491,
66
+ 851
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "segmentation.",
73
+ "bbox": [
74
+ 503,
75
+ 301,
76
+ 607,
77
+ 314
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "Related to this task is the task of skeleton-based action recognition. Unlike action segmentation, action recognition aims to classify actions from short and well-segmented video clips. This domain has made tremendous strides due to the availability of low-cost MoCap approaches. These approaches are driven by pose estimation algorithms, which are a form of marker-less optical MoCap that encode human movement as a time series of human joint locations with a single camera [3], [4]. Human actions can then be recognized by appropriately modelling the high dimensional time series. Earlier methods ignored the spatial hierarchy among the joints and modelled human actions by applying high-level temporal models [5]. Later methods explicitly modelled the natural connection between joints [6]. These methods showed encouraging improvement, which suggests the significance of modelling the spatial hierarchy among the joints. The state-of-the-art approaches are based on the spatial-temporal graph convolutional neural network (STGCN) [7]. These approaches model the skeleton sequences as a spatial-temporal graph. The idea is to construct a graph in which each node corresponds to a human body joint and the edges correspond to the spatial connectivity among the joints and the temporal connectivity of the same joint across time. The spatial-temporal graph can then be modelled by graph neural networks, which generalize convolutional neural networks to graphs of arbitrary structures [8], [9]. However, skeleton-based action segmentation is more challenging than recognition, due to the need for simultaneous recognition and localization. Despite its broad potential in human movement analysis, a proper framework for this task has not yet been established.",
84
+ "bbox": [
85
+ 501,
86
+ 315,
87
+ 924,
88
+ 765
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Within the generic domain of action segmentation, i.e. approaches that are not specifically designed for skeleton data, earlier methods mainly utilized a sliding-window scheme [10], [11]. However, the optimal window size is often a trade-off between model expressivity, i.e. the models' ability to capture long-term temporal context, and the sensitivity of the model to take into account short actions [12]. Recent methods, such as temporal convolutional neural networks (TCN) [13], can operate on untrimmed sequences and classify each time sample, termed action segmentation, for simultaneous action recognition and localisation. TCNs perform dilated temporal convolutions to capture",
95
+ "bbox": [
96
+ 501,
97
+ 767,
98
+ 923,
99
+ 944
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "page_number",
105
+ "text": "1",
106
+ "bbox": [
107
+ 911,
108
+ 32,
109
+ 919,
110
+ 42
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "aside_text",
116
+ "text": "arXiv:2202.01727v2 [cs.CV] 9 Oct 2022",
117
+ "bbox": [
118
+ 22,
119
+ 270,
120
+ 57,
121
+ 700
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "page_footnote",
127
+ "text": "Submitted on x August 2022 \nBenjamin Filtjens and Peter Slaets are with the Department of Mechanical Engineering, KU Leuven, 3001 Leuven, Belgium. (email: benjamin.filtjens@kuleuven.be and peter.slaets@kuleuven.be). \nBenjamin Filtjens and Bart Vanrumste are with the Department of Electrical Engineering (ESAT), KU Leuven, 3001 Leuven, Belgium. (email: benjamin.filtjens@kuleuven.be and bart.vanrumste@kuleuven.be).",
128
+ "bbox": [
129
+ 71,
130
+ 859,
131
+ 491,
132
+ 941
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "text",
138
+ "text": "long-term temporal context [14]. In action segmentation the predictions tend to vary at a high temporal frequency, often resulting in over-segmentation errors. To address this problem, the state of the art approach, termed multi-stage temporal convolutional neural networks (MS-TCN), includes refinement stages [15]. The idea is to employ a temporal model to generate an initial prediction and refine these predictions over multiple stages. However, these generic action segmentation approaches do not consider the spatial hierarchy among the skeleton joints.",
139
+ "bbox": [
140
+ 71,
141
+ 53,
142
+ 491,
143
+ 198
144
+ ],
145
+ "page_idx": 1
146
+ },
147
+ {
148
+ "type": "text",
149
+ "text": "We recently introduced an architecture for clinical freezing of gait (FOG) assessment in Parkinson's disease based on optical marker-based motion capture data, termed multistage spatial-temporal graph convolutional neural network (MS-GCN) [16]. Our architecture amalgamates the best practices in convolutional neural network design to address the task of skeleton-based action segmentation. First, we extended ST-GCN for action segmentation by including dilation on the temporal graph to increase the temporal receptive field [14]. Next, we modified MS-TCN by decoupling the prediction generation stage from the refinement stages, allowing us to address the different goals of these stages. Specifically, we replaced the TCN-based temporal layers that generate an initial prediction by the modified ST-GCN layers to appropriately model the spatial hierarchy among the joints. We hypothesize that MS-GCN is a strong baseline for skeleton-based action segmentation tasks other than FOG assessment and for other MoCap representations than optical marker-based MoCap. To this end, the contribution of the present manuscript is four-fold: (1) We propose MS-GCN as a generic baseline for skeleton-based action segmentation. (2) We introduce five relevant use-cases from four public datasets and one proprietary dataset. The use-cases include three different forms of motion capture, marker-based and marker-less optical MoCap, and inertial-based MoCap. (3) We show that the proposed architecture exceeds the performance of four strong deep learning baseline methods. (4) We publicly release our code and trained models at: https://github.com/BenjaminFiltjens/MS-GCN.",
150
+ "bbox": [
151
+ 76,
152
+ 199,
153
+ 491,
154
+ 623
155
+ ],
156
+ "page_idx": 1
157
+ },
158
+ {
159
+ "type": "text",
160
+ "text": "2 SKELETON-BASED ACTION SEGMENTATION",
161
+ "text_level": 1,
162
+ "bbox": [
163
+ 73,
164
+ 645,
165
+ 449,
166
+ 659
167
+ ],
168
+ "page_idx": 1
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "This section first formalizes the problem of skeleton-based action segmentation. Next, we introduce the three distinguishing characteristics of the MS-GCN architecture, which are: (1) dilated temporal convolutions to learn long-term temporal patterns [13], (2) spatial graph convolutions to learn spatial patterns [7], (3) multiple stages of refinement to reduce the number of segmentation errors [15]. These characteristics are further discussed within this section.",
173
+ "bbox": [
174
+ 71,
175
+ 666,
176
+ 490,
177
+ 782
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "2.1 Problem statement",
184
+ "text_level": 1,
185
+ "bbox": [
186
+ 71,
187
+ 804,
188
+ 256,
189
+ 818
190
+ ],
191
+ "page_idx": 1
192
+ },
193
+ {
194
+ "type": "text",
195
+ "text": "A MoCap sequence can be generically represented as: $f \\in \\mathbb{R}^{T \\times N \\times C}$ , where $T$ are the number of samples, $N$ are the number of nodes, and $C$ are the number of feature channels per node. Note that the number of samples $T$ may vary for each input sequence. Given a MoCap sequence, we aim to infer the class label for each sample $\\hat{Y} = \\hat{y}_0, \\dots, \\hat{y}_T$ . The inferred class labels are represented as: $\\hat{Y} \\in \\mathbb{R}^{T \\times L}$ , where $\\hat{y}_{t,l}$ is the probability of class $l$ at sample $t$ .",
196
+ "bbox": [
197
+ 71,
198
+ 823,
199
+ 491,
200
+ 944
201
+ ],
202
+ "page_idx": 1
203
+ },
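A minimal sketch of the tensor shapes in this problem statement, assuming PyTorch; the sizes are illustrative:

```python
import torch

T, N, C, L = 1000, 25, 3, 5         # samples, joints, channels per joint, classes

# A MoCap sequence f in R^{T x N x C}: T samples of N joints with C channels each.
f = torch.randn(T, N, C)

# The goal is a per-sample class distribution Y_hat in R^{T x L},
# where y_hat[t, l] is the probability of class l at sample t.
Y_hat = torch.softmax(torch.randn(T, L), dim=-1)
print(f.shape, Y_hat.shape)          # torch.Size([1000, 25, 3]) torch.Size([1000, 5])
```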
204
+ {
205
+ "type": "image",
206
+ "img_path": "images/6e8eb79d52e09f270901521a7e84eb60671e23c3540e091210f1b48541098270.jpg",
207
+ "image_caption": [
208
+ "Fig. 1. Visual overview of a (dilated) temporal convolutional neural network (TCN). The visualized network is implemented in acausal mode, since the filters take into account future observations $f_{in,t+1}, \\ldots, f_{in,T}$ . The first layer has a dilation rate of 1, reducing this layer to a regular convolution. By increasing the dilation rate $d$ throughout the network, the deeper layers can represent a wider range of inputs, thereby expanding the temporal receptive field of the network."
209
+ ],
210
+ "image_footnote": [],
211
+ "bbox": [
212
+ 566,
213
+ 50,
214
+ 864,
215
+ 186
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "2.2 Dilated temporal convolution",
222
+ "text_level": 1,
223
+ "bbox": [
224
+ 504,
225
+ 304,
226
+ 759,
227
+ 319
228
+ ],
229
+ "page_idx": 1
230
+ },
231
+ {
232
+ "type": "text",
233
+ "text": "Convolutional neural networks (CNN) are ideal for processing data with a grid-like topology such as time-series (1D CNN) and images (2D CNN) [17]. A CNN learns an expressive representation through altering convolutional and pooling layers [18]. The pooling layers downsample the temporal representation, allowing the model to capture long-range dependencies at the cost of losing fine-grained information. Recent temporal convolutional neural networks (TCN) omit pooling and instead use dilated convolutions [14] to capture long-range dependencies while keeping the temporal representation intact [15]. For an input feature map $f_{in}$ and a filter $p$ , the dilated convolution on sample $t$ of the feature map is defined as [19]:",
234
+ "bbox": [
235
+ 501,
236
+ 324,
237
+ 923,
238
+ 515
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "equation",
244
+ "text": "\n$$\n\\left(f _ {i n} * _ {d} p\\right) (t) = \\sum_ {i = 0} ^ {k - 1} p (i) \\cdot f _ {i n _ {t - d \\cdot i}}, \\tag {1}\n$$\n",
245
+ "text_format": "latex",
246
+ "bbox": [
247
+ 598,
248
+ 522,
249
+ 921,
250
+ 561
251
+ ],
252
+ "page_idx": 1
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "where $*_{d}$ is the dilated convolution operator with dilation rate $d$ , $k$ is the size of the filter (kernel), and $t - d \\cdot i$ is used to indicate that the filter in Equation 1 is applied in causal mode, i.e. direction of the past. The filter can be implemented in acausal mode, i.e. take into account future observations, by zero-padding symmetrically. By increasing the dilation rate $d$ throughout the network, the deeper layers can represent a wider range of inputs, thereby expanding the temporal receptive field of the network. A visual overview of a dilated TCN is provided in Figure 1.",
257
+ "bbox": [
258
+ 503,
259
+ 569,
260
+ 921,
261
+ 717
262
+ ],
263
+ "page_idx": 1
264
+ },
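A minimal sketch of the dilated temporal convolution of Equation 1 in acausal mode, assuming PyTorch; the class and variable names are illustrative and not taken from the MS-GCN repository:

```python
import torch
import torch.nn as nn

class DilatedTemporalConv(nn.Module):
    """One acausal dilated temporal convolution over a (batch, C, T) sequence."""
    def __init__(self, channels: int, dilation: int, kernel_size: int = 3):
        super().__init__()
        # Symmetric zero-padding of d*(k-1)//2 keeps the sequence length T
        # unchanged and lets the filter look at past and future samples.
        self.conv = nn.Conv1d(channels, channels, kernel_size,
                              dilation=dilation,
                              padding=dilation * (kernel_size - 1) // 2)

    def forward(self, f_in: torch.Tensor) -> torch.Tensor:
        return self.conv(f_in)

x = torch.randn(1, 64, 1000)                      # (batch, C, T)
for d in [1, 2, 4, 8]:                            # doubling d widens the
    x = DilatedTemporalConv(64, dilation=d)(x)    # temporal receptive field
print(x.shape)                                    # torch.Size([1, 64, 1000])
```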
265
+ {
266
+ "type": "text",
267
+ "text": "2.3 Graph convolution",
268
+ "text_level": 1,
269
+ "bbox": [
270
+ 504,
271
+ 738,
272
+ 684,
273
+ 752
274
+ ],
275
+ "page_idx": 1
276
+ },
277
+ {
278
+ "type": "text",
279
+ "text": "Graph convolutional neural networks (GCNs) generalize CNNs to non-euclidean structured data [9]. Yan et al. extended GCNs to exploit the inherent spatial relationship among the joints of a skeleton [7]. Their approach termed spatial-temporal graph convolutional networks (ST-GCN) learns a representation on a graph $G = (V, E, A)$ which takes as input:",
280
+ "bbox": [
281
+ 501,
282
+ 757,
283
+ 921,
284
+ 861
285
+ ],
286
+ "page_idx": 1
287
+ },
288
+ {
289
+ "type": "list",
290
+ "sub_type": "text",
291
+ "list_items": [
292
+ "- A set of nodes $V = \\{v_{ti} | t = 1, \\dots, T, i = 1, \\dots, N\\}$ for a skeleton sequence of $N$ joints and $T$ samples.",
293
+ "- Two sets of edges $E_{S} = \\{v_{ti}v_{tj}|(i,j)\\in H\\}$ and $E_{F} = \\{v_{ti}v_{(t + 1)i}\\}$ , where $H$ is the set of connected joints. $E_{S}$ refers to the intra-skeleton edges at each"
294
+ ],
295
+ "bbox": [
296
+ 527,
297
+ 869,
298
+ 921,
299
+ 943
300
+ ],
301
+ "page_idx": 1
302
+ },
303
+ {
304
+ "type": "page_number",
305
+ "text": "2",
306
+ "bbox": [
307
+ 911,
308
+ 32,
309
+ 921,
310
+ 42
311
+ ],
312
+ "page_idx": 1
313
+ },
314
+ {
315
+ "type": "image",
316
+ "img_path": "images/576546c365cf513e9bee88d888a5a34d7215036f70fbafb947fb92f3fea42a0e.jpg",
317
+ "image_caption": [
318
+ "Fig. 2. Spatial-temporal graph convolutional neural network (ST-GCN). Visual overview of a spatial-temporal graph (a) and spatial partitioning strategy (b). The spatial partitioning strategy has three subsets based on a nodes distance with respect to a self-selected root node (green). The three subsets are the node itself (blue), the node closest to the root node (red), and the node furthest from the root node (yellow)."
319
+ ],
320
+ "image_footnote": [],
321
+ "bbox": [
322
+ 91,
323
+ 54,
324
+ 475,
325
+ 151
326
+ ],
327
+ "page_idx": 2
328
+ },
329
+ {
330
+ "type": "text",
331
+ "text": "frame (spatial dimension), and $E_{F}$ refers to the interframe connection of the same joints over all of the frames (temporal dimension).",
332
+ "bbox": [
333
+ 120,
334
+ 258,
335
+ 491,
336
+ 303
337
+ ],
338
+ "page_idx": 2
339
+ },
340
+ {
341
+ "type": "text",
342
+ "text": "- A description of the graph structure in the form of an adjacency matrix $A$ .",
343
+ "bbox": [
344
+ 96,
345
+ 303,
346
+ 491,
347
+ 332
348
+ ],
349
+ "page_idx": 2
350
+ },
351
+ {
352
+ "type": "text",
353
+ "text": "For instance, Figure 2(a) visualizes the spatial-temporal graph. The joints represent the nodes of the graph (purple nodes), their natural connections are the spatial edges (purple lines), and the connection between adjacent frames are the temporal edges (green lines).",
354
+ "bbox": [
355
+ 71,
356
+ 338,
357
+ 490,
358
+ 411
359
+ ],
360
+ "page_idx": 2
361
+ },
362
+ {
363
+ "type": "text",
364
+ "text": "In the spatial dimension, the graph convolution operation on node $v_{ti}$ is defined as [7]:",
365
+ "bbox": [
366
+ 71,
367
+ 411,
368
+ 491,
369
+ 440
370
+ ],
371
+ "page_idx": 2
372
+ },
373
+ {
374
+ "type": "equation",
375
+ "text": "\n$$\nf _ {g c n} \\left(v _ {t i}\\right) = \\sum_ {v _ {t j} \\in B \\left(v _ {t i}\\right)} \\frac {1}{Z _ {t i} \\left(v _ {t j}\\right)} f _ {i n} \\left(v _ {t j}\\right) \\cdot w \\left(l _ {t i} \\left(v _ {t j}\\right)\\right), \\tag {2}\n$$\n",
376
+ "text_format": "latex",
377
+ "bbox": [
378
+ 94,
379
+ 445,
380
+ 490,
381
+ 482
382
+ ],
383
+ "page_idx": 2
384
+ },
385
+ {
386
+ "type": "text",
387
+ "text": "where $f_{in}$ and $f_{gen}$ denote the input feature map and output feature map, respectively. The term $B(v_{ti})$ denotes the sampling area of node $v_{ti}$ , with the nodes within the sampling area denoted as $v_{tj}$ . A mapping function $l_{ti}$ is defined to map each node with a unique weight vector $w$ . Figure 2(b) visualizes this strategy for a single frame $t$ , where the kernel size is set as 3 and the sampling area $B$ is partitioned into 3 subsets based on a nodes distance with respect to a self-selected root node (green). The three subsets in this partitioning strategy are the node itself (blue), the node closer to the root node (red), and the node further from the root node (yellow). The normalizing term $Z_{ij}$ is added to balance the contributions of different subsets to the output.",
388
+ "bbox": [
389
+ 71,
390
+ 487,
391
+ 490,
392
+ 691
393
+ ],
394
+ "page_idx": 2
395
+ },
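A small sketch of the spatial partitioning strategy described above, assuming NumPy; it splits a skeleton adjacency matrix into the three subsets (the node itself, neighbours closer to the root, neighbours further from the root) that receive separate weight vectors in Equation 2. The function name and default root choice are illustrative:

```python
import numpy as np

def partition_adjacency(A: np.ndarray, root: int = 0):
    """Split a skeleton adjacency matrix into the three subsets of the spatial
    partitioning strategy: the node itself, neighbours closer to the root node,
    and neighbours further from the root node."""
    N = A.shape[0]
    # Hop distance of every joint to the chosen root node (BFS over the skeleton).
    dist = np.full(N, np.inf)
    dist[root] = 0
    frontier = [root]
    while frontier:
        nxt = []
        for i in frontier:
            for j in np.where(A[i] > 0)[0]:
                if np.isinf(dist[j]):
                    dist[j] = dist[i] + 1
                    nxt.append(j)
        frontier = nxt
    A_self = np.eye(N)
    A_near = np.zeros_like(A, dtype=float)   # neighbour closer to the root
    A_far = np.zeros_like(A, dtype=float)    # neighbour further from the root
    for i in range(N):
        for j in np.where(A[i] > 0)[0]:
            (A_near if dist[j] < dist[i] else A_far)[i, j] = 1.0
    return A_self, A_near, A_far
```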
396
+ {
397
+ "type": "text",
398
+ "text": "2.4 Refinement stages",
399
+ "text_level": 1,
400
+ "bbox": [
401
+ 71,
402
+ 709,
403
+ 253,
404
+ 724
405
+ ],
406
+ "page_idx": 2
407
+ },
408
+ {
409
+ "type": "text",
410
+ "text": "As predictions are made at high temporal frequencies, over-segmentation errors, i.e. an action is segmented into multiple shorter actions, often occur. A common strategy to alleviate this problem in pixel-wise labelling of images is to generate an initial prediction, then refine this initial prediction using the interactions between neighbouring pixels [17]. Farha and Gall extend this to action segmentation in time series data [15]. The idea is to stack several predictors that each operates directly on the output of the previous one to incrementally refine the predictions.",
411
+ "bbox": [
412
+ 71,
413
+ 728,
414
+ 490,
415
+ 875
416
+ ],
417
+ "page_idx": 2
418
+ },
419
+ {
420
+ "type": "text",
421
+ "text": "3 DEEP LEARNING MODELS",
422
+ "text_level": 1,
423
+ "bbox": [
424
+ 73,
425
+ 893,
426
+ 310,
427
+ 907
428
+ ],
429
+ "page_idx": 2
430
+ },
431
+ {
432
+ "type": "text",
433
+ "text": "The previous section introduced the three building blocks that characterizes the MS-GCN architecture. As the MS-",
434
+ "bbox": [
435
+ 71,
436
+ 912,
437
+ 491,
438
+ 941
439
+ ],
440
+ "page_idx": 2
441
+ },
442
+ {
443
+ "type": "text",
444
+ "text": "GCN architecture combines the best practices from TCN, STGCN, and MS-TCN, we include these as a baseline. We additionally include a bidirectional long short term memory-based network (LSTM) [20], as it is often considered an important baseline in action segmentation of MoCap data [21]-[23].",
445
+ "bbox": [
446
+ 501,
447
+ 53,
448
+ 921,
449
+ 140
450
+ ],
451
+ "page_idx": 2
452
+ },
453
+ {
454
+ "type": "text",
455
+ "text": "The implementation details of the employed models are visualized in Figure 3. The first layer of all models is a batch normalization (BN) layer that normalizes the inputs and accelerates training [24]. After normalization, the input is reshaped into the accepted formats of the specified models. For the graph-based models (i.e., ST-GCN and MS-GCN), the data is shaped into $T \\times N \\times C_{in}$ , where $N$ represents the number of nodes, $C_{in}$ the number of input channels, and $T$ the number of samples. For the temporal models (i.e., LSTM, TCN, and MS-TCN), the data is shaped into $T \\times C_{in}N$ . For these models, all input node locations are thus concatenated to form the input features at each sample $t$ .",
456
+ "bbox": [
457
+ 501,
458
+ 140,
459
+ 921,
460
+ 316
461
+ ],
462
+ "page_idx": 2
463
+ },
464
+ {
465
+ "type": "text",
466
+ "text": "3.1 LSTM",
467
+ "text_level": 1,
468
+ "bbox": [
469
+ 504,
470
+ 333,
471
+ 589,
472
+ 345
473
+ ],
474
+ "page_idx": 2
475
+ },
476
+ {
477
+ "type": "text",
478
+ "text": "The first layer of our recurrent model is an LSTM layer, which computes the following function:",
479
+ "bbox": [
480
+ 503,
481
+ 352,
482
+ 921,
483
+ 382
484
+ ],
485
+ "page_idx": 2
486
+ },
487
+ {
488
+ "type": "equation",
489
+ "text": "\n$$\ni _ {t} = \\sigma \\left(f _ {i n _ {t}} W _ {i i} + b _ {i i} + h _ {t - 1} W _ {h i} + b _ {h i}\\right),\n$$\n",
490
+ "text_format": "latex",
491
+ "bbox": [
492
+ 573,
493
+ 388,
494
+ 849,
495
+ 406
496
+ ],
497
+ "page_idx": 2
498
+ },
499
+ {
500
+ "type": "equation",
501
+ "text": "\n$$\nj _ {t} = \\sigma \\left(f _ {i n _ {t}} W _ {i f} + b _ {i f} + h _ {t - 1} W _ {h f} + b _ {h f}\\right),\n$$\n",
502
+ "text_format": "latex",
503
+ "bbox": [
504
+ 568,
505
+ 411,
506
+ 857,
507
+ 429
508
+ ],
509
+ "page_idx": 2
510
+ },
511
+ {
512
+ "type": "equation",
513
+ "text": "\n$$\n\\tilde {c} _ {t} = \\tanh \\left(f _ {i n _ {t}} W _ {i c} + b _ {i c} + h _ {t - 1} W _ {h c} + b _ {h c}\\right),\n$$\n",
514
+ "text_format": "latex",
515
+ "bbox": [
516
+ 560,
517
+ 434,
518
+ 864,
519
+ 450
520
+ ],
521
+ "page_idx": 2
522
+ },
523
+ {
524
+ "type": "equation",
525
+ "text": "\n$$\no _ {t} = \\sigma \\left(f _ {i n _ {t}} W _ {i o} + b _ {i o} + h _ {t - 1} W _ {h o} + b _ {h o}\\right),\n$$\n",
526
+ "text_format": "latex",
527
+ "bbox": [
528
+ 571,
529
+ 455,
530
+ 854,
531
+ 473
532
+ ],
533
+ "page_idx": 2
534
+ },
535
+ {
536
+ "type": "equation",
537
+ "text": "\n$$\nc _ {t} = j _ {t} \\odot c _ {t - 1} + i _ {t} \\odot \\tilde {c} _ {t}),\n$$\n",
538
+ "text_format": "latex",
539
+ "bbox": [
540
+ 625,
541
+ 479,
542
+ 800,
543
+ 494
544
+ ],
545
+ "page_idx": 2
546
+ },
547
+ {
548
+ "type": "equation",
549
+ "text": "\n$$\nh _ {t} = \\tanh (c _ {t}) \\odot o _ {t},\n$$\n",
550
+ "text_format": "latex",
551
+ "bbox": [
552
+ 643,
553
+ 502,
554
+ 782,
555
+ 517
556
+ ],
557
+ "page_idx": 2
558
+ },
559
+ {
560
+ "type": "text",
561
+ "text": "where $h_t$ is the hidden state at sample $t$ , $c_t$ is the cell state at sample $t$ , $f_{int}$ is the input feature map at sample $t$ , $h_{t-1}$ is the hidden state of the layer at sample $t-1$ . The terms $i_t$ , $j_t$ , and $o_t$ are the input, forget, and output gates, respectively. The terms $\\sigma$ , $tanh$ , and $\\odot$ are the sigmoid function, hyperbolic tangent function, and Hadamard product, respectively. The weight matrices are represented by $W$ , with subscripts representing from-to relationships. The LSTM layer above is causal, as the hidden state $h_t$ depends only on $x_0, \\ldots, x_t$ . The LSTM can be implemented in a causal mode, i.e., take into account future observations $x_{t+1}, \\ldots, x_T$ , by training it in the positive and negative time direction (bidirectional) [20], [25]. The hidden representation of the past and future are then combined through simple concatenation [20]. A visual overview of the (bidirectional) LSTM network is provided in Figure 3(a).",
562
+ "bbox": [
563
+ 501,
564
+ 523,
565
+ 921,
566
+ 758
567
+ ],
568
+ "page_idx": 2
569
+ },
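A minimal sketch of the bidirectional LSTM baseline described above, assuming PyTorch; the feature and class sizes are illustrative (here 25 joints with 3 channels concatenated per sample):

```python
import torch
import torch.nn as nn

class BiLSTMSegmenter(nn.Module):
    """Per-sample action classification with a bidirectional LSTM."""
    def __init__(self, in_features: int, hidden: int = 64, num_classes: int = 5):
        super().__init__()
        self.lstm = nn.LSTM(in_features, hidden, num_layers=2,
                            batch_first=True, bidirectional=True)
        # Forward and backward hidden states are concatenated -> 2 * hidden.
        self.head = nn.Linear(2 * hidden, num_classes)

    def forward(self, f_in: torch.Tensor) -> torch.Tensor:   # (batch, T, C_in * N)
        h, _ = self.lstm(f_in)
        return torch.softmax(self.head(h), dim=-1)            # (batch, T, L)

y = BiLSTMSegmenter(in_features=75)(torch.randn(2, 1000, 75))
print(y.shape)   # torch.Size([2, 1000, 5])
```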
570
+ {
571
+ "type": "text",
572
+ "text": "3.2 TCN",
573
+ "text_level": 1,
574
+ "bbox": [
575
+ 504,
576
+ 775,
577
+ 578,
578
+ 789
579
+ ],
580
+ "page_idx": 2
581
+ },
582
+ {
583
+ "type": "text",
584
+ "text": "The first layer of the TCN-based model is a $1 \\times 1$ convolutional layer that adjusts the input dimension $C_{in}$ to the number of filters $C$ in the network, formalized as:",
585
+ "bbox": [
586
+ 501,
587
+ 794,
588
+ 921,
589
+ 838
590
+ ],
591
+ "page_idx": 2
592
+ },
593
+ {
594
+ "type": "equation",
595
+ "text": "\n$$\nf _ {a d j} = W _ {1} * f _ {i n} + b, \\tag {3}\n$$\n",
596
+ "text_format": "latex",
597
+ "bbox": [
598
+ 640,
599
+ 845,
600
+ 921,
601
+ 862
602
+ ],
603
+ "page_idx": 2
604
+ },
605
+ {
606
+ "type": "text",
607
+ "text": "where $f_{adj} \\in \\mathbb{R}^{T \\times C}$ is the adjusted feature map, $f_{in} \\in \\mathbb{R}^{T \\times C_{in}}$ the input MoCap sequence, $*$ the convolution operator, $b \\in \\mathbb{R}^C$ the bias term, and $W_1 \\in \\mathbb{R}^{1 \\times C_{in} \\times C}$ the weights of the $1 \\times 1$ convolution filter with $C_{in}$ input feature channels and $C$ equal to the number of feature channels in",
608
+ "bbox": [
609
+ 503,
610
+ 868,
611
+ 921,
612
+ 941
613
+ ],
614
+ "page_idx": 2
615
+ },
616
+ {
617
+ "type": "page_number",
618
+ "text": "3",
619
+ "bbox": [
620
+ 911,
621
+ 32,
622
+ 921,
623
+ 42
624
+ ],
625
+ "page_idx": 2
626
+ },
627
+ {
628
+ "type": "image",
629
+ "img_path": "images/40b0b1ddb83e6b1b3bab5c2ec8a3d59191a15a655bffe6b5dd1ccaa2314848d0.jpg",
630
+ "image_caption": [
631
+ "Fig. 3. Overview of the MS-GCN and the four baseline deep learning models. The models take as input a MoCap sequence and generate as output a sequence of actions. The five deep learning models are: (a) a long short-term memory network (LSTM), (b) a temporal convolutional neural network (TCN), (c) a multi-stage temporal convolutional neural network (MS-TCN), (d) a spatial-temporal graph convolutional neural network (STGCN), and (e) a multi-stage spatial-temporal graph convolutional neural network (MS-GCN). The terms BN and $L_{s}$ denote the batch normalization layer and the loss of stage $s$ , respectively."
632
+ ],
633
+ "image_footnote": [],
634
+ "bbox": [
635
+ 81,
636
+ 53,
637
+ 915,
638
+ 281
639
+ ],
640
+ "page_idx": 3
641
+ },
642
+ {
643
+ "type": "image",
644
+ "img_path": "images/42fda8928c55ea91fbc0360595c99d561df4851cc85de9249b01443f01a1665b.jpg",
645
+ "image_caption": [
646
+ "Fig. 4. Visual overview of a temporal convolutional (TCN) and spatial-temporal graph convolutional (ST-GCN) block [7]. ST-GCN generates a spatial-temporal feature map by applying a spatial graph convolution (see Figure 2(b)) and a temporal convolution (see Figure 1), both of which are followed by batch normalization (BN) and a ReLU nonlinearity. Moreover, a residual connection is added to each block."
647
+ ],
648
+ "image_footnote": [],
649
+ "bbox": [
650
+ 161,
651
+ 375,
652
+ 403,
653
+ 570
654
+ ],
655
+ "page_idx": 3
656
+ },
657
+ {
658
+ "type": "text",
659
+ "text": "the network.",
660
+ "text_level": 1,
661
+ "bbox": [
662
+ 71,
663
+ 680,
664
+ 165,
665
+ 693
666
+ ],
667
+ "page_idx": 3
668
+ },
669
+ {
670
+ "type": "text",
671
+ "text": "The adjusted input is passed through several TCN blocks (visualized in 4). Each TCN block applies a dilated temporal convolution [14], BN, ReLU non-linear activation, and a residual connection between the activation map and the input. Formally, this process is defined as:",
672
+ "bbox": [
673
+ 71,
674
+ 694,
675
+ 490,
676
+ 767
677
+ ],
678
+ "page_idx": 3
679
+ },
680
+ {
681
+ "type": "equation",
682
+ "text": "\n$$\nf _ {o u t} = \\delta \\left(B N \\left(W * _ {d} f _ {a d j} + b\\right)\\right) + f _ {a d j}, \\tag {4}\n$$\n",
683
+ "text_format": "latex",
684
+ "bbox": [
685
+ 150,
686
+ 776,
687
+ 488,
688
+ 792
689
+ ],
690
+ "page_idx": 3
691
+ },
692
+ {
693
+ "type": "text",
694
+ "text": "where $f_{out} \\in \\mathbb{R}^{T \\times C}$ is the output feature map, $*_d$ the dilated convolution operator, $b \\in \\mathbb{R}^C$ the bias term, $W \\in \\mathbb{R}^{k \\times C \\times C}$ the weights of the dilated convolution filter with kernel size $k$ , and $\\delta$ the ReLU function. A visual overview of the TCN-based network is provided in Figure 3(b).",
695
+ "bbox": [
696
+ 71,
697
+ 799,
698
+ 490,
699
+ 875
700
+ ],
701
+ "page_idx": 3
702
+ },
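A minimal sketch of the residual TCN block of Equation 4, assuming PyTorch; names are illustrative:

```python
import torch
import torch.nn as nn

class TCNBlock(nn.Module):
    """f_out = ReLU(BN(W *_d f_adj + b)) + f_adj   (Equation 4)."""
    def __init__(self, channels: int, dilation: int, kernel_size: int = 3):
        super().__init__()
        self.conv = nn.Conv1d(channels, channels, kernel_size,
                              dilation=dilation,
                              padding=dilation * (kernel_size - 1) // 2)  # acausal
        self.bn = nn.BatchNorm1d(channels)
        self.relu = nn.ReLU()

    def forward(self, f_adj: torch.Tensor) -> torch.Tensor:   # (batch, C, T)
        return self.relu(self.bn(self.conv(f_adj))) + f_adj

print(TCNBlock(64, dilation=2)(torch.randn(4, 64, 1000)).shape)  # torch.Size([4, 64, 1000])
```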
703
+ {
704
+ "type": "text",
705
+ "text": "3.3 ST-GCN",
706
+ "text_level": 1,
707
+ "bbox": [
708
+ 71,
709
+ 893,
710
+ 171,
711
+ 906
712
+ ],
713
+ "page_idx": 3
714
+ },
715
+ {
716
+ "type": "text",
717
+ "text": "The first layer of the ST-GCN-based model is a $1 \\times 1$ convolutional layer that adjusts the input dimension $C_{in}$",
718
+ "bbox": [
719
+ 71,
720
+ 912,
721
+ 488,
722
+ 943
723
+ ],
724
+ "page_idx": 3
725
+ },
726
+ {
727
+ "type": "text",
728
+ "text": "to the number of filters $C$ in the network, formalized as:",
729
+ "bbox": [
730
+ 504,
731
+ 376,
732
+ 898,
733
+ 390
734
+ ],
735
+ "page_idx": 3
736
+ },
737
+ {
738
+ "type": "equation",
739
+ "text": "\n$$\nf _ {a d j} = W _ {1} * f _ {i n} + b, \\tag {5}\n$$\n",
740
+ "text_format": "latex",
741
+ "bbox": [
742
+ 640,
743
+ 397,
744
+ 921,
745
+ 414
746
+ ],
747
+ "page_idx": 3
748
+ },
749
+ {
750
+ "type": "text",
751
+ "text": "where $f_{adj} \\in \\mathbb{R}^{T \\times N \\times C}$ is the adjusted feature map, $f_{in} \\in \\mathbb{R}^{T \\times N \\times C_{in}}$ the input MoCap sequence, $*$ the convolution operator, $b \\in \\mathbb{R}^C$ the bias term, $W_1 \\in \\mathbb{R}^{1 \\times 1 \\times C_{in} \\times C}$ the weights of the $1 \\times 1$ convolution filter with $C_{in}$ input feature channels and $C$ equal to the number of feature channels in the network.",
752
+ "bbox": [
753
+ 503,
754
+ 417,
755
+ 921,
756
+ 503
757
+ ],
758
+ "page_idx": 3
759
+ },
760
+ {
761
+ "type": "text",
762
+ "text": "The adjusted input is passed through several ST-GCN blocks (visualized in 4) [7]. Each ST-GCN first applies a graph convolution, transforming Equation 2 into:",
763
+ "bbox": [
764
+ 503,
765
+ 506,
766
+ 921,
767
+ 550
768
+ ],
769
+ "page_idx": 3
770
+ },
771
+ {
772
+ "type": "equation",
773
+ "text": "\n$$\nf _ {g c n} = \\sum_ {p} A _ {p} f _ {a d j} W _ {p} M _ {p}, \\tag {6}\n$$\n",
774
+ "text_format": "latex",
775
+ "bbox": [
776
+ 622,
777
+ 555,
778
+ 921,
779
+ 585
780
+ ],
781
+ "page_idx": 3
782
+ },
783
+ {
784
+ "type": "text",
785
+ "text": "where $f_{adj} \\in \\mathbb{R}^{T \\times N \\times C}$ is the adjusted input feature map, $f_{gcn} \\in \\mathbb{R}^{T \\times N \\times C}$ the output feature map of the spatial graph convolution, and $W_p$ the $1 \\times 1 \\times C \\times C$ weight matrix. The matrix $A_p \\in \\{0,1\\}^{N \\times N}$ is the adjacency matrix, which represents the spatial connection between the joints. The adjacency matrix $A_p = D_p^{-\\frac{1}{2}} A_p D_p^{-\\frac{1}{2}}$ , where $D_p$ is the diagonal node degree matrix. Multiplying $D_p^{-\\frac{1}{2}} A_p D_p^{-\\frac{1}{2}}$ corresponds to symmetrically normalizing $A$ , which prevents changing the scale of the features based on the number of connections [9]. The graph is partitioned into three subsets $p$ based on the spatial partitioning strategy, as was visualized in Figure 2(b) [7]. There are thus three different weight vectors $W_p$ that allow modelling of relative properties between the nodes. The matrix $M_p$ is a learnable $N \\times N$ attention mask that indicates the importance of each node and its spatial partitions.",
786
+ "bbox": [
787
+ 503,
788
+ 592,
789
+ 921,
790
+ 832
791
+ ],
792
+ "page_idx": 3
793
+ },
794
+ {
795
+ "type": "text",
796
+ "text": "Next, after passing through a BN layer and ReLu nonlinearity, the ST-GCN block performs a dilated temporal convolution. The dilated temporal convolution is, in turn, passed through a BN layer and ReLU non-linearity, and lastly, a residual connection is added between the activation map and the input. This process is formalized as:",
797
+ "bbox": [
798
+ 501,
799
+ 833,
800
+ 921,
801
+ 921
802
+ ],
803
+ "page_idx": 3
804
+ },
805
+ {
806
+ "type": "equation",
807
+ "text": "\n$$\nf _ {o u t} = \\delta \\left(B N \\left(W * _ {d} f _ {g c n} + b\\right)\\right) + f _ {a d j}, \\tag {7}\n$$\n",
808
+ "text_format": "latex",
809
+ "bbox": [
810
+ 580,
811
+ 926,
812
+ 921,
813
+ 944
814
+ ],
815
+ "page_idx": 3
816
+ },
817
+ {
818
+ "type": "page_number",
819
+ "text": "4",
820
+ "bbox": [
821
+ 911,
822
+ 32,
823
+ 921,
824
+ 42
825
+ ],
826
+ "page_idx": 3
827
+ },
828
+ {
829
+ "type": "text",
830
+ "text": "where $f_{out} \\in \\mathbb{R}^{T \\times N \\times C}$ is the output feature map, $*_d$ the dilated convolution operator, $b \\in \\mathbb{R}^C$ the bias term, $W \\in \\mathbb{R}^{k \\times 1 \\times C \\times C}$ the weights of the dilated convolution filter with kernel size $k$ . The output feature map is passed through a spatial pooling layer that aggregates the spatial features among the $N$ joints. A visual overview of the ST-GCN-based network is provided in Figure 3(d).",
831
+ "bbox": [
832
+ 71,
833
+ 51,
834
+ 491,
835
+ 157
836
+ ],
837
+ "page_idx": 4
838
+ },
839
+ {
840
+ "type": "text",
841
+ "text": "3.4 Single-stage models: sample-based prediction",
842
+ "text_level": 1,
843
+ "bbox": [
844
+ 71,
845
+ 176,
846
+ 460,
847
+ 191
848
+ ],
849
+ "page_idx": 4
850
+ },
851
+ {
852
+ "type": "text",
853
+ "text": "The three aforementioned single-stage models map an input skeleton sequence $f_{in}$ to a hidden representation $f_{out} \\in \\mathbb{R}^{T \\times C}$ , with $C$ determined by the number of convolutional filters (ST-GCN and TCN) or the number of hidden units (LSTM), and length $T$ the same as the input sequence. The hidden representation of each model is passed through a $1 \\times 1$ convolution and a softmax activation function to get the probabilities for the $L$ output classes for each sample in-time, formalized as:",
854
+ "bbox": [
855
+ 71,
856
+ 194,
857
+ 491,
858
+ 325
859
+ ],
860
+ "page_idx": 4
861
+ },
862
+ {
863
+ "type": "equation",
864
+ "text": "\n$$\n\\hat {Y} = \\zeta \\left(W _ {1} * f _ {\\text {o u t}} + b\\right), \\tag {8}\n$$\n",
865
+ "text_format": "latex",
866
+ "bbox": [
867
+ 200,
868
+ 333,
869
+ 488,
870
+ 351
871
+ ],
872
+ "page_idx": 4
873
+ },
874
+ {
875
+ "type": "text",
876
+ "text": "where $\\hat{Y} \\in \\mathbb{R}^{T \\times L}$ are the class probabilities at each sample $t$ , $f_{out}$ the hidden output representation of the single stage models, $*$ the convolution operator, $b \\in \\mathbb{R}^L$ the bias term, $\\zeta$ the softmax function, $W_1 \\in \\mathbb{R}^{1 \\times C \\times L}$ the weights of the $1 \\times 1$ convolution filter with $C$ input channels and $L$ output classes.",
877
+ "bbox": [
878
+ 71,
879
+ 358,
880
+ 491,
881
+ 446
882
+ ],
883
+ "page_idx": 4
884
+ },
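A minimal sketch of the 1×1 convolution and softmax output layer of Equation 8, assuming PyTorch; names are illustrative:

```python
import torch
import torch.nn as nn

class PredictionHead(nn.Module):
    """Y_hat = softmax(W_1 * f_out + b)   (Equation 8): per-sample class probabilities."""
    def __init__(self, channels: int, num_classes: int):
        super().__init__()
        self.conv = nn.Conv1d(channels, num_classes, kernel_size=1)

    def forward(self, f_out: torch.Tensor) -> torch.Tensor:   # (batch, C, T)
        return torch.softmax(self.conv(f_out), dim=1)          # (batch, L, T)

probs = PredictionHead(64, 5)(torch.randn(4, 64, 1000))
print(probs.shape, probs.sum(dim=1).allclose(torch.ones(4, 1000)))  # probabilities sum to 1
```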
885
+ {
886
+ "type": "text",
887
+ "text": "3.5 Multi-stage models: prediction refinement",
888
+ "text_level": 1,
889
+ "bbox": [
890
+ 71,
891
+ 468,
892
+ 426,
893
+ 483
894
+ ],
895
+ "page_idx": 4
896
+ },
897
+ {
898
+ "type": "text",
899
+ "text": "The initial predictions predictions $\\hat{Y} \\in \\mathbb{R}^{T \\times L}$ are passed through several refinement stages. Each refinement stage contains several TCN blocks, and each stage operates directly on the softmax activations of the previous stage. Formally, this process is defined as:",
900
+ "bbox": [
901
+ 71,
902
+ 486,
903
+ 491,
904
+ 560
905
+ ],
906
+ "page_idx": 4
907
+ },
908
+ {
909
+ "type": "equation",
910
+ "text": "\n$$\n\\hat {Y} ^ {s} = \\Gamma (\\hat {Y} ^ {s - 1}), \\tag {9}\n$$\n",
911
+ "text_format": "latex",
912
+ "bbox": [
913
+ 225,
914
+ 566,
915
+ 488,
916
+ 585
917
+ ],
918
+ "page_idx": 4
919
+ },
920
+ {
921
+ "type": "text",
922
+ "text": "where $\\hat{Y}^s\\in \\mathbb{R}^{T\\times L}$ is the output at stage $s$ , $\\hat{Y}^{s - 1}$ the output of the previous stage, and $\\Gamma$ the single-stage TCN, as explained in section 3.2.",
923
+ "bbox": [
924
+ 71,
925
+ 592,
926
+ 488,
927
+ 637
928
+ ],
929
+ "page_idx": 4
930
+ },
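A minimal sketch of the refinement stages of Equation 9, assuming PyTorch; each stage Γ is any single-stage model that maps class probabilities back to class probabilities (here a trivial 1×1 convolution is used as a stand-in):

```python
import copy
import torch
import torch.nn as nn

class MultiStageRefinement(nn.Module):
    """Y_hat^s = Gamma(Y_hat^{s-1}): each stage operates on the softmax output of
    the previous stage; all stage outputs are kept so each stage can be supervised."""
    def __init__(self, stage: nn.Module, num_stages: int = 3):
        super().__init__()
        self.stages = nn.ModuleList([copy.deepcopy(stage) for _ in range(num_stages)])

    def forward(self, y0: torch.Tensor):
        outputs = [y0]
        for stage in self.stages:
            outputs.append(stage(outputs[-1]))
        return outputs    # [Y_hat^0, Y_hat^1, ..., Y_hat^S]

refine = MultiStageRefinement(nn.Conv1d(5, 5, kernel_size=1), num_stages=3)
outputs = refine(torch.softmax(torch.randn(4, 5, 1000), dim=1))
print(len(outputs))   # 4: the initial prediction plus three refinements
```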
931
+ {
932
+ "type": "text",
933
+ "text": "For the MS-TCN architecture, the initial predictions are generated by the single-stage TCN discussed in Chapter 3.2. For the MS-GCN architecture, the initial predictions are generated by the single-stage ST-GCN discussed in Chapter 3.3. A visual overview of the MS-TCN and MS-GCN network are provided in Figure 3(c) and (e), respectively.",
934
+ "bbox": [
935
+ 71,
936
+ 638,
937
+ 491,
938
+ 726
939
+ ],
940
+ "page_idx": 4
941
+ },
942
+ {
943
+ "type": "text",
944
+ "text": "3.6 Implementation details",
945
+ "text_level": 1,
946
+ "bbox": [
947
+ 71,
948
+ 746,
949
+ 284,
950
+ 760
951
+ ],
952
+ "page_idx": 4
953
+ },
954
+ {
955
+ "type": "text",
956
+ "text": "3.6.1 Loss function",
957
+ "text_level": 1,
958
+ "bbox": [
959
+ 73,
960
+ 765,
961
+ 220,
962
+ 777
963
+ ],
964
+ "page_idx": 4
965
+ },
966
+ {
967
+ "type": "text",
968
+ "text": "The models were trained by minimizing a combined cross-entropy (CE) and mean squared error (MSE) loss. The CE loss was defined as:",
969
+ "bbox": [
970
+ 71,
971
+ 782,
972
+ 491,
973
+ 825
974
+ ],
975
+ "page_idx": 4
976
+ },
977
+ {
978
+ "type": "equation",
979
+ "text": "\n$$\n\\mathcal {L} = \\sum_ {s = 1} ^ {S} \\mathcal {L} _ {s, c l s}, \\tag {10}\n$$\n",
980
+ "text_format": "latex",
981
+ "bbox": [
982
+ 272,
983
+ 832,
984
+ 488,
985
+ 869
986
+ ],
987
+ "page_idx": 4
988
+ },
989
+ {
990
+ "type": "equation",
991
+ "text": "\n$$\n\\mathcal {L} _ {c l s} = \\frac {1}{T} \\sum_ {t} - y _ {t, l} \\log \\left(\\hat {y} _ {t, l}\\right), \\tag {11}\n$$\n",
992
+ "text_format": "latex",
993
+ "bbox": [
994
+ 186,
995
+ 872,
996
+ 488,
997
+ 906
998
+ ],
999
+ "page_idx": 4
1000
+ },
1001
+ {
1002
+ "type": "text",
1003
+ "text": "where $\\mathcal{L}$ is the total loss over all $S$ stages and $\\mathcal{L}_{cls}$ the CE loss with $y_{t,l}$ and $\\hat{y}_{t,l}$ the ground truth label and predicted",
1004
+ "bbox": [
1005
+ 71,
1006
+ 912,
1007
+ 491,
1008
+ 944
1009
+ ],
1010
+ "page_idx": 4
1011
+ },
1012
+ {
1013
+ "type": "text",
1014
+ "text": "probability for class $l$ at sample $t$ , respectively. The combined CE and MSE loss was defined as [15]:",
1015
+ "bbox": [
1016
+ 503,
1017
+ 53,
1018
+ 921,
1019
+ 82
1020
+ ],
1021
+ "page_idx": 4
1022
+ },
1023
+ {
1024
+ "type": "equation",
1025
+ "text": "\n$$\n\\mathcal {L} = \\sum_ {s = 1} ^ {S} \\mathcal {L} _ {s, c l s} + \\lambda \\mathcal {L} _ {s, T - M S E}, \\tag {12}\n$$\n",
1026
+ "text_format": "latex",
1027
+ "bbox": [
1028
+ 607,
1029
+ 88,
1030
+ 921,
1031
+ 127
1032
+ ],
1033
+ "page_idx": 4
1034
+ },
1035
+ {
1036
+ "type": "text",
1037
+ "text": "where $\\mathcal{L}_{T - MSE}$ is the MSE loss and $\\lambda$ is a hyperparameter that determines its contribution. The combined loss was proposed by Farha and Gall to avoid over-segmentation errors [15], which occur when predictions vary at an unrealistically high sample frequency. The MSE term negates this effect by calculating the truncated mean squared error over the sample-wise log probabilities. The MSE loss function is defined as:",
1038
+ "bbox": [
1039
+ 501,
1040
+ 132,
1041
+ 921,
1042
+ 247
1043
+ ],
1044
+ "page_idx": 4
1045
+ },
1046
+ {
1047
+ "type": "equation",
1048
+ "text": "\n$$\n\\mathcal {L} _ {T - M S E} = \\frac {1}{T L} \\sum_ {t, l} \\widetilde {\\Delta} _ {t, l} ^ {2}, \\tag {13}\n$$\n",
1049
+ "text_format": "latex",
1050
+ "bbox": [
1051
+ 643,
1052
+ 252,
1053
+ 921,
1054
+ 287
1055
+ ],
1056
+ "page_idx": 4
1057
+ },
1058
+ {
1059
+ "type": "equation",
1060
+ "text": "\n$$\n\\widetilde {\\Delta} _ {t, l} = \\left\\{ \\begin{array}{l l} \\Delta_ {t, l} & : \\Delta_ {t, l} \\leq \\tau \\\\ \\tau & : o t h e r w i s e \\end{array} , \\right. \\tag {14}\n$$\n",
1061
+ "text_format": "latex",
1062
+ "bbox": [
1063
+ 620,
1064
+ 290,
1065
+ 921,
1066
+ 328
1067
+ ],
1068
+ "page_idx": 4
1069
+ },
1070
+ {
1071
+ "type": "equation",
1072
+ "text": "\n$$\n\\Delta_ {t, l} = \\left| \\log \\left(\\hat {y} _ {t, l}\\right) - \\log \\left(\\hat {y} _ {t - 1, l}\\right) \\right|, \\tag {15}\n$$\n",
1073
+ "text_format": "latex",
1074
+ "bbox": [
1075
+ 606,
1076
+ 330,
1077
+ 921,
1078
+ 347
1079
+ ],
1080
+ "page_idx": 4
1081
+ },
1082
+ {
1083
+ "type": "text",
1084
+ "text": "where $T$ is the sequence length, $L$ is the number of classes, and $\\hat{y}_{t,l}$ is the probability of class $l$ at sample $t$ . The hyperparameter $\\tau$ defines the threshold to truncate the smoothing loss.",
1085
+ "bbox": [
1086
+ 503,
1087
+ 353,
1088
+ 921,
1089
+ 410
1090
+ ],
1091
+ "page_idx": 4
1092
+ },
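As a rough illustration of the combined loss described in the extracted text above (Eqs. 10-15), the following PyTorch sketch sums a sample-wise cross-entropy term and a truncated MSE smoothing term over all stages. The function name, tensor shapes, and the `detach` on the previous-frame log-probabilities are illustrative assumptions and are not taken from the authors' released code.

```python
import torch
import torch.nn.functional as F

def multi_stage_loss(stage_logits, targets, lam=0.15, tau=4.0):
    """Combined CE + truncated MSE (T-MSE) loss, summed over stages.

    stage_logits: list of (N, L, T) tensors, one per stage (unnormalized scores)
    targets:      (N, T) integer class labels
    """
    total = 0.0
    for logits in stage_logits:
        # Sample-wise cross-entropy (Eq. 11), averaged over the T samples.
        ce = F.cross_entropy(logits, targets)

        # Truncated MSE over the sample-wise log-probabilities (Eqs. 13-15).
        log_p = F.log_softmax(logits, dim=1)
        delta = (log_p[:, :, 1:] - log_p[:, :, :-1].detach()).abs()
        delta = torch.clamp(delta, max=tau)          # truncation at tau (Eq. 14)
        t_mse = (delta ** 2).mean()

        total = total + ce + lam * t_mse             # Eq. 12
    return total
```

The smoothing term only penalizes large frame-to-frame jumps in the predicted log-probabilities, which is what discourages over-segmentation without forcing the predictions to be constant.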
1093
+ {
1094
+ "type": "text",
1095
+ "text": "3.6.2 Model hyperparameters",
1096
+ "text_level": 1,
1097
+ "bbox": [
1098
+ 504,
1099
+ 425,
1100
+ 725,
1101
+ 440
1102
+ ],
1103
+ "page_idx": 4
1104
+ },
1105
+ {
1106
+ "type": "text",
1107
+ "text": "To avoid model selection bias for the convolutional models, (i.e., TCN, ST-GCN, MS-TCN, and MS-GCN), the same model hyperparameters were chosen as MS-TCN [15]. More specifically, each layer had 64 filters with a temporal kernel size of 3. All multi-stage models had 1 prediction generation stage and 3 refinement stages, and each stage had 10 layers. The convolutions were acausal, i.e. they could take into account both past and future input features. The dilation factor of the temporal convolutions doubled at each layer, i.e. 1, 2, 4, ..., 512.",
1108
+ "bbox": [
1109
+ 501,
1110
+ 441,
1111
+ 921,
1112
+ 588
1113
+ ],
1114
+ "page_idx": 4
1115
+ },
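The hyperparameters listed in the paragraph above can be collected into a small configuration sketch; the dictionary keys below are illustrative and do not correspond to the authors' code.

```python
# Hypothetical configuration mirroring the stated hyperparameters.
msgcn_config = {
    "filters_per_layer": 64,
    "temporal_kernel_size": 3,
    "num_stages": 4,                            # 1 prediction generation + 3 refinement stages
    "layers_per_stage": 10,
    "dilations": [2 ** i for i in range(10)],   # 1, 2, 4, ..., 512
    "causal": False,                            # acausal: past and future context
}
```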
1116
+ {
1117
+ "type": "text",
1118
+ "text": "For the recurrent model, we followed a configuration that is conventional in MoCap-based action segmentation. For instance, prior work in gait cycle and FOG subtask segmentation used recurrent models of 1-3 LSTM layers of 32 - 128 cells each [22], [26]. For our recurrent model, we used two forward LSTM layers and two backward LSTM layers, each with 64 cells.",
1119
+ "bbox": [
1120
+ 501,
1121
+ 589,
1122
+ 921,
1123
+ 689
1124
+ ],
1125
+ "page_idx": 4
1126
+ },
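A minimal PyTorch sketch of such a recurrent baseline, assuming the skeleton features are flattened to (batch, T, features) before the LSTM; the layer sizes follow the paragraph above, the remaining sizes are placeholders.

```python
import torch.nn as nn

# Two stacked bidirectional LSTM layers (i.e., two forward and two backward
# layers) with 64 cells each, followed by a per-sample classifier.
num_features, num_classes = 54, 12            # placeholder sizes
lstm = nn.LSTM(num_features, 64, num_layers=2, bidirectional=True, batch_first=True)
classifier = nn.Linear(2 * 64, num_classes)   # forward + backward state per sample
```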
1127
+ {
1128
+ "type": "text",
1129
+ "text": "3.6.3 Optimizer hyperparameters",
1130
+ "text_level": 1,
1131
+ "bbox": [
1132
+ 504,
1133
+ 704,
1134
+ 750,
1135
+ 718
1136
+ ],
1137
+ "page_idx": 4
1138
+ },
1139
+ {
1140
+ "type": "text",
1141
+ "text": "The optimizer and loss hyperparameters were also selected according to MS-TCN [15]. For the loss, we set $\\tau = 4$ and $\\lambda = 0.15$ . MS-TCN experiments show that further increasing the value of $\\lambda$ and $\\tau$ worsens the capability of the model in detecting the boundaries between action segments [15]. For the optimizer, we used Adam [27] with a learning rate of 0.0005. All models were trained for 100 epochs with a batch size of 4.",
1142
+ "bbox": [
1143
+ 501,
1144
+ 720,
1145
+ 921,
1146
+ 837
1147
+ ],
1148
+ "page_idx": 4
1149
+ },
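An illustrative training loop matching the stated optimizer settings (Adam, learning rate 0.0005, 100 epochs, batch size 4); `model`, `loader`, and `multi_stage_loss` are assumed to be defined elsewhere and are not part of the original text.

```python
import torch

optimizer = torch.optim.Adam(model.parameters(), lr=5e-4)
for epoch in range(100):
    for features, targets in loader:                  # batches of 4 sequences
        optimizer.zero_grad()
        stage_logits = model(features)                # one logits tensor per stage
        loss = multi_stage_loss(stage_logits, targets, lam=0.15, tau=4.0)
        loss.backward()
        optimizer.step()
```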
1150
+ {
1151
+ "type": "text",
1152
+ "text": "3.6.4 Ablative experiments",
1153
+ "text_level": 1,
1154
+ "bbox": [
1155
+ 504,
1156
+ 851,
1157
+ 705,
1158
+ 866
1159
+ ],
1160
+ "page_idx": 4
1161
+ },
1162
+ {
1163
+ "type": "text",
1164
+ "text": "For MS-GCN, we perform causal versus acausal and regular temporal convolutions versus dilated temporal convolutions ablative experiments. Causal experiments mean that the prediction at sample $t$ depends only $f_{in,0},\\ldots ,f_{in,t}$ , which is important for real-time applications (e.g., in",
1165
+ "bbox": [
1166
+ 503,
1167
+ 869,
1168
+ 921,
1169
+ 944
1170
+ ],
1171
+ "page_idx": 4
1172
+ },
1173
+ {
1174
+ "type": "page_number",
1175
+ "text": "5",
1176
+ "bbox": [
1177
+ 911,
1178
+ 32,
1179
+ 921,
1180
+ 42
1181
+ ],
1182
+ "page_idx": 4
1183
+ },
1184
+ {
1185
+ "type": "text",
1186
+ "text": "robotics) [13]. In acausal mode the model can take into account future observations $f_{in,t+1}, \\ldots, f_{in,T}$ , which is sufficient for post-hoc movement analysis applications. For the regular temporal convolution experiment, we set the dilation rate to 1 in each layer.",
1187
+ "bbox": [
1188
+ 71,
1189
+ 53,
1190
+ 491,
1191
+ 128
1192
+ ],
1193
+ "page_idx": 5
1194
+ },
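The difference between the two ablation settings can be sketched with padding alone: a causal dilated convolution pads only on the left (the past), whereas the acausal variant pads symmetrically so each output also sees future samples. The sizes below are placeholders, not values from the paper.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

conv = nn.Conv1d(64, 64, kernel_size=3, dilation=4)   # no built-in padding
x = torch.randn(1, 64, 1000)                           # (batch, channels, T)
pad = (3 - 1) * 4                                      # (kernel_size - 1) * dilation

y_acausal = conv(F.pad(x, (pad // 2, pad // 2)))       # sees past and future samples
y_causal = conv(F.pad(x, (pad, 0)))                    # output at t sees inputs up to t only

assert y_acausal.shape == y_causal.shape == x.shape    # both preserve the sequence length
```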
1195
+ {
1196
+ "type": "text",
1197
+ "text": "4 EVALUATION",
1198
+ "text_level": 1,
1199
+ "bbox": [
1200
+ 73,
1201
+ 156,
1202
+ 210,
1203
+ 172
1204
+ ],
1205
+ "page_idx": 5
1206
+ },
1207
+ {
1208
+ "type": "text",
1209
+ "text": "We present five datasets for skeleton-based action segmentation. Three of the five datasets are for action segmentation, with each featuring a different skeleton-based representation, i.e. inertial-based (HuGaDB), markerless optical MoCap (PKU-MMDv2), and marker-based optical MoCap (LARa). Two of the five datasets involve typical segmentation tasks commonly used in clinical gait analysis. For these two tasks, additional context regarding the relevance is provided.",
1210
+ "bbox": [
1211
+ 71,
1212
+ 181,
1213
+ 491,
1214
+ 313
1215
+ ],
1216
+ "page_idx": 5
1217
+ },
1218
+ {
1219
+ "type": "text",
1220
+ "text": "4.1 Peking University - Continuous Multi-Modal Human Action Understanding (PKU-MMD v2)",
1221
+ "text_level": 1,
1222
+ "bbox": [
1223
+ 71,
1224
+ 340,
1225
+ 491,
1226
+ 371
1227
+ ],
1228
+ "page_idx": 5
1229
+ },
1230
+ {
1231
+ "type": "text",
1232
+ "text": "PKU-MMD is a benchmark dataset for continuous 3D human action understanding [28]. In this study, we use the smaller phase 2 partition of the dataset. This dataset contains 1009 short video sequences in 52 action categories, performed by 13 subjects in three camera views. MoCap was performed with a Kinect v2 optical marker-less motion capture system at $30\\mathrm{Hz}$ . The Kinect system records the 3-axis locations of 25 major body joints.",
1233
+ "bbox": [
1234
+ 71,
1235
+ 377,
1236
+ 491,
1237
+ 494
1238
+ ],
1239
+ "page_idx": 5
1240
+ },
1241
+ {
1242
+ "type": "text",
1243
+ "text": "4.2 Human Gait Database (HuGaDB)",
1244
+ "text_level": 1,
1245
+ "bbox": [
1246
+ 73,
1247
+ 521,
1248
+ 354,
1249
+ 536
1250
+ ],
1251
+ "page_idx": 5
1252
+ },
1253
+ {
1254
+ "type": "text",
1255
+ "text": "HuGaDB is an action segmentation dataset where a total of 18 subjects carried out typical lower limb activities, e.g. walking, running, and cycling [29]. MoCap was performed with 6 inertial measurement units (IMUs) at a sampling frequency of $60\\mathrm{Hz}$ . The IMUs were placed on the right and left thighs, shins and feet. This dataset contains 364 IMU trials in 12 action categories.",
1256
+ "bbox": [
1257
+ 71,
1258
+ 542,
1259
+ 490,
1260
+ 645
1261
+ ],
1262
+ "page_idx": 5
1263
+ },
1264
+ {
1265
+ "type": "text",
1266
+ "text": "4.3 Logistic Activity Recognition Challenge (LARa)",
1267
+ "text_level": 1,
1268
+ "bbox": [
1269
+ 71,
1270
+ 671,
1271
+ 465,
1272
+ 688
1273
+ ],
1274
+ "page_idx": 5
1275
+ },
1276
+ {
1277
+ "type": "text",
1278
+ "text": "LARa is a recently released dataset of subjects carrying out typical warehousing activities [30]. Fourteen subjects carried out a total of eight actions. MoCap was performed by an optical MoCap system that recorded the motion of 39 reflective markers at a sampling frequency of $200\\mathrm{Hz}$ . The optical MoCap system records the 3-axis limb position and 3-axis orientation of 19 limbs. All subjects participated in a total of 30 recordings of 2 minutes each. The actions were performed under three different warehousing scenarios that each aimed to mimic real-world warehousing activities. In scenario 1, subjects 1 to 6 performed 30 recordings, and subjects 7 to 14 performed 2 recordings. Subjects 7 to 14 additionally performed 14 recordings in scenarios 2 and 3. The dataset contains 377 MoCap trials in 8 action categories. The authors proposed to tackle the automated skeleton-based segmentation task with a TCN-based model that classified temporal segments extracted by a sliding window.",
1279
+ "bbox": [
1280
+ 71,
1281
+ 694,
1282
+ 491,
1283
+ 944
1284
+ ],
1285
+ "page_idx": 5
1286
+ },
1287
+ {
1288
+ "type": "text",
1289
+ "text": "4.4 Gait phase and freezing of gait segmentation (FOG-GAIT)",
1290
+ "text_level": 1,
1291
+ "bbox": [
1292
+ 503,
1293
+ 53,
1294
+ 921,
1295
+ 82
1296
+ ],
1297
+ "page_idx": 5
1298
+ },
1299
+ {
1300
+ "type": "text",
1301
+ "text": "Freezing of gait (FOG) and temporal gait disturbances in people with Parkinson's disease (PwPD) are commonly assessed during complex experimental protocols that involve turning with or without a cognitive dual-task [31], [32], which serve as triggers to elicit FOG [33]. The current assessment implies that the gait cycle phases, i.e. double support 1, single support, double support 2, and swing, and the FOG episodes are annotated manually based on the 3D marker trajectories of a motion capture system, and standard camera footage [32], [34]. These time-consuming tasks motivate the search for algorithms to automatically delineate the gait cycle phases and FOG episodes. State-of-the-art deep learning models tackle the gait segmentation task with TCN or LSTM-based models [21], [26].",
1302
+ "bbox": [
1303
+ 501,
1304
+ 95,
1305
+ 921,
1306
+ 300
1307
+ ],
1308
+ "page_idx": 5
1309
+ },
1310
+ {
1311
+ "type": "text",
1312
+ "text": "A proprietary MoCap dataset of seven PwPD and FOG that froze during the protocol was used [31]. The subjects were instructed to complete a standardized protocol consisting of straight-ahead walking, 180 degree turning, and 360 degree turning. The experiments were offered randomly and performed with our without a cognitive dual-task [35]. Two optical markers were placed at a .5m distance from each other on the floor to standardize the turning radius. The data acquisition was further standardized by defining a zone of one meter before and after the turn in which MoCap data was stored. The FOG events and gait cycle phases were visually annotated by an experienced clinical operator. MoCap was performed at a sampling frequency of $100\\mathrm{Hz}$ with a ten camera Vicon motion capture system. Optical markers were placed according to the plugin-gait configuration [36]. This dataset contains 127 MoCap trials in 5 action categories.",
1313
+ "bbox": [
1314
+ 501,
1315
+ 301,
1316
+ 923,
1317
+ 549
1318
+ ],
1319
+ "page_idx": 5
1320
+ },
1321
+ {
1322
+ "type": "text",
1323
+ "text": "4.5 Timed Up-and-Go (TUG) sub-task segmentation",
1324
+ "text_level": 1,
1325
+ "bbox": [
1326
+ 504,
1327
+ 592,
1328
+ 898,
1329
+ 608
1330
+ ],
1331
+ "page_idx": 5
1332
+ },
1333
+ {
1334
+ "type": "text",
1335
+ "text": "The timed up-and-go (TUG) is a commonly used test in clinical practice to evaluate a subjects' functional mobility [37]. During the TUG, subjects carry out several sub-activities that are common in daily life, i.e. sitting, standing up, walking, turning around, walking back, and sitting back down. In clinical practice, the timing of the sub-activities is commonly assessed under clinical supervision. Therefore, there is increased interest in automatic TUG analysis and sub-activity segmentation techniques. State-of-the-art deep learning models tackle this task with LSTM-based models [38].",
1336
+ "bbox": [
1337
+ 501,
1338
+ 621,
1339
+ 921,
1340
+ 781
1341
+ ],
1342
+ "page_idx": 5
1343
+ },
1344
+ {
1345
+ "type": "text",
1346
+ "text": "We used a public dataset that aims to recruit a total of 500 healthy participants (aged 21-80) of Asian ethnicity [39]. At the time of this study, the data of only 10 participants were available. Each participant carried out the TUG 3 times, resulting in a total of 30 recordings. Motion capture was performed with a Qualisys optical motion capture system that recorded the motion of reflective markers at a sampling rate of $200\\mathrm{Hz}$ . The markers were placed according to the modified Calibrated Anatomical System Technique (CAST) [40]. The 6 TUG sub-activities were visually annotated by an experienced clinical operator.",
1347
+ "bbox": [
1348
+ 501,
1349
+ 782,
1350
+ 923,
1351
+ 944
1352
+ ],
1353
+ "page_idx": 5
1354
+ },
1355
+ {
1356
+ "type": "page_number",
1357
+ "text": "6",
1358
+ "bbox": [
1359
+ 911,
1360
+ 32,
1361
+ 921,
1362
+ 42
1363
+ ],
1364
+ "page_idx": 5
1365
+ },
1366
+ {
1367
+ "type": "image",
1368
+ "img_path": "images/f3dd16ae799c71c079388e9ee139cd6b84536a2f98cd09d5a66ddf20686d6bf2.jpg",
1369
+ "image_caption": [
1370
+ "Fig. 5. Different graph representations for (a) PKU-MMD v2, (b) HuGaDB, (c) LARa, (d) FOG-GAIT, (e) TUG. The purple dots represent the nodes of the graph, the purple lines the spatial edges (excluding self-connections), and the green dot the root node."
1371
+ ],
1372
+ "image_footnote": [],
1373
+ "bbox": [
1374
+ 102,
1375
+ 51,
1376
+ 171,
1377
+ 162
1378
+ ],
1379
+ "page_idx": 6
1380
+ },
1381
+ {
1382
+ "type": "image",
1383
+ "img_path": "images/d57c015edb79645ec2df8285ac24acc1c298449ce3ad0d01b289a7b8f998f7f9.jpg",
1384
+ "image_caption": [],
1385
+ "image_footnote": [],
1386
+ "bbox": [
1387
+ 179,
1388
+ 51,
1389
+ 240,
1390
+ 162
1391
+ ],
1392
+ "page_idx": 6
1393
+ },
1394
+ {
1395
+ "type": "image",
1396
+ "img_path": "images/d077367c31265b6afab209d9c95035e75f6592b460552eebdc34919c20c3904d.jpg",
1397
+ "image_caption": [],
1398
+ "image_footnote": [],
1399
+ "bbox": [
1400
+ 254,
1401
+ 51,
1402
+ 308,
1403
+ 162
1404
+ ],
1405
+ "page_idx": 6
1406
+ },
1407
+ {
1408
+ "type": "image",
1409
+ "img_path": "images/65ee51a22f7d6a680ba9e137dff0283e9ea02e625cc0ae78c68c459fa877f6d8.jpg",
1410
+ "image_caption": [],
1411
+ "image_footnote": [],
1412
+ "bbox": [
1413
+ 331,
1414
+ 51,
1415
+ 387,
1416
+ 162
1417
+ ],
1418
+ "page_idx": 6
1419
+ },
1420
+ {
1421
+ "type": "image",
1422
+ "img_path": "images/f1e7adfc31b946a0c0cbc0e3aca49115e1df0e90d9f6bbcbe78e68fdd154c7a9.jpg",
1423
+ "image_caption": [],
1424
+ "image_footnote": [],
1425
+ "bbox": [
1426
+ 408,
1427
+ 51,
1428
+ 462,
1429
+ 162
1430
+ ],
1431
+ "page_idx": 6
1432
+ },
1433
+ {
1434
+ "type": "text",
1435
+ "text": "4.6 Graph representations",
1436
+ "text_level": 1,
1437
+ "bbox": [
1438
+ 73,
1439
+ 247,
1440
+ 282,
1441
+ 262
1442
+ ],
1443
+ "page_idx": 6
1444
+ },
1445
+ {
1446
+ "type": "text",
1447
+ "text": "For each dataset, the skeleton graphs are visualized in Figure 5. For HuGaDB we used the 3-axis accelerometer and 3-axis gyroscope data, for LARa the 3-axis limb position and 3-axis orientation, and for PKU-MMD v2, TUG, and FOG-GAIT we computed the 3-axis displacement and 3-axis relative coordinates (with respect to the root node as visualized in Figure 5) from the 3D joint positions.",
1448
+ "bbox": [
1449
+ 71,
1450
+ 266,
1451
+ 490,
1452
+ 368
1453
+ ],
1454
+ "page_idx": 6
1455
+ },
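For the position-based datasets (PKU-MMD v2, TUG, FOG-GAIT), the displacement and root-relative features described above can be computed along the following lines; the function name and default root index are illustrative assumptions.

```python
import numpy as np

def displacement_and_relative(joints, root_idx=0):
    """joints: (T, N, 3) array of 3D joint positions."""
    disp = np.diff(joints, axis=0, prepend=joints[:1])    # 3-axis displacement per sample
    rel = joints - joints[:, root_idx:root_idx + 1, :]    # 3-axis coordinates relative to the root node
    return np.concatenate([disp, rel], axis=-1)           # (T, N, 6) feature tensor
```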
1456
+ {
1457
+ "type": "text",
1458
+ "text": "4.7 Metrics",
1459
+ "text_level": 1,
1460
+ "bbox": [
1461
+ 73,
1462
+ 386,
1463
+ 171,
1464
+ 400
1465
+ ],
1466
+ "page_idx": 6
1467
+ },
1468
+ {
1469
+ "type": "table",
1470
+ "img_path": "images/4e2dad98e82cec4570b0481b39077f4e5199a039b0347c1e4b8fdbb80c262761.jpg",
1471
+ "table_caption": [
1472
+ "TABLE 1 Dataset characteristics."
1473
+ ],
1474
+ "table_footnote": [],
1475
+ "table_body": "<table><tr><td>Dataset</td><td>Partitions</td><td>SR</td><td>#N</td><td>#Trials</td><td>#L</td></tr><tr><td>PKU-MMD</td><td>3/10</td><td>30 Hz</td><td>25</td><td>234/775</td><td>52</td></tr><tr><td>HuGaDB</td><td>4/18</td><td>60 Hz</td><td>6</td><td>69/307</td><td>12</td></tr><tr><td>LARa</td><td>4/14</td><td>50 Hz</td><td>19</td><td>113/264</td><td>8</td></tr><tr><td>FOG-GAIT</td><td>LOSO</td><td>50 Hz</td><td>9</td><td>127/127</td><td>5</td></tr><tr><td>TUG</td><td>LOSO</td><td>50 Hz</td><td>19</td><td>30/30</td><td>6</td></tr></table>",
1476
+ "bbox": [
1477
+ 102,
1478
+ 454,
1479
+ 457,
1480
+ 525
1481
+ ],
1482
+ "page_idx": 6
1483
+ },
1484
+ {
1485
+ "type": "text",
1486
+ "text": "Overview of the partitioning, sampling rates (SR), number of nodes (#N), number of trials (test/train), and number of classes (#L) across datasets. For the fixed test/train partition of HuGaDB, we selected the first four subjects as test subjects. For the fixed test/train partition of PKU-MMD v2, the 3 test subjects were provided by the authors of the dataset. For LARa, S1-6 and S7-14 perform different experiments. To cover both experiments, we take subjects 5-8 for our test partition.",
1487
+ "bbox": [
1488
+ 91,
1489
+ 527,
1490
+ 491,
1491
+ 621
1492
+ ],
1493
+ "page_idx": 6
1494
+ },
1495
+ {
1496
+ "type": "text",
1497
+ "text": "We follow convention by quantitatively evaluating the predictions with respect to the ground truth annotations by means of a sample-wise and a segment-wise evaluation metric [13], [15]. For the segment-wise metric, we use the F1@50 as proposed by Lea et al. [13]. to compute the segmental metric, a predicted action segment is first classified as a true positive (TP) or false positive (FP) by comparing its intersection over union (IoU) with respect to the corresponding expert annotation. If the IoU crosses a predetermined overlap threshold it is classified as a true positive segment (TP), if it does not, as a false positive segment (FP). The number of false-negative segments (FN) in a trial is calculated by subtracting the number of correctly predicted segments from the number of segments that the experts had demarcated. From the classified segments, the segmental F1-score for each action can be computed as:",
1498
+ "bbox": [
1499
+ 71,
1500
+ 635,
1501
+ 490,
1502
+ 869
1503
+ ],
1504
+ "page_idx": 6
1505
+ },
1506
+ {
1507
+ "type": "equation",
1508
+ "text": "\n$$\nF 1 @ \\tau = \\frac {T P}{T P + \\frac {1}{2} (F P + F N)}, \\tag {16}\n$$\n",
1509
+ "text_format": "latex",
1510
+ "bbox": [
1511
+ 171,
1512
+ 873,
1513
+ 488,
1514
+ 907
1515
+ ],
1516
+ "page_idx": 6
1517
+ },
1518
+ {
1519
+ "type": "text",
1520
+ "text": "where $\\tau$ denotes the IoU overlap.",
1521
+ "bbox": [
1522
+ 71,
1523
+ 912,
1524
+ 308,
1525
+ 926
1526
+ ],
1527
+ "page_idx": 6
1528
+ },
1529
+ {
1530
+ "type": "text",
1531
+ "text": "For the sample-wise metric, we report the accuracy. The",
1532
+ "bbox": [
1533
+ 71,
1534
+ 926,
1535
+ 491,
1536
+ 941
1537
+ ],
1538
+ "page_idx": 6
1539
+ },
1540
+ {
1541
+ "type": "text",
1542
+ "text": "term sample-wise denotes that the metric is computed for each sample or timestep. Unlike the segment-wise metric, the sample-wise accuracy does not heavily penalize over-segmentation errors. Reporting both the sample-wise accuracy and the segment-wise F1@50 thus allows assessment of over-segmentation problems. The sample-wise accuracy is simply computed as the number of correctly classified samples divided by the total number of samples.",
1543
+ "bbox": [
1544
+ 501,
1545
+ 53,
1546
+ 921,
1547
+ 169
1548
+ ],
1549
+ "page_idx": 6
1550
+ },
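A simplified sketch of the two metrics described above: segments are extracted from the label sequences and matched greedily by IoU for F1@τ (Eq. 16), and the sample-wise accuracy is the fraction of correctly labelled samples. The reference implementation by Lea et al. differs in minor details (e.g., how matches are tie-broken), so this is an illustration rather than the exact evaluation code.

```python
import numpy as np

def segments(labels):
    """Split a per-sample label sequence into (label, start, end) segments."""
    segs, start = [], 0
    for t in range(1, len(labels) + 1):
        if t == len(labels) or labels[t] != labels[start]:
            segs.append((labels[start], start, t))
            start = t
    return segs

def f1_at_tau(gt, pred, tau=0.5):
    gt_segs, pred_segs = segments(gt), segments(pred)
    used, tp = [False] * len(gt_segs), 0
    for lab, s, e in pred_segs:
        best_iou, best_i = 0.0, -1
        for i, (g_lab, g_s, g_e) in enumerate(gt_segs):
            if g_lab != lab or used[i]:
                continue
            inter = max(0, min(e, g_e) - max(s, g_s))
            iou = inter / (max(e, g_e) - min(s, g_s))
            if iou > best_iou:
                best_iou, best_i = iou, i
        if best_iou >= tau:                      # true positive segment
            tp, used[best_i] = tp + 1, True
    fp, fn = len(pred_segs) - tp, len(gt_segs) - tp
    return tp / (tp + 0.5 * (fp + fn) + 1e-8)    # Eq. 16

def sample_accuracy(gt, pred):
    return float(np.mean(np.asarray(gt) == np.asarray(pred)))
```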
1551
+ {
1552
+ "type": "text",
1553
+ "text": "All use cases were evaluated by assessing the generalization of the models to previously unseen subjects. For the three larger action segmentation datasets, we used a fixed test/train partition. For the two smaller gait analysis datasets, we used a leave one subject out (LOSO) cross-validation approach.",
1554
+ "bbox": [
1555
+ 503,
1556
+ 170,
1557
+ 921,
1558
+ 257
1559
+ ],
1560
+ "page_idx": 6
1561
+ },
1562
+ {
1563
+ "type": "text",
1564
+ "text": "The three high sample rate marker-based MoCap datasets were resampled to $50\\mathrm{Hz}$ . No additional pre-processing was performed. A summary is provided in Table 1.",
1565
+ "bbox": [
1566
+ 503,
1567
+ 257,
1568
+ 921,
1569
+ 301
1570
+ ],
1571
+ "page_idx": 6
1572
+ },
1573
+ {
1574
+ "type": "text",
1575
+ "text": "4.8 Statistics",
1576
+ "text_level": 1,
1577
+ "bbox": [
1578
+ 504,
1579
+ 321,
1580
+ 616,
1581
+ 335
1582
+ ],
1583
+ "page_idx": 6
1584
+ },
1585
+ {
1586
+ "type": "text",
1587
+ "text": "We aim to determine if the differences in predictive performance between the five architectures is statistically significant. Several statistical methods have been proposed to compare machine learning algorithms [41], [42]. Demšar and Garcia et al. recommend the non-parametric Friedman test [43], with the corresponding post-hoc tests, for the comparison of more than two classifiers over multiple datasets or trials. We used Friedman's test to evaluate the null hypothesis that there is no difference in the classification performance of the five architectures on a particular dataset. The post-hoc tests were used to evaluate the null hypothesis that there is no difference in the classification performance between the proposed MS-GCN model and the four baselines on a particular dataset. The post-hoc hypotheses were corrected for multiple comparisons, as defined in Li [38].",
1588
+ "bbox": [
1589
+ 501,
1590
+ 340,
1591
+ 923,
1592
+ 559
1593
+ ],
1594
+ "page_idx": 6
1595
+ },
1596
+ {
1597
+ "type": "text",
1598
+ "text": "All statistical analyses were performed using the scmamp package, version 0.2.55 [44], within The R programming language, version 4.0.3 [45]. The scmamp package implemented Friedman's test according to the version by Demšar [41] and the post-hoc tests according to the version by Garcia et al. [42]. The significance level of all tests was set at the $95\\%$ level $(p \\leq 0.05)$ .",
1599
+ "bbox": [
1600
+ 503,
1601
+ 559,
1602
+ 921,
1603
+ 662
1604
+ ],
1605
+ "page_idx": 6
1606
+ },
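For readers who prefer Python over R, the omnibus test can be reproduced along the following lines with SciPy; the paper itself used the scmamp package in R, and the scores below are random placeholders rather than the reported results.

```python
import numpy as np
from scipy.stats import friedmanchisquare

rng = np.random.default_rng(0)
scores = rng.random((30, 5))        # rows: evaluation trials, columns: the five models

stat, p = friedmanchisquare(*(scores[:, i] for i in range(scores.shape[1])))
print(f"Friedman chi-square = {stat:.2f}, p = {p:.4f}")
```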
1607
+ {
1608
+ "type": "text",
1609
+ "text": "5 RESULTS",
1610
+ "text_level": 1,
1611
+ "bbox": [
1612
+ 504,
1613
+ 683,
1614
+ 612,
1615
+ 698
1616
+ ],
1617
+ "page_idx": 6
1618
+ },
1619
+ {
1620
+ "type": "text",
1621
+ "text": "5.1 Comparison with the four baselines",
1622
+ "text_level": 1,
1623
+ "bbox": [
1624
+ 503,
1625
+ 704,
1626
+ 808,
1627
+ 719
1628
+ ],
1629
+ "page_idx": 6
1630
+ },
1631
+ {
1632
+ "type": "text",
1633
+ "text": "Results on all five datasets in terms of the segment-wise F1@50 and sample-wise accuracy (Acc) are shown in Table 2. Figure 6 gives a visual overview of the segmentation results for MS-GCN. Sample-wise accuracy and segmentwise F1@50 for each sequence are included for comparison. The results of the statistical hypotheses tests and the spread across evaluation trials are visualized in Figure 7. All methods were evaluated in acausal mode.",
1634
+ "bbox": [
1635
+ 501,
1636
+ 723,
1637
+ 921,
1638
+ 839
1639
+ ],
1640
+ "page_idx": 6
1641
+ },
1642
+ {
1643
+ "type": "text",
1644
+ "text": "The results suggest that MS-GCN outperforms the four baseline approaches across all tasks on most metrics. Figure 6 indicates that MS-GCN enables a near perfect action segmentation on HuGaDB, FOG-GAIT, and TUG. The Friedman test was statistically significant at the $95\\%$ level for both metrics on all but the TUG dataset, for which the accuracy was found to not be significant. We thus reject the null",
1645
+ "bbox": [
1646
+ 501,
1647
+ 840,
1648
+ 923,
1649
+ 941
1650
+ ],
1651
+ "page_idx": 6
1652
+ },
1653
+ {
1654
+ "type": "page_number",
1655
+ "text": "7",
1656
+ "bbox": [
1657
+ 911,
1658
+ 32,
1659
+ 921,
1660
+ 42
1661
+ ],
1662
+ "page_idx": 6
1663
+ },
1664
+ {
1665
+ "type": "table",
1666
+ "img_path": "images/03d5593649accc5b82bc8d8126b4e45ac59d5a9c1a5d37a07871bdc7b5dec936.jpg",
1667
+ "table_caption": [
1668
+ "TABLE2 Action segmentation results."
1669
+ ],
1670
+ "table_footnote": [
1671
+ "Skeleton-based action segmentation results on PKU-MMD v2, HuGaDB, LARa, FOG-GAIT, and TUG. All results are quantified in terms of segment-wise F1@50 and sample-wise accuracy (Acc)."
1672
+ ],
1673
+ "table_body": "<table><tr><td>PKU-MMD</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>22.7</td><td>59.6</td></tr><tr><td>TCN</td><td>13.8</td><td>61.9</td></tr><tr><td>ST-GCN</td><td>15.5</td><td>64.9</td></tr><tr><td>MS-TCN</td><td>46.3</td><td>65.5</td></tr><tr><td>MS-GCN</td><td>51.6</td><td>68.5</td></tr><tr><td>HuGaDB</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>81.5</td><td>86.1</td></tr><tr><td>TCN</td><td>56.8</td><td>88.3</td></tr><tr><td>ST-GCN</td><td>67.7</td><td>88.7</td></tr><tr><td>MS-TCN</td><td>89.9</td><td>86.8</td></tr><tr><td>MS-GCN</td><td>93.0</td><td>90.4</td></tr><tr><td>LARa</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>32.3</td><td>63.9</td></tr><tr><td>TCN</td><td>20.0</td><td>61.5</td></tr><tr><td>ST-GCN</td><td>25.8</td><td>67.9</td></tr><tr><td>MS-TCN</td><td>39.6</td><td>65.8</td></tr><tr><td>MS-GCN</td><td>43.6</td><td>65.6</td></tr><tr><td>FOG-GAIT</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>92.1</td><td>90.6</td></tr><tr><td>TCN</td><td>89.9</td><td>89.8</td></tr><tr><td>ST-GCN</td><td>90.8</td><td>89.4</td></tr><tr><td>MS-TCN</td><td>92.5</td><td>86.7</td></tr><tr><td>MS-GCN</td><td>95.0</td><td>90.1</td></tr><tr><td>TUG</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>97.1</td><td>93.2</td></tr><tr><td>TCN</td><td>84.4</td><td>92.7</td></tr><tr><td>ST-GCN</td><td>93.8</td><td>93.2</td></tr><tr><td>MS-TCN</td><td>96.5</td><td>92.7</td></tr><tr><td>MS-GCN</td><td>97.9</td><td>93.6</td></tr></table>",
1674
+ "bbox": [
1675
+ 184,
1676
+ 90,
1677
+ 377,
1678
+ 483
1679
+ ],
1680
+ "page_idx": 7
1681
+ },
1682
+ {
1683
+ "type": "text",
1684
+ "text": "hypothesis that there is no difference in the classification performance among the five models on all but the TUG dataset. The post-hoc tests between MS-GCN and the second best model were statistically significant at the $95\\%$ level level for the F1@50 metric on PKU-MMD and FOG-GAIT, and for the accuracy on PKU-MMD. For these tasks, we thus reject the null hypotheses that there is no difference in the classification performance between MS-GCN and the second best model.",
1685
+ "bbox": [
1686
+ 71,
1687
+ 551,
1688
+ 490,
1689
+ 683
1690
+ ],
1691
+ "page_idx": 7
1692
+ },
1693
+ {
1694
+ "type": "text",
1695
+ "text": "5.2 Effect of the refinement stages",
1696
+ "text_level": 1,
1697
+ "bbox": [
1698
+ 73,
1699
+ 724,
1700
+ 344,
1701
+ 739
1702
+ ],
1703
+ "page_idx": 7
1704
+ },
1705
+ {
1706
+ "type": "text",
1707
+ "text": "Notice that the single-stage models (ST-GCN and TCN) and the multi-stage models (MS-GCN and MS-TCN) achieve similar sample-wise accuracy but very different F1@50 scores. The statistical tests confirm these observations, as the difference in F1@50 between ST-GCN and MS-GCN was statistically significant at the $95\\%$ level across all datasets except for the TUG dataset, while the difference in sample-wise accuracy varied with MS-GCN performing significantly better (PKU-MMD and FOG-GAIT), no significant effect (HuGaDB and TUG), and ST-GCN performing significantly better (LARa). These results indicate that the addition of the refinement stages (i.e., multi-stage models) significantly reduces the number of segmentation errors.",
1708
+ "bbox": [
1709
+ 71,
1710
+ 752,
1711
+ 490,
1712
+ 941
1713
+ ],
1714
+ "page_idx": 7
1715
+ },
1716
+ {
1717
+ "type": "image",
1718
+ "img_path": "images/2777167c3880143987b28efa9c08ecb521c3c52b435716e75b011aa2cc4a31be.jpg",
1719
+ "image_caption": [
1720
+ "Fig. 6. Visual overview of action segmentation results for one sequence of each use-case. From top to bottom: (a) PKU-MMD v2, (b) HuGaDB, (c) LARa, (d) FOG-GAIT, and (e) TUG. For each use-case, the first sequence represents the GT and the second the segmentation by MSGCN. For the visualized sequences, the segment-wise F1@50 score is given after the GT sequence and the sample-wise accuracy after the predicted sequence."
1721
+ ],
1722
+ "image_footnote": [],
1723
+ "bbox": [
1724
+ 539,
1725
+ 56,
1726
+ 890,
1727
+ 422
1728
+ ],
1729
+ "page_idx": 7
1730
+ },
1731
+ {
1732
+ "type": "text",
1733
+ "text": "5.3 Effect of the graph convolutions",
1734
+ "text_level": 1,
1735
+ "bbox": [
1736
+ 504,
1737
+ 542,
1738
+ 784,
1739
+ 556
1740
+ ],
1741
+ "page_idx": 7
1742
+ },
1743
+ {
1744
+ "type": "text",
1745
+ "text": "Notice that the graph convolutional models (ST-GCN and MS-GCN) outperform the regular convolutional models (TCN and MS-TCN) on all tasks. This effect was found to have a higher impact on the sample-wise accuracy than on the number of segmentation errors (F1@50). The statistical tests confirm these observations, as the difference between MS-GCN and MS-TCN was statistically significant at the $95\\%$ level on two datasets (PKU-MMD and FOG-GAIT) for the F1@50 and on three datasets (PKU-MMD, HuGaDB, and FOG-GAIT) for the sample-wise accuracy. These results confirm that it is beneficial to explicitly model the spatial hierarchy among the joints or limbs in skeleton-based action segmentation tasks.",
1746
+ "bbox": [
1747
+ 501,
1748
+ 561,
1749
+ 921,
1750
+ 752
1751
+ ],
1752
+ "page_idx": 7
1753
+ },
1754
+ {
1755
+ "type": "text",
1756
+ "text": "5.4 Ablative experiments",
1757
+ "text_level": 1,
1758
+ "bbox": [
1759
+ 504,
1760
+ 773,
1761
+ 702,
1762
+ 787
1763
+ ],
1764
+ "page_idx": 7
1765
+ },
1766
+ {
1767
+ "type": "text",
1768
+ "text": "5.4.1 Effect of the dilated convolutions",
1769
+ "text_level": 1,
1770
+ "bbox": [
1771
+ 504,
1772
+ 792,
1773
+ 782,
1774
+ 806
1775
+ ],
1776
+ "page_idx": 7
1777
+ },
1778
+ {
1779
+ "type": "text",
1780
+ "text": "Ablative experiments for MS-GCN were carried out to assess the effect of the introduced dilated temporal convolutions in the prediction generation stage. According to the results in Table 3, it is evident that the introduced dilation within the ST-GCN layers of the prediction generation stage has a positive effect on both metrics across all datasets. The drop in performance with regular convolutions is due to the fact that without dilation the initial predictions are generated based on limited temporal context.",
1781
+ "bbox": [
1782
+ 501,
1783
+ 811,
1784
+ 921,
1785
+ 941
1786
+ ],
1787
+ "page_idx": 7
1788
+ },
1789
+ {
1790
+ "type": "page_number",
1791
+ "text": "8",
1792
+ "bbox": [
1793
+ 911,
1794
+ 32,
1795
+ 921,
1796
+ 42
1797
+ ],
1798
+ "page_idx": 7
1799
+ },
1800
+ {
1801
+ "type": "image",
1802
+ "img_path": "images/dfaa8e133d776d4cfdc1e8c0ebdbac69b3bdb573cf329995a0bb4cdf762fb2d2.jpg",
1803
+ "image_caption": [
1804
+ "Fig. 7. Boxplots to visualize the spread in the segment-wise F1@50 (top row) and the sample-wise accuracy (bottom row) across trials per dataset. Significance levels were visualized as: $p \\leq 0.01^{(**)}$ , $p \\leq 0.05^{(**)}$ , $p \\leq 0.1^{(*)}$ , and no significance (#). The Friedman test was significant at the $p \\leq 0.01^{(**)}$ level for all but the TUG dataset, for which the significance of the F1@50 was found to be $p \\leq 0.05^{(**)}$ and not significant for accuracy. The significance level of the post-hoc tests with respect to the MS-GCN model (corrected for multiple-comparisons) are visualized above their respective boxplot. No post-hoc tests were performed for the sample-wise accuracy of the TUG dataset since the Friedman test was not significant at the 95% level ( $p \\leq 0.05^{(**)}$ )."
1805
+ ],
1806
+ "image_footnote": [],
1807
+ "bbox": [
1808
+ 81,
1809
+ 50,
1810
+ 915,
1811
+ 304
1812
+ ],
1813
+ "page_idx": 8
1814
+ },
1815
+ {
1816
+ "type": "table",
1817
+ "img_path": "images/d60cf67b7f1066693e6012bb47de97c58c5523de914d43bb07ea1033b76d2f47.jpg",
1818
+ "table_caption": [
1819
+ "TABLE 3 MS-GCN: Effect of the dilated convolutions in the prediction generation stage."
1820
+ ],
1821
+ "table_footnote": [],
1822
+ "table_body": "<table><tr><td colspan=\"3\">Regular convolutions</td><td colspan=\"2\">Dilated convolutions</td></tr><tr><td>Dataset</td><td>F1@50</td><td>Acc</td><td>F1@50</td><td>Acc</td></tr><tr><td>PKU-MMD</td><td>44.8</td><td>68.4</td><td>51.6</td><td>68.5</td></tr><tr><td>HuGaDB</td><td>75.5</td><td>83.8</td><td>93.0</td><td>90.4</td></tr><tr><td>LARa</td><td>37.5</td><td>57.0</td><td>43.6</td><td>65.6</td></tr><tr><td>FOG-GAIT</td><td>88.1</td><td>85.7</td><td>95.0</td><td>90.1</td></tr><tr><td>TUG</td><td>85.8</td><td>90.0</td><td>97.9</td><td>93.6</td></tr></table>",
1823
+ "bbox": [
1824
+ 91,
1825
+ 463,
1826
+ 468,
1827
+ 547
1828
+ ],
1829
+ "page_idx": 8
1830
+ },
1831
+ {
1832
+ "type": "text",
1833
+ "text": "5.4.2 Causal versus acausal convolutions",
1834
+ "text_level": 1,
1835
+ "bbox": [
1836
+ 73,
1837
+ 571,
1838
+ 375,
1839
+ 585
1840
+ ],
1841
+ "page_idx": 8
1842
+ },
1843
+ {
1844
+ "type": "text",
1845
+ "text": "We perform causal versus acausal experiments for MS-GCN. According to the results in Table 4, MS-GCN with acausal temporal convolutions performs much better than the causal variant. The effect is larger on the segment-wise metric than the sample-wise metric. This verifies that future context is important for determining plausible action durations and accurate boundaries between action segments.",
1846
+ "bbox": [
1847
+ 71,
1848
+ 589,
1849
+ 490,
1850
+ 691
1851
+ ],
1852
+ "page_idx": 8
1853
+ },
1854
+ {
1855
+ "type": "table",
1856
+ "img_path": "images/ab73e0e529f3de954e54c19d252c11e7419d849cd9f3ddc8c041ee8038ae41f5.jpg",
1857
+ "table_caption": [
1858
+ "TABLE 4 MS-GCN: Causal versus acausal temporal convolutions."
1859
+ ],
1860
+ "table_footnote": [],
1861
+ "table_body": "<table><tr><td></td><td colspan=\"2\">Causal convolutions</td><td colspan=\"2\">Acausal convolutions</td></tr><tr><td>Dataset</td><td>F1@50</td><td>Acc</td><td>F1@50</td><td>Acc</td></tr><tr><td>PKU-MMD</td><td>24.8</td><td>58.2</td><td>51.6</td><td>68.5</td></tr><tr><td>HuGaDB</td><td>65.6</td><td>85.7</td><td>93.0</td><td>90.4</td></tr><tr><td>LARa</td><td>18.5</td><td>57.0</td><td>43.6</td><td>65.6</td></tr><tr><td>FOG-GAIT</td><td>85.7</td><td>89.0</td><td>95.0</td><td>90.1</td></tr><tr><td>TUG</td><td>88.3</td><td>91.5</td><td>97.9</td><td>93.6</td></tr></table>",
1862
+ "bbox": [
1863
+ 94,
1864
+ 747,
1865
+ 467,
1866
+ 830
1867
+ ],
1868
+ "page_idx": 8
1869
+ },
1870
+ {
1871
+ "type": "text",
1872
+ "text": "6 CONCLUSION",
1873
+ "text_level": 1,
1874
+ "bbox": [
1875
+ 73,
1876
+ 864,
1877
+ 215,
1878
+ 878
1879
+ ],
1880
+ "page_idx": 8
1881
+ },
1882
+ {
1883
+ "type": "text",
1884
+ "text": "This paper evaluated a neural network architecture for skeleton-based action segmentation, termed multi-stage spatial-temporal graph convolutional network (MS-GCN), that we initially developed for freezing of gait assessment",
1885
+ "bbox": [
1886
+ 71,
1887
+ 883,
1888
+ 491,
1889
+ 944
1890
+ ],
1891
+ "page_idx": 8
1892
+ },
1893
+ {
1894
+ "type": "text",
1895
+ "text": "in Parkinson's disease [16]. The developed architecture amalgamates three architectural elaborations based on the current best practices in convolutional neural network design. We presented five challenging use-cases of skeleton-based action segmentation in human action understanding and clinical gait analysis. The results indicated that our framework statistically outperformed four strong baselines on four of the five datasets. For the fifth dataset, i.e. the segmentation of TUG sub-activities, the task was found to be too simple, resulting in minimal to no statistical effect in the predictive performance of the models. The experimental evaluation demonstrated the benefit of the three architectural elaborations for detecting accurate action sequences with precise temporal boundaries. In conclusion, we believe that the MS-GCN framework is a formidable baseline for skeleton-based action segmentation tasks.",
1896
+ "bbox": [
1897
+ 501,
1898
+ 412,
1899
+ 924,
1900
+ 647
1901
+ ],
1902
+ "page_idx": 8
1903
+ },
1904
+ {
1905
+ "type": "text",
1906
+ "text": "REFERENCES",
1907
+ "text_level": 1,
1908
+ "bbox": [
1909
+ 504,
1910
+ 674,
1911
+ 619,
1912
+ 688
1913
+ ],
1914
+ "page_idx": 8
1915
+ },
1916
+ {
1917
+ "type": "list",
1918
+ "sub_type": "ref_text",
1919
+ "list_items": [
1920
+ "[1] M. Al-Amri, K. Nicholas, K. Button, V. Sparkes, L. Sheeran, and J. L. Davies, \"Inertial measurement units for clinical movement analysis: Reliability and concurrent validity,\" Sensors, vol. 18, no. 3, Feb. 2018.",
1921
+ "[2] N. Mahmood, N. Ghorbani, N. F. Troje, G. Pons-Moll, and M. Black, \"AMASS: Archive of motion capture as surface shapes,\" in 2019 IEEE/CVF International Conference on Computer Vision (ICCV). IEEE, Oct. 2019.",
1922
+ "[3] J. Shotton, R. Girshick, A. Fitzgibbon, T. Sharp, M. Cook, M. Finocchio, R. Moore, P. Kohli, A. Criminisi, A. Kipman, and A. Blake, \"Efficient human pose estimation from single depth images,\" IEEE Trans. Pattern Anal. Mach. Intell., vol. 35, no. 12, pp. 2821-2840, Dec. 2013.",
1923
+ "[4] Z. Cao, G. Hidalgo, T. Simon, S. E. Wei, and Y. Sheikh, \"Openpose: Realtime multi-person 2d pose estimation using part affinity fields,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 43, no. 1, pp. 172-186, 2021.",
1924
+ "[5] B. Fernando, E. Gavves, M. José Oramas, A. Ghodrati, and T. Tuytelaars, \"Modeling video evolution for action recognition,\" in 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Jun. 2015, pp. 5378-5387."
1925
+ ],
1926
+ "bbox": [
1927
+ 504,
1928
+ 699,
1929
+ 923,
1930
+ 941
1931
+ ],
1932
+ "page_idx": 8
1933
+ },
1934
+ {
1935
+ "type": "page_number",
1936
+ "text": "9",
1937
+ "bbox": [
1938
+ 911,
1939
+ 32,
1940
+ 921,
1941
+ 42
1942
+ ],
1943
+ "page_idx": 8
1944
+ },
1945
+ {
1946
+ "type": "list",
1947
+ "sub_type": "ref_text",
1948
+ "list_items": [
1949
+ "[6] A. Shahroudy, J. Liu, T.-T. Ng, and G. Wang, \"NTU RGB+D: A large scale dataset for 3D human activity analysis,\" pp. 1010-1019, Apr. 2016.",
1950
+ "[7] S. Yan, Y. Xiong, and D. Lin, \"Spatial temporal graph convolutional networks for skeleton-based action recognition,\" in AAAI, 2018.",
1951
+ "[8] M. Defferrard, X. Bresson, and P. Vandergheynst, \"Convolutional neural networks on graphs with fast localized spectral filtering,\" in Proceedings of the 30th International Conference on Neural Information Processing Systems, ser. NIPS'16. Red Hook, NY, USA: Curran Associates Inc., Dec. 2016, pp. 3844-3852.",
1952
+ "[9] T. N. Kipf and M. Welling, \"Semi-Supervised Classification with Graph Convolutional Networks,\" International conference on learning representation, p. 14, 2017.",
1953
+ "[10] B. Singh, T. K. Marks, M. Jones, O. Tuzel, and M. Shao, \"A multi-stream bi-directional recurrent neural network for Fine-Grained action detection,\" in 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Jun. 2016, pp. 1961-1970.",
1954
+ "[11] L. Sun, K. Jia, D.-Y. Yeung, and B. E. Shi, \"Human action recognition using factorized Spatio-Temporal convolutional networks,\" in 2015 IEEE International Conference on Computer Vision (ICCV), Dec. 2015, pp. 4597-4605.",
1955
+ "[12] R. Yao, G. Lin, Q. Shi, and D. C. Ranasinghe, \"Efficient dense labelling of human activity sequences from wearables using fully convolutional networks,\" Pattern Recognit., vol. 78, pp. 252-266, Jun. 2018.",
1956
+ "[13] C. Lea, M. D. Flynn, R. Vidal, A. Reiter, and G. D. Hager, \"Temporal convolutional networks for action segmentation and detection,\" Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, vol. 2017-January, pp. 1003-1012, 2017.",
1957
+ "[14] F. Yu and V. Koltun, \"Multi-Scale context aggregation by dilated convolutions,\" pre-print, Nov. 2015.",
1958
+ "[15] Y. A. Farha and J. Gall, \"Ms-tcn: Multi-stage temporal convolutional network for action segmentation,\" in 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 3570-3579.",
1959
+ "[16] B. Filtjens, P. Ginis, A. Nieuwboer, P. Slaets, and B. Vanrumste, \"Automated freezing of gait assessment with marker-based motion capture and multi-stage spatial-temporal graph convolutional neural networks,\" J. Neuroeng. Rehabil., vol. 19, no. 1, p. 48, May 2022.",
1960
+ "[17] I. Goodfellow, Y. Bengio, and A. Courville, Deep Learning. The MIT Press, 2016.",
1961
+ "[18] Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner, \"Gradient-based learning applied to document recognition,\" Proc. IEEE, vol. 86, no. 11, pp. 2278-2324, Nov. 1998.",
1962
+ "[19] S. Bai, J. Zico Kolter, and V. Koltun, \"An empirical evaluation of generic convolutional and recurrent networks for sequence modeling,\" Mar. 2018.",
1963
+ "[20] A. Graves and J. Schmidhuber, \"Frameswise phoneme classification with bidirectional LSTM and other neural network architectures,\" Neural Netw., vol. 18, no. 5-6, pp. 602-610, Jun. 2005.",
1964
+ "[21] B. Filtjens, A. Nieuwboer, N. D'cruz, J. Spildooren, P. Slaets, and B. Vanrumste, \"A data-driven approach for detecting gait events during turning in people with parkinson's disease and freezing of gait,\" Gait Posture, vol. 80, pp. 130-136, Jul. 2020.",
1965
+ "[22] Y. Matsushita, D. T. Tran, H. Yamazoe, and J.-H. Lee, \"Recent use of deep learning techniques in clinical applications based on gait: a survey,\" Journal of Computational Design and Engineering, vol. 8, no. 6, pp. 1499-1532, Oct. 2021.",
1966
+ "[23] N. Cheema, S. Hosseini, J. Sprenger, E. Herrmann, H. Du, K. Fischer, and P. Slusallek, \"Dilated temporal Fully-Convolutional network for semantic segmentation of motion capture data,\" Jun. 2018.",
1967
+ "[24] S. Ioffe and C. Szegedy, \"Batch normalization: Accelerating deep network training by reducing internal covariate shift,\" in Proceedings of the 32nd International Conference on International Conference on Machine Learning (ICML), 2015.",
1968
+ "[25] M. Schuster and K. K. Paliwal, \"Bidirectional recurrent neural networks,\" Trans. Sig. Proc., vol. 45, no. 11, pp. 2673-2681, Nov. 1997.",
1969
+ "[26] L. Kidzinski, S. Delp, and M. Schwartz, \"Automatic real-time gait event detection in children using deep neural networks,\" PLoS One, vol. 14, no. 1, p. e0211466, Jan. 2019.",
1970
+ "[27] D. P. Kingma and J. Ba, \"Adam: A method for stochastic optimization,\" pre-print, Dec. 2014."
1971
+ ],
1972
+ "bbox": [
1973
+ 76,
1974
+ 54,
1975
+ 491,
1976
+ 941
1977
+ ],
1978
+ "page_idx": 9
1979
+ },
1980
+ {
1981
+ "type": "list",
1982
+ "sub_type": "ref_text",
1983
+ "list_items": [
1984
+ "[28] C. Liu, Y. Hu, Y. Li, S. Song, and J. Liu, \"PKU-MMD: A large scale benchmark for Skeleton-Based human action understanding,\" in Proceedings of the Workshop on Visual Analysis in Smart and Connected Communities, ser. VSCC '17. New York, NY, USA: Association for Computing Machinery, Oct. 2017, pp. 1-8.",
1985
+ "[29] R. Chereshev and A. Kertesz-Farkas, \"Hugadb: Human gait database for activity recognition from wearable inertial sensor networks,\" in AIST, 2017.",
1986
+ "[30] F. Niemann, C. Reining, F. Moya Rueda, N. R. Nair, J. A. Steffens, G. A. Fink, and M. Ten Hompel, \"LARa: Creating a dataset for human activity recognition in logistics using semantic attributes,\" Sensors, vol. 20, no. 15, Jul. 2020.",
1987
+ "[31] J. Spildooren, S. Vercruysse, K. Desloovere, W. Vandenberghe, E. Kerckhofs, and A. Nieuwboer, \"Freezing of gait in parkinson's disease: the impact of dual-tasking and turning,\" Mov. Disord., vol. 25, no. 15, pp. 2563-2570, Nov. 2010.",
1988
+ "[32] A. Nieuwboer, R. Dom, W. De Weerdt, K. Desloovere, S. Fieuws, and E. Broens-Kaucsik, \"Abnormalities of the spatiotemporal characteristics of gait at the onset of freezing in parkinson's disease,\" Mov. Disord., vol. 16, no. 6, pp. 1066-1075, Nov. 2001.",
1989
+ "[33] J. D. Schaafsma, Y. Balash, T. Gurevich, A. L. Bartels, J. M. Hausdorff, and N. Giladi, \"Characterization of freezing of gait subtypes and the response of each to levodopa in parkinson's disease,\" Eur. J. Neurol., vol. 10, no. 4, pp. 391-398, Jul. 2003.",
1990
+ "[34] M. Gilat, \"How to annotate freezing of gait from video: A standardized method using Open-Source software,\" J. Parkinsons. Dis., vol. 9, no. 4, pp. 821-824, 2019.",
1991
+ "[35] A. Bowen, R. Wenman, J. Mickelborough, J. Foster, E. Hill, and R. Tallis, \"Dual-task effects of talking while walking on velocity and balance following a stroke,\" Age Ageing, vol. 30, no. 4, pp. 319-323, Jul. 2001.",
1992
+ "[36] R. B. Davis, S. Ōunpuu, D. Tyburski, and J. R. Gage, \"A gait analysis data collection and reduction technique,\" Hum. Mov. Sci., vol. 10, no. 5, pp. 575-587, Oct. 1991.",
1993
+ "[37] D. Podsiadlo and S. Richardson, \"The timed \"up & go\": a test of basic functional mobility for frail elderly persons,\" J. Am. Geriatr. Soc., vol. 39, no. 2, pp. 142-148, Feb. 1991.",
1994
+ "[38] T. Li, J. Chen, C. Hu, Y. Ma, Z. Wu, W. Wan, Y. Huang, F. Jia, C. Gong, S. Wan, and L. Li, \"Automatic timed Up-and-Go SubTask segmentation for parkinson's disease patients using Video-Based activity classification,\" IEEE Trans. Neural Syst. Rehabil. Eng., vol. 26, no. 11, pp. 2189-2199, Nov. 2018.",
1995
+ "[39] P. Liang, W. H. Kwong, A. Sidarta, C. K. Yap, W. K. Tan, L. S. Lim, P. Y. Chan, C. W. K. Kuah, S. K. Wee, K. Chua, C. Quek, and W. T. Ang, \"An asian-centric human movement database capturing activities of daily living,\" Sci Data, vol. 7, no. 1, p. 290, Sep. 2020.",
1996
+ "[40] A. Cappozzo, F. Catani, U. D. Croce, and A. Leardini, \"Position and orientation in space of bones during movement: anatomical frame definition and determination,\" Clin. Biomech., vol. 10, no. 4, pp. 171-178, Jun. 1995.",
1997
+ "[41] J. Demšar, \"Statistical comparisons of classifiers over multiple data sets,\" J. Mach. Learn. Res., vol. 7, no. 1, pp. 1-30, 2006.",
1998
+ "[42] S. García, A. Fernández, J. Luengo, and F. Herrera, \"Advanced nonparametric tests for multiple comparisons in the design of experiments in computational intelligence and data mining: Experimental analysis of power,\" Inf. Sci., vol. 180, no. 10, pp. 2044-2064, May 2010.",
1999
+ "[43] M. Friedman, \"The use of ranks to avoid the assumption of normality implicit in the analysis of variance,\" J. Am. Stat. Assoc., vol. 32, no. 200, pp. 675-701, Dec. 1937.",
2000
+ "[44] C. Borja and S. Guzman, \"scmamp: Statistical comparison of multiple algorithms in multiple problems,\" *The R Journal*, vol. Accepted for publication, 2015.",
2001
+ "[45] R Core Team, R: A Language and Environment for Statistical Computing, R Foundation for Statistical Computing, Vienna, Austria, 2013. [Online]. Available: http://www.R-project.org/"
2002
+ ],
2003
+ "bbox": [
2004
+ 506,
2005
+ 54,
2006
+ 921,
2007
+ 818
2008
+ ],
2009
+ "page_idx": 9
2010
+ },
2011
+ {
2012
+ "type": "text",
2013
+ "text": "Benjamin Filtjens received a MSc in Mechanical Engineering Technology from Hasselt University in 2017. He is currently a Ph.D. student working towards automated at-home freezing of gait assessment, at KU Leuven. He is part of the eMedia research lab at the Department of Electrical Engineering (ESAT) and the intelligent mobile platform research group at the Department of Mechanical Engineering, both from",
2014
+ "bbox": [
2015
+ 501,
2016
+ 871,
2017
+ 921,
2018
+ 941
2019
+ ],
2020
+ "page_idx": 9
2021
+ },
2022
+ {
2023
+ "type": "page_number",
2024
+ "text": "10",
2025
+ "bbox": [
2026
+ 906,
2027
+ 32,
2028
+ 921,
2029
+ 42
2030
+ ],
2031
+ "page_idx": 9
2032
+ },
2033
+ {
2034
+ "type": "text",
2035
+ "text": "KU Leuven campus Group T. At Group T, he teaches mathematical modelling, advanced automation engineering, and deep learning in health technologies. His research interests are deep learning, explainable artificial intelligence, and mobile robots in general, and ICT applications for automatic and objective gait and freezing of gait assessment in particular.",
2036
+ "bbox": [
2037
+ 76,
2038
+ 55,
2039
+ 488,
2040
+ 125
2041
+ ],
2042
+ "page_idx": 10
2043
+ },
2044
+ {
2045
+ "type": "text",
2046
+ "text": "Bart Vanrumste received a MSc in Electrical Engineering and MSc in Biomedical Engineering both from Ghent University in 1994 and 1998, respectively. In 2001 he received a Ph.D. in Engineering from the same institute. He worked as a post-doctoral fellow from 2001 until 2003 at the Electrical and Computer Engineering Department of the University Of Canterbury, New Zealand. From 2003 until 2005 he was post-doctoral fellow at the Department of Electrical Engineering (ESAT) in the STADIUS division at KU Leuven. In 2005 he was appointed faculty member initially at University of Applied Sciences Thomas More and since 2013 in the Faculty of Engineering Technology of KU Leuven. He is member of the eMedia research lab and member of the ESAT-STADIUS division. His current research activities focus on multimodal sensor integration. He is senior member of IEEE Engineering in Medicine and Biology Society.",
2047
+ "bbox": [
2048
+ 75,
2049
+ 304,
2050
+ 488,
2051
+ 464
2052
+ ],
2053
+ "page_idx": 10
2054
+ },
2055
+ {
2056
+ "type": "text",
2057
+ "text": "Peter Slaets received a MSc in electrotechnical-mechanical engineering, specialization in datamining and automation, in 2002 from KU Leuven, Leuven, Belgium. In 2005, he became a lecturer at the Katholieke Hogeschool Limburg (KHLIM), Diepenbeek, Belgium, and the Katholieke Hogeschool Kempen (KHK), Geel, Belgium, where he teaches courses in digital electronics, control, and automation. In 2008 He received a Ph.D. in applied sciences from KU Leuven with the title: 'Geometric 3D Model Building from Sensor Measurements Collected during Compliant Motion: Stochastic Filtering and Hardware Architectures'. He is currently an associate professor in the intelligent mobile platform research group at the Department of Mechanical Engineering, KU Leuven. His research focuses on modeling, Bayesian estimation techniques, and mobile platforms in general, with applications in autonomous inland shipping and health monitoring in particular.",
2058
+ "bbox": [
2059
+ 75,
2060
+ 645,
2061
+ 488,
2062
+ 805
2063
+ ],
2064
+ "page_idx": 10
2065
+ },
2066
+ {
2067
+ "type": "page_number",
2068
+ "text": "11",
2069
+ "bbox": [
2070
+ 906,
2071
+ 32,
2072
+ 919,
2073
+ 42
2074
+ ],
2075
+ "page_idx": 10
2076
+ }
2077
+ ]
2202.01xxx/2202.01727/b96cd2bf-bb11-4433-9686-55bf4d6acf3a_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01727/b96cd2bf-bb11-4433-9686-55bf4d6acf3a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2e2722ca651d31b6f8791791658087190baf4f166313c12d6b7a39b88e736b80
3
+ size 1150524
2202.01xxx/2202.01727/full.md ADDED
@@ -0,0 +1,423 @@
1
+ # Skeleton-Based Action Segmentation with Multi-Stage Spatial-Temporal Graph Convolutional Neural Networks
2
+
3
+ Benjamin Filtjens, Bart Vanrumste, Peter Slaets
4
+
5
+ Abstract—The ability to identify and temporally segment fine-grained actions in motion capture sequences is crucial for applications in human movement analysis. Motion capture is typically performed with optical or inertial measurement systems, which encode human movement as a time series of human joint locations and orientations or their higher-order representations. State-of-the-art action segmentation approaches use multiple stages of temporal convolutions. The main idea is to generate an initial prediction with several layers of temporal convolutions and refine these predictions over multiple stages, also with temporal convolutions. Although these approaches capture long-term temporal patterns, the initial predictions do not adequately consider the spatial hierarchy among the human joints. To address this limitation, we recently introduced multi-stage spatial-temporal graph convolutional neural networks (MS-GCN). Our framework replaces the initial stage of temporal convolutions with spatial graph convolutions and dilated temporal convolutions, which better exploit the spatial configuration of the joints and their long-term temporal dynamics. Our framework was compared to four strong baselines on five tasks. Experimental results demonstrate that our framework is a strong baseline for skeleton-based action segmentation.
6
+
7
+ Index Terms—activity segmentation, activity detection, dense labelling, freezing of gait, graph convolutional, MS-GCN, multi-stage, spatial-temporal
8
+
9
+ # 1 INTRODUCTION
10
+
11
+ The automatic identification and localisation of events and actions in long untrimmed motion capture (MoCap) sequences are crucial for various use-cases in human movement analysis. Typically, MoCap is performed with optical or inertial measurement systems, which encode human movement as a time series of human joint locations and orientations or their higher-order representations [1], [2]. The high-dimensional time series registers the articulated motion as a high degree of freedom human skeleton. Therefore, MoCap sequences can be generically regarded as skeleton-like inputs. Given an untrimmed skeleton sequence, we aim to segment every event and action in time. In the literature, this task falls under the domain of skeleton-based action segmentation.
14
+
15
+ Related to this task is the task of skeleton-based action recognition. Unlike action segmentation, action recognition aims to classify actions from short and well-segmented video clips. This domain has made tremendous strides due to the availability of low-cost MoCap approaches. These approaches are driven by pose estimation algorithms, which are a form of marker-less optical MoCap that encode human movement as a time series of human joint locations with a single camera [3], [4]. Human actions can then be recognized by appropriately modelling the high dimensional time series. Earlier methods ignored the spatial hierarchy among the joints and modelled human actions by applying high-level temporal models [5]. Later methods explicitly modelled the natural connection between joints [6]. These methods showed encouraging improvement, which suggests the significance of modelling the spatial hierarchy among the joints. The state-of-the-art approaches are based on the spatial-temporal graph convolutional neural network (STGCN) [7]. These approaches model the skeleton sequences as a spatial-temporal graph. The idea is to construct a graph in which each node corresponds to a human body joint and the edges correspond to the spatial connectivity among the joints and the temporal connectivity of the same joint across time. The spatial-temporal graph can then be modelled by graph neural networks, which generalize convolutional neural networks to graphs of arbitrary structures [8], [9]. However, skeleton-based action segmentation is more challenging than recognition, due to the need for simultaneous recognition and localization. Despite its broad potential in human movement analysis, a proper framework for this task has not yet been established.
16
+
17
+ Within the generic domain of action segmentation, i.e. approaches that are not specifically designed for skeleton data, earlier methods mainly utilized a sliding-window scheme [10], [11]. However, the optimal window size is often a trade-off between model expressivity, i.e. the models' ability to capture long-term temporal context, and the sensitivity of the model to take into account short actions [12]. Recent methods, such as temporal convolutional neural networks (TCN) [13], can operate on untrimmed sequences and classify each time sample, termed action segmentation, for simultaneous action recognition and localisation. TCNs perform dilated temporal convolutions to capture long-term temporal context [14]. In action segmentation the predictions tend to vary at a high temporal frequency, often resulting in over-segmentation errors. To address this problem, the state-of-the-art approach, termed multi-stage temporal convolutional neural networks (MS-TCN), includes refinement stages [15]. The idea is to employ a temporal model to generate an initial prediction and refine these predictions over multiple stages. However, these generic action segmentation approaches do not consider the spatial hierarchy among the skeleton joints.
20
+
21
+ We recently introduced an architecture for clinical freezing of gait (FOG) assessment in Parkinson's disease based on optical marker-based motion capture data, termed multistage spatial-temporal graph convolutional neural network (MS-GCN) [16]. Our architecture amalgamates the best practices in convolutional neural network design to address the task of skeleton-based action segmentation. First, we extended ST-GCN for action segmentation by including dilation on the temporal graph to increase the temporal receptive field [14]. Next, we modified MS-TCN by decoupling the prediction generation stage from the refinement stages, allowing us to address the different goals of these stages. Specifically, we replaced the TCN-based temporal layers that generate an initial prediction by the modified ST-GCN layers to appropriately model the spatial hierarchy among the joints. We hypothesize that MS-GCN is a strong baseline for skeleton-based action segmentation tasks other than FOG assessment and for other MoCap representations than optical marker-based MoCap. To this end, the contribution of the present manuscript is four-fold: (1) We propose MS-GCN as a generic baseline for skeleton-based action segmentation. (2) We introduce five relevant use-cases from four public datasets and one proprietary dataset. The use-cases include three different forms of motion capture, marker-based and marker-less optical MoCap, and inertial-based MoCap. (3) We show that the proposed architecture exceeds the performance of four strong deep learning baseline methods. (4) We publicly release our code and trained models at: https://github.com/BenjaminFiltjens/MS-GCN.
22
+
23
+ # 2 SKELETON-BASED ACTION SEGMENTATION
24
+
25
+ This section first formalizes the problem of skeleton-based action segmentation. Next, we introduce the three distinguishing characteristics of the MS-GCN architecture, which are: (1) dilated temporal convolutions to learn long-term temporal patterns [13], (2) spatial graph convolutions to learn spatial patterns [7], (3) multiple stages of refinement to reduce the number of segmentation errors [15]. These characteristics are further discussed within this section.
26
+
27
+ # 2.1 Problem statement
28
+
29
+ A MoCap sequence can be generically represented as: $f \in \mathbb{R}^{T \times N \times C}$ , where $T$ is the number of samples, $N$ the number of nodes, and $C$ the number of feature channels per node. Note that the number of samples $T$ may vary for each input sequence. Given a MoCap sequence, we aim to infer the class label for each sample $\hat{Y} = \hat{y}_0, \dots, \hat{y}_T$ . The inferred class labels are represented as: $\hat{Y} \in \mathbb{R}^{T \times L}$ , where $\hat{y}_{t,l}$ is the probability of class $l$ at sample $t$ .
30
+
31
+ ![](images/6e8eb79d52e09f270901521a7e84eb60671e23c3540e091210f1b48541098270.jpg)
32
+ Fig. 1. Visual overview of a (dilated) temporal convolutional neural network (TCN). The visualized network is implemented in acausal mode, since the filters take into account future observations $f_{in,t+1}, \ldots, f_{in,T}$ . The first layer has a dilation rate of 1, reducing this layer to a regular convolution. By increasing the dilation rate $d$ throughout the network, the deeper layers can represent a wider range of inputs, thereby expanding the temporal receptive field of the network.
33
+
34
+ # 2.2 Dilated temporal convolution
35
+
36
+ Convolutional neural networks (CNN) are ideal for processing data with a grid-like topology such as time-series (1D CNN) and images (2D CNN) [17]. A CNN learns an expressive representation through alternating convolutional and pooling layers [18]. The pooling layers downsample the temporal representation, allowing the model to capture long-range dependencies at the cost of losing fine-grained information. Recent temporal convolutional neural networks (TCN) omit pooling and instead use dilated convolutions [14] to capture long-range dependencies while keeping the temporal representation intact [15]. For an input feature map $f_{in}$ and a filter $p$ , the dilated convolution on sample $t$ of the feature map is defined as [19]:
37
+
38
+ $$
39
+ \left(f _ {i n} * _ {d} p\right) (t) = \sum_ {i = 0} ^ {k - 1} p (i) \cdot f _ {i n _ {t - d \cdot i}}, \tag {1}
40
+ $$
41
+
42
+ where $*_{d}$ is the dilated convolution operator with dilation rate $d$ , $k$ is the size of the filter (kernel), and $t - d \cdot i$ indicates that the filter in Equation 1 is applied in causal mode, i.e. in the direction of the past. The filter can be implemented in acausal mode, i.e. take into account future observations, by zero-padding symmetrically. By increasing the dilation rate $d$ throughout the network, the deeper layers can represent a wider range of inputs, thereby expanding the temporal receptive field of the network. A visual overview of a dilated TCN is provided in Figure 1.
43
+
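+ To make Equation 1 concrete, the sketch below stacks dilated 1D convolutions whose dilation rate doubles at every layer, using symmetric zero-padding for the acausal mode and left-padding for the causal mode. It is a minimal PyTorch illustration with placeholder layer and channel counts, not the released implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ class DilatedTCN(nn.Module):
+     """Stack of dilated temporal convolutions with a doubling dilation rate."""
+     def __init__(self, in_channels, filters=64, num_layers=10, kernel_size=3, acausal=True):
+         super().__init__()
+         self.acausal = acausal
+         self.layers = nn.ModuleList()
+         for i in range(num_layers):
+             d = 2 ** i                                   # dilation: 1, 2, 4, ...
+             pad = d * (kernel_size - 1) // 2 if acausal else 0
+             self.layers.append(nn.Conv1d(in_channels if i == 0 else filters,
+                                          filters, kernel_size, dilation=d, padding=pad))
+
+     def forward(self, x):                                # x: (batch, C_in, T)
+         for conv in self.layers:
+             if not self.acausal:                         # causal: pad the past side only
+                 x = F.pad(x, (conv.dilation[0] * (conv.kernel_size[0] - 1), 0))
+             x = torch.relu(conv(x))
+         return x                                         # (batch, filters, T)
+
+ x = torch.randn(1, 6, 500)                               # 6 channels, 500 samples (toy input)
+ print(DilatedTCN(in_channels=6)(x).shape)                # torch.Size([1, 64, 500])
+ ```
+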
44
+ # 2.3 Graph convolution
45
+
46
+ Graph convolutional neural networks (GCNs) generalize CNNs to non-euclidean structured data [9]. Yan et al. extended GCNs to exploit the inherent spatial relationship among the joints of a skeleton [7]. Their approach termed spatial-temporal graph convolutional networks (ST-GCN) learns a representation on a graph $G = (V, E, A)$ which takes as input:
47
+
48
+ - A set of nodes $V = \{v_{ti} | t = 1, \dots, T, i = 1, \dots, N\}$ for a skeleton sequence of $N$ joints and $T$ samples.
49
+ - Two sets of edges $E_{S} = \{v_{ti}v_{tj}|(i,j)\in H\}$ and $E_{F} = \{v_{ti}v_{(t + 1)i}\}$ , where $H$ is the set of connected joints. $E_{S}$ refers to the intra-skeleton edges at each frame (spatial dimension), and $E_{F}$ refers to the inter-frame connection of the same joints over all of the frames (temporal dimension).
50
+
51
+ ![](images/576546c365cf513e9bee88d888a5a34d7215036f70fbafb947fb92f3fea42a0e.jpg)
52
+ Fig. 2. Spatial-temporal graph convolutional neural network (ST-GCN). Visual overview of a spatial-temporal graph (a) and spatial partitioning strategy (b). The spatial partitioning strategy has three subsets based on a node's distance with respect to a self-selected root node (green). The three subsets are the node itself (blue), the node closest to the root node (red), and the node furthest from the root node (yellow).
53
+
54
55
+
56
+ - A description of the graph structure in the form of an adjacency matrix $A$ .
57
+
58
+ For instance, Figure 2(a) visualizes the spatial-temporal graph. The joints represent the nodes of the graph (purple nodes), their natural connections are the spatial edges (purple lines), and the connection between adjacent frames are the temporal edges (green lines).
59
+
60
+ In the spatial dimension, the graph convolution operation on node $v_{ti}$ is defined as [7]:
61
+
62
+ $$
63
+ f _ {g c n} \left(v _ {t i}\right) = \sum_ {v _ {t j} \in B \left(v _ {t i}\right)} \frac {1}{Z _ {t i} \left(v _ {t j}\right)} f _ {i n} \left(v _ {t j}\right) \cdot w \left(l _ {t i} \left(v _ {t j}\right)\right), \tag {2}
64
+ $$
65
+
66
+ where $f_{in}$ and $f_{gcn}$ denote the input feature map and output feature map, respectively. The term $B(v_{ti})$ denotes the sampling area of node $v_{ti}$ , with the nodes within the sampling area denoted as $v_{tj}$ . A mapping function $l_{ti}$ is defined to map each node to a unique weight vector $w$ . Figure 2(b) visualizes this strategy for a single frame $t$ , where the kernel size is set as 3 and the sampling area $B$ is partitioned into 3 subsets based on a node's distance with respect to a self-selected root node (green). The three subsets in this partitioning strategy are the node itself (blue), the node closer to the root node (red), and the node further from the root node (yellow). The normalizing term $Z_{ti}(v_{tj})$ is added to balance the contributions of different subsets to the output.
67
+
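+ The partitioning strategy of Figure 2(b) can be expressed as three adjacency subsets per node: the node itself, neighbours that are closer to the root node, and the remaining neighbours. The sketch below builds these subsets from an edge list; the five-node chain and the chosen root are purely illustrative and do not correspond to any of the skeleton graphs used later.
+
+ ```python
+ import numpy as np
+ from collections import deque
+
+ def partition_adjacency(edges, num_nodes, root=0):
+     """Return the 3 adjacency subsets of the spatial partitioning strategy:
+     self-connections, centripetal (closer to the root), centrifugal (the rest)."""
+     A = np.zeros((num_nodes, num_nodes))
+     for i, j in edges:
+         A[i, j] = A[j, i] = 1
+     dist = np.full(num_nodes, np.inf)                  # hop distance to the root node
+     dist[root] = 0
+     queue = deque([root])
+     while queue:                                       # breadth-first search
+         u = queue.popleft()
+         for v in np.flatnonzero(A[u]):
+             if np.isinf(dist[v]):
+                 dist[v] = dist[u] + 1
+                 queue.append(v)
+     A_self = np.eye(num_nodes)
+     A_centripetal = np.zeros_like(A)
+     A_centrifugal = np.zeros_like(A)
+     for i in range(num_nodes):
+         for j in np.flatnonzero(A[i]):
+             if dist[j] < dist[i]:
+                 A_centripetal[i, j] = 1                # neighbour closer to the root
+             else:
+                 A_centrifugal[i, j] = 1                # neighbour at equal or larger distance
+     return np.stack([A_self, A_centripetal, A_centrifugal])   # (3, N, N)
+
+ # toy 5-node chain with node 2 as root
+ print(partition_adjacency([(0, 1), (1, 2), (2, 3), (3, 4)], 5, root=2).shape)   # (3, 5, 5)
+ ```
+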
68
+ # 2.4 Refinement stages
69
+
70
+ As predictions are made at high temporal frequencies, over-segmentation errors, i.e. an action is segmented into multiple shorter actions, often occur. A common strategy to alleviate this problem in pixel-wise labelling of images is to generate an initial prediction, then refine this initial prediction using the interactions between neighbouring pixels [17]. Farha and Gall extend this to action segmentation in time series data [15]. The idea is to stack several predictors that each operates directly on the output of the previous one to incrementally refine the predictions.
71
+
72
+ # 3 DEEP LEARNING MODELS
73
+
74
+ The previous section introduced the three building blocks that characterize the MS-GCN architecture. As the MS-GCN architecture combines the best practices from TCN, ST-GCN, and MS-TCN, we include these as baselines. We additionally include a bidirectional long short-term memory-based network (LSTM) [20], as it is often considered an important baseline in action segmentation of MoCap data [21]-[23].
77
+
78
+ The implementation details of the employed models are visualized in Figure 3. The first layer of all models is a batch normalization (BN) layer that normalizes the inputs and accelerates training [24]. After normalization, the input is reshaped into the accepted formats of the specified models. For the graph-based models (i.e., ST-GCN and MS-GCN), the data is shaped into $T \times N \times C_{in}$ , where $N$ represents the number of nodes, $C_{in}$ the number of input channels, and $T$ the number of samples. For the temporal models (i.e., LSTM, TCN, and MS-TCN), the data is shaped into $T \times C_{in}N$ . For these models, all input node locations are thus concatenated to form the input features at each sample $t$ .
79
+
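+ The two input layouts can be illustrated with a short sketch; the sequence length, node count, and channel count below are placeholders.
+
+ ```python
+ import torch
+
+ T, N, C_in = 500, 25, 6                 # samples, nodes, channels per node (placeholders)
+ x = torch.randn(T, N, C_in)             # one MoCap sequence after batch normalization
+
+ x_graph = x                             # graph models (ST-GCN, MS-GCN): (T, N, C_in)
+ x_temporal = x.reshape(T, N * C_in)     # temporal models (LSTM, TCN, MS-TCN): (T, C_in * N)
+
+ print(x_graph.shape, x_temporal.shape)  # torch.Size([500, 25, 6]) torch.Size([500, 150])
+ ```
+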
80
+ # 3.1 LSTM
81
+
82
+ The first layer of our recurrent model is an LSTM layer, which computes the following function:
83
+
84
+ $$
85
+ i _ {t} = \sigma \left(f _ {i n _ {t}} W _ {i i} + b _ {i i} + h _ {t - 1} W _ {h i} + b _ {h i}\right),
86
+ $$
87
+
88
+ $$
89
+ j _ {t} = \sigma \left(f _ {i n _ {t}} W _ {i f} + b _ {i f} + h _ {t - 1} W _ {h f} + b _ {h f}\right),
90
+ $$
91
+
92
+ $$
93
+ \tilde {c} _ {t} = \tanh \left(f _ {i n _ {t}} W _ {i c} + b _ {i c} + h _ {t - 1} W _ {h c} + b _ {h c}\right),
94
+ $$
95
+
96
+ $$
97
+ o _ {t} = \sigma \left(f _ {i n _ {t}} W _ {i o} + b _ {i o} + h _ {t - 1} W _ {h o} + b _ {h o}\right),
98
+ $$
99
+
100
+ $$
101
+ c _ {t} = j _ {t} \odot c _ {t - 1} + i _ {t} \odot \tilde {c} _ {t},
102
+ $$
103
+
104
+ $$
105
+ h _ {t} = \tanh (c _ {t}) \odot o _ {t},
106
+ $$
107
+
108
+ where $h_t$ is the hidden state at sample $t$ , $c_t$ is the cell state at sample $t$ , $f_{in_t}$ is the input feature map at sample $t$ , and $h_{t-1}$ is the hidden state of the layer at sample $t-1$ . The terms $i_t$ , $j_t$ , and $o_t$ are the input, forget, and output gates, respectively. The terms $\sigma$ , $\tanh$ , and $\odot$ are the sigmoid function, hyperbolic tangent function, and Hadamard product, respectively. The weight matrices are represented by $W$ , with subscripts representing from-to relationships. The LSTM layer above is causal, as the hidden state $h_t$ depends only on $x_0, \ldots, x_t$ . The LSTM can be implemented in acausal mode, i.e., take into account future observations $x_{t+1}, \ldots, x_T$ , by training it in the positive and negative time direction (bidirectional) [20], [25]. The hidden representations of the past and future are then combined through simple concatenation [20]. A visual overview of the (bidirectional) LSTM network is provided in Figure 3(a).
109
+
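+ A minimal PyTorch sketch of this recurrent baseline is given below: a bidirectional LSTM whose forward and backward hidden states are concatenated, followed by a per-sample linear classifier. The layer and cell counts follow Section 3.6.2, but this is an illustrative reimplementation rather than the released code.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class BiLSTMSegmenter(nn.Module):
+     """Bidirectional LSTM that predicts a class score for every sample."""
+     def __init__(self, in_features, num_classes, hidden=64, layers=2):
+         super().__init__()
+         self.lstm = nn.LSTM(in_features, hidden, num_layers=layers,
+                             batch_first=True, bidirectional=True)
+         self.classifier = nn.Linear(2 * hidden, num_classes)   # past + future concatenated
+
+     def forward(self, x):                    # x: (batch, T, C_in * N)
+         h, _ = self.lstm(x)                  # (batch, T, 2 * hidden)
+         return self.classifier(h)            # per-sample class scores (batch, T, L)
+
+ model = BiLSTMSegmenter(in_features=150, num_classes=12)
+ print(model(torch.randn(1, 500, 150)).shape)   # torch.Size([1, 500, 12])
+ ```
+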
110
+ # 3.2 TCN
111
+
112
+ The first layer of the TCN-based model is a $1 \times 1$ convolutional layer that adjusts the input dimension $C_{in}$ to the number of filters $C$ in the network, formalized as:
113
+
114
+ $$
115
+ f _ {a d j} = W _ {1} * f _ {i n} + b, \tag {3}
116
+ $$
117
+
118
+ where $f_{adj} \in \mathbb{R}^{T \times C}$ is the adjusted feature map, $f_{in} \in \mathbb{R}^{T \times C_{in}}$ the input MoCap sequence, $*$ the convolution operator, $b \in \mathbb{R}^C$ the bias term, and $W_1 \in \mathbb{R}^{1 \times C_{in} \times C}$ the weights of the $1 \times 1$ convolution filter with $C_{in}$ input feature channels and $C$ equal to the number of feature channels in the network.
119
+
120
+ ![](images/40b0b1ddb83e6b1b3bab5c2ec8a3d59191a15a655bffe6b5dd1ccaa2314848d0.jpg)
121
+ Fig. 3. Overview of the MS-GCN and the four baseline deep learning models. The models take as input a MoCap sequence and generate as output a sequence of actions. The five deep learning models are: (a) a long short-term memory network (LSTM), (b) a temporal convolutional neural network (TCN), (c) a multi-stage temporal convolutional neural network (MS-TCN), (d) a spatial-temporal graph convolutional neural network (ST-GCN), and (e) a multi-stage spatial-temporal graph convolutional neural network (MS-GCN). The terms BN and $L_{s}$ denote the batch normalization layer and the loss of stage $s$ , respectively.
122
+
123
+ ![](images/42fda8928c55ea91fbc0360595c99d561df4851cc85de9249b01443f01a1665b.jpg)
124
+ Fig. 4. Visual overview of a temporal convolutional (TCN) and spatial-temporal graph convolutional (ST-GCN) block [7]. ST-GCN generates a spatial-temporal feature map by applying a spatial graph convolution (see Figure 2(b)) and a temporal convolution (see Figure 1), both of which are followed by batch normalization (BN) and a ReLU nonlinearity. Moreover, a residual connection is added to each block.
125
+
126
127
+
128
+ The adjusted input is passed through several TCN blocks (visualized in Figure 4). Each TCN block applies a dilated temporal convolution [14], BN, ReLU non-linear activation, and a residual connection between the activation map and the input. Formally, this process is defined as:
129
+
130
+ $$
131
+ f _ {o u t} = \delta \left(B N \left(W * _ {d} f _ {a d j} + b\right)\right) + f _ {a d j}, \tag {4}
132
+ $$
133
+
134
+ where $f_{out} \in \mathbb{R}^{T \times C}$ is the output feature map, $*_d$ the dilated convolution operator, $b \in \mathbb{R}^C$ the bias term, $W \in \mathbb{R}^{k \times C \times C}$ the weights of the dilated convolution filter with kernel size $k$ , and $\delta$ the ReLU function. A visual overview of the TCN-based network is provided in Figure 3(b).
135
+
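+ Equations 3 and 4 can be sketched as follows: a 1 x 1 convolution that adjusts the channel dimension, followed by residual blocks that each apply a dilated temporal convolution, BN, and ReLU. Filter counts and layer depth are the placeholder values of Section 3.6.2; this is not the authors' released code.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class TCNBlock(nn.Module):
+     """Residual block of Equation 4: dilated conv -> BN -> ReLU, plus skip connection."""
+     def __init__(self, channels, kernel_size=3, dilation=1):
+         super().__init__()
+         pad = dilation * (kernel_size - 1) // 2            # acausal, length-preserving
+         self.conv = nn.Conv1d(channels, channels, kernel_size, dilation=dilation, padding=pad)
+         self.bn = nn.BatchNorm1d(channels)
+
+     def forward(self, f_adj):                              # (batch, C, T)
+         return torch.relu(self.bn(self.conv(f_adj))) + f_adj
+
+ class SingleStageTCN(nn.Module):
+     def __init__(self, in_channels, filters=64, num_layers=10):
+         super().__init__()
+         self.adjust = nn.Conv1d(in_channels, filters, kernel_size=1)   # Equation 3
+         self.blocks = nn.Sequential(*[TCNBlock(filters, dilation=2 ** i)
+                                       for i in range(num_layers)])
+
+     def forward(self, f_in):                               # (batch, C_in, T)
+         return self.blocks(self.adjust(f_in))              # (batch, filters, T)
+
+ print(SingleStageTCN(in_channels=150)(torch.randn(2, 150, 500)).shape)   # (2, 64, 500)
+ ```
+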
136
+ # 3.3 ST-GCN
137
+
138
+ The first layer of the ST-GCN-based model is a $1 \times 1$ convolutional layer that adjusts the input dimension $C_{in}$ to the number of filters $C$ in the network, formalized as:
141
+
142
+ $$
143
+ f _ {a d j} = W _ {1} * f _ {i n} + b, \tag {5}
144
+ $$
145
+
146
+ where $f_{adj} \in \mathbb{R}^{T \times N \times C}$ is the adjusted feature map, $f_{in} \in \mathbb{R}^{T \times N \times C_{in}}$ the input MoCap sequence, $*$ the convolution operator, $b \in \mathbb{R}^C$ the bias term, $W_1 \in \mathbb{R}^{1 \times 1 \times C_{in} \times C}$ the weights of the $1 \times 1$ convolution filter with $C_{in}$ input feature channels and $C$ equal to the number of feature channels in the network.
147
+
148
+ The adjusted input is passed through several ST-GCN blocks (visualized in Figure 4) [7]. Each ST-GCN block first applies a graph convolution, transforming Equation 2 into:
149
+
150
+ $$
151
+ f _ {g c n} = \sum_ {p} A _ {p} f _ {a d j} W _ {p} M _ {p}, \tag {6}
152
+ $$
153
+
154
+ where $f_{adj} \in \mathbb{R}^{T \times N \times C}$ is the adjusted input feature map, $f_{gcn} \in \mathbb{R}^{T \times N \times C}$ the output feature map of the spatial graph convolution, and $W_p$ the $1 \times 1 \times C \times C$ weight matrix. The matrix $A_p \in \{0,1\}^{N \times N}$ is the adjacency matrix, which represents the spatial connection between the joints. In Equation 6, the adjacency matrix is symmetrically normalized as $D_p^{-\frac{1}{2}} A_p D_p^{-\frac{1}{2}}$ , where $D_p$ is the diagonal node degree matrix. This normalization prevents changing the scale of the features based on the number of connections [9]. The graph is partitioned into three subsets $p$ according to the spatial partitioning strategy visualized in Figure 2(b) [7]. There are thus three different weight matrices $W_p$ that allow modelling of relative properties between the nodes. The matrix $M_p$ is a learnable $N \times N$ attention mask that indicates the importance of each node and its spatial partitions.
155
+
156
+ Next, after passing through a BN layer and ReLU non-linearity, the ST-GCN block performs a dilated temporal convolution. The dilated temporal convolution is, in turn, passed through a BN layer and ReLU non-linearity, and lastly, a residual connection is added between the activation map and the input. This process is formalized as:
157
+
158
+ $$
159
+ f _ {o u t} = \delta \left(B N \left(W * _ {d} f _ {g c n} + b\right)\right) + f _ {a d j}, \tag {7}
160
+ $$
161
+
162
+ where $f_{out} \in \mathbb{R}^{T \times N \times C}$ is the output feature map, $*_d$ the dilated convolution operator, $b \in \mathbb{R}^C$ the bias term, $W \in \mathbb{R}^{k \times 1 \times C \times C}$ the weights of the dilated convolution filter with kernel size $k$ . The output feature map is passed through a spatial pooling layer that aggregates the spatial features among the $N$ joints. A visual overview of the ST-GCN-based network is provided in Figure 3(d).
163
+
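+ A sketch of Equations 6 and 7 is given below. The spatial graph convolution is written as an einsum over the three partition subsets with a learnable mask $M_p$; for brevity the adjacency is normalized by the node degree rather than symmetrically, and all layer sizes are placeholders, so this approximates the described block rather than reproducing the released implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SpatialGraphConv(nn.Module):
+     """Equation 6: sum over partitions p of  A_p * f_adj * W_p * M_p."""
+     def __init__(self, in_channels, out_channels, A):      # A: (3, N, N) partition subsets
+         super().__init__()
+         deg = A.sum(-1, keepdim=True).clamp(min=1)
+         self.register_buffer('A_norm', A / deg)             # degree normalization (simplified)
+         self.M = nn.Parameter(torch.ones_like(A))            # learnable edge-importance mask
+         self.conv = nn.Conv2d(in_channels, out_channels * A.size(0), kernel_size=1)
+         self.out_channels = out_channels
+
+     def forward(self, x):                                    # x: (batch, C, T, N)
+         b, _, t, n = x.shape
+         x = self.conv(x).view(b, -1, self.out_channels, t, n)   # (batch, 3, C_out, T, N)
+         return torch.einsum('bpctn,pnm->bctm', x, self.A_norm * self.M)
+
+ class STGCNBlock(nn.Module):
+     """Spatial graph convolution followed by a dilated temporal convolution (Equation 7)."""
+     def __init__(self, channels, A, kernel_size=3, dilation=1):
+         super().__init__()
+         self.gcn = SpatialGraphConv(channels, channels, A)
+         pad = dilation * (kernel_size - 1) // 2
+         self.tcn = nn.Conv2d(channels, channels, (kernel_size, 1),
+                              dilation=(dilation, 1), padding=(pad, 0))
+         self.bn1, self.bn2 = nn.BatchNorm2d(channels), nn.BatchNorm2d(channels)
+
+     def forward(self, x):                                    # x: (batch, C, T, N)
+         h = torch.relu(self.bn1(self.gcn(x)))
+         return torch.relu(self.bn2(self.tcn(h))) + x         # residual connection
+
+ A = torch.rand(3, 25, 25).round()                            # toy partition subsets
+ print(STGCNBlock(64, A, dilation=2)(torch.randn(2, 64, 500, 25)).shape)   # (2, 64, 500, 25)
+ ```
+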
164
+ # 3.4 Single-stage models: sample-based prediction
165
+
166
+ The three aforementioned single-stage models map an input skeleton sequence $f_{in}$ to a hidden representation $f_{out} \in \mathbb{R}^{T \times C}$ , with $C$ determined by the number of convolutional filters (ST-GCN and TCN) or the number of hidden units (LSTM), and length $T$ the same as the input sequence. The hidden representation of each model is passed through a $1 \times 1$ convolution and a softmax activation function to get the probabilities for the $L$ output classes for each sample in-time, formalized as:
167
+
168
+ $$
169
+ \hat {Y} = \zeta \left(W _ {1} * f _ {\text {o u t}} + b\right), \tag {8}
170
+ $$
171
+
172
+ where $\hat{Y} \in \mathbb{R}^{T \times L}$ are the class probabilities at each sample $t$ , $f_{out}$ the hidden output representation of the single stage models, $*$ the convolution operator, $b \in \mathbb{R}^L$ the bias term, $\zeta$ the softmax function, $W_1 \in \mathbb{R}^{1 \times C \times L}$ the weights of the $1 \times 1$ convolution filter with $C$ input channels and $L$ output classes.
173
+
174
+ # 3.5 Multi-stage models: prediction refinement
175
+
176
+ The initial predictions $\hat{Y} \in \mathbb{R}^{T \times L}$ are passed through several refinement stages. Each refinement stage contains several TCN blocks, and each stage operates directly on the softmax activations of the previous stage. Formally, this process is defined as:
177
+
178
+ $$
179
+ \hat {Y} ^ {s} = \Gamma (\hat {Y} ^ {s - 1}), \tag {9}
180
+ $$
181
+
182
+ where $\hat{Y}^s\in \mathbb{R}^{T\times L}$ is the output at stage $s$ , $\hat{Y}^{s - 1}$ the output of the previous stage, and $\Gamma$ the single-stage TCN, as explained in Section 3.2.
183
+
184
+ For the MS-TCN architecture, the initial predictions are generated by the single-stage TCN discussed in Section 3.2. For the MS-GCN architecture, the initial predictions are generated by the single-stage ST-GCN discussed in Section 3.3. A visual overview of the MS-TCN and MS-GCN networks is provided in Figure 3(c) and (e), respectively.
185
+
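+ The multi-stage idea of Equations 8 and 9 is sketched below. The first stage maps the input features to class logits; every following stage takes the softmax of the previous stage's logits as input and re-predicts them. The `stage` helper is a plain dilated convolution stack standing in for the single-stage TCN or ST-GCN (BN and residual connections are omitted for brevity), and the list of per-stage outputs is what the multi-stage loss in Section 3.6.1 operates on.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ def stage(in_ch, out_ch, filters=64, layers=10):
+     """Placeholder single-stage model: 1x1 adjust, dilated convs, 1x1 classifier (Eq. 8)."""
+     mods = [nn.Conv1d(in_ch, filters, 1)]
+     for i in range(layers):
+         d = 2 ** i
+         mods += [nn.Conv1d(filters, filters, 3, dilation=d, padding=d), nn.ReLU()]
+     return nn.Sequential(*mods, nn.Conv1d(filters, out_ch, 1))
+
+ class MultiStageModel(nn.Module):
+     def __init__(self, in_channels, num_classes, num_stages=4):
+         super().__init__()
+         # stage 1 generates the initial prediction (an ST-GCN in MS-GCN, a TCN in MS-TCN)
+         self.stages = nn.ModuleList(
+             [stage(in_channels, num_classes)] +
+             [stage(num_classes, num_classes) for _ in range(num_stages - 1)])
+
+     def forward(self, x):                                  # x: (batch, C_in, T)
+         outputs = []
+         for s, refine in enumerate(self.stages):
+             x = refine(x if s == 0 else x.softmax(dim=1))  # Equation 9
+             outputs.append(x)
+         return outputs                                     # logits of every stage
+
+ model = MultiStageModel(in_channels=150, num_classes=12)
+ print([tuple(o.shape) for o in model(torch.randn(2, 150, 500))])
+ ```
+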
186
+ # 3.6 Implementation details
187
+
188
+ # 3.6.1 Loss function
189
+
190
+ The models were trained by minimizing a combined cross-entropy (CE) and mean squared error (MSE) loss. The CE loss was defined as:
191
+
192
+ $$
193
+ \mathcal {L} = \sum_ {s = 1} ^ {S} \mathcal {L} _ {s, c l s}, \tag {10}
194
+ $$
195
+
196
+ $$
197
+ \mathcal {L} _ {c l s} = \frac {1}{T} \sum_ {t} - y _ {t, l} \log \left(\hat {y} _ {t, l}\right), \tag {11}
198
+ $$
199
+
200
+ where $\mathcal{L}$ is the total loss over all $S$ stages and $\mathcal{L}_{cls}$ the CE loss, with $y_{t,l}$ and $\hat{y}_{t,l}$ the ground truth label and predicted probability for class $l$ at sample $t$ , respectively. The combined CE and MSE loss was defined as [15]:
203
+
204
+ $$
205
+ \mathcal {L} = \sum_ {s = 1} ^ {S} \mathcal {L} _ {s, c l s} + \lambda \mathcal {L} _ {s, T - M S E}, \tag {12}
206
+ $$
207
+
208
+ where $\mathcal{L}_{T - MSE}$ is the MSE loss and $\lambda$ is a hyperparameter that determines its contribution. The combined loss was proposed by Farha and Gall to avoid over-segmentation errors [15], which occur when predictions vary at an unrealistically high sample frequency. The MSE term negates this effect by calculating the truncated mean squared error over the sample-wise log probabilities. The MSE loss function is defined as:
209
+
210
+ $$
211
+ \mathcal {L} _ {T - M S E} = \frac {1}{T L} \sum_ {t, l} \widetilde {\Delta} _ {t, l} ^ {2}, \tag {13}
212
+ $$
213
+
214
+ $$
215
+ \widetilde {\Delta} _ {t, l} = \left\{ \begin{array}{l l} \Delta_ {t, l} & : \Delta_ {t, l} \leq \tau \\ \tau & : \text {otherwise} \end{array} \right. , \tag {14}
216
+ $$
217
+
218
+ $$
219
+ \Delta_ {t, l} = \left| \log \left(\hat {y} _ {t, l}\right) - \log \left(\hat {y} _ {t - 1, l}\right) \right|, \tag {15}
220
+ $$
221
+
222
+ where $T$ is the sequence length, $L$ is the number of classes, and $\hat{y}_{t,l}$ is the probability of class $l$ at sample $t$ . The hyperparameter $\tau$ defines the threshold to truncate the smoothing loss.
223
+
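+ Equations 10-15 translate into the following sketch: a sample-wise cross-entropy plus the truncated smoothing term, summed over all stages. Logits are assumed to have the layout (batch, classes, T); the $t-1$ term is detached here (a common implementation choice), although the equations above do not require this.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def segmentation_loss(stage_logits, targets, lam=0.15, tau=4.0):
+     """Combined loss of Equation 12, summed over all stages.
+     stage_logits: list of (batch, L, T) logits, one per stage.
+     targets:      (batch, T) integer class labels."""
+     total = 0.0
+     for logits in stage_logits:
+         ce = F.cross_entropy(logits, targets)                          # Equation 11
+         log_p = F.log_softmax(logits, dim=1)
+         delta = (log_p[:, :, 1:] - log_p.detach()[:, :, :-1]).abs()    # Equation 15
+         t_mse = delta.clamp(max=tau).pow(2).mean()                     # Equations 13-14
+         total = total + ce + lam * t_mse                               # Equation 12
+     return total
+
+ logits = [torch.randn(2, 12, 500, requires_grad=True) for _ in range(4)]
+ targets = torch.randint(0, 12, (2, 500))
+ print(segmentation_loss(logits, targets))
+ ```
+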
224
+ # 3.6.2 Model hyperparameters
225
+
226
+ To avoid model selection bias for the convolutional models (i.e., TCN, ST-GCN, MS-TCN, and MS-GCN), the same model hyperparameters were chosen as for MS-TCN [15]. More specifically, each layer had 64 filters with a temporal kernel size of 3. All multi-stage models had 1 prediction generation stage and 3 refinement stages, and each stage had 10 layers. The convolutions were acausal, i.e. they could take into account both past and future input features. The dilation factor of the temporal convolutions doubled at each layer, i.e. 1, 2, 4, ..., 512.
227
+
228
+ For the recurrent model, we followed a configuration that is conventional in MoCap-based action segmentation. For instance, prior work in gait cycle and FOG subtask segmentation used recurrent models of 1-3 LSTM layers of 32 - 128 cells each [22], [26]. For our recurrent model, we used two forward LSTM layers and two backward LSTM layers, each with 64 cells.
229
+
230
+ # 3.6.3 Optimizer hyperparameters
231
+
232
+ The optimizer and loss hyperparameters were also selected according to MS-TCN [15]. For the loss, we set $\tau = 4$ and $\lambda = 0.15$ . MS-TCN experiments show that further increasing the value of $\lambda$ and $\tau$ worsens the capability of the model in detecting the boundaries between action segments [15]. For the optimizer, we used Adam [27] with a learning rate of 0.0005. All models were trained for 100 epochs with a batch size of 4.
233
+
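+ Collected as a sketch, the training configuration described in Sections 3.6.2-3.6.3 looks as follows; `model` is a placeholder for any of the (multi-stage) models sketched above.
+
+ ```python
+ import torch
+
+ config = dict(
+     filters=64, kernel_size=3,           # per convolutional layer
+     num_stages=4, layers_per_stage=10,   # dilations 1, 2, 4, ..., 512
+     tau=4.0, lam=0.15,                   # smoothing-loss hyperparameters
+     lr=0.0005, epochs=100, batch_size=4,
+ )
+
+ model = torch.nn.Conv1d(6, 12, 1)        # placeholder model
+ optimizer = torch.optim.Adam(model.parameters(), lr=config["lr"])
+ ```
+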
234
+ # 3.6.4 Ablative experiments
235
+
236
+ For MS-GCN, we perform ablative experiments on causal versus acausal convolutions and on regular versus dilated temporal convolutions. In causal mode, the prediction at sample $t$ depends only on $f_{in,0},\ldots ,f_{in,t}$ , which is important for real-time applications (e.g., in robotics) [13]. In acausal mode, the model can take into account future observations $f_{in,t+1}, \ldots, f_{in,T}$ , which is sufficient for post-hoc movement analysis applications. For the regular temporal convolution experiment, we set the dilation rate to 1 in each layer.
239
+
240
+ # 4 EVALUATION
241
+
242
+ We present five datasets for skeleton-based action segmentation. Three of the five datasets cover generic action segmentation, with each featuring a different skeleton-based representation, i.e. inertial-based (HuGaDB), marker-less optical MoCap (PKU-MMD v2), and marker-based optical MoCap (LARa). The other two datasets involve typical segmentation tasks commonly used in clinical gait analysis. For these two tasks, additional context regarding their relevance is provided.
243
+
244
+ # 4.1 Peking University - Continuous Multi-Modal Human Action Understanding (PKU-MMD v2)
245
+
246
+ PKU-MMD is a benchmark dataset for continuous 3D human action understanding [28]. In this study, we use the smaller phase 2 partition of the dataset. This dataset contains 1009 short video sequences in 52 action categories, performed by 13 subjects in three camera views. MoCap was performed with a Kinect v2 optical marker-less motion capture system at $30\mathrm{Hz}$ . The Kinect system records the 3-axis locations of 25 major body joints.
247
+
248
+ # 4.2 Human Gait Database (HuGaDB)
249
+
250
+ HuGaDB is an action segmentation dataset where a total of 18 subjects carried out typical lower limb activities, e.g. walking, running, and cycling [29]. MoCap was performed with 6 inertial measurement units (IMUs) at a sampling frequency of $60\mathrm{Hz}$ . The IMUs were placed on the right and left thighs, shins and feet. This dataset contains 364 IMU trials in 12 action categories.
251
+
252
+ # 4.3 Logistic Activity Recognition Challenge (LARa)
253
+
254
+ LARa is a recently released dataset of subjects carrying out typical warehousing activities [30]. Fourteen subjects carried out a total of eight actions. MoCap was performed by an optical MoCap system that recorded the motion of 39 reflective markers at a sampling frequency of $200\mathrm{Hz}$ . The optical MoCap system records the 3-axis limb position and 3-axis orientation of 19 limbs. All subjects participated in a total of 30 recordings of 2 minutes each. The actions were performed under three different warehousing scenarios that each aimed to mimic real-world warehousing activities. In scenario 1, subjects 1 to 6 performed 30 recordings, and subjects 7 to 14 performed 2 recordings. Subjects 7 to 14 additionally performed 14 recordings in scenarios 2 and 3. The dataset contains 377 MoCap trials in 8 action categories. The authors proposed to tackle the automated skeleton-based segmentation task with a TCN-based model that classified temporal segments extracted by a sliding window.
255
+
256
+ # 4.4 Gait phase and freezing of gait segmentation (FOG-GAIT)
257
+
258
+ Freezing of gait (FOG) and temporal gait disturbances in people with Parkinson's disease (PwPD) are commonly assessed during complex experimental protocols that involve turning with or without a cognitive dual-task [31], [32], which serve as triggers to elicit FOG [33]. The current assessment requires that the gait cycle phases, i.e. double support 1, single support, double support 2, and swing, and the FOG episodes are annotated manually based on the 3D marker trajectories of a motion capture system and standard camera footage [32], [34]. These time-consuming tasks motivate the search for algorithms to automatically delineate the gait cycle phases and FOG episodes. State-of-the-art deep learning models tackle the gait segmentation task with TCN or LSTM-based models [21], [26].
259
+
260
+ A proprietary MoCap dataset of seven PwPD with FOG who froze during the protocol was used [31]. The subjects were instructed to complete a standardized protocol consisting of straight-ahead walking, 180 degree turning, and 360 degree turning. The experiments were offered randomly and performed with or without a cognitive dual-task [35]. Two optical markers were placed at a 0.5 m distance from each other on the floor to standardize the turning radius. The data acquisition was further standardized by defining a zone of one meter before and after the turn in which MoCap data was stored. The FOG events and gait cycle phases were visually annotated by an experienced clinical operator. MoCap was performed at a sampling frequency of $100\mathrm{Hz}$ with a ten-camera Vicon motion capture system. Optical markers were placed according to the plugin-gait configuration [36]. This dataset contains 127 MoCap trials in 5 action categories.
261
+
262
+ # 4.5 Timed Up-and-Go (TUG) sub-task segmentation
263
+
264
+ The timed up-and-go (TUG) is a commonly used test in clinical practice to evaluate a subject's functional mobility [37]. During the TUG, subjects carry out several sub-activities that are common in daily life, i.e. sitting, standing up, walking, turning around, walking back, and sitting back down. In clinical practice, the timing of the sub-activities is commonly assessed under clinical supervision. Therefore, there is increased interest in automatic TUG analysis and sub-activity segmentation techniques. State-of-the-art deep learning models tackle this task with LSTM-based models [38].
265
+
266
+ We used a public dataset that aims to recruit a total of 500 healthy participants (aged 21-80) of Asian ethnicity [39]. At the time of this study, the data of only 10 participants were available. Each participant carried out the TUG 3 times, resulting in a total of 30 recordings. Motion capture was performed with a Qualisys optical motion capture system that recorded the motion of reflective markers at a sampling rate of $200\mathrm{Hz}$ . The markers were placed according to the modified Calibrated Anatomical System Technique (CAST) [40]. The 6 TUG sub-activities were visually annotated by an experienced clinical operator.
267
+
268
+ ![](images/f3dd16ae799c71c079388e9ee139cd6b84536a2f98cd09d5a66ddf20686d6bf2.jpg)
269
+ Fig. 5. Different graph representations for (a) PKU-MMD v2, (b) HuGaDB, (c) LARa, (d) FOG-GAIT, (e) TUG. The purple dots represent the nodes of the graph, the purple lines the spatial edges (excluding self-connections), and the green dot the root node.
270
+
271
+ ![](images/d57c015edb79645ec2df8285ac24acc1c298449ce3ad0d01b289a7b8f998f7f9.jpg)
272
+
273
+ ![](images/d077367c31265b6afab209d9c95035e75f6592b460552eebdc34919c20c3904d.jpg)
274
+
275
+ ![](images/65ee51a22f7d6a680ba9e137dff0283e9ea02e625cc0ae78c68c459fa877f6d8.jpg)
276
+
277
+ ![](images/f1e7adfc31b946a0c0cbc0e3aca49115e1df0e90d9f6bbcbe78e68fdd154c7a9.jpg)
278
+
279
+ # 4.6 Graph representations
280
+
281
+ For each dataset, the skeleton graphs are visualized in Figure 5. For HuGaDB we used the 3-axis accelerometer and 3-axis gyroscope data, for LARa the 3-axis limb position and 3-axis orientation, and for PKU-MMD v2, TUG, and FOG-GAIT we computed the 3-axis displacement and 3-axis relative coordinates (with respect to the root node as visualized in Figure 5) from the 3D joint positions.
282
+
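+ For the position-based datasets, the node features described above can be computed with a few lines; the array layout (T, N, 3) and the root index are assumptions for illustration.
+
+ ```python
+ import numpy as np
+
+ def position_features(joints, root=0):
+     """joints: (T, N, 3) 3D joint positions.
+     Returns (T, N, 6): 3-axis displacement and 3-axis root-relative coordinates."""
+     displacement = np.zeros_like(joints)
+     displacement[1:] = joints[1:] - joints[:-1]            # displacement between samples
+     relative = joints - joints[:, root:root + 1, :]        # coordinates w.r.t. the root node
+     return np.concatenate([displacement, relative], axis=-1)
+
+ print(position_features(np.random.rand(500, 25, 3)).shape)   # (500, 25, 6)
+ ```
+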
283
+ # 4.7 Metrics
284
+
285
+ TABLE 1 Dataset characteristics.
286
+
287
+ <table><tr><td>Dataset</td><td>Partitions</td><td>SR</td><td>#N</td><td>#Trials</td><td>#L</td></tr><tr><td>PKU-MMD</td><td>3/10</td><td>30 Hz</td><td>25</td><td>234/775</td><td>52</td></tr><tr><td>HuGaDB</td><td>4/18</td><td>60 Hz</td><td>6</td><td>69/307</td><td>12</td></tr><tr><td>LARa</td><td>4/14</td><td>50 Hz</td><td>19</td><td>113/264</td><td>8</td></tr><tr><td>FOG-GAIT</td><td>LOSO</td><td>50 Hz</td><td>9</td><td>127/127</td><td>5</td></tr><tr><td>TUG</td><td>LOSO</td><td>50 Hz</td><td>19</td><td>30/30</td><td>6</td></tr></table>
288
+
289
+ Overview of the partitioning, sampling rates (SR), number of nodes (#N), number of trials (test/train), and number of classes (#L) across datasets. For the fixed test/train partition of HuGaDB, we selected the first four subjects as test subjects. For the fixed test/train partition of PKU-MMD v2, the 3 test subjects were provided by the authors of the dataset. For LARa, S1-6 and S7-14 perform different experiments. To cover both experiments, we take subjects 5-8 for our test partition.
290
+
291
+ We follow convention by quantitatively evaluating the predictions with respect to the ground truth annotations by means of a sample-wise and a segment-wise evaluation metric [13], [15]. For the segment-wise metric, we use the F1@50 as proposed by Lea et al. [13]. To compute the segmental metric, a predicted action segment is first classified as a true positive (TP) or false positive (FP) by comparing its intersection over union (IoU) with respect to the corresponding expert annotation. If the IoU crosses a predetermined overlap threshold, it is classified as a true positive segment (TP); if it does not, as a false positive segment (FP). The number of false-negative segments (FN) in a trial is calculated by subtracting the number of correctly predicted segments from the number of segments that the experts had demarcated. From the classified segments, the segmental F1-score for each action can be computed as:
292
+
293
+ $$
294
+ F 1 @ \tau = \frac {T P}{T P + \frac {1}{2} (F P + F N)}, \tag {16}
295
+ $$
296
+
297
+ where $\tau$ denotes the IoU overlap.
298
+
299
+ For the sample-wise metric, we report the accuracy. The term sample-wise denotes that the metric is computed for each sample or timestep. Unlike the segment-wise metric, the sample-wise accuracy does not heavily penalize over-segmentation errors. Reporting both the sample-wise accuracy and the segment-wise F1@50 thus allows assessment of over-segmentation problems. The sample-wise accuracy is simply computed as the number of correctly classified samples divided by the total number of samples.
302
+
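+ The segment-wise metric can be sketched as follows: split the frame-wise labels into segments, match each predicted segment to an unmatched ground-truth segment of the same class by IoU, and plug the TP/FP/FN counts into Equation 16. This is a simplified reimplementation of the procedure of Lea et al. [13] for a single sequence.
+
+ ```python
+ import numpy as np
+
+ def segments(labels):
+     """Split a frame-wise label sequence into (class, start, end) segments."""
+     bounds = np.flatnonzero(np.diff(labels)) + 1
+     starts = np.concatenate(([0], bounds))
+     ends = np.concatenate((bounds, [len(labels)]))
+     return [(labels[s], s, e) for s, e in zip(starts, ends)]
+
+ def f1_at_iou(pred, gt, iou_thr=0.5):
+     """Segment-wise F1@tau (Equation 16) for one sequence."""
+     p_seg, g_seg = segments(np.asarray(pred)), segments(np.asarray(gt))
+     matched = [False] * len(g_seg)
+     tp = fp = 0
+     for c, s, e in p_seg:
+         # IoU of this predicted segment with every ground-truth segment of the same class
+         ious = [(min(e, ge) - max(s, gs)) / (max(e, ge) - min(s, gs))
+                 if c == gc else 0.0 for gc, gs, ge in g_seg]
+         best = int(np.argmax(ious))
+         if ious[best] >= iou_thr and not matched[best]:
+             tp, matched[best] = tp + 1, True
+         else:
+             fp += 1
+     fn = len(g_seg) - tp
+     return tp / (tp + 0.5 * (fp + fn))
+
+ gt   = [0] * 50 + [1] * 100 + [0] * 50
+ pred = [0] * 45 + [1] * 110 + [0] * 45
+ print(round(f1_at_iou(pred, gt), 3))   # 1.0 for this near-perfect toy prediction
+ ```
+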
303
+ All use cases were evaluated by assessing the generalization of the models to previously unseen subjects. For the three larger action segmentation datasets, we used a fixed test/train partition. For the two smaller gait analysis datasets, we used a leave one subject out (LOSO) cross-validation approach.
304
+
305
+ The three high sample rate marker-based MoCap datasets were resampled to $50\mathrm{Hz}$ . No additional pre-processing was performed. A summary is provided in Table 1.
306
+
307
+ # 4.8 Statistics
308
+
309
+ We aim to determine if the differences in predictive performance between the five architectures are statistically significant. Several statistical methods have been proposed to compare machine learning algorithms [41], [42]. Demšar and Garcia et al. recommend the non-parametric Friedman test [43], with the corresponding post-hoc tests, for the comparison of more than two classifiers over multiple datasets or trials. We used Friedman's test to evaluate the null hypothesis that there is no difference in the classification performance of the five architectures on a particular dataset. The post-hoc tests were used to evaluate the null hypothesis that there is no difference in the classification performance between the proposed MS-GCN model and the four baselines on a particular dataset. The post-hoc hypotheses were corrected for multiple comparisons, as defined in Li [38].
310
+
311
+ All statistical analyses were performed using the scmamp package, version 0.2.55 [44], within the R programming language, version 4.0.3 [45]. The scmamp package implemented Friedman's test according to the version by Demšar [41] and the post-hoc tests according to the version by Garcia et al. [42]. The significance level of all tests was set at the $95\%$ level $(p \leq 0.05)$ .
312
+
313
+ # 5 RESULTS
314
+
315
+ # 5.1 Comparison with the four baselines
316
+
317
+ Results on all five datasets in terms of the segment-wise F1@50 and sample-wise accuracy (Acc) are shown in Table 2. Figure 6 gives a visual overview of the segmentation results for MS-GCN. Sample-wise accuracy and segment-wise F1@50 for each sequence are included for comparison. The results of the statistical hypotheses tests and the spread across evaluation trials are visualized in Figure 7. All methods were evaluated in acausal mode.
318
+
319
+ The results suggest that MS-GCN outperforms the four baseline approaches across all tasks on most metrics. Figure 6 indicates that MS-GCN enables near perfect action segmentation on HuGaDB, FOG-GAIT, and TUG. The Friedman test was statistically significant at the $95\%$ level for both metrics on all but the TUG dataset, for which the accuracy was found not to be significant.
320
+
321
+ TABLE 2 Action segmentation results.
322
+
323
+ <table><tr><td>PKU-MMD</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>22.7</td><td>59.6</td></tr><tr><td>TCN</td><td>13.8</td><td>61.9</td></tr><tr><td>ST-GCN</td><td>15.5</td><td>64.9</td></tr><tr><td>MS-TCN</td><td>46.3</td><td>65.5</td></tr><tr><td>MS-GCN</td><td>51.6</td><td>68.5</td></tr><tr><td>HuGaDB</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>81.5</td><td>86.1</td></tr><tr><td>TCN</td><td>56.8</td><td>88.3</td></tr><tr><td>ST-GCN</td><td>67.7</td><td>88.7</td></tr><tr><td>MS-TCN</td><td>89.9</td><td>86.8</td></tr><tr><td>MS-GCN</td><td>93.0</td><td>90.4</td></tr><tr><td>LARa</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>32.3</td><td>63.9</td></tr><tr><td>TCN</td><td>20.0</td><td>61.5</td></tr><tr><td>ST-GCN</td><td>25.8</td><td>67.9</td></tr><tr><td>MS-TCN</td><td>39.6</td><td>65.8</td></tr><tr><td>MS-GCN</td><td>43.6</td><td>65.6</td></tr><tr><td>FOG-GAIT</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>92.1</td><td>90.6</td></tr><tr><td>TCN</td><td>89.9</td><td>89.8</td></tr><tr><td>ST-GCN</td><td>90.8</td><td>89.4</td></tr><tr><td>MS-TCN</td><td>92.5</td><td>86.7</td></tr><tr><td>MS-GCN</td><td>95.0</td><td>90.1</td></tr><tr><td>TUG</td><td>F1@50</td><td>Acc</td></tr><tr><td>Bi-LSTM</td><td>97.1</td><td>93.2</td></tr><tr><td>TCN</td><td>84.4</td><td>92.7</td></tr><tr><td>ST-GCN</td><td>93.8</td><td>93.2</td></tr><tr><td>MS-TCN</td><td>96.5</td><td>92.7</td></tr><tr><td>MS-GCN</td><td>97.9</td><td>93.6</td></tr></table>
324
+
325
+ Skeleton-based action segmentation results on PKU-MMD v2, HuGaDB, LARa, FOG-GAIT, and TUG. All results are quantified in terms of segment-wise F1@50 and sample-wise accuracy (Acc).
326
+
327
+ We thus reject the null hypothesis that there is no difference in the classification performance among the five models on all but the TUG dataset. The post-hoc tests between MS-GCN and the second-best model were statistically significant at the $95\%$ level for the F1@50 metric on PKU-MMD and FOG-GAIT, and for the accuracy on PKU-MMD. For these tasks, we thus reject the null hypotheses that there is no difference in the classification performance between MS-GCN and the second-best model.
328
+
329
+ # 5.2 Effect of the refinement stages
330
+
331
+ Notice that the single-stage models (ST-GCN and TCN) and the multi-stage models (MS-GCN and MS-TCN) achieve similar sample-wise accuracy but very different F1@50 scores. The statistical tests confirm these observations, as the difference in F1@50 between ST-GCN and MS-GCN was statistically significant at the $95\%$ level across all datasets except for the TUG dataset, while the difference in sample-wise accuracy varied with MS-GCN performing significantly better (PKU-MMD and FOG-GAIT), no significant effect (HuGaDB and TUG), and ST-GCN performing significantly better (LARa). These results indicate that the addition of the refinement stages (i.e., multi-stage models) significantly reduces the number of segmentation errors.
332
+
333
+ ![](images/2777167c3880143987b28efa9c08ecb521c3c52b435716e75b011aa2cc4a31be.jpg)
334
+ Fig. 6. Visual overview of action segmentation results for one sequence of each use-case. From top to bottom: (a) PKU-MMD v2, (b) HuGaDB, (c) LARa, (d) FOG-GAIT, and (e) TUG. For each use-case, the first sequence represents the GT and the second the segmentation by MS-GCN. For the visualized sequences, the segment-wise F1@50 score is given after the GT sequence and the sample-wise accuracy after the predicted sequence.
335
+
336
+ # 5.3 Effect of the graph convolutions
337
+
338
+ Notice that the graph convolutional models (ST-GCN and MS-GCN) outperform the regular convolutional models (TCN and MS-TCN) on all tasks. This effect was found to have a higher impact on the sample-wise accuracy than on the number of segmentation errors (F1@50). The statistical tests confirm these observations, as the difference between MS-GCN and MS-TCN was statistically significant at the $95\%$ level on two datasets (PKU-MMD and FOG-GAIT) for the F1@50 and on three datasets (PKU-MMD, HuGaDB, and FOG-GAIT) for the sample-wise accuracy. These results confirm that it is beneficial to explicitly model the spatial hierarchy among the joints or limbs in skeleton-based action segmentation tasks.
339
+
340
+ # 5.4 Ablative experiments
341
+
342
+ # 5.4.1 Effect of the dilated convolutions
343
+
344
+ Ablative experiments for MS-GCN were carried out to assess the effect of the introduced dilated temporal convolutions in the prediction generation stage. According to the results in Table 3, it is evident that the introduced dilation within the ST-GCN layers of the prediction generation stage has a positive effect on both metrics across all datasets. The drop in performance with regular convolutions is due to the fact that without dilation the initial predictions are generated based on limited temporal context.
345
+
346
+ ![](images/dfaa8e133d776d4cfdc1e8c0ebdbac69b3bdb573cf329995a0bb4cdf762fb2d2.jpg)
347
+ Fig. 7. Boxplots to visualize the spread in the segment-wise F1@50 (top row) and the sample-wise accuracy (bottom row) across trials per dataset. Significance levels were visualized as: $p \leq 0.01$ (***), $p \leq 0.05$ (**), $p \leq 0.1$ (*), and no significance (#). The Friedman test was significant at the $p \leq 0.01$ level for all but the TUG dataset, for which the significance of the F1@50 was found to be $p \leq 0.05$ and the accuracy was found to be not significant. The significance levels of the post-hoc tests with respect to the MS-GCN model (corrected for multiple comparisons) are visualized above their respective boxplots. No post-hoc tests were performed for the sample-wise accuracy of the TUG dataset since the Friedman test was not significant at the 95% level ($p \leq 0.05$).
348
+
349
+ TABLE 3 MS-GCN: Effect of the dilated convolutions in the prediction generation stage.
350
+
351
+ <table><tr><td colspan="3">Regular convolutions</td><td colspan="2">Dilated convolutions</td></tr><tr><td>Dataset</td><td>F1@50</td><td>Acc</td><td>F1@50</td><td>Acc</td></tr><tr><td>PKU-MMD</td><td>44.8</td><td>68.4</td><td>51.6</td><td>68.5</td></tr><tr><td>HuGaDB</td><td>75.5</td><td>83.8</td><td>93.0</td><td>90.4</td></tr><tr><td>LARa</td><td>37.5</td><td>57.0</td><td>43.6</td><td>65.6</td></tr><tr><td>FOG-GAIT</td><td>88.1</td><td>85.7</td><td>95.0</td><td>90.1</td></tr><tr><td>TUG</td><td>85.8</td><td>90.0</td><td>97.9</td><td>93.6</td></tr></table>
352
+
353
+ # 5.4.2 Causal versus acausal convolutions
354
+
355
+ We perform causal versus acausal experiments for MS-GCN. According to the results in Table 4, MS-GCN with acausal temporal convolutions performs much better than the causal variant. The effect is larger on the segment-wise metric than the sample-wise metric. This verifies that future context is important for determining plausible action durations and accurate boundaries between action segments.
356
+
357
+ TABLE 4 MS-GCN: Causal versus acausal temporal convolutions.
358
+
359
+ <table><tr><td></td><td colspan="2">Causal convolutions</td><td colspan="2">Acausal convolutions</td></tr><tr><td>Dataset</td><td>F1@50</td><td>Acc</td><td>F1@50</td><td>Acc</td></tr><tr><td>PKU-MMD</td><td>24.8</td><td>58.2</td><td>51.6</td><td>68.5</td></tr><tr><td>HuGaDB</td><td>65.6</td><td>85.7</td><td>93.0</td><td>90.4</td></tr><tr><td>LARa</td><td>18.5</td><td>57.0</td><td>43.6</td><td>65.6</td></tr><tr><td>FOG-GAIT</td><td>85.7</td><td>89.0</td><td>95.0</td><td>90.1</td></tr><tr><td>TUG</td><td>88.3</td><td>91.5</td><td>97.9</td><td>93.6</td></tr></table>
360
+
361
+ # 6 CONCLUSION
362
+
363
+ This paper evaluated a neural network architecture for skeleton-based action segmentation, termed multi-stage spatial-temporal graph convolutional network (MS-GCN), that we initially developed for freezing of gait assessment in Parkinson's disease [16]. The developed architecture amalgamates three architectural elaborations based on the current best practices in convolutional neural network design. We presented five challenging use-cases of skeleton-based action segmentation in human action understanding and clinical gait analysis. The results indicated that our framework statistically outperformed four strong baselines on four of the five datasets. For the fifth dataset, i.e. the segmentation of TUG sub-activities, the task was found to be too simple, resulting in minimal to no statistical effect in the predictive performance of the models. The experimental evaluation demonstrated the benefit of the three architectural elaborations for detecting accurate action sequences with precise temporal boundaries. In conclusion, we believe that the MS-GCN framework is a formidable baseline for skeleton-based action segmentation tasks.
366
+
367
+ # REFERENCES
368
+
369
+ [1] M. Al-Amri, K. Nicholas, K. Button, V. Sparkes, L. Sheeran, and J. L. Davies, "Inertial measurement units for clinical movement analysis: Reliability and concurrent validity," Sensors, vol. 18, no. 3, Feb. 2018.
370
+ [2] N. Mahmood, N. Ghorbani, N. F. Troje, G. Pons-Moll, and M. Black, "AMASS: Archive of motion capture as surface shapes," in 2019 IEEE/CVF International Conference on Computer Vision (ICCV). IEEE, Oct. 2019.
371
+ [3] J. Shotton, R. Girshick, A. Fitzgibbon, T. Sharp, M. Cook, M. Finocchio, R. Moore, P. Kohli, A. Criminisi, A. Kipman, and A. Blake, "Efficient human pose estimation from single depth images," IEEE Trans. Pattern Anal. Mach. Intell., vol. 35, no. 12, pp. 2821-2840, Dec. 2013.
372
+ [4] Z. Cao, G. Hidalgo, T. Simon, S. E. Wei, and Y. Sheikh, "Openpose: Realtime multi-person 2d pose estimation using part affinity fields," IEEE Transactions on Pattern Analysis and Machine Intelligence, vol. 43, no. 1, pp. 172-186, 2021.
373
+ [5] B. Fernando, E. Gavves, M. José Oramas, A. Ghodrati, and T. Tuytelaars, "Modeling video evolution for action recognition," in 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Jun. 2015, pp. 5378-5387.
374
+
375
+ [6] A. Shahroudy, J. Liu, T.-T. Ng, and G. Wang, "NTU RGB+D: A large scale dataset for 3D human activity analysis," in 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2016, pp. 1010-1019.
376
+ [7] S. Yan, Y. Xiong, and D. Lin, "Spatial temporal graph convolutional networks for skeleton-based action recognition," in AAAI, 2018.
377
+ [8] M. Defferrard, X. Bresson, and P. Vandergheynst, "Convolutional neural networks on graphs with fast localized spectral filtering," in Proceedings of the 30th International Conference on Neural Information Processing Systems, ser. NIPS'16. Red Hook, NY, USA: Curran Associates Inc., Dec. 2016, pp. 3844-3852.
378
+ [9] T. N. Kipf and M. Welling, "Semi-supervised classification with graph convolutional networks," in International Conference on Learning Representations (ICLR), 2017.
379
+ [10] B. Singh, T. K. Marks, M. Jones, O. Tuzel, and M. Shao, "A multi-stream bi-directional recurrent neural network for Fine-Grained action detection," in 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), Jun. 2016, pp. 1961-1970.
380
+ [11] L. Sun, K. Jia, D.-Y. Yeung, and B. E. Shi, "Human action recognition using factorized Spatio-Temporal convolutional networks," in 2015 IEEE International Conference on Computer Vision (ICCV), Dec. 2015, pp. 4597-4605.
381
+ [12] R. Yao, G. Lin, Q. Shi, and D. C. Ranasinghe, "Efficient dense labelling of human activity sequences from wearables using fully convolutional networks," Pattern Recognit., vol. 78, pp. 252-266, Jun. 2018.
382
+ [13] C. Lea, M. D. Flynn, R. Vidal, A. Reiter, and G. D. Hager, "Temporal convolutional networks for action segmentation and detection," Proceedings - 30th IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, vol. 2017-January, pp. 1003-1012, 2017.
383
+ [14] F. Yu and V. Koltun, "Multi-Scale context aggregation by dilated convolutions," pre-print, Nov. 2015.
384
+ [15] Y. A. Farha and J. Gall, "Ms-tcn: Multi-stage temporal convolutional network for action segmentation," in 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 3570-3579.
385
+ [16] B. Filtjens, P. Ginis, A. Nieuwboer, P. Slaets, and B. Vanrumste, "Automated freezing of gait assessment with marker-based motion capture and multi-stage spatial-temporal graph convolutional neural networks," J. Neuroeng. Rehabil., vol. 19, no. 1, p. 48, May 2022.
386
+ [17] I. Goodfellow, Y. Bengio, and A. Courville, Deep Learning. The MIT Press, 2016.
387
+ [18] Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner, "Gradient-based learning applied to document recognition," Proc. IEEE, vol. 86, no. 11, pp. 2278-2324, Nov. 1998.
388
+ [19] S. Bai, J. Zico Kolter, and V. Koltun, "An empirical evaluation of generic convolutional and recurrent networks for sequence modeling," Mar. 2018.
389
+ [20] A. Graves and J. Schmidhuber, "Framewise phoneme classification with bidirectional LSTM and other neural network architectures," Neural Netw., vol. 18, no. 5-6, pp. 602-610, Jun. 2005.
390
+ [21] B. Filtjens, A. Nieuwboer, N. D'cruz, J. Spildooren, P. Slaets, and B. Vanrumste, "A data-driven approach for detecting gait events during turning in people with parkinson's disease and freezing of gait," Gait Posture, vol. 80, pp. 130-136, Jul. 2020.
391
+ [22] Y. Matsushita, D. T. Tran, H. Yamazoe, and J.-H. Lee, "Recent use of deep learning techniques in clinical applications based on gait: a survey," Journal of Computational Design and Engineering, vol. 8, no. 6, pp. 1499-1532, Oct. 2021.
392
+ [23] N. Cheema, S. Hosseini, J. Sprenger, E. Herrmann, H. Du, K. Fischer, and P. Slusallek, "Dilated temporal Fully-Convolutional network for semantic segmentation of motion capture data," Jun. 2018.
393
+ [24] S. Ioffe and C. Szegedy, "Batch normalization: Accelerating deep network training by reducing internal covariate shift," in Proceedings of the 32nd International Conference on International Conference on Machine Learning (ICML), 2015.
394
+ [25] M. Schuster and K. K. Paliwal, "Bidirectional recurrent neural networks," Trans. Sig. Proc., vol. 45, no. 11, pp. 2673-2681, Nov. 1997.
395
+ [26] L. Kidzinski, S. Delp, and M. Schwartz, "Automatic real-time gait event detection in children using deep neural networks," PLoS One, vol. 14, no. 1, p. e0211466, Jan. 2019.
396
+ [27] D. P. Kingma and J. Ba, "Adam: A method for stochastic optimization," pre-print, Dec. 2014.
397
+
398
+ [28] C. Liu, Y. Hu, Y. Li, S. Song, and J. Liu, "PKU-MMD: A large scale benchmark for Skeleton-Based human action understanding," in Proceedings of the Workshop on Visual Analysis in Smart and Connected Communities, ser. VSCC '17. New York, NY, USA: Association for Computing Machinery, Oct. 2017, pp. 1-8.
399
+ [29] R. Chereshnev and A. Kertész-Farkas, "HuGaDB: Human gait database for activity recognition from wearable inertial sensor networks," in AIST, 2017.
400
+ [30] F. Niemann, C. Reining, F. Moya Rueda, N. R. Nair, J. A. Steffens, G. A. Fink, and M. Ten Hompel, "LARa: Creating a dataset for human activity recognition in logistics using semantic attributes," Sensors, vol. 20, no. 15, Jul. 2020.
401
+ [31] J. Spildooren, S. Vercruysse, K. Desloovere, W. Vandenberghe, E. Kerckhofs, and A. Nieuwboer, "Freezing of gait in parkinson's disease: the impact of dual-tasking and turning," Mov. Disord., vol. 25, no. 15, pp. 2563-2570, Nov. 2010.
402
+ [32] A. Nieuwboer, R. Dom, W. De Weerdt, K. Desloovere, S. Fieuws, and E. Broens-Kaucsik, "Abnormalities of the spatiotemporal characteristics of gait at the onset of freezing in parkinson's disease," Mov. Disord., vol. 16, no. 6, pp. 1066-1075, Nov. 2001.
403
+ [33] J. D. Schaafsma, Y. Balash, T. Gurevich, A. L. Bartels, J. M. Hausdorff, and N. Giladi, "Characterization of freezing of gait subtypes and the response of each to levodopa in parkinson's disease," Eur. J. Neurol., vol. 10, no. 4, pp. 391-398, Jul. 2003.
404
+ [34] M. Gilat, "How to annotate freezing of gait from video: A standardized method using Open-Source software," J. Parkinsons. Dis., vol. 9, no. 4, pp. 821-824, 2019.
405
+ [35] A. Bowen, R. Wenman, J. Mickelborough, J. Foster, E. Hill, and R. Tallis, "Dual-task effects of talking while walking on velocity and balance following a stroke," Age Ageing, vol. 30, no. 4, pp. 319-323, Jul. 2001.
406
+ [36] R. B. Davis, S. Ōunpuu, D. Tyburski, and J. R. Gage, "A gait analysis data collection and reduction technique," Hum. Mov. Sci., vol. 10, no. 5, pp. 575-587, Oct. 1991.
407
+ [37] D. Podsiadlo and S. Richardson, "The timed "up & go": a test of basic functional mobility for frail elderly persons," J. Am. Geriatr. Soc., vol. 39, no. 2, pp. 142-148, Feb. 1991.
408
+ [38] T. Li, J. Chen, C. Hu, Y. Ma, Z. Wu, W. Wan, Y. Huang, F. Jia, C. Gong, S. Wan, and L. Li, "Automatic timed Up-and-Go SubTask segmentation for parkinson's disease patients using Video-Based activity classification," IEEE Trans. Neural Syst. Rehabil. Eng., vol. 26, no. 11, pp. 2189-2199, Nov. 2018.
409
+ [39] P. Liang, W. H. Kwong, A. Sidarta, C. K. Yap, W. K. Tan, L. S. Lim, P. Y. Chan, C. W. K. Kuah, S. K. Wee, K. Chua, C. Quek, and W. T. Ang, "An asian-centric human movement database capturing activities of daily living," Sci Data, vol. 7, no. 1, p. 290, Sep. 2020.
410
+ [40] A. Cappozzo, F. Catani, U. D. Croce, and A. Leardini, "Position and orientation in space of bones during movement: anatomical frame definition and determination," Clin. Biomech., vol. 10, no. 4, pp. 171-178, Jun. 1995.
411
+ [41] J. Demšar, "Statistical comparisons of classifiers over multiple data sets," J. Mach. Learn. Res., vol. 7, no. 1, pp. 1-30, 2006.
412
+ [42] S. García, A. Fernández, J. Luengo, and F. Herrera, "Advanced nonparametric tests for multiple comparisons in the design of experiments in computational intelligence and data mining: Experimental analysis of power," Inf. Sci., vol. 180, no. 10, pp. 2044-2064, May 2010.
413
+ [43] M. Friedman, "The use of ranks to avoid the assumption of normality implicit in the analysis of variance," J. Am. Stat. Assoc., vol. 32, no. 200, pp. 675-701, Dec. 1937.
414
+ [44] B. Calvo and G. Santafé, "scmamp: Statistical comparison of multiple algorithms in multiple problems," The R Journal, 2016.
415
+ [45] R Core Team, R: A Language and Environment for Statistical Computing, R Foundation for Statistical Computing, Vienna, Austria, 2013. [Online]. Available: http://www.R-project.org/
416
+
417
+ Benjamin Filtjens received an MSc in Mechanical Engineering Technology from Hasselt University in 2017. He is currently a Ph.D. student at KU Leuven, working towards automated at-home freezing of gait assessment. He is part of the eMedia research lab at the Department of Electrical Engineering (ESAT) and the intelligent mobile platform research group at the Department of Mechanical Engineering, both at KU Leuven campus Group T. At Group T, he teaches mathematical modelling, advanced automation engineering, and deep learning in health technologies. His research interests are deep learning, explainable artificial intelligence, and mobile robots in general, and ICT applications for automatic and objective gait and freezing of gait assessment in particular.
420
+
421
+ Bart Vanrumste received an MSc in Electrical Engineering and an MSc in Biomedical Engineering from Ghent University in 1994 and 1998, respectively. In 2001 he received a Ph.D. in Engineering from the same institute. He worked as a post-doctoral fellow from 2001 until 2003 at the Electrical and Computer Engineering Department of the University of Canterbury, New Zealand. From 2003 until 2005 he was a post-doctoral fellow at the Department of Electrical Engineering (ESAT) in the STADIUS division at KU Leuven. In 2005 he was appointed faculty member, initially at University of Applied Sciences Thomas More and since 2013 in the Faculty of Engineering Technology of KU Leuven. He is a member of the eMedia research lab and of the ESAT-STADIUS division. His current research activities focus on multimodal sensor integration. He is a senior member of the IEEE Engineering in Medicine and Biology Society.
422
+
423
+ Peter Slaets received an MSc in electrotechnical-mechanical engineering, specialization in datamining and automation, in 2002 from KU Leuven, Leuven, Belgium. In 2005, he became a lecturer at the Katholieke Hogeschool Limburg (KHLIM), Diepenbeek, Belgium, and the Katholieke Hogeschool Kempen (KHK), Geel, Belgium, where he teaches courses in digital electronics, control, and automation. In 2008, he received a Ph.D. in applied sciences from KU Leuven with the title: 'Geometric 3D Model Building from Sensor Measurements Collected during Compliant Motion: Stochastic Filtering and Hardware Architectures'. He is currently an associate professor in the intelligent mobile platform research group at the Department of Mechanical Engineering, KU Leuven. His research focuses on modeling, Bayesian estimation techniques, and mobile platforms in general, with applications in autonomous inland shipping and health monitoring in particular.
2202.01xxx/2202.01727/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bde71d2f9c147d37a74a58bafbb94dcc5ff5200bad90e98170e6d2f9a4a3690c
3
+ size 447991
2202.01xxx/2202.01727/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01741/b628ffb0-e0c1-447e-8e39-ad7ab7216401_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01741/b628ffb0-e0c1-447e-8e39-ad7ab7216401_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01741/b628ffb0-e0c1-447e-8e39-ad7ab7216401_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8ba1afa575e0fb2637229cb7ac71689ee95760f9c6f6f762bd5260f40ddec9f
3
+ size 2500745
2202.01xxx/2202.01741/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01741/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0648e8249e720952cb3ced24079287cc10ee809c7e5078cebce8c3d823f00a65
3
+ size 1379319
2202.01xxx/2202.01741/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01747/5c8d712c-7a56-4df1-8e5c-845b72d69caf_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01747/5c8d712c-7a56-4df1-8e5c-845b72d69caf_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01747/5c8d712c-7a56-4df1-8e5c-845b72d69caf_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4af1a8c64cf437e3bf9fa91337d929c0916b2c18eaae019431e37c8bfba62ff3
3
+ size 40851347
2202.01xxx/2202.01747/full.md ADDED
@@ -0,0 +1,865 @@
1
+ # The Met Dataset: Instance-level Recognition for Artworks
2
+
3
+ Nikolaos-Antonios Ypsilantis
4
+ VRG, Faculty of Electrical Engineering
5
+ Czech Technical University in Prague
6
+
7
+ Noa Garcia Institute for Datability Science Osaka University
8
+
9
+ Guangxing Han
10
+ DVMM Lab
11
+ Columbia University
12
+
13
+ Sarah Ibrahimi
14
+ Multimedia Analytics Lab
15
+ University of Amsterdam
16
+
17
+ Nanne van Noord
18
+ Multimedia Analytics Lab
19
+ University of Amsterdam
20
+
21
+ Giorgos Tolias
22
+ VRG, Faculty of Electrical Engineering
23
+ Czech Technical University in Prague
24
+
25
+ # Abstract
26
+
27
+ This work introduces a dataset for large-scale instance-level recognition in the domain of artworks. The proposed benchmark exhibits a number of different challenges such as large inter-class similarity, long-tail distribution, and many classes. We rely on the open access collection of The Met museum to form a large training set of about 224k classes, where each class corresponds to a museum exhibit with photos taken under studio conditions. Testing is primarily performed on photos taken by museum guests depicting exhibits, which introduces a distribution shift between training and testing. Testing is additionally performed on a set of images not related to Met exhibits, making the task resemble an out-of-distribution detection problem. The proposed benchmark follows the paradigm of other recent datasets for instance-level recognition on different domains to encourage research on domain-independent approaches. A number of suitable approaches are evaluated to offer a testbed for future comparisons. Self-supervised and supervised contrastive learning are effectively combined to train the backbone, which is used for non-parametric classification, shown to be a promising direction. Dataset webpage: http://cmp.felk.cvut.cz/met/.
28
+
29
+ # 1 Introduction
30
+
31
+ Classification of objects can be done with categories defined at different levels of granularity. For instance, a particular piece of art is classified as the "Blue Poles" by Jackson Pollock, as painting, or artwork, from the point of view of instance-level recognition [8], fine-grained recognition [26], or generic category-level recognition [35], respectively. Instance-level recognition (ILR) is applied to a variety of domains such as products, landmarks, urban locations, and artworks. Representative examples of real world applications are place recognition [1, 24], landmark recognition and retrieval [43], image-based localization [36, 3], street-to-shop product matching [2, 17, 28], and artwork recognition [11]. There are several factors that make ILR a challenging task. It is typically required to deal with a large category set, whose size reaches the order of $10^{6}$ , with many classes represented by only a few or a single example, while the small between class variability further increases the hardness. Due to these difficulties the choice is often made to handle instance-level classification as an instance-level retrieval task [41]. Particular applications, e.g. in the product or art
32
+
33
+ ![](images/adad37c1c688f7ae90c424af0c80c4b04634331b236c053b7d9f5260624688b4.jpg)
34
+
35
+ ![](images/caa791d6df6904e9d415ef2ed0e59c51f1956b469dbe269873345e5539805d40.jpg)
36
+ correctly recognized test images
37
+
38
+ ![](images/2a41277a853ae1eb0067de8c954cd575e03b8d503209cd156a51edea9ddf59d4.jpg)
39
+
40
+ ![](images/a2090f9badabbe0b33c7a9fd750cb98007368796addefeb96281cec858dddabb.jpg)
41
+
42
+ ![](images/3bdc6f131ae79cf6073e80d3672d2b0e2505a2b8d3e6037dadbcf3a72c694ca9.jpg)
43
+
44
+ ![](images/4686c565220f114a71ea7bfa2fc8f7b8e458f7949673259b840df765879fd439.jpg)
45
+
46
+ ![](images/7f8ce9e261f882d8576bb245c817b38834bf09f820bbc3ff07f39a49bb2863a8.jpg)
47
+
48
+ ![](images/7743be2fe675417de5d8b74bde0a4568107345ae81ed67f8fe7ba3d10fe2ac85.jpg)
49
+ incorrectly recognized test images
50
+
51
+ ![](images/467c5c4a13d9cc54a5141551ff83fa82cc3bcb3f49ed9873783c644f8bf51523.jpg)
52
+
53
+ ![](images/a693e51bcad996fa3a38ee31409c9d5e3c7d1f54ae6388785ee3a1444a772585.jpg)
54
+
55
+ ![](images/d2b0d284e4cf77a42792117797d992d38072c7f169452da3d77b87d572acd7b6.jpg)
56
+
57
+ ![](images/e08d17ae5e33e72aa134d069a11954a5eb2fe0d3371d86d902edcff835316a32.jpg)
58
+
59
+ ![](images/afd417f79c81c30ee818f1ab3a8d119d50e8fc759b495acc3a87ca3d2757dbcc.jpg)
60
+ OOD-test images with high confidence predictions
61
+ Figure 1: Challenging examples from the Met dataset for the top performing approach. Test images are shown next to their nearest neighbor from the Met exhibits that generated the prediction of the corresponding class. Top row: correct predictions. Middle row: incorrect predictions; an image of the ground truth class is also shown. Bottom row: high confidence predictions for OOD-test images; the goal is to obtain low confidence for these.
62
+
63
+ ![](images/8477706a5269653237c58cd64ba2816c86861c0b39913050c4c740362c42fe8c.jpg)
64
+ OOD-test predicted
65
+
66
+ ![](images/7f40e6e0e9a0cb78a397bccae16366e06bde27b03f7140c4b4df072d74d49168.jpg)
67
+ OOD-test predicted
68
+
69
+ ![](images/d0e051e72a7c48767ceaea7982c39cb0b4427c0a3f07a2bd4504825bc4055250.jpg)
70
+ OOD-test predicted
71
+
72
+ domain require dynamic updates of the category set; images from new categories are continuously added. Therefore, ILR is a form of open set recognition [16].
73
+
74
+ Despite the many real-world applications and challenging aspects of the task, ILR has attracted less attention than category-level recognition (CLR) tasks, which are accompanied by large and popular benchmarks, such as ImageNet [34], that serve as a testbed even for approaches applicable beyond classification tasks. A major cause for this is the lack of large-scale datasets. Creating datasets with accurate ground truth at large scale for ILR is a tedious process. As a consequence, many datasets include noise in their labels [8, 11, 43]. In this work, we fill this gap by introducing a dataset for instance-level classification in the artwork domain.
75
+
76
+ The art domain has attracted a lot of attention in computer vision research. A popular line of research focuses on a specific flavor of classification, namely attribute prediction [23, 29, 30, 39, 44]. In this case, attributes correspond to various kinds of metadata for a piece of art, such as style, genre, period, artist and more. The metadata for attribute prediction is obtained from museums and archives that make this information freely available. This makes the dataset creation process convenient, but the resulting datasets are often highly noisy due to the sparseness of this information [29, 39]. Another known task is domain generalization or adaptation where object recognition or detection models are trained on natural images and their generalization is tested on artworks [10]. A very challenging task is motif discovery [37, 38] which is intended as a tool for art historians, and aims to find shared motifs between artworks. In this work we focus on ILR for artworks which combines the aforementioned challenges of ILR, is related to applications with positive impact, such as educational applications, and has not yet attracted attention in the research community.
77
+
78
+ We introduce a new large-scale dataset (see Figure 1 for examples) for instance-level classification by relying on the open access collection from the Metropolitan Museum of Art (The Met) in New York. The training set consists of about $400\mathrm{k}$ images from more than $224\mathrm{k}$ classes, with artworks of world-level geographic coverage and chronological periods dating back to the Paleolithic period. Each museum exhibit corresponds to a unique artwork, and defines its own class. The training set exhibits a long-tail distribution with more than half of the classes represented by a single image, making it a special case of few-shot learning. We have established ground-truth for more than 1,100 images from museum visitors, which form the query set. Note that there is a distribution shift between this query set and the training images which are created in studio-like conditions. We additionally include a large set of distractor images not related to The Met, which form an Out-Of-Distribution (OOD) [27, 33] query set. The dataset follows the paradigm and evaluation protocol of the recent Google Landmarks Dataset (GLD) [43] to encourage universal ILR approaches that are applicable in a wider range of domains. Nevertheless, in contrast to GLD, the established ground
79
+
80
+ ![](images/c95029a98f79aeec8ce246e7d5240862be95050818db92fd86805cccc1811132.jpg)
81
+
82
+ ![](images/784ce9453bb38f71354f5618bec260a7729d2a3fc2b0e2e8a906f335f49bbae1.jpg)
83
+ Figure 2: The Met dataset collection and annotation process.
84
+
85
+ ![](images/e85bfba5ff399a986954b1ca8e89848b20a5af30d493873cc670d49d17ceeb82.jpg)
86
+ Figure 3: Samples from the Met dataset of exhibit and query (Met and distractor) images, demonstrating the diversity in viewpoint, lighting, and subject matter of the images. Exhibit images and queries from the same Met class are indicated by dashed lines.
87
+
88
+ truth does not include noise. To our knowledge, this is the only ILR dataset at this scale that includes no noise in the ground-truth and is fully publicly available.
89
+
90
+ The introduced dataset is accompanied by performance evaluation of relevant approaches. We show that non-parametric classifiers perform much better than parametric ones. Improving the visual representation becomes essential with the use of non-parametric classifiers. To this end, we show that the recent self-supervised learning methods that rely only on image augmentations are beneficial, but the available ILR labels should not be discarded. A combined self-supervised and supervised contrastive learning approach is the top performer in our benchmark indicating promising future directions.
91
+
92
+ # 2 The Met dataset
93
+
94
+ The Met dataset for ILR contains two types of images, namely **exhibit images** and **query images**. Exhibit images are photographs of artworks in The Met collection taken by The Met organization under studio conditions, capturing multiple views of objects featured in the exhibits. These images form the training set for classification and are interchangeably called **exhibit or training images** in the following. We collect about 397k **exhibit** images corresponding to about 224k **unique** exhibits, i.e. classes, also called **Met classes**.
95
+
96
+ Query images are images that need to be labeled by the recognition system, essentially forming the evaluation set. They are collected from multiple online sources for which ground-truth is established by labeling them according to the Met classes. The Met dataset contains about 20k query images, that are divided into the following three types: 1) Met queries, which are images taken at The Met museum by visitors and labeled with the exhibit depicted, 2) other-artwork queries, which are images of artworks from collections that do not belong to The Met, and 3) non-artwork queries, which are images that do not depict artworks. The last two types of queries are referred to as distractor queries and are labeled as "distractor" class which denotes out-of-distribution queries.
97
+
98
+ Dataset collection. The dataset collection and annotation process is described in the following and summarized in Figure 2, while sample images from the dataset are shown in Figure 3.
99
+
100
+ <table><tr><td rowspan="2">Split</td><td rowspan="2">Type</td><td colspan="3"># Images</td><td rowspan="2"># Classes</td></tr><tr><td>Met</td><td>other-art</td><td>non-art</td></tr><tr><td>Train</td><td>Exhibit</td><td>397,121</td><td>-</td><td>-</td><td>224,408</td></tr><tr><td>Val</td><td>Query</td><td>129</td><td>1,168</td><td>868</td><td>111 + 1</td></tr><tr><td>Test</td><td>Query</td><td>1,003</td><td>10,352</td><td>7,964</td><td>734 + 1</td></tr></table>
101
+
102
+ Table 1: Number of images and classes in the Met dataset per split. Met exhibit images are from the museum's open collection, while Met query images are from museum visitors. Query images also contain distractor images (denoted by the $+1$ class), while the rest of the val/test classes are a subset of the train classes.
103
+
104
+ Image sources: Exhibit images are obtained from The Met collection. Only exhibits labeled as open access are considered. A maximum of 10 images per exhibit is included in the dataset, images with very skewed aspect ratios are excluded, and image deduplication is performed. Query images are collected from different sources according to the type of query. Met queries are taken on site by museum visitors. Some of them are collected by our team, and the rest are Creative Commons (CC) images crawled from Flickr. We use Flickr groups $^2$ related to The Met to collect candidate images. Distractor queries are downloaded from Wikimedia Commons $^3$ by crawling public domain images according to the Wikimedia-assigned categories. Generic categories, such as people, nature, or music, are used for non-artwork queries, and art-related categories, e.g. art, sculptures, painting, architecture, for other-artwork queries.
105
+
106
+ Annotation: We label query images with their corresponding Met class, if any. Met queries taken by our team are annotated based on exhibit information, whereas Met queries downloaded from Flickr are annotated in three phases, namely filtering, annotation, and verification. In the filtering phase, invalid images are discarded, i.e. images containing visitor faces, images not depicting exhibits, or images with more than one exhibit. In the annotation phase, queries are labeled with the corresponding Met class. To ease the task, the title and description fields on Flickr are used for text-based search in the list of titles from The Met exhibits included in the corresponding metadata. Queries whose depicted Met exhibit is not in the public domain are discarded. Finally, in the verification phase, two different annotators verify the correctness of the labeling per query. We additionally verify that distractor queries, especially other-artwork queries, are true distractors and do not belong to The Met collection. This is done in a semi-automatic manner supported by (i) text-based filtering of the Wikimedia image titles and (ii) visual search using a pre-trained deep network. Top matches are manually inspected and images corresponding to Met exhibits are removed.
107
+
108
+ ![](images/01435526de5a02c2a508ccd159941ab52d915aba19fe794d217bb9ed9911f0b4.jpg)
109
+ Figure 4: Left: Number of images and classes by department. Met queries are assigned to the department of their ground-truth class. Some departments that do not contain queries but contain exhibit images are not shown. Right: Number of distractor images by Wikimedia category. Top categories shown: art-related categories in solid blue and generic categories in dash purple.
110
+
111
+ ![](images/babd5aeda3bbea54420bd8a7041aab9353c69803d05c42b7a07962672e0ca1c7.jpg)
112
+
113
+ Benchmark and evaluation protocol. The structure and evaluation protocol for the Met dataset follows that of the Google Landmarks Dataset (GLD) [43]. All Met exhibit images form the training
114
+
115
+ ![](images/bbc38f55350c2827c96088d001154122e6bdbf524fff7dca2073cc240ca2c5f3.jpg)
116
+ Figure 5: Left: number of Met classes versus number of training images per class. Right: number of Met classes versus number of query images per class.
117
+
118
+ ![](images/a3fb97b3fe2ea5f9aaa5ad7dd170f10acd506feae4749e42b2d423ef80a4db23.jpg)
119
+
120
+ set, while the query images are split into test and validation sets. The test set is composed of roughly $90\%$ of the query images, and the rest is used to form the validation set. To ensure no leakage between the validation and test split, all Met queries are first grouped by user and then assigned to a split. Additionally, we enforce that there is no class overlap between the splits. As a result, 25 (14) users appear only in the test (validation) split, respectively. Image and class statistics for the train, val, and test parts are summarized in Table 1. The intended use of the validation split is for hyper-parameter tuning. All images are resized to have maximum resolution $500 \times 500$ .
121
+
122
+ For evaluation we measure the classification performance with two standard ILR metrics, namely average classification accuracy (ACC), and Global Average Precision (GAP). The average classification accuracy is measured only on the Met queries, whereas the GAP, also known as Micro Average Precision ( $\mu \mathrm{AP}$ ), is measured on all queries taking into account both the predicted label and the prediction confidence. All queries are ranked according to the confidence of the prediction in descending order, and then average precision is estimated on this ranked list; predicted labels and ground-truth labels are used to infer correctness of the prediction, while distractors are always considered to have incorrect predictions. GAP is given by $\frac{1}{M} \sum_{i=1}^{T} p(i) r(i)$ , where $p(i)$ is the precision at position $i$ , $r(i)$ is a binary indicator function denoting the correctness of prediction at position $i$ , $M$ is the number of the Met queries, and $T$ is the total number of queries. The GAP score is equal to the area-under-the-curve of the precision-recall curve whilst jointly taking all queries into account. We measure this for the Met queries only, denoted by $\mathrm{GAP}^{-}$ , and for all queries, denoted by GAP. In contrast to accuracy, this metric reflects the quality of the prediction confidence as a way to detect out-of-distribution (distractor) queries and incorrectly classified queries. It allows for inclusion of distractor queries in the evaluation without the need for distractors in the learning; the classifier never predicts "out-of-Met" (distractor) class. Optimal GAP requires, other than correct predictions for all Met queries, that all distractor queries get smaller prediction confidence than all the Met queries.
123
+
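As a reference, the GAP computation described above can be sketched in a few lines of NumPy. This is an illustrative implementation (the function and argument names are ours, not part of the official evaluation code), assuming per-query prediction confidences and correctness flags are already available.

```python
import numpy as np

def gap(confidence, is_correct, is_met):
    """GAP / micro AP as defined above: rank all queries by prediction confidence,
    treat distractor queries as always incorrect, and normalize the sum of
    precision-at-i over correct predictions by the number of Met queries (M)."""
    confidence = np.asarray(confidence, dtype=float)
    is_correct = np.asarray(is_correct, dtype=bool)   # always False for distractors
    is_met = np.asarray(is_met, dtype=bool)

    order = np.argsort(-confidence)                   # descending confidence
    correct = is_correct[order].astype(float)
    precision = np.cumsum(correct) / (np.arange(len(correct)) + 1)
    return float((precision * correct).sum() / is_met.sum())

# GAP is computed over all queries; GAP- corresponds to calling the same
# function on the Met queries only.
```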
124
+ Dataset statistics. The Met dataset contains artworks spanning from as far back as 240,000 BC to the current day. Figure 4 (left) shows the distribution of classes and images according to The Met department. Whereas there is an imbalance for exhibits across The Met departments, queries are collected to be evenly distributed to the best of our capabilities. In this way, we aim to ensure models are not biased towards a specific type of art, i.e., developing models that only produce good results for, e.g., European paintings, will not necessarily ensure good results on the overall benchmark. Finally, Figure 4 (right) shows the number of distractor query images by Wikimedia Commons categories.
125
+
126
+ The class frequency for exhibit images ranges from 1 to 10, with $60.8\%$ and $1.2\%$ of the classes containing a single image and 10 images, respectively (see Figure 5 left). Met queries are obtained from 39 visitors in total, while the maximum number of query images per class is, coincidentally, also 10. In total, $81.5\%$ of the Met query images are the sole Met queries that depict a particular Met class (see Figure 5 right).
127
+
128
+ Comparison to other datasets. We compare the Met dataset with existing datasets that are relevant in terms of domain or task.
129
+
130
+ Artwork datasets: Table 2 summarizes datasets in the artwork domain for various tasks. Most of the artwork datasets [23, 29, 30, 39, 44] focus on attribute prediction (AP), containing multiple types of annotations, such as author, material, or year of creation, usually obtained directly from the museum collections. Other datasets [5, 10, 44, 46] are focused on CLR, aiming to recognize object
131
+
132
+ <table><tr><td>Art datasets</td><td>Year</td><td>Domain</td><td># Images</td><td># Classes</td><td>Type of annotations</td><td>Task</td><td>Image source</td></tr><tr><td>PrintArt [5]</td><td>2012</td><td>Prints</td><td>988</td><td>75</td><td>Art theme</td><td>CLR</td><td>Artstor</td></tr><tr><td>VGG Paintings [10]</td><td>2014</td><td>Paintings</td><td>8,629</td><td>10</td><td>Object category</td><td>CLR</td><td>Art UK</td></tr><tr><td>WikiPaintings [23]</td><td>2014</td><td>Paintings</td><td>85,000</td><td>25</td><td>Style</td><td>AP</td><td>WikiArt</td></tr><tr><td>Rijksmuseum [30]</td><td>2014</td><td>Artwork</td><td>112,039</td><td>†6,629</td><td>Art attributes</td><td>AP</td><td>Rijksmuseum</td></tr><tr><td>BAM [44]</td><td>2017</td><td>Digital art</td><td>65M</td><td>†9</td><td>Media, content, emotion</td><td>AP, CLR</td><td>Behance</td></tr><tr><td>Art500k [29]</td><td>2017</td><td>Artwork</td><td>554,198</td><td>†1,000</td><td>Art attributes</td><td>AP</td><td>Various</td></tr><tr><td>SemArt [14]</td><td>2018</td><td>Paintings</td><td>21,383</td><td>21,383</td><td>Art attributes, descriptions</td><td>Text-image</td><td>Web Gallery of Art</td></tr><tr><td>OmniArt [39]</td><td>2018</td><td>Artwork</td><td>1,348,017</td><td>†100,433</td><td>Art attributes</td><td>AP</td><td>Various</td></tr><tr><td>Open MIC [25]</td><td>2018</td><td>Artwork</td><td>16,156</td><td>866</td><td>Instance</td><td>ILR (DA)</td><td>Authors</td></tr><tr><td>iMET [46]</td><td>2019</td><td>Artwork</td><td>155,531</td><td>1,103</td><td>Concepts</td><td>CLR</td><td>The Met</td></tr><tr><td>NoisyArt [11]</td><td>2019</td><td>Artwork</td><td>89,095</td><td>3,120</td><td>Instance (noisy)</td><td>ILR</td><td>Various</td></tr><tr><td>The Met (Ours)</td><td>2021</td><td>Artwork</td><td>418,605</td><td>224,408</td><td>Instance</td><td>ILR</td><td>Various</td></tr></table>
133
+
134
+ Table 2: Comparison to art datasets. † For datasets with multiple annotations, the task with the largest number of classes is reported.
135
+
136
+ <table><tr><td>ILR datasets</td><td>Year</td><td>Domain</td><td># Images</td><td># Classes</td><td>Type of annotations</td><td>Image source</td></tr><tr><td>Street2Shop [17]</td><td>2015</td><td>Clothes</td><td>425,040</td><td>204,795</td><td>Category, instance</td><td>Various</td></tr><tr><td>DeepFashion [28]</td><td>2016</td><td>Clothes</td><td>800,000</td><td>33,881</td><td>Attributes, landmarks, instance</td><td>Various</td></tr><tr><td>GLD v2 [43]</td><td>2019</td><td>Landmarks</td><td>4.98M</td><td>200,000</td><td>Instance (noisy)</td><td>Wikimedia</td></tr><tr><td>AliProducts [8]</td><td>2020</td><td>Products</td><td>3M</td><td>50,030</td><td>Instance (noisy)</td><td>Alibaba</td></tr><tr><td>Products-10K [2]</td><td>2020</td><td>Products</td><td>150,000</td><td>10,000</td><td>Category, instance</td><td>JD.com</td></tr><tr><td>The Met (Ours)</td><td>2021</td><td>Artwork</td><td>418,605</td><td>224,408</td><td>Instance</td><td>Various</td></tr></table>
137
+
138
+ Table 3: Comparison to instance-level recognition datasets.
139
+
140
+ categories, such as animals and vehicles, in paintings. From the artwork datasets, Open MIC [25] and NoisyArt [11] are the only ones with instance-level labels. Compared to the Met dataset, Open MIC is smaller, with significantly fewer classes, and mostly focuses on domain adaptation (DA) tasks. NoisyArt has a similar focus to ours, but it is significantly smaller and has noisy labels.
141
+
142
+ ILR datasets: In Table 3 we compare the Met dataset with existing ILR datasets in multiple domains. ILR is widely studied for clothing [17, 28], landmarks [43], and products [2, 8]. The Met dataset resembles ILR datasets in those domains in that the training and query images are from different scenarios. For example, in Street2Shop [17] and DeepFashion [28] queries are taken by customers in real-life environments, whereas training images are studio shots. Getting annotations for ILR, however, is not easy, and some datasets contain a significant number of noisy annotations from crawling the web without verification [8, 11, 43]. In that sense, the Met is the largest ILR dataset, in terms of number of classes, whose labels have been manually verified. Overall, the Met dataset proposes a large-scale challenge in a new domain, encouraging future research on generic ILR approaches that are applicable in a universal way to multiple domains.
143
+
144
+ # 3 Baseline approaches
145
+
146
+ This section presents the approaches considered as baselines, i.e. existing methods that are applicable to this dataset, in the experimental evaluation.
147
+
148
+ Representation. Consider an embedding function $f_{\theta}:\mathcal{X}\to \mathbb{R}^{d}$ that takes an input image $x\in \mathcal{X}$ and maps it to a vector $f_{\theta}(x)\in \mathbb{R}^{d}$, equivalently denoted by $f(x)$. Function $f(\cdot)$ comprises a fully convolutional network (the backbone network), a global pooling operation that maps a 3D tensor to a vector, vector $\ell_2$ normalization, an optional fully-connected layer (also seen as a $1\times 1$ convolution), and a final vector $\ell_2$ normalization. The backbone is parametrized by the parameter set $\theta$. ResNet18 (R18) and ResNet50 (R50) [18] are the backbones used in this work, while global pooling is performed by Generalized-Mean (GeM) pooling [32], shown to be effective for representation in instance-level tasks [4].
149
+
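To make the embedding function concrete, the following is a minimal PyTorch sketch of GeM pooling and the overall embedder (backbone, pooling, $\ell_2$ normalization, optional FC, final $\ell_2$ normalization). The class names and the choice of a torchvision ResNet18 backbone are illustrative assumptions, not the authors' exact implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

class GeM(nn.Module):
    """Generalized-Mean pooling: ((1/|HW|) * sum_i x_i^p)^(1/p) per channel."""
    def __init__(self, p=3.0, eps=1e-6):
        super().__init__()
        self.p = nn.Parameter(torch.tensor(p))
        self.eps = eps

    def forward(self, x):                              # x: (B, C, H, W)
        x = x.clamp(min=self.eps).pow(self.p)
        return x.mean(dim=(-2, -1)).pow(1.0 / self.p)  # (B, C)

class Embedder(nn.Module):
    """Backbone + GeM + l2-norm + optional FC (1x1-conv equivalent) + final l2-norm."""
    def __init__(self, out_dim=512, use_fc=True):
        super().__init__()
        r18 = torchvision.models.resnet18(pretrained=True)
        self.backbone = nn.Sequential(*list(r18.children())[:-2])  # drop avgpool and fc
        self.pool = GeM()
        self.fc = nn.Linear(512, out_dim) if use_fc else nn.Identity()

    def forward(self, x):                              # x: (B, 3, H, W)
        v = F.normalize(self.pool(self.backbone(x)), dim=-1)
        return F.normalize(self.fc(v), dim=-1)
```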
150
+ Representation of image $x$ , denoted by vector embedding $\mathbf{v}(x) \in \mathbb{R}^d$ , is a result of aggregation of multi-resolution embeddings given by
151
+
152
+ $$
153
+ \mathbf{v}(x) = \frac{\sum_{r \in R} f(x_r)}{\left\lVert \sum_{r \in R} f(x_r) \right\rVert}, \tag{1}
154
+ $$
155
+
156
+ where $x_{r}$ denotes image $x$ down-sampled by relative factor $r$ . We set $R = \{1, 2^{-0.5}, 2^{-1}\}$ and $R = \{1\}$ in the multi-scale (MS) and single-scale (SS) case, respectively. Following the standard practice in instance-level search, the image representation space is whitened with PCA whitening
157
+
158
+ (PCAw) [20] learned on the representation vectors of all Met training images. Optionally, dimensionality reduction is performed by keeping the dimensions corresponding to the top components. PCAw is always performed in the rest of the paper, unless stated otherwise; for simplicity we reuse notation $\mathbf{v}(x)$ for the whitened image embeddings. Given a trained backbone (fixed $\theta$), the image representation is subsequently used in combination with a k-Nearest-Neighbor (kNN) classifier.
159
+
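The multi-scale aggregation of Eq. (1) and the PCA whitening step can be sketched as follows. The helper names are illustrative, and the whitening shown here is a plain eigendecomposition-based PCAw learned on the training embeddings, as an assumption about the exact recipe of [20].

```python
import torch
import torch.nn.functional as F

@torch.no_grad()
def multiscale_embedding(model, image, scales=(1.0, 2 ** -0.5, 0.5)):
    """Eq. (1): sum the embeddings of the image over the down-sampled resolutions
    in R and l2-normalize the sum (single-scale corresponds to scales=(1.0,))."""
    v = 0.0
    for r in scales:
        x = image if r == 1.0 else F.interpolate(
            image, scale_factor=r, mode="bilinear", align_corners=False)
        v = v + model(x)
    return F.normalize(v, dim=-1)

def fit_pca_whitening(X, dim=512):
    """Learn PCA whitening on the matrix X (N, D) of training embeddings."""
    mean = X.mean(dim=0)
    Xc = X - mean
    eigvecs, eigvals, _ = torch.linalg.svd(Xc.t() @ Xc)          # covariance eigen-decomposition
    P = (eigvecs[:, :dim] / (eigvals[:dim].sqrt() + 1e-12)).t()  # (dim, D) projection
    return mean, P

def apply_pca_whitening(v, mean, P):
    """Whiten, reduce dimensionality, and re-normalize the embeddings."""
    return F.normalize((v - mean) @ P.t(), dim=-1)
```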
160
+ kNN classifier. The label of image $x$ is denoted by $y(x)$ and $q$ is a query image. The similarity between query and a training image is given by $\mathbf{v}(x)^{\top}\mathbf{v}(q)$ , coinciding with the cosine similarity. The confidence of class $c$ for query $q$ is given by
161
+
162
+ $$
163
+ s_c(q) = \max_{x \in \mathrm{NN}_k(q)} \left(\mathbf{v}(x)^{\top} \mathbf{v}(q)\right) \mathbb{1}_{y(x) = c}, \tag{2}
164
+ $$
165
+
166
+ where $\mathrm{NN}_k(q)$ is the set of $k$ nearest-neighbors of $q$ in the $d$ -dimensional representation space. The vector of class confidences is $\mathbf{s}(q) \in \mathbb{R}^N$ with elements $s_c(q), c \in [1, \dots, N]$ , where $N$ is the number of training classes. Classes without any example in the top- $k$ neighbors have zero confidence. The predicted label $\hat{y}(q) = \arg \max_{c} s_c(q)$ is, according to (2), equivalent to the label of the closest training image. Despite label prediction requiring only $k = 1$ , confidence estimation for more classes is essential for normalization and handling of OOD (distractor) queries. The normalized confidence is given by the soft-max of vector $\tau \mathbf{s}(q)$ , where $\tau$ is the temperature. This is a non-parametric classifier that does not necessarily require training on The Met dataset; it only requires an existing backbone network. Hyper-parameters $k$ and $\tau$ are tuned with grid search according to GAP on the validation set.
167
+
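A minimal sketch of this kNN classifier follows (Eq. (2) plus the soft-max normalization). The exhaustive similarity computation shown here would in practice be replaced by a GPU-accelerated nearest-neighbor search such as FAISS, and all names are illustrative.

```python
import numpy as np

def knn_predict(q, train_vecs, train_labels, num_classes, k=3, tau=50.0):
    """Eq. (2): per-class confidence is the maximum cosine similarity to that class
    among the k nearest training images; confidences are then soft-max normalized
    with temperature tau. All vectors are assumed l2-normalized."""
    sims = train_vecs @ q                      # cosine similarity to all training images
    nn_idx = np.argsort(-sims)[:k]             # indices of the k nearest neighbors

    scores = {}                                # non-zero entries of s(q)
    for i in nn_idx:
        c = int(train_labels[i])
        scores[c] = max(scores.get(c, -np.inf), float(sims[i]))

    classes = list(scores)
    s = np.array([scores[c] for c in classes])
    # soft-max of tau*s(q); classes absent from the neighbors have s_c = 0 (exp(0) = 1)
    denom = np.exp(tau * s).sum() + (num_classes - len(classes))
    p = np.exp(tau * s) / denom
    best = int(np.argmax(p))
    return classes[best], float(p[best])       # predicted label and its confidence
```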
168
+ Training on the Met. We use the Met training set and perform either training of a classifier for the Met classes or training of the backbone to obtain image embeddings for the kNN classifier. In all variants of backbone training, the optional FC layer is included in the architecture and initialized with the result of PCA whitening [32].
169
+
170
+ Deep network (DNet) classifier with instance-level labels: The backbone is trained jointly with a cosine similarity (linear) classifier [42], used previously for training with imbalanced datasets [19], combined with one of the two following losses. Cross-Entropy (CE) loss with soft-max, where the input to the soft-max is equal to the cosine similarity between the backbone output and the learnable class vectors (prototypes) multiplied by temperature $\gamma$ . Alternatively, we use the Arc-Face (AF) loss [12], which is also used in the work of Cao et al. [4] for instance-level recognition of landmarks. During inference two options are considered. First, use the whole deep network classifier and consider its arg max and max as class prediction and confidence score, respectively. Second, discard the linear classifier and use the backbone $f_{\theta}(\cdot)$ to obtain the image representation $v(x)$ and make predictions with the kNN classifier.
171
+
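A minimal sketch of the cosine-similarity classifier head used for the DNet variants is given below; class names and initialization are illustrative, and the ArcFace variant would additionally add an angular margin to the target-class logit before scaling [12].

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CosineClassifier(nn.Module):
    """Linear classifier whose logits are cosine similarities between the (l2-normalized)
    image embedding and learnable class prototypes, scaled by a temperature gamma."""
    def __init__(self, dim, num_classes, gamma=30.0):
        super().__init__()
        self.prototypes = nn.Parameter(torch.randn(num_classes, dim) * 0.01)
        self.gamma = gamma

    def forward(self, v):                                # v: (B, dim), l2-normalized
        w = F.normalize(self.prototypes, dim=-1)
        return self.gamma * (v @ w.t())                  # (B, num_classes) logits

# Training step with the CE loss (sketch):
#   logits = classifier(embedder(images))
#   loss = F.cross_entropy(logits, labels)
```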
172
+ Simple-siamese (SimSiam) instance discrimination: We apply the recent self-supervised approach by Chen and He [7] to train the backbone. Each training image is augmented twice resulting in a positive pair, while no negative pairs and no Met labels are used in this approach.
173
+
174
+ Contrastive loss with synthetic/real positives and hard negatives: The backbone is trained with contrastive loss [9], where each training image is used as an anchor to form one positive and one hard negative pair per epoch. A hard-negative pair is formed by randomly choosing an image among the 10 most similar images from a different class, as these are computed according to embeddings obtained with the current backbone before each epoch. Three different ways of forming the positive pair are tested. Syn: the positive is an augmented (synthesized) version of the anchor image. Syn+Real: the selected positive is another randomly chosen image of the same class as the anchor, or an augmented version of the anchor image. Synthetic positive or one of the real (all images in the class but the anchor) positives is chosen with equal probability which is equal to one over the number of images in the class. If the class has a single image, then augmentation is performed; note that many classes contain a single image. Syn+Real-closest: same as Syn+Real but the real positive counterpart is chosen to be the one with the most similar embedding to the anchor. This is used to avoid images that depict completely different views of the object and has previously been used in location estimation [1]. Synthetic or real positive is chosen with equal probability in this case.
175
+
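The pair construction and the contrastive loss can be sketched as follows; the margin value matches the one reported in the appendix, while the function names and the exact masking details are illustrative assumptions.

```python
import torch
import torch.nn.functional as F

def contrastive_loss(v_a, v_b, is_positive, margin=1.8):
    """Contrastive loss [9] on l2-normalized embeddings: positive pairs are pulled
    together, negative pairs are pushed apart beyond the margin."""
    d = (v_a - v_b).norm(dim=-1)
    pos = is_positive.float() * d.pow(2)
    neg = (1.0 - is_positive.float()) * F.relu(margin - d).pow(2)
    return 0.5 * (pos + neg).mean()

@torch.no_grad()
def sample_hard_negative(anchor_idx, embeddings, labels, top=10):
    """Pick a random image among the `top` most similar images that belong to a
    different class, using embeddings recomputed with the current backbone."""
    sims = embeddings @ embeddings[anchor_idx]
    sims[labels == labels[anchor_idx]] = -2.0   # exclude images of the anchor's class
    candidates = torch.topk(sims, top).indices
    return int(candidates[torch.randint(len(candidates), (1,))])
```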
176
+ Pretrained models. We consider networks pretrained on other tasks and use them to obtain the image embeddings for the kNN classifier. None of these variants includes the optional FC layer in the architecture.
177
+
178
+ <table><tr><td>ID</td><td>Net</td><td>PCAw</td><td>MS</td><td>k</td><td>τ</td><td>GAP</td><td>GAP-</td><td>ACC</td></tr><tr><td>1</td><td>R18IN</td><td></td><td></td><td>3</td><td>15</td><td>3.7</td><td>16.7</td><td>26.8</td></tr><tr><td>2</td><td>R18IN</td><td>✓</td><td></td><td>7</td><td>100</td><td>10.9</td><td>28.0</td><td>33.7</td></tr><tr><td>3</td><td>R18IN</td><td></td><td>✓</td><td>50</td><td>10</td><td>10.5</td><td>23.8</td><td>33.5</td></tr><tr><td>4</td><td>R18IN</td><td>✓</td><td>✓</td><td>3</td><td>50</td><td>15.9</td><td>37.5</td><td>42.3</td></tr><tr><td>5</td><td>R18IN</td><td>✓</td><td>✓</td><td>1</td><td>-</td><td>2.9</td><td>33.6</td><td>42.3</td></tr><tr><td>6</td><td>R18IN†</td><td>✓</td><td>✓</td><td>3</td><td>100</td><td>14.1</td><td>36.9</td><td>42.3</td></tr></table>
179
+
180
+ Table 4: Recognition performance for kNN classifier on representation obtained from ResNet18 pretrained on ImageNet. MS: multi-scale representation. †: tuning $k, \tau$ only with Met queries, and without distractor queries in the validation set.
181
+
182
+ <table><tr><td>Net</td><td>GAP</td><td>GAP-</td><td>ACC</td></tr><tr><td>R18IN [18]</td><td>15.9 (+0.0)</td><td>37.5 (+0.0)</td><td>42.3 (+0.0)</td></tr><tr><td>R18SFM [32]</td><td>23.2 (+7.3)</td><td>41.5 (+4.0)</td><td>45.7 (+3.4)</td></tr><tr><td>R18SWSL [45]</td><td>24.7 (+8.8)</td><td>47.0 (+9.5)</td><td>50.9 (+8.6)</td></tr><tr><td>R50IN [18]</td><td>22.2 (+0.0)</td><td>41.8 (+0.0)</td><td>46.4 (+0.0)</td></tr><tr><td>R50SFM [32]</td><td>26.6 (+4.4)</td><td>44.8 (+3.0)</td><td>48.6 (+2.2)</td></tr><tr><td>R50SemArt (author) [13]</td><td>1.8 (-20.4)</td><td>12.2 (-29.6)</td><td>18.0 (-28.4)</td></tr><tr><td>R50SemArt (type) [13]</td><td>7.9 (-14.3)</td><td>26.8 (-15.0)</td><td>31.9 (-14.5)</td></tr><tr><td>R50SIN [15]</td><td>15.5 (-6.7)</td><td>36.4 (-5.4)</td><td>41.7 (-4.7)</td></tr><tr><td>R50SwAV [6]</td><td>22.8 (+0.6)</td><td>45.0 (+3.2)</td><td>49.6 (+3.2)</td></tr><tr><td>R50SWSL [45]</td><td>30.4 (+8.2)</td><td>52.9 (+11.1)</td><td>56.3 (+9.9)</td></tr></table>
183
+
184
+ Table 5: Comparison of recognition performance for kNN classifier with representation from backbone networks pretrained for different tasks. Relative improvements compared to the corresponding network trained on ImageNet are shown in parentheses.
185
+
186
+ ImageNet (IN) - classification: approach for training on ImageNet with cross-entropy loss [18]. Landmarks (SfM) - metric learning: approach for metric learning with contrastive loss on image pairs obtained from Structure-from-Motion on landmarks [32]. Artwork attributes (SemArt): networks trained on the SemArt dataset [14] by Garcia et al. [13] for artwork attribute prediction. In particular, we consider variants for painting type (10 classes) or author (350 classes). StylizedImageNet (SIN): network trained by Geirhos et al. [15] on a stylized version of ImageNet to improve the texture bias of deep networks. SwAV on ImageNet (IN) - self supervision: representation learning on ImageNet with self-supervision by instance discrimination. The resulting network has achieved good results in concept generalization [6]. Semi-weakly supervised (SWSL) on Instagram $1B +$ ImageNet: teacher-student approach [45] with teacher pretrained on about 1 billion images with hashtags and student trained with teacher-generated pseudo-labels, eventually fine-tuned on ImageNet.
187
+
188
+ # 4 Experiments
189
+
190
+ We perform performance evaluation of the baseline approaches using GAP and accuracy on the test queries of the Met dataset. Training, if any, is performed on the training part of the Met, while the validation queries are either used as validation set during the training or to tune the hyper-parameters of the kNN classifier. Multi-scale representation and PCA whitening with dimensionality reduction to 512D are used unless otherwise stated.
191
+
192
+ Image representation and kNN classifier components. ResNet18 trained on ImageNet is used as backbone to perform recognition with a kNN classifier. Hyper-parameters $k$ and $\tau$ are tuned and reported separately per experiment in Table 4, which shows the impact of different components. The multi-scale representation and the use of whitening are essential parts of the main approach (ID4 vs ID1, ID2, and ID3). Fixing $k = 1$ (ID5) is equivalent to no use of soft-max normalization and has significantly lower GAP on all queries, slightly lower GAP on Met queries, and identical accuracy by definition. Confidence normalization is therefore very important for handling distractors and for high GAP performance. Finally, we show that having distractors in the validation set boosts GAP through better kNN classifier hyper-parameter tuning (ID6 vs ID4).
193
+
194
+ <table><tr><td>Method</td><td>GAP</td><td>GAP-</td><td>ACC</td></tr><tr><td colspan="4">Parametric classification</td></tr><tr><td>R18IN DNet CE</td><td>9.6</td><td>24.7</td><td>30.6</td></tr><tr><td>R18IN DNet AF</td><td>16.9</td><td>32.0</td><td>36.6</td></tr><tr><td colspan="4">kNN classification</td></tr><tr><td>R18IN (baseline)</td><td>15.9</td><td>37.5</td><td>42.3</td></tr><tr><td>R18IN DNet CE</td><td>21.6</td><td>40.4</td><td>44.7</td></tr><tr><td>R18IN DNet AF</td><td>23.7</td><td>43.9</td><td>47.4</td></tr><tr><td>R18IN SimSiam</td><td>26.8</td><td>42.3</td><td>45.6</td></tr><tr><td>R18IN Con-Syn</td><td>30.4</td><td>46.6</td><td>49.4</td></tr><tr><td>R18IN Con-Syn+Real</td><td>29.8</td><td>46.0</td><td>48.8</td></tr><tr><td>R18IN Con-Syn+Real-closest</td><td>32.5</td><td>47.5</td><td>50.0</td></tr><tr><td>R18SWSL (baseline)</td><td>24.7</td><td>47.0</td><td>50.9</td></tr><tr><td>R18SWSL Con-Syn+Real-closest</td><td>36.1</td><td>52.4</td><td>55.0</td></tr></table>
195
+
196
+ Table 6: Performance comparison for different types of training on the Met dataset. Training starts from the result of pretraining on ImageNet or that of SWSL. Baseline: not trained on the Met.
197
+
198
+ ![](images/e01667c164a5e1a46b695ab9d0fa49c339a164b41bcea96a4da4a6744165859f.jpg)
199
+ Figure 6: Examples of incorrect and correct classification of test images for R18IN (baseline) and R18IN Con-Syn+Real-closest (R18IN $\star$ ), respectively. The test images are shown next to their nearest neighbor from the Met exhibits that produced the respective prediction per method.
200
+
201
+ Pretrained backbones and kNN classifier. Table 5 summarizes results of recognition performance with a kNN classifier for backbones pretrained on different tasks. Networks for art attribute prediction perform worse than the ImageNet ones, verifying that the task of art attribute prediction is far from that of ILR. The network for metric learning on landmarks provides improvements; despite the domain difference (artwork vs landmarks), training for metric learning well reflects the objectives of ILR. SwAV provides a performance boost, verifying the usefulness of unsupervised representation learning for better generalization. Finally, SWSL is the best performing variant demonstrating the benefits of learning on a very large image corpus despite the noisy labels; we expect the training set to include many artworks too.
202
+
203
+ Training on the Met dataset. Results from training on the Met dataset are shown in Table 6 with a parametric deep network classifier and with a kNN classifier. The latter is shown to be superior, while carrying the extra cost of storing a 512-D vector per training image. AF is shown to be better than CE, verifying prior results on ILR [4]. SimSiam improves the performance over the baseline without the use of any supervision, indicating that self-supervised learning is a promising direction for ILR. Con-Syn uses the same positives as SimSiam but further boosts the performance by the use of negatives. Including real positives too with contrastive loss achieves the best performance, but only if the positive pair is properly disambiguated (Real-closest vs Real). Improvements by training on the Met are confirmed starting from R18SWSL too. Examples where R18IN Con-Syn+Real-
204
+
205
+ ![](images/c51a1a7a0738394f6c0d087e2cc9651433d022ed316c1bf3758fec31d908912e.jpg)
206
+ Figure 7: Examples of hard negative pairs formed by the approaches that use the Contrastive loss on the Met training set. These examples additionally demonstrate the large inter-class similarity of the dataset. Images are shown as squares only for the purposes of this figure.
207
+
208
+ ![](images/21688eea4325a314a2ba2e69ea40a68f2bac5dd0b4e4e306d6e5cf0f04bf52ce.jpg)
209
+ Figure 8: Accuracy improvement of the kNN classifier over the parametric one for varying number of training images per class. DNet is trained with AF loss for the parametric classifier, while the embeddings learned with this setup are used for the kNN classifier. Relative improvements are reported in percentage for the different embedding variants.
210
+
211
+ closest succeeds in prediction but the R18IN baseline fails are shown in Figure 6. These cases include challenges such as large viewpoint changes and high inter-class similarity. Examples of hard negative pairs used in the contrastive variants are shown in Figure 7.
212
+
213
+ Few training examples and kNN classifier. We train a parametric classifier and additionally use the resulting embeddings for the kNN classifier. A comparison is shown in Figure 8, where performance is reported separately according to the number of training examples per ground-truth class of each query. The kNN classifier not only performs better than the parametric one, but is also more suitable for long-tail recognition, as it achieves increasingly higher gains for more underrepresented classes.
214
+
215
+ # 5 Conclusions
216
+
217
+ This work introduces a new large-scale dataset for ILR on artworks. It is the first dataset on artworks to focus on this task, the only large-scale ILR dataset with clean annotations, and it poses a number of different challenges. The considered task is closer to ILR and deep representation learning than it is to popular computer vision tasks in the artwork domain, whilst including many of the same challenges. Fine-tuning the representation on The Met exhibits appears essential but also challenging due to the training set statistics. We expect this dataset to foster research not only on ILR for artworks but also for ILR across multiple domains, when combined with other existing datasets.
218
+
219
+ # 6 Acknowledgements
220
+
221
+ The authors would like to thank The Met employees Jennie Choi and Maria Kessler for their support and help, Andre Araujo, Tobias Weyand, and Xu Zhang for valuable discussions during the earlier stages of this work, and all the Flickr photographers whose photos are included in this dataset. This work was supported by JSPS KAKENHI No. JP20K19822, Junior Star GACR grant No. GM 21-28830M, and MSMT LL1901 ERC-CZ grant.
222
+
223
+ # References
224
+
225
+ [1] Relja Arandjelovic, Petr Gronat, Akihiko Torii, Tomas Pajdla, and Josef Sivic. NetVLAD: CNN architecture for weakly supervised place recognition. In CVPR, 2016.
226
+ [2] Yalong Bai, Yuxiang Chen, Wei Yu, Linfang Wang, and Wei Zhang. Products-10k: A large-scale product recognition dataset. In arXiv, 2020.
227
+ [3] Vassileios Balntas, Shuda Li, and Victor Prisacariu. Relocnet: Continuous metric learning relocalisation using neural nets. In ECCV, 2018.
228
+ [4] Bingyi Cao, André Araujo, and Jack Sim. Unifying deep local and global features for image search. In ECCV, 2020.
229
+ [5] Gustavo Carneiro, Nuno Pinho Da Silva, Alessio Del Bue, and João Paulo Costeira. Artistic image classification: An analysis on the PRINTART database. In ECCV. Springer, 2012.
230
+ [6] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. In NeurIPS, 2020.
231
+ [7] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021.
232
+ [8] Lele Cheng, Xiangzeng Zhou, Liming Zhao, Dangwei Li, Hong Shang, Yun Zheng, Pan Pan, and Yinghui Xu. Weakly supervised learning with side information for noisy labeled images. In ECCV, 2020.
233
+ [9] Sumit Chopra, Raia Hadsell, and Yann LeCun. Learning a similarity metric discriminatively, with application to face verification. In CVPR, 2005.
234
+ [10] Elliot J Crowley and Andrew Zisserman. The state of the art: Object retrieval in paintings using discriminative regions. In BMVC, 2014.
235
+ [11] Riccardo Del Chiaro, Andrew D Bagdanov, and Alberto Del Bimbo. Noisyart: A dataset for webly-supervised artwork recognition. In VISIGRAPP (4: VISAPP), 2019.
236
+ [12] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In CVPR, 2019.
237
+ [13] Noa Garcia, Benjamin Renoust, and Yuta Nakashima. Context-aware embeddings for automatic art analysis. In Proceedings of the ACM International Conference on Multimedia Retrieval, 2019.
238
+ [14] Noa Garcia and George Vogiatzis. How to read paintings: semantic art understanding with multi-modal retrieval. In ECCV Workshops, 2018.
239
+ [15] Robert Geirhos, Patricia Rubisch, Claudio Michaelis, Matthias Bethge, Felix A Wichmann, and Wieland Brendel. Imagenet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness. In ICLR, 2019.
240
+ [16] Chuanxing Geng, Sheng-jun Huang, and Songcan Chen. Recent advances in open set recognition: A survey. PAMI, 2020.
241
+ [17] M Hadi Kiapour, Xufeng Han, Svetlana Lazebnik, Alexander C Berg, and Tamara L Berg. Where to buy it: Matching street clothing photos in online shops. In ICCV, 2015.
242
+ [18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016.
243
+ [19] Saihui Hou, Xinyu Pan, Chen Change Loy, Zilei Wang, and Dahua Lin. Learning a unified classifier incrementally via rebalancing. In CVPR, 2019.
244
+ [20] Hervé Jégou and Ondrej Chum. Negative evidences and co-occurrences in image retrieval: The benefit of pca and whitening. In ECCV. Springer, 2012.
245
+ [21] Jeff Johnson, Matthijs Douze, and Hervé Jégou. Billion-scale similarity search with gpus. In arXiv, 2017.
246
+
247
+ [22] Bingyi Kang, Saining Xie, Marcus Rohrbach, Zhicheng Yan, Albert Gordo, Jiashi Feng, and Yannis Kalantidis. Decoupling representation and classifier for long-tailed recognition. In ICLR, 2020.
248
+ [23] Sergey Karayev, Matthew Trentacoste, Helen Han, Aseem Agarwala, Trevor Darrell, Aaron Hertzmann, and Holger Winnemoeller. Recognizing image style. In BMVC, 2014.
249
+ [24] Jan Knopp, Josef Sivic, and Tomas Pajdla. Avoiding confusing features in place recognition. In ECCV, 2010.
250
+ [25] Piotr Koniusz, Yusuf Tas, Hongguang Zhang, Mehrtash Harandi, Fatih Porikli, and Rui Zhang. Museum exhibit identification challenge for the supervised domain adaptation and beyond. In ECCV, 2018.
251
+ [26] Jonathan Krause, Hailin Jin, Jianchao Yang, and Li Fei-Fei. Fine-grained recognition without part annotations. In CVPR, 2015.
252
+ [27] Shiyu Liang, Yixuan Li, and R Srikant. Enhancing the reliability of out-of-distribution image detection in neural networks. In ICLR, 2018.
253
+ [28] Ziwei Liu, Ping Luo, Shi Qiu, Xiaogang Wang, and Xiaoou Tang. Deepfashion: Powering robust clothes recognition and retrieval with rich annotations. In CVPR, 2016.
254
+ [29] Hui Mao, Ming Cheung, and James She. Deepart: Learning joint representations of visual arts. In ACM Multimedia, 2017.
255
+ [30] Thomas Mensink and Jan Van Gemert. The Rijksmuseum challenge: Museum-centered visual recognition. In ICMR, 2014.
256
+ [31] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Köpf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In arXiv, 2019.
257
+ [32] Filip Radenović, Giorgos Tolias, and Ondřej Chum. Fine-tuning cnn image retrieval with no human annotation. PAMI, 41(7):1655-1668, 2019.
258
+ [33] Jie Ren, Peter J Liu, Emily Fertig, Jasper Snoek, Ryan Poplin, Mark A DePristo, Joshua V Dillon, and Balaji Lakshminarayanan. Likelihood ratios for out-of-distribution detection. In NeurIPS, 2019.
259
+ [34] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet Large Scale Visual Recognition Challenge. IJCV, 115(3):211-252, 2015.
260
+ [35] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. Imagenet large scale visual recognition challenge. IJCV, 2015.
261
+ [36] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Fast image-based localization using direct 2d-to-3d matching. In ICCV, 2011.
262
+ [37] Benoit Laurent Auguste Seguin, Carlota Striolo, Isabella di Lenardo, and Frédéric Kaplan. Visual Link Retrieval in a Database of Paintings. In ECCVW, 2016.
263
+ [38] Xi Shen, Alexei A. Efros, and Mathieu Aubry. Discovering Visual Patterns in Art Collections With Spatially-Consistent Feature Learning. In CVPR, 2019.
264
+ [39] Gjorgji Strezoski and Marcel Worring. Omniart: a large-scale artistic benchmark. TOMM, 14(4):1-21, 2018.
265
+ [40] Giorgos Tolias, Yannis Avrithis, and Hervé Jégou. To aggregate or not to aggregate: selective match kernels for image search. In ICCV, Sep. 2013.
266
+ [41] Giorgos Tolias, Tomas Jenicek, and Ondrej Chum. Learning and aggregating deep local descriptors for instance-level recognition. In ECCV, 2020.
267
+ [42] Feng Wang, Xiang Xiang, Jian Cheng, and Alan Loddon Yuille. Normface: L2 hypersphere embedding for face verification. In ACM Multimedia, pages 1041-1049, 2017.
268
+ [43] Tobias Weyand, André Araujo, Bingyi Cao, and Jack Sim. Google landmarks dataset v2 - A large-scale benchmark for instance-level recognition and retrieval. In CVPR, 2020.
269
+ [44] Michael J Wilber, Chen Fang, Hailin Jin, Aaron Hertzmann, John Collomosse, and Serge Belongie. BAM! The Behance artistic media dataset for recognition beyond photography. In ICCV, 2017.
270
+ [45] I. Zeki Yalniz, Hervé Jégou, Kan Chen, Manohar Paluri, and Dhruv Mahajan. Billion-scale semi-supervised learning for image classification. In arXiv, 2019.
271
+
272
+ [46] Chenyang Zhang, Christine Kaeser-Chen, Grace Vesom, Jennie Choi, Maria Kessler, and Serge Belongie. The iMet collection 2019 challenge dataset. In arXiv, 2019.
273
+
274
+ # A Appendix
275
+
276
+ # A.1 Implementation details
277
+
278
+ All methods are implemented in PyTorch [31] and use FAISS [21] for nearest neighbor search. In all approaches that involve training the Adam optimizer is used, weight decay is equal to $10^{-6}$ , learning rate is equal to $10^{-7}$ for the backbone and it is decreased by a factor of 10 in the middle of the training. The augmentations used consist of random cropping in the scale range [0.7, 1.0] and resize to $500 \times 500$ , color jittering with probability 0.8, and conversion to grayscale with probability 0.2. DNet is trained with a batch size of 256 images for 25 epochs with the learning rate of the classifier set to $10^{-3}$ . Temperature $\gamma$ used with CE is set to be fixed and equal to 30, while the temperature and margin penalty for AF are set to be fixed and equal to 64 and 0.5, respectively. SimSiam is trained with a batch size of 128 images, i.e. 64 original images augmented twice, for 15 epochs, with the learning rates of the projector and predictor MLP set to $10^{-3}$ . The training with contrastive loss is performed for 10 epochs with the margin set to 1.8. The batch size is equal to 128 images, comprised of 64 pairs randomly sampled from the positive and negative pairs of all anchors. An epoch is finished when all training images are used as anchors once.
279
+
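The augmentation pipeline described above can be expressed with torchvision transforms roughly as follows; the color-jitter strengths are not specified in the text, so the values used here are assumptions.

```python
from torchvision import transforms

# Random crop in the scale range [0.7, 1.0] resized to 500x500, color jittering with
# probability 0.8 (strengths assumed), and conversion to grayscale with probability 0.2.
train_augmentations = transforms.Compose([
    transforms.RandomResizedCrop(500, scale=(0.7, 1.0)),
    transforms.RandomApply([transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
    transforms.RandomGrayscale(p=0.2),
    transforms.ToTensor(),
])
```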
280
+ The best epoch is chosen according to the validation accuracy of the corresponding (parametric or non-parametric) classifier. To speed up the process of choosing the best epoch with the kNN classifier, single-scale representation is used without PCAw.
281
+
282
+ The hyper-parameters of the kNN classifier are tuned according to GAP on the validation set with grid search on the Cartesian product of the sets $\{1,2,3,5,7,10,15,20,50\}$ and $\{0.01,0.1,1,5,10,15,20,25,30,50,100,500\}$ for $k$ and $\tau$, respectively. The temperature of the parametric classifiers is also tuned according to validation GAP once the training is finished.
283
+
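A sketch of the grid search over $k$ and $\tau$ is shown below; `validation_gap` is a hypothetical helper that runs the kNN classifier on the validation queries with the given hyper-parameters and returns the GAP.

```python
import itertools

k_grid = [1, 2, 3, 5, 7, 10, 15, 20, 50]
tau_grid = [0.01, 0.1, 1, 5, 10, 15, 20, 25, 30, 50, 100, 500]

best_k, best_tau, best_gap = None, None, -1.0
for k, tau in itertools.product(k_grid, tau_grid):
    score = validation_gap(k=k, tau=tau)   # hypothetical helper, not part of the paper
    if score > best_gap:
        best_k, best_tau, best_gap = k, tau, score
print("best k, tau, validation GAP:", best_k, best_tau, best_gap)
```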
284
+ # A.2 Dataset hosting and maintenance
285
+
286
+ Public access and download links to the dataset are provided through the dataset webpage: http://cmp.felk.cvut.cz/met/. It contains tar files for all images and the ground-truth files for evaluation. Publicly available reference code for using the dataset and computing the evaluation metrics can be found at https://github.com/nikosips/met. The code repository additionally includes code to reproduce some of the methods evaluated in the paper. The dataset is hosted on the servers of the Visual Recognition Group at the Czech Technical University in Prague.
287
+
288
+ # A.3 License
289
+
290
+ The annotations are licensed under the CC BY 4.0 license. The images included in the dataset either are publicly available on the web, coming from three sources, i.e. the Met open collection, Flickr, and Wikimedia Commons, or are created by us. The corresponding licenses for the ones that are available on the web are public domain, Creative Commons, and public domain, respectively. We do not own their copyright. We release the ones created by us to the public domain.
291
+
292
+ We, the authors of this paper and creators of the dataset, bear all responsibility in case of violation of rights.
293
+
294
+ # A.4 Flickr users
295
+
296
+ We thank the 37 following Flickr photographers whose photos with permissive license are included in the Met dataset. They appear in the form: username [real name], profile url.
297
+
298
+ - edenpictures [Eden, Janine and Jim], https://www.flickr.com/people/edenpictures
299
+ - Eric.Parker [Eric Parker], https://www.flickr.com/people/ericparker/
300
+ - semarr [Sarah Marriage], https://www.flickr.com/people/semarr/
301
+ - mharrsch [Mary Harrsch], https://www.flickr.com/people/mharrsch/
302
+
303
+ - Johnk85 [Johnk85], https://www.flickr.com/people/johnk85/
304
+ - zinetv [Lionel Martinez], https://www.flickr.com/people/zinetv/
305
+ - opacity [], https://www.flickr.com/people/opacity/
306
+ - Will.House [Will House], https://www.flickr.com/people/karloff/
307
+ - sarahstierch [Sarah Stierch], https://www.flickr.com/people/sarahvain/
308
+ - euthman [Ed Uthman], https://www.flickr.com/people/euthman/
309
+ - griannan [], https://www.flickr.com/people/griannan/
310
+ - Trish Mayo [], https://www.flickr.com/people/obsessivephotography/
311
+ - Stephen Sandoval [Stephen Sandoval], https://www.flickr.com/people/pursuebliss/
312
+ - Grufnik [], https://www.flickr.com/people/grufnik/
313
+ - smallcurio [], https://www.flickr.com/people/smallcurio/
314
+ - gtrwndr87 [Matthew Mendoza], https://www.flickr.com/people/mattmendoza/
315
+ - peterjr1961 [Peter Roan], https://www.flickr.com/people/peterjr1961/
316
+ - Stabbur's Master [Larry Syverson], https://www.flickr.com/people/124651729@N04/
317
+ - gorekun [], https://www.flickr.com/people/gorekun/
318
+ - rverc [Regan Vercruysse], https://www.flickr.com/people/rverc/
319
+ - IslesPunkFan [Neil R], https://www.flickr.com/people/islespunkfan/
320
+ - Pete Tillman [Peter D. Tillman], https://www.flickr.com/people/29050464@N06/
321
+ - squesada70 [Sergio Quesada], https://www.flickr.com/people/squesada/
322
+ - jareed [], https://www.flickr.com/people/jareed/
323
+ - stausi [], https://www.flickr.com/people/stausi/
324
+ - terryballard [Terry Ballard], https://www.flickr.com/people/terryballard/
325
+ - suetry [Susan Tryforos], https://www.flickr.com/people/stryforos/
326
+ - h-bomb [Howard Walfish], https://www.flickr.com/people/h-bomb/
327
+ - Robert Goldwater Library [The Robert Goldwater Library, The Metropolitan Museum of Art], https://www.flickr.com/people/goldwaterlibrary/
328
+ - juan tan kwon [jon mannion], https://www.flickr.com/people/jmansion/
329
+ - ctj71081 [], https://www.flickr.com/people/55267995@N04/
330
+ - ketrin1407 [], https://www.flickr.com/people/65986072@N00/
331
+ - wallyg [Wally Gobetz], https://www.flickr.com/people/wallyg/
332
+ - h_wang_02 [], https://www.flickr.com/people/7238238@N02/
333
+ - Olivier Bruchez [Olivier Bruchez], https://www.flickr.com/people/bruchez/
334
+ - JBYoder [Jeremy Yoder], https://www.flickr.com/people/jbyoder/
335
+ - jaroslavd [jerry dohnal], https://www.flickr.com/people/jaroslavd/
336
+
337
+ # A.5 Additional results
338
+
339
+ Figure 9 demonstrates the performance as the dimensionality of the image representation after PCAw increases. Combining two representations by simple concatenation is shown to be effective.
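+
+ A minimal sketch of this combination, assuming each descriptor set is L2-normalized before concatenation and that PCA whitening is learned on the training descriptors (scikit-learn is used here purely for illustration):
+
+ ```python
+ import numpy as np
+ from sklearn.decomposition import PCA
+
+ def l2n(x, eps=1e-8):
+     return x / (np.linalg.norm(x, axis=1, keepdims=True) + eps)
+
+ def concat_pcaw(desc_a, desc_b, train_a, train_b, dim):
+     """Combine two descriptor sets by concatenation, then apply PCA whitening
+     (learned on the training descriptors) and L2-normalize; `dim` is the output
+     dimensionality swept in Figure 9."""
+     pca = PCA(n_components=dim, whiten=True)
+     pca.fit(np.hstack([l2n(train_a), l2n(train_b)]))
+     combined = np.hstack([l2n(desc_a), l2n(desc_b)])
+     return l2n(pca.transform(combined))
+ ```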
340
+
341
+ Local descriptors: We evaluate the kNN classifier where the image-to-image similarity is computed with HOW local descriptors [41] (ECCV2020 R18 trained model) and ASMK [40]. It achieves 25.3 GAP, 47.6 $\mathrm{GAP^{-}}$ and 50.9 ACC, which is the highest performance for this backbone so far, although very close to that achieved by the R18SWSL model with global-descriptor similarity. Note that this is a much costlier approach than all the others in the paper, which use global descriptors. The use of local descriptors trained for this task is likely to be a promising future direction, especially due to the high inter-class similarities and the importance of distinctive artwork details.
342
+
343
+ Mini dataset: We additionally create a smaller version of the database (training set) that contains all images from the classes that constitute the Met queries, plus about an extra $10\%$ of the images from the rest of the classes of the original database. Its final size is 38,307 images from 33,501 classes. This set, along with the original query sets (test/val), forms a subset of the dataset that serves as a faster way to check the validity of different training methods, before moving on to training on the entire database. This setup corresponds to an easier recognition problem than the original one. For reference, R18IN with kNN classification achieves 27.1 GAP, 49.0 $\mathrm{GAP^{-}}$ and 53.2 ACC on this subset.
346
+
347
+ OOD ratio: Results with and without distractors in the test set are included in the paper (GAP and $\mathrm{GAP^{-}}$ , respectively). We now include results, in Table 7, for a varying ratio of OOD queries in the validation set and in the test set. The results demonstrate the increasing difficulty introduced by adding more distractors, and show that a small number of validation distractors is enough for hyper-parameter tuning of the kNN classifier.
348
+
349
+ <table><tr><td>Val \ Test</td><td>0%</td><td>5%</td><td>10%</td><td>50%</td><td>100%</td></tr><tr><td>0%</td><td>36.9</td><td>32.9</td><td>29.7</td><td>19.9</td><td>14.1</td></tr><tr><td>10%</td><td>36.9</td><td>32.9</td><td>29.7</td><td>19.9</td><td>14.1</td></tr><tr><td>100%</td><td>37.5</td><td>33.6</td><td>30.9</td><td>21.8</td><td>15.9</td></tr></table>
350
+
351
+ Table 7: Performance of R18IN with kNN classification for different amounts (percentage of their total number) of distractor queries in the validation set (used for tuning $k,\tau$ ) and in the test set. Ratios lower than $100\%$ are obtained by removing the appropriate number of distractor queries.
352
+
353
+ Approaches for long-tail recognition: In order to mitigate the harmful effect of the imbalance of the Met training set on the learning process, we test a number of different approaches that are designed for long-tail recognition. Using the DNet classifier trained with Arcface loss as the reference method, the following methods are additionally used for training. Class weighting: The contribution of each sample to the loss function is weighted by the inverse of its class frequency. Class-balanced sampling: The mini-batch samples are sampled uniformly across classes, and not across all training images. Classifier retraining with class-balanced sampling: After training the reference method, the backbone is kept frozen and only the classifier is re-initialized and trained with class-balanced sampling, as in the work of Kang et al. [22]. We observe no increase in accuracy with any of these methods: the reference method achieves 36.6 accuracy, class weighting achieves 35.8, class-balanced sampling achieves 33.4, and retraining achieves 35.0.
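+
+ For illustration, the first two variants can be sketched as follows in PyTorch; the weight normalization and sampler details are assumptions.
+
+ ```python
+ import numpy as np
+ import torch
+ from torch.utils.data import WeightedRandomSampler
+
+ def inverse_frequency_weights(labels, num_classes):
+     """Class weighting: each sample contributes to the loss with weight 1 / class frequency
+     (e.g., used to scale per-sample loss terms); the mean-normalization is an assumption."""
+     counts = np.bincount(labels, minlength=num_classes).astype(np.float64)
+     weights = 1.0 / np.maximum(counts, 1)
+     return torch.tensor(weights / weights.mean(), dtype=torch.float32)
+
+ def class_balanced_sampler(labels, num_classes, num_samples):
+     """Class-balanced sampling: mini-batch samples are drawn uniformly across classes
+     by giving each image a sampling weight inversely proportional to its class size."""
+     counts = np.bincount(labels, minlength=num_classes).astype(np.float64)
+     per_sample = 1.0 / counts[labels]
+     return WeightedRandomSampler(per_sample, num_samples=num_samples, replacement=True)
+
+ # For classifier retraining (Kang et al. [22]): freeze the backbone, re-initialize the
+ # classifier, and train it with the class-balanced sampler passed to the DataLoader.
+ ```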
354
+
355
+ # A.6 Dataset extras
356
+
357
+ Figure 10 shows a smoothed histogram of the number of exhibit images by creation year, grouped in bins of 500 years. More than half of the exhibits were created between 1500 AD and 1999 AD, with a remarkable number of ancient artworks created between 500 BC and 1 BC.
358
+
359
+ The number of photographers versus the Met queries that belong to them is shown in Figure 11.
360
+
361
+ We present examples of Met queries and training images from the same class in Figures 12 - 16. Finally, we show examples of distractor queries from the other-artwork and non-artwork category in Figure 17 and Figure 18, respectively.
362
+
363
+ ![](images/6c3cd13caab13a5185f652056c21f64e91995b0c93a11a46af4be1e8f33e96f3.jpg)
364
+ Figure 9: Performance with a kNN classifier versus dimensionality for different backbones. Two approaches combined by simple representation concatenation before PCAw are denoted by "+". $\star$ : Contrastive Syn+Real-Closest training on the Met dataset.
365
+
366
+ ![](images/08676796acf51aa628e8d6e4da31c6cdb4b9d53188844ca0aaade3c3839f88ba.jpg)
367
+
368
+ ![](images/31a5f0fc3acd7e19eca818625274d622cf97de8a9f7d95fbf7e4b69697e3dd73.jpg)
369
+ Figure 10: Number of exhibit images per time period.
370
+
371
+ ![](images/af2c1bee6f8007d09ccd9ad8dde8425a9daee8fa630c7963e6c860a9fa486cb8.jpg)
372
+ Figure 11: The number of photographers versus the Met queries that belong to them.
373
+
374
+ ![](images/92d3dc3d4eb4b4962c7bd1e5da78c9a2706251d11545950d7cc92f8aa6b58837.jpg)
375
+
376
+ ![](images/312f6614d763733f16f1025a65ff9f1b93780e514dede374fa3fa2e509fa732e.jpg)
377
+
378
+ ![](images/66c312e6df38156ab4fd9210f3feffc945a7b949372e9661ef6343a40ff2d0e7.jpg)
379
+
380
+ ![](images/15ad03c36195ce60a1471d77c2cfa829601cf6c139ef77bdc382fac4dcf22dba.jpg)
381
+
382
+ ![](images/3b66eb8b50659cfb58efd1a469d5e11c55024e534cd68f2da4d68cf1a63a68ee.jpg)
383
+
384
+ ![](images/18bae4acfaf9dc49508d48186288fad26b3a478a8058fbd9965aa07c3650f836.jpg)
385
+
386
+ ![](images/ec98558d50c5a50ca581032351621f567cbb8c5c8b8d698e9263a9a8e17817e6.jpg)
387
+
388
+ ![](images/f2077cd3760eadf3ac7fcb24e9d8ec06de1a280d883605f2628a650f64595c04.jpg)
389
+
390
+ ![](images/77d87a1e1c8ac576a9d609f4435a2f93e4e8208c2e39a12b8024bda533913311.jpg)
391
+
392
+ ![](images/0af2f25dbae6f19166fb07c1609a4eafe82d9ced6dde4aa6605db5189ce536e1.jpg)
393
+
394
+ ![](images/8740964cf91236d3836e215655398163ce509aa13065887cc6407139d55bd241.jpg)
395
+
396
+ ![](images/db181d569783a2be488c0f9995575d75ab66b7d82bf6f7d98300fa6ca4c64fe2.jpg)
397
+
398
+ ![](images/53e3b357cf4fc0a67556b396bbec1042bfb16e9808bd338a53e36b2d908e9109.jpg)
399
+
400
+ ![](images/e24c90635caf65f5942a0f9d1b519aba1073aca8dbadb94b3d73306e8c5c6948.jpg)
401
+
402
+ ![](images/3efb05408439d5a792cf91baa7e7115134a40e06a36008419ce2876795b5f0a9.jpg)
403
+
404
+ ![](images/b810ab64008bd75cae17acf29b625e7cfdcf8f73f768d7ee917818f740a4a8f2.jpg)
405
+
406
+ ![](images/243fd2915f82a0edc8dd2e66eae190aeb507ded75b9baf1f889f95ae57a7c149.jpg)
407
+
408
+ ![](images/bc8ac0333010b282aef5ddaa9a52e80bcd2e58d01cb0012f022117a206b41e89.jpg)
409
+
410
+ ![](images/a0359a045b468213c055725d2ea96d1d4164ea763806948276febe6bfe24c1fb.jpg)
411
+
412
+ ![](images/76db6b9e8a4f26eacb390d7da3695e28eb1fbc4c29c16b9dcc8807dff13fa11e.jpg)
413
+
414
+ ![](images/a1294e46c86c4d36b3a5901f14dc50cd55f1fd6655e5f7e4c5c283a094752373.jpg)
415
+
416
+ ![](images/c23f7e945b771c1f7b0fa6a335322c4587f60f5a5f57bb178e66f39329cd5828.jpg)
417
+
418
+ ![](images/623b8a7a10b7460fa99fe4110fa4d8c0eae0c31f06544b5266d46fd045fbf421.jpg)
419
+
420
+ ![](images/9d2aa7c33322437633896b726eee7ebe0607f8da5d087c6ee70141c953924ada.jpg)
421
+
422
+ ![](images/05be59c6f7632526a6bbc95b7338d2f4d6df34634c9c25d6bc3f1768fc48f5f6.jpg)
423
+
424
+ ![](images/2e5683c05e260199a116458d48b482a5c57bdbcf22bb204aed5740f422f33e21.jpg)
425
+
426
+ ![](images/a8dfc7598ab1c78e2ceb21b61399e07ed6b607d29fb74c1328efd1906bfa56fc.jpg)
427
+
428
+ ![](images/09086eef49cf01755ca9ce810a61472236d2043efbfeeaccd402142087333117.jpg)
429
+
430
+ ![](images/f421415d43c2b65b614efe4b49b4efbcb0ba3857b3c127ceed78ba9279c7be18.jpg)
431
+
432
+ ![](images/2e29717313cbeb854f23ca9a6845d920ce812424ff0ae03c0142c3e174f41a8e.jpg)
433
+
434
+ ![](images/62841a81d2046c5c59ebf9bab7d1b2b95f0ae80858922f3ac232a796b79c5ecc.jpg)
435
+
436
+ ![](images/32b9b44e5846e6b77ed2d7ac254167d68b23524cfe8a38ad9bc82bafde934566.jpg)
437
+
438
+ ![](images/10b156f762cd1fb5eca9977f3b1e5305017976bf6e53e671461dacd8e5de454d.jpg)
439
+
440
+ ![](images/62e0882ad0b24ad836ee4719ee84ec50126b7abeab903566df9968d2aec4b720.jpg)
441
+
442
+ ![](images/c764d4e95cc995777711d27a855e560e36d789651de70c5963ac2aa85716f186.jpg)
443
+
444
+ ![](images/4b0e014f4872f01d0f42ea5117a049cec2eda643fd3907ea1614e2b89be933aa.jpg)
445
+
446
+ ![](images/2829acbb9d6c675aee14e9257c30431f153194ed90dfc4309b54042af83599db.jpg)
447
+ Figure 12: Examples of Met query images and training (exhibit) images of the corresponding Met class. Query images are shown in black border.
448
+
449
+ ![](images/9a0a3b13f214ad5ddd0298d11b9c999944f2aa9c80917f4a652459adc60100fd.jpg)
450
+
451
+ ![](images/dce9ddc2db5a080967a0e117ba568f496b098c2f5be3ed1df9b72a9364949ca1.jpg)
452
+
453
+ ![](images/da3915b3794f784b6fbc4f4785cd18b073e0f93e3cb6caa18acf1e805801f793.jpg)
454
+
455
+ ![](images/8580bc256edb0e348c3ac93921ecb6daf433794fa8fc0edc72a33b9db05bd4c4.jpg)
456
+
457
+ ![](images/014a32f089b50ac91ed0c3cc45420eb97b69b71b01e0a88cf87c2dff39b12714.jpg)
458
+
459
+ ![](images/9f4ea1bf4ec69c4fa8e335d92be6c18adf81ef9ec0ef1e54ef0432a218702543.jpg)
460
+
461
+ ![](images/6b13588908c47dfd19b81dc1f176eb5c456e0406c8cc6f4a439d426e23e0d919.jpg)
462
+
463
+ ![](images/5a6604ea83a218ddbfb4e21b0f17776c846125128f6d5392fb07c3901972519f.jpg)
464
+
465
+ ![](images/71c146b2b843c8b5f5e8588169f7c3b311a87dd6e6f550191155d8d5f09d7de9.jpg)
466
+
467
+ ![](images/f46db33fc591f3a9ad7d5ac5e34a7097acacdd7fbfc3bfc342460e6be85c2378.jpg)
468
+
469
+ ![](images/3520f92a98acce42a06f2263a66233fc70214590d25fa941583bafb2f7dcb7c4.jpg)
470
+
471
+ ![](images/4fb513a9aea4cc9827ee905a121bf9e873d696196ef615e5a6e4daafa3d1678d.jpg)
472
+
473
+ ![](images/e27f075434363d101db17692fc081568995d97bf02f23532e92379c516870003.jpg)
474
+
475
+ ![](images/3ea3f6598504e264249255ea0970256d7cb8b490c2b646b51ceddb74e0cb4221.jpg)
476
+
477
+ ![](images/33328fdf30ecabeef72e15f2b22b4dfc263d0bf47e2d68bd16d0c38cb45a4897.jpg)
478
+
479
+ ![](images/489ad1c5d984f92b26affcb4c3acd3ae982f2cbc1c9627cbb79ff0898ed79132.jpg)
480
+
481
+ ![](images/57fbdb0396afe7944188e39ae9a5fc0a9e435aaaf71b0bcd9639365ee83514cb.jpg)
482
+
483
+ ![](images/4ddc6a0fdddafeb934155838a3afb45d598be51f9cfe9f76db8036e75f9ae37f.jpg)
484
+
485
+ ![](images/dff9c371c217f21f340c9141ac45e0cfeb4a33b7a1de98e71d22f9edcfedce3f.jpg)
486
+
487
+ ![](images/a8ba449054a08c602ff8379745f5ac159472481e689b4c7095931f4b66f693ae.jpg)
488
+
489
+ ![](images/240138a98368ac8d6d5704edd7e2223310742f848611c9cd1f6d52b4f1468b99.jpg)
490
+
491
+ ![](images/d8a88e44010b03c177693dfffdfd55d598d3822baa3527acb5a1247a45ed8545.jpg)
492
+
493
+ ![](images/513a67c31d624009657d68ff9e5d7a21e3c5391f4d663b2cc5087ab1e7acc92c.jpg)
494
+
495
+ ![](images/ffc9c8e57e38aef31be90764a192753f44b1795a9d1f4fc0a57eeaba6b49773f.jpg)
496
+
497
+ ![](images/5fe8043948f5d34723da48fc954e8153f100328340f90b43385875d89b460a4c.jpg)
498
+
499
+ ![](images/92e58666b928ddb3bc8e2499cc9d9f8a4af44c15579e6c6e91d283e325cb4b8a.jpg)
500
+
501
+ ![](images/0ea4eca278eb5808560eb2f3a52d67332b90233be723b1e4ebdcb19326b48132.jpg)
502
+
503
+ ![](images/4cc2a8e4fc7e58e5e2c7f5751f607909ca6f3f1a5407a451059be8ea92416ac3.jpg)
504
+
505
+ ![](images/a263f17c67307b5a53a7f4e9298ab931d1b8f0d89425f90ea89a4f8deecb32de.jpg)
506
+
507
+ ![](images/e1bd54d05f4466aa2902147ff03e5d675e2275769e98f3e8151c2cd02d674af2.jpg)
508
+
509
+ ![](images/ae0e84cda27b7db6ddfea663334adbe826b18b7b6dfd1dfd6a66674c27b0db53.jpg)
510
+
511
+ ![](images/ac9bfb62bdeec7aea11d1d5b173b2993f911d799aab2056823d924f8e533f0f6.jpg)
512
+
513
+ ![](images/983bccb1eb2a065ed98feac4e8d9e3297119300686bff11ef28ecea8ab2216d4.jpg)
514
+
515
+ ![](images/e325809524fcf86815f713550fdf69573920903d44fe54ee75ac1a67929dedb3.jpg)
516
+
517
+ ![](images/b3f180db2c3ab91a244176e52bf122b2db37ccbf65914977a5da6e2596e72b5d.jpg)
518
+
519
+ ![](images/08fcf15421a96feb1297a30877b00d9f1e351a4d1f8ee95b72e918c3b78a0345.jpg)
520
+
521
+ ![](images/9ee8bc2337df85f5ab29954c2a298f625ba9180a606403796cfbf7fad547ac13.jpg)
522
+
523
+ ![](images/1999625d0ce6d1d36fc7167ab2dca64282d8a1d6c286b59a53e491d620d8b87e.jpg)
524
+
525
+ ![](images/955d16c70f09353edeff25f5158d71f349474eda25d68963da6b56307cefb46d.jpg)
526
+ Figure 13: Examples of Met query images and training (exhibit) images of the corresponding Met class. Query images are shown in black border.
527
+
528
+ ![](images/8ff074a25b55b2b6d8f01ca8145970db82fa833c7aad9ac6eae09602d66eb976.jpg)
529
+
530
+ ![](images/87dcebbbb9555aa6e0d7ffbaf397919fb179ee8b64e08f78514fcaf41d8e7576.jpg)
531
+
532
+ ![](images/6600ab9e2681c800b216b0c699653c7866a4939cf0b5f4504ede6370f8d80400.jpg)
533
+
534
+ ![](images/3c41f54e51b8d079a662bad1d8238557b90af2a07e2427179a76b186cb5eed1f.jpg)
535
+
536
+ ![](images/1571a2c7bd0103a1e89eed32919277bdfa7bff1e72178ab76b4317805e791095.jpg)
537
+
538
+ ![](images/f9ac18be28db1c5a6a4d87ba69af7289238939df1aa76319403fd280d2113489.jpg)
539
+
540
+ ![](images/2af9096ef8611fc5123397c938a283439e924f2e02a9411efcc5da674dbce1c7.jpg)
541
+
542
+ ![](images/9809752f66e575124138c19471a7fa226491c0107458b40b8ffb5448135efdcf.jpg)
543
+
544
+ ![](images/fb79fbf3433a442ec0a7d2c91a256bdbd1ce4c67f8ed3552d1be1c1141f291e8.jpg)
545
+
546
+ ![](images/cde5b35e033c9c5ecf1793d5b0cc209b83c42456266705ad0358773d0f8bdb70.jpg)
547
+
548
+ ![](images/76d49d17ecdaa95c16eedd9e871abb6c3244b931077b8c2f5f74a89e3bca8c88.jpg)
549
+
550
+ ![](images/538bcf1ea4e9fbd351b307982ec805856a373171a824e5360e914b3b1aed3df7.jpg)
551
+
552
+ ![](images/15e4bf4e3fbadad3de88c1fe587d9e8cc144f93b3e7535b60907d5aa13cb9e58.jpg)
553
+
554
+ ![](images/e4ef8a05b23aa63c1dd7299e69cf247122be20ef33aa3e8b7289b66a868437be.jpg)
555
+
556
+ ![](images/168e15f9dc706b6895d3d75a8becd957cbbae76982a294b9cfe51d3c91e17d09.jpg)
557
+
558
+ ![](images/14190219f8d984e47582179cbb150d23d1fced06c99dc5768b597afee4b37b89.jpg)
559
+
560
+ ![](images/ba7707d903375207daef64e4d92b0c34cafa80c240abc25e8254c346cc743e2e.jpg)
561
+
562
+ ![](images/6f8cbbf510a5432d1093f490283bcb8ffa8659058297ffc254192b33e073cfa0.jpg)
563
+
564
+ ![](images/bd603d78132541dc5831f7988e0ca1f695d87211e6fb07b406f80ae587c16d18.jpg)
565
+
566
+ ![](images/7f15d4cb92286b3ecde80e6ba6e883575cd44e0b62394313eea7e2e58beb828f.jpg)
567
+
568
+ ![](images/a963bb98730bb3963a062f8f86b9c860249b54d79d25cc716fb40f8935707b71.jpg)
569
+
570
+ ![](images/e69e94eea8f08f90db31bafa3d71ef2882dc03da3310f061fc8a93cf483d4e8a.jpg)
571
+
572
+ ![](images/c7c5a62624abfc6bf0880f62b21e4bd03cb07dfed269a6a4ca679a8f03fcb09b.jpg)
573
+
574
+ ![](images/c232932d6707a457070485ad9b88965671552a4803bd45836cfbb44fb042fecd.jpg)
575
+
576
+ ![](images/4898f730b58101f8e5fa24a19a30729de7451272f473bb728095f0ee61a488b8.jpg)
577
+
578
+ ![](images/05c25fff44ad00313ae34b38817fbf938fc535dff7bbc79d4c1a3955d18466a2.jpg)
579
+
580
+ ![](images/4d66b3965c73e70e45b3d51a05d23de37e8eb67230d65cfbf99a6abea3b6f56d.jpg)
581
+
582
+ ![](images/a63067d81d4f910d82aed02e99a86e108304c3434b3b87935934458c763ca6fe.jpg)
583
+
584
+ ![](images/4fb5d2382cd8e1888a7dd92550e37c30bfbc411316e758c349d88545b00ea0d6.jpg)
585
+
586
+ ![](images/09f41a28d82e206867adec63b9f849a601c7eff7c95ebe983f705ca10983258d.jpg)
587
+
588
+ ![](images/5ecea77a17ca5caae7ce22b674e95402131f7a7ef440f66c2789954230b77a5a.jpg)
589
+
590
+ ![](images/a6ce9ef258006a36f72568a1a2f531d02854a5f30d966f3f34303b0927592a31.jpg)
591
+ Figure 14: Examples of Met query images and training (exhibit) images of the corresponding Met class. Query images are shown in black border.
592
+
593
+ ![](images/3cb1868410a3068fd50e270dce82375ebb7604cedc1e59e681194dc36004521d.jpg)
594
+
595
+ ![](images/64d96d26f175ca97eaf04a99279151b15afe61fa97effcaf4ba16e880ae559fb.jpg)
596
+
597
+ ![](images/e0fe60735acfd3389aef86939dc7e9d43cfd1129ce4b1aa657c3881539610a80.jpg)
598
+
599
+ ![](images/5ceb63bb5a4aa59700b908a6c911f9639621f884aa824ffbecaffcd1a3831c43.jpg)
600
+
601
+ ![](images/11ede17b62c8c0c9a17db99d280af51c24a50956f737bd3fe62396176f0eeb37.jpg)
602
+
603
+ ![](images/b3af04d7c771220947e26117eb9fff0aa2e1802d2367ce7b04dac5c00c6cc494.jpg)
604
+
605
+ ![](images/4c28f5cf392037d13435bfabc9619a363eb9517fd9fa4b8eea6ca28bd8c97b38.jpg)
606
+
607
+ ![](images/2ff2f2bc5c8349335a23f15a0110ef28ce5b813407e609c549bb231435637b63.jpg)
608
+
609
+ ![](images/db94058c529f29f230f6cbb8452857850cd3b87800555851a90b84eec495e2f8.jpg)
610
+
611
+ ![](images/517202dc0ac644a9bbcbd68c56a88b9977e6a00e3c0fa3d99ac1cb2f4c8238ed.jpg)
612
+ Figure 15: Examples of Met query images and training (exhibit) images of the corresponding Met class. Query images are shown in black border.
613
+
614
+ ![](images/dfff678cc4aea1f995d38e35a055b70b14af763729246e8845fb63a8880e2e8d.jpg)
615
+ Figure 16: Examples of Met query images and training (exhibit) images of the corresponding Met class. Query images are shown in black border.
616
+
617
+ ![](images/3065fcfa3c2858ebfd4ff6cde8380c8d82424a3509bcc4d109531a961c1b7d0f.jpg)
618
+ Figure 17: Examples of other-artwork (distractor) queries.
619
+
620
+ ![](images/aca5ea0a2e1dbfaaa857089171ab3a717416f798671049df4a119d1943721477.jpg)
621
+ Figure 18: Examples of non-artwork (distractor) queries.
622
+
623
+ # B Datasheet
624
+
625
+ # MOTIVATION
626
+
627
+ # For what purpose was the dataset created?
628
+
629
+ To our knowledge, this is the only ILR dataset at this scale that does not include any noise in the ground-truth and is fully publicly available. Existing datasets are either significantly smaller, have ground-truth created via crowd-sourcing (which resulted in noisy labels), or are not fully publicly available. ILR has many application domains with corresponding real-world applications; existing datasets cover the domains of landmarks, clothing, or products in general. The Met dataset is the first ILR dataset in the artwork domain. The dataset is created for the specific task of matching user photos of artworks to a database of images of artworks with known metadata, with the aim of identifying the artwork in the user photo.
630
+
631
+ Who created this dataset (e.g., which team, research group) and on behalf of which entity (e.g., company, institution, organization)?
632
+
633
+ The dataset was created by Nikolaos-Antonios Ypsilantis (Czech Technical University in Prague), Noa Garcia (Osaka University), Guangxing Han (Columbia University), Sarah Ibrahimi (University of Amsterdam), Nanne van Noord (University of Amsterdam), Giorgos Tolias (Czech Technical University in Prague).
634
+
635
+ What support was needed to make this dataset?
636
+
637
+ N.A.
638
+
639
+ Any other comments?
640
+
641
+ # COMPOSITION
642
+
643
+ What do the instances that comprise the dataset represent (e.g., documents, photos, people, countries)?
644
+
645
+ Each instance in the dataset is an image depicting either artworks or non-artwork content. There are exhibit images that form the training set, and query images that form the test set. The exhibit images are images from the open-access Met Catalog, made available by the Met museum through their online platform. There are two types of query images, the ones that depict an item from Met and the ones that do not. The latter are called distractor queries. The non-distractor query images consist of user photos taken at the Met museum by visitors depicting any of the items shown in the exhibit images of the dataset. A portion of these were taken by the creators of the dataset, and others were collected from Flickr. The distractor query images are images taken from Wikimedia Commons and might depict both artwork (other-art) and non-artwork (non-art) content.
646
+
647
+ How many instances are there in total (of each type, if appropriate)?
648
+
649
+ The number of images in the Met dataset are as follows:
650
+
651
+ <table><tr><td rowspan="2">Split</td><td rowspan="2">Type</td><td colspan="3"># Images</td></tr><tr><td>Met</td><td>other-art</td><td>non-art</td></tr><tr><td>Train</td><td>Exhibit</td><td>397,121</td><td>-</td><td>-</td></tr><tr><td>Val</td><td>Query</td><td>129</td><td>1,168</td><td>868</td></tr><tr><td>Test</td><td>Query</td><td>1,003</td><td>10,352</td><td>7,964</td></tr></table>
652
+
653
+ Does the dataset contain all possible instances or is it a sample (not necessarily random) of instances from a larger set?
654
+
655
+ All types of instances are samples from a larger set. For the Met catalog, images that were not open-access or that had a highly skewed aspect ratio were excluded; additionally, a maximum of 10 images (the first 10) was kept per exhibit of the same artwork, to reduce overrepresentation of exhibits. The query images were selected from online sources based on open-access availability.
656
+
657
+ In the case of Met museum query images, an additional selection criterion was the availability of identifying metadata, so that ground-truth correspondence with the Met exhibits could be established.
658
+
659
+ What data does each instance consist of?
660
+
661
+ Each instance is an image in JPEG format with a maximum resolution of $500 \times 500$ .
662
+
663
+ Is there a label or target associated with each instance?
664
+
665
+ Each distinct Met exhibit included in the set of exhibit images of the dataset forms its own class. Query images are assigned to one of these classes, if the exhibit is depicted, or to the distractor class if no Met exhibit is depicted.
666
+
667
+ Is any information missing from individual instances?
668
+
669
+ Everything is included in the dataset.
670
+
671
+ Are relationships between individual instances made explicit (e.g., users' movie ratings, social network links)?
672
+
673
+ The relationships between exhibit images from the Met catalog and user query images are made explicit via discrete class labels. Additional relationships between user images, such as being captured by the same photographer, are made explicit via the metadata for the query images.
674
+
675
+ Are there recommended data splits (e.g., training, development/validation, testing)?
676
+
677
+ The dataset is divided into a training, validation, and test split. All Met exhibit images form the training set, while the query images are split into test and validation sets. The test set is composed of roughly $90\%$ of the query images, and the rest is used to form the validation set. To ensure no leakage between the validation and test split, all Met queries are first grouped by user and then assigned to a split. Additionally, we enforce that there is no class overlap between the splits.
678
+
679
+ Are there any errors, sources of noise, or redundancies in the dataset?
680
+
681
+ We have performed multiple rounds of automated, semi-automated, and manual verification of the ground-truth and filtering to minimize the chance of errors being included. The accuracy of the final ground-truth has been verified by two different annotators. The Met open collection includes duplicate entries (identical images), which we spotted and removed.
682
+
683
+ Is the dataset self-contained, or does it link to or otherwise rely on external resources (e.g., websites, tweets, other datasets)?
684
+
685
+ The dataset is self-contained.
686
+
687
+ Does the dataset contain data that might be considered confidential (e.g., data that is protected by legal privilege or by doctor-patient confidentiality, data that includes the content of individuals' non-public communications)?
688
+
689
+ No
690
+
691
+ Does the dataset contain data that, if viewed directly, might be offensive, insulting, threatening, or might otherwise cause anxiety?
692
+
693
+ No
694
+
695
+ Does the dataset relate to people?
696
+
697
+ A subset of the artworks depicts persons (not always in a photorealistic manner, and the persons depicted might be fictive). The query images (particularly those from Wikimedia Commons) may contain depictions of persons, but neither the dataset nor the metadata contains information about the persons depicted (the image source on Wikimedia Commons may have identifying information). Specifically, this dataset does not directly concern persons, nor does it contain data to identify any persons.
698
+
699
+ Does the dataset identify any subpopulations (e.g., by age, gender)?
700
+
701
+ No demographic information is included with the dataset.
702
+
703
+ Is it possible to identify individuals (i.e., one or more natural persons), either directly or indirectly (i.e., in combination with other data) from the dataset?
704
+
705
+ Persons depicted in artworks can be identified via museum metadata. However, the images collected from Flickr that were taken by museum guests do not depict individuals in an identifiable way; images that did were removed.
706
+
707
+ Does the dataset contain data that might be considered sensitive in any way (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history)?
708
+
709
+ The dataset does not contain sensitive data, as all images were collected from open-access online sources.
710
+
711
+ Any other comments?
712
+
713
+ # COLLECTION
714
+
715
+ How was the data associated with each instance acquired?
716
+
717
+ Each Met exhibit forms its own Met class. Each exhibit image is labeled to a Met class according to the Met metadata. We label query images with their corresponding Met class, if any. Met queries taken by our team are annotated based on exhibit information, whereas Met queries downloaded from Flickr are annotated manually. To ease the task, the title and description fields on Flickr are used for text-based search in the list of titles from The Met exhibits included in the corresponding metadata. Finally, two different annotators verify the correctness of the labeling per query. We additionally verify that distractor queries, especially other-artwork queries, are true distractors and do not belong to The Met collection. This is done in a semi-automatic manner supported by (i) text-based filtering of the Wikimedia image titles and (ii) visual search using a pre-trained deep network. Top matches are manually inspected and images corresponding to Met exhibits are removed.
718
+
719
+ What mechanisms or procedures were used to collect the data (e.g., hardware apparatus or sensor, manual human curation, software program, software API)?
720
+
721
+ The majority of images were collected using software to crawl the Met catalog, Flickr, and Wikimedia Commons; the hardware used to capture these images varies significantly. The images collected by the team were taken with an iPhone 11 Pro Max.
722
+
723
+ If the dataset is a sample from a larger set, what was the sampling strategy (e.g., deterministic, probabilistic with specific sampling probabilities)?
724
+
725
+ Sampling was done based on availability and adherence to the selection criteria; no specific (statistical) sampling strategy was used.
726
+
727
+ Who was involved in the data collection process (e.g., students, crowdworkers, contractors) and how were they compensated (e.g., how much were crowdworkers paid)?
728
+
729
+ All data collection and curation was performed by the paper authors themselves.
730
+
731
+ Over what timeframe was the data collected?
732
+
733
+ The dataset was constructed between September 2020 and September 2021. Images included in the dataset from public sources might have been captured before this timeframe.
734
+
735
+ Were any ethical review processes conducted (e.g., by an institutional review board)?
736
+
737
+ No
738
+
739
+ # Does the dataset relate to people?
740
+
741
+ A subset of the artworks depicts persons (not always in a photorealistic manner, and the persons depicted might be fictive). The query images (particularly those from Wikimedia Commons) may contain depictions of persons, but neither the dataset nor the metadata contains information about the persons depicted (the image source on Wikimedia Commons may have identifying information). Specifically, this dataset does not directly concern persons, nor does it contain data to identify any persons.
742
+
743
+ # Did you collect the data from the individuals in question directly, or obtain it via third parties or other sources (e.g., websites)?
744
+
745
+ The data was crawled from open-access collections online (Met catalog, Flickr, Wikimedia Commons). Photos we collected ourselves were taken so as to avoid capturing other museum visitors.
746
+
747
+ # Were the individuals in question notified about the data collection?
748
+
749
+ No, all data used was already public and available under an open-access license or does not contain persons.
750
+
751
+ Did the individuals in question consent to the collection and use of their data?
752
+
753
+ If consent was obtained, were the consenting individuals provided with a mechanism to revoke their consent in the future or for certain uses?
754
+
755
+ Has an analysis of the potential impact of the dataset and its use on data subjects (e.g., a data protection impact analysis) been conducted?
756
+
757
+ Any other comments?
758
+
759
+ # PREPROCESSING / CLEANING / LABELING
760
+
761
+ Was any preprocessing/cleaning/labeling of the data done(e.g.,discretization or bucketing, tokenization, part-of-speech tagging, SIFT feature extraction, removal of instances, processing of missing values)?
762
+
763
+ The data was processed according to the following steps:
764
+
765
+ 1. Gathered raw images from Flickr: The images were collected as described in the collection section.
766
+ 2. Filtering: images that contain visitor faces, images not depicting exhibits, or images with more than one exhibit were discarded.
767
+ 3. Annotation: query images were annotated with the corresponding Met class; similarly, distractor images were discarded if they corresponded to a Met exhibit.
768
+ 4. Verification: the label for each image was verified by two different annotators.
769
+ 5. Rescaling: all images were resized to a maximum resolution of $500 \times 500$ , preserving aspect ratio.
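+
+ As an illustration of step 5, a minimal rescaling sketch with Pillow (the resampling filter is an assumption):
+
+ ```python
+ from PIL import Image
+
+ def downscale(src_path, dst_path, max_side=500):
+     """Resize so that neither side exceeds max_side, preserving aspect ratio;
+     images already within the limit keep their original size."""
+     img = Image.open(src_path).convert("RGB")
+     img.thumbnail((max_side, max_side), Image.LANCZOS)  # in-place, keeps aspect ratio
+     img.save(dst_path, format="JPEG")
+ ```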
770
+
771
+ # Was the "raw" data saved in addition to the preprocessed/cleaned/labeled data (e.g., to support unanticipated future uses)?
772
+
773
+ The 'raw' data is available from online sources; where relevant, the metadata contains a reference to the source image data.
774
+
775
+ Is the software used to preprocess/clean/label the instances available?
776
+
777
+ No, this process mainly consisted of manual effort with small specific scripts to automate simple tasks.
778
+
779
+ Any other comments?
780
+
781
+ # USES
782
+
783
+ Has the dataset been used for any tasks already?
784
+
785
+ Yes, the dataset has been used for instance-level recognition of artworks. See [PAPER] for details.
786
+
787
+ Is there a repository that links to any or all papers or systems that use the dataset?
788
+
789
+ No, we do not intend to collect all such links. We will ask future research publications that use the dataset to cite our paper. In this way, it should be possible to track its usage.
790
+
791
+ What (other) tasks could the dataset be used for?
792
+
793
+ The dataset could potentially be used for other computer vision tasks related to artistic images, such as attribute prediction. Additionally, given the domain shift between the exhibit and the query images, the dataset could be used for domain adaptation.
794
+
795
+ Is there anything about the composition of the dataset or the way it was collected and preprocessed/cleaned/labeled that might impact future uses?
796
+
797
+ The dataset was collected and constructed with the ILR task in mind; because of this, there might be limitations for future uses. Additionally, certain applications within the artistic domain rely on high-resolution images; for this dataset the images have been downscaled, which might inhibit such applications.
798
+
799
+ Are there tasks for which the dataset should not be used?
800
+
801
+ Any other comments?
802
+
803
+ # DISTRIBUTION
804
+
805
+ Will the dataset be distributed to third parties outside of the entity (e.g., company, institution, organization) on behalf of which the dataset was created?
806
+
807
+ Yes, the dataset is publicly available.
808
+
809
+ How will the dataset will be distributed (e.g., tarball on website, API, GitHub)?
810
+
811
+ The dataset is available for download from http://cmp.felk.cvut.cz/met/. The website is under construction. A simple version is offered to provide access to reviewers, and a complete version will become available before publication.
812
+
813
+ The supporting code for evaluation and for reproducing some of the baselines in the paper is provided at https://github.com/nikosips/met.
814
+
815
+ When will the dataset be distributed?
816
+
817
+ The dataset is already publicly available through the corresponding webpage.
818
+
819
+ Will the dataset be distributed under a copyright or other intellectual property (IP) license, and/or under applicable terms of use (ToU)?
820
+
821
+ The ownership of all images in the dataset remains with their original publishers (e.g., the Met, Flickr users, and Wikimedia Commons users); however, as all images are either licensed under a Creative Commons license or are in the public domain, there are no limitations on the distribution and use of the dataset. We provide attribution for all Flickr images by mentioning the creator and the corresponding Flickr URL.
824
+
825
+ Have any third parties imposed IP-based or other restrictions on the data associated with the instances?
826
+
827
+ No
828
+
829
+ Do any export controls or other regulatory restrictions apply to the dataset or to individual instances?
830
+
831
+ No
832
+
833
+ Any other comments?
834
+
835
+ # MAINTENANCE
836
+
837
+ Who is supporting/hosting/maintaining the dataset?
838
+
839
+ The dataset is hosted at the Czech Technical University in Prague. Long-term administrator access is guaranteed for Giorgos Tolias.
840
+
841
+ How can the owner/curator/manager of the dataset be contacted (e.g., email address)?
842
+
843
+ Questions and comments about the dataset can be sent to Giorgos Tolias: giorgos.tolias@cmp.felk.cvut.cz
844
+
845
+ Is there an erratum?
846
+
847
+ No.
848
+
849
+ Will the dataset be updated (e.g., to correct labeling errors, add new instances, delete instances)?
850
+
851
+ In the unlikely event (see above for our effort to remove errors) that a number of errors are spotted in the future, the dataset will be updated and the relevant baseline scores will be updated too.
852
+
853
+ If the dataset relates to people, are there applicable limits on the retention of the data associated with the instances (e.g., were individuals in question told that their data would be retained for a fixed period of time and then deleted)?
854
+
855
+ The dataset does not relate to people.
856
+
857
+ Will older versions of the dataset continue to be supported/hosted/maintained?
858
+
859
+ In the unlikely event that spotted errors trigger a dataset update, the older version (instances and ground-truth) will remain publicly available.
860
+
861
+ If others want to extend/augment/build on/contribute to the dataset, is there a mechanism for them to do so?
862
+
863
+ There is no specified mechanism but we are willing to update the dataset webpage by adding links to any useful extensions.
864
+
865
+ Any other comments?
2202.01xxx/2202.01747/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a106dbe57e8728a95b13b4270f0523e812b3e12b71eb400c5b6cacd791ddf7e0
3
+ size 2602201
2202.01xxx/2202.01747/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01771/e395822f-9d01-412f-9b34-5760d560fda9_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01771/e395822f-9d01-412f-9b34-5760d560fda9_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01771/e395822f-9d01-412f-9b34-5760d560fda9_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bc67f260eeef8fc1c6f0cef3a1dfbabb47d7ab05c666a1e1f0b8eec0ede60035
3
+ size 9460072
2202.01xxx/2202.01771/full.md ADDED
@@ -0,0 +1,668 @@
1
+ # Pre-Trained Language Models for Interactive Decision-Making
2
+
3
+ Shuang Li $^{1*}$ , Xavier Puig $^{1}$ , Chris Paxton $^{2}$ , Yilun Du $^{1}$ , Clinton Wang $^{1}$ , Linxi Fan $^{2}$ , Tao Chen $^{1}$ , De-An Huang $^{2}$ , Ekin Akyurek $^{1}$ , Anima Anandkumar $^{2,3,\dagger}$ , Jacob Andreas $^{1,\dagger}$ , Igor Mordatch $^{4,\dagger}$ , Antonio Torralba $^{1,\dagger}$ , Yuke Zhu $^{2,5,\dagger}$
4
+
5
+ $^{1}$ MIT, $^{2}$ Nvidia, $^{3}$ Caltech, $^{4}$ Google Brain, $^{5}$ UT Austin. Junior authors are ordered based on contributions and senior authors ( $\dagger$ ) are ordered alphabetically.
6
+
7
+ # Abstract
8
+
9
+ Language model (LM) pre-training is useful in many language processing tasks. But can pre-trained LMs be further leveraged for more general machine learning problems? We propose an approach for using LMs to scaffold learning and generalization in general sequential decision-making problems. In this approach, goals and observations are represented as a sequence of embeddings, and a policy network initialized with a pre-trained LM predicts the next action. We demonstrate that this framework enables effective combinatorial generalization across different environments and supervisory modalities. We begin by assuming access to a set of expert demonstrations, and show that initializing policies with LMs and fine-tuning them via behavior cloning improves task completion rates by $43.6\%$ in the Virtual-Home environment. Next, we integrate an active data gathering procedure in which agents iteratively interact with the environment, relabel past "failed" experiences with new goals, and update their policies in a self-supervised loop. Active data gathering further improves combinatorial generalization, outperforming the best baseline by $25.1\%$ . Finally, we explain these results by investigating three possible factors underlying the effectiveness of the LM-based policy. We find that sequential input representations (vs. fixed-dimensional feature vectors) and LM-based weight initialization are both important for generalization. Surprisingly, however, the format of the policy inputs encoding (e.g. as a natural language string vs. an arbitrary sequential encoding) has little influence. Together, these results suggest that language modeling induces representations that are useful for modeling not just language, but also goals and plans; these representations can aid learning and generalization even outside of language processing.
10
+
11
+ # 1 Introduction
12
+
13
+ Language models (LMs) play a key role in machine learning approaches to natural language processing tasks [9]. This includes tasks that are not purely linguistic, and require nontrivial planning and reasoning capabilities [24, 13]: for example, instruction following, vision-language navigation, and visual question answering. Indeed, some of these tasks are so distant from language modeling that one can ask whether pre-trained LMs can be used as a general framework even for tasks that involve no language at all. If so, how might these capabilities be accessed in a model trained only to process and generate natural language strings?
14
+
15
+ ![](images/bae0a2193f343d1b18123705a6c33239366c37b1f2ccadac73695aa287a2bf01.jpg)
16
+ VirtualHome
17
+
18
+ ![](images/d45c4eca7931b033c8d9fdf9aaf4d0950692ea53ca7982f355ca6092a0851f04.jpg)
19
+ Graph partial observation:
20
+
21
+ ![](images/024e6557d27dbeb1242c6948e11149bfeb6eb5c905b824b06d9c9884636d15c5.jpg)
22
+ BabyAI
23
+ Grid partial observation:
24
+
25
+ Language goal:
26
+ ![](images/b7f3269353c13d2e82710a45ac6e4fbfc5313caec6da0e5e5659e5dde792dc0d.jpg)
27
+ Put the green box next to the purple box
28
+
29
+ ![](images/c017e179d92fd225e35e2313e497f34c7ab1e2d6c51957339906356500f3b27d.jpg)
30
+ Figure 1: Environments (left): Different environments have different types of observations and goals. Our approach (right): We use pre-trained LMs as a general framework for interactive decision-making by converting policy inputs into sequential data. Such a method enables effective combinatorial generalization to novel tasks.
31
+
32
+ In this paper, we study these questions through the lens of embodied decision-making, investigating the effectiveness of LM pre-training as a general framework for learning policies across a variety of environments. We propose LID, a framework that uses Pre-Trained Language Models for Interactive Decision-Making. As shown in Figure 1 (right), we encode the inputs to a policy—including observations, goals, and history—as a sequence of embeddings. These embeddings are passed to a policy network initialized with the parameters of a pre-trained LM, which is fine-tuned to predict actions. This framework is broadly applicable, accommodating goals and environment states represented as natural language strings, image patches, or scene graphs.
33
+
34
+ We find that imitation learning using pre-trained LMs as policy initializers improves in-domain performance and enables strong generalization over novel tasks. For i.i.d. training and evaluation tasks, this approach yields $20\%$ more successful policies than other baseline methods in Virtual-Home [31]. For combinatorial generalization to out-of-distribution tasks, i.e. tasks involving new combinations of goals, states or objects, LM pre-training confers even more benefits: it improves task completion rates by $43.6\%$ for novel tasks (see Figure 3). These results hold for a variety of environment representations: encoding states as natural language strings, when possible, improves the data-efficiency of training, but even LMs fine-tuned on random environment encodings generalize combinatorially to new goals and states when trained on large enough datasets.
35
+
36
+ We further examine how our method may be used in environments where expert data is not available, and agents must instead actively gather data. To do this, we integrate an Active Data Gathering (ADG) procedure into pre-trained LMs as shown in Figure 2. Our proposed approach to ADG consists of three parts. First, exploration collects trajectories using a mix of random actions and actions generated by the current policy. Exploration is insufficient in this high dimensional problem and most of the trajectories will likely fail to achieve the end goal. A key insight is that even the failed trajectories contain useful sub-trajectories that solve certain sub-goals, and we relabel these goals in a hindsight relabeling stage. The relabeled goal describes what was achieved in the extracted sub-trajectory. The policy update stage samples relabeled trajectories to update the policy. The active data gathering procedure allows us to train the LM-policy without pre-collected expert data. It also outperforms reinforcement learning (RL) methods on embodied decision-making tasks and enables more effective generalization to novel tasks.
37
+
38
+ Finally, we investigate why LID contributes to generalization. We hypothesize three possible causes for the effectiveness of LM-based policy initialization: (1) the use of language-based input encodings, and more generally LMs' ability to reason about natural language strings; (2) the sequential structure of transformer inputs, in contrast to the fixed-sized observations used by most policy architectures, and (3) task-general inductive bias conferred by weight initialization with LM pretraining. We investigate (1) by encoding the policy inputs as different types of sequences. Different input encoding schemes have only a negligible impact on the performance: the effectiveness of language modeling is not limited to utilizing natural strings, but in fact extends to arbitrary sequential encodings. We study (2) by encoding observations with a single vector embedding, thereby removing its sequential structure. This operation significantly degrades the model's performance on novel tasks. Finally, we investigate (3) by learning the parameters of the policy from scratch. The success rate after removing the pre-trained LM weights drops by $11.2\%$ , indicating that LM pretraining provides useful inductive bias for sequence processing even when sequences are not natural language strings.
39
+
40
+ To summarize, our work has four main contributions:
41
+
42
+ - First, we propose to use pre-trained LMs as a general scaffold for interactive decision-making across a variety of environments by converting all policy inputs into sequential data.
43
+ - Second, we demonstrate that language modeling improves combinatorial generalization in policy learning: initializing a policy with a pre-trained LM substantially improves out-of-distribution performance on novel tasks.
44
+ - Third, we integrate an active data gathering procedure into the proposed approach to further enable policy learning on environments without using pre-collected expert data.
45
+ - Finally, we perform several analyses to explain the generalization capabilities of pre-trained LMs, finding that natural strings are not needed to benefit from LM pre-training, but the sequential input encoding and weight pre-training are important.
46
+
47
+ These results point to the effectiveness of the proposed framework with pre-trained LMs as a general-purpose framework to promote structured generalization in interactive decision-making.
48
+
49
+ # 2 Related Work
50
+
51
+ In recent years, word and sentence representations from pre-trained LMs [29, 9, 33] have become ubiquitous in natural language processing [49, 30]. Some of the most successful applications of pre-training lie at the boundary of natural language processing and other domains, as in instruction following [13] and language-guided image retrieval [22].
52
+
53
+ Learning representations of language. From nearly the earliest days of the field, natural language processing researchers observed that representations of words derived from distributional statistics in large text corpora serve as useful features for downstream tasks [8, 11]. The earliest versions of these representation learning schemes focused on isolated word forms [25, 28]. However, recent years have seen a number of techniques for training (masked or autoregressive) language models to produce contextualized word representations (which incorporate information from neighboring words in sentences and paragraphs) via a variety of masked-word prediction objectives [9, 47].
54
+
55
+ Applications of pre-trained LMs. LMs can be fine-tuned to perform language processing tasks other than language modeling by casting those tasks as word-prediction problems. Successful uses of representations from pre-trained models include syntactic parsing [19] and language-to-code translation [45]; successful adaptations of LM prediction heads include machine translation [49], sentiment classification [6] and style transfer [18]. A number of tasks integrate language and other modalities, including visual question answering and image captioning [48]. Recent works find that image representations can be injected directly into LMs' embedding layers [42].
56
+
57
+ Policy learning and LM. Traditional policy learning methods, such as PPO [37], DQN [27], DDPG [21], A3C [26], perform well on playing tasks on Atari, OpenAI gym [5], and MuJoCo [41]. Some of them might fail to solve more challenging tasks on embodied environments [31, 39]. Several recent papers [36, 17, 15] propose to use LM for policy learning. Frozen Pretrained Transformer (FPT) [23] demonstrates that pre-trained LMs require very little fine-tuning to match the performance of task-specific models on several image classification and numerical sequence processing tasks. Semi-Supervised Skill Learning with Latent Language $(\mathrm{SL})^3$ [38] shows that LMs can serve as an effective backbone for hierarchical policies that express plans as natural language strings [2, 4]. In this paper, we focus on building a general framework for decision-making tasks using pre-trained LMs, even when language is not provided as an input or output.
58
+
59
+ # 3 Decision-Making and Language Modeling
60
+
61
+ # 3.1 POMDPs and Policy Learning
62
+
63
+ We explore the application of LMs to general sequential decision-making tasks in partially observed environments. These tasks may be formalized as partially observable Markov decision processes (POMDPs). A POMDP is defined by a set of states, a set of observations, a set of actions, and a transition model $\mathcal{T}(s_{t + 1}|s_t,a_t)$ that maps the current state and action to the next state. Importantly, in a POMDP setting, the observation $o_t$ only captures a portion of the underlying state $s_t$ , and an optimal decision-making strategy (a policy) must incorporate both the current observation and the history of previous observations and actions. In our experiments, policies are parametric models $\pi_{\phi}(a_t|g,h_t,o_t)$ that output the probability of an action given the goal $g$ , the history information $h_t = \{o_1,a_1,\dots ,o_{t - 1},a_{t - 1}\}$ , and the partial observation $o_t$ of the current state $s_t$ .
66
+
67
+ In Figure 1 (right), we show a high-level overview of the proposed method. We first convert all policy inputs into a sequence and provide them as input to a transformer encoder. Representations from this encoder model are then passed to a task-specific decoder that predicts actions. We collect a dataset of $N$ training trajectories $\mathcal{D} = \{d^i\}_{i=1}^N$ , where each trajectory consists of a goal and a sequence of observations and actions: $d^i = \{g^i, o_1^i, a_1^i, \dots, o_{T_i}^i, a_{T_i}^i\}$ , where $T_i$ is the length of the trajectory. We then train the policy to maximize the probability of the target actions $\boldsymbol{a}^i = \{a_1^i, \dots, a_{T_i}^i\}$ across trajectories using the cross-entropy loss:
68
+
69
+ $$
70
+ \phi^{*} = \arg \min_{\phi} \left(- \sum_{i = 1}^{N} \sum_{t = 1}^{T_{i}} \ln \pi_{\phi} \left(a_{t}^{i} \mid g^{i}, h_{t}^{i}, o_{t}^{i}\right)\right). \tag{1}
71
+ $$
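+
+ A minimal PyTorch sketch of this objective is given below; the policy interface and batch layout are illustrative, and in practice the per-step terms would be batched rather than looped over.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def bc_loss(policy, batch):
+     """Behavior cloning objective of Eq. (1): summed negative log-likelihood of the
+     target actions. `policy(goal, history, obs)` is assumed to return logits over
+     the discrete actions available at the current step."""
+     total = 0.0
+     for traj in batch:                     # traj: {"goal": ..., "steps": [(obs, action), ...]}
+         history = []
+         for obs, action in traj["steps"]:
+             logits = policy(traj["goal"], history, obs)
+             total = total + F.cross_entropy(logits.unsqueeze(0), torch.tensor([action]))
+             history.append((obs, action))
+     return total
+ ```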
72
+
73
+ # 3.2 Language models as policy initializers
74
+
75
+ Our experiments focus on autoregressive, transformer-based LMs [43]. These models are trained to fit a distribution over a text sequence $\mathbf{y} = \{y_{i}\}_{i=1}^{n}$ via the chain rule $p(\mathbf{y}) = p(y_{1}) \prod_{i=2}^{n} p(y_{i} \mid y_{1}, \ldots, y_{i-1})$ . Each term on the right-hand side is parameterized by a transformer network, which accepts the conditioned tokens as input. Each token passes through a learned embedding layer $F_{\theta}$ , then the full conditioned sequence is fed into the LM. In our work, we use a standard LM, GPT-2, to process the input sequence rather than to predict future tokens.
76
+
77
+ Both POMDP decision-making and language modeling are naturally framed as sequence prediction tasks, where successive words or actions/observations are predicted based on a sequence of previous words or actions/observations. This suggests that pre-trained LMs can be used to initialize POMDP policies by fine-tuning them to model high-reward or expert trajectories, as described below.
78
+
79
+ # 4 Approach
80
+
81
+ We evaluate the effectiveness of pre-trained LMs in solving decision-making tasks across environments. We use BabyAI [16] and VirtualHome [31] to evaluate the proposed method. While both environments feature complex goals, the nature of these goals, as well as the state and action sequences that accomplish them, differ substantially across environments (Figure 1 (left)).
82
+
83
+ # 4.1 Policy Network
84
+
85
+ We first examine whether pre-trained LMs provide effective initializers when states and action histories are represented as natural language strings. We encode the inputs to the policy—including observations, goals, and action histories—as sequences of words. These word sequences are passed to the LM (using its pre-trained word embedding layer $F_{\theta}$ ) and used to obtain contextualized token representations. Token representations are averaged and used to predict actions. We design a policy network following the general policy framework proposed in Figure 1.
86
+
87
+ Environment encodings in VirtualHome. In VirtualHome, each goal consists of a sequence of predicates and multiplicities, and is translated into a populated English sentence (e.g. "Inside (apple, fridge): 2" becomes "put two apples inside the fridge"). To encode the agent's partial observation, we extract a list of currently visible objects, their states (e.g. "open, clean"), and 3D world coordinates. We use a fully-connected layer to encode the 3D information and generate a feature representation of each object in the observation. To encode history, we store information about all previous actions and convert them into populated English sentences (e.g. "I have put the plate on the kitchen table and the apple inside the fridge").
88
+
89
+ Environment encodings in BabyAI. The observation by default is a $7 \times 7$ grid. We convert the observation into $7 \times 7$ text descriptions, e.g. "purple ball", "grey wall", "open door", and combine them into a long sentence. We then convert the history actions into text descriptions, e.g. "turn left" and "go forward". We combine the language instruction (without modification) with the observation and history text descriptions, and feed them to the pre-trained LM.
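+
+ A sketch of this conversion is given below; the index-to-name tables are abbreviated stand-ins for the full MiniGrid/BabyAI lookup tables, and skipping empty cells is an assumption.
+
+ ```python
+ # Abbreviated lookup tables; MiniGrid/BabyAI provide complete IDX_TO_OBJECT / IDX_TO_COLOR maps.
+ OBJECTS = {1: "empty", 2: "wall", 4: "door", 5: "key", 6: "ball", 7: "box"}
+ COLORS = {0: "red", 1: "green", 2: "blue", 3: "purple", 4: "yellow", 5: "grey"}
+ STATES = {0: "open", 1: "closed", 2: "locked"}
+
+ def grid_to_text(obs_image):
+     """Convert a 7x7x3 partial observation (object, color, state index per cell)
+     into a flat text description such as 'purple ball, grey wall, open door, ...'."""
+     phrases = []
+     for row in obs_image:
+         for obj_idx, color_idx, state_idx in row:
+             obj = OBJECTS.get(int(obj_idx), "unseen")
+             if obj in ("empty", "unseen"):
+                 continue  # assumption: empty/unseen cells are omitted
+             words = [COLORS.get(int(color_idx), "")]
+             if obj == "door":
+                 words.append(STATES.get(int(state_idx), ""))
+             words.append(obj)
+             phrases.append(" ".join(w for w in words if w))
+     return ", ".join(phrases)
+ ```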
90
+
91
+ We note that the policy network described above does not strictly require that these encodings take the form of natural language strings; other encodings of the environment as a sequence also work (see Section 7). This framework could also be generalized to support pixel-based observations using discretization schemes like the one employed in the Vision Transformer [10].
92
+
93
+ Action prediction. We pool LM outputs into a "context representation" that is used to predict the next action. In training, we maximize the probabilities of demonstrated actions. In inference, we select the valid action with the highest probability. See Appendix C.1 for details.
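+
+ The sketch below illustrates both steps, assuming the action logits come from a policy head like the one sketched earlier; the valid-action indices are made up for the example.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def imitation_loss(action_logits, demo_actions):
+     """Training: maximize the probability of the demonstrated action (cross-entropy)."""
+     return F.cross_entropy(action_logits, demo_actions)
+
+ def select_action(action_logits, valid_action_mask):
+     """Inference: pick the highest-probability action among the currently valid ones."""
+     masked = action_logits.masked_fill(~valid_action_mask, float("-inf"))
+     return masked.argmax(dim=-1)
+
+ logits = torch.randn(1, 50)                       # one state, 50 candidate actions (assumed)
+ loss = imitation_loss(logits, torch.tensor([7]))  # the expert chose action 7 in this state
+ valid = torch.zeros(1, 50, dtype=torch.bool)
+ valid[0, [3, 7, 12]] = True                       # only these actions are executable now
+ print(loss.item(), select_action(logits, valid))
+ ```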
94
+
95
+ VirtualHome and BabyAI have quite different observation spaces, action spaces, and goal spaces; however, we show that embedding policy inputs as sequences and using the pre-trained LM as a policy initializer enables effective generalization to novel tasks in both environments. We note that LID is not limited to VirtualHome and BabyAI, but is straightforwardly applicable to other embodied environments, such as ALFRED [40] and iGibson [39].
96
+
97
+ # 4.2 Training
98
+
99
+ We first examine LID through imitation learning on data collected by experts in Section 4.2.1. We then show that integrating an active data gathering procedure into LID enables policy learning without using expert data in Section 4.2.2. We use VirtualHome as an example to explain the data gathering.
100
+
101
+ # 4.2.1 Policy Learning with Expert Data
102
+
103
+ The policy model is first initialized from a pre-trained LM and then fine-tuned on data collected by experts. We build on the VirtualHome environment to collect a set of expert trajectories using regression planning [20] and create a VirtualHome-Imitation Learning dataset. Given a task described by goal predicates, the planner generates an action sequence to accomplish this task (See Appendix E.1). The planner has access to privileged information, such as information about the pre-conditions and effects of each action, allowing an agent to robustly perform tasks in partially observable environments and generate expert trajectories for training and evaluation.
104
+
105
+ # 4.2.2 Policy Learning with Active Data Gathering
106
+
107
+ Collecting expert data is sometimes challenging. It may require privileged information about the environment or human annotations, which can be time-consuming and difficult to scale. A promising way to scale up supervision is Hindsight Experience Replay (HER) [3], which allows agents to learn from orders of magnitude more data without supervision. However, existing HER methods [12] focus on simple tasks with small state/action spaces and full observability. They cannot tackle more complicated embodied decision-making tasks that require nontrivial planning and reasoning or natural language understanding. LID with active data gathering (LID-ADG) can be used to solve tasks in such environments.
108
+
109
+ ![](images/0ca8d61aaaf920d56511a5195db9475791a9560fdfd451399a066da554083acf.jpg)
110
+ Figure 2: LID with the active data gathering procedure. By iteratively repeating the exploration, hindsight relabeling, and policy update, LID with active data gathering can learn an effective policy without using pre-collected expert data.
111
+
112
+ As shown in Figure 2, LID-ADG consists of three stages, i.e. exploration, hindsight relabeling, and policy update. The key idea is to gradually improve the task success rate by asking the agent to iteratively explore the environment, relabel failure samples, and update its policy using imitation learning. In the exploration stage, we first randomly sample a goal and an initial state. We then use a mix of random actions and actions generated by the current policy $\pi_{\phi}(a_t|g,h_t,o_t)$ to obtain the next action. We repeat this process until this episode ends. We collect $M$ trajectories and store them in the replay buffers. The generated actions in the early stages rarely complete the given task.
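+
+ A minimal sketch of this exploration loop follows; the environment interface (reset, step, valid_actions), the policy's act method, and the mixing rate epsilon are assumptions made for illustration.
+
+ ```python
+ import random
+
+ def collect_trajectories(env, policy, goals, num_trajectories, epsilon=0.3, max_steps=70):
+     """Exploration stage: mix random actions with actions from the current policy."""
+     buffer = []
+     for _ in range(num_trajectories):
+         goal = random.choice(goals)
+         obs, history = env.reset(goal), []
+         trajectory = []
+         for _ in range(max_steps):
+             valid_actions = env.valid_actions()
+             if random.random() < epsilon:                    # random exploration
+                 action = random.choice(valid_actions)
+             else:                                            # action from the current policy
+                 action = policy.act(goal, history, obs, valid_actions)
+             trajectory.append((obs, action))
+             obs, done = env.step(action)
+             history.append(action)
+             if done:
+                 break
+         buffer.append((goal, trajectory))
+     return buffer
+ ```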
113
+
114
+ However, even the failed trajectories contain useful sub-trajectories that solve certain sub-goals. In the hindsight relabeling stage, we extract useful sub-trajectories and relabel a goal $g'$ for each of them. We design a goal relabel function $f_{l}$ that generates a goal based on the sequence of observations and actions using hand-designed templates. In practice, we implement the goal relabel function as a program (see Appendix E.2). The hindsight relabeling stage allows sample-efficient learning by reusing the failure cases. During policy update, the agent samples the data from the replay buffers and updates its policy network $\pi_{\phi}$ .
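+
+ As an illustration, such a relabel function might scan the executed actions for interactions that imply a completed predicate, as in the hypothetical sketch below; the actual relabel program is described in Appendix E.2.
+
+ ```python
+ def relabel_goal(actions):
+     """Hypothetical hindsight relabeling: infer achieved predicates from executed actions.
+
+     actions: list of strings such as "[put] <apple> <kitchentable>".
+     Returns goals such as "On(apple, kitchentable):1".
+     """
+     achieved = {}
+     for action in actions:
+         parts = action.replace("[", " ").replace("]", " ").replace("<", " ").replace(">", " ").split()
+         if parts and parts[0] == "put" and len(parts) == 3:
+             predicate = f"On({parts[1]}, {parts[2]})"
+             achieved[predicate] = achieved.get(predicate, 0) + 1
+     return [f"{pred}:{count}" for pred, count in achieved.items()]
+
+ print(relabel_goal(["[walk] <kitchen>", "[grab] <apple>", "[put] <apple> <kitchentable>"]))
+ # ['On(apple, kitchentable):1']
+ ```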
115
+
116
+ By interleaving the exploration, hindsight relabeling, and policy update, LID-ADG can gradually improve the policy without requiring pre-collected expert data. In embodied environments with large action spaces, sparse rewards, and long-horizon planning, RL methods often struggle to obtain stable policy gradients during training. Our method enables sample-efficient learning from the sparse rewards by relabeling new goals for the bad samples that the agent fails to achieve. In addition, LID-ADG leverages the stability of supervised learning in the policy update stage, enabling it to outperform RL approaches on a wide range of decision-making tasks.
117
+
118
+ # 5 Experiment Setup
119
+
120
+ We evaluate the proposed method and baselines on VirtualHome and BabyAI.
121
+
122
+ # 5.1 VirtualHome
123
+
124
+ VirtualHome is a 3D embodied environment featuring partial observability, large action spaces, and long time horizons. We evaluate policies' performance from three aspects: (1) performance on in-distribution tasks; (2) generalization to novel scenes; and (3) generalization to novel tasks.
125
+
126
+ In-Distribution. The predicate types and their counts in the goal are randomly sampled from the same distribution as the training data. The objects are initially placed in the environment according to common-sense layouts (e.g. plates appear inside the kitchen cabinets rather than the bathtub).
127
+
128
+ Novel Scenes. The objects are placed in random positions in the initial environment without commonsense constraints (e.g. apples may appear inside the dishwasher).
129
+
130
+ Novel Tasks. The components of all goal predicates are never seen together during training (e.g. both plates and fridges appear in training goals, but Inside(plate, fridge) only appears in the test set). See Appendix F for more details.
131
+
132
+ We evaluate the success rates of different methods on each test set. A given episode is scored as successful if the policy completes its entire goal within the maximum allowed steps of the environment. On each of the 3 test subsets, we use 5 different random seeds and test 100 tasks under each seed. Thus there are 1500 examples used to evaluate each model.
133
+
134
+ # 5.2 BabyAI
135
+
136
+ BabyAI is a 2D grid world environment for instruction following. Observations in BabyAI are $7 \times 7 \times 3$ grids describing a partial and local egocentric view of the state of the environment. We evaluate the methods on four representative tasks: GoToRedBall, GoToLocal, PickupLoc, and PutNextLocal. Performing well on the test set requires the models to generalize to new environment layouts and goals, resulting in new combinations of tasks not seen in training. For each method, we compute success rates over 500 episodes on each task.
137
+
138
+ # 6 Experiments
139
+
140
+ We first show results of the proposed method and baselines for embodied decision-making tasks using expert data in Section 6.1. We then show our results when using actively gathered data in Section 6.2.
141
+
142
+ # 6.1 Embodied Decision Making with Pre-trained Language Model (LID)
143
+
144
+ # 6.1.1 Results on VirtualHome
145
+
146
+ We evaluate the following methods:
147
+
148
+ ![](images/31473f698800fa17e4973e92bbc6056b958720febde8841f69e6e589fc86ac0a.jpg)
149
+ Figure 3: Comparisons of the proposed method and baselines on VirtualHome. All the methods are trained on expert data using imitation learning. MLP-1, MLP, and LSTM are baselines without using the pre-trained LM. The proposed method, LID-Text (Ours), outperforms all baselines.
150
+
151
+ <table><tr><td rowspan="2">Tasks</td><td rowspan="2">Methods</td><td colspan="5">Number of Demos</td></tr><tr><td>100</td><td>500</td><td>1K</td><td>5K</td><td>10K</td></tr><tr><td rowspan="2">GoToRedBall</td><td>BabyAI-Ori [16]</td><td>81.0</td><td>96.0</td><td>99.0</td><td>99.5</td><td>99.9</td></tr><tr><td>LID-Text (Ours)</td><td>93.9</td><td>99.4</td><td>99.7</td><td>100.0</td><td>100.0</td></tr><tr><td rowspan="2">GoToLocal</td><td>BabyAI-Ori [16]</td><td>55.9</td><td>84.3</td><td>98.6</td><td>99.9</td><td>99.8</td></tr><tr><td>LID-Text (Ours)</td><td>64.6</td><td>97.9</td><td>99.0</td><td>99.5</td><td>99.5</td></tr><tr><td rowspan="2">PickupLoc</td><td>BabyAI-Ori [16]</td><td>28.0</td><td>58.0</td><td>93.3</td><td>97.9</td><td>99.8</td></tr><tr><td>LID-Text (Ours)</td><td>28.7</td><td>73.4</td><td>99.0</td><td>99.6</td><td>99.8</td></tr><tr><td rowspan="2">PutNextLocal</td><td>BabyAI-Ori [16]</td><td>14.3</td><td>16.8</td><td>43.4</td><td>81.2</td><td>97.7</td></tr><tr><td>LID-Text (Ours)</td><td>11.1</td><td>93.0</td><td>93.2</td><td>98.9</td><td>99.9</td></tr></table>
152
+
153
+ Table 1: Success rates on BabyAI tasks. All the methods are trained on offline expert data using imitation learning. LID-Text (Ours) outperforms BabyAI-Ori, the method used in the original paper [16].
154
+
155
+ LID-Text (Ours) is the proposed method that converts all environment inputs into text descriptions. The pre-trained LM is fine-tuned for decision-making (conditioned on goals, observations, and histories) as described in Section 4.1.
156
+
157
+ Recurrent Network. We compare our method with a recurrent baseline that uses an LSTM [14] to encode the history information. The hidden representation from the last timestep, together with the goal and current observation, is used to predict the next action.
158
+
159
+ MLP and MLP-1. We perform additional comparisons with baselines that use neither recurrent networks nor pre-trained LMs. MLP and MLP-1 take the goal, history, and current observation as input and feed them to a multilayer perceptron (MLP) to predict actions. MLP-1 differs from MLP in that it adds three average-pooling layers that average the features of tokens in the goal, the action history, and the current observation, respectively, before passing them to the MLP.
160
+
161
+ Quantitative results. Each method is trained on $20K$ demos from the VirtualHome-Imitation Learning dataset, and then evaluated on the three test subsets: In-Distribution, Novel Scenes, and Novel Tasks. In Figure 3, LID-Text (Ours), which initializes the policy with a pre-trained LM, has higher success rates than other methods. This difference is most pronounced in the Novel Tasks setting, where test tasks require combinatorial generalization across goals that are never seen during training. Here, LID-Text (Ours) improves upon all baselines by a large margin $(43.6\%)$. Such combinatorial generalization is necessary to construct general-purpose agents, but is often difficult for existing approaches. Our results suggest that pre-trained LMs can serve as a computational backbone for combinatorial generalization.
162
+
163
+ # 6.1.2 Results on BabyAI
164
+
165
+ We use the standard training and test data provided by [16]. In BabyAI, performing well on unseen test tasks with new environment layouts and goals requires combinatorial reasoning. In Table 1, we report the success rates of models trained on different numbers of demos. BabyAI-Ori [16] is the method used in the original paper. LID-Text (Ours) is the proposed method that converts policy inputs into a text sequence. Given enough training data, i.e. 10K demos, both methods achieve high success rates, but LID-Text (Ours) outperforms BabyAI-Ori with less training data, indicating that the proposed method improves sample efficiency when generalizing to novel tasks.
166
+
167
+ # 6.2 Pre-trained Language Model with Active Data Gathering (LID-ADG)
168
+
169
+ We compare LID-ADG, the proposed LM framework for decision-making using actively gathered data (Section 4.2.2), to a variety of baselines that do not use pre-collected expert data on VirtualHome.
170
+
171
+ Random. The agent selects the next action randomly from the valid action space at that state. Goal-Object. The agent randomly selects an object that is in the goal and in the valid action space to interact with. For example, given a goal of "Inside(apple, fridge):1", this baseline might choose "grab apple", "open fridge", or other actions containing "apple" or "fridge". Online RL. We compare with PPO [37], one of the most commonly used online RL methods. For a fair comparison, we equip PPO with the same main policy network as the proposed method. Our implementation is based on Stable Baselines3 [35].
172
+
173
+ <table><tr><td></td><td>In-Distribution</td><td>Novel Scenes</td><td>Novel Tasks</td></tr><tr><td>Random</td><td>0.0 ± 0.0</td><td>0.0 ± 0.0</td><td>0.0 ± 0.0</td></tr><tr><td>Goal-Object</td><td>0.8 ± 0.5</td><td>0.0 ± 0.0</td><td>0.4 ± 0.4</td></tr><tr><td>PPO</td><td>0.0 ± 0.0</td><td>0.0 ± 0.0</td><td>0.0 ± 0.0</td></tr><tr><td>DQN+HER</td><td>0.0 ± 0.0</td><td>0.0 ± 0.0</td><td>0.0 ± 0.0</td></tr><tr><td>LID-ADG (Ours)</td><td>46.7 ± 2.7</td><td>32.2 ± 3.3</td><td>25.5 ± 4.1</td></tr></table>
174
+
175
+ Table 2: Comparisons of methods without using expert data on VirtualHome. LID-ADG (Ours) is the only successful approach.
176
+
177
+ <table><tr><td></td><td>In-Distribution</td><td>Novel Scenes</td><td>Novel Tasks</td></tr><tr><td>LID-ADG (Ours)</td><td>46.7 ± 2.7</td><td>32.2 ± 3.3</td><td>25.5 ± 4.1</td></tr><tr><td>PPO (LID-ADG Init)</td><td>53.7 ± 3.5</td><td>30.2 ± 3.4</td><td>27.8 ± 2.7</td></tr><tr><td>DT (LID-ADG Data)</td><td>42.4 ± 1.5</td><td>21.6 ± 2.48</td><td>16.8 ± 1.0</td></tr></table>
178
+
179
+ Table 3: The proposed method with active data gathering, LID-ADG (Ours), can be used as a policy initializer for online RL or a data provider for offline RL.
180
+
181
+ Hindsight Experience Replay. We compare with DQN+HER used in [3] and modify its main policy network to be the same as the proposed method.
182
+
183
+ Quantitative results. We compare LID-ADG with baselines on VirtualHome in Table 2. Each experiment is performed 5 times with different random seeds. The Random baseline is always 0, indicating that the tasks in VirtualHome cannot be easily solved by a random policy. Goal-Object is better than Random because Goal-Object has access to the objects in the goal and thus samples actions from a much smaller action space. The online RL baseline, PPO, fails to solve tasks in VirtualHome, which features partial observability, a large state/action space, and long horizons. DQN+HER works well on simple tasks in 2D environments, but it cannot tackle VirtualHome tasks either, as they require nontrivial planning and reasoning. LID-ADG does not require expert data and can solve complicated tasks in 3D embodied environments that cannot be easily solved with RL.
184
+
185
+ Policy initializer and data provider. LID-ADG can further be used to initialize the weights for fine-tuning RL policies and to gather data for offline learning. As shown in Table 2, directly training an RL method such as PPO fails to solve tasks in VirtualHome. However, after using the policy trained by LID-ADG to initialize the PPO policy, we can effectively learn an interactive policy with good performance. In Table 3, PPO (LID-ADG Init) is initialized from LID-ADG and further fine-tuned to solve the tasks in VirtualHome. After initialization, PPO improves its success rate by $53.7\%$ in the In-Distribution setting (compare the PPO results in Table 2 and Table 3). In addition, LID-ADG can provide data for offline learning: LID-ADG saves the relabeled data in replay buffers, and we train a Decision Transformer (DT) [7] using the data collected by LID-ADG. See DT (LID-ADG Data) in Table 3.
186
+
187
+ # 7 Analysis: Understanding the Sources of Generalization
188
+
189
+ The pre-trained LM policy, fine-tuned on either expert data or actively gathered data, exhibits effective combinatorial generalization. Is this simply because LMs are effective models of relations between natural language descriptions of states and actions [1], or because they provide a more general framework for combinatorial generalization in decision-making? We hypothesize and investigate three possible factors to understand the sources of such combinatorial generalization. We use policies trained on the expert data as an example to explain the experiments.
190
+
191
+ # 7.1 Input Encoding Scheme
192
+
193
+ We first hypothesize that converting environment inputs into natural language contributes to the combinatorial generalization as the LMs are trained on language data. We explore the role of natural language by investigating three alternative ways of encoding policy inputs to our model without using natural language strings: two in VirtualHome, and one in BabyAI. BabyAI results are in Appendix A.
194
+
195
+ Index encoding in VirtualHome. Rather than natural language strings, LID-Index (Ours) converts policy inputs into integer indices. LID-Index (Ours) retains the discrete, serial format of the goal, history, and observation, but replaces each word with an integer, and replaces the embedding layer from the pre-trained LM with a new embedding layer trained from scratch. For example, grab apple is mapped to (5, 3) based on the positions of grab and apple in the vocabulary set.
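+
+ A small sketch of such an index encoding is shown below; the toy vocabulary and embedding size are assumptions, and in the actual model the new embedding layer feeds the same transformer blocks as before.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # Hypothetical environment vocabulary; each word is mapped to its index.
+ vocab = ["walk", "open", "close", "grab", "put", "apple", "fridge", "table"]
+ word_to_index = {word: i for i, word in enumerate(vocab)}
+
+ # A fresh embedding layer trained from scratch replaces the pre-trained word embeddings.
+ embedding = nn.Embedding(num_embeddings=len(vocab), embedding_dim=768)
+
+ action = "grab apple"
+ indices = torch.tensor([[word_to_index[w] for w in action.split()]])  # [[3, 5]] with this vocab
+ features = embedding(indices)        # sequence of embeddings fed to the transformer
+ print(indices, features.shape)
+ ```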
196
+
197
+ Unnatural string encoding in VirtualHome. LID-Unnatural (Ours) replaces the natural language tokens (e.g. converting the goal "On(fork, table):1" to put one fork on the table) with random ones (e.g. converting On(fork, table) to brought wise character trees fine yet). This is done by randomly permuting the entire vocabulary, mapping each token to a new token. Such a permutation breaks the semantic information in natural strings.
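+
+ A sketch of such a fixed random permutation over the LM vocabulary is shown below; the example token IDs are arbitrary.
+
+ ```python
+ import torch
+
+ vocab_size = 50257                      # GPT-2 vocabulary size
+ generator = torch.Generator().manual_seed(0)
+ permutation = torch.randperm(vocab_size, generator=generator)
+
+ def unnaturalize(token_ids):
+     """Map every token to a fixed random token, destroying natural-language semantics."""
+     return permutation[token_ids]
+
+ token_ids = torch.tensor([5268, 530, 15563, 319, 262, 3084])   # arbitrary example token IDs
+ print(unnaturalize(token_ids))
+ ```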
198
+
199
+ Table 4: Success rates of policies trained with different input encodings in the Novel Tasks setting on VirtualHome. The text encoding is the most sample-efficient, but all models converge to similar performance given sufficient training data.
200
+
201
+ <table><tr><td rowspan="2">Methods</td><td colspan="6">Number of Demos</td></tr><tr><td>100</td><td>500</td><td>1K</td><td>5K</td><td>10K</td><td>20K</td></tr><tr><td>LID-Text (Ours)</td><td>8.8 ± 1.4</td><td>22.2 ± 1.7</td><td>26.8 ± 1.0</td><td>46.0 ± 1.0</td><td>58.2 ± 1.2</td><td>58.2 ± 1.6</td></tr><tr><td>LID-Index (Ours)</td><td>6.4 ± 0.6</td><td>18.0 ± 3.8</td><td>18.8 ± 1.0</td><td>45.5 ± 2.1</td><td>54.6 ± 0.8</td><td>57.8 ± 0.9</td></tr><tr><td>LID-Unnatural (Ours)</td><td>6.8 ± 1.3</td><td>18.6 ± 2.1</td><td>27.0 ± 1.1</td><td>47.2 ± 1.7</td><td>55.8 ± 0.8</td><td>58.8 ± 0.9</td></tr></table>
202
+
203
204
+
205
+ LID-Index (Ours) and LID-Unnatural (Ours) have the same policy network as LID-Text (Ours). All are fine-tuned on the expert data. The results averaged over 5 different random seeds in the Novel Tasks setting are reported in Table 4. With little training data, e.g. 100 demos, all models perform poorly, with success rates lower than $10\%$ . LID-Text (Ours) achieves higher success rates than LID-Index (Ours) and LID-Unnatural (Ours) as the dataset size increases, e.g. LID-Text (Ours) is around $4\%$ higher than LID-Index (Ours) and LID-Unnatural (Ours) with 500 training demos. When the training dataset is further enlarged, e.g. to 20K demos, the success rates of all approaches converge to similar values. This result indicates that the effectiveness of pre-trained LMs for compositional generalization is not unique to natural language strings, but extends to arbitrary encodings, although adapting the model to arbitrary encodings may require more training data.
206
+
207
+ # 7.2 Sequential Input Representation
208
+
209
+ Next, we explore whether generalization requires the sequential processing mechanisms in transformer-based LMs. We investigate whether the LM-initialized policy will still be effective when the input encoding is not sequential. No-Seq encodes the goal as a single vector by averaging all goal embeddings. History and observation features are obtained in the same way. All features are then sent to the pre-trained LM to predict actions. As shown in Table 5, removing sequential structure significantly hurts performance on Novel Tasks.
210
+
211
+ Table 5: Experiments on sequential inputs and weight initialization. Fine-tuning the pre-trained weights and the usage of sequential encoding are important for combinatorial generalization.
212
+
213
+ <table><tr><td></td><td>In-Distribution</td><td>Novel Tasks</td></tr><tr><td>LID-Text (Ours)</td><td>87.6 ± 1.9</td><td>58.2 ± 2.3</td></tr><tr><td>No-Seq</td><td>74.0 ± 2.3</td><td>2.0 ± 0.6</td></tr><tr><td>No-Pretrain</td><td>90.8 ± 2.0</td><td>47.0 ± 2.8</td></tr><tr><td>No-FT</td><td>51.2 ± 4.5</td><td>17.0 ± 2.9</td></tr></table>
214
+
215
+ No-Seq achieves good performance on test tasks that are closer to the training tasks, but cannot generalize well to more challenging unseen tasks. Thus, combinatorial generalization in pre-trained LMs may be attributed in part to transformers' ability to process sequential input representations effectively.
216
+
217
+ # 7.3 Favorable Weight Initialization
218
+
219
+ Finally, we investigate whether the favorable weight initialization from LM pre-training enables effective generalization of the proposed model. No-Pretrain does not initialize the policy with the pre-trained LM, but instead trains the policy on the expert data from scratch. In Table 5, we find that the model without pre-trained weights can still fit the in-domain data and thus performs well in the In-Distribution setting. However, its success rate is $11.2\%$ lower than the proposed model in the Novel Tasks setting, indicating that the pre-trained weights are important for effective generalization, though not necessary for fitting the training data. We further test a baseline, No-FT, that keeps the pre-trained weights of the language model but freezes them while training the rest of the model on our expert data. Freezing the pre-trained weights without fine-tuning significantly hurts performance in both settings, suggesting that fine-tuning the transformer weights is essential for effective combinatorial generalization.
220
+
221
+ Together, these results suggest that sequential input representations (vs. fixed-dimensional feature vectors) and favorable weight initialization are both important for generalization; the input encoding scheme (e.g. a natural language string vs. an arbitrary encoding), in contrast, has little influence. These results point to the potential broader applicability of pre-trained LMs as a computational backbone for compositional embodied decision-making, where arbitrary inputs, such as language, images, or grids, may be converted to sequential encodings.
222
+
223
+ ![](images/a630d63b5088795acccce8c2395ca69259e069ec2c8752a03aec49925cf46f02.jpg)
224
+
225
+ ![](images/8c9b448ee4bb3e7d4d8fb7e411805ad9dcf0c8f5f1f9d41965c8e73fc2608383.jpg)
226
+
227
+ ![](images/c5b6abbdc15cd207d9e9e93627b94b79f92e1fee0ee522a738a3246e0dd99ee1.jpg)
228
+ Goal: Inside (pancake, stove): 1; Close (stove); SwitchOn (stove)
229
+
230
+ ![](images/341acc3b13bb004b86833576f259b3ec7f7348c60015aec6b777983d45372abe.jpg)
231
+
232
+ ![](images/cd6756756a04bc55c477a9a3d3328ae63e7df07aa259a4775ce2035de059e1c8.jpg)
233
+
234
+ ![](images/4b41aede2d954f5d7bc96fb7f3964cd3a523323aae61ebcbfad211f3e862baa1.jpg)
235
+
236
+ ![](images/87e3ca7a9f2851267270ffd2bdd2a9cb3aeb250553eace88d0a122891c966227.jpg)
237
+
238
+ ![](images/f0057bdf702a6a30271694a7d7e55efef64991b659e2ee250f643d6fa48da65b.jpg)
239
+
240
+ ![](images/849c067c395106c65eec9dce65b8c19a6d46eb2f7e7163de547b96f8baa8d7d5.jpg)
241
+
242
+ ![](images/aa119fc2fc4fab51ceb56eb397a2e399928c0c2f5a41e18dd86a8adefe9d3939.jpg)
243
+
244
+ ![](images/3db08d212c5cbf7b266091e53fdbdfb4f7293eef2ebad5a05a760f7f7d66ecf2.jpg)
245
+
246
+ ![](images/8e18b1e325bacf97b833b11523c79e59239f024c24853381653df29dd9945ab8.jpg)
247
+
248
+ ![](images/ab8e9fd2e05baf381dd43c9344e8b356c3d87df89e6f45a544d8ca4e4df9c3a0.jpg)
249
+
250
+ ![](images/60abb00a997b88b8eb2d015df0a95981b3bd39f179e209ac25e3251672718d81.jpg)
251
+
252
+ ![](images/89e65c5468430ec852e438efc69abe4eda5b8853272e2da6f8c4690c0f66c1e1.jpg)
253
+
254
+ ![](images/b7c2b0eed8f86262f5c0902344c819747f30bbe967502d01e8df527922451415.jpg)
255
+
256
+ ![](images/8c7a579e84d54533f82378c0f9c19cb3758e1db68e2f72e2c03c09901e9d9953.jpg)
257
+ Goal: go to the grey key
258
+
259
+ ![](images/8f56b1c8ccbf5a4c10b1190991e399a8b9bc844d08b55c2a728050bb3b0ca03d.jpg)
260
+ Goal: Inside (cutlery fork, fridge): 1
261
+
262
+ ![](images/cc22ee60c3bcd9374b9a909d8416dc546c5907d4b86bf33431c8adb4569b892f.jpg)
263
+
264
+ ![](images/c675888fe7ed0484491bbe4474bf45f2a0b6e5a4f32ab50e965fcf0c309a1ae5.jpg)
265
+ Goal: pick up the purple key on your right
266
+
267
+ ![](images/8490990a2ed9cd6eca70b1b6e2ec26eac139f9d98f53f91a61b53d91a576c9e2.jpg)
268
+
269
+ ![](images/71cfba14a68bb5bfc08814d58c567a472494e566b84c209b5ca0b31dd1fe0182.jpg)
270
+
271
+ ![](images/88dba6e22b62e8d5be9449a0fbfe99186c36572c1d3aafeb97b96ca52d15c0b4.jpg)
272
+
273
+ ![](images/a1462e435920762915147090b6ade605eb0988fca2805e0c76772c99b9461e5b.jpg)
274
+ Figure 4: Qualitative results of our model on VirtualHome and BabyAI. We only show a sub-trajectory in each example to save space. The interacted objects are labelled by green bounding boxes.
275
+
276
+ ![](images/819cad20198a4699bfcf9f636dc1d45551b887335763c1fb254145202322661e.jpg)
277
+ Goal: Inside (salmon,fridge): 1; Inside (sundae,fridge): 1
278
+
279
+ ![](images/5005fcb7e9f2130279820fa7ca81cad28ba3ca7bfcaabdfaa292d90b87a9bdf5.jpg)
280
+
281
+ ![](images/f6e207e240efd96921beb50538f8f1e26c3ef9a2c3ea8e9d74b26b00c6b91f80.jpg)
282
+
283
+ ![](images/e23f83ea8af8e7ae1f83b6091fdc1f267eb99436e0e6a3a3544fd4b1b5441715.jpg)
284
+
285
+ ![](images/66d6c8f6998dcf8b266c59671bbb1210338384eddd05b33e853a737c0058700a.jpg)
286
+
287
+ ![](images/74de7f7678d08a6332b3565bf56748ff86552b449f555c0b55994f6416c7500a.jpg)
288
+
289
+ ![](images/48e80347c8c7d67ef3d96376804e813c2f65d3a7f250ddcc8818ef76a1ac172d.jpg)
290
+
291
+ ![](images/74ea7ccd6d94a0d91477d3d225b3694b5ad046c3adc2518f46b0dac2ae406751.jpg)
292
+
293
+ ![](images/f2ce4fade3fa6c11a2f4d66ca5977e34eea12ca458bf0ed712d526dac70c9361.jpg)
294
+
295
+ ![](images/21a1a6573e7cc4a0f7eed999d8fdcffc18d6089c1343db4b07862b9240e0220b.jpg)
296
+ Goal: Inside (chicken, oven): 1; Close (oven); TurnON (oven)
297
+
298
+ ![](images/466fc88f21083b76113085cd3c9ef83cde5637277ae79f65a2de14677fb2f106.jpg)
299
+
300
+ ![](images/0e70acfc567babc102fd9f41172c05991cc05ce5b67a124779e24046082c2911.jpg)
301
+ Figure 5: Failure cases. We show failure cases caused by the grounding error and policy error. The interacted objects are labelled by green bounding boxes.
302
+
303
+ ![](images/bdfcc2eb71ef46e45c21cd65aa2ab4c56ee87cbcd136ed4f57e01e2b5b2c19e3.jpg)
304
+
305
+ ![](images/366cbb72bff20dba4cf5f1cd9cb5fb73c759605c7df8524195628181fc8a4729.jpg)
306
+
307
+ ![](images/a37dcb9b49280762f95bfc9b6bb2f77697849ce4d2cb2854e86bc71f7c38fd9e.jpg)
308
+ Goal: On (pillow, bed): 1
309
+
310
+ ![](images/8cda391880bfa7f61507839e2208263fe123248a3dd44554fd1a59fb1373b524.jpg)
311
+
312
+ ![](images/613f72d1278efaba62fa67d4f837c5683c9502bd89c425b4fad5da58ee253ae6.jpg)
313
+
314
+ # 8 Qualitative Results
315
+
316
+ In Figure 4, we show examples of LID-Text (Ours) completing tasks in VirtualHome and BabyAI. We show two successful examples from VirtualHome in the In-Distribution and Novel Tasks settings, and two successful examples from BabyAI solving the GoToLocal and PickupLoc tasks. We only show short trajectories or extract a sub-trajectory to save space.
317
+
318
+ Failure case analysis. In Figure 5, we show some failure cases of the proposed method. We observe two main types of failures: grounding errors and policy errors. In failures caused by grounding errors, the agent interacts with a wrong object that is not related to the given goal, e.g. the agent puts cutlets instead of the salmon inside the fridge. In failures caused by policy errors, the agent cannot find the target objects or does not interact with them. The proposed method, which converts policy inputs into sequential encodings and feeds them to the general LM framework, can accomplish decision-making tasks efficiently; however, there are still challenging tasks that the policy fails to accomplish. Larger LMs, e.g. GPT-3 [6], may improve the success rate on these challenging tasks.
319
+
320
+ # 9 Conclusion and Broader Impact
321
+
322
+ In this paper, we introduced LID, a general approach to sequential decision-making that converts goals, histories, and observations into sequences and processes them using a policy initialized with a pre-trained LM. We integrated an active data gathering procedure into the proposed method to enable policy learning without using expert data. Our analysis showed that sequential input representations and favorable weight initialization both contribute to generalization, while the input encoding scheme has little influence. One drawback of the active data gathering is that it relies on hand-designed rules for task relabeling. More generally, a potential disadvantage of the proposed approach is that biases of the pre-trained LMs may influence its behavior, and further study of LID-based models' bias is required before they may be deployed in sensitive downstream applications. Nevertheless, our results demonstrate that LID enables effective combinatorial generalization across different environments, and highlight the promise of LM pre-training for more general decision-making problems.
323
+
324
+ # References
325
+
326
+ [1] P. Ammanabrolu and M. O. Riedl. Playing text-adventure games with graph-based deep reinforcement learning. arXiv preprint arXiv:1812.01628, 2018.
327
+ [2] J. Andreas and D. Klein. Learning with latent language. In North American Association for Computational Linguistics, 2022.
328
+ [3] M. Andrychowicz, F. Wolski, A. Ray, J. Schneider, R. Fong, P. Welinder, B. McGrew, J. Tobin, P. Abbeel, and W. Zaremba. Hindsight experience replay. arXiv preprint arXiv:1707.01495, 2017.
329
+ [4] A. P. Jacob, M. Lewis, and J. Andreas. Multitasking inhibits semantic drift. In North American Association for Computational Linguistics, 2021.
330
+ [5] G. Brockman, V. Cheung, L. Pettersson, J. Schneider, J. Schulman, J. Tang, and W. Zaremba. Openai gym, 2016.
331
+ [6] T. B. Brown, B. Mann, N. Ryder, M. Subbiah, J. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.
332
+ [7] L. Chen, K. Lu, A. Rajeswaran, K. Lee, A. Grover, M. Laskin, P. Abbeel, A. Srinivas, and I. Mordatch. Decision transformer: Reinforcement learning via sequence modeling. arXiv preprint arXiv:2106.01345, 2021.
333
+ [8] S. Deerwester, S. T. Dumais, G. W. Furnas, T. K. Landauer, and R. Harshman. Indexing by latent semantic analysis. Journal of the American society for information science, 41(6):391-407, 1990.
334
+ [9] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
335
+ [10] A. Dosovitskiy, L. Beyer, A. Kolesnikov, D. Weissenborn, X. Zhai, T. Unterthiner, M. Dehghani, M. Minderer, G. Heigold, S. Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.
336
+ [11] S. T. Dumais. Latent semantic analysis. Annual review of information science and technology, 38(1):188-230, 2004.
337
+ [12] D. Ghosh, A. Gupta, A. Reddy, J. Fu, C. Devin, B. Eysenbach, and S. Levine. Learning to reach goals via iterated supervised learning. arXiv preprint arXiv:1912.06088, 2019.
338
+ [13] F. Hill, S. Mokra, N. Wong, and T. Harley. Human instruction-following with deep reinforcement learning via transfer-learning from text. arXiv preprint arXiv:2005.09382, 2020.
339
+ [14] S. Hochreiter and J. Schmidhuber. Long short-term memory. Neural computation, 9(8):1735-1780, 1997.
340
+ [15] W. Huang, P. Abbeel, D. Pathak, and I. Mordatch. Language models as zero-shot planners: Extracting actionable knowledge for embodied agents. arXiv preprint arXiv:2201.07207, 2022.
341
+ [16] D. Y.-T. Hui, M. Chevalier-Boisvert, D. Bahdanau, and Y. Bengio. Babyai 1.1, 2020.
342
+ [17] E. Jang, A. Irpan, M. Khansari, D. Kappler, F. Ebert, C. Lynch, S. Levine, and C. Finn. Bc-z: Zero-shot task generalization with robotic imitation learning. In Conference on Robot Learning, pages 991-1002. PMLR, 2022.
343
+ [18] N. S. Keskar, B. McCann, L. R. Varshney, C. Xiong, and R. Socher. Ctrl: A conditional transformer language model for controllable generation. arXiv preprint arXiv:1909.05858, 2019.
344
+ [19] N. Kitaev, S. Cao, and D. Klein. Multilingual constituency parsing with self-attention and pre-training. arXiv preprint arXiv:1812.11760, 2018.
345
+
346
+ [20] R. E. Korf. Planning as search: A quantitative approach. Artificial intelligence, 33(1):65-88, 1987.
347
+ [21] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra. Continuous control with deep reinforcement learning. arXiv preprint arXiv:1509.02971, 2015.
348
+ [22] J. Lu, D. Batra, D. Parikh, and S. Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. arXiv preprint arXiv:1908.02265, 2019.
349
+ [23] K. Lu, A. Grover, P. Abbeel, and I. Mordatch. Pretrained transformers as universal computation engines. arXiv preprint arXiv:2103.05247, 2021.
350
+ [24] A. Majumdar, A. Shrivastava, S. Lee, P. Anderson, D. Parikh, and D. Batra. Improving vision-and-language navigation with image-text pairs from the web. In European Conference on Computer Vision, pages 259-274. Springer, 2020.
351
+ [25] T. Mikolov, I. Sutskever, K. Chen, G. S. Corrado, and J. Dean. Distributed representations of words and phrases and their compositionality. In Advances in neural information processing systems, pages 3111-3119, 2013.
352
+ [26] V. Mnih, A. P. Badia, M. Mirza, A. Graves, T. Lillicrap, T. Harley, D. Silver, and K. Kavukcuoglu. Asynchronous methods for deep reinforcement learning. In International conference on machine learning, pages 1928-1937. PMLR, 2016.
353
+ [27] V. Mnih, K. Kavukcuoglu, D. Silver, A. Graves, I. Antonoglou, D. Wierstra, and M. Riedmiller. Playing atari with deep reinforcement learning. arXiv preprint arXiv:1312.5602, 2013.
354
+ [28] J. Pennington, R. Socher, and C. D. Manning. Glove: Global vectors for word representation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543, 2014.
355
+ [29] M. E. Peters, M. Neumann, M. Iyyer, M. Gardner, C. Clark, K. Lee, and L. Zettlemoyer. Deep contextualized word representations. arXiv preprint arXiv:1802.05365, 2018.
356
+ [30] E. A. Platanios, A. Pauls, S. Roy, Y. Zhang, A. Kyte, A. Guo, S. Thomson, J. Krishnamurthy, J. Wolfe, J. Andreas, et al. Value-agnostic conversational semantic parsing. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3666–3681, 2021.
357
+ [31] X. Puig, K. Ra, M. Boben, J. Li, T. Wang, S. Fidler, and A. Torralba. Virtualhome: Simulating household activities via programs. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8494-8502, 2018.
358
+ [32] X. Puig, T. Shu, S. Li, Z. Wang, J. B. Tenenbaum, S. Fidler, and A. Torralba. Watch-and-help: A challenge for social perception and human-ai collaboration. arXiv preprint arXiv:2010.09890, 2020.
359
+ [33] A. Radford, K. Narasimhan, T. Salimans, and I. Sutskever. Improving language understanding by generative pre-training. 2018.
360
+ [34] A. Radford, J. Wu, R. Child, D. Luan, D. Amodei, I. Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.
361
+ [35] A. Raffin, A. Hill, A. Gleave, A. Kanervisto, M. Ernestus, and N. Dormann. Stable-baselines3: Reliable reinforcement learning implementations. Journal of Machine Learning Research, 22(268):1-8, 2021.
362
+ [36] M. Reid, Y. Yamada, and S. S. Gu. Can wikipedia help offline reinforcement learning? arXiv preprint arXiv:2201.12122, 2022.
363
+ [37] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
364
+
365
+ [38] P. Sharma, A. Torralba, and J. Andreas. Skill induction and planning with latent language. In Association for Computational Linguistics, 2022.
366
+ [39] B. Shen, F. Xia, C. Li, R. Martin-Martin, L. Fan, G. Wang, S. Buch, C. D'Arpino, S. Srivastava, L. P. Tchapmi, et al. igibson, a simulation environment for interactive tasks in large realisticscenes. arXiv preprint arXiv:2012.02924, 2020.
367
+ [40] M. Shridhar, J. Thomason, D. Gordon, Y. Bisk, W. Han, R. Mottaghi, L. Zettlemoyer, and D. Fox. Alfred: A benchmark for interpreting grounded instructions for everyday tasks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10740-10749, 2020.
368
+ [41] E. Todorov, T. Erez, and Y. Tassa. Mujoco: A physics engine for model-based control. In 2012 IEEE/RSJ International Conference on Intelligent Robots and Systems, pages 5026-5033. IEEE, 2012.
369
+ [42] M. Tsimpoukelli, J. Menick, S. Cabi, S. Eslami, O. Vinyals, and F. Hill. Multimodal few-shot learning with frozen language models. arXiv preprint arXiv:2106.13884, 2021.
370
+ [43] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin. Attention is all you need. arXiv preprint arXiv:1706.03762, 2017.
371
+ [44] J. Vig. A multiscale visualization of attention in the transformer model. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 37-42, Florence, Italy, July 2019. Association for Computational Linguistics.
372
+ [45] B. Wang, R. Shin, X. Liu, O. Polozov, and M. Richardson. Rat-sql: Relation-aware schema encoding and linking for text-to-sql parsers. arXiv preprint arXiv:1911.04942, 2019.
373
+ [46] T. Wolf, L. Debut, V. Sanh, J. Chaumont, C. Delangue, A. Moi, P. Cistac, T. Rault, R. Louf, M. Funtowicz, et al. Huggingface's transformers: State-of-the-art natural language processing. arXiv preprint arXiv:1910.03771, 2019.
374
+ [47] Z. Yang, Z. Dai, Y. Yang, J. Carbonell, R. R. Salakhutdinov, and Q. V. Le. Xlnet: Generalized autoregressive pretraining for language understanding. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019.
375
+ [48] Z. Yang, N. Garcia, C. Chu, M. Otani, Y. Nakashima, and H. Takemura. Bert representations for video question answering. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1556-1565, 2020.
376
+ [49] J. Zhu, Y. Xia, L. Wu, D. He, T. Qin, W. Zhou, H. Li, and T.-Y. Liu. Incorporating bert into neural machine translation. arXiv preprint arXiv:2002.06823, 2020.
377
+
378
+ # Checklist
379
+
380
+ 1. For all authors...
381
+
382
+ (a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
383
+ (b) Did you describe the limitations of your work? [Yes] See Section 9.
384
+ (c) Did you discuss any potential negative societal impacts of your work? [Yes] See Section 9.
385
+ (d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
386
+
387
+ 2. If you are including theoretical results...
388
+
389
+ (a) Did you state the full set of assumptions of all theoretical results? [N/A]
390
+ (b) Did you include complete proofs of all theoretical results? [N/A]
391
+
392
+ 3. If you ran experiments...
393
+
394
+ (a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes] In the supplemental material.
395
+ (b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes] See Section 6 and Appendix C.2.
396
+ (c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes] See Section 6.
397
+ (d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes] See Appendix C.2.
398
+
399
+ 4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
400
+
401
+ (a) If your work uses existing assets, did you cite the creators? [Yes]
402
+ (b) Did you mention the license of the assets? [N/A]
403
+ (c) Did you include any new assets either in the supplemental material or as a URL? [N/A]
404
+ (d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
405
+ (e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [Yes] See Section 9.
406
+
407
+ 5. If you used crowdsourcing or conducted research with human subjects...
408
+
409
+ (a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
410
+ (b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
411
+ (c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
412
+
413
+ # Appendix
414
+
415
+ In this appendix, we first show the convolutional encoding in BabyAI in Appendix A. We then describe the environment details in Appendix B and the implementation details of the proposed model in Appendix C. We show the algorithm for interactive evaluation in Appendix D and the data gathering procedure in Appendix E. The goal predicates used in the VirtualHome test subsets are shown in Appendix F. We visualize the attention weights of the language model in Appendix G.
416
+
417
+ # A Convolutional encoding in BabyAI
418
+
419
+ In the main paper Section 7.1, we explore the role of natural language by investigating two alternative ways of encoding policy inputs in VirtualHome. In this section, we show the third way of encoding policy inputs in BabyAI.
420
+
421
+ We test a new model, LID-Conv (Ours), that converts environment inputs into convolutional embeddings. We pass the $7 \times 7 \times 3$ grid observation in BabyAI to convolutional layers and obtain a $7 \times 7 \times d$ feature map, where $d$ is the feature dimension. We flatten the feature map and get a sequence of features to describe the observation. The rest of the model is the same as LID-Text (Ours). Table 6 shows the results of policies using the text encoding and convolutional encoding. LID-Text (Ours) and LID-Conv (Ours) have similar results given enough training data, but LID-Text (Ours) is slightly better when there is less training data. This conclusion is consistent with the results on VirtualHome.
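+
+ A sketch of this convolutional encoder is shown below; the intermediate channel width and kernel sizes are assumptions, and the resulting length-49 feature sequence takes the place of the word-token sequence fed to the LM.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ConvObservationEncoder(nn.Module):
+     """Encode a 7x7x3 BabyAI grid as a length-49 sequence of d-dimensional features."""
+     def __init__(self, d: int = 768):
+         super().__init__()
+         self.conv = nn.Sequential(
+             nn.Conv2d(3, 128, kernel_size=3, padding=1), nn.ReLU(),
+             nn.Conv2d(128, d, kernel_size=3, padding=1),
+         )
+
+     def forward(self, grid):                            # grid: (batch, 3, 7, 7)
+         feature_map = self.conv(grid)                   # (batch, d, 7, 7)
+         return feature_map.flatten(2).transpose(1, 2)   # (batch, 49, d) token sequence
+
+ encoder = ConvObservationEncoder()
+ tokens = encoder(torch.randn(1, 3, 7, 7))
+ print(tokens.shape)                                     # torch.Size([1, 49, 768])
+ ```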
422
+
423
+ Different input encoding schemes have only a negligible impact on model performance: the effectiveness of pre-training is not limited to utilizing natural strings, but in fact extends to arbitrary sequential encodings.
424
+
425
+ Table 6: Success rate of policies trained with text encoding vs. convolutional encoding on BabyAI. The text encoding is more sample-efficient, but both models converge to near perfect performance given sufficient training data.
426
+
427
+ <table><tr><td rowspan="2">Tasks</td><td rowspan="2">Methods</td><td colspan="5">Number of Demos</td></tr><tr><td>100</td><td>500</td><td>1K</td><td>5K</td><td>10K</td></tr><tr><td rowspan="2">GoToRedBall</td><td>LID-Text (Ours)</td><td>93.9</td><td>99.4</td><td>99.7</td><td>100.0</td><td>100.0</td></tr><tr><td>LID-Conv (Ours)</td><td>92.5</td><td>98.8</td><td>100.0</td><td>100.0</td><td>100.0</td></tr><tr><td rowspan="2">GoToLocal</td><td>LID-Text (Ours)</td><td>64.6</td><td>97.9</td><td>99.0</td><td>99.5</td><td>99.5</td></tr><tr><td>LID-Conv (Ours)</td><td>69.5</td><td>86.0</td><td>98.2</td><td>99.9</td><td>99.9</td></tr><tr><td rowspan="2">PickupLoc</td><td>LID-Text (Ours)</td><td>28.7</td><td>73.4</td><td>99.0</td><td>99.6</td><td>99.8</td></tr><tr><td>LID-Conv (Ours)</td><td>25.0</td><td>58.8</td><td>95.1</td><td>99.6</td><td>100.0</td></tr><tr><td rowspan="2">PutNextLocal</td><td>LID-Text (Ours)</td><td>11.1</td><td>93.0</td><td>93.2</td><td>98.9</td><td>99.9</td></tr><tr><td>LID-Conv (Ours)</td><td>17.9</td><td>53.6</td><td>91.3</td><td>97.7</td><td>99.5</td></tr></table>
428
+
429
+ # B Environments
430
+
431
+ We use BabyAI [16] and VirtualHome [31] to evaluate the proposed method. While both environments feature complex goals, the nature of these goals, as well as the state and action sequences that accomplish them, differ substantially across environments.
432
+
433
+ # B.1 VirtualHome
434
+
435
+ VirtualHome is a 3D realistic environment featuring partial observability, large action spaces, and long time horizons. It provides a set of realistic 3D homes and objects that can be manipulated to perform household organization tasks.
436
+
437
+ Goal Space. For each task, we define the goal as a set of predicates and multiplicities. For example, Inside(apple, fridge):2; Inside(pancake, fridge):1 means "put two apples and one pancake inside the fridge". In each task, the initial environment (including initial object locations), the goal predicates, and their order and multiplicities are randomly sampled. There are 59 different types of predicates in total.
438
+
439
+ ![](images/e4b0ba29f3f0d18c1919d0321917f7161ed19ab3cd1c4670249d01a81ad74d99.jpg)
440
+ Figure 6: Policy network in VirtualHome. The observation, goal, and history are first converted into sequences and then passed through an embedding layer $F_{\theta}$ . The combined sequence is passed through a pre-trained LM, and the output tokens are pooled into a context feature vector for action prediction.
441
+
442
+ Observation Space. The observation in VirtualHome by default is a graph describing a list of objects and their relations in the current partial observation. Each object has an object name, a state, e.g. open, close, clean, and 3D coordinates.
443
+
444
+ Action Space. Agents can navigate in the environment and interact with objects. To interact with an object, the agent must predict an action name and the index of the object of interest, e.g. Open(5) to open the object with index 5. The agent can only interact with objects that are in the current observation, or execute navigation actions such as Walk(bathroom). For some actions, such as open, the agent must be close to the object. There are also strict preconditions for actions, e.g. the agent must grab an object before it can put the object at a target position. As a result of these constraints, the subset of actions available to the agent changes at every timestep.
445
+
446
+ We evaluate the success rates of different methods on VirtualHome. A given episode is scored as successful if the policy completes its entire goal within $T$ steps, where $T = 70$ is the maximum allowed steps of the environment.
447
+
448
+ # B.2 BabyAI
449
+
450
+ BabyAI is a 2D grid world environment designed to evaluate instruction following. Different from VirtualHome, the observation in BabyAI by default is a $7 \times 7$ grid describing a partial and local egocentric view of the state of the environment. Each tile in the grid contains at most one object, encoded using 3 integer values: one for the object type, one for the object color, and a state for doors indicating whether it is open, closed or locked. The goals in BabyAI are language instructions, e.g. "put the blue key next to the purple ball". BabyAI has 7 actions, e.g. "turn left", "pick up", and "drop".
451
+
452
+ # C More implementation Details of LID in VirtualHome
453
+
454
+ In Appendix C.1, we provide more details of the model architecture used in Section 4.1 of the main paper. We then introduce the training details in Appendix C.2.
455
+
456
+ ![](images/a9176e07c7c873133a8f5644fa83fca12e4f32997b5bcc528e4b795e1010d423.jpg)
457
+ Figure 7: Object encoding. In VirtualHome, the partial observation of the environment state can be represented as a list of objects in the agent's view. Each object is represented by a name, a state vector, and position vector. Object name encoding: each object's name is an English phrase. We tokenize the phrase, embed the tokens, and average the embeddings. Object state encoding: each object is assigned one of six states: "clean", "closed", "off", "on", "open", or "none". This state is represented as a 6-dimensional binary vector and passed through a fully-connected layer. Object position encoding: an object's position vector is a 6-dimensional vector containing its world coordinates alongside its displacement to the agent (i.e. the difference in their world coordinates). This position vector is passed through two fully-connected layers. These three features are concatenated and passed through a fully-connected layer to obtain the representation of an object in the current observation.
458
+
459
+ # C.1 Model architecture details in VirtualHome
460
+
461
+ In this section, we provide more details of the policy network used in VirtualHome. Our policy model consists of three parts, i.e. the inputs, the pre-trained LM, and the outputs. As shown in Figure 6, we encode the inputs to the policy, including the goal $g$ , history $h_t$ , and current partial observation $o_t$ , as sequences of embeddings. These embeddings are passed to the LM (using its pre-trained embedding layer $F_\theta$ ) and used to obtain contextualized token representations. These token representations are averaged to generate a context feature $f_c$ , which is then passed to a fully-connected layer to predict the next action $a_t$ . The output action in VirtualHome consists of a verb and an object. For brevity, we omit the time subscript $t$ from now on.
462
+
463
+ In VirtualHome, the partial observation $o$ of the environment state can be represented as a list of objects in the agent's view. We represent each object by its name, e.g. "oven", a state description, e.g. "open, clean", and position both in the world and relative to the agent. In this part, we provide more details of how LID-Text (Ours) encodes the name, state, and position of each object in the observation. Figure 7 shows the model architecture we used to encode the observation.
464
+
465
+ Name encoding. For each object node, we serialize its object name as an English phrase $s^o$ . We extract its tokens and features using the tokenizer and the embedding layer of the pre-trained LM, respectively. Since one object name might produce several tokens under the pre-trained LM's tokenizer, e.g. the tokens of "kitchencabinet" are [15813, 6607, 16212, 500], we average the features of all the tokens in the object name to obtain a "name" feature $f_i^{o,\text{name}}$ for each object node, as shown in Figure 7.
466
+
467
+ State encoding. Some objects have a state description, e.g. "oven: open, clean". There are six types of object states in the environment: "clean", "closed", "off", "on", "open", and "none". For each object node, we use a binary vector to represent its state. Taking the "oven" as an example, if the oven is open and clean, its state vector would be $[1,0,0,0,1,0]$ . This state vector is then passed through a fully-connected layer to generate a state feature $f_{i}^{o,\mathrm{state}}$ of object $o_{i}$ .
468
+
469
+ Position encoding. To encode the position information of each object $o_i$ , we take its world coordinates $\{o_{i,x}, o_{i,y}, o_{i,z}\}$ and the agent's coordinates $\{a_x, a_y, a_z\}$ to generate a position vector $[o_{i,x}, o_{i,y}, o_{i,z}, o_{i,x} - a_x, o_{i,y} - a_y, o_{i,z} - a_z]$ , i.e. the object's world coordinates together with its displacement from the agent. This position vector is then passed through two fully-connected layers with a ReLU layer in the middle to generate a position feature $f_i^{o,\text{position}}$ of object $o_i$ .
470
+
471
+ The final feature $f_{i}^{o}$ of each object node is obtained by passing the concatenation of its name feature $f_{i}^{o,\text{name}}$ , state feature $f_{i}^{o,\text{state}}$ , and position feature $f_{i}^{o,\text{position}}$ through a fully connected layer. The observation at a single step can be written as a set of features $\{f_{1}^{o},\dots ,f_{N}^{o}\}$ , where $N$ is the number of objects in the current observation.
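+
+ Putting the three encodings together, a minimal sketch of the per-object encoder is given below; the feature dimension and the stand-in embedding layer are assumptions (in the full model the embedding layer $F_\theta$ comes from the pre-trained GPT-2).
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class ObjectEncoder(nn.Module):
+     """Encode one observed object from its name embedding, state vector, and position vector."""
+     def __init__(self, lm_embedding: nn.Embedding, d: int = 768):
+         super().__init__()
+         self.lm_embedding = lm_embedding                   # F_theta from the pre-trained LM
+         self.state_fc = nn.Linear(6, d)                    # six possible object states
+         self.position_fc = nn.Sequential(nn.Linear(6, d), nn.ReLU(), nn.Linear(d, d))
+         self.output_fc = nn.Linear(3 * d, d)
+
+     def forward(self, name_token_ids, state_vector, position_vector):
+         name_feat = self.lm_embedding(name_token_ids).mean(dim=1)   # average over name tokens
+         state_feat = self.state_fc(state_vector)
+         position_feat = self.position_fc(position_vector)
+         return self.output_fc(torch.cat([name_feat, state_feat, position_feat], dim=-1))
+
+ # Example with a stand-in embedding layer (in practice the embeddings come from GPT-2).
+ encoder = ObjectEncoder(nn.Embedding(50257, 768))
+ obj_feat = encoder(torch.tensor([[15813, 6607, 16212, 500]]),       # tokens of "kitchencabinet"
+                    torch.tensor([[1., 0., 0., 0., 1., 0.]]),        # object is clean and open
+                    torch.randn(1, 6))                               # world coords + offset to agent
+ print(obj_feat.shape)                                               # torch.Size([1, 768])
+ ```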
472
+
473
+ # C.2 Training details
474
+
475
+ Our proposed approach and the baselines are trained on Tesla 32GB GPUs; each model is trained on a single Tesla 32GB GPU. All experiments use the AdamW optimizer with a learning rate of $10^{-5}$ . We utilize a standard pre-trained language model, GPT-2, pre-trained on the WebText dataset [34], via the Huggingface library [46].
476
+
477
+ # D Interactive Evaluation
478
+
479
+ The algorithm for interactive evaluation is shown in Algorithm 1.
480
+
481
+ Algorithm 1: Interactive evaluation
482
+ ```txt
483
+ Given: a set of task goals $G$ (each goal has a corresponding initial state); the learned policy $\pi_{\phi}$
+ Initialize: successful trajectory count $n = 0$
+ for example = 1, ..., $N_{test}$ do
+     Sample a goal $g$ and its corresponding initial state
+     for $t = 0, \dots, T$ do
+         Sample an action $a_{t}$ from policy $\pi_{\phi}(a_t \mid g, h_t, o_t)$
+         Execute the action $a_{t}$ and get a new observation $o_{t + 1}$
+         if success then
+             $n = n + 1$; break
+         end
+     end
+ end
+ Success rate: $r = n / N_{\mathrm{test}}$
487
+ ```
488
+
489
+ # E Data Gathering Details in VirtualHome
490
+
491
+ In this section, we provide more data gathering details in VirtualHome for training the decision-making policies. We introduce the expert data collection and active data gathering in Appendix E.1 and Appendix E.2, respectively.
492
+
493
+ # E.1 Expert Data Collection
494
+
495
+ VirtualHome-Imitation Learning Dataset. To train the models, we collect a set of expert trajectories in VirtualHome using regression planning (RP) [20]. We follow the implementation of the regression planner used in [32]. Given a task described by goal predicates, the planner generates an action sequence to accomplish this task. As shown in Figure 8, the agent has a belief about the environment, i.e. an imagined distribution of object locations. As the agent explores the environment, its belief of the world becomes closer to the real world. At every step, the agent updates its belief based on the latest observation (see [32]), finds a new plan using the regression planner, and executes the first action of the plan. If the subtask (described by the goal predicate) has been finished, the agent will select a new unfinished subtask, otherwise, the agent will keep doing this subtask until it finishes.
496
+
497
+ Similarly to previous work [40, 39, 32], we generate training data using a planner that has access to privileged information, such as full observation of the environment and information about the pre-conditions and effects of each action. The planner allows an agent to robustly perform tasks in partially observable environments and generate expert trajectories for training and evaluation. We generate 20,000 trajectories for training and 3,000 trajectories for validation. Each trajectory has a goal, an action sequence, and the corresponding observations after executing each action.
498
+
499
+ ![](images/abf9360d4955054c44481fde294d14cd64b08eaa2c61dcfb262598da8f9dbeb5.jpg)
500
+ Figure 8: Regression planner. Given a task described by goal predicates, the planner generates an action sequence to accomplish this task. The agent has a belief about the environment, i.e. an imagined distribution of object locations. As the agent explores the environment, its belief of the world becomes closer to the real world. At every step, the agent updates its belief based on the latest observation, finds a new plan using the regression planner, and executes the first action of the plan. If the subtask (described by the goal predicate) has been finished, the agent selects a new unfinished subtask; otherwise, the agent keeps working on this subtask until it is finished.
501
+
502
+ # E.2 Active Data Gathering
503
+
504
+ The algorithm for active data gathering is shown in Algorithm 2. To sample the goal and initial state, we first generate a set of initial states in VirtualHome using the code released by [32]. For each initial state, we are able to get a set of feasible tasks that can be accomplished in this environment. For example, in an initial state, if the apple is on the kitchen table, a feasible task goal could be "put the apple inside the fridge". In contrast, "put the banana inside the fridge" is not a feasible task if there is no banana in the initial state.
505
+
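+ The feasibility check is conceptually simple: a candidate goal is only admissible if every object it mentions is present in the initial state. A hedged sketch, with a hypothetical set-of-objects representation of the initial state:
+
+ ```python
+ def feasible_goals(initial_state_objects, candidate_goals):
+     """Keep only the goals whose objects exist in the initial state.
+
+     initial_state_objects: set of object names in the scene, e.g. {"apple", "fridge"}.
+     candidate_goals: list of (predicate, object, container) tuples, e.g. ("inside", "apple", "fridge").
+     """
+     feasible = []
+     for predicate, obj, container in candidate_goals:
+         if obj in initial_state_objects and container in initial_state_objects:
+             feasible.append((predicate, obj, container))
+     return feasible
+
+ # "put the apple inside the fridge" is feasible; "put the banana inside the fridge" is not.
+ goals = feasible_goals({"apple", "kitchen table", "fridge"},
+                        [("inside", "apple", "fridge"), ("inside", "banana", "fridge")])
+ ```
+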
506
+ We collect 9893 initial states, and randomly sample an initial state and one of its feasible goals every time we reset the environment. After each data collection iteration, we obtain a set of new goals using the goal relabel function. We save each goal and its corresponding initial state in the replay buffers and use the same strategy to sample the goal and initial state in the next iteration.
507
+
508
+ The hindsight relabeling stage is the key component of active data gathering. Here we provide more implementation details on how we relabel "failed" trajectories with new goals in this stage. For each "failed" trajectory, we extract its useful sub-trajectories and relabel a task goal $g'$ for each of them. We design a goal relabel function $f_{l}$ that generates a goal based on the sequence of observations and actions. To do this, we first use a hand-designed program to detect what tasks are contained in a "failed" trajectory. This program finds useful tasks based on keywords in the action list. For example, in Figure 9, the program infers that the trajectory contains the task "On(apple, kitchen table):1" based on the action "[put] <apple> <kitchentable>".
509
+
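+ A minimal sketch of such a keyword-based relabel function $f_{l}$ is given below. The mapping from action keywords to goal-predicate templates is illustrative only; the hand-designed rules used in practice are more extensive.
+
+ ```python
+ import re
+
+ # Illustrative keyword-to-predicate templates (the real hand-designed rules cover more cases).
+ KEYWORD_TO_PREDICATE = {
+     "put": "On({obj}, {target}): 1",
+     "putin": "Inside({obj}, {target}): 1",
+ }
+
+ def relabel_goals(actions):
+     """Detect goal predicates achieved in a (possibly failed) trajectory from action keywords.
+
+     actions: list of strings such as "[put] <apple> <kitchentable>".
+     """
+     goals = []
+     for act in actions:
+         m = re.match(r"\[(\w+)\]\s*<([^>]+)>(?:\s*<([^>]+)>)?", act)
+         if not m:
+             continue
+         verb, obj, target = m.group(1), m.group(2), m.group(3)
+         template = KEYWORD_TO_PREDICATE.get(verb)
+         if template and target:
+             goals.append(template.format(obj=obj, target=target))
+     return goals
+
+ # relabel_goals(["[grab] <apple>", "[put] <apple> <kitchentable>"]) -> ["On(apple, kitchentable): 1"]
+ ```
+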
510
+ The selected sub-trajectories are not always optimal. We thus design a rule to filter out bad trajectories, i.e. for trajectories with the same goal, we keep only the "shorter" ones. One example is shown in Figure 10. Suppose that two trajectories have the same goal, e.g. "On(apple, kitchen table):1". The first trajectory contains actions that are redundant or unrelated to the task, such as "[walk] <bathroom>" and "[walk] <kitchen>", while the second trajectory is closer to optimal given the goal. We keep the second trajectory and delete the first one from the replay buffer. Note that "shorter" does not mean fewer actions overall, but fewer actions that are unrelated to the task. The hindsight relabeling stage enables sample-efficient learning by reusing failure cases. The relabeled data are used to train policies in the policy update stage. A sketch of this filtering rule is shown below.
511
+
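+ The sketch assumes a hypothetical `is_task_related(action, goal)` predicate; "shorter" is measured by the number of task-unrelated actions rather than the total number of actions.
+
+ ```python
+ def keep_shortest_for_goal(buffer, goal, is_task_related):
+     """Among relabeled trajectories with the same goal, keep the one with the fewest
+     task-unrelated actions and drop the others from the replay buffer."""
+     candidates = [traj for traj in buffer if traj["goal"] == goal]
+     if len(candidates) < 2:
+         return buffer
+     def unrelated_count(traj):
+         return sum(1 for a in traj["actions"] if not is_task_related(a, goal))
+     best = min(candidates, key=unrelated_count)
+     return [traj for traj in buffer if traj["goal"] != goal or traj is best]
+ ```
+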
512
+ Algorithm 2: Active Data Gathering
513
+ Given: a goal relabel function $f_{l}$
514
+ Initialize: policy $\pi_{\phi}$; goal set $G$; training replay buffer $\mathcal{R}_{train} = \{\}$; validation replay buffer $\mathcal{R}_{val} = \{\}$
+ for iteration $= 1, N$ do
+     for example $= 1, M$ do
+         Sample a goal $g$ from $G$ and an initial state $s_1$;
+         for $t = 1, T$ do
+             Sample an action from policy $\pi_{\phi}(a_t|g,h_t,o_t)$ or sample an action randomly;
+             Execute $a_{t}$ and obtain a new observation $o_{t + 1}$;
+         end
+         Store the trajectory $(o_1,a_1,\dots ,o_T,a_T,g)$ in the replay buffer $\mathcal{R}_{train}$ or $\mathcal{R}_{val}$;
+     end
+     Relabel each failure trajectory $d = (o_1,a_1,\dots ,o_T,a_T)$ in the replay buffers and get a new goal $g^{\prime} = f_{l}(d)$;
+     Put the new goals $g^{\prime}$ in the goal set $G$;
+     for $k = 1, K$ do
+         repeat
+             Sample data from $\mathcal{R}_{train}$ and update policy $\pi_{\phi}$;
+         until training episode ends;
+         Get validation accuracy using the data from $\mathcal{R}_{val}$;
+     end
+ end
+ $\pi_{\phi} = \pi_{\mathrm{val\_best}}$
515
+
516
+ ```txt
517
+ Action generated by the current policy or random exploration:
518
+ [walk] <kitchen>
519
+ [walk] <kitchen cabinet 1>
520
+ [open] <kitchen cabinet 1>
521
+ [walk] <kitchen cabinet 2>
522
+ [open] <kitchen cabinet 2>
523
+ [grab] <apple>
524
+ [walk] <kitchentable>
525
+ [put] <apple> <kitchentable>
526
+ [walk] <bedroom>
527
+ ...
528
+ ```
529
+
530
+ ![](images/89bb76882cff22abfef44af73f4ac00cafe574223a8708c0d89e87c10bb7e3ac.jpg)
531
+ Figure 9: We first use a hand-designed program to detect what tasks are contained in the collected trajectory. This program finds tasks based on keywords in the action list. For example, the program infers that the trajectory contains the task "On(apple, kitchen table):1" based on the action "[put] <apple> <kitchentable>". Then the program extracts all previous actions related to this task using hand-designed rules.
532
+
533
+ ```txt
534
+ Extract the useful sub-trajectory and relabel a task goal: [walk] <kitchen>; [walk] <kitchen cabinet 1>; ...; [walk] <kitchentable>; [put] <apple> <kitchentable>; [walk] <bedroom>; ... On (apple, kitchen table): 1
535
+ ```
536
+
537
+ # F Test Sets in VirtualHome
538
+
539
+ In this section, we provide more details of each test set. We first introduce the test sets used for evaluating the proposed model trained on expert data, i.e. LID, in Section F.1. We then show the test sets used for evaluating the proposed model with active data gathering, i.e. LID-ADG, in Section F.2.
540
+
541
+ # F.1 LID Test Sets
542
+
543
+ In Section 6.1, we compared the proposed method and baselines trained on expert data. In Table 7, we provide a detailed description of each test subset, including the count of goal predicate types and the number of goal predicates in each task. The In-Distribution setting has 37 goal predicates in total and each task has $2 \sim 10$ goal predicates. The tasks are drawn from the same distribution as the training tasks. The Novel Scenes setting also has 37 goal predicates and each task has $2 \sim 10$ goal predicates. The objects are randomly placed in the initial environment. The Novel Tasks setting has 22 goal predicates in total and each task has $2 \sim 8$ goal predicates. The tasks are never seen during training.
544
+
545
+ Goal: On (apple, kitchen table): 1
546
+
547
+ Action list 1:
548
+
549
+ ... [walk] <livingroom>; [grab] <apple>; [walk] <kitchen>; [walk] <bathroom>; [walk] <kitchen>; [put] <apple> <kitchentable> ...
550
+
551
+ Action list 2:
552
+
553
+ ... [walk] <livingroom>; [grab] <apple>; [walk] <kitchen>; [put] <apple> <kitchentable> ...
554
+
555
+ Figure 10: Suppose there are two trajectories with the same goal, e.g. "On(apple, kitchen table):1". The first trajectory contains actions that are redundant or unrelated to the task, such as "[walk] <bathroom>" and "[walk] <kitchen>", while the second trajectory is closer to optimal given the goal. We keep the second trajectory and delete the first one from the replay buffer. Note that "shorter" does not mean fewer actions overall, but fewer actions that are unrelated to the task.
556
+
557
+ Table 7: Test sets used for evaluating the proposed model trained on the expert data. We show the count of goal predicate types and the number of goal predicates used in each task.
558
+
559
+ <table><tr><td>Test Sets</td><td>Predicate Types</td><td>#Predicate Per Task</td><td>Compared with the training set</td></tr><tr><td>In-Distribution</td><td>37</td><td>2 ~ 10</td><td>Tasks are drawn from the same distribution as training tasks.</td></tr><tr><td>Novel Scenes</td><td>37</td><td>2 ~ 10</td><td>The objects are randomly placed in the initial environment.</td></tr><tr><td>Novel Tasks</td><td>22</td><td>2 ~ 8</td><td>Tasks are never seen during training.</td></tr></table>
560
+
561
+ # F.2 LID-ADG Test Sets
562
+
563
+ As mentioned in Section 9 of the main paper, one limitation of active data gathering is that it relies on hand-designed rules for task relabeling. In addition, it is sometimes challenging to define effective rules to extract useful sub-trajectories and obtain high-quality hindsight labels, especially when trajectories are long and tasks become more complex. Thus we only relabel short sub-trajectories, where the goal consists of a single goal predicate, e.g. "On(apple, kitchen table):1". During testing, we likewise evaluate the success rate of each approach on such tasks, i.e. tasks where the count of the goal predicate equals 1. The types of goal predicates are the same as in Section F.1, i.e. 37 goal predicates in the In-Distribution and Novel Scenes settings, and 22 goal predicates in the Novel Tasks setting.
564
+
565
+ # G Visualization of Attention Weights
566
+
567
+ To better understand how LM-pre-trained policies make decisions, we visualize the attention weights from the self-attention layers of GPT-2 [43] in Figure 11 and Figure 12. At inference time, while decoding the actions, we save the self-attention weights for the different layers and heads. We then use the BertViz library [44] to visualize the normalized attention weights. We show the attention weights from the input to the output of LID-Text (Ours). The order of tokens in the input and output is observation, goal, and history. In Figure 11 and Figure 12, the left side is the query side. The boldness of the lines is proportional to the attention weight.
568
+
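+ As a rough sketch of this procedure with the Hugging Face transformers and BertViz libraries (the policy in the paper is a fine-tuned GPT-2 over encoded observation, goal, and history tokens, so the stock gpt2 checkpoint and the plain-text prompt below are only illustrative):
+
+ ```python
+ from transformers import GPT2Tokenizer, GPT2Model
+ from bertviz import head_view
+
+ tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+ model = GPT2Model.from_pretrained("gpt2", output_attentions=True)
+
+ # In the paper the input is the encoded observation, goal, and history;
+ # a plain-text stand-in is used here for illustration.
+ inputs = tokenizer("put the milk on the kitchen table", return_tensors="pt")
+ outputs = model(**inputs)
+
+ # outputs.attentions holds one (batch, heads, seq, seq) tensor per layer;
+ # head_view renders the normalized per-head, per-layer attention in a notebook.
+ tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
+ head_view(outputs.attentions, tokens)
+ ```
+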
569
+ Figure 11 illustrates the attention weights of a layer named "Head 3 Layer 2". We show attention weights on two different tasks. We find that "Head 3 Layer 2" can capture objects in the goal predicates, such as "wineglass" and "cutleryfork" in the left figure, and "pancake" and "chicken" in the right figure (the figures are cropped for visualization).
570
+
571
+ Figure 12 illustrates the attention weights of layers named "Head 1 Layer 2" (left) and "Head 4 Layer 11" (right). Given the goal predicates, history, and the current observation, the policy predicts the next action as "grab milk". We find that "Head 1 Layer 2" is able to capture objects in the goal predicates, such as "milk", "pancake", and "chicken" while "Head 4 Layer 11" focuses on the interacted object in the predicted action, such as "milk".
572
+
573
+ The attention weights from different self-attention layers are significantly different: some self-attention layers assign high attention weight to objects in the goal predicates, while other layers focus on the interacted object. There are also some layers that do not have interpretable meanings. The attention weights only provide an intuition of how the internal language model works; more quantitative results are reported in the main paper.
576
+
577
+ ![](images/24197d86b414459b523b182adf862c64f10e511830dfd9fc806ee91cbb8cf8d1.jpg)
578
+ Figure 11: Attention weights of a layer named "Head 3 Layer 2". We show attention weights on two different tasks. We find that "Head 3 Layer 2" is able to capture objects in the goal predicates, such as "wineglass" and "cutleryfork" in the left figure, and "pancake" and "chicken" in the right figure (the figures are cropped for visualization).
579
+
580
+ ![](images/84329379874e6a048cc27ae01030cab93e59bdce2c6f04d98e685269acdc7424.jpg)
581
+
582
+ Goal: INSIDE (pancake, microwave): 1; ON (chicken, kitchentable): 2; ON (milk, kitchentable): 1; CLOSE (microwave); TURNON (microwave)
+
+ Action: [grab] <milk>
667
+
668
+ Figure 12: Attention weights of layers named "Head 1 Layer 2" (left) and "Head 4 Layer 11" (right). Given the goal predicates, history, and the current observation, the policy model predicts the next action as "grab milk". We find that "Head 1 Layer 2" can capture objects in the goal predicates, such as "milk", "pancake", and "chicken" while "Head 4 Layer 11" focuses on the interacted object in the predicted action, such as "milk".
2202.01xxx/2202.01771/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c645b5eaa4f0368fca176bca8536fbe9e1081f1b2caacf99c88eb2418e986584
3
+ size 846497
2202.01xxx/2202.01771/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01855/5166ae8b-4e4d-4f8b-a350-11682cc56b73_content_list.json ADDED
@@ -0,0 +1,1361 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Self-Supervised Learning with Random-Projection Quantizer for Speech Recognition",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 176,
8
+ 109,
9
+ 795,
10
+ 154
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Chung-Cheng Chiu $^{*1}$ James Qin $^{*1}$ Yu Zhang $^{1}$ Jiahui Yu $^{1}$ Yonghui Wu",
17
+ "bbox": [
18
+ 215,
19
+ 198,
20
+ 751,
21
+ 215
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract",
28
+ "text_level": 1,
29
+ "bbox": [
30
+ 241,
31
+ 242,
32
+ 318,
33
+ 258
34
+ ],
35
+ "page_idx": 0
36
+ },
37
+ {
38
+ "type": "text",
39
+ "text": "We present a simple and effective self-supervised learning approach for speech recognition. The approach learns a model to predict the masked speech signals, in the form of discrete labels generated with a random-projection quantizer. In particular the quantizer projects speech inputs with a randomly initialized matrix, and does a nearest-neighbor lookup in a randomly-initialized codebook. Neither the matrix nor the codebook is updated during self-supervised learning. Since the random-projection quantizer is not trained and is separated from the speech recognition model, the design makes the approach flexible and is compatible with universal speech recognition architecture. On LibriSpeech our approach achieves similar word-error-rates as previous work using self-supervised learning with non-streaming models, and provides lower word-error-rates and latency than wav2vec 2.0 and w2v-BERT with streaming models. On multilingual tasks the approach also provides significant improvement over wav2vec 2.0 and w2v-BERT.",
40
+ "bbox": [
41
+ 117,
42
+ 267,
43
+ 442,
44
+ 599
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "1. Introduction",
51
+ "text_level": 1,
52
+ "bbox": [
53
+ 86,
54
+ 631,
55
+ 215,
56
+ 646
57
+ ],
58
+ "page_idx": 0
59
+ },
60
+ {
61
+ "type": "text",
62
+ "text": "Self-supervised learning has shown impressive improvement for the quality of the speech recognition models in recent years (Schneider et al., 2019; Baevski et al., 2020a; 2019; 2020b; Hsu et al., 2021; Zhang et al., 2020; Chung et al., 2021; Zhang et al., 2021). These learning approaches enable the model to learn from unsupervised data and combine with supervised learning to improve the recognition accuracy. The capability of learning from unsupervised data is particularly beneficial when the supervised data is limited",
63
+ "bbox": [
64
+ 84,
65
+ 657,
66
+ 475,
67
+ 792
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "text",
73
+ "text": "*Equal contribution <Google Research, Brain Team. Correspondence to: Chung-Cheng Chiu <chungchengc@google.com>, James Qin <jamesqin@google.com>, Yu Zhang <ngyuzh@google.com>.",
74
+ "bbox": [
75
+ 84,
76
+ 801,
77
+ 473,
78
+ 854
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Proceedings of the $39^{th}$ International Conference on Machine Learning, Baltimore, Maryland, USA, PMLR 162, 2022. Copyright 2022 by the author(s).",
85
+ "bbox": [
86
+ 84,
87
+ 864,
88
+ 473,
89
+ 905
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "and opens up new opportunities for low resource languages and domains.",
96
+ "bbox": [
97
+ 496,
98
+ 244,
99
+ 883,
100
+ 273
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "One common design principle of self-supervised learning for speech recognition centers around learning representations. Inspired by the success of BERT (Devlin et al., 2018), one research trend in the speech community is to build BERT-inspired algorithms. One challenge in building BERT-style self-supervised learning for speech is to bridge the gap between continuous speech signals and the discrete text tokens, and a solution for addressing this issue is through learning speech representation (Schneider et al., 2019; Baevski et al., 2020b) or learning quantized representation (Baevski et al., 2020b;a; 2019; Hsu et al., 2021; Chung et al., 2021). Many previous works proposed effective algorithms for learning speech representations, and the quantized result of those learned representations showed encouraging correlation with the phoneme of the utterances.",
107
+ "bbox": [
108
+ 495,
109
+ 281,
110
+ 887,
111
+ 508
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "While representation learning is a critical topic for the speech field, combining it with self-supervised learning leads to two limitations that can slow the research progress: (1) Model architecture limitation. The integration of representation learning and self-supervised learning often requires the model to act the role of providing speech representation while still being effective for the downstream tasks. An effective representation model, however, may not always be effective for the downstream tasks. For example, a good representation learning model may require accessing the future context of the utterance, while downstream tasks may require a low latency model which prohibits the access of the future context. (2) Increased complexity. The objectives of representation learning and self-supervised learning are not always aligned, and the complexity of designing both algorithms and finding their balance can impede the research development. This complexity can also motivate the field toward designing more complicated algorithms instead of finding a simple and effective alternative.",
118
+ "bbox": [
119
+ 495,
120
+ 515,
121
+ 887,
122
+ 801
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "text",
128
+ "text": "In this work we propose BERT-based Speech pre-Training with Random-projection Quantizer (BEST-RQ), a simple and effective self-supervised learning algorithm for speech recognition. The algorithm masks speech signals and feeds them to the encoder part of the speech recognition model, and the encoder learns to predict the masked region based",
129
+ "bbox": [
130
+ 495,
131
+ 810,
132
+ 885,
133
+ 900
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "aside_text",
139
+ "text": "arXiv:2202.01855v2 [cs.CL] 29 Jun 2022",
140
+ "bbox": [
141
+ 22,
142
+ 268,
143
+ 57,
144
+ 705
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "on the unmasked speech signals where the learning targets are labels provided by a random-projection quantizer. The random projection quantizer projects speech signals to a randomly initialized matrix, and finds a nearest vector in a randomly initialized codebook. The index of that vector is the target label. Neither the projection matrix nor the codebook is updated throughout the learning process. The quantizer does not require representation learning, and its separation from the model removes the limitation on the architecture design of the model. Despite its simplicity, on LibriSpeech the algorithm achieves similar results as previous work with non-streaming models, and provides better improvement with streaming models compared with previous approaches. On multilingual tasks, the algorithm exhibits further gains compared to wav2vec 2.0 (Baevski et al., 2020b) and w2v-BERT (Chung et al., 2021).",
151
+ "bbox": [
152
+ 88,
153
+ 85,
154
+ 473,
155
+ 325
156
+ ],
157
+ "page_idx": 1
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "We conduct further analysis on the relation between representation learning quality and the self-supervised learning quality, and demonstrate that the two objectives are not inherently aligned in Section 4.3 and Section 4.4. Such an observation is central to our design of self-supervised learning without representation learning, and opens up a new, less complicated research direction for self-supervised learning.",
162
+ "bbox": [
163
+ 88,
164
+ 334,
165
+ 473,
166
+ 454
167
+ ],
168
+ "page_idx": 1
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "2. Related Work",
173
+ "text_level": 1,
174
+ "bbox": [
175
+ 89,
176
+ 474,
177
+ 225,
178
+ 489
179
+ ],
180
+ "page_idx": 1
181
+ },
182
+ {
183
+ "type": "text",
184
+ "text": "Many of the previous work on self-supervised learning for speech recognition focus on learning speech representation. wav2vec (Schneider et al., 2019) applies contrastive learning to learn the future representation based on the past context. vq-wav2vec (Baevski et al., 2020a) uses wav2vec to learn the representations and quantizes them to discrete tokens, and performs BERT-style pre-training to further improve the representation learning. DiscreteBERT (Baevski et al., 2019) extends vq-wav2vec by fine-tuning the BERT-pre-trained model on the downstream tasks. wav2vec 2.0 (Baevski et al., 2020b) uses contrastive learning with both past and future context to predict the representation of the masked parts. HuBERT (Hsu et al., 2021) uses k-means to learn the initial quantizer that maps speech signals to discrete labels, and performs BERT-style pre-training where the inputs are masked speech signals and prediction targets are discrete labels. HuBERT further uses the pretrained model as the new quantizer to train a new iteration of the model, and repeat the process to iteratively improve the pre-training results. w2v-BERT (Chung et al., 2021) uses a sub-network of the model to perform contrastive learning to learn speech representation, and use the rest of the network to perform BERT-style pre-training. w2v-BERT trains the representation learning and the BERT-style pre-training simultaneously. Our approach distinguishes from these work in avoiding the requirement of representation learning and",
185
+ "bbox": [
186
+ 88,
187
+ 500,
188
+ 473,
189
+ 892
190
+ ],
191
+ "page_idx": 1
192
+ },
193
+ {
194
+ "type": "image",
195
+ "img_path": "images/28a10ec1267219d950b28062ad87cae4d751b3791977246d70eb5517473fef7b.jpg",
196
+ "image_caption": [
197
+ "Figure 1. Overview of BEST-RQ. The approach applies random projections to project the input speech signals to a randomly initialized codebook, and map them to discrete labels through finding the nearest vector in the codebook. The pre-training objective is for the ASR encoder to take the masked input signals and predict the labels corresponding to the masked part provided by the random-projection quantizer."
198
+ ],
199
+ "image_footnote": [],
200
+ "bbox": [
201
+ 504,
202
+ 84,
203
+ 879,
204
+ 301
205
+ ],
206
+ "page_idx": 1
207
+ },
208
+ {
209
+ "type": "text",
210
+ "text": "separating the quantizer from the speech recognition model.",
211
+ "bbox": [
212
+ 501,
213
+ 458,
214
+ 885,
215
+ 472
216
+ ],
217
+ "page_idx": 1
218
+ },
219
+ {
220
+ "type": "text",
221
+ "text": "Our quantizer project input signals with a random matrix, which is similar to performing dimension reduction for the input signals. Using such quantization results as prediction target for self-supervised learning share a similar structure as the masked autoencoder (MAE) (He et al., 2021), which directly reconstruct the masked input signals. Another similar work in the computer vision community is BEiT (Bao et al., 2021), which trains a VQ-VAE (van den Oord et al., 2018) as the quantizer and use the VQ-VAE to perform BERT-style self-supervised learning. Different from these approaches, our algorithm does not require training the quantizer which further simplifies the training process.",
222
+ "bbox": [
223
+ 501,
224
+ 479,
225
+ 885,
226
+ 660
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "text",
232
+ "text": "3. Self-supervised Learning with Random-projection Quantizer",
233
+ "text_level": 1,
234
+ "bbox": [
235
+ 501,
236
+ 681,
237
+ 772,
238
+ 715
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "text",
244
+ "text": "BEST-RQ applies a random-projection quantizer to map speech signals to discrete labels to enable BERT-style pretraining for ASR encoders. The quantizer randomly initializes a matrix and a codebook, and uses the matrix to project the input speech signals and the codebook to find the nearest vector where the index of the vector is the label. The pre-training process masks the speech signals and feeds them to the ASR encoder and trains the ASR encoder to predict labels of the masked part. Both the randomly initialized matrix and codebook are fixed during the pre-training process. The input data is normalized to have 0 mean and standard deviation of 1. The normalization is critical for pre",
245
+ "bbox": [
246
+ 501,
247
+ 724,
248
+ 885,
249
+ 904
250
+ ],
251
+ "page_idx": 1
252
+ },
253
+ {
254
+ "type": "header",
255
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
256
+ "bbox": [
257
+ 218,
258
+ 56,
259
+ 751,
260
+ 70
261
+ ],
262
+ "page_idx": 1
263
+ },
264
+ {
265
+ "type": "text",
266
+ "text": "venting the random projection to collapse to a small subset of codes. The framework is described in Figure 1. After the pre-training process the resulting ASR encoder is adopted to fine-tune on downstream ASR tasks.",
267
+ "bbox": [
268
+ 84,
269
+ 85,
270
+ 473,
271
+ 145
272
+ ],
273
+ "page_idx": 2
274
+ },
275
+ {
276
+ "type": "text",
277
+ "text": "The approach applies masks directly on the speech signal, where the masking strategy samples at every frame whether to apply masks with a fixed probability. Each mask spans from the starting frame with a fixed length. The masked parts are replaced with a noise sampled from a normal distribution with 0 mean and 0.1 standard deviation.",
278
+ "bbox": [
279
+ 84,
280
+ 152,
281
+ 475,
282
+ 243
283
+ ],
284
+ "page_idx": 2
285
+ },
286
+ {
287
+ "type": "text",
288
+ "text": "3.1. Random-projection Quantizer",
289
+ "text_level": 1,
290
+ "bbox": [
291
+ 84,
292
+ 260,
293
+ 331,
294
+ 273
295
+ ],
296
+ "page_idx": 2
297
+ },
298
+ {
299
+ "type": "text",
300
+ "text": "Given an input vector $x$ where $x$ is a $d$ -dimensional vector computed from speech signals, the random-projection quantizer maps $x$ to discrete labels $y$ through",
301
+ "bbox": [
302
+ 84,
303
+ 282,
304
+ 475,
305
+ 329
306
+ ],
307
+ "page_idx": 2
308
+ },
309
+ {
310
+ "type": "equation",
311
+ "text": "\n$$\ny = \\underset {i} {\\operatorname {a r g m i n}} | | \\operatorname {n o r m} _ {l 2} \\left(c _ {i}\\right) - \\operatorname {n o r m} _ {l 2} (A x) | |, \\tag {1}\n$$\n",
312
+ "text_format": "latex",
313
+ "bbox": [
314
+ 130,
315
+ 335,
316
+ 473,
317
+ 359
318
+ ],
319
+ "page_idx": 2
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "where $A$ denotes a randomly initialized $h \\times d$ matrix and $C = \\{c_1, \\dots, c_n\\}$ is a set of randomly initialized $h$ -dimensional vectors, $\\text{norm}_{l2}(.)$ is a function that normalizes the vector to have unit $l2$ norm. The projection matrix $A$ use Xavier initialization (Glorot & Bengio, 2010) and the codebook $C$ use standard normal distribution for initialization, and the parameters are fixed during the pre-training process and therefore the quantizations are consistent during training.",
324
+ "bbox": [
325
+ 84,
326
+ 364,
327
+ 475,
328
+ 502
329
+ ],
330
+ "page_idx": 2
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "3.2. Pre-training",
335
+ "text_level": 1,
336
+ "bbox": [
337
+ 84,
338
+ 518,
339
+ 207,
340
+ 534
341
+ ],
342
+ "page_idx": 2
343
+ },
344
+ {
345
+ "type": "text",
346
+ "text": "The pre-training process adds a softmax layer on top of the ASR encoder to learn to predict the quantized speech labels. Since the random-projection quantizer is independent of the ASR encoder, the pre-training is flexible and can work with different architectures of the ASR encoder. We study the effectiveness of the algorithm on both non-streaming and streaming models, and in our experiments we use Conformer (Gulati et al., 2020) as the building block.",
347
+ "bbox": [
348
+ 84,
349
+ 542,
350
+ 475,
351
+ 662
352
+ ],
353
+ "page_idx": 2
354
+ },
355
+ {
356
+ "type": "text",
357
+ "text": "3.2.1. NON-STREAMING MODELS",
358
+ "text_level": 1,
359
+ "bbox": [
360
+ 86,
361
+ 676,
362
+ 323,
363
+ 691
364
+ ],
365
+ "page_idx": 2
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "Since the BERT-style pre-training is designed for the non-streaming models, training with this type of architecture is straightforward where the model uses both past and future context to learn to predict the quantized labels of the masked speech signals.",
370
+ "bbox": [
371
+ 84,
372
+ 700,
373
+ 475,
374
+ 777
375
+ ],
376
+ "page_idx": 2
377
+ },
378
+ {
379
+ "type": "text",
380
+ "text": "3.2.2 STREAMING MODELS",
381
+ "text_level": 1,
382
+ "bbox": [
383
+ 86,
384
+ 791,
385
+ 285,
386
+ 805
387
+ ],
388
+ "page_idx": 2
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "In addition to the non-streaming models, streaming architecture also plays a critical role for the speech recognition tasks as many of the applications require transcribing speakers' utterances with low-latency (Sainath et al., 2020). Streaming architecture however is less well-studied in the previous self-supervised learning work compared to the non-streaming ar",
393
+ "bbox": [
394
+ 84,
395
+ 814,
396
+ 475,
397
+ 906
398
+ ],
399
+ "page_idx": 2
400
+ },
401
+ {
402
+ "type": "text",
403
+ "text": "chitecture. Moreover, many of the previous self-supervised learning approaches specify a pre-training setup that takes both the previous and future context, making it a question of how one can generalize the approaches to streaming models. We proposed two pre-training algorithms that are compatible with the streaming architecture:",
404
+ "bbox": [
405
+ 496,
406
+ 84,
407
+ 885,
408
+ 175
409
+ ],
410
+ "page_idx": 2
411
+ },
412
+ {
413
+ "type": "text",
414
+ "text": "Streaming pre-train. As our algorithm does not require learning quantization and focuses only on training the ASR encoder, this approach largely benefits the streaming models. Pre-training for streaming models follows the same setup as non-streaming models but the ASR encoder now learns to predict the quantized labels of the masked part based only on the past context.",
415
+ "bbox": [
416
+ 496,
417
+ 183,
418
+ 885,
419
+ 287
420
+ ],
421
+ "page_idx": 2
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "Non-Streaming pre-train. Given that the neural network architecture like Transformer/Conformer allows switching from non-streaming to streaming behaviors by adding a mask for the future context within the same model, one can also perform pre-training with non-streaming setup for streaming models. Our algorithm provides benefits for streaming models with both non-streaming and streaming pre-training.",
426
+ "bbox": [
427
+ 496,
428
+ 296,
429
+ 885,
430
+ 417
431
+ ],
432
+ "page_idx": 2
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "3.3. Fine-tuning",
437
+ "text_level": 1,
438
+ "bbox": [
439
+ 496,
440
+ 433,
441
+ 612,
442
+ 449
443
+ ],
444
+ "page_idx": 2
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "After the pre-training, the approach initializes the encoder of the downstream ASR from the pre-trained model, and fine-tunes on the supervised set. The softmax layer added on top of the encoder during the pre-training process is not used in fine-tuning. We focus on end-to-end models with RNN transducers (Graves, 2012), where the decoder uses LSTMs for the prediction network. On constructing the encoder, an additional projection layer is added on top of the pre-trained encoder to help it adapt to the downstream ASR task. The training process also updates the encoder during the supervised fine-tuning.",
449
+ "bbox": [
450
+ 496,
451
+ 457,
452
+ 885,
453
+ 625
454
+ ],
455
+ "page_idx": 2
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "3.4. Understanding the Effectiveness of the Random-projection Quantizer",
460
+ "text_level": 1,
461
+ "bbox": [
462
+ 496,
463
+ 638,
464
+ 799,
465
+ 670
466
+ ],
467
+ "page_idx": 2
468
+ },
469
+ {
470
+ "type": "text",
471
+ "text": "Our algorithm uses a random-projection quantizer for self-supervised learning, and such a design raises two questions: how good is the resulting quantization quality with this quantizer and how much does the quantization quality affect the effectiveness of the self-supervised learning? We address these two questions through comparing our quantizer with VQ-VAEs. Using random-projections for quantizing speech signals shares some similarity as VQ-VAEs. The random projection performs dimension reduction for the speech signals while the random codebook provides an approximated discrete representation of the speech data distribution. VQ-VAEs also provide a discrete representation for the speech signals, but do so by learning a representation in the latent space that best preserves the speech data. Thus, comparing with VQ-VAEs gives us insight on the quantization quality",
472
+ "bbox": [
473
+ 496,
474
+ 678,
475
+ 885,
476
+ 905
477
+ ],
478
+ "page_idx": 2
479
+ },
480
+ {
481
+ "type": "header",
482
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
483
+ "bbox": [
484
+ 218,
485
+ 56,
486
+ 754,
487
+ 71
488
+ ],
489
+ "page_idx": 2
490
+ },
491
+ {
492
+ "type": "table",
493
+ "img_path": "images/470acb1d925642f0c1d4cda665a95894f3faca4419d478e0264d086afde47b46.jpg",
494
+ "table_caption": [
495
+ "Table 1. LibriSpeech results with non-streaming models. The LM used in our experiment is a Transformer LM with model size 0.1B."
496
+ ],
497
+ "table_footnote": [],
498
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Size (B)</td><td colspan=\"4\">No LM</td><td colspan=\"4\">With LM</td></tr><tr><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td></tr><tr><td>wav2vec 2.0 (Baevski et al., 2020b)</td><td>0.3</td><td>2.1</td><td>4.5</td><td>2.2</td><td>4.5</td><td>1.6</td><td>3.0</td><td>1.8</td><td>3.3</td></tr><tr><td>HuBERT Large (Hsu et al., 2021)</td><td>0.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>1.5</td><td>3.0</td><td>1.9</td><td>3.3</td></tr><tr><td>HuBERT X-Large (Hsu et al., 2021)</td><td>1.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>1.5</td><td>2.5</td><td>1.8</td><td>2.9</td></tr><tr><td>w2v-Conformer XL (Zhang et al., 2020)</td><td>0.6</td><td>1.7</td><td>3.5</td><td>1.7</td><td>3.5</td><td>1.6</td><td>3.2</td><td>1.5</td><td>3.2</td></tr><tr><td>w2v-BERT XL (Chung et al., 2021)</td><td>0.6</td><td>1.5</td><td>2.9</td><td>1.5</td><td>2.9</td><td>1.4</td><td>2.8</td><td>1.5</td><td>2.8</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td><td>1.4</td><td>2.6</td><td>1.5</td><td>2.7</td></tr></table>",
499
+ "bbox": [
500
+ 86,
501
+ 107,
502
+ 883,
503
+ 237
504
+ ],
505
+ "page_idx": 3
506
+ },
507
+ {
508
+ "type": "text",
509
+ "text": "of our quantizer and the effect of representation learning for self-supervised learning.",
510
+ "bbox": [
511
+ 88,
512
+ 261,
513
+ 472,
514
+ 291
515
+ ],
516
+ "page_idx": 3
517
+ },
518
+ {
519
+ "type": "text",
520
+ "text": "We demonstrate that the quantization quality of the random-projection quantizer is not ideal but yet effective for self-supervised learning by comparing it with VQ-VAE-based quantizations in Section 4.3. We also show that the gap in the quantization quality is less an issue with the increase of the unsupervised data in Section 4.4. The main objective of self-supervised learning for speech recognition is to train the model to learn contextual information. The random-projection quantizer preserve the distribution of the speech data, and in order for the model to learn to predict the quantized token based on unmasked signals, the model needs to learn to process the raw signals and infer the contextual information among speech data. Such a criterion allows the model to perform effective self-supervised learning with a random-projection quantizer.",
521
+ "bbox": [
522
+ 88,
523
+ 299,
524
+ 473,
525
+ 523
526
+ ],
527
+ "page_idx": 3
528
+ },
529
+ {
530
+ "type": "text",
531
+ "text": "4. Experiments",
532
+ "text_level": 1,
533
+ "bbox": [
534
+ 88,
535
+ 545,
536
+ 215,
537
+ 561
538
+ ],
539
+ "page_idx": 3
540
+ },
541
+ {
542
+ "type": "text",
543
+ "text": "We perform self-supervised learning experiments on LibriSpeech with non-streaming and streaming models, and assess the approach on multilingual tasks with non-streaming models. We study the quantization quality of the random-projection quantizer by comparing it with the quantizer learned with VQ-VAEs. The implementation use Lingvo (Shen et al., 2019) library.",
544
+ "bbox": [
545
+ 88,
546
+ 571,
547
+ 473,
548
+ 675
549
+ ],
550
+ "page_idx": 3
551
+ },
552
+ {
553
+ "type": "text",
554
+ "text": "4.1. LibriSpeech",
555
+ "text_level": 1,
556
+ "bbox": [
557
+ 88,
558
+ 693,
559
+ 202,
560
+ 707
561
+ ],
562
+ "page_idx": 3
563
+ },
564
+ {
565
+ "type": "text",
566
+ "text": "Following (Zhang et al., 2020), we conduct experiments on the LibriLight dataset (Kahn et al., 2020) for pre-training, and fine-tune on the LibriSpeech training set which contains 960 hours of data. The input speech signals are 80-dimensional log-mel filter bank coefficients, and each frame has the stride of 10ms. In the fine-tuning phase, the decoder has a vocab size 1024 and uses a 1024-token WordPiece model (Schuster & Nakajima, 2012) for tokenizations that is constructed from the transcripts of the LibriSpeech training set.",
567
+ "bbox": [
568
+ 88,
569
+ 717,
570
+ 473,
571
+ 866
572
+ ],
573
+ "page_idx": 3
574
+ },
575
+ {
576
+ "type": "text",
577
+ "text": "4.1.1. NON-STREAMING MODELS",
578
+ "text_level": 1,
579
+ "bbox": [
580
+ 501,
581
+ 262,
582
+ 733,
583
+ 273
584
+ ],
585
+ "page_idx": 3
586
+ },
587
+ {
588
+ "type": "text",
589
+ "text": "We use the same architectures reported in (Zhang et al., 2020) for fair comparisons. The model has two convolution layers at the bottom which provide 4 times temporal dimension reduction for the input sequences. The rest of the layers are a stack of Conformer models. We explore 0.6B model size which is extensively studied in the previous works. The model contains 24 layers of Conformer models.",
590
+ "bbox": [
591
+ 501,
592
+ 285,
593
+ 885,
594
+ 388
595
+ ],
596
+ "page_idx": 3
597
+ },
598
+ {
599
+ "type": "text",
600
+ "text": "Pre-train. The pre-training uses mask length 400ms with masking probability of 0.01. The learning rate schedule uses a transformer learning rate schedule (Vaswani et al., 2017). The training of the model uses Adam optimizer (Kingma & Ba, 2015) with 0.004 peak learning rate and 25000 warm-up steps. The batch size is 2048. Since the encoder has 4 times temporal-dimension reduction, the quantization with random projections stacks every 4 frames for projections. The vocab size of the codebook is 8192 and the dimension is 16.",
601
+ "bbox": [
602
+ 501,
603
+ 398,
604
+ 885,
605
+ 547
606
+ ],
607
+ "page_idx": 3
608
+ },
609
+ {
610
+ "type": "text",
611
+ "text": "Fine-tune. The fine-tuning model also follows the same architecture as in (Zhang et al., 2020) and use RNN Transducer (RNN-T) (Graves, 2012) for decoder with 2 layers of unidirectional LSTMs, where the hidden dimension of LSTMs are 1280. The fine-tuning process uses the Transformer learning rate schedule. Since the encoder is initialized from a pre-trained model, the fine-tuning process uses a lower learning rate for the encoder than the decoder. The encoder uses 0.0003 peak learning rate and 5000 warmup steps, while the decoder uses 0.001 peak learning rate and 1500 warmup steps.",
612
+ "bbox": [
613
+ 501,
614
+ 556,
615
+ 885,
616
+ 722
617
+ ],
618
+ "page_idx": 3
619
+ },
620
+ {
621
+ "type": "text",
622
+ "text": "The results of pre-training with LibriLight and fine-tuning on LibriSpeech, along with comparisons with previous works, are shown in Table 1. Our results with LM use shallow fusion to incorporate the LM. The LM is a 0.1B Transformer model trained on the LibriSpeech LM corpus, and the model has 8 layers, 1024 model dimension, and 4096 feed-forward network dimension. By using the same architecture and similar optimization strategy as (Zhang et al., 2020), our approach shows similar WERs as previous best results on LibriSpeech both with and without LM.",
623
+ "bbox": [
624
+ 501,
625
+ 729,
626
+ 883,
627
+ 878
628
+ ],
629
+ "page_idx": 3
630
+ },
631
+ {
632
+ "type": "header",
633
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
634
+ "bbox": [
635
+ 218,
636
+ 56,
637
+ 751,
638
+ 70
639
+ ],
640
+ "page_idx": 3
641
+ },
642
+ {
643
+ "type": "text",
644
+ "text": "4.1.2. STREAMING MODELS",
645
+ "text_level": 1,
646
+ "bbox": [
647
+ 84,
648
+ 85,
649
+ 287,
650
+ 99
651
+ ],
652
+ "page_idx": 4
653
+ },
654
+ {
655
+ "type": "text",
656
+ "text": "The architecture we use for the streaming experiments follows a similar design as previous work for building streaming ASRs (Yu et al., 2021). We scale the model size to be also 0.6B to be consistent with the non-streaming experiments. The architecture has 3 Conformer layers at the bottom, followed by a stacking layer with 2 times temporal-dimension reduction and 20 Conformer layers on top of the stacking layer. The Conformer has 1024 hidden dimension for the self-attention layer and 4096 for the feed-forward layers. The self-attention layer attends to the current and the previous 64 frames, and the convolution has a kernel that covers the current and the past 3 frames.",
657
+ "bbox": [
658
+ 84,
659
+ 108,
660
+ 475,
661
+ 289
662
+ ],
663
+ "page_idx": 4
664
+ },
665
+ {
666
+ "type": "text",
667
+ "text": "The training setup is mostly the same as the 0.6B model in the non-streaming experiments, with some changes on the masking ratio for different pre-training approaches.",
668
+ "bbox": [
669
+ 84,
670
+ 297,
671
+ 473,
672
+ 342
673
+ ],
674
+ "page_idx": 4
675
+ },
676
+ {
677
+ "type": "text",
678
+ "text": "Streaming pre-train. The streaming pre-training uses the same setup as the original architecture, and the mask length is 300ms and the masking probability is 0.02. The random-projection quantizer stacks every 2 frames for projections.",
679
+ "bbox": [
680
+ 84,
681
+ 349,
682
+ 475,
683
+ 411
684
+ ],
685
+ "page_idx": 4
686
+ },
687
+ {
688
+ "type": "text",
689
+ "text": "Non-streaming pre-train. The non-streaming pre-training extends the original architecture to have access for future context by having the convolution kernel within the Conformer layer to have access for the future 3 frames. The self-attention is still limited to having access only for the previous context. We also explored having future context access for the self-attention, but this setup tends to be less stable. The masking length is $400\\mathrm{ms}$ and the masking probability is 0.02.",
690
+ "bbox": [
691
+ 84,
692
+ 417,
693
+ 475,
694
+ 551
695
+ ],
696
+ "page_idx": 4
697
+ },
698
+ {
699
+ "type": "text",
700
+ "text": "Fine-tune. The fine-tuning ASR model uses RNN-T for decoder with a layer of unidirectional LSTM, where the hidden dimension of the LSTM is 640. The training setup is the same as the fine-tuning config for the 0.6B model in the non-streaming experiments. When initializing from a non-streaming pre-trained model, the convolution only uses the kernel weight that accesses the previous context to keep the model streaming.",
701
+ "bbox": [
702
+ 84,
703
+ 561,
704
+ 473,
705
+ 681
706
+ ],
707
+ "page_idx": 4
708
+ },
709
+ {
710
+ "type": "text",
711
+ "text": "Latency measurement. A streaming model can learn to delay its prediction to access the future context and improve the prediction accuracy, and therefore it is critical to measure the latency of the streaming models to see whether the model maintains similar latency. This assessment helps us identify whether the underlying approach provides real improvement instead of trading off latency for prediction accuracy. Our latency comparison process first calculates the starting time and ending time of every word for each hypothesis generated by the two models. The comparison then aligns the hypotheses from the two models, finds the matching words and calculates the difference of their starting and ending time. The relative latency measurement is the average word timing difference of all matched words",
712
+ "bbox": [
713
+ 84,
714
+ 689,
715
+ 475,
716
+ 900
717
+ ],
718
+ "page_idx": 4
719
+ },
720
+ {
721
+ "type": "text",
722
+ "text": "between the two models among all utterances. Specifically the relative latency is calculated with",
723
+ "bbox": [
724
+ 496,
725
+ 84,
726
+ 885,
727
+ 114
728
+ ],
729
+ "page_idx": 4
730
+ },
731
+ {
732
+ "type": "equation",
733
+ "text": "\n$$\n\\sum_ {i, j} \\frac {s _ {i j} ^ {\\prime} - s _ {i j} + e _ {i j} ^ {\\prime} - e _ {i j}}{2 N}, \\tag {2}\n$$\n",
734
+ "text_format": "latex",
735
+ "bbox": [
736
+ 602,
737
+ 126,
738
+ 885,
739
+ 165
740
+ ],
741
+ "page_idx": 4
742
+ },
743
+ {
744
+ "type": "text",
745
+ "text": "where $i$ denotes the index of the matched words between the two hypotheses, $j$ is the utterance index, $s_{ij}$ and $e_{ij}$ correspond to the starting and ending time of the word from the baseline model, $s_{ij}'$ and $e_{ij}'$ correspond to the starting and ending time of the word from the compared model, and $N$ is the total number of matched words among all utterances. A negative relative latency means the compared model has lower latency than the baseline model.",
746
+ "bbox": [
747
+ 496,
748
+ 176,
749
+ 885,
750
+ 297
751
+ ],
752
+ "page_idx": 4
753
+ },
754
+ {
755
+ "type": "text",
756
+ "text": "The word-error-rates and the relative latency are shown in Table 2. In this comparison experiment both wav2vec 2.0 and w2v-BERT use the same architecture, same masking and training setup as BEST-RQ. Using the conventional masking setup for wav2vec 2.0 and w2v-BERT gives worse performance. Since there is no convolution layers at the bottom, the contrastive learning use speech signals as targets. The w2v-BERT model use 12 layers for the contrastive module and 12 layers for the masked prediction module, to be consistent with the non-streaming setup (Chung et al., 2021). Our algorithm outperforms wav2vec 2.0 and w2v-BERT for both streaming and non-streaming pre-training. In particular our algorithm performs well with both pre-training, while wav2vec 2.0 and w2v-BERT favors more with non-streaming pre-training. This is likely due to the fact that the representation learning of both approaches is more compatible with non-streaming architectures. Increasing the model size from 0.1B to 0.6B results a slight increase in latency, but models trained with self-supervised learning algorithms has lower latency with streaming pre-training giving the most significant latency reduction. This indicates that the self-supervised learning preserve the low-latency property while providing quality gain.",
757
+ "bbox": [
758
+ 496,
759
+ 305,
760
+ 885,
761
+ 654
762
+ ],
763
+ "page_idx": 4
764
+ },
765
+ {
766
+ "type": "text",
767
+ "text": "4.2. Multilingual Tasks",
768
+ "text_level": 1,
769
+ "bbox": [
770
+ 496,
771
+ 662,
772
+ 661,
773
+ 678
774
+ ],
775
+ "page_idx": 4
776
+ },
777
+ {
778
+ "type": "text",
779
+ "text": "We present multilingual results in this section. We use the same model setup as the LibriSpeech non-streaming experiment for these tasks.",
780
+ "bbox": [
781
+ 495,
782
+ 686,
783
+ 885,
784
+ 731
785
+ ],
786
+ "page_idx": 4
787
+ },
788
+ {
789
+ "type": "text",
790
+ "text": "4.2.1. DATA",
791
+ "text_level": 1,
792
+ "bbox": [
793
+ 496,
794
+ 746,
795
+ 588,
796
+ 760
797
+ ],
798
+ "page_idx": 4
799
+ },
800
+ {
801
+ "type": "text",
802
+ "text": "Multilingual LibriSpeech (MLS-10hrs) The Multilingual LibriSpeech dataset (Pratap et al., 2020) is a large corpus derived from read audiobooks of Librivox and consists of 8 languages: Dutch (du), English (en), French (fr), German (de), Italian (it), Polish (pl), Portuguese (pt), Spanish (es). The latest version of this corpus contains around 50k hours including 44k hours in English. We use the official 10 hours split of training data to evaluate few-shot learning capabilities.",
803
+ "bbox": [
804
+ 495,
805
+ 768,
806
+ 885,
807
+ 905
808
+ ],
809
+ "page_idx": 4
810
+ },
811
+ {
812
+ "type": "header",
813
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
814
+ "bbox": [
815
+ 218,
816
+ 56,
817
+ 754,
818
+ 71
819
+ ],
820
+ "page_idx": 4
821
+ },
822
+ {
823
+ "type": "table",
824
+ "img_path": "images/d3c160b225b7d0ce93a252493cc9804d944dc2d09ce732cc7189ee244185214d.jpg",
825
+ "table_caption": [
826
+ "Table 2. LibriSpeech results compared with previous works with the same streaming architecture, and use LibriLight set for pre-training and LibriSpeech 960h set for fine-tuning. The relative latency (the lower the better) is the average difference of the word prediction time when comparing with the baseline Conformer 0.1B model. Our algorithm outperforms wav2vec 2.0 and w2v-BERT on both WERs and latency."
827
+ ],
828
+ "table_footnote": [],
829
+ "table_body": "<table><tr><td>Method</td><td>Size (B)</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>Relative latency (ms)</td></tr><tr><td>Conformer 0.1B</td><td>0.1</td><td>4.1</td><td>10.3</td><td>4.5</td><td>9.8</td><td>0</td></tr><tr><td>Conformer 0.6B</td><td>0.6</td><td>3.9</td><td>9.8</td><td>4.4</td><td>9.4</td><td>15.3</td></tr><tr><td colspan=\"7\">Non-Streaming pre-train</td></tr><tr><td>wav2vec 2.0</td><td>0.6</td><td>2.6</td><td>7.3</td><td>3.0</td><td>7.2</td><td>-10.1</td></tr><tr><td>w2v-BERT</td><td>0.6</td><td>2.8</td><td>7.2</td><td>3.3</td><td>6.9</td><td>-0.7</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>2.5</td><td>6.9</td><td>2.8</td><td>6.6</td><td>-16.3</td></tr><tr><td colspan=\"7\">Streaming pre-train</td></tr><tr><td>wav2vec 2.0</td><td>0.6</td><td>2.7</td><td>8.0</td><td>2.9</td><td>7.9</td><td>-130.6</td></tr><tr><td>w2v-BERT</td><td>0.6</td><td>2.7</td><td>8.4</td><td>3.0</td><td>8.1</td><td>-117.1</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>2.5</td><td>6.9</td><td>2.8</td><td>6.6</td><td>-130.9</td></tr></table>",
830
+ "bbox": [
831
+ 140,
832
+ 155,
833
+ 834,
834
+ 353
835
+ ],
836
+ "page_idx": 5
837
+ },
838
+ {
839
+ "type": "text",
840
+ "text": "Multilingual Voice Search (VS-1000hrs) Our high resource finetune datasets is multilingual Voice Search dataset (Li et al., 2021). We sample random 1000 hour subsets (VS-1000h) across 15 languages, including English (US), English (IN), Spanish (US), Portuguese (BR), Spanish (ES), Arabic (GULF), Arabic (EG), Hindi (IN), Marathi (IN), Bengali (BD), Chinese (TW), Russian (RU), Turkish (TR), Hungarian (HU), and Malay (MY). The test set for each language contains around 3–19K utterances.",
841
+ "bbox": [
842
+ 84,
843
+ 376,
844
+ 475,
845
+ 513
846
+ ],
847
+ "page_idx": 5
848
+ },
849
+ {
850
+ "type": "text",
851
+ "text": "XLS-R unsupervised data (XLS-R -U) Our public unlabeled speech data follows the pre-training data used for XLS-R (Babu et al., 2021) with one major difference: we do not use any data from VoxLingua-107 due to license constraint. In total, we utilize approximately $429k$ hours of unlabeled speech data in $51^{1}$ languages. As a consequence our model is pre-trained on speech from 51 languages as compared to 128 for XLS-R, and our pre-training set is smaller by $6.6k$ hours. We use this pretrain data on MLS-10hrs to compare with published results.",
852
+ "bbox": [
853
+ 84,
854
+ 531,
855
+ 477,
856
+ 683
857
+ ],
858
+ "page_idx": 5
859
+ },
860
+ {
861
+ "type": "text",
862
+ "text": "Youtube unsupervised data (YT-U) Following (Zhang et al., 2021), we collected a multilingual Youtube dataset for pretraining. For each language we prepare an unlabeled YouTube dataset segmented using voice activation detection (VAD (Zazo Candil et al., 2016)). The number of hours per languages are: English (800k hrs), Spanish (800k hrs), Marathi (600k hrs), Portuguese (800k hrs), Russian (800k), Arabic (800k), Hindi (800k), Chinese (800k), Malay (250k), Turkish (800k), Bengali (800k), Hugarian (300k). In practice, we found this data performs much better than XLS-R -U on VS-1000hrs. Thus, we use this pretrain data on VS-1000hrs to compare the performance of different pretrain",
863
+ "bbox": [
864
+ 84,
865
+ 700,
866
+ 477,
867
+ 883
868
+ ],
869
+ "page_idx": 5
870
+ },
871
+ {
872
+ "type": "text",
873
+ "text": "methods.",
874
+ "bbox": [
875
+ 496,
876
+ 378,
877
+ 562,
878
+ 391
879
+ ],
880
+ "page_idx": 5
881
+ },
882
+ {
883
+ "type": "text",
884
+ "text": "4.2.2. RESULTS ON MLS-10HRS",
885
+ "text_level": 1,
886
+ "bbox": [
887
+ 496,
888
+ 407,
889
+ 728,
890
+ 421
891
+ ],
892
+ "page_idx": 5
893
+ },
894
+ {
895
+ "type": "text",
896
+ "text": "We conduct our multilingual low resource finetune experiments on MLS-10hrs. We use XLS-R -U as pretraining data and finetune it on MLS-10hrs. As shown in Table 3, our baseline w2v-BERT already outperform previous strong model from XLS-R(2B) (Babu et al., 2021). The average WER further bring down by $3\\%$ relative by using the proposed BEST-RQ. This demonstrate a simple random-projection quantizer is also effective for multilingual pretraining. We also report finetune results on the MLS full supervised data. Interestingly, with more finetune data, BEST-RQ perform even better than w2v-BERT, especially for pt and pl. Our results also comparable with previously state-of-the-art results in (Bai et al., 2021) which conduct joint training for multilingual ASR.",
897
+ "bbox": [
898
+ 495,
899
+ 430,
900
+ 888,
901
+ 643
902
+ ],
903
+ "page_idx": 5
904
+ },
905
+ {
906
+ "type": "text",
907
+ "text": "While fine-tuning with MLS-full and MLS-10hrs both exhibit improvement compared to existing approaches, fine-tuning with MLS-full provides more relative improvement. This likely implies that pre-training with random-projection quantizers is more effective when there is more fine-tuning data.",
908
+ "bbox": [
909
+ 495,
910
+ 648,
911
+ 888,
912
+ 739
913
+ ],
914
+ "page_idx": 5
915
+ },
916
+ {
917
+ "type": "text",
918
+ "text": "4.2.3. RESULTS ON VOICE SEARCH",
919
+ "text_level": 1,
920
+ "bbox": [
921
+ 496,
922
+ 755,
923
+ 748,
924
+ 768
925
+ ],
926
+ "page_idx": 5
927
+ },
928
+ {
929
+ "type": "text",
930
+ "text": "To understand how the proposed model work for high resource (1000hrs per language), we pretrain our model on YT-U and finetune it on VS-1000hrs. We can see with more finetune data, the relative improvement is smaller compared with no pretrain baseline. However, our proposed BEST-RQ consistently outperform w2v-BERT and wav2vec 2.0 by $9\\%$ and $5\\%$ relatively. Compare to w2v-BERT, our proposed method outperform on all the languages. Among the 15",
931
+ "bbox": [
932
+ 495,
933
+ 777,
934
+ 888,
935
+ 902
936
+ ],
937
+ "page_idx": 5
938
+ },
939
+ {
940
+ "type": "header",
941
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
942
+ "bbox": [
943
+ 218,
944
+ 56,
945
+ 754,
946
+ 71
947
+ ],
948
+ "page_idx": 5
949
+ },
950
+ {
951
+ "type": "page_footnote",
952
+ "text": "<sup>1</sup>Counting languages with more than 1 hour of speech data.",
953
+ "bbox": [
954
+ 106,
955
+ 890,
956
+ 460,
957
+ 905
958
+ ],
959
+ "page_idx": 5
960
+ },
961
+ {
962
+ "type": "table",
963
+ "img_path": "images/551f3375bc5520838ea752e2c0a70e39239efdcad61a560ec90741d10a5f4418.jpg",
964
+ "table_caption": [
965
+ "Table 3. Test setWER $(\\%)$ comparisons on the MLS full and 10hrs set."
966
+ ],
967
+ "table_footnote": [],
968
+ "table_body": "<table><tr><td rowspan=\"2\">Exp.</td><td colspan=\"8\">Languages</td><td rowspan=\"2\">Avg.</td></tr><tr><td>en</td><td>de</td><td>nl</td><td>fr</td><td>es</td><td>it</td><td>pt</td><td>pl</td></tr><tr><td>MLS-full</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>wav2vec 2.0 from XLSR-53 (Conneau et al., 2020)</td><td>-</td><td>7.0</td><td>10.8</td><td>7.6</td><td>6.3</td><td>10.4</td><td>14.7</td><td>17.2</td><td>10.6</td></tr><tr><td>w2v-BERT from JUST (Bai et al., 2021)</td><td>6.6</td><td>4.3</td><td>9.9</td><td>5.0</td><td>3.8</td><td>9.1</td><td>14.6</td><td>8.1</td><td>7.8</td></tr><tr><td>JUST (Bai et al., 2021) (co-train)</td><td>6.5</td><td>4.1</td><td>9.5</td><td>5.2</td><td>3.7</td><td>8.8</td><td>8.0</td><td>6.6</td><td>6.5</td></tr><tr><td>w2v-BERT (0.6B)</td><td>5.5</td><td>4.3</td><td>10.9</td><td>5.6</td><td>4.5</td><td>10.1</td><td>13.4</td><td>11.2</td><td>8.2</td></tr><tr><td>BEST-RQ (Ours, 0.6B)</td><td>6.8</td><td>4.1</td><td>9.7</td><td>5.0</td><td>4.9</td><td>7.4</td><td>9.4</td><td>5.2</td><td>6.6</td></tr><tr><td>MLS-10hrs</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>XLSR-53 (Conneau et al., 2020)</td><td>14.6</td><td>8.4</td><td>12.8</td><td>12.5</td><td>8.9</td><td>13.4</td><td>18.2</td><td>21.2</td><td>13.8</td></tr><tr><td>XLS-R(0.3B) (Babu et al., 2021)</td><td>15.9</td><td>9.0</td><td>13.5</td><td>12.4</td><td>8.1</td><td>13.1</td><td>17.0</td><td>13.9</td><td>12.8</td></tr><tr><td>XLS-R(1B) (Babu et al., 2021)</td><td>12.9</td><td>7.4</td><td>11.6</td><td>10.2</td><td>7.1</td><td>12.0</td><td>15.8</td><td>10.5</td><td>10.9</td></tr><tr><td>XLS-R(2B) (Babu et al., 2021)</td><td>14.0</td><td>7.6</td><td>11.8</td><td>10.0</td><td>6.9</td><td>12.1</td><td>15.6</td><td>9.8</td><td>11.0</td></tr><tr><td>w2v-BERT (0.6B)</td><td>12.7</td><td>7.0</td><td>12.6</td><td>8.9</td><td>5.9</td><td>10.3</td><td>14.6</td><td>6.9</td><td>9.9</td></tr><tr><td>BEST-RQ (Ours, 0.6B)</td><td>12.8</td><td>7.4</td><td>12.7</td><td>9.6</td><td>5.4</td><td>9.9</td><td>12.1</td><td>7.1</td><td>9.6</td></tr></table>",
969
+ "bbox": [
970
+ 96,
971
+ 107,
972
+ 874,
973
+ 390
974
+ ],
975
+ "page_idx": 6
976
+ },
977
+ {
978
+ "type": "table",
979
+ "img_path": "images/6be3df7d9e05e7113d63fd65f88d58584f49033396fe212ba0cffc73a6382f63.jpg",
980
+ "table_caption": [
981
+ "Table 4. Test set WER (%) comparisons using YT-U for pretrain and VS-1000hrs for finetune, across 15 languages."
982
+ ],
983
+ "table_footnote": [],
984
+ "table_body": "<table><tr><td>Exp.</td><td>Avg. on 15 langs (VS)</td></tr><tr><td>Baseline (0.6B)</td><td>12.6</td></tr><tr><td>wav2vec 2.0 (0.6B)</td><td>12.0</td></tr><tr><td>w2v-bert (0.6B)</td><td>11.5</td></tr><tr><td>BEST-RQ (Ours) (0.6B)</td><td>10.9</td></tr></table>",
985
+ "bbox": [
986
+ 102,
987
+ 452,
988
+ 452,
989
+ 542
990
+ ],
991
+ "page_idx": 6
992
+ },
993
+ {
994
+ "type": "text",
995
+ "text": "languages, English, Portuguese, Russian and Turkish, are improved more than $10\\%$ , relatively. Indic languages (Hindi, Marathi and English (IN)) are only slightly improved, all smaller than $3\\%$ relatively.",
996
+ "bbox": [
997
+ 88,
998
+ 579,
999
+ 472,
1000
+ 638
1001
+ ],
1002
+ "page_idx": 6
1003
+ },
1004
+ {
1005
+ "type": "text",
1006
+ "text": "4.3. Analyzing Quantization Quality",
1007
+ "text_level": 1,
1008
+ "bbox": [
1009
+ 88,
1010
+ 656,
1011
+ 339,
1012
+ 671
1013
+ ],
1014
+ "page_idx": 6
1015
+ },
1016
+ {
1017
+ "type": "text",
1018
+ "text": "As our self-supervised learning algorithm eliminates the requirement of representation learning through applying a random-projection quantizer, it is crucial to understand the representation quality of this quantizer and how the quality of the quantization affect the self-supervised learning. We analyze the quality of quantizers by training ASR models feeding labels generated by quantizing utterances as input. The performance of the resulting ASR provides us insights on the quality of the quantizer. The ASR model embeds quantized labels and feeds the embedding to a stack of Conformer layers, followed by a CTC decoder. 16 Conformer layer has feature dim 256, local self attention with 8 heads and 128 context length and kernel size 5 for lightweight convolution, in total the model size is $25\\mathrm{M}$ . We study the effect of representation learning through comparing with",
1019
+ "bbox": [
1020
+ 88,
1021
+ 679,
1022
+ 473,
1023
+ 904
1024
+ ],
1025
+ "page_idx": 6
1026
+ },
1027
+ {
1028
+ "type": "text",
1029
+ "text": "quantizers trained with the VQ-VAE. We compare 3 types of quantizers: a) a random-projection quantizer b) a quantizer trained with VQ-VAE where the encoder has the same architecture as the random-projection quantizer and the decoder contains only a projection layer c) a trained VQ-VAE whose encoder/decoder are Transformer models. For trained quantizers, we train on the whole LibriSpeech 960 hours audio-only data, with a constant learning rate of 1e-4 and train for 400k steps with batch size 256. For all quantizers, the input frames are stacked with 3 frames on each's left, resulting in 4x input length reduction. We also use the quantizers for self-supervised learning with the LibriSpeech 0.6B non-streaming setup to compare their performance.",
1030
+ "bbox": [
1031
+ 501,
1032
+ 415,
1033
+ 885,
1034
+ 611
1035
+ ],
1036
+ "page_idx": 6
1037
+ },
1038
+ {
1039
+ "type": "text",
1040
+ "text": "Table 5 shows the WER on LibriSpeech 960h. Both the random-projection quantizer and the projection-based VQ-VAE quantizer lead to poor ASR performance, while the Transformer-based VQ-VAE quantizer provides a significantly better performance. This implies that the Transformer-based VQ-VAE quantizer learns a better representation. On the other hand, when using these quantizers for the purpose of self-supervised learning, all quantizers lead to similar WERs. This indicates that the quantizer quality does not translate to self-supervised learning quality.",
1041
+ "bbox": [
1042
+ 501,
1043
+ 619,
1044
+ 885,
1045
+ 770
1046
+ ],
1047
+ "page_idx": 6
1048
+ },
1049
+ {
1050
+ "type": "text",
1051
+ "text": "4.4. Analyzing the Effect of Pre-training Data Size",
1052
+ "text_level": 1,
1053
+ "bbox": [
1054
+ 501,
1055
+ 787,
1056
+ 848,
1057
+ 801
1058
+ ],
1059
+ "page_idx": 6
1060
+ },
1061
+ {
1062
+ "type": "text",
1063
+ "text": "One potential explanation for the above observation, that a sub-optimal quantization can work well for self-supervised learning, is that the self-supervised learning algorithm can learn to mitigate the quality gap given sufficient amounts of pre-training data. We investigate whether a quantizer with a better quantization quality performs better when the",
1064
+ "bbox": [
1065
+ 501,
1066
+ 811,
1067
+ 883,
1068
+ 900
1069
+ ],
1070
+ "page_idx": 6
1071
+ },
1072
+ {
1073
+ "type": "header",
1074
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
1075
+ "bbox": [
1076
+ 218,
1077
+ 56,
1078
+ 751,
1079
+ 70
1080
+ ],
1081
+ "page_idx": 6
1082
+ },
1083
+ {
1084
+ "type": "table",
1085
+ "img_path": "images/9fa269d44d00b13379b822f233cbdf9cb60dd1ffef7e97c48e9fbc6075b6b5f9.jpg",
1086
+ "table_caption": [
1087
+ "Table 5. Quantizer quality's impact on ASR tasks. Although the Transformer-based quantizer gets much better performance when used as input directly, the random-projection quantizer is equally effective for self-supervised learning. The model used in the direct ASR task has size 25M. The self-supervised learning tasks use the same setup as the LibriSpeech non-streaming experiment, which use LibriLight for pre-training and LibriSpeech for fine-tuning and has 0.6B model size."
1088
+ ],
1089
+ "table_footnote": [],
1090
+ "table_body": "<table><tr><td rowspan=\"2\">Configuration</td><td rowspan=\"2\">Quantizer size (M)</td><td colspan=\"4\">Direct ASR WER</td><td colspan=\"4\">Pretrain-finetune WER</td></tr><tr><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td></tr><tr><td>Random quantizer</td><td>1</td><td>58.8</td><td>78.8</td><td>57.9</td><td>72.8</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td></tr><tr><td>Projection VQ-VAE</td><td>1</td><td>61.4</td><td>74.8</td><td>60.9</td><td>75.2</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td></tr><tr><td>Transformer VQ-VAE</td><td>10</td><td>17.8</td><td>35.8</td><td>17.6</td><td>36.1</td><td>1.4</td><td>2.9</td><td>1.6</td><td>3.1</td></tr></table>",
1091
+ "bbox": [
1092
+ 86,
1093
+ 148,
1094
+ 903,
1095
+ 246
1096
+ ],
1097
+ "page_idx": 7
1098
+ },
1099
+ {
1100
+ "type": "image",
1101
+ "img_path": "images/46ae957cb3b7f368d34f1c0b4514b17b12adf61c1b45e0b42acaea89729f795f.jpg",
1102
+ "image_caption": [
1103
+ "Librilight-pretrain, Librispeech finetune WER",
1104
+ "Figure 2. Comparing the self-supervised learning quality of the random-projection quantizer (rq) and the Transformer-based VQ-VAE quantizer (tvae) with different pre-training data size. Starting from low amount of pre-train data, the random-projection quantizer is behind the trained Transformer VQ-VAE quantizer. As the amount of pre-train data increases, the random-projection quantizer catches up."
1105
+ ],
1106
+ "image_footnote": [],
1107
+ "bbox": [
1108
+ 89,
1109
+ 286,
1110
+ 470,
1111
+ 448
1112
+ ],
1113
+ "page_idx": 7
1114
+ },
1115
+ {
1116
+ "type": "text",
1117
+ "text": "amount of the pre-training data is limited, and whether increasing the amount of the pre-training data alleviate the discrepancy when compared to a random-projection quantizer. In this study, we compare self-supervised learning quality between a random-projection quantizer (rq) and a trained transformer-based VQ-VAE quantizer (tvae) with different pre-training data sizes. The random quantizer is untrained, and 4 Transformer VQ-VAE quantizers are trained with $\\{1 / 64, 4 / 64, 16 / 64, 64 / 64\\}$ LibriLight data, respectively. Then 4 identical random-projection quantizers and the above 4 transformer VAE quantizers are pre-trained separately with the same distinct percentages of LibriLight data as above for $100k$ steps with global batch size 2048. The pretrained models fine-tune on LibriSpeech 960h. The result in Figure 2 shows that a quantizer with better representation quality (Transformer-based VQ-VAE) performs better when pre-training data is limited, but the gap disappears as the pre-training data increase.",
1118
+ "bbox": [
1119
+ 88,
1120
+ 633,
1121
+ 473,
1122
+ 904
1123
+ ],
1124
+ "page_idx": 7
1125
+ },
1126
+ {
1127
+ "type": "text",
1128
+ "text": "5. Conclusions and Discussions",
1129
+ "text_level": 1,
1130
+ "bbox": [
1131
+ 501,
1132
+ 270,
1133
+ 759,
1134
+ 285
1135
+ ],
1136
+ "page_idx": 7
1137
+ },
1138
+ {
1139
+ "type": "text",
1140
+ "text": "We proposed BEST-RQ to perform self-supervised learning for speech recognition models. BEST-RQ uses a random-projection quantizer to quantize speech signals to discrete labels. The pre-training process masks the speech signals and trains the model to predict labels corresponding to the masked parts. This approach shows similar WERs as the existing state-of-the-art results on LibriSpeech with non-streaming models, and outperform wav2vec 2.0 and w2v-BERT on LibriSpeech with streaming models and on multilingual tasks with non-streaming models. Further analysis showed that despite the fact that the random-projection quantizer provides a poorer representation compared to a trained VQ-VAE quantizer, it is effective for the purpose of self-supervised learning.",
1141
+ "bbox": [
1142
+ 500,
1143
+ 295,
1144
+ 885,
1145
+ 505
1146
+ ],
1147
+ "page_idx": 7
1148
+ },
1149
+ {
1150
+ "type": "text",
1151
+ "text": "Our algorithm untangle the quantizer from the speech recognition model and also eliminates the requirement of representation learning. This simpler framework makes it easier to find a good recipe for the target task. The improvement on streaming models shows that the separation of the quantizer from the model makes the algorithm more effective for architectures that can be less effective for representation learning. The improvement on multilingual tasks shows that complicated tasks can benefit more from a simpler framework where finding a good recipe becomes more challenging. The quantization quality analysis implies that representation learning is not necessarily critical for self-supervised learning.",
1152
+ "bbox": [
1153
+ 500,
1154
+ 513,
1155
+ 885,
1156
+ 710
1157
+ ],
1158
+ "page_idx": 7
1159
+ },
1160
+ {
1161
+ "type": "text",
1162
+ "text": "Codebook utilization. One of the most critical factors for pre-training quality is the percentage of the codebook that is used during training. In particular, at each training step a higher percentage of the codebook being used in each batch correlates strongly with a good pre-training quality. When the distribution of the codebook utilization is skewed toward a smaller subset of codes, this usually makes the pre-training task easier and provides less effective pre-training. The $l2$ normalizations on the projected vector and the codebook are critical for providing more uniform codebook utilization. On the other hand, using randomly initialized codebook and projection matrix can introduce different codebook utiliza",
1163
+ "bbox": [
1164
+ 500,
1165
+ 718,
1166
+ 885,
1167
+ 898
1168
+ ],
1169
+ "page_idx": 7
1170
+ },
1171
+ {
1172
+ "type": "header",
1173
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
1174
+ "bbox": [
1175
+ 218,
1176
+ 56,
1177
+ 751,
1178
+ 70
1179
+ ],
1180
+ "page_idx": 7
1181
+ },
1182
+ {
1183
+ "type": "text",
1184
+ "text": "tions with different random seeds, which impact the pretraining quality across different runs with same experiment configurations. This variance impacts quality more when training with smaller pre-training and fine-tuning datasets. How to reduce this reproducibility issue caused by random initialization is an important next step for improving random-projection quantizations.",
1185
+ "bbox": [
1186
+ 84,
1187
+ 84,
1188
+ 475,
1189
+ 191
1190
+ ],
1191
+ "page_idx": 8
1192
+ },
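
A hedged sketch of the diagnostic described in the paragraph above: the fraction of distinct codebook entries that the quantized labels of one batch actually hit. The function name and the example batch shape are assumptions for illustration; the paper reports the idea, not this code.

```python
import numpy as np

def codebook_utilization(labels, vocab_size=8192):
    """Fraction of the codebook used by one batch of quantized labels."""
    return np.unique(labels).size / vocab_size

# toy usage: labels from the random-projection quantizer for one batch [B, T]
batch_labels = np.random.randint(0, 8192, size=(64, 400))
print(f"utilization: {codebook_utilization(batch_labels):.2%}")
```

A skewed label distribution shows up here as a persistently low fraction, which is the symptom the paragraph associates with a weaker pre-training signal.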
1193
+ {
1194
+ "type": "text",
1195
+ "text": "Hyperparameters. The pre-training quality is not very sensitive to the codebook vocab size and the codebook dimension, and is more sensitive to the masking probability and the mask length. The role of the projection layer in the random-projection quantizer is to allow using different codebook dimensions, and one can achieve similar results without the projection and set the codebook dimension to be the same as the input dimension. Due to the variance coming from the random initialization, the impact of a hyperparameter usually requires multiple runs of experiments to verify the result.",
1196
+ "bbox": [
1197
+ 84,
1198
+ 198,
1199
+ 477,
1200
+ 364
1201
+ ],
1202
+ "page_idx": 8
1203
+ },
1204
+ {
1205
+ "type": "text",
1206
+ "text": "Longer convergence time for non-streaming models. One observation we have is that the algorithm takes more steps to converge with non-streaming models. We still observe improvement compared to wav2vec 2.0 and w2v-BERT at the same training step on multilingual tasks, though the final convergence usually takes $50\\%$ more steps. On the other hand, our training setup follows (Zhang et al., 2020), and it is unclear to us whether further hyperparameter tuning can help the model to converge faster. We did not observe the longer convergence property with streaming models.",
1207
+ "bbox": [
1208
+ 84,
1209
+ 372,
1210
+ 477,
1211
+ 523
1212
+ ],
1213
+ "page_idx": 8
1214
+ },
1215
+ {
1216
+ "type": "text",
1217
+ "text": "Initialization. The quantizer uses random initialization and does not update the parameters, and therefore the initialization algorithm can play an important role on the results. In this paper we showed results with Xavier initialization for the projection matrix and the standard normal distribution for the codebook, and further comparisons on different initialization algorithms can be conducted in the future work.",
1218
+ "bbox": [
1219
+ 84,
1220
+ 530,
1221
+ 477,
1222
+ 638
1223
+ ],
1224
+ "page_idx": 8
1225
+ },
1226
+ {
1227
+ "type": "text",
1228
+ "text": "6. Acknowledgements",
1229
+ "text_level": 1,
1230
+ "bbox": [
1231
+ 84,
1232
+ 655,
1233
+ 274,
1234
+ 672
1235
+ ],
1236
+ "page_idx": 8
1237
+ },
1238
+ {
1239
+ "type": "text",
1240
+ "text": "We thank Wei Han and Johan Schalkwyk for helpful discussions, and Rohit Prabhavalkar, Izhak Shafran, and Hagen Soltau for insightful feedback. We also want to thank Bo Li for the help on multilingual tasks.",
1241
+ "bbox": [
1242
+ 84,
1243
+ 681,
1244
+ 477,
1245
+ 742
1246
+ ],
1247
+ "page_idx": 8
1248
+ },
1249
+ {
1250
+ "type": "text",
1251
+ "text": "References",
1252
+ "text_level": 1,
1253
+ "bbox": [
1254
+ 86,
1255
+ 761,
1256
+ 184,
1257
+ 777
1258
+ ],
1259
+ "page_idx": 8
1260
+ },
1261
+ {
1262
+ "type": "list",
1263
+ "sub_type": "ref_text",
1264
+ "list_items": [
1265
+ "Babu, A., Wang, C., Tjandra, A., Lakhotia, K., Xu, Q., Goyal, N., Singh, K., von Platen, P., Saraf, Y., Pino, J., et al. Xls-r: Self-supervised cross-lingual speech representation learning at scale. arXiv preprint arXiv:2111.09296, 2021.",
1266
+ "Baevski, A., Auli, M., and Mohamed, A. Effectiveness of self-supervised pre-training for speech recognition. arXiv"
1267
+ ],
1268
+ "bbox": [
1269
+ 86,
1270
+ 785,
1271
+ 477,
1272
+ 906
1273
+ ],
1274
+ "page_idx": 8
1275
+ },
1276
+ {
1277
+ "type": "list",
1278
+ "sub_type": "ref_text",
1279
+ "list_items": [
1280
+ "preprint arXiv:1911.03912, 2019.",
1281
+ "Baevski, A., Schneider, S., and Auli, M. vq-wav2vec: Self-supervised learning of discrete speech representations. In ICLR, 2020a.",
1282
+ "Baevski, A., Zhou, H., Mohamed, A., and Auli, M. wav2vec 2.0: A framework for self-supervised learning of speech representations. arXiv preprint arXiv:2006.11477, 2020b.",
1283
+ "Bai, J., Li, B., Zhang, Y., Bapna, A., Siddhartha, N., Sim, K. C., and Sainath, T. N. Joint unsupervised and supervised training for multilingual asr. arXiv preprint arXiv:2111.08137, 2021.",
1284
+ "Bao, H., Dong, L., and Wei, F. Beit: Bert pre-training of image transformers, 2021.",
1285
+ "Chung, Y.-A., Zhang, Y., Han, W., Chiu, C.-C., Qin, J., Pang, R., and Wu, Y. W2v-bert: Combining contrastive learning and masked language modeling for self-supervised speech pre-training. arXiv preprint arXiv:2108.06209, 2021.",
1286
+ "Conneau, A., Baevski, A., Collobert, R., Mohamed, A., and Auli, M. Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979, 2020.",
1287
+ "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.",
1288
+ "Glorot, X. and Bengio, Y. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, volume 9 of Proceedings of Machine Learning Research, pp. 249-256. PMLR, 13-15 May 2010.",
1289
+ "Graves, A. Sequence transduction with recurrent neural networks. CoRR, abs/1211.3711, 2012.",
1290
+ "Gulati, A., Qin, J., Chiu, C.-C., Parmar, N., Zhang, Y., Yu, J., Han, W., Wang, S., Zhang, Z., Wu, Y., and Pang, R. Conformer: Convolution-augmented transformer for speech recognition, 2020.",
1291
+ "He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021.",
1292
+ "Hsu, W.-N., Bolte, B., Tsai, Y.-H. H., Lakhotia, K., Salakhutdinov, R., and Mohamed, A. HuBERT: Self-supervised speech representation learning by masked prediction of hidden units. arXiv preprint arXiv:2106.07447, 2021."
1293
+ ],
1294
+ "bbox": [
1295
+ 500,
1296
+ 84,
1297
+ 887,
1298
+ 904
1299
+ ],
1300
+ "page_idx": 8
1301
+ },
1302
+ {
1303
+ "type": "header",
1304
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
1305
+ "bbox": [
1306
+ 218,
1307
+ 56,
1308
+ 754,
1309
+ 71
1310
+ ],
1311
+ "page_idx": 8
1312
+ },
1313
+ {
1314
+ "type": "list",
1315
+ "sub_type": "ref_text",
1316
+ "list_items": [
1317
+ "Kahn, J., Rivière, M., Zheng, W., Kharitonov, E., Xu, Q., Mazare, P.-E., Karadayi, J., Liptchinsky, V., Collobert, R., Fuegen, C., Likhomanenko, T., Synnaeve, G., Joulin, A., Mohamed, A., and Dupoux, E. Libri-light: A benchmark for ASR with limited or no supervision. In ICASSP, 2020.",
1318
+ "Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. In ICLR, 2015.",
1319
+ "Li, B., Pang, R., Sainath, T. N., Gulati, A., Zhang, Y., Qin, J., Haghani, P., Huang, W. R., and Ma, M. Scaling end-to-end models for large-scale multilingual asr. arXiv preprint arXiv:2104.14830, 2021.",
1320
+ "Pratap, V., Xu, Q., Sriram, A., Synnaeve, G., and Collobert, R. Mls: A large-scale multilingual dataset for speech research. In INTERSPEECH, 2020.",
1321
+ "Sainath, T. N., He, Y., Li, B., Narayanan, A., Pang, R., Bruguier, A., Chang, S.-y., Li, W., Alvarez, R., Chen, Z., and et al. A streaming on-device end-to-end model surpassing server-side conventional model quality and latency. In ICASSP, 2020.",
1322
+ "Schneider, S., Baevski, A., Collobert, R., and Auli, M. wav2vec: Unsupervised pre-training for speech recognition. arXiv preprint arXiv:1904.05862, 2019.",
1323
+ "Schuster, M. and Nakajima, K. Japanese and Korean voice search. 2012 IEEE International Conference on Acoustics, Speech and Signal Processing, 2012.",
1324
+ "Shen, J., Nguyen, P., Wu, Y., Chen, Z., and et al. Lingvo: a modular and scalable framework for sequence-to-sequence modeling, 2019.",
1325
+ "van den Oord, A., Vinyals, O., and Kavukcuoglu, K. Neural discrete representation learning, 2018.",
1326
+ "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. Attention Is All You Need. CoRR, abs/1706.03762, 2017. URL http://arxiv.org/abs/1706.03762.",
1327
+ "Yu, J., Chiu, C.-C., Li, B., et al. FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization. In Proc. ICASSP, 2021.",
1328
+ "Zazo Candil, R., Sainath, T. N., Simko, G., and Parada, C. Feature learning with raw-waveform cldnns for voice activity detection. In *Interspeech* 2016, 2016.",
1329
+ "Zhang, Y., Qin, J., Park, D. S., Han, W., Chiu, C.-C., Pang, R., Le, Q. V., and Wu, Y. Pushing the limits of semi-supervised learning for automatic speech recognition. arXiv preprint arXiv:2010.10504, 2020."
1330
+ ],
1331
+ "bbox": [
1332
+ 86,
1333
+ 84,
1334
+ 478,
1335
+ 905
1336
+ ],
1337
+ "page_idx": 9
1338
+ },
1339
+ {
1340
+ "type": "ref_text",
1341
+ "text": "Zhang, Y., Daniel Park, S., Han, W., Qin, J., Gulati, A., Shor, J., Jansen, A., Xu, Y., Huang, Y., Wang, S., Zhou, Z., Li, B., Ma, M., Chan, W., Yu, J., Wang, Y., Cao, L., Sim, K. C., Ramabhadran, B., Sainath, T. N., Beaufays, F., Chen, Z., Le, Q. V., Chiu, C.-C., Pang, R., and Wu, Y. Bigssl: Exploring the frontier of large-scale semi-supervised learning for automatic speech recognition. arXiv preprint arXiv:2109.13226, 2021.",
1342
+ "bbox": [
1343
+ 500,
1344
+ 84,
1345
+ 887,
1346
+ 204
1347
+ ],
1348
+ "page_idx": 9
1349
+ },
1350
+ {
1351
+ "type": "header",
1352
+ "text": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition",
1353
+ "bbox": [
1354
+ 218,
1355
+ 56,
1356
+ 754,
1357
+ 71
1358
+ ],
1359
+ "page_idx": 9
1360
+ }
1361
+ ]
2202.01xxx/2202.01855/5166ae8b-4e4d-4f8b-a350-11682cc56b73_model.json ADDED
@@ -0,0 +1,1683 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.27,
8
+ 0.058,
9
+ 0.707
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2202.01855v2 [cs.CL] 29 Jun 2022"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.178,
18
+ 0.11,
19
+ 0.796,
20
+ 0.155
21
+ ],
22
+ "angle": 0,
23
+ "content": "Self-Supervised Learning with Random-Projection Quantizer for Speech Recognition"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.217,
29
+ 0.199,
30
+ 0.752,
31
+ 0.216
32
+ ],
33
+ "angle": 0,
34
+ "content": "Chung-Cheng Chiu \\(^{*1}\\) James Qin \\(^{*1}\\) Yu Zhang \\(^{1}\\) Jiahui Yu \\(^{1}\\) Yonghui Wu"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.243,
40
+ 0.243,
41
+ 0.319,
42
+ 0.259
43
+ ],
44
+ "angle": 0,
45
+ "content": "Abstract"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.118,
51
+ 0.268,
52
+ 0.443,
53
+ 0.6
54
+ ],
55
+ "angle": 0,
56
+ "content": "We present a simple and effective self-supervised learning approach for speech recognition. The approach learns a model to predict the masked speech signals, in the form of discrete labels generated with a random-projection quantizer. In particular the quantizer projects speech inputs with a randomly initialized matrix, and does a nearest-neighbor lookup in a randomly-initialized codebook. Neither the matrix nor the codebook is updated during self-supervised learning. Since the random-projection quantizer is not trained and is separated from the speech recognition model, the design makes the approach flexible and is compatible with universal speech recognition architecture. On LibriSpeech our approach achieves similar word-error-rates as previous work using self-supervised learning with non-streaming models, and provides lower word-error-rates and latency than wav2vec 2.0 and w2v-BERT with streaming models. On multilingual tasks the approach also provides significant improvement over wav2vec 2.0 and w2v-BERT."
57
+ },
58
+ {
59
+ "type": "title",
60
+ "bbox": [
61
+ 0.087,
62
+ 0.632,
63
+ 0.217,
64
+ 0.647
65
+ ],
66
+ "angle": 0,
67
+ "content": "1. Introduction"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.085,
73
+ 0.658,
74
+ 0.477,
75
+ 0.794
76
+ ],
77
+ "angle": 0,
78
+ "content": "Self-supervised learning has shown impressive improvement for the quality of the speech recognition models in recent years (Schneider et al., 2019; Baevski et al., 2020a; 2019; 2020b; Hsu et al., 2021; Zhang et al., 2020; Chung et al., 2021; Zhang et al., 2021). These learning approaches enable the model to learn from unsupervised data and combine with supervised learning to improve the recognition accuracy. The capability of learning from unsupervised data is particularly beneficial when the supervised data is limited"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.085,
84
+ 0.803,
85
+ 0.475,
86
+ 0.856
87
+ ],
88
+ "angle": 0,
89
+ "content": "*Equal contribution <Google Research, Brain Team. Correspondence to: Chung-Cheng Chiu <chungchengc@google.com>, James Qin <jamesqin@google.com>, Yu Zhang <ngyuzh@google.com>."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.085,
95
+ 0.866,
96
+ 0.475,
97
+ 0.906
98
+ ],
99
+ "angle": 0,
100
+ "content": "Proceedings of the \\(39^{th}\\) International Conference on Machine Learning, Baltimore, Maryland, USA, PMLR 162, 2022. Copyright 2022 by the author(s)."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.497,
106
+ 0.245,
107
+ 0.885,
108
+ 0.274
109
+ ],
110
+ "angle": 0,
111
+ "content": "and opens up new opportunities for low resource languages and domains."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.496,
117
+ 0.282,
118
+ 0.888,
119
+ 0.509
120
+ ],
121
+ "angle": 0,
122
+ "content": "One common design principle of self-supervised learning for speech recognition centers around learning representations. Inspired by the success of BERT (Devlin et al., 2018), one research trend in the speech community is to build BERT-inspired algorithms. One challenge in building BERT-style self-supervised learning for speech is to bridge the gap between continuous speech signals and the discrete text tokens, and a solution for addressing this issue is through learning speech representation (Schneider et al., 2019; Baevski et al., 2020b) or learning quantized representation (Baevski et al., 2020b;a; 2019; Hsu et al., 2021; Chung et al., 2021). Many previous works proposed effective algorithms for learning speech representations, and the quantized result of those learned representations showed encouraging correlation with the phoneme of the utterances."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.496,
128
+ 0.516,
129
+ 0.888,
130
+ 0.803
131
+ ],
132
+ "angle": 0,
133
+ "content": "While representation learning is a critical topic for the speech field, combining it with self-supervised learning leads to two limitations that can slow the research progress: (1) Model architecture limitation. The integration of representation learning and self-supervised learning often requires the model to act the role of providing speech representation while still being effective for the downstream tasks. An effective representation model, however, may not always be effective for the downstream tasks. For example, a good representation learning model may require accessing the future context of the utterance, while downstream tasks may require a low latency model which prohibits the access of the future context. (2) Increased complexity. The objectives of representation learning and self-supervised learning are not always aligned, and the complexity of designing both algorithms and finding their balance can impede the research development. This complexity can also motivate the field toward designing more complicated algorithms instead of finding a simple and effective alternative."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.496,
139
+ 0.811,
140
+ 0.887,
141
+ 0.901
142
+ ],
143
+ "angle": 0,
144
+ "content": "In this work we propose BERT-based Speech pre-Training with Random-projection Quantizer (BEST-RQ), a simple and effective self-supervised learning algorithm for speech recognition. The algorithm masks speech signals and feeds them to the encoder part of the speech recognition model, and the encoder learns to predict the masked region based"
145
+ }
146
+ ],
147
+ [
148
+ {
149
+ "type": "header",
150
+ "bbox": [
151
+ 0.22,
152
+ 0.058,
153
+ 0.753,
154
+ 0.071
155
+ ],
156
+ "angle": 0,
157
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
158
+ },
159
+ {
160
+ "type": "text",
161
+ "bbox": [
162
+ 0.089,
163
+ 0.086,
164
+ 0.474,
165
+ 0.326
166
+ ],
167
+ "angle": 0,
168
+ "content": "on the unmasked speech signals where the learning targets are labels provided by a random-projection quantizer. The random projection quantizer projects speech signals to a randomly initialized matrix, and finds a nearest vector in a randomly initialized codebook. The index of that vector is the target label. Neither the projection matrix nor the codebook is updated throughout the learning process. The quantizer does not require representation learning, and its separation from the model removes the limitation on the architecture design of the model. Despite its simplicity, on LibriSpeech the algorithm achieves similar results as previous work with non-streaming models, and provides better improvement with streaming models compared with previous approaches. On multilingual tasks, the algorithm exhibits further gains compared to wav2vec 2.0 (Baevski et al., 2020b) and w2v-BERT (Chung et al., 2021)."
169
+ },
170
+ {
171
+ "type": "text",
172
+ "bbox": [
173
+ 0.089,
174
+ 0.335,
175
+ 0.474,
176
+ 0.455
177
+ ],
178
+ "angle": 0,
179
+ "content": "We conduct further analysis on the relation between representation learning quality and the self-supervised learning quality, and demonstrate that the two objectives are not inherently aligned in Section 4.3 and Section 4.4. Such an observation is central to our design of self-supervised learning without representation learning, and opens up a new, less complicated research direction for self-supervised learning."
180
+ },
181
+ {
182
+ "type": "title",
183
+ "bbox": [
184
+ 0.09,
185
+ 0.476,
186
+ 0.226,
187
+ 0.49
188
+ ],
189
+ "angle": 0,
190
+ "content": "2. Related Work"
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.089,
196
+ 0.501,
197
+ 0.475,
198
+ 0.893
199
+ ],
200
+ "angle": 0,
201
+ "content": "Many of the previous work on self-supervised learning for speech recognition focus on learning speech representation. wav2vec (Schneider et al., 2019) applies contrastive learning to learn the future representation based on the past context. vq-wav2vec (Baevski et al., 2020a) uses wav2vec to learn the representations and quantizes them to discrete tokens, and performs BERT-style pre-training to further improve the representation learning. DiscreteBERT (Baevski et al., 2019) extends vq-wav2vec by fine-tuning the BERT-pre-trained model on the downstream tasks. wav2vec 2.0 (Baevski et al., 2020b) uses contrastive learning with both past and future context to predict the representation of the masked parts. HuBERT (Hsu et al., 2021) uses k-means to learn the initial quantizer that maps speech signals to discrete labels, and performs BERT-style pre-training where the inputs are masked speech signals and prediction targets are discrete labels. HuBERT further uses the pretrained model as the new quantizer to train a new iteration of the model, and repeat the process to iteratively improve the pre-training results. w2v-BERT (Chung et al., 2021) uses a sub-network of the model to perform contrastive learning to learn speech representation, and use the rest of the network to perform BERT-style pre-training. w2v-BERT trains the representation learning and the BERT-style pre-training simultaneously. Our approach distinguishes from these work in avoiding the requirement of representation learning and"
202
+ },
203
+ {
204
+ "type": "image",
205
+ "bbox": [
206
+ 0.505,
207
+ 0.085,
208
+ 0.88,
209
+ 0.302
210
+ ],
211
+ "angle": 0,
212
+ "content": null
213
+ },
214
+ {
215
+ "type": "image_caption",
216
+ "bbox": [
217
+ 0.5,
218
+ 0.335,
219
+ 0.886,
220
+ 0.432
221
+ ],
222
+ "angle": 0,
223
+ "content": "Figure 1. Overview of BEST-RQ. The approach applies random projections to project the input speech signals to a randomly initialized codebook, and map them to discrete labels through finding the nearest vector in the codebook. The pre-training objective is for the ASR encoder to take the masked input signals and predict the labels corresponding to the masked part provided by the random-projection quantizer."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.502,
229
+ 0.459,
230
+ 0.886,
231
+ 0.473
232
+ ],
233
+ "angle": 0,
234
+ "content": "separating the quantizer from the speech recognition model."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.502,
240
+ 0.481,
241
+ 0.886,
242
+ 0.661
243
+ ],
244
+ "angle": 0,
245
+ "content": "Our quantizer project input signals with a random matrix, which is similar to performing dimension reduction for the input signals. Using such quantization results as prediction target for self-supervised learning share a similar structure as the masked autoencoder (MAE) (He et al., 2021), which directly reconstruct the masked input signals. Another similar work in the computer vision community is BEiT (Bao et al., 2021), which trains a VQ-VAE (van den Oord et al., 2018) as the quantizer and use the VQ-VAE to perform BERT-style self-supervised learning. Different from these approaches, our algorithm does not require training the quantizer which further simplifies the training process."
246
+ },
247
+ {
248
+ "type": "title",
249
+ "bbox": [
250
+ 0.502,
251
+ 0.682,
252
+ 0.774,
253
+ 0.716
254
+ ],
255
+ "angle": 0,
256
+ "content": "3. Self-supervised Learning with Random-projection Quantizer"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.502,
262
+ 0.726,
263
+ 0.886,
264
+ 0.905
265
+ ],
266
+ "angle": 0,
267
+ "content": "BEST-RQ applies a random-projection quantizer to map speech signals to discrete labels to enable BERT-style pretraining for ASR encoders. The quantizer randomly initializes a matrix and a codebook, and uses the matrix to project the input speech signals and the codebook to find the nearest vector where the index of the vector is the label. The pre-training process masks the speech signals and feeds them to the ASR encoder and trains the ASR encoder to predict labels of the masked part. Both the randomly initialized matrix and codebook are fixed during the pre-training process. The input data is normalized to have 0 mean and standard deviation of 1. The normalization is critical for pre"
268
+ }
269
+ ],
270
+ [
271
+ {
272
+ "type": "header",
273
+ "bbox": [
274
+ 0.219,
275
+ 0.057,
276
+ 0.756,
277
+ 0.072
278
+ ],
279
+ "angle": 0,
280
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
281
+ },
282
+ {
283
+ "type": "text",
284
+ "bbox": [
285
+ 0.085,
286
+ 0.086,
287
+ 0.475,
288
+ 0.146
289
+ ],
290
+ "angle": 0,
291
+ "content": "venting the random projection to collapse to a small subset of codes. The framework is described in Figure 1. After the pre-training process the resulting ASR encoder is adopted to fine-tune on downstream ASR tasks."
292
+ },
293
+ {
294
+ "type": "text",
295
+ "bbox": [
296
+ 0.085,
297
+ 0.153,
298
+ 0.476,
299
+ 0.244
300
+ ],
301
+ "angle": 0,
302
+ "content": "The approach applies masks directly on the speech signal, where the masking strategy samples at every frame whether to apply masks with a fixed probability. Each mask spans from the starting frame with a fixed length. The masked parts are replaced with a noise sampled from a normal distribution with 0 mean and 0.1 standard deviation."
303
+ },
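
A rough NumPy sketch of the masking just described; `mask_prob`, `mask_len`, and `noise_std` are illustrative placeholders (the LibriSpeech experiments later quote a 400ms mask span and a 0.01 masking probability), not the paper's implementation.

```python
import numpy as np

def mask_features(feats, mask_prob=0.01, mask_len=40, noise_std=0.1, seed=0):
    """Sample a mask start at every frame with a fixed probability; each mask
    spans `mask_len` frames, and masked frames are replaced with N(0, noise_std^2)
    noise. Returns the corrupted features and the boolean mask."""
    rng = np.random.default_rng(seed)
    T, d = feats.shape
    starts = rng.random(T) < mask_prob              # per-frame Bernoulli draw
    masked = np.zeros(T, dtype=bool)
    for t in np.flatnonzero(starts):
        masked[t:t + mask_len] = True               # span clipped at the utterance end
    corrupted = feats.copy()
    corrupted[masked] = rng.normal(0.0, noise_std, size=(masked.sum(), d))
    return corrupted, masked

# toy usage: 1000 frames of 80-dim log-mel features
x = np.random.randn(1000, 80)
x_masked, mask = mask_features(x)
```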
304
+ {
305
+ "type": "title",
306
+ "bbox": [
307
+ 0.086,
308
+ 0.261,
309
+ 0.332,
310
+ 0.275
311
+ ],
312
+ "angle": 0,
313
+ "content": "3.1. Random-projection Quantizer"
314
+ },
315
+ {
316
+ "type": "text",
317
+ "bbox": [
318
+ 0.085,
319
+ 0.284,
320
+ 0.476,
321
+ 0.33
322
+ ],
323
+ "angle": 0,
324
+ "content": "Given an input vector \\( x \\) where \\( x \\) is a \\( d \\)-dimensional vector computed from speech signals, the random-projection quantizer maps \\( x \\) to discrete labels \\( y \\) through"
325
+ },
326
+ {
327
+ "type": "equation",
328
+ "bbox": [
329
+ 0.132,
330
+ 0.336,
331
+ 0.474,
332
+ 0.361
333
+ ],
334
+ "angle": 0,
335
+ "content": "\\[\ny = \\underset {i} {\\operatorname {a r g m i n}} | | \\operatorname {n o r m} _ {l 2} \\left(c _ {i}\\right) - \\operatorname {n o r m} _ {l 2} (A x) | |, \\tag {1}\n\\]"
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.085,
341
+ 0.366,
342
+ 0.476,
343
+ 0.503
344
+ ],
345
+ "angle": 0,
346
+ "content": "where \\( A \\) denotes a randomly initialized \\( h \\times d \\) matrix and \\( C = \\{c_1, \\dots, c_n\\} \\) is a set of randomly initialized \\( h \\)-dimensional vectors, \\( \\text{norm}_{l2}(.) \\) is a function that normalizes the vector to have unit \\( l2 \\) norm. The projection matrix \\( A \\) use Xavier initialization (Glorot & Bengio, 2010) and the codebook \\( C \\) use standard normal distribution for initialization, and the parameters are fixed during the pre-training process and therefore the quantizations are consistent during training."
347
+ },
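
As a concrete illustration of Eq. (1), here is a minimal NumPy sketch of the random-projection quantizer, assuming stacked log-mel frames as input; the function names, the 16-dimensional codebook, and the 8192-entry vocabulary mirror the setup quoted later in the experiments, but the code itself is an illustrative sketch rather than the paper's implementation.

```python
import numpy as np

def make_quantizer(d, h, vocab_size, seed=0):
    """Randomly initialize the projection matrix A (Xavier uniform) and the
    codebook C (standard normal). Both stay frozen during pre-training."""
    rng = np.random.default_rng(seed)
    limit = np.sqrt(6.0 / (d + h))                  # Xavier / Glorot uniform bound
    A = rng.uniform(-limit, limit, size=(h, d))
    C = rng.standard_normal(size=(vocab_size, h))
    return A, C

def quantize(x, A, C):
    """Map frames x [T, d] to discrete labels y [T] via Eq. (1)."""
    proj = x @ A.T                                              # [T, h]
    proj = proj / (np.linalg.norm(proj, axis=-1, keepdims=True) + 1e-12)
    cb = C / (np.linalg.norm(C, axis=-1, keepdims=True) + 1e-12)  # unit-norm codebook
    # squared l2 distance between each projected frame and every code vector
    dists = ((proj[:, None, :] - cb[None, :, :]) ** 2).sum(-1)  # [T, vocab]
    return dists.argmin(axis=-1)                                # nearest-code index

# toy usage: 100 stacked frames of 80-dim log-mel features (d = 320)
A, C = make_quantizer(d=320, h=16, vocab_size=8192)
labels = quantize(np.random.randn(100, 320), A, C)
```

Because `A` and `C` are created once and never updated, the same input always maps to the same label, which keeps the pre-training targets consistent across steps.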
348
+ {
349
+ "type": "title",
350
+ "bbox": [
351
+ 0.086,
352
+ 0.519,
353
+ 0.208,
354
+ 0.535
355
+ ],
356
+ "angle": 0,
357
+ "content": "3.2. Pre-training"
358
+ },
359
+ {
360
+ "type": "text",
361
+ "bbox": [
362
+ 0.085,
363
+ 0.543,
364
+ 0.476,
365
+ 0.664
366
+ ],
367
+ "angle": 0,
368
+ "content": "The pre-training process adds a softmax layer on top of the ASR encoder to learn to predict the quantized speech labels. Since the random-projection quantizer is independent of the ASR encoder, the pre-training is flexible and can work with different architectures of the ASR encoder. We study the effectiveness of the algorithm on both non-streaming and streaming models, and in our experiments we use Conformer (Gulati et al., 2020) as the building block."
369
+ },
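
For illustration only, a NumPy sketch of this objective under the assumption that the softmax layer is a single projection `W, b` over the codebook vocabulary and that cross-entropy is computed only at masked positions; the encoder itself is left abstract, and the names are ours.

```python
import numpy as np

def masked_prediction_loss(enc_out, labels, mask, W, b):
    """Cross-entropy between softmax(enc_out @ W + b) and the quantizer labels,
    averaged over masked positions only (assumes at least one masked frame).
    enc_out: [T, D] encoder outputs; labels: [T] codebook indices;
    mask: [T] bool; W: [D, vocab]; b: [vocab]."""
    logits = enc_out[mask] @ W + b                        # [M, vocab]
    logits -= logits.max(axis=-1, keepdims=True)          # numerical stability
    log_probs = logits - np.log(np.exp(logits).sum(-1, keepdims=True))
    return -log_probs[np.arange(mask.sum()), labels[mask]].mean()
```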
370
+ {
371
+ "type": "title",
372
+ "bbox": [
373
+ 0.087,
374
+ 0.678,
375
+ 0.325,
376
+ 0.693
377
+ ],
378
+ "angle": 0,
379
+ "content": "3.2.1. NON-STREAMING MODELS"
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.085,
385
+ 0.702,
386
+ 0.476,
387
+ 0.779
388
+ ],
389
+ "angle": 0,
390
+ "content": "Since the BERT-style pre-training is designed for the non-streaming models, training with this type of architecture is straightforward where the model uses both past and future context to learn to predict the quantized labels of the masked speech signals."
391
+ },
392
+ {
393
+ "type": "title",
394
+ "bbox": [
395
+ 0.087,
396
+ 0.792,
397
+ 0.287,
398
+ 0.806
399
+ ],
400
+ "angle": 0,
401
+ "content": "3.2.2 STREAMING MODELS"
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.085,
407
+ 0.815,
408
+ 0.476,
409
+ 0.907
410
+ ],
411
+ "angle": 0,
412
+ "content": "In addition to the non-streaming models, streaming architecture also plays a critical role for the speech recognition tasks as many of the applications require transcribing speakers' utterances with low-latency (Sainath et al., 2020). Streaming architecture however is less well-studied in the previous self-supervised learning work compared to the non-streaming ar"
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.497,
418
+ 0.085,
419
+ 0.887,
420
+ 0.176
421
+ ],
422
+ "angle": 0,
423
+ "content": "chitecture. Moreover, many of the previous self-supervised learning approaches specify a pre-training setup that takes both the previous and future context, making it a question of how one can generalize the approaches to streaming models. We proposed two pre-training algorithms that are compatible with the streaming architecture:"
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.497,
429
+ 0.184,
430
+ 0.887,
431
+ 0.289
432
+ ],
433
+ "angle": 0,
434
+ "content": "Streaming pre-train. As our algorithm does not require learning quantization and focuses only on training the ASR encoder, this approach largely benefits the streaming models. Pre-training for streaming models follows the same setup as non-streaming models but the ASR encoder now learns to predict the quantized labels of the masked part based only on the past context."
435
+ },
436
+ {
437
+ "type": "text",
438
+ "bbox": [
439
+ 0.497,
440
+ 0.297,
441
+ 0.887,
442
+ 0.418
443
+ ],
444
+ "angle": 0,
445
+ "content": "Non-Streaming pre-train. Given that the neural network architecture like Transformer/Conformer allows switching from non-streaming to streaming behaviors by adding a mask for the future context within the same model, one can also perform pre-training with non-streaming setup for streaming models. Our algorithm provides benefits for streaming models with both non-streaming and streaming pre-training."
446
+ },
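
A small sketch of that idea, assuming a plain boolean attention mask: the same layer can be pre-trained in non-streaming mode (full context) or streaming mode (past-only, here with a hypothetical limited left-context window) just by changing the mask.

```python
import numpy as np

def attention_mask(T, streaming, left_context=64):
    """Return a [T, T] boolean mask; True means position j is visible to query i."""
    i = np.arange(T)[:, None]
    j = np.arange(T)[None, :]
    if not streaming:
        return np.ones((T, T), dtype=bool)         # full past + future context
    return (j <= i) & (j >= i - left_context)      # past-only, limited window
```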
447
+ {
448
+ "type": "title",
449
+ "bbox": [
450
+ 0.498,
451
+ 0.434,
452
+ 0.614,
453
+ 0.45
454
+ ],
455
+ "angle": 0,
456
+ "content": "3.3. Fine-tuning"
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.497,
462
+ 0.458,
463
+ 0.887,
464
+ 0.625
465
+ ],
466
+ "angle": 0,
467
+ "content": "After the pre-training, the approach initializes the encoder of the downstream ASR from the pre-trained model, and fine-tunes on the supervised set. The softmax layer added on top of the encoder during the pre-training process is not used in fine-tuning. We focus on end-to-end models with RNN transducers (Graves, 2012), where the decoder uses LSTMs for the prediction network. On constructing the encoder, an additional projection layer is added on top of the pre-trained encoder to help it adapt to the downstream ASR task. The training process also updates the encoder during the supervised fine-tuning."
468
+ },
469
+ {
470
+ "type": "title",
471
+ "bbox": [
472
+ 0.498,
473
+ 0.64,
474
+ 0.8,
475
+ 0.671
476
+ ],
477
+ "angle": 0,
478
+ "content": "3.4. Understanding the Effectiveness of the Random-projection Quantizer"
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.497,
484
+ 0.679,
485
+ 0.887,
486
+ 0.906
487
+ ],
488
+ "angle": 0,
489
+ "content": "Our algorithm uses a random-projection quantizer for self-supervised learning, and such a design raises two questions: how good is the resulting quantization quality with this quantizer and how much does the quantization quality affect the effectiveness of the self-supervised learning? We address these two questions through comparing our quantizer with VQ-VAEs. Using random-projections for quantizing speech signals shares some similarity as VQ-VAEs. The random projection performs dimension reduction for the speech signals while the random codebook provides an approximated discrete representation of the speech data distribution. VQ-VAEs also provide a discrete representation for the speech signals, but do so by learning a representation in the latent space that best preserves the speech data. Thus, comparing with VQ-VAEs gives us insight on the quantization quality"
490
+ }
491
+ ],
492
+ [
493
+ {
494
+ "type": "header",
495
+ "bbox": [
496
+ 0.22,
497
+ 0.058,
498
+ 0.753,
499
+ 0.071
500
+ ],
501
+ "angle": 0,
502
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
503
+ },
504
+ {
505
+ "type": "table_caption",
506
+ "bbox": [
507
+ 0.086,
508
+ 0.094,
509
+ 0.867,
510
+ 0.108
511
+ ],
512
+ "angle": 0,
513
+ "content": "Table 1. LibriSpeech results with non-streaming models. The LM used in our experiment is a Transformer LM with model size 0.1B."
514
+ },
515
+ {
516
+ "type": "table",
517
+ "bbox": [
518
+ 0.088,
519
+ 0.108,
520
+ 0.884,
521
+ 0.238
522
+ ],
523
+ "angle": 0,
524
+ "content": "<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Size (B)</td><td colspan=\"4\">No LM</td><td colspan=\"4\">With LM</td></tr><tr><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td></tr><tr><td>wav2vec 2.0 (Baevski et al., 2020b)</td><td>0.3</td><td>2.1</td><td>4.5</td><td>2.2</td><td>4.5</td><td>1.6</td><td>3.0</td><td>1.8</td><td>3.3</td></tr><tr><td>HuBERT Large (Hsu et al., 2021)</td><td>0.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>1.5</td><td>3.0</td><td>1.9</td><td>3.3</td></tr><tr><td>HuBERT X-Large (Hsu et al., 2021)</td><td>1.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>1.5</td><td>2.5</td><td>1.8</td><td>2.9</td></tr><tr><td>w2v-Conformer XL (Zhang et al., 2020)</td><td>0.6</td><td>1.7</td><td>3.5</td><td>1.7</td><td>3.5</td><td>1.6</td><td>3.2</td><td>1.5</td><td>3.2</td></tr><tr><td>w2v-BERT XL (Chung et al., 2021)</td><td>0.6</td><td>1.5</td><td>2.9</td><td>1.5</td><td>2.9</td><td>1.4</td><td>2.8</td><td>1.5</td><td>2.8</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td><td>1.4</td><td>2.6</td><td>1.5</td><td>2.7</td></tr></table>"
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.089,
530
+ 0.262,
531
+ 0.473,
532
+ 0.292
533
+ ],
534
+ "angle": 0,
535
+ "content": "of our quantizer and the effect of representation learning for self-supervised learning."
536
+ },
537
+ {
538
+ "type": "text",
539
+ "bbox": [
540
+ 0.089,
541
+ 0.3,
542
+ 0.474,
543
+ 0.525
544
+ ],
545
+ "angle": 0,
546
+ "content": "We demonstrate that the quantization quality of the random-projection quantizer is not ideal but yet effective for self-supervised learning by comparing it with VQ-VAE-based quantizations in Section 4.3. We also show that the gap in the quantization quality is less an issue with the increase of the unsupervised data in Section 4.4. The main objective of self-supervised learning for speech recognition is to train the model to learn contextual information. The random-projection quantizer preserve the distribution of the speech data, and in order for the model to learn to predict the quantized token based on unmasked signals, the model needs to learn to process the raw signals and infer the contextual information among speech data. Such a criterion allows the model to perform effective self-supervised learning with a random-projection quantizer."
547
+ },
548
+ {
549
+ "type": "title",
550
+ "bbox": [
551
+ 0.089,
552
+ 0.546,
553
+ 0.216,
554
+ 0.562
555
+ ],
556
+ "angle": 0,
557
+ "content": "4. Experiments"
558
+ },
559
+ {
560
+ "type": "text",
561
+ "bbox": [
562
+ 0.089,
563
+ 0.572,
564
+ 0.474,
565
+ 0.676
566
+ ],
567
+ "angle": 0,
568
+ "content": "We perform self-supervised learning experiments on LibriSpeech with non-streaming and streaming models, and assess the approach on multilingual tasks with non-streaming models. We study the quantization quality of the random-projection quantizer by comparing it with the quantizer learned with VQ-VAEs. The implementation use Lingvo (Shen et al., 2019) library."
569
+ },
570
+ {
571
+ "type": "title",
572
+ "bbox": [
573
+ 0.089,
574
+ 0.694,
575
+ 0.204,
576
+ 0.708
577
+ ],
578
+ "angle": 0,
579
+ "content": "4.1. LibriSpeech"
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.089,
585
+ 0.718,
586
+ 0.474,
587
+ 0.867
588
+ ],
589
+ "angle": 0,
590
+ "content": "Following (Zhang et al., 2020), we conduct experiments on the LibriLight dataset (Kahn et al., 2020) for pre-training, and fine-tune on the LibriSpeech training set which contains 960 hours of data. The input speech signals are 80-dimensional log-mel filter bank coefficients, and each frame has the stride of 10ms. In the fine-tuning phase, the decoder has a vocab size 1024 and uses a 1024-token WordPiece model (Schuster & Nakajima, 2012) for tokenizations that is constructed from the transcripts of the LibriSpeech training set."
591
+ },
592
+ {
593
+ "type": "title",
594
+ "bbox": [
595
+ 0.502,
596
+ 0.263,
597
+ 0.735,
598
+ 0.275
599
+ ],
600
+ "angle": 0,
601
+ "content": "4.1.1. NON-STREAMING MODELS"
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.502,
607
+ 0.286,
608
+ 0.886,
609
+ 0.39
610
+ ],
611
+ "angle": 0,
612
+ "content": "We use the same architectures reported in (Zhang et al., 2020) for fair comparisons. The model has two convolution layers at the bottom which provide 4 times temporal dimension reduction for the input sequences. The rest of the layers are a stack of Conformer models. We explore 0.6B model size which is extensively studied in the previous works. The model contains 24 layers of Conformer models."
613
+ },
614
+ {
615
+ "type": "text",
616
+ "bbox": [
617
+ 0.502,
618
+ 0.399,
619
+ 0.886,
620
+ 0.548
621
+ ],
622
+ "angle": 0,
623
+ "content": "Pre-train. The pre-training uses mask length 400ms with masking probability of 0.01. The learning rate schedule uses a transformer learning rate schedule (Vaswani et al., 2017). The training of the model uses Adam optimizer (Kingma & Ba, 2015) with 0.004 peak learning rate and 25000 warm-up steps. The batch size is 2048. Since the encoder has 4 times temporal-dimension reduction, the quantization with random projections stacks every 4 frames for projections. The vocab size of the codebook is 8192 and the dimension is 16."
624
+ },
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.502,
629
+ 0.558,
630
+ 0.886,
631
+ 0.723
632
+ ],
633
+ "angle": 0,
634
+ "content": "Fine-tune. The fine-tuning model also follows the same architecture as in (Zhang et al., 2020) and use RNN Transducer (RNN-T) (Graves, 2012) for decoder with 2 layers of unidirectional LSTMs, where the hidden dimension of LSTMs are 1280. The fine-tuning process uses the Transformer learning rate schedule. Since the encoder is initialized from a pre-trained model, the fine-tuning process uses a lower learning rate for the encoder than the decoder. The encoder uses 0.0003 peak learning rate and 5000 warmup steps, while the decoder uses 0.001 peak learning rate and 1500 warmup steps."
635
+ },
636
+ {
637
+ "type": "text",
638
+ "bbox": [
639
+ 0.502,
640
+ 0.731,
641
+ 0.885,
642
+ 0.88
643
+ ],
644
+ "angle": 0,
645
+ "content": "The results of pre-training with LibriLight and fine-tuning on LibriSpeech, along with comparisons with previous works, are shown in Table 1. Our results with LM use shallow fusion to incorporate the LM. The LM is a 0.1B Transformer model trained on the LibriSpeech LM corpus, and the model has 8 layers, 1024 model dimension, and 4096 feed-forward network dimension. By using the same architecture and similar optimization strategy as (Zhang et al., 2020), our approach shows similar WERs as previous best results on LibriSpeech both with and without LM."
646
+ }
647
+ ],
648
+ [
649
+ {
650
+ "type": "header",
651
+ "bbox": [
652
+ 0.219,
653
+ 0.057,
654
+ 0.756,
655
+ 0.072
656
+ ],
657
+ "angle": 0,
658
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
659
+ },
660
+ {
661
+ "type": "title",
662
+ "bbox": [
663
+ 0.086,
664
+ 0.086,
665
+ 0.289,
666
+ 0.1
667
+ ],
668
+ "angle": 0,
669
+ "content": "4.1.2. STREAMING MODELS"
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.085,
675
+ 0.109,
676
+ 0.477,
677
+ 0.29
678
+ ],
679
+ "angle": 0,
680
+ "content": "The architecture we use for the streaming experiments follows a similar design as previous work for building streaming ASRs (Yu et al., 2021). We scale the model size to be also 0.6B to be consistent with the non-streaming experiments. The architecture has 3 Conformer layers at the bottom, followed by a stacking layer with 2 times temporal-dimension reduction and 20 Conformer layers on top of the stacking layer. The Conformer has 1024 hidden dimension for the self-attention layer and 4096 for the feed-forward layers. The self-attention layer attends to the current and the previous 64 frames, and the convolution has a kernel that covers the current and the past 3 frames."
681
+ },
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.085,
686
+ 0.298,
687
+ 0.475,
688
+ 0.343
689
+ ],
690
+ "angle": 0,
691
+ "content": "The training setup is mostly the same as the 0.6B model in the non-streaming experiments, with some changes on the masking ratio for different pre-training approaches."
692
+ },
693
+ {
694
+ "type": "text",
695
+ "bbox": [
696
+ 0.085,
697
+ 0.351,
698
+ 0.476,
699
+ 0.412
700
+ ],
701
+ "angle": 0,
702
+ "content": "Streaming pre-train. The streaming pre-training uses the same setup as the original architecture, and the mask length is 300ms and the masking probability is 0.02. The random-projection quantizer stacks every 2 frames for projections."
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.085,
708
+ 0.419,
709
+ 0.476,
710
+ 0.553
711
+ ],
712
+ "angle": 0,
713
+ "content": "Non-streaming pre-train. The non-streaming pre-training extends the original architecture to have access for future context by having the convolution kernel within the Conformer layer to have access for the future 3 frames. The self-attention is still limited to having access only for the previous context. We also explored having future context access for the self-attention, but this setup tends to be less stable. The masking length is \\(400\\mathrm{ms}\\) and the masking probability is 0.02."
714
+ },
715
+ {
716
+ "type": "text",
717
+ "bbox": [
718
+ 0.085,
719
+ 0.562,
720
+ 0.475,
721
+ 0.682
722
+ ],
723
+ "angle": 0,
724
+ "content": "Fine-tune. The fine-tuning ASR model uses RNN-T for decoder with a layer of unidirectional LSTM, where the hidden dimension of the LSTM is 640. The training setup is the same as the fine-tuning config for the 0.6B model in the non-streaming experiments. When initializing from a non-streaming pre-trained model, the convolution only uses the kernel weight that accesses the previous context to keep the model streaming."
725
+ },
726
+ {
727
+ "type": "text",
728
+ "bbox": [
729
+ 0.085,
730
+ 0.69,
731
+ 0.476,
732
+ 0.901
733
+ ],
734
+ "angle": 0,
735
+ "content": "Latency measurement. A streaming model can learn to delay its prediction to access the future context and improve the prediction accuracy, and therefore it is critical to measure the latency of the streaming models to see whether the model maintains similar latency. This assessment helps us identify whether the underlying approach provides real improvement instead of trading off latency for prediction accuracy. Our latency comparison process first calculates the starting time and ending time of every word for each hypothesis generated by the two models. The comparison then aligns the hypotheses from the two models, finds the matching words and calculates the difference of their starting and ending time. The relative latency measurement is the average word timing difference of all matched words"
736
+ },
737
+ {
738
+ "type": "text",
739
+ "bbox": [
740
+ 0.497,
741
+ 0.085,
742
+ 0.886,
743
+ 0.115
744
+ ],
745
+ "angle": 0,
746
+ "content": "between the two models among all utterances. Specifically the relative latency is calculated with"
747
+ },
748
+ {
749
+ "type": "equation",
750
+ "bbox": [
751
+ 0.603,
752
+ 0.127,
753
+ 0.887,
754
+ 0.166
755
+ ],
756
+ "angle": 0,
757
+ "content": "\\[\n\\sum_ {i, j} \\frac {s _ {i j} ^ {\\prime} - s _ {i j} + e _ {i j} ^ {\\prime} - e _ {i j}}{2 N}, \\tag {2}\n\\]"
758
+ },
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.497,
763
+ 0.178,
764
+ 0.887,
765
+ 0.299
766
+ ],
767
+ "angle": 0,
768
+ "content": "where \\(i\\) denotes the index of the matched words between the two hypotheses, \\(j\\) is the utterance index, \\(s_{ij}\\) and \\(e_{ij}\\) correspond to the starting and ending time of the word from the baseline model, \\(s_{ij}'\\) and \\(e_{ij}'\\) correspond to the starting and ending time of the word from the compared model, and \\(N\\) is the total number of matched words among all utterances. A negative relative latency means the compared model has lower latency than the baseline model."
769
+ },
770
+ {
771
+ "type": "text",
772
+ "bbox": [
773
+ 0.497,
774
+ 0.306,
775
+ 0.887,
776
+ 0.655
777
+ ],
778
+ "angle": 0,
779
+ "content": "The word-error-rates and the relative latency are shown in Table 2. In this comparison experiment both wav2vec 2.0 and w2v-BERT use the same architecture, same masking and training setup as BEST-RQ. Using the conventional masking setup for wav2vec 2.0 and w2v-BERT gives worse performance. Since there is no convolution layers at the bottom, the contrastive learning use speech signals as targets. The w2v-BERT model use 12 layers for the contrastive module and 12 layers for the masked prediction module, to be consistent with the non-streaming setup (Chung et al., 2021). Our algorithm outperforms wav2vec 2.0 and w2v-BERT for both streaming and non-streaming pre-training. In particular our algorithm performs well with both pre-training, while wav2vec 2.0 and w2v-BERT favors more with non-streaming pre-training. This is likely due to the fact that the representation learning of both approaches is more compatible with non-streaming architectures. Increasing the model size from 0.1B to 0.6B results a slight increase in latency, but models trained with self-supervised learning algorithms has lower latency with streaming pre-training giving the most significant latency reduction. This indicates that the self-supervised learning preserve the low-latency property while providing quality gain."
780
+ },
781
+ {
782
+ "type": "title",
783
+ "bbox": [
784
+ 0.497,
785
+ 0.663,
786
+ 0.663,
787
+ 0.679
788
+ ],
789
+ "angle": 0,
790
+ "content": "4.2. Multilingual Tasks"
791
+ },
792
+ {
793
+ "type": "text",
794
+ "bbox": [
795
+ 0.496,
796
+ 0.687,
797
+ 0.886,
798
+ 0.732
799
+ ],
800
+ "angle": 0,
801
+ "content": "We present multilingual results in this section. We use the same model setup as the LibriSpeech non-streaming experiment for these tasks."
802
+ },
803
+ {
804
+ "type": "title",
805
+ "bbox": [
806
+ 0.497,
807
+ 0.747,
808
+ 0.589,
809
+ 0.761
810
+ ],
811
+ "angle": 0,
812
+ "content": "4.2.1. DATA"
813
+ },
814
+ {
815
+ "type": "text",
816
+ "bbox": [
817
+ 0.496,
818
+ 0.77,
819
+ 0.887,
820
+ 0.906
821
+ ],
822
+ "angle": 0,
823
+ "content": "Multilingual LibriSpeech (MLS-10hrs) The Multilingual LibriSpeech dataset (Pratap et al., 2020) is a large corpus derived from read audiobooks of Librivox and consists of 8 languages: Dutch (du), English (en), French (fr), German (de), Italian (it), Polish (pl), Portuguese (pt), Spanish (es). The latest version of this corpus contains around 50k hours including 44k hours in English. We use the official 10 hours split of training data to evaluate few-shot learning capabilities."
824
+ }
825
+ ],
826
+ [
827
+ {
828
+ "type": "header",
829
+ "bbox": [
830
+ 0.219,
831
+ 0.057,
832
+ 0.756,
833
+ 0.072
834
+ ],
835
+ "angle": 0,
836
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
837
+ },
838
+ {
839
+ "type": "table_caption",
840
+ "bbox": [
841
+ 0.084,
842
+ 0.094,
843
+ 0.888,
844
+ 0.151
845
+ ],
846
+ "angle": 0,
847
+ "content": "Table 2. LibriSpeech results compared with previous works with the same streaming architecture, and use LibriLight set for pre-training and LibriSpeech 960h set for fine-tuning. The relative latency (the lower the better) is the average difference of the word prediction time when comparing with the baseline Conformer 0.1B model. Our algorithm outperforms wav2vec 2.0 and w2v-BERT on both WERs and latency."
848
+ },
849
+ {
850
+ "type": "table",
851
+ "bbox": [
852
+ 0.142,
853
+ 0.156,
854
+ 0.835,
855
+ 0.354
856
+ ],
857
+ "angle": 0,
858
+ "content": "<table><tr><td>Method</td><td>Size (B)</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>Relative latency (ms)</td></tr><tr><td>Conformer 0.1B</td><td>0.1</td><td>4.1</td><td>10.3</td><td>4.5</td><td>9.8</td><td>0</td></tr><tr><td>Conformer 0.6B</td><td>0.6</td><td>3.9</td><td>9.8</td><td>4.4</td><td>9.4</td><td>15.3</td></tr><tr><td colspan=\"7\">Non-Streaming pre-train</td></tr><tr><td>wav2vec 2.0</td><td>0.6</td><td>2.6</td><td>7.3</td><td>3.0</td><td>7.2</td><td>-10.1</td></tr><tr><td>w2v-BERT</td><td>0.6</td><td>2.8</td><td>7.2</td><td>3.3</td><td>6.9</td><td>-0.7</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>2.5</td><td>6.9</td><td>2.8</td><td>6.6</td><td>-16.3</td></tr><tr><td colspan=\"7\">Streaming pre-train</td></tr><tr><td>wav2vec 2.0</td><td>0.6</td><td>2.7</td><td>8.0</td><td>2.9</td><td>7.9</td><td>-130.6</td></tr><tr><td>w2v-BERT</td><td>0.6</td><td>2.7</td><td>8.4</td><td>3.0</td><td>8.1</td><td>-117.1</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>2.5</td><td>6.9</td><td>2.8</td><td>6.6</td><td>-130.9</td></tr></table>"
859
+ },
860
+ {
861
+ "type": "text",
862
+ "bbox": [
863
+ 0.085,
864
+ 0.377,
865
+ 0.477,
866
+ 0.515
867
+ ],
868
+ "angle": 0,
869
+ "content": "Multilingual Voice Search (VS-1000hrs) Our high resource finetune datasets is multilingual Voice Search dataset (Li et al., 2021). We sample random 1000 hour subsets (VS-1000h) across 15 languages, including English (US), English (IN), Spanish (US), Portuguese (BR), Spanish (ES), Arabic (GULF), Arabic (EG), Hindi (IN), Marathi (IN), Bengali (BD), Chinese (TW), Russian (RU), Turkish (TR), Hungarian (HU), and Malay (MY). The test set for each language contains around 3–19K utterances."
870
+ },
871
+ {
872
+ "type": "text",
873
+ "bbox": [
874
+ 0.085,
875
+ 0.532,
876
+ 0.478,
877
+ 0.684
878
+ ],
879
+ "angle": 0,
880
+ "content": "XLS-R unsupervised data (XLS-R -U) Our public unlabeled speech data follows the pre-training data used for XLS-R (Babu et al., 2021) with one major difference: we do not use any data from VoxLingua-107 due to license constraint. In total, we utilize approximately \\(429k\\) hours of unlabeled speech data in \\(51^{1}\\) languages. As a consequence our model is pre-trained on speech from 51 languages as compared to 128 for XLS-R, and our pre-training set is smaller by \\(6.6k\\) hours. We use this pretrain data on MLS-10hrs to compare with published results."
881
+ },
882
+ {
883
+ "type": "text",
884
+ "bbox": [
885
+ 0.085,
886
+ 0.701,
887
+ 0.478,
888
+ 0.884
889
+ ],
890
+ "angle": 0,
891
+ "content": "Youtube unsupervised data (YT-U) Following (Zhang et al., 2021), we collected a multilingual Youtube dataset for pretraining. For each language we prepare an unlabeled YouTube dataset segmented using voice activation detection (VAD (Zazo Candil et al., 2016)). The number of hours per languages are: English (800k hrs), Spanish (800k hrs), Marathi (600k hrs), Portuguese (800k hrs), Russian (800k), Arabic (800k), Hindi (800k), Chinese (800k), Malay (250k), Turkish (800k), Bengali (800k), Hugarian (300k). In practice, we found this data performs much better than XLS-R -U on VS-1000hrs. Thus, we use this pretrain data on VS-1000hrs to compare the performance of different pretrain"
892
+ },
893
+ {
894
+ "type": "text",
895
+ "bbox": [
896
+ 0.498,
897
+ 0.379,
898
+ 0.563,
899
+ 0.392
900
+ ],
901
+ "angle": 0,
902
+ "content": "methods."
903
+ },
904
+ {
905
+ "type": "title",
906
+ "bbox": [
907
+ 0.498,
908
+ 0.408,
909
+ 0.729,
910
+ 0.422
911
+ ],
912
+ "angle": 0,
913
+ "content": "4.2.2. RESULTS ON MLS-10HRS"
914
+ },
915
+ {
916
+ "type": "text",
917
+ "bbox": [
918
+ 0.496,
919
+ 0.431,
920
+ 0.889,
921
+ 0.644
922
+ ],
923
+ "angle": 0,
924
+ "content": "We conduct our multilingual low resource finetune experiments on MLS-10hrs. We use XLS-R -U as pretraining data and finetune it on MLS-10hrs. As shown in Table 3, our baseline w2v-BERT already outperform previous strong model from XLS-R(2B) (Babu et al., 2021). The average WER further bring down by \\(3\\%\\) relative by using the proposed BEST-RQ. This demonstrate a simple random-projection quantizer is also effective for multilingual pretraining. We also report finetune results on the MLS full supervised data. Interestingly, with more finetune data, BEST-RQ perform even better than w2v-BERT, especially for pt and pl. Our results also comparable with previously state-of-the-art results in (Bai et al., 2021) which conduct joint training for multilingual ASR."
925
+ },
926
+ {
927
+ "type": "text",
928
+ "bbox": [
929
+ 0.496,
930
+ 0.65,
931
+ 0.889,
932
+ 0.741
933
+ ],
934
+ "angle": 0,
935
+ "content": "While fine-tuning with MLS-full and MLS-10hrs both exhibit improvement compared to existing approaches, fine-tuning with MLS-full provides more relative improvement. This likely implies that pre-training with random-projection quantizers is more effective when there is more fine-tuning data."
936
+ },
937
+ {
938
+ "type": "title",
939
+ "bbox": [
940
+ 0.497,
941
+ 0.756,
942
+ 0.749,
943
+ 0.769
944
+ ],
945
+ "angle": 0,
946
+ "content": "4.2.3. RESULTS ON VOICE SEARCH"
947
+ },
948
+ {
949
+ "type": "text",
950
+ "bbox": [
951
+ 0.496,
952
+ 0.779,
953
+ 0.889,
954
+ 0.903
955
+ ],
956
+ "angle": 0,
957
+ "content": "To understand how the proposed model work for high resource (1000hrs per language), we pretrain our model on YT-U and finetune it on VS-1000hrs. We can see with more finetune data, the relative improvement is smaller compared with no pretrain baseline. However, our proposed BEST-RQ consistently outperform w2v-BERT and wav2vec 2.0 by \\(9\\%\\) and \\(5\\%\\) relatively. Compare to w2v-BERT, our proposed method outperform on all the languages. Among the 15"
958
+ },
959
+ {
960
+ "type": "page_footnote",
961
+ "bbox": [
962
+ 0.107,
963
+ 0.891,
964
+ 0.461,
965
+ 0.906
966
+ ],
967
+ "angle": 0,
968
+ "content": "<sup>1</sup>Counting languages with more than 1 hour of speech data."
969
+ }
970
+ ],
971
+ [
972
+ {
973
+ "type": "header",
974
+ "bbox": [
975
+ 0.22,
976
+ 0.058,
977
+ 0.753,
978
+ 0.071
979
+ ],
980
+ "angle": 0,
981
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
982
+ },
983
+ {
984
+ "type": "table_caption",
985
+ "bbox": [
986
+ 0.277,
987
+ 0.094,
988
+ 0.698,
989
+ 0.108
990
+ ],
991
+ "angle": 0,
992
+ "content": "Table 3. Test setWER \\((\\%)\\) comparisons on the MLS full and 10hrs set."
993
+ },
994
+ {
995
+ "type": "table",
996
+ "bbox": [
997
+ 0.097,
998
+ 0.108,
999
+ 0.875,
1000
+ 0.391
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "<table><tr><td rowspan=\"2\">Exp.</td><td colspan=\"8\">Languages</td><td rowspan=\"2\">Avg.</td></tr><tr><td>en</td><td>de</td><td>nl</td><td>fr</td><td>es</td><td>it</td><td>pt</td><td>pl</td></tr><tr><td>MLS-full</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>wav2vec 2.0 from XLSR-53 (Conneau et al., 2020)</td><td>-</td><td>7.0</td><td>10.8</td><td>7.6</td><td>6.3</td><td>10.4</td><td>14.7</td><td>17.2</td><td>10.6</td></tr><tr><td>w2v-BERT from JUST (Bai et al., 2021)</td><td>6.6</td><td>4.3</td><td>9.9</td><td>5.0</td><td>3.8</td><td>9.1</td><td>14.6</td><td>8.1</td><td>7.8</td></tr><tr><td>JUST (Bai et al., 2021) (co-train)</td><td>6.5</td><td>4.1</td><td>9.5</td><td>5.2</td><td>3.7</td><td>8.8</td><td>8.0</td><td>6.6</td><td>6.5</td></tr><tr><td>w2v-BERT (0.6B)</td><td>5.5</td><td>4.3</td><td>10.9</td><td>5.6</td><td>4.5</td><td>10.1</td><td>13.4</td><td>11.2</td><td>8.2</td></tr><tr><td>BEST-RQ (Ours, 0.6B)</td><td>6.8</td><td>4.1</td><td>9.7</td><td>5.0</td><td>4.9</td><td>7.4</td><td>9.4</td><td>5.2</td><td>6.6</td></tr><tr><td>MLS-10hrs</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>XLSR-53 (Conneau et al., 2020)</td><td>14.6</td><td>8.4</td><td>12.8</td><td>12.5</td><td>8.9</td><td>13.4</td><td>18.2</td><td>21.2</td><td>13.8</td></tr><tr><td>XLS-R(0.3B) (Babu et al., 2021)</td><td>15.9</td><td>9.0</td><td>13.5</td><td>12.4</td><td>8.1</td><td>13.1</td><td>17.0</td><td>13.9</td><td>12.8</td></tr><tr><td>XLS-R(1B) (Babu et al., 2021)</td><td>12.9</td><td>7.4</td><td>11.6</td><td>10.2</td><td>7.1</td><td>12.0</td><td>15.8</td><td>10.5</td><td>10.9</td></tr><tr><td>XLS-R(2B) (Babu et al., 2021)</td><td>14.0</td><td>7.6</td><td>11.8</td><td>10.0</td><td>6.9</td><td>12.1</td><td>15.6</td><td>9.8</td><td>11.0</td></tr><tr><td>w2v-BERT (0.6B)</td><td>12.7</td><td>7.0</td><td>12.6</td><td>8.9</td><td>5.9</td><td>10.3</td><td>14.6</td><td>6.9</td><td>9.9</td></tr><tr><td>BEST-RQ (Ours, 0.6B)</td><td>12.8</td><td>7.4</td><td>12.7</td><td>9.6</td><td>5.4</td><td>9.9</td><td>12.1</td><td>7.1</td><td>9.6</td></tr></table>"
1004
+ },
1005
+ {
1006
+ "type": "table_caption",
1007
+ "bbox": [
1008
+ 0.089,
1009
+ 0.425,
1010
+ 0.473,
1011
+ 0.452
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "Table 4. Test set WER (%) comparisons using YT-U for pretrain and VS-1000hrs for finetune, across 15 languages."
1015
+ },
1016
+ {
1017
+ "type": "table",
1018
+ "bbox": [
1019
+ 0.104,
1020
+ 0.453,
1021
+ 0.453,
1022
+ 0.543
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "<table><tr><td>Exp.</td><td>Avg. on 15 langs (VS)</td></tr><tr><td>Baseline (0.6B)</td><td>12.6</td></tr><tr><td>wav2vec 2.0 (0.6B)</td><td>12.0</td></tr><tr><td>w2v-bert (0.6B)</td><td>11.5</td></tr><tr><td>BEST-RQ (Ours) (0.6B)</td><td>10.9</td></tr></table>"
1026
+ },
1027
+ {
1028
+ "type": "text",
1029
+ "bbox": [
1030
+ 0.089,
1031
+ 0.58,
1032
+ 0.473,
1033
+ 0.639
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "languages, English, Portuguese, Russian and Turkish, are improved more than \\(10\\%\\), relatively. Indic languages (Hindi, Marathi and English (IN)) are only slightly improved, all smaller than \\(3\\%\\) relatively."
1037
+ },
1038
+ {
1039
+ "type": "title",
1040
+ "bbox": [
1041
+ 0.089,
1042
+ 0.657,
1043
+ 0.341,
1044
+ 0.672
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "4.3. Analyzing Quantization Quality"
1048
+ },
1049
+ {
1050
+ "type": "text",
1051
+ "bbox": [
1052
+ 0.089,
1053
+ 0.68,
1054
+ 0.474,
1055
+ 0.905
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "As our self-supervised learning algorithm eliminates the requirement of representation learning through applying a random-projection quantizer, it is crucial to understand the representation quality of this quantizer and how the quality of the quantization affect the self-supervised learning. We analyze the quality of quantizers by training ASR models feeding labels generated by quantizing utterances as input. The performance of the resulting ASR provides us insights on the quality of the quantizer. The ASR model embeds quantized labels and feeds the embedding to a stack of Conformer layers, followed by a CTC decoder. 16 Conformer layer has feature dim 256, local self attention with 8 heads and 128 context length and kernel size 5 for lightweight convolution, in total the model size is \\(25\\mathrm{M}\\). We study the effect of representation learning through comparing with"
1059
+ },
1060
+ {
1061
+ "type": "text",
1062
+ "bbox": [
1063
+ 0.502,
1064
+ 0.416,
1065
+ 0.886,
1066
+ 0.612
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "quantizers trained with the VQ-VAE. We compare 3 types of quantizers: a) a random-projection quantizer b) a quantizer trained with VQ-VAE where the encoder has the same architecture as the random-projection quantizer and the decoder contains only a projection layer c) a trained VQ-VAE whose encoder/decoder are Transformer models. For trained quantizers, we train on the whole LibriSpeech 960 hours audio-only data, with a constant learning rate of 1e-4 and train for 400k steps with batch size 256. For all quantizers, the input frames are stacked with 3 frames on each's left, resulting in 4x input length reduction. We also use the quantizers for self-supervised learning with the LibriSpeech 0.6B non-streaming setup to compare their performance."
1070
+ },
1071
+ {
1072
+ "type": "text",
1073
+ "bbox": [
1074
+ 0.502,
1075
+ 0.62,
1076
+ 0.886,
1077
+ 0.771
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "Table 5 shows the WER on LibriSpeech 960h. Both the random-projection quantizer and the projection-based VQ-VAE quantizer lead to poor ASR performance, while the Transformer-based VQ-VAE quantizer provides a significantly better performance. This implies that the Transformer-based VQ-VAE quantizer learns a better representation. On the other hand, when using these quantizers for the purpose of self-supervised learning, all quantizers lead to similar WERs. This indicates that the quantizer quality does not translate to self-supervised learning quality."
1081
+ },
1082
+ {
1083
+ "type": "title",
1084
+ "bbox": [
1085
+ 0.502,
1086
+ 0.788,
1087
+ 0.849,
1088
+ 0.802
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "4.4. Analyzing the Effect of Pre-training Data Size"
1092
+ },
1093
+ {
1094
+ "type": "text",
1095
+ "bbox": [
1096
+ 0.502,
1097
+ 0.812,
1098
+ 0.885,
1099
+ 0.901
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "One potential explanation for the above observation, that a sub-optimal quantization can work well for self-supervised learning, is that the self-supervised learning algorithm can learn to mitigate the quality gap given sufficient amounts of pre-training data. We investigate whether a quantizer with a better quantization quality performs better when the"
1103
+ }
1104
+ ],
1105
+ [
1106
+ {
1107
+ "type": "header",
1108
+ "bbox": [
1109
+ 0.22,
1110
+ 0.058,
1111
+ 0.753,
1112
+ 0.071
1113
+ ],
1114
+ "angle": 0,
1115
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
1116
+ },
1117
+ {
1118
+ "type": "table_caption",
1119
+ "bbox": [
1120
+ 0.089,
1121
+ 0.095,
1122
+ 0.885,
1123
+ 0.148
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "Table 5. Quantizer quality's impact on ASR tasks. Although the Transformer-based quantizer gets much better performance when used as input directly, the random-projection quantizer is equally effective for self-supervised learning. The model used in the direct ASR task has size 25M. The self-supervised learning tasks use the same setup as the LibriSpeech non-streaming experiment, which use LibriLight for pre-training and LibriSpeech for fine-tuning and has 0.6B model size."
1127
+ },
1128
+ {
1129
+ "type": "table",
1130
+ "bbox": [
1131
+ 0.088,
1132
+ 0.15,
1133
+ 0.904,
1134
+ 0.247
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "<table><tr><td rowspan=\"2\">Configuration</td><td rowspan=\"2\">Quantizer size (M)</td><td colspan=\"4\">Direct ASR WER</td><td colspan=\"4\">Pretrain-finetune WER</td></tr><tr><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td></tr><tr><td>Random quantizer</td><td>1</td><td>58.8</td><td>78.8</td><td>57.9</td><td>72.8</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td></tr><tr><td>Projection VQ-VAE</td><td>1</td><td>61.4</td><td>74.8</td><td>60.9</td><td>75.2</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td></tr><tr><td>Transformer VQ-VAE</td><td>10</td><td>17.8</td><td>35.8</td><td>17.6</td><td>36.1</td><td>1.4</td><td>2.9</td><td>1.6</td><td>3.1</td></tr></table>"
1138
+ },
1139
+ {
1140
+ "type": "image_caption",
1141
+ "bbox": [
1142
+ 0.093,
1143
+ 0.271,
1144
+ 0.398,
1145
+ 0.285
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": "Librilight-pretrain, Librispeech finetune WER"
1149
+ },
1150
+ {
1151
+ "type": "image",
1152
+ "bbox": [
1153
+ 0.09,
1154
+ 0.287,
1155
+ 0.472,
1156
+ 0.449
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": null
1160
+ },
1161
+ {
1162
+ "type": "image_caption",
1163
+ "bbox": [
1164
+ 0.089,
1165
+ 0.477,
1166
+ 0.475,
1167
+ 0.573
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": "Figure 2. Comparing the self-supervised learning quality of the random-projection quantizer (rq) and the Transformer-based VQ-VAE quantizer (tvae) with different pre-training data size. Starting from low amount of pre-train data, the random-projection quantizer is behind the trained Transformer VQ-VAE quantizer. As the amount of pre-train data increases, the random-projection quantizer catches up."
1171
+ },
1172
+ {
1173
+ "type": "text",
1174
+ "bbox": [
1175
+ 0.089,
1176
+ 0.635,
1177
+ 0.475,
1178
+ 0.905
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "amount of the pre-training data is limited, and whether increasing the amount of the pre-training data alleviate the discrepancy when compared to a random-projection quantizer. In this study, we compare self-supervised learning quality between a random-projection quantizer (rq) and a trained transformer-based VQ-VAE quantizer (tvae) with different pre-training data sizes. The random quantizer is untrained, and 4 Transformer VQ-VAE quantizers are trained with \\(\\{1 / 64, 4 / 64, 16 / 64, 64 / 64\\}\\) LibriLight data, respectively. Then 4 identical random-projection quantizers and the above 4 transformer VAE quantizers are pre-trained separately with the same distinct percentages of LibriLight data as above for \\(100k\\) steps with global batch size 2048. The pretrained models fine-tune on LibriSpeech 960h. The result in Figure 2 shows that a quantizer with better representation quality (Transformer-based VQ-VAE) performs better when pre-training data is limited, but the gap disappears as the pre-training data increase."
1182
+ },
1183
+ {
1184
+ "type": "title",
1185
+ "bbox": [
1186
+ 0.502,
1187
+ 0.271,
1188
+ 0.76,
1189
+ 0.286
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "5. Conclusions and Discussions"
1193
+ },
1194
+ {
1195
+ "type": "text",
1196
+ "bbox": [
1197
+ 0.5,
1198
+ 0.296,
1199
+ 0.886,
1200
+ 0.506
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "We proposed BEST-RQ to perform self-supervised learning for speech recognition models. BEST-RQ uses a random-projection quantizer to quantize speech signals to discrete labels. The pre-training process masks the speech signals and trains the model to predict labels corresponding to the masked parts. This approach shows similar WERs as the existing state-of-the-art results on LibriSpeech with non-streaming models, and outperform wav2vec 2.0 and w2v-BERT on LibriSpeech with streaming models and on multilingual tasks with non-streaming models. Further analysis showed that despite the fact that the random-projection quantizer provides a poorer representation compared to a trained VQ-VAE quantizer, it is effective for the purpose of self-supervised learning."
1204
+ },
1205
+ {
1206
+ "type": "text",
1207
+ "bbox": [
1208
+ 0.5,
1209
+ 0.515,
1210
+ 0.886,
1211
+ 0.711
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "Our algorithm untangle the quantizer from the speech recognition model and also eliminates the requirement of representation learning. This simpler framework makes it easier to find a good recipe for the target task. The improvement on streaming models shows that the separation of the quantizer from the model makes the algorithm more effective for architectures that can be less effective for representation learning. The improvement on multilingual tasks shows that complicated tasks can benefit more from a simpler framework where finding a good recipe becomes more challenging. The quantization quality analysis implies that representation learning is not necessarily critical for self-supervised learning."
1215
+ },
1216
+ {
1217
+ "type": "text",
1218
+ "bbox": [
1219
+ 0.5,
1220
+ 0.719,
1221
+ 0.886,
1222
+ 0.9
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "Codebook utilization. One of the most critical factors for pre-training quality is the percentage of the codebook that is used during training. In particular, at each training step a higher percentage of the codebook being used in each batch correlates strongly with a good pre-training quality. When the distribution of the codebook utilization is skewed toward a smaller subset of codes, this usually makes the pre-training task easier and provides less effective pre-training. The \\( l2 \\) normalizations on the projected vector and the codebook are critical for providing more uniform codebook utilization. On the other hand, using randomly initialized codebook and projection matrix can introduce different codebook utiliza"
1226
+ }
1227
+ ],
1228
+ [
1229
+ {
1230
+ "type": "header",
1231
+ "bbox": [
1232
+ 0.219,
1233
+ 0.057,
1234
+ 0.756,
1235
+ 0.072
1236
+ ],
1237
+ "angle": 0,
1238
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
1239
+ },
1240
+ {
1241
+ "type": "text",
1242
+ "bbox": [
1243
+ 0.085,
1244
+ 0.085,
1245
+ 0.477,
1246
+ 0.192
1247
+ ],
1248
+ "angle": 0,
1249
+ "content": "tions with different random seeds, which impact the pretraining quality across different runs with same experiment configurations. This variance impacts quality more when training with smaller pre-training and fine-tuning datasets. How to reduce this reproducibility issue caused by random initialization is an important next step for improving random-projection quantizations."
1250
+ },
1251
+ {
1252
+ "type": "text",
1253
+ "bbox": [
1254
+ 0.085,
1255
+ 0.199,
1256
+ 0.478,
1257
+ 0.365
1258
+ ],
1259
+ "angle": 0,
1260
+ "content": "Hyperparameters. The pre-training quality is not very sensitive to the codebook vocab size and the codebook dimension, and is more sensitive to the masking probability and the mask length. The role of the projection layer in the random-projection quantizer is to allow using different codebook dimensions, and one can achieve similar results without the projection and set the codebook dimension to be the same as the input dimension. Due to the variance coming from the random initialization, the impact of a hyperparameter usually requires multiple runs of experiments to verify the result."
1261
+ },
1262
+ {
1263
+ "type": "text",
1264
+ "bbox": [
1265
+ 0.085,
1266
+ 0.373,
1267
+ 0.478,
1268
+ 0.525
1269
+ ],
1270
+ "angle": 0,
1271
+ "content": "Longer convergence time for non-streaming models. One observation we have is that the algorithm takes more steps to converge with non-streaming models. We still observe improvement compared to wav2vec 2.0 and w2v-BERT at the same training step on multilingual tasks, though the final convergence usually takes \\(50\\%\\) more steps. On the other hand, our training setup follows (Zhang et al., 2020), and it is unclear to us whether further hyperparameter tuning can help the model to converge faster. We did not observe the longer convergence property with streaming models."
1272
+ },
1273
+ {
1274
+ "type": "text",
1275
+ "bbox": [
1276
+ 0.085,
1277
+ 0.531,
1278
+ 0.478,
1279
+ 0.639
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "Initialization. The quantizer uses random initialization and does not update the parameters, and therefore the initialization algorithm can play an important role on the results. In this paper we showed results with Xavier initialization for the projection matrix and the standard normal distribution for the codebook, and further comparisons on different initialization algorithms can be conducted in the future work."
1283
+ },
1284
+ {
1285
+ "type": "title",
1286
+ "bbox": [
1287
+ 0.086,
1288
+ 0.656,
1289
+ 0.275,
1290
+ 0.673
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "6. Acknowledgements"
1294
+ },
1295
+ {
1296
+ "type": "text",
1297
+ "bbox": [
1298
+ 0.085,
1299
+ 0.682,
1300
+ 0.478,
1301
+ 0.743
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "We thank Wei Han and Johan Schalkwyk for helpful discussions, and Rohit Prabhavalkar, Izhak Shafran, and Hagen Soltau for insightful feedback. We also want to thank Bo Li for the help on multilingual tasks."
1305
+ },
1306
+ {
1307
+ "type": "title",
1308
+ "bbox": [
1309
+ 0.087,
1310
+ 0.762,
1311
+ 0.185,
1312
+ 0.779
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "References"
1316
+ },
1317
+ {
1318
+ "type": "ref_text",
1319
+ "bbox": [
1320
+ 0.087,
1321
+ 0.786,
1322
+ 0.478,
1323
+ 0.862
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "Babu, A., Wang, C., Tjandra, A., Lakhotia, K., Xu, Q., Goyal, N., Singh, K., von Platen, P., Saraf, Y., Pino, J., et al. Xls-r: Self-supervised cross-lingual speech representation learning at scale. arXiv preprint arXiv:2111.09296, 2021."
1327
+ },
1328
+ {
1329
+ "type": "ref_text",
1330
+ "bbox": [
1331
+ 0.087,
1332
+ 0.876,
1333
+ 0.478,
1334
+ 0.907
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "Baevski, A., Auli, M., and Mohamed, A. Effectiveness of self-supervised pre-training for speech recognition. arXiv"
1338
+ },
1339
+ {
1340
+ "type": "list",
1341
+ "bbox": [
1342
+ 0.087,
1343
+ 0.786,
1344
+ 0.478,
1345
+ 0.907
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": null
1349
+ },
1350
+ {
1351
+ "type": "ref_text",
1352
+ "bbox": [
1353
+ 0.515,
1354
+ 0.085,
1355
+ 0.741,
1356
+ 0.102
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": "preprint arXiv:1911.03912, 2019."
1360
+ },
1361
+ {
1362
+ "type": "ref_text",
1363
+ "bbox": [
1364
+ 0.5,
1365
+ 0.111,
1366
+ 0.888,
1367
+ 0.156
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": "Baevski, A., Schneider, S., and Auli, M. vq-wav2vec: Self-supervised learning of discrete speech representations. In ICLR, 2020a."
1371
+ },
1372
+ {
1373
+ "type": "ref_text",
1374
+ "bbox": [
1375
+ 0.5,
1376
+ 0.167,
1377
+ 0.888,
1378
+ 0.213
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "Baevski, A., Zhou, H., Mohamed, A., and Auli, M. wav2vec 2.0: A framework for self-supervised learning of speech representations. arXiv preprint arXiv:2006.11477, 2020b."
1382
+ },
1383
+ {
1384
+ "type": "ref_text",
1385
+ "bbox": [
1386
+ 0.5,
1387
+ 0.223,
1388
+ 0.888,
1389
+ 0.282
1390
+ ],
1391
+ "angle": 0,
1392
+ "content": "Bai, J., Li, B., Zhang, Y., Bapna, A., Siddhartha, N., Sim, K. C., and Sainath, T. N. Joint unsupervised and supervised training for multilingual asr. arXiv preprint arXiv:2111.08137, 2021."
1393
+ },
1394
+ {
1395
+ "type": "ref_text",
1396
+ "bbox": [
1397
+ 0.5,
1398
+ 0.294,
1399
+ 0.888,
1400
+ 0.324
1401
+ ],
1402
+ "angle": 0,
1403
+ "content": "Bao, H., Dong, L., and Wei, F. Beit: Bert pre-training of image transformers, 2021."
1404
+ },
1405
+ {
1406
+ "type": "ref_text",
1407
+ "bbox": [
1408
+ 0.5,
1409
+ 0.334,
1410
+ 0.888,
1411
+ 0.408
1412
+ ],
1413
+ "angle": 0,
1414
+ "content": "Chung, Y.-A., Zhang, Y., Han, W., Chiu, C.-C., Qin, J., Pang, R., and Wu, Y. W2v-bert: Combining contrastive learning and masked language modeling for self-supervised speech pre-training. arXiv preprint arXiv:2108.06209, 2021."
1415
+ },
1416
+ {
1417
+ "type": "ref_text",
1418
+ "bbox": [
1419
+ 0.5,
1420
+ 0.421,
1421
+ 0.888,
1422
+ 0.48
1423
+ ],
1424
+ "angle": 0,
1425
+ "content": "Conneau, A., Baevski, A., Collobert, R., Mohamed, A., and Auli, M. Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979, 2020."
1426
+ },
1427
+ {
1428
+ "type": "ref_text",
1429
+ "bbox": [
1430
+ 0.5,
1431
+ 0.491,
1432
+ 0.888,
1433
+ 0.55
1434
+ ],
1435
+ "angle": 0,
1436
+ "content": "Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018."
1437
+ },
1438
+ {
1439
+ "type": "ref_text",
1440
+ "bbox": [
1441
+ 0.5,
1442
+ 0.562,
1443
+ 0.888,
1444
+ 0.653
1445
+ ],
1446
+ "angle": 0,
1447
+ "content": "Glorot, X. and Bengio, Y. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, volume 9 of Proceedings of Machine Learning Research, pp. 249-256. PMLR, 13-15 May 2010."
1448
+ },
1449
+ {
1450
+ "type": "ref_text",
1451
+ "bbox": [
1452
+ 0.5,
1453
+ 0.663,
1454
+ 0.885,
1455
+ 0.693
1456
+ ],
1457
+ "angle": 0,
1458
+ "content": "Graves, A. Sequence transduction with recurrent neural networks. CoRR, abs/1211.3711, 2012."
1459
+ },
1460
+ {
1461
+ "type": "ref_text",
1462
+ "bbox": [
1463
+ 0.5,
1464
+ 0.704,
1465
+ 0.888,
1466
+ 0.764
1467
+ ],
1468
+ "angle": 0,
1469
+ "content": "Gulati, A., Qin, J., Chiu, C.-C., Parmar, N., Zhang, Y., Yu, J., Han, W., Wang, S., Zhang, Z., Wu, Y., and Pang, R. Conformer: Convolution-augmented transformer for speech recognition, 2020."
1470
+ },
1471
+ {
1472
+ "type": "ref_text",
1473
+ "bbox": [
1474
+ 0.5,
1475
+ 0.775,
1476
+ 0.888,
1477
+ 0.821
1478
+ ],
1479
+ "angle": 0,
1480
+ "content": "He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021."
1481
+ },
1482
+ {
1483
+ "type": "ref_text",
1484
+ "bbox": [
1485
+ 0.5,
1486
+ 0.831,
1487
+ 0.888,
1488
+ 0.905
1489
+ ],
1490
+ "angle": 0,
1491
+ "content": "Hsu, W.-N., Bolte, B., Tsai, Y.-H. H., Lakhotia, K., Salakhutdinov, R., and Mohamed, A. HuBERT: Self-supervised speech representation learning by masked prediction of hidden units. arXiv preprint arXiv:2106.07447, 2021."
1492
+ },
1493
+ {
1494
+ "type": "list",
1495
+ "bbox": [
1496
+ 0.5,
1497
+ 0.085,
1498
+ 0.888,
1499
+ 0.905
1500
+ ],
1501
+ "angle": 0,
1502
+ "content": null
1503
+ }
1504
+ ],
1505
+ [
1506
+ {
1507
+ "type": "header",
1508
+ "bbox": [
1509
+ 0.219,
1510
+ 0.057,
1511
+ 0.756,
1512
+ 0.072
1513
+ ],
1514
+ "angle": 0,
1515
+ "content": "Self-supervised Learning with Random-projection Quantizer for Speech Recognition"
1516
+ },
1517
+ {
1518
+ "type": "ref_text",
1519
+ "bbox": [
1520
+ 0.088,
1521
+ 0.085,
1522
+ 0.479,
1523
+ 0.162
1524
+ ],
1525
+ "angle": 0,
1526
+ "content": "Kahn, J., Rivière, M., Zheng, W., Kharitonov, E., Xu, Q., Mazare, P.-E., Karadayi, J., Liptchinsky, V., Collobert, R., Fuegen, C., Likhomanenko, T., Synnaeve, G., Joulin, A., Mohamed, A., and Dupoux, E. Libri-light: A benchmark for ASR with limited or no supervision. In ICASSP, 2020."
1527
+ },
1528
+ {
1529
+ "type": "ref_text",
1530
+ "bbox": [
1531
+ 0.088,
1532
+ 0.173,
1533
+ 0.476,
1534
+ 0.205
1535
+ ],
1536
+ "angle": 0,
1537
+ "content": "Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. In ICLR, 2015."
1538
+ },
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.088,
1543
+ 0.217,
1544
+ 0.476,
1545
+ 0.277
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "Li, B., Pang, R., Sainath, T. N., Gulati, A., Zhang, Y., Qin, J., Haghani, P., Huang, W. R., and Ma, M. Scaling end-to-end models for large-scale multilingual asr. arXiv preprint arXiv:2104.14830, 2021."
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.088,
1554
+ 0.29,
1555
+ 0.476,
1556
+ 0.335
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "Pratap, V., Xu, Q., Sriram, A., Synnaeve, G., and Collobert, R. Mls: A large-scale multilingual dataset for speech research. In INTERSPEECH, 2020."
1560
+ },
1561
+ {
1562
+ "type": "ref_text",
1563
+ "bbox": [
1564
+ 0.088,
1565
+ 0.349,
1566
+ 0.476,
1567
+ 0.424
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": "Sainath, T. N., He, Y., Li, B., Narayanan, A., Pang, R., Bruguier, A., Chang, S.-y., Li, W., Alvarez, R., Chen, Z., and et al. A streaming on-device end-to-end model surpassing server-side conventional model quality and latency. In ICASSP, 2020."
1571
+ },
1572
+ {
1573
+ "type": "ref_text",
1574
+ "bbox": [
1575
+ 0.088,
1576
+ 0.437,
1577
+ 0.476,
1578
+ 0.483
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "Schneider, S., Baevski, A., Collobert, R., and Auli, M. wav2vec: Unsupervised pre-training for speech recognition. arXiv preprint arXiv:1904.05862, 2019."
1582
+ },
1583
+ {
1584
+ "type": "ref_text",
1585
+ "bbox": [
1586
+ 0.088,
1587
+ 0.496,
1588
+ 0.476,
1589
+ 0.541
1590
+ ],
1591
+ "angle": 0,
1592
+ "content": "Schuster, M. and Nakajima, K. Japanese and Korean voice search. 2012 IEEE International Conference on Acoustics, Speech and Signal Processing, 2012."
1593
+ },
1594
+ {
1595
+ "type": "ref_text",
1596
+ "bbox": [
1597
+ 0.088,
1598
+ 0.554,
1599
+ 0.476,
1600
+ 0.6
1601
+ ],
1602
+ "angle": 0,
1603
+ "content": "Shen, J., Nguyen, P., Wu, Y., Chen, Z., and et al. Lingvo: a modular and scalable framework for sequence-to-sequence modeling, 2019."
1604
+ },
1605
+ {
1606
+ "type": "ref_text",
1607
+ "bbox": [
1608
+ 0.088,
1609
+ 0.612,
1610
+ 0.476,
1611
+ 0.643
1612
+ ],
1613
+ "angle": 0,
1614
+ "content": "van den Oord, A., Vinyals, O., and Kavukcuoglu, K. Neural discrete representation learning, 2018."
1615
+ },
1616
+ {
1617
+ "type": "ref_text",
1618
+ "bbox": [
1619
+ 0.088,
1620
+ 0.655,
1621
+ 0.476,
1622
+ 0.716
1623
+ ],
1624
+ "angle": 0,
1625
+ "content": "Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. Attention Is All You Need. CoRR, abs/1706.03762, 2017. URL http://arxiv.org/abs/1706.03762."
1626
+ },
1627
+ {
1628
+ "type": "ref_text",
1629
+ "bbox": [
1630
+ 0.088,
1631
+ 0.729,
1632
+ 0.476,
1633
+ 0.773
1634
+ ],
1635
+ "angle": 0,
1636
+ "content": "Yu, J., Chiu, C.-C., Li, B., et al. FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization. In Proc. ICASSP, 2021."
1637
+ },
1638
+ {
1639
+ "type": "ref_text",
1640
+ "bbox": [
1641
+ 0.088,
1642
+ 0.787,
1643
+ 0.476,
1644
+ 0.833
1645
+ ],
1646
+ "angle": 0,
1647
+ "content": "Zazo Candil, R., Sainath, T. N., Simko, G., and Parada, C. Feature learning with raw-waveform cldnns for voice activity detection. In *Interspeech* 2016, 2016."
1648
+ },
1649
+ {
1650
+ "type": "ref_text",
1651
+ "bbox": [
1652
+ 0.088,
1653
+ 0.845,
1654
+ 0.476,
1655
+ 0.906
1656
+ ],
1657
+ "angle": 0,
1658
+ "content": "Zhang, Y., Qin, J., Park, D. S., Han, W., Chiu, C.-C., Pang, R., Le, Q. V., and Wu, Y. Pushing the limits of semi-supervised learning for automatic speech recognition. arXiv preprint arXiv:2010.10504, 2020."
1659
+ },
1660
+ {
1661
+ "type": "list",
1662
+ "bbox": [
1663
+ 0.088,
1664
+ 0.085,
1665
+ 0.479,
1666
+ 0.906
1667
+ ],
1668
+ "angle": 0,
1669
+ "content": null
1670
+ },
1671
+ {
1672
+ "type": "ref_text",
1673
+ "bbox": [
1674
+ 0.5,
1675
+ 0.085,
1676
+ 0.888,
1677
+ 0.205
1678
+ ],
1679
+ "angle": 0,
1680
+ "content": "Zhang, Y., Daniel Park, S., Han, W., Qin, J., Gulati, A., Shor, J., Jansen, A., Xu, Y., Huang, Y., Wang, S., Zhou, Z., Li, B., Ma, M., Chan, W., Yu, J., Wang, Y., Cao, L., Sim, K. C., Ramabhadran, B., Sainath, T. N., Beaufays, F., Chen, Z., Le, Q. V., Chiu, C.-C., Pang, R., and Wu, Y. Bigssl: Exploring the frontier of large-scale semi-supervised learning for automatic speech recognition. arXiv preprint arXiv:2109.13226, 2021."
1681
+ }
1682
+ ]
1683
+ ]
2202.01xxx/2202.01855/5166ae8b-4e4d-4f8b-a350-11682cc56b73_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:802896c69ed94eeae5c9d631553e1b9ecbe96a692d89049c4e5a7b51b593cc26
3
+ size 663575
2202.01xxx/2202.01855/full.md ADDED
@@ -0,0 +1,249 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Self-Supervised Learning with Random-Projection Quantizer for Speech Recognition
2
+
3
+ Chung-Cheng Chiu $^{*1}$ James Qin $^{*1}$ Yu Zhang $^{1}$ Jiahui Yu $^{1}$ Yonghui Wu
4
+
5
+ # Abstract
6
+
7
+ We present a simple and effective self-supervised learning approach for speech recognition. The approach learns a model to predict the masked speech signals, in the form of discrete labels generated with a random-projection quantizer. In particular the quantizer projects speech inputs with a randomly initialized matrix, and does a nearest-neighbor lookup in a randomly-initialized codebook. Neither the matrix nor the codebook is updated during self-supervised learning. Since the random-projection quantizer is not trained and is separated from the speech recognition model, the design makes the approach flexible and is compatible with universal speech recognition architecture. On LibriSpeech our approach achieves similar word-error-rates as previous work using self-supervised learning with non-streaming models, and provides lower word-error-rates and latency than wav2vec 2.0 and w2v-BERT with streaming models. On multilingual tasks the approach also provides significant improvement over wav2vec 2.0 and w2v-BERT.
8
+
9
+ # 1. Introduction
10
+
11
+ Self-supervised learning has shown impressive improvement for the quality of the speech recognition models in recent years (Schneider et al., 2019; Baevski et al., 2020a; 2019; 2020b; Hsu et al., 2021; Zhang et al., 2020; Chung et al., 2021; Zhang et al., 2021). These learning approaches enable the model to learn from unsupervised data and combine with supervised learning to improve the recognition accuracy. The capability of learning from unsupervised data is particularly beneficial when the supervised data is limited
12
+
13
+ *Equal contribution. $^{1}$Google Research, Brain Team. Correspondence to: Chung-Cheng Chiu <chungchengc@google.com>, James Qin <jamesqin@google.com>, Yu Zhang <ngyuzh@google.com>.
14
+
15
+ Proceedings of the $39^{th}$ International Conference on Machine Learning, Baltimore, Maryland, USA, PMLR 162, 2022. Copyright 2022 by the author(s).
16
+
17
+ and opens up new opportunities for low resource languages and domains.
18
+
19
+ One common design principle of self-supervised learning for speech recognition centers around learning representations. Inspired by the success of BERT (Devlin et al., 2018), one research trend in the speech community is to build BERT-inspired algorithms. One challenge in building BERT-style self-supervised learning for speech is to bridge the gap between continuous speech signals and the discrete text tokens, and a solution for addressing this issue is through learning speech representation (Schneider et al., 2019; Baevski et al., 2020b) or learning quantized representation (Baevski et al., 2020b;a; 2019; Hsu et al., 2021; Chung et al., 2021). Many previous works proposed effective algorithms for learning speech representations, and the quantized result of those learned representations showed encouraging correlation with the phoneme of the utterances.
20
+
21
+ While representation learning is a critical topic for the speech field, combining it with self-supervised learning leads to two limitations that can slow the research progress: (1) Model architecture limitation. The integration of representation learning and self-supervised learning often requires the model to act the role of providing speech representation while still being effective for the downstream tasks. An effective representation model, however, may not always be effective for the downstream tasks. For example, a good representation learning model may require accessing the future context of the utterance, while downstream tasks may require a low latency model which prohibits the access of the future context. (2) Increased complexity. The objectives of representation learning and self-supervised learning are not always aligned, and the complexity of designing both algorithms and finding their balance can impede the research development. This complexity can also motivate the field toward designing more complicated algorithms instead of finding a simple and effective alternative.
22
+
23
+ In this work we propose BERT-based Speech pre-Training with Random-projection Quantizer (BEST-RQ), a simple and effective self-supervised learning algorithm for speech recognition. The algorithm masks speech signals and feeds them to the encoder part of the speech recognition model, and the encoder learns to predict the masked region based
24
+
25
+ on the unmasked speech signals where the learning targets are labels provided by a random-projection quantizer. The random projection quantizer projects speech signals to a randomly initialized matrix, and finds a nearest vector in a randomly initialized codebook. The index of that vector is the target label. Neither the projection matrix nor the codebook is updated throughout the learning process. The quantizer does not require representation learning, and its separation from the model removes the limitation on the architecture design of the model. Despite its simplicity, on LibriSpeech the algorithm achieves similar results as previous work with non-streaming models, and provides better improvement with streaming models compared with previous approaches. On multilingual tasks, the algorithm exhibits further gains compared to wav2vec 2.0 (Baevski et al., 2020b) and w2v-BERT (Chung et al., 2021).
26
+
27
+ We conduct further analysis on the relation between representation learning quality and the self-supervised learning quality, and demonstrate that the two objectives are not inherently aligned in Section 4.3 and Section 4.4. Such an observation is central to our design of self-supervised learning without representation learning, and opens up a new, less complicated research direction for self-supervised learning.
28
+
29
+ # 2. Related Work
30
+
31
+ Many of the previous work on self-supervised learning for speech recognition focus on learning speech representation. wav2vec (Schneider et al., 2019) applies contrastive learning to learn the future representation based on the past context. vq-wav2vec (Baevski et al., 2020a) uses wav2vec to learn the representations and quantizes them to discrete tokens, and performs BERT-style pre-training to further improve the representation learning. DiscreteBERT (Baevski et al., 2019) extends vq-wav2vec by fine-tuning the BERT-pre-trained model on the downstream tasks. wav2vec 2.0 (Baevski et al., 2020b) uses contrastive learning with both past and future context to predict the representation of the masked parts. HuBERT (Hsu et al., 2021) uses k-means to learn the initial quantizer that maps speech signals to discrete labels, and performs BERT-style pre-training where the inputs are masked speech signals and prediction targets are discrete labels. HuBERT further uses the pretrained model as the new quantizer to train a new iteration of the model, and repeat the process to iteratively improve the pre-training results. w2v-BERT (Chung et al., 2021) uses a sub-network of the model to perform contrastive learning to learn speech representation, and use the rest of the network to perform BERT-style pre-training. w2v-BERT trains the representation learning and the BERT-style pre-training simultaneously. Our approach distinguishes from these work in avoiding the requirement of representation learning and
32
+
33
+ ![](images/28a10ec1267219d950b28062ad87cae4d751b3791977246d70eb5517473fef7b.jpg)
34
+ Figure 1. Overview of BEST-RQ. The approach applies random projections to project the input speech signals to a randomly initialized codebook, and map them to discrete labels through finding the nearest vector in the codebook. The pre-training objective is for the ASR encoder to take the masked input signals and predict the labels corresponding to the masked part provided by the random-projection quantizer.
35
+
36
+ separating the quantizer from the speech recognition model.
37
+
38
+ Our quantizer project input signals with a random matrix, which is similar to performing dimension reduction for the input signals. Using such quantization results as prediction target for self-supervised learning share a similar structure as the masked autoencoder (MAE) (He et al., 2021), which directly reconstruct the masked input signals. Another similar work in the computer vision community is BEiT (Bao et al., 2021), which trains a VQ-VAE (van den Oord et al., 2018) as the quantizer and use the VQ-VAE to perform BERT-style self-supervised learning. Different from these approaches, our algorithm does not require training the quantizer which further simplifies the training process.
39
+
40
+ # 3. Self-supervised Learning with Random-projection Quantizer
41
+
42
+ BEST-RQ applies a random-projection quantizer to map speech signals to discrete labels to enable BERT-style pre-training for ASR encoders. The quantizer randomly initializes a matrix and a codebook, and uses the matrix to project the input speech signals and the codebook to find the nearest vector, whose index is the label. The pre-training process masks the speech signals, feeds them to the ASR encoder, and trains the ASR encoder to predict the labels of the masked part. Both the randomly initialized matrix and codebook are fixed during the pre-training process. The input data is normalized to have 0 mean and a standard deviation of 1. The normalization is critical for preventing the random projection from collapsing to a small subset of codes.
43
+
44
+ The framework is described in Figure 1. After the pre-training process, the resulting ASR encoder is used for fine-tuning on downstream ASR tasks.
45
+
46
+ The approach applies masks directly on the speech signals: the masking strategy samples at every frame, with a fixed probability, whether to start a mask, and each mask spans a fixed length from its starting frame. The masked parts are replaced with noise sampled from a normal distribution with zero mean and 0.1 standard deviation.
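+
+ Below is a minimal NumPy sketch of this masking strategy. It is illustrative only: the function and argument names are ours, and the default values (mask probability 0.01, 400ms masks at a 10ms frame stride) follow the non-streaming setup described later in Section 4.1.1.
+
+ ```python
+ import numpy as np
+
+ def mask_features(x, mask_prob=0.01, mask_length_frames=40, noise_std=0.1, rng=None):
+     """Sample mask starts at every frame; each start spawns a fixed-length span.
+
+     x: [num_frames, feature_dim] features, already normalized to zero mean / unit std.
+     """
+     rng = rng or np.random.default_rng()
+     num_frames = x.shape[0]
+     starts = rng.random(num_frames) < mask_prob          # per-frame Bernoulli mask starts
+     mask = np.zeros(num_frames, dtype=bool)
+     for t in np.flatnonzero(starts):
+         mask[t:t + mask_length_frames] = True            # fixed-length span from each start
+     x_masked = x.copy()
+     # Replace masked frames with noise drawn from N(0, 0.1^2), as described above.
+     x_masked[mask] = rng.normal(0.0, noise_std, size=(mask.sum(), x.shape[1]))
+     return x_masked, mask
+ ```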
47
+
48
+ # 3.1. Random-projection Quantizer
49
+
50
+ Given an input vector $x$, a $d$-dimensional vector computed from the speech signals, the random-projection quantizer maps $x$ to a discrete label $y$ through
51
+
52
+ $$
53
+ y = \mathop{\mathrm{argmin}}_{i} \lVert \mathrm{norm}_{l2}(c_{i}) - \mathrm{norm}_{l2}(A x) \rVert, \tag{1}
54
+ $$
55
+
56
+ where $A$ denotes a randomly initialized $h \times d$ matrix, $C = \{c_1, \dots, c_n\}$ is a set of randomly initialized $h$-dimensional vectors, and $\text{norm}_{l2}(\cdot)$ normalizes a vector to unit $l2$ norm. The projection matrix $A$ uses Xavier initialization (Glorot & Bengio, 2010) and the codebook $C$ uses a standard normal distribution for initialization. Both are fixed during the pre-training process, so the quantization is consistent throughout training.
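+
+ The following is a minimal NumPy sketch of the quantizer defined in Eq. (1). The class name, argument names, and defaults (codebook size 8192, codebook dimension 16, taken from Section 4.1.1) are illustrative; the authors' implementation is in Lingvo and may differ in details such as the exact Xavier variant.
+
+ ```python
+ import numpy as np
+
+ class RandomProjectionQuantizer:
+     """Fixed random projection plus fixed random codebook, compared under unit l2 norm."""
+
+     def __init__(self, input_dim, codebook_size=8192, codebook_dim=16, seed=0):
+         rng = np.random.default_rng(seed)
+         # Projection matrix A (h x d), Xavier/Glorot-style uniform initialization.
+         limit = np.sqrt(6.0 / (input_dim + codebook_dim))
+         self.A = rng.uniform(-limit, limit, size=(codebook_dim, input_dim))
+         # Codebook C: n vectors drawn from a standard normal distribution, l2-normalized.
+         C = rng.standard_normal((codebook_size, codebook_dim))
+         self.C_norm = C / np.linalg.norm(C, axis=1, keepdims=True)
+
+     def __call__(self, x):
+         """x: [num_frames, input_dim] (stacked) speech features -> [num_frames] labels."""
+         proj = x @ self.A.T                                        # Ax for every frame
+         proj = proj / np.linalg.norm(proj, axis=1, keepdims=True)  # norm_l2(Ax)
+         # For unit vectors, argmin of l2 distance equals argmax of the dot product.
+         return np.argmax(proj @ self.C_norm.T, axis=1)
+ ```
+
+ Neither the projection matrix nor the codebook receives gradient updates, so the same input always maps to the same label throughout pre-training.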
57
+
58
+ # 3.2. Pre-training
59
+
60
+ The pre-training process adds a softmax layer on top of the ASR encoder to learn to predict the quantized speech labels. Since the random-projection quantizer is independent of the ASR encoder, the pre-training is flexible and can work with different architectures of the ASR encoder. We study the effectiveness of the algorithm on both non-streaming and streaming models, and in our experiments we use Conformer (Gulati et al., 2020) as the building block.
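+
+ A minimal sketch of this objective is given below: a softmax over the codebook with cross-entropy computed only at the masked positions. The shapes and names are assumptions for illustration, not the authors' implementation.
+
+ ```python
+ import numpy as np
+
+ def masked_prediction_loss(encoder_outputs, labels, mask, softmax_weights):
+     """encoder_outputs: [T, H] encoder features for one utterance,
+     labels: [T] labels from the random-projection quantizer,
+     mask: [T] bool, True where the input frames were masked,
+     softmax_weights: [H, codebook_size] weights of the added softmax layer."""
+     logits = encoder_outputs @ softmax_weights                    # [T, codebook_size]
+     logits = logits - logits.max(axis=-1, keepdims=True)          # numerical stability
+     log_probs = logits - np.log(np.exp(logits).sum(axis=-1, keepdims=True))
+     nll = -log_probs[np.arange(len(labels)), labels]              # per-frame negative log-likelihood
+     return nll[mask].mean()                                       # average over masked frames only
+ ```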
61
+
62
+ # 3.2.1. NON-STREAMING MODELS
63
+
64
+ Since the BERT-style pre-training is designed for the non-streaming models, training with this type of architecture is straightforward where the model uses both past and future context to learn to predict the quantized labels of the masked speech signals.
65
+
66
+ # 3.2.2. STREAMING MODELS
67
+
68
+ In addition to non-streaming models, streaming architectures also play a critical role in speech recognition, as many applications require transcribing speakers' utterances with low latency (Sainath et al., 2020). Streaming architectures, however, are less well studied in previous self-supervised learning work than non-streaming architectures.
69
+
70
+ Moreover, many of the previous self-supervised learning approaches specify a pre-training setup that uses both past and future context, making it unclear how to generalize these approaches to streaming models. We propose two pre-training algorithms that are compatible with the streaming architecture:
71
+
72
+ Streaming pre-train. As our algorithm does not require learning a quantizer and focuses only on training the ASR encoder, it is particularly beneficial for streaming models. Pre-training for streaming models follows the same setup as for non-streaming models, but the ASR encoder now learns to predict the quantized labels of the masked parts based only on past context.
73
+
74
+ Non-streaming pre-train. Since architectures like the Transformer/Conformer can switch between non-streaming and streaming behaviors by masking the future context within the same model, one can also pre-train streaming models with a non-streaming setup. Our algorithm provides benefits for streaming models with both non-streaming and streaming pre-training.
75
+
76
+ # 3.3. Fine-tuning
77
+
78
+ After pre-training, the approach initializes the encoder of the downstream ASR model from the pre-trained model and fine-tunes it on the supervised set. The softmax layer added on top of the encoder during pre-training is not used in fine-tuning. We focus on end-to-end models with RNN transducers (Graves, 2012), where the decoder uses LSTMs for the prediction network. When constructing the encoder, an additional projection layer is added on top of the pre-trained encoder to help it adapt to the downstream ASR task. The training process also updates the encoder during supervised fine-tuning.
79
+
80
+ # 3.4. Understanding the Effectiveness of the Random-projection Quantizer
81
+
82
+ Our algorithm uses a random-projection quantizer for self-supervised learning, and such a design raises two questions: how good is the quantization produced by this quantizer, and how much does the quantization quality affect the effectiveness of the self-supervised learning? We address these two questions by comparing our quantizer with VQ-VAEs. Using random projections to quantize speech signals shares some similarity with VQ-VAEs. The random projection performs dimension reduction on the speech signals, while the random codebook provides an approximate discrete representation of the speech data distribution. VQ-VAEs also provide a discrete representation of the speech signals, but do so by learning a representation in the latent space that best preserves the speech data. Thus, comparing with VQ-VAEs gives us insight into the quantization quality of our quantizer and the effect of representation learning on self-supervised learning.
83
+
84
+ Table 1. LibriSpeech results with non-streaming models. The LM used in our experiment is a Transformer LM with model size 0.1B.
85
+
86
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Size (B)</td><td colspan="4">No LM</td><td colspan="4">With LM</td></tr><tr><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td></tr><tr><td>wav2vec 2.0 (Baevski et al., 2020b)</td><td>0.3</td><td>2.1</td><td>4.5</td><td>2.2</td><td>4.5</td><td>1.6</td><td>3.0</td><td>1.8</td><td>3.3</td></tr><tr><td>HuBERT Large (Hsu et al., 2021)</td><td>0.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>1.5</td><td>3.0</td><td>1.9</td><td>3.3</td></tr><tr><td>HuBERT X-Large (Hsu et al., 2021)</td><td>1.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>1.5</td><td>2.5</td><td>1.8</td><td>2.9</td></tr><tr><td>w2v-Conformer XL (Zhang et al., 2020)</td><td>0.6</td><td>1.7</td><td>3.5</td><td>1.7</td><td>3.5</td><td>1.6</td><td>3.2</td><td>1.5</td><td>3.2</td></tr><tr><td>w2v-BERT XL (Chung et al., 2021)</td><td>0.6</td><td>1.5</td><td>2.9</td><td>1.5</td><td>2.9</td><td>1.4</td><td>2.8</td><td>1.5</td><td>2.8</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td><td>1.4</td><td>2.6</td><td>1.5</td><td>2.7</td></tr></table>
87
+
88
+
89
+
90
+ We demonstrate that the quantization quality of the random-projection quantizer is not ideal yet is effective for self-supervised learning by comparing it with VQ-VAE-based quantization in Section 4.3. We also show that the gap in quantization quality becomes less of an issue as the amount of unsupervised data increases in Section 4.4. The main objective of self-supervised learning for speech recognition is to train the model to learn contextual information. The random-projection quantizer preserves the distribution of the speech data, and in order for the model to predict the quantized tokens from the unmasked signals, it needs to learn to process the raw signals and infer the contextual information in the speech. This criterion allows the model to perform effective self-supervised learning with a random-projection quantizer.
91
+
92
+ # 4. Experiments
93
+
94
+ We perform self-supervised learning experiments on LibriSpeech with non-streaming and streaming models, and assess the approach on multilingual tasks with non-streaming models. We study the quantization quality of the random-projection quantizer by comparing it with a quantizer learned with VQ-VAEs. The implementation uses the Lingvo (Shen et al., 2019) library.
95
+
96
+ # 4.1. LibriSpeech
97
+
98
+ Following (Zhang et al., 2020), we conduct pre-training on the LibriLight dataset (Kahn et al., 2020) and fine-tune on the LibriSpeech training set, which contains 960 hours of data. The input speech signals are 80-dimensional log-mel filter bank coefficients with a 10ms frame stride. In the fine-tuning phase, the decoder has a vocabulary of size 1024 and uses a 1024-token WordPiece model (Schuster & Nakajima, 2012) for tokenization, constructed from the transcripts of the LibriSpeech training set.
99
+
100
+ # 4.1.1. NON-STREAMING MODELS
101
+
102
+ We use the same architectures reported in (Zhang et al., 2020) for fair comparison. The model has two convolution layers at the bottom, which provide 4x temporal dimension reduction for the input sequences. The rest of the model is a stack of Conformer layers. We explore the 0.6B model size, which has been extensively studied in previous work; the model contains 24 Conformer layers.
103
+
104
+ Pre-train. The pre-training uses a mask length of 400ms with a masking probability of 0.01. Training uses the Transformer learning rate schedule (Vaswani et al., 2017) and the Adam optimizer (Kingma & Ba, 2015) with a peak learning rate of 0.004 and 25000 warm-up steps. The batch size is 2048. Since the encoder has 4x temporal-dimension reduction, the random-projection quantizer stacks every 4 frames for projection. The codebook vocab size is 8192 and the codebook dimension is 16.
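+
+ For reference, one common way to express the Transformer schedule with an explicit peak learning rate and warm-up is sketched below; the exact parameterization used in these experiments may differ, so treat this as an assumption.
+
+ ```python
+ import math
+
+ def transformer_lr(step, peak_lr=0.004, warmup_steps=25000):
+     """Linear warm-up to peak_lr over warmup_steps, then inverse-square-root decay."""
+     step = max(step, 1)
+     return peak_lr * min(step / warmup_steps, math.sqrt(warmup_steps / step))
+ ```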
105
+
106
+ Fine-tune. The fine-tuning model also follows the same architecture as (Zhang et al., 2020) and uses an RNN Transducer (RNN-T) (Graves, 2012) decoder with 2 layers of unidirectional LSTMs, where the hidden dimension of the LSTMs is 1280. The fine-tuning process uses the Transformer learning rate schedule. Since the encoder is initialized from a pre-trained model, fine-tuning uses a lower learning rate for the encoder than for the decoder: the encoder uses a peak learning rate of 0.0003 with 5000 warmup steps, while the decoder uses a peak learning rate of 0.001 with 1500 warmup steps.
107
+
108
+ The results of pre-training on LibriLight and fine-tuning on LibriSpeech, along with comparisons with previous works, are shown in Table 1. Our results with LM use shallow fusion to incorporate the LM. The LM is a 0.1B Transformer model trained on the LibriSpeech LM corpus, with 8 layers, model dimension 1024, and feed-forward network dimension 4096. Using the same architecture and a similar optimization strategy as (Zhang et al., 2020), our approach shows WERs similar to the previous best results on LibriSpeech both with and without an LM.
109
+
110
+ # 4.1.2. STREAMING MODELS
111
+
112
+ The architecture we use for the streaming experiments follows a design similar to previous work on streaming ASR (Yu et al., 2021). We also scale the model to 0.6B parameters to be consistent with the non-streaming experiments. The architecture has 3 Conformer layers at the bottom, followed by a stacking layer with 2x temporal-dimension reduction and 20 Conformer layers on top of the stacking layer. The Conformer layers have a hidden dimension of 1024 for the self-attention layers and 4096 for the feed-forward layers. The self-attention layer attends to the current and the previous 64 frames, and the convolution kernel covers the current and the past 3 frames.
113
+
114
+ The training setup is mostly the same as the 0.6B model in the non-streaming experiments, with some changes on the masking ratio for different pre-training approaches.
115
+
116
+ Streaming pre-train. The streaming pre-training uses the same setup as the original architecture, and the mask length is 300ms and the masking probability is 0.02. The random-projection quantizer stacks every 2 frames for projections.
117
+
118
+ Non-streaming pre-train. The non-streaming pre-training extends the original architecture to access future context by letting the convolution kernel within each Conformer layer see the next 3 frames. The self-attention is still limited to the previous context; we also explored giving the self-attention access to future context, but this setup tends to be less stable. The mask length is 400ms and the masking probability is 0.02.
119
+
120
+ Fine-tune. The fine-tuning ASR model uses an RNN-T decoder with one layer of unidirectional LSTM with a hidden dimension of 640. The training setup is the same as the fine-tuning configuration for the 0.6B model in the non-streaming experiments. When initializing from a non-streaming pre-trained model, the convolution only uses the kernel weights that access the previous context, keeping the model streaming.
121
+
122
+ Latency measurement. A streaming model can learn to delay its predictions in order to access future context and improve prediction accuracy, so it is critical to measure the latency of streaming models to verify that a model maintains similar latency. This assessment helps us identify whether the underlying approach provides real improvement rather than trading latency for prediction accuracy. Our latency comparison first calculates the starting and ending time of every word in each hypothesis generated by the two models. The comparison then aligns the hypotheses from the two models, finds the matching words, and calculates the differences in their starting and ending times. The relative latency is the average word timing difference over all matched words between the two models among all utterances.
123
+
124
+ Specifically, the relative latency is calculated as
125
+
126
+ $$
127
+ \sum_{i, j} \frac{s_{ij}^{\prime} - s_{ij} + e_{ij}^{\prime} - e_{ij}}{2 N}, \tag{2}
128
+ $$
129
+
130
+ where $i$ denotes the index of the matched words between the two hypotheses, $j$ is the utterance index, $s_{ij}$ and $e_{ij}$ correspond to the starting and ending time of the word from the baseline model, $s_{ij}'$ and $e_{ij}'$ correspond to the starting and ending time of the word from the compared model, and $N$ is the total number of matched words among all utterances. A negative relative latency means the compared model has lower latency than the baseline model.
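+
+ The computation in Eq. (2) can be sketched as follows, assuming the word alignment between the two hypotheses has already been produced; the function name and tuple layout are illustrative.
+
+ ```python
+ def relative_latency(matched_words):
+     """matched_words: list of (s, e, s_prime, e_prime) tuples over all utterances,
+     where (s, e) are the baseline model's word start/end times and
+     (s_prime, e_prime) are the compared model's times for the same matched word."""
+     n = len(matched_words)
+     if n == 0:
+         return 0.0
+     total = sum((s_p - s) + (e_p - e) for s, e, s_p, e_p in matched_words)
+     return total / (2 * n)   # negative => the compared model emits words earlier
+ ```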
131
+
132
+ The word error rates and the relative latency are shown in Table 2. In this comparison, both wav2vec 2.0 and w2v-BERT use the same architecture and the same masking and training setup as BEST-RQ; using the conventional masking setup for wav2vec 2.0 and w2v-BERT gives worse performance. Since there are no convolution layers at the bottom, the contrastive learning uses speech signals as targets. The w2v-BERT model uses 12 layers for the contrastive module and 12 layers for the masked prediction module, consistent with the non-streaming setup (Chung et al., 2021). Our algorithm outperforms wav2vec 2.0 and w2v-BERT with both streaming and non-streaming pre-training. In particular, our algorithm performs well with both pre-training setups, while wav2vec 2.0 and w2v-BERT favor non-streaming pre-training. This is likely because the representation learning of both approaches is more compatible with non-streaming architectures. Increasing the model size from 0.1B to 0.6B results in a slight increase in latency, but models trained with self-supervised learning algorithms have lower latency, with streaming pre-training giving the most significant latency reduction. This indicates that the self-supervised learning preserves the low-latency property while providing quality gains.
133
+
134
+ # 4.2. Multilingual Tasks
135
+
136
+ We present multilingual results in this section. We use the same model setup as the LibriSpeech non-streaming experiment for these tasks.
137
+
138
+ # 4.2.1. DATA
139
+
140
+ Multilingual LibriSpeech (MLS-10hrs) The Multilingual LibriSpeech dataset (Pratap et al., 2020) is a large corpus derived from read audiobooks of LibriVox and consists of 8 languages: Dutch (nl), English (en), French (fr), German (de), Italian (it), Polish (pl), Portuguese (pt), and Spanish (es). The latest version of this corpus contains around 50k hours, including 44k hours in English. We use the official 10-hour training splits to evaluate few-shot learning capabilities.
141
+
142
+ Table 2. LibriSpeech results compared with previous works using the same streaming architecture, with the LibriLight set for pre-training and the LibriSpeech 960h set for fine-tuning. The relative latency (lower is better) is the average difference in word prediction time compared with the baseline Conformer 0.1B model. Our algorithm outperforms wav2vec 2.0 and w2v-BERT on both WERs and latency.
143
+
144
+ <table><tr><td>Method</td><td>Size (B)</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>Relative latency (ms)</td></tr><tr><td>Conformer 0.1B</td><td>0.1</td><td>4.1</td><td>10.3</td><td>4.5</td><td>9.8</td><td>0</td></tr><tr><td>Conformer 0.6B</td><td>0.6</td><td>3.9</td><td>9.8</td><td>4.4</td><td>9.4</td><td>15.3</td></tr><tr><td colspan="7">Non-Streaming pre-train</td></tr><tr><td>wav2vec 2.0</td><td>0.6</td><td>2.6</td><td>7.3</td><td>3.0</td><td>7.2</td><td>-10.1</td></tr><tr><td>w2v-BERT</td><td>0.6</td><td>2.8</td><td>7.2</td><td>3.3</td><td>6.9</td><td>-0.7</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>2.5</td><td>6.9</td><td>2.8</td><td>6.6</td><td>-16.3</td></tr><tr><td colspan="7">Streaming pre-train</td></tr><tr><td>wav2vec 2.0</td><td>0.6</td><td>2.7</td><td>8.0</td><td>2.9</td><td>7.9</td><td>-130.6</td></tr><tr><td>w2v-BERT</td><td>0.6</td><td>2.7</td><td>8.4</td><td>3.0</td><td>8.1</td><td>-117.1</td></tr><tr><td>BEST-RQ (Ours)</td><td>0.6</td><td>2.5</td><td>6.9</td><td>2.8</td><td>6.6</td><td>-130.9</td></tr></table>
145
+
146
+ Multilingual Voice Search (VS-1000hrs) Our high-resource fine-tuning dataset is the multilingual Voice Search dataset (Li et al., 2021). We sample random 1000-hour subsets (VS-1000hrs) across 15 languages: English (US), English (IN), Spanish (US), Portuguese (BR), Spanish (ES), Arabic (GULF), Arabic (EG), Hindi (IN), Marathi (IN), Bengali (BD), Chinese (TW), Russian (RU), Turkish (TR), Hungarian (HU), and Malay (MY). The test set for each language contains around 3–19K utterances.
147
+
148
+ XLS-R unsupervised data (XLS-R-U) Our public unlabeled speech data follows the pre-training data used for XLS-R (Babu et al., 2021) with one major difference: we do not use any data from VoxLingua-107 due to license constraints. In total, we utilize approximately 429k hours of unlabeled speech data in 51 languages. As a consequence, our model is pre-trained on speech from 51 languages, compared to 128 for XLS-R, and our pre-training set is smaller by 6.6k hours. We use this pre-training data for the MLS-10hrs experiments to compare with published results.
149
+
150
+ YouTube unsupervised data (YT-U) Following (Zhang et al., 2021), we collected a multilingual YouTube dataset for pre-training. For each language we prepare an unlabeled YouTube dataset segmented using voice activity detection (VAD) (Zazo Candil et al., 2016). The number of hours per language is: English (800k hrs), Spanish (800k hrs), Marathi (600k hrs), Portuguese (800k hrs), Russian (800k), Arabic (800k), Hindi (800k), Chinese (800k), Malay (250k), Turkish (800k), Bengali (800k), Hungarian (300k). In practice, we found this data performs much better than XLS-R-U on VS-1000hrs. Thus, we use this pre-training data with VS-1000hrs to compare the performance of different pre-training methods.
151
+
152
+
153
+
154
+ # 4.2.2. RESULTS ON MLS-10HRS
155
+
156
+ We conduct our multilingual low-resource fine-tuning experiments on MLS-10hrs. We use XLS-R-U as the pre-training data and fine-tune on MLS-10hrs. As shown in Table 3, our baseline w2v-BERT already outperforms the previous strong XLS-R (2B) model (Babu et al., 2021). The average WER is further reduced by 3% relative with the proposed BEST-RQ. This demonstrates that a simple random-projection quantizer is also effective for multilingual pre-training. We also report fine-tuning results on the full MLS supervised data. Interestingly, with more fine-tuning data, BEST-RQ performs even better relative to w2v-BERT, especially for pt and pl. Our results are also comparable with the previous state-of-the-art results in (Bai et al., 2021), which conducts joint training for multilingual ASR.
157
+
158
+ While fine-tuning with MLS-full and with MLS-10hrs both show improvements compared to existing approaches, fine-tuning with MLS-full provides more relative improvement. This likely implies that pre-training with random-projection quantizers is more effective when there is more fine-tuning data.
159
+
160
+ # 4.2.3. RESULTS ON VOICE SEARCH
161
+
162
+ To understand how the proposed model works in the high-resource setting (1000 hrs per language), we pre-train our model on YT-U and fine-tune it on VS-1000hrs. As shown in Table 4, with more fine-tuning data, the relative improvement over the no-pre-training baseline is smaller. However, our proposed BEST-RQ consistently outperforms wav2vec 2.0 and w2v-BERT, by 9% and 5% relative respectively, and it outperforms w2v-BERT on all languages. Among the 15 languages, English, Portuguese, Russian, and Turkish improve by more than 10% relative, while the Indic languages (Hindi, Marathi, and English (IN)) improve only slightly, by less than 3% relative.
163
+
164
+ Table 3. Test set WER (%) comparisons on the MLS full and 10hrs sets.
165
+
166
+ <table><tr><td rowspan="2">Exp.</td><td colspan="8">Languages</td><td rowspan="2">Avg.</td></tr><tr><td>en</td><td>de</td><td>nl</td><td>fr</td><td>es</td><td>it</td><td>pt</td><td>pl</td></tr><tr><td>MLS-full</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>wav2vec 2.0 from XLSR-53 (Conneau et al., 2020)</td><td>-</td><td>7.0</td><td>10.8</td><td>7.6</td><td>6.3</td><td>10.4</td><td>14.7</td><td>17.2</td><td>10.6</td></tr><tr><td>w2v-BERT from JUST (Bai et al., 2021)</td><td>6.6</td><td>4.3</td><td>9.9</td><td>5.0</td><td>3.8</td><td>9.1</td><td>14.6</td><td>8.1</td><td>7.8</td></tr><tr><td>JUST (Bai et al., 2021) (co-train)</td><td>6.5</td><td>4.1</td><td>9.5</td><td>5.2</td><td>3.7</td><td>8.8</td><td>8.0</td><td>6.6</td><td>6.5</td></tr><tr><td>w2v-BERT (0.6B)</td><td>5.5</td><td>4.3</td><td>10.9</td><td>5.6</td><td>4.5</td><td>10.1</td><td>13.4</td><td>11.2</td><td>8.2</td></tr><tr><td>BEST-RQ (Ours, 0.6B)</td><td>6.8</td><td>4.1</td><td>9.7</td><td>5.0</td><td>4.9</td><td>7.4</td><td>9.4</td><td>5.2</td><td>6.6</td></tr><tr><td>MLS-10hrs</td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td>XLSR-53 (Conneau et al., 2020)</td><td>14.6</td><td>8.4</td><td>12.8</td><td>12.5</td><td>8.9</td><td>13.4</td><td>18.2</td><td>21.2</td><td>13.8</td></tr><tr><td>XLS-R(0.3B) (Babu et al., 2021)</td><td>15.9</td><td>9.0</td><td>13.5</td><td>12.4</td><td>8.1</td><td>13.1</td><td>17.0</td><td>13.9</td><td>12.8</td></tr><tr><td>XLS-R(1B) (Babu et al., 2021)</td><td>12.9</td><td>7.4</td><td>11.6</td><td>10.2</td><td>7.1</td><td>12.0</td><td>15.8</td><td>10.5</td><td>10.9</td></tr><tr><td>XLS-R(2B) (Babu et al., 2021)</td><td>14.0</td><td>7.6</td><td>11.8</td><td>10.0</td><td>6.9</td><td>12.1</td><td>15.6</td><td>9.8</td><td>11.0</td></tr><tr><td>w2v-BERT (0.6B)</td><td>12.7</td><td>7.0</td><td>12.6</td><td>8.9</td><td>5.9</td><td>10.3</td><td>14.6</td><td>6.9</td><td>9.9</td></tr><tr><td>BEST-RQ (Ours, 0.6B)</td><td>12.8</td><td>7.4</td><td>12.7</td><td>9.6</td><td>5.4</td><td>9.9</td><td>12.1</td><td>7.1</td><td>9.6</td></tr></table>
167
+
168
+ Table 4. Test set WER (%) comparisons using YT-U for pretrain and VS-1000hrs for finetune, across 15 languages.
169
+
170
+ <table><tr><td>Exp.</td><td>Avg. on 15 langs (VS)</td></tr><tr><td>Baseline (0.6B)</td><td>12.6</td></tr><tr><td>wav2vec 2.0 (0.6B)</td><td>12.0</td></tr><tr><td>w2v-bert (0.6B)</td><td>11.5</td></tr><tr><td>BEST-RQ (Ours) (0.6B)</td><td>10.9</td></tr></table>
171
+
172
+
173
+
174
+ # 4.3. Analyzing Quantization Quality
175
+
176
+ As our self-supervised learning algorithm eliminates the requirement of representation learning by applying a random-projection quantizer, it is crucial to understand the representation quality of this quantizer and how the quality of the quantization affects the self-supervised learning. We analyze the quality of quantizers by training ASR models that take the labels generated by quantizing utterances as input; the performance of the resulting ASR gives us insight into the quality of the quantizer. The ASR model embeds the quantized labels and feeds the embeddings to a stack of Conformer layers, followed by a CTC decoder. The model has 16 Conformer layers with feature dimension 256, local self-attention with 8 heads and a context length of 128, and a kernel size of 5 for the lightweight convolution; in total the model size is 25M. We study the effect of representation learning by comparing with
177
+
178
+ quantizers trained with the VQ-VAE. We compare 3 types of quantizers: (a) a random-projection quantizer, (b) a quantizer trained with VQ-VAE where the encoder has the same architecture as the random-projection quantizer and the decoder contains only a projection layer, and (c) a trained VQ-VAE whose encoder and decoder are Transformer models. For the trained quantizers, we train on the full LibriSpeech 960-hour audio-only data with a constant learning rate of 1e-4 for 400k steps with batch size 256. For all quantizers, each input frame is stacked with the 3 frames to its left, resulting in a 4x input length reduction. We also use the quantizers for self-supervised learning with the LibriSpeech 0.6B non-streaming setup to compare their performance.
179
+
180
+ Table 5 shows the WERs on LibriSpeech 960h. Both the random-projection quantizer and the projection-based VQ-VAE quantizer lead to poor ASR performance when their labels are used directly as input, while the Transformer-based VQ-VAE quantizer gives significantly better performance. This implies that the Transformer-based VQ-VAE quantizer learns a better representation. On the other hand, when these quantizers are used for self-supervised learning, they all lead to similar WERs. This indicates that quantizer quality does not translate to self-supervised learning quality.
181
+
182
+ # 4.4. Analyzing the Effect of Pre-training Data Size
183
+
184
+ One potential explanation for the above observation, that a sub-optimal quantization can work well for self-supervised learning, is that the self-supervised learning algorithm can learn to mitigate the quality gap given sufficient pre-training data. We investigate whether a quantizer with better quantization quality performs better when the amount of pre-training data is limited, and whether increasing the amount of pre-training data alleviates the discrepancy compared to a random-projection quantizer.
185
+
186
+ Table 5. Quantizer quality's impact on ASR tasks. Although the Transformer-based quantizer achieves much better performance when its labels are used directly as input, the random-projection quantizer is equally effective for self-supervised learning. The model used in the direct ASR task has 25M parameters. The self-supervised learning tasks use the same setup as the LibriSpeech non-streaming experiment, which uses LibriLight for pre-training and LibriSpeech for fine-tuning with a 0.6B model.
187
+
188
+ <table><tr><td rowspan="2">Configuration</td><td rowspan="2">Quantizer size (M)</td><td colspan="4">Direct ASR WER</td><td colspan="4">Pretrain-finetune WER</td></tr><tr><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td><td>dev</td><td>dev-other</td><td>test</td><td>test-other</td></tr><tr><td>Random quantizer</td><td>1</td><td>58.8</td><td>78.8</td><td>57.9</td><td>72.8</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td></tr><tr><td>Projection VQ-VAE</td><td>1</td><td>61.4</td><td>74.8</td><td>60.9</td><td>75.2</td><td>1.5</td><td>2.8</td><td>1.6</td><td>2.9</td></tr><tr><td>Transformer VQ-VAE</td><td>10</td><td>17.8</td><td>35.8</td><td>17.6</td><td>36.1</td><td>1.4</td><td>2.9</td><td>1.6</td><td>3.1</td></tr></table>
189
+
190
+ ![](images/46ae957cb3b7f368d34f1c0b4514b17b12adf61c1b45e0b42acaea89729f795f.jpg)
191
+ Librilight-pretrain, Librispeech finetune WER
192
+ Figure 2. Comparing the self-supervised learning quality of the random-projection quantizer (rq) and the Transformer-based VQ-VAE quantizer (tvae) with different pre-training data sizes. With a low amount of pre-training data, the random-projection quantizer is behind the trained Transformer VQ-VAE quantizer; as the amount of pre-training data increases, the random-projection quantizer catches up.
193
+
194
+ In this study, we compare the self-supervised learning quality of a random-projection quantizer (rq) and a trained Transformer-based VQ-VAE quantizer (tvae) with different pre-training data sizes. The random-projection quantizer is untrained, and 4 Transformer VQ-VAE quantizers are trained with {1/64, 4/64, 16/64, 64/64} of the LibriLight data, respectively. Models are then pre-trained separately with the random-projection quantizer and with the corresponding Transformer VQ-VAE quantizers on the same fractions of LibriLight data for 100k steps with a global batch size of 2048, and the pre-trained models are fine-tuned on LibriSpeech 960h. The results in Figure 2 show that a quantizer with better representation quality (the Transformer-based VQ-VAE) performs better when pre-training data is limited, but the gap disappears as the pre-training data increases.
195
+
196
+ # 5. Conclusions and Discussions
197
+
198
+ We proposed BEST-RQ, which performs self-supervised learning for speech recognition models with a random-projection quantizer that quantizes speech signals to discrete labels. The pre-training process masks the speech signals and trains the model to predict the labels corresponding to the masked parts. The approach achieves WERs similar to existing state-of-the-art results on LibriSpeech with non-streaming models, and outperforms wav2vec 2.0 and w2v-BERT on LibriSpeech with streaming models and on multilingual tasks with non-streaming models. Further analysis showed that although the random-projection quantizer provides a poorer representation than a trained VQ-VAE quantizer, it is effective for the purpose of self-supervised learning.
199
+
200
+ Our algorithm untangles the quantizer from the speech recognition model and eliminates the requirement of representation learning. This simpler framework makes it easier to find a good recipe for the target task. The improvement on streaming models shows that separating the quantizer from the model makes the algorithm more effective for architectures that are less suited to representation learning. The improvement on multilingual tasks shows that complicated tasks, where finding a good recipe is more challenging, can benefit more from a simpler framework. The quantization-quality analysis implies that representation learning is not necessarily critical for self-supervised learning.
201
+
202
+ Codebook utilization. One of the most critical factors for pre-training quality is the percentage of the codebook that is used during training. In particular, a higher percentage of the codebook being used in each batch at each training step correlates strongly with good pre-training quality. When the distribution of codebook utilization is skewed toward a small subset of codes, the pre-training task usually becomes easier and the pre-training less effective. The l2 normalizations on the projected vector and the codebook are critical for providing more uniform codebook utilization. On the other hand, using a randomly initialized codebook and projection matrix can introduce different codebook utilizations
203
+
204
+ with different random seeds, which affects the pre-training quality across different runs with the same experiment configuration. This variance impacts quality more when training with smaller pre-training and fine-tuning datasets. Reducing this reproducibility issue caused by random initialization is an important next step for improving random-projection quantization.
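+
+ A simple way to monitor this is sketched below: the fraction of codebook entries that appear at least once among the quantized labels of a batch. The metric definition here is our own illustration; the paper describes codebook utilization only informally.
+
+ ```python
+ import numpy as np
+
+ def codebook_utilization(labels, codebook_size=8192):
+     """labels: integer quantizer outputs for one batch (any shape)."""
+     used = np.unique(np.asarray(labels).ravel())
+     return used.size / codebook_size
+ ```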
205
+
206
+ Hyperparameters. The pre-training quality is not very sensitive to the codebook vocab size and the codebook dimension, and is more sensitive to the masking probability and the mask length. The role of the projection layer in the random-projection quantizer is to allow using different codebook dimensions; one can achieve similar results without the projection by setting the codebook dimension to be the same as the input dimension. Due to the variance coming from random initialization, verifying the impact of a hyperparameter usually requires multiple runs of an experiment.
207
+
208
+ Longer convergence time for non-streaming models. One observation is that the algorithm takes more steps to converge with non-streaming models. We still observe improvements over wav2vec 2.0 and w2v-BERT at the same training step on multilingual tasks, though final convergence usually takes 50% more steps. Our training setup follows (Zhang et al., 2020), and it is unclear whether further hyperparameter tuning would help the model converge faster. We did not observe the longer convergence behavior with streaming models.
209
+
210
+ Initialization. The quantizer uses random initialization and does not update its parameters, so the initialization algorithm can play an important role in the results. In this paper we showed results with Xavier initialization for the projection matrix and a standard normal distribution for the codebook; further comparisons of different initialization algorithms are left for future work.
211
+
212
+ # 6. Acknowledgements
213
+
214
+ We thank Wei Han and Johan Schalkwyk for helpful discussions, and Rohit Prabhavalkar, Izhak Shafran, and Hagen Soltau for insightful feedback. We also want to thank Bo Li for the help on multilingual tasks.
215
+
216
+ # References
217
+
218
+ Babu, A., Wang, C., Tjandra, A., Lakhotia, K., Xu, Q., Goyal, N., Singh, K., von Platen, P., Saraf, Y., Pino, J., et al. Xls-r: Self-supervised cross-lingual speech representation learning at scale. arXiv preprint arXiv:2111.09296, 2021.
219
+ Baevski, A., Auli, M., and Mohamed, A. Effectiveness of self-supervised pre-training for speech recognition. arXiv
220
+
221
+ preprint arXiv:1911.03912, 2019.
222
+ Baevski, A., Schneider, S., and Auli, M. vq-wav2vec: Self-supervised learning of discrete speech representations. In ICLR, 2020a.
223
+ Baevski, A., Zhou, H., Mohamed, A., and Auli, M. wav2vec 2.0: A framework for self-supervised learning of speech representations. arXiv preprint arXiv:2006.11477, 2020b.
224
+ Bai, J., Li, B., Zhang, Y., Bapna, A., Siddhartha, N., Sim, K. C., and Sainath, T. N. Joint unsupervised and supervised training for multilingual asr. arXiv preprint arXiv:2111.08137, 2021.
225
+ Bao, H., Dong, L., and Wei, F. Beit: Bert pre-training of image transformers, 2021.
226
+ Chung, Y.-A., Zhang, Y., Han, W., Chiu, C.-C., Qin, J., Pang, R., and Wu, Y. W2v-bert: Combining contrastive learning and masked language modeling for self-supervised speech pre-training. arXiv preprint arXiv:2108.06209, 2021.
227
+ Conneau, A., Baevski, A., Collobert, R., Mohamed, A., and Auli, M. Unsupervised cross-lingual representation learning for speech recognition. arXiv preprint arXiv:2006.13979, 2020.
228
+ Devlin, J., Chang, M.-W., Lee, K., and Toutanova, K. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
229
+ Glorot, X. and Bengio, Y. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, volume 9 of Proceedings of Machine Learning Research, pp. 249-256. PMLR, 13-15 May 2010.
230
+ Graves, A. Sequence transduction with recurrent neural networks. CoRR, abs/1211.3711, 2012.
231
+ Gulati, A., Qin, J., Chiu, C.-C., Parmar, N., Zhang, Y., Yu, J., Han, W., Wang, S., Zhang, Z., Wu, Y., and Pang, R. Conformer: Convolution-augmented transformer for speech recognition, 2020.
232
+ He, K., Chen, X., Xie, S., Li, Y., Dollar, P., and Girshick, R. Masked autoencoders are scalable vision learners. arXiv preprint arXiv:2111.06377, 2021.
233
+ Hsu, W.-N., Bolte, B., Tsai, Y.-H. H., Lakhotia, K., Salakhutdinov, R., and Mohamed, A. HuBERT: Self-supervised speech representation learning by masked prediction of hidden units. arXiv preprint arXiv:2106.07447, 2021.
234
+
235
+ Kahn, J., Rivière, M., Zheng, W., Kharitonov, E., Xu, Q., Mazare, P.-E., Karadayi, J., Liptchinsky, V., Collobert, R., Fuegen, C., Likhomanenko, T., Synnaeve, G., Joulin, A., Mohamed, A., and Dupoux, E. Libri-light: A benchmark for ASR with limited or no supervision. In ICASSP, 2020.
236
+ Kingma, D. P. and Ba, J. Adam: A method for stochastic optimization. In ICLR, 2015.
237
+ Li, B., Pang, R., Sainath, T. N., Gulati, A., Zhang, Y., Qin, J., Haghani, P., Huang, W. R., and Ma, M. Scaling end-to-end models for large-scale multilingual asr. arXiv preprint arXiv:2104.14830, 2021.
238
+ Pratap, V., Xu, Q., Sriram, A., Synnaeve, G., and Collobert, R. Mls: A large-scale multilingual dataset for speech research. In INTERSPEECH, 2020.
239
+ Sainath, T. N., He, Y., Li, B., Narayanan, A., Pang, R., Bruguier, A., Chang, S.-y., Li, W., Alvarez, R., Chen, Z., and et al. A streaming on-device end-to-end model surpassing server-side conventional model quality and latency. In ICASSP, 2020.
240
+ Schneider, S., Baevski, A., Collobert, R., and Auli, M. wav2vec: Unsupervised pre-training for speech recognition. arXiv preprint arXiv:1904.05862, 2019.
241
+ Schuster, M. and Nakajima, K. Japanese and Korean voice search. 2012 IEEE International Conference on Acoustics, Speech and Signal Processing, 2012.
242
+ Shen, J., Nguyen, P., Wu, Y., Chen, Z., and et al. Lingvo: a modular and scalable framework for sequence-to-sequence modeling, 2019.
243
+ van den Oord, A., Vinyals, O., and Kavukcuoglu, K. Neural discrete representation learning, 2018.
244
+ Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A. N., Kaiser, L., and Polosukhin, I. Attention Is All You Need. CoRR, abs/1706.03762, 2017. URL http://arxiv.org/abs/1706.03762.
245
+ Yu, J., Chiu, C.-C., Li, B., et al. FastEmit: Low-latency Streaming ASR with Sequence-level Emission Regularization. In Proc. ICASSP, 2021.
246
+ Zazo Candil, R., Sainath, T. N., Simko, G., and Parada, C. Feature learning with raw-waveform cldnns for voice activity detection. In *Interspeech* 2016, 2016.
247
+ Zhang, Y., Qin, J., Park, D. S., Han, W., Chiu, C.-C., Pang, R., Le, Q. V., and Wu, Y. Pushing the limits of semi-supervised learning for automatic speech recognition. arXiv preprint arXiv:2010.10504, 2020.
248
+
249
+ Zhang, Y., Daniel Park, S., Han, W., Qin, J., Gulati, A., Shor, J., Jansen, A., Xu, Y., Huang, Y., Wang, S., Zhou, Z., Li, B., Ma, M., Chan, W., Yu, J., Wang, Y., Cao, L., Sim, K. C., Ramabhadran, B., Sainath, T. N., Beaufays, F., Chen, Z., Le, Q. V., Chiu, C.-C., Pang, R., and Wu, Y. Bigssl: Exploring the frontier of large-scale semi-supervised learning for automatic speech recognition. arXiv preprint arXiv:2109.13226, 2021.
2202.01xxx/2202.01855/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cc93e2517592a5ed353d4c5a79e0b5eece74fddf16f751df077fa4397ff7c26
3
+ size 302157
2202.01xxx/2202.01855/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01875/70c382aa-6520-4228-8f67-cbc134991973_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01875/70c382aa-6520-4228-8f67-cbc134991973_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01875/70c382aa-6520-4228-8f67-cbc134991973_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7616784e90267b6c3f00f04f69113ff7accc327908863ee438118336a2b0f031
3
+ size 593889
2202.01xxx/2202.01875/full.md ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Rethinking Explainability as a Dialogue: A Practitioner's Perspective
2
+
3
+ Himabindu Lakkaraju*
4
+
5
+ Harvard University
6
+
7
+ hlakkaraju@hbs.edu
8
+
9
+ Dylan Slack*
10
+
11
+ UC Irvine
12
+
13
+ dslack@uci.edu
14
+
15
+ Yuxin Chen
16
+
17
+ University of Chicago
18
+
19
+ chenyuxin@uchicago.edu
20
+
21
+ Chenhao Tan
22
+
23
+ University of Chicago
24
+
25
+ chenhao@uchicago.edu
26
+
27
+ Sameer Singh
28
+
29
+ UC Irvine / AI2
30
+
31
+ sameer@uci.edu
32
+
33
+ # Abstract
34
+
35
+ As practitioners increasingly deploy machine learning models in critical domains such as healthcare, finance, and policy, it becomes vital to ensure that domain experts function effectively alongside these models. Explainability is one way to bridge the gap between human decision-makers and machine learning models. However, most of the existing work on explainability focuses on one-off, static explanations like feature importances or rule-lists. These sorts of explanations may not be sufficient for many use cases that require dynamic, continuous discovery from stakeholders that have a range of skills and expertise. In the literature, few works ask decision-makers such as doctors, healthcare professionals, and policymakers about the utility of existing explanations and other desiderata they would like to see in an explanation going forward. In this work, we address this gap and carry out a study where we interview doctors, healthcare professionals, and policymakers about their needs and desires for explanations. Our study indicates that decision-makers would strongly prefer interactive explanations. In particular, they would prefer these interactions to take the form of natural language dialogues. Domain experts wish to treat machine learning models as "another colleague", i.e., one who can be held accountable by asking why they made a particular decision through expressive and accessible natural language interactions. Considering these needs, we outline a set of five principles researchers should follow when designing interactive explanations as a starting place for future work. Further, we show why natural language dialogues satisfy these principles and are a desirable way to build interactive explanations. Next, we provide a design of a dialogue system for explainability, and discuss the risks, trade-offs, and research opportunities of building these systems. Overall, we hope our work serves as a starting place for researchers and engineers to design interactive, natural language dialogue systems for explainability that better serve users' needs.
36
+
37
+ # 1 Introduction
38
+
39
+ As engineers, researchers, and domain experts increasingly deploy machine learning models in societally critical domains, such as healthcare, criminal justice, and public policy, there is an ever-growing demand for explainability of these models [59, 26, 48, 83, 36]. Researchers have proposed a variety of approaches to address this demand for explainability. For example, a popular class of approaches identifies feature importance, i.e., how much each feature contributes to the model prediction [73, 63].
40
+
41
+ The seminal work, LIME, shows that such feature importance can help people understand why a model makes a particular prediction and allows model developers to debug and improve model performance [73]. More generally, explainability holds the promise of enabling appropriate trust in model predictions, detecting discriminatory biases, supporting scientific discovery, and ultimately improving human decision-making.
42
+
43
+ Meanwhile, the research community has begun to recognize the importance of human perspectives in realizing the promise of explainability. Explanations of machine learning models serve as a bridge between machines and humans; they are only helpful if they satisfy the need of humans. In addition to a large body of work on evaluative studies with human subjects [52, 50, 35, 34, 99, 68, 91, 49], Liao et al. [58] interviewed 20 UX and design practitioners working on various AI products to identify gaps between the current algorithmic work and practices for creating explainable AI products. They developed an explainable AI question bank, representing user needs for explainability as prototypical questions users might ask about the AI (e.g., "what kind of mistakes is the system likely to make?" and "what feature(s) of this instance determine the system's prediction of it?"). However, it remains an open question how end-users, such as domain experts or laypeople, are satisfied with current approaches to generating explanations, and—if not—(1) what are the fundamental limitations of the current explanations, and (2) what are desirable approaches to explaining model algorithms in the real world.
44
+
45
+ Our work fills this gap by retrospectively evaluating the existing approaches to explainability widely adopted by the research community through the lens of real-world decision-makers. In particular, we conducted interviews with domain experts in healthcare and policy-making—including fourteen doctors and twelve policy experts—to understand how they use explanations in their day-to-day work, the pain points they experience with existing explanations, and what would they like to see in the next generation of explanations. The key findings of our qualitative user study are summarized as follows:
46
+
47
+ 1. Domain experts are not satisfied with existing explanation paradigms.
48
+ 2. Domain experts would prefer an increased interaction with the model about its behavior instead of just seeing one-off explanations such as feature importances or saliency maps.
49
+ 3. Domain experts agree that interaction with models and explanations through natural language dialogues would be an advantageous route to more interactive explanations.
50
+ 4. Domain experts place a high value on the accuracy/correctness of explanations, yet existing explanations often do not come with an estimate of these metrics.
51
+
52
+ Building on the user study, we argue that interactive explanations are a promising avenue for future work on explainability. We synthesize responses from our interviewees and propose the following principles of interactive explanations: 1) Interact appropriately; 2) Respond appropriately; 3) Properly calibrated responses; 4) Reduce explainability overhead; 5) Consider context. Next, we point out that natural language dialogues could prove to be a powerful tool for building an interactive explainability system that satisfies these principles. Natural language dialogues can promote accurate and continuous understanding of user queries through rich text interactions and appropriate presentation of explanations, along with addressing understudied problems such as providing confidence and accuracy estimates of explanations. Ultimately, natural language dialogues for explainability could support understanding of the model with greater ease than current one-off explanations. To illustrate these principles, Figure 1 shows an instantiation of such natural language dialogues. A dialogue-based interface provides a novel perspective on interacting with ML models. We further lay out a possible roadmap towards this exciting vision with four modules: 1) a natural language understanding module; 2) an explanation algorithm module; 3) a response generation module; 4) a GUI interface. Realizing it requires advances in algorithms and human-computer interaction to ensure that these modules enable a satisfying and accurate interaction with models.
53
+
54
+ ![](images/34c8d669cc3f27258b01d39008e7a2a71ea49f42588c93c6c0312018e138a0aa.jpg)
55
+
56
+ FOR WOMEN PREDICTED HIGH RISK FOR LUNG CANCER THAT ARE OLDER THAN 65, WHY DID THE MODEL DECIDE TO PREDICT THEM AS HIGH RISK?
57
+
58
+ GOOD QUESTION! IT LOOKS LIKE THE MODEL PREDICTED THESE INDIVIDUALS AS HIGH RISK MOSTLY BECAUSE THEY WERE SMOKERS BUT ALSO BECAUSE THEY LIVE IN LARGE CITIES. I'M HIGHLY CONFIDENT THESE ARE THE REASONS BECAUSE THE EXPLANATIONS HAVE HIGH FIDELITY. HERE'S THE AVERAGE FEATURE IMPORTANCE FOR THESE PEOPLE (HIGHER MEANS MORE IMPORTANT).
59
+
60
+ ![](images/7d965170af6642e2ad65ae96ef585f25b92e47728956b8d815909bcac46df675.jpg)
61
+
62
+ ![](images/bf25be0b44ed0c672ae29f2d71ed185339040ac28ea2280039d237f2fdff5c3b.jpg)
63
+ Figure 1: A visualization of an example dialogue users could have with the system for a lung cancer prediction task. The user asks why the model predicted certain individuals who are female and over 65 as high risk. The model responds with text and visual descriptions and confidence in the explanations. Finally, the user can ask more questions about the explanations. In this case, the user expressed surprise about one of the more important features, and the model runs an additional analysis to demonstrate the effects the feature can have on the predictions for this group.
64
+
65
+ Wow, it's surprising that whether the person lives in a city is so important.
66
+
67
+ YES, LIVES_IN_CITY HAS A SIGNIFICANT EFFECT ON THE PREDICTIONS FOR THESE INDIVIDUALS. PERTURBING THIS FEATURE CAN FLIP THE PREDICTION FOR 4 OF 15 OF THE INSTANCES IN THIS GROUP.
68
+
69
+ ![](images/4076ebea50e75b8b09529f28383d39ea9c0d5c09d67e977abf2f080c20c20a7f.jpg)
70
+
71
+ Finally, we discuss concrete challenges that we foresee in realizing this vision. We group them into three categories: 1) challenges in natural language understanding, given the large set of possible query types, the different ways to phrase explainability questions, and the complexity introduced by different applications; 2) challenges in explanation generation, including mapping user intent to the correct explanations and providing confidence estimates for the quality of explanations; 3) challenges in scalability, so that the system can respond in real time. These challenges serve as an open call for the community to work on them collectively.
72
+
73
+ In summary, our study identifies the mismatch between current approaches to explainability and the demand of decision-makers in practice, a significant hurdle in adopting complex predictive models in these high-stakes domains. Our interviews point to natural language dialogues to remedy users' pain points. We thus propose a research agenda around developing natural language dialogues as the next generation of explainability. We develop a list of principles, a roadmap towards desirable natural language dialogues, and a set of challenges going forward. We hope that our work can help the research community realize the promise of explainability and enable effective human-AI interaction with complex machine learning models.
74
+
75
+ # 2 Practitioner Perspectives on Rethinking Model Explainability
76
+
77
+ In this section, we describe the user study design. Also, we discuss the results from interviewing 26 practitioners in the user study and summarize our findings.
78
+
79
+ # 2.1 User Study Design
80
+
81
+ Here, we discuss the study that we carried out with practitioners from healthcare and policy to understand how they use explanations in their day-to-day work, the pain points they experience with existing explanations, and what they would like to see in the next generation of explanations.
82
+
83
+ More specifically, we conducted 30-minute semi-structured interviews with 26 practitioners who regularly employ explainability techniques in their workflow. 14 of these 26 (53.8%) practitioners are both medical doctors and researchers who actively use explanation methods to understand ML models that diagnose diseases ranging from diabetes to rare cancers. The remaining 12 (46.2%) are policy researchers who use explanation methods to understand financial decision-making models (e.g., for loan approvals). Furthermore, 18 of the 26 (69.2%) practitioners are male and the remaining 8 (30.7%) are female. 16 practitioners (61.5%) had more than a year of experience working with explainability tools, and the remaining 10 (38.5%) had about 6 months to a year of experience. All the practitioners in our study have used local post hoc explanation methods such as LIME and SHAP in their workflow, and 12 of them (46.2%) have also used various gradient-based methods (e.g., GradCAM, Integrated Gradients). 11 participants (42.3%) also mentioned that they understand the technical details of LIME, but none of the participants had any understanding of the inner workings of any other explanation method.
84
+
85
+ We began the interview by asking each of the participants about how exactly they leverage model explanations. All the participants said that they look at feature attributions output by post hoc explanation methods for each model prediction of interest, and that they specifically focus on the top 5 to 8 features that are driving the prediction. 21 out of 26 participants mentioned that they also look at the sign (or direction of the contribution) of the feature attribution for certain features of interest—e.g., is salary contributing positively to the loan approval decision? Lastly, 19 out of 26 $(73.1\%)$ participants mentioned that they also compare features w.r.t. their sign, rank, and feature importance values. Our interviews further included, but were not limited to the following questions:
86
+
87
+ - What do you like about model explanations output by state-of-the-art methods?
88
+ - What do you dislike about model explanations output by state-of-the-art methods?
89
+ - What other features should explanations have for you to comfortably use them in your day-to-day work? (24 out of 26 participants wanted some form of an interactive dialogue for explanations.)
90
+ - Would you prefer a one-shot (single) explanation or interactive dialogue style explanations? (We asked this question only if participants did not bring up interactive dialogue on their own; only 2 out of 26 participants did not)
91
+ - What are the key desiderata you would like to have in interactive dialog style explanations? (We asked this question to the 25 out of 26 participants who wanted interactive dialogue style explanations).
92
+
93
+ # 2.2 Results and Findings
94
+
95
+ Our study evaluated users' perceptions about the strengths and weaknesses of current explanations and whether explainability dialogues could help users better understand machine learning models. This section discusses the respondent's opinions and feedback to this end. Overall, while respondents were satisfied with many features of current explainability techniques, they pointed to several critical shortcomings with current methods. Further, respondents expressed a strong desire for interactive explanations and felt that natural language dialogues could serve as an advantageous type of interactive explanations. Last, interviewees felt that natural language dialogues could create a better explainability experience and identified critical criteria explainability dialogues should satisfy.
96
+
97
+ # 2.2.1 The Need for Interactive Explanations
98
+
99
+ During the interviews, respondents indicated several aspects of current explainability techniques they liked. Respondents most enjoyed getting some understanding of deep learning models (26/26 liked) and understanding which features contribute positively and negatively (21/26 liked). Slightly fewer respondents enjoyed seeing the essential features for predictions (19/26 liked) and comparing the relative importance of features (18/26 liked). All in all, respondents expressed that current explainability techniques help understand how machine learning models work and how different features affect the model predictions.
100
+
101
+ While respondents indicated they enjoyed certain features of explainability techniques, they also expressed several unsatisfactory aspects of explanations. Respondents were most dissatisfied with the lack of additional interaction with explanations after generation. Respondents answered that they were highly dissatisfied with the fact that conversations with the explanations are not possible (25/26 disliked), there is no capacity to follow up on explanations interactively (24/26 disliked), nor to ask custom questions (23/26 disliked). One respondent stated, "It is extremely frustrating to just look at one explanation [per prediction] and not be able to follow up on it!" Another indicated, "I should be able to ask custom questions [to the explanation] and get answers." Respondents also disliked that they could not understand the accuracy of explanations (24/26 disliked). One of the interviewees described, "I don't know anything about how correct the explanation is! How do you expect me to use it meaningfully? I constantly struggle with worrying about using an incorrect explanation and missing out on not using a correct explanation that is giving me more insights." Slightly fewer respondents indicated they disliked the limited capacity of explanations to generate subgroup-level explanations (21/26 disliked). One respondent questioned, "Why is all explanation work focused on local explanations? I would like to see at least subgroup level explanations. I think there is one algorithm (MUSE?) but need a lot more work." Overall, respondents expressed evident dissatisfaction with the one-off nature of explanations. Respondents felt that, in almost all cases, they had further follow-up questions for explanations to do with the explanation's accuracy or additional tasks they would like the explanation to solve. The interviewees felt that the lack of interactivity with explanations is a significant shortcoming of existing techniques.
102
+
103
+ When we asked respondents what could improve explanations, respondents discussed several potential improvements. Overall, respondents expressed the strongest desire for explainability through fully-fledged conversations with ML models (25/26 said this was important). Multiple respondents voiced support for conversational explanations. One stated, "I can see myself using explainable tools a ton more if only it were like a free-flowing dialogue. Oh I can't wait for that day." Another said, "dialogue-based explanations will totally revolutionize how medical science uses ML. Wow, I am excited just thinking about the possibility." Respondents also indicated the inclusion of reliable accuracy metrics for explanations as a critical place of improvement (24/26 said this was important). Slightly fewer respondents indicated that custom questions are vital for improving explanations (22/26 said this was important) and improving subgroup level explanations (24/26 said this was important). While the interviewees indicated several places to improve explanations (such as accuracy metrics), respondents most heavily fixated on the potential of having conversations with ML models to support explainability. Considering that respondents indicated a strong desire for dialogue-based explanation systems, we next discussed key desiderata for an explainability dialogue system.
104
+
105
+ # 2.3 Explainability Dialogue Desiderata from Interviewees
106
+
107
+ Respondents felt that fully-fledged conversations in natural language with explanations would help them better understand ML models. In addition, they felt that an explainability dialogue system could greatly help their explainability workflows. Further, respondents had numerous ideas about the system's capabilities, different ways the system could augment their explainability workflows, and what they hoped to get out of such a system.
110
+
111
+ Critically, many respondents envisioned explainability dialogues happening much like conversations with colleagues where the goal is to understand "why" another practitioner made a particular decision or choice (e.g., medical diagnosis, financial risk assessment). In this sense, they imagined treating models like colleagues and using explainability dialogues to facilitate natural interactions between models and people. Respondents viewed such natural language conversations as more intuitive for understanding model decisions than writing and debugging cumbersome code to generate explanations. Further, they imagined explainability dialogues giving more context to the explanations, such as assessments of accuracy, descriptions of how to interpret the explanations, and uncertainty, much like people do in everyday conversations [66]. Finally, they viewed conversations happening in a context-dependent manner, where they could easily follow up on previous queries for additional clarification or further lines of questioning. We summarize the key desiderata agreed on by the respondents below, ordered by how many respondents agreed each requirement was important, where $(\mathrm{N} / 26)$ indicates the number who agreed:
112
+
113
+ (24/26) The dialogue should eliminate the need to learn and write the commands for generating explanations.
114
+ (24/26) The system should describe the accuracy of the explanation in the dialogues.
115
+ (23/26) The system should preserve context and enable follow-up questions.
116
+ (21/26) The responses should be provided in real-time.
117
+ (17/26) The dialogue system should decide which explanations to run. Users should not have to ask for a specific explainability algorithm.
118
+
119
+ These desiderata capture key elements in respondents' ultimate goals of engaging in conversations with machine learning models. For instance, respondents were excited about explainability dialogues involving natural, everyday questions to machine learning models such as, "why did you make this decision?" and therefore agreed such systems should eliminate the need to write code, take the conversation context into account, and happen in real-time. Overall, respondents felt that natural language explainability dialogues would greatly improve their experiences using explanations and had clear ideas about how such a system should behave.
120
+
121
+ # 3 Principles of Interactive Explanations via Natural Language Dialogue
122
+
123
+ Leveraging our findings from the interviews, we outline a set of principles interactive explanations should follow. Based on these principles, we suggest natural-language explainability dialogues as a promising solution to enabling interactive explanations. As a starting place for further research in this direction, we suggest a concrete design for an explainability dialogue system.
124
+
125
+ # 3.1 Principles of Interactive Explanations
126
+
127
+ Given the need for explanations to enable better interactions with models through custom queries, additional follow-up questions, and proper contextualization, there are exciting research opportunities for developing interactive explanations for machine learning models. Interactive explanations should enable rich and continuous interactions with models that allow users to understand how their models work. Further, users should engage with interactive explanations in ways that are not frustrating, require minimal or no coding overhead, and facilitate improved model understanding as they use the system. Also, interactive explanations should improve users' ability to correctly utilize explainability techniques to understand how trustworthy models are and interpret the results of explanations. Finally, interactive explanations should give users a properly calibrated sense of trust in their models, encouraging trust when continued interactions reveal their models are right for the right reasons and reducing trust when this is not the case.
130
+
131
+ Considering the interviews and our perceptions about what interactive explanations should look like, we now propose five principles for designing an interactive explanation system as a starting place for research in this direction. The principles are as follows:
132
+
133
+ - Principle 1 (Interact Appropriately). The system should understand continuous requests for explanations and be able to efficiently map these to appropriate explanations to run.
134
+ - Principle 2 (Respond Appropriately). The system should respond with informative, properly contextualized, and satisfying explanations for why the model made specific decisions.
135
+ - Principle 3 (Properly Calibrated Responses). The system should provide reliable notions of confidence along with explanations.
136
+ - Principle 4 (Reduce Explainability Overhead). The system should reduce or eliminate the need for users to write code to explain machine learning models. The system should strictly make understanding machine learning models easier for users.
137
+ - Principle 5 (Consider Context). The system should condition its understanding of inputs on the previous interactions, including prior inputs, responses, and data sets among potentially other artifacts generated in the interaction.
138
+
139
+ Principle 1 is critical for the system to comprehend users' inputs and map them to appropriate explanation outcomes. The system must understand a wide range of queries and how to act on each of them appropriately. Principle 2 ensures that users can understand the explanations provided by the system. This principle is essential because users should easily comprehend the system outputs and retain the natural flow of the interactions. Principle 3 is important because current explanations often do not adequately contextualize their responses. Ideally, the system should provide confidence or accuracy associated with the explanation so that users will know when to trust the explanations. Principle 4 is paramount because adding an interaction layer on top of explanations inherently creates further technical complexity. Consequently, this increases the risk of errors by misunderstanding user inputs or running the wrong explanations. The benefits of enabling interactive explanations should outweigh any potential issues and complexities of implementing an interactive explanation system. Principle 5 is important for ensuring the system engages in natural interactions with users. The interactions should build on themselves, establishing different threads used to condition future responses where appropriate.
140
+
141
+ Overall, Principles 1 and 2 speak to the quality of single-round explanations; Principle 3 highlights new capabilities that prior works have overlooked; Principle 4 weighs the benefit of natural language dialogues against the risks; Principle 5 promotes multi-round conversations. We recommend that designers of explainability dialogue systems use these principles as a starting place when deciding whether and how to implement such a system.
142
+
143
+ # 3.2 Roadmap towards an Explainability Dialogue System
144
+
145
+ Considering the need for an interactive explainability system and favorable opinions of the interview respondents, we suggest natural language dialogues as an appropriate way to accomplish interactive explanations. Natural language dialogues satisfy principles (1-5), making them an ideal choice for such a system. For instance, dialogues can handle a diverse set of requests, can offer dynamic responses, and are inherently contextual (Principles 1, 2, 5). Further, it is possible to include extensive context for explanations generated by the system in natural language (Principle 3). Last, by enabling machine learning models to be questioned in natural language, like another colleague, explainability dialogues will make it straightforward for anyone to understand ML models (Principle 4).
148
+
149
+ Moreover, a dialogue-based explanation interface provides a novel perspective on interacting with ML models. Rather than treating a model as an object that only returns decisions for inputs, we can think of models as entities that anyone can interact with in natural language. This allows models to be "accountable" in the sense that anyone can query them for a justification behind decisions. As we see in the interviews, domain experts that use machine learning models have a strong desire to treat models as another colleague, i.e., an entity that can be asked, in natural language terms, for a decision and justification. In this way, domain experts wish to decide whether or not to trust machine learning models in a manner that is more accessible and natural than using current explainability techniques out of the box.
150
+
151
+ Implementing an explainability dialogue system that satisfies principles (1-5) involves designing several non-trivial technical components that touch on a wide array of technologies. It will likely require building both supervised and generative natural language processing models, a wide variety of different explanations, and novel improvements to explanations. In addition, ensuring that the system is available and can rapidly serve responses will require advances in efficiency (e.g., distributed systems). Finally, ensuring that users can interact with the dialogue system satisfactorily will require further HCI research and user studies.
152
+
153
+ One way to design an explainability dialogue system is by separating the system into four modules:
154
+
155
+ (i) A natural language understanding module that understands the user input and determines what explanation(s) to generate.
156
+ (ii) An explanation module that runs the explanations.
157
+ (iii) A response generation module that serves a response to the user.
158
+ (iv) A GUI interface for the system.
159
+
160
+ For this system design, we will consider designing them independently, though it could be possible to design modules (i-iii) in an end-to-end manner.
161
+
162
+ For module (i), dialogue system designers could train two large language models (LLMs) [23, 71]. The first could predict what filtering operations to run (e.g., get instance $\text{id} = 0$) in a semantic parsing style [38]. This step would determine which instances to explain. A second model could predict what explanation to run out of a finite set of possible explanations. To consider the context of the conversation, designers could condition these two models on the previous text in the conversation within a fixed window size. Finally, training these models would require generating two separate datasets. The first model would require a standard text-to-SQL dataset, like Spider or WikiSQL [94, 100]. The dataset for the second model would need to pair potential input queries with appropriate filtering and explanation choices. Also, it would be necessary to augment both of these datasets with examples of context from conversations.
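+
+ To make this concrete, below is a minimal, illustrative sketch of module (i) under strong simplifying assumptions: a tiny hand-labeled set of utterances stands in for the training data described above, a TF-IDF classifier stands in for the second model, and a regular expression stands in for the semantic-parsing model. The names (`parse_filter`, the toy utterances and labels) are hypothetical and not part of any library.
+
+ ```python
+ import re
+
+ from sklearn.feature_extraction.text import TfidfVectorizer
+ from sklearn.linear_model import LogisticRegression
+ from sklearn.pipeline import make_pipeline
+
+ # Toy stand-in for a dataset mapping user queries to explanation types.
+ utterances = [
+     "why did the model predict this patient as high risk",
+     "which features mattered most for this prediction",
+     "what would I need to change to get a different outcome",
+     "show me the rules the model uses overall",
+ ]
+ labels = ["feature_importance", "feature_importance", "counterfactual", "global_rules"]
+
+ # Stand-in for the second model: predict which explanation to run.
+ intent_model = make_pipeline(TfidfVectorizer(), LogisticRegression())
+ intent_model.fit(utterances, labels)
+
+ def parse_filter(text):
+     """Stand-in for the first (semantic-parsing) model: extract a simple
+     filtering operation such as 'data points with ids 10-15'."""
+     match = re.search(r"ids?\s+(\d+)(?:\s*-\s*(\d+))?", text)
+     if match:
+         start, end = int(match.group(1)), int(match.group(2) or match.group(1))
+         return {"op": "filter_by_id", "ids": list(range(start, end + 1))}
+     return {"op": "all_instances"}
+
+ query = "please provide feature importance explanations for data points with ids 10-15"
+ print(parse_filter(query), intent_model.predict([query])[0])
+ ```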
163
+
164
+ With the explanations to generate and filtering operations in hand, module (ii) runs the explanations. One complication with this step is that if users request many explanations, generating explanations could be a bottleneck in the system. It will likely be necessary to distribute explanation generation across a set of machines or servers to ensure the system generates explanations rapidly.
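+
+ As a rough sketch of this batching idea (local worker processes here stand in for a real pool of machines, and `explain_instance` is a hypothetical placeholder for whichever explainer the system selected):
+
+ ```python
+ from concurrent.futures import ProcessPoolExecutor
+
+ def explain_instance(instance_id):
+     # Placeholder for an expensive, model-querying explanation
+     # (e.g., a perturbation-based feature importance method).
+     return {"id": instance_id, "explanation": f"importances for instance {instance_id}"}
+
+ def explain_batch(instance_ids, max_workers=4):
+     # Fan the requested instances out across workers so a request for many
+     # explanations does not block the rest of the dialogue.
+     with ProcessPoolExecutor(max_workers=max_workers) as pool:
+         return list(pool.map(explain_instance, instance_ids))
+
+ if __name__ == "__main__":
+     print(explain_batch(range(10, 16)))
+ ```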
165
+
166
+ For generating responses in module (iii), designers could train a generative LLM in a fact-aware manner to return rich text outputs from the system that also include factually correct explanation responses [61]. Because visualizations are vital components of explanations, it is also likely that system designers will need to include the visualizations from the explanations [63].
169
+
170
+ Finally, to facilitate users providing text input and serving responses, it will be necessary to design a graphical user interface (GUI). It could be possible to create such an interface with ML-friendly UI packages such as Gradio [2]. We envision what a conversation in such an explainability dialogue system could look like in Figure 1. The user provides the system with a high-level question about why the model predicts women older than sixty-five as high-risk for developing lung cancer. The system understands the user's request to generate feature importance explanations across this demographic. The system performs the filter operations necessary to get these instances and runs the feature importance explanations. Last, it generates a helpful summary of the operations and returns it to the user. Finally, the user expresses surprise that one of the features, lives_in_city, is important. The system understands the user's hesitation and provides further validation for the claim to the user. Overall, the dialogue system correctly handles the user's questions, provides valuable responses, and understands sufficient context in the conversation to handle further follow-ups.
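+
+ For illustration, a minimal text-in, text-out interface of this kind could look roughly like the sketch below using Gradio [2]; the `respond` function is a hypothetical stand-in for the full pipeline (modules (i)-(iii)) and simply returns a canned summary here.
+
+ ```python
+ import gradio as gr
+
+ def respond(user_message):
+     # In the full system this would call the understanding, explanation, and
+     # response-generation modules; here we return a canned summary instead.
+     return ("For the selected instances, the most important features were "
+             "smoking_history and age; lives_in_city also contributed positively.")
+
+ demo = gr.Interface(fn=respond, inputs="text", outputs="text",
+                     title="Explainability dialogue (sketch)")
+
+ if __name__ == "__main__":
+     demo.launch()
+ ```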
171
+
172
+ # 4 Natural Language Dialogues for Explainability: Risks and Research Opportunities
173
+
174
+ While there are concrete approaches to designing natural language dialogues for explainability, there are numerous challenges in implementing such a system that motivate several research opportunities. We divide our discussion about the risks and opportunities into four parts. First, we focus on the natural language processing aspect of the system, including the language understanding and generation components. Second, we examine the explainability aspect of the system. Next, we evaluate the interface and UI component of the dialogue. Last, we discuss the scalability and real-time response needs of the explainability dialogue system.
175
+
176
+ # 4.1 Language Understanding Considerations
177
+
178
+ In this subsection, we discuss challenges in understanding natural language in explainability dialogues.
179
+
180
+ # 4.1.1 Understanding Language in Explainability Dialogues
181
+
182
+ A fundamental difficulty in developing an explainability dialogue system is that model designers ask many complex questions when interacting with machine learning models. Developing a system capable of understanding various user questions is difficult given the broad domain of possible queries, their complexity, and the different ways users might structure them. For instance, why does a model make predictions across the entire domain, for groups of instances, or individual instances? Does the model learn intuitive rules for prediction or something more complicated? What parts of the model are most important for predictions? What data is most useful for learning? In what ways do you have to change instances to get alternative predictions? The wide variety of questions users of an explainability dialogue system will ask dramatically adds to the complexity of developing such a system.
183
+
184
+ There are many possible ways to structure the language in the explainability dialogue in addition to application-specific terminology. These variations can come in the form of different semantics. For instance, a user might ask "what are the most important features for this prediction" or "what inputs did the model rely on when making this classification." In both cases, the explainability dialogue system must understand that the user requests a feature importance explanation. Users can also ask questions with different levels of specificity. A user concerned with running a particular explanation might ask, "please provide LIME feature importance explanations for data points with ids 10-15." Users will also likely ask high-level questions such as "what is the reason for the predictions for my data." In both these cases, the explainability dialogue system must correctly understand the user questions and map them to an appropriate outcome.
187
+
188
+ Finally, explainability dialogues are complicated by the application-specific nature of explaining machine learning models. In a medical application, users will ask questions about different features and outcomes than in a finance application. For instance, users of such a system applied to a medical application will ask whether medical history contributed to specific predictions by the model. In contrast, users of financial applications will likely ask about income or employment history. Designing a satisfactory explainability dialogue system involves developing the system to handle a wide variety of application-specific language.
189
+
190
+ Given the large set of possible query types, complexity due to different applications, and different ways to phrase explainability questions, developing natural language processing systems that can understand explainability dialogues is technically challenging. Ideally, such a system should be capable of understanding a wide variety of explainability dialogue and quickly adapting to new, domain-specific terminology. LLMs may be ill-equipped out of the box to handle explainability dialogues, for instance due to deficiencies in their training data for such tasks. Consequently, additional work may be necessary to adapt LLMs to explainability dialogue.
191
+
192
+ # 4.1.2 Allowing Rich Multi-Turn Dialogue
193
+
194
+ Another technical challenge with explainability dialogue systems is developing multi-turn conversational systems—those that use the context of the conversation to inform future responses. Though many single-turn conversational AI systems exist that do not leverage the previous context (e.g., Siri), ideally an explainability dialogue system should be capable of leveraging the entire conversation to inform future responses. Leveraging context to inform the dialogue responses enables richer and more natural conversations with the explainability dialogue system. For example, it is highly desirable to compare and contrast with previous explanations within a conversation. Users might want to ask, “Does the model rely on similar features for this instance as it did with the previous explanation?” or “if I changed this data point, how would the explanation change?” In both these situations, it is necessary to maintain the state of the conversation to inform the future response.
195
+
196
+ Enabling multi-turn conversations presents a number of significant technical hurdles. For instance, it could be possible to condition LLMs on the conversation. However, LLMs typically have a fixed window size for text, making it difficult to include the entire conversation. Consequently, this motivates difficult technical decisions, such as whether to include a fixed-size window of the conversation or to select parts of the conversation to condition on in some intelligent manner. Of course, selecting which parts of the conversation to condition on increases the likelihood that critical parts of the conversation will be erroneously excluded, harming the user experience.
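+
+ One simple policy, sketched below, is to keep only the most recent turns that fit within a token budget; `count_tokens` is a crude, hypothetical stand-in for the model's actual tokenizer.
+
+ ```python
+ def count_tokens(text):
+     # Crude proxy for a real tokenizer: count whitespace-separated words.
+     return len(text.split())
+
+ def truncate_history(turns, budget=512):
+     """Keep the most recent turns whose total token count fits the budget."""
+     kept, used = [], 0
+     for turn in reversed(turns):
+         cost = count_tokens(turn)
+         if used + cost > budget:
+             break
+         kept.append(turn)
+         used += cost
+     return list(reversed(kept))
+
+ history = [
+     "user: why was the patient with id 10 predicted high risk?",
+     "system: smoking_history and age were the most important features.",
+     "user: does the model rely on similar features for id 11?",
+ ]
+ print(truncate_history(history, budget=25))
+ ```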
197
+
198
+ # 4.1.3 Responding Appropriately in Dialogue
199
+
200
+ An additional technical challenge is determining how to appropriately generate responses to user inputs in the explainability dialogue. Ideally, responses provided by the explainability dialogue system will be highly flexible, dynamically generated for any given user input, sensitive to the tone of the conversation and the expected effects of the response, will seek additional information from the user when necessary, and will present explanations in both a visually and semantically satisfying way. Nevertheless, generating such responses in dialogue is still an open problem using current state-of-the-art language models, such as LLMs. Though LLMs generate realistic text outputs, allowing free-form natural language responses in dialogue is unpredictable, often loses context, and can lead to incorrect or, in the worst case, offensive responses [97, 19, 65, 98]. In settings where it is not acceptable to have unpredictable and potentially harmful responses, it may not be appropriate to use responses dynamically generated by LLMs. Consequently, it could be necessary to use scripted responses in order to enforce the trustworthiness of the explainability dialogue system.
203
+
204
+ Still, using scripted responses presents a number of significant considerations and shortcomings. As we have discussed, there are numerous different types of explanations, and creating acceptable responses for all of these explanations is a challenging and laborious task. For example, explanations are often presented visually (e.g., the SHAP feature importance plots [63]). Determining an acceptable way to present visual explanation responses in conversations along with sufficient textual explanations so that the user will understand the explanations requires careful consideration. Further, scripted dialogues may lack the flexibility to enable meaningful interactions in multi-turn conversations. Last, there are opportunities for a middle ground between completely dynamic explainability dialogue systems and fully scripted ones, in ways that can satisfy both reliability and flexibility goals. Overall, designers of explainability dialogue systems must carefully weigh the trade-offs between dynamically generating responses and hard-coding them.
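+
+ As one illustration of the scripted end of this spectrum, responses can be assembled from vetted templates that are filled with explanation outputs rather than generated freely; the template wording and field names below are purely hypothetical.
+
+ ```python
+ # Vetted, hand-written response templates keyed by explanation type.
+ TEMPLATES = {
+     "feature_importance": (
+         "For {scope}, the most important features were {top_features}. "
+         "The explanation's estimated fidelity to the model is {fidelity:.0%}."
+     ),
+     "counterfactual": (
+         "Changing {changes} would flip the model's prediction for {scope}."
+     ),
+ }
+
+ def render_response(kind, **fields):
+     # Filling a fixed template avoids the unpredictability of free-form generation.
+     return TEMPLATES[kind].format(**fields)
+
+ print(render_response(
+     "feature_importance",
+     scope="women older than sixty-five",
+     top_features="smoking_history, age, and lives_in_city",
+     fidelity=0.92,
+ ))
+ ```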
205
+
206
+ # 4.2 Explainability Considerations
207
+
208
+ This subsection discusses explainability considerations in the dialogue system. First, we focus on what explanations to use in such a dialogue system. Next, we look at issues with current explanations and how these may cause complications in a dialogue. Finally, we discuss how current explanations are unsatisfactory for dialogue and opportunities for improving explanations.
209
+
210
+ # 4.2.1 Mapping Intent to Correct Explanations
211
+
212
+ Once the system understands user intent within the dialogue, a critical technical consideration is mapping the intent to an appropriate set of explanations to generate for the conversation. Foremost, mapping intent to explanations is complicated because multiple different explanations could provide a satisfactory response to a user request. For example, in conversations where users request the important features for prediction, the dialogue system could provide any feature importance explanation. Considering that there are numerous different types of feature importance explanations (LIME [73], BayesLIME [80], SHAP [63], Smooth Grad [81], etc.) each with different trade-offs, it is difficult to decide what explanations to provide. For instance, LIME explanations are often relatively quicker to generate than SHAP explanations. However, SHAP provides a more robust implementation and better support. These trade-offs must be considered when choosing between what (or what set of) feature importance explanations to provide in the explainability dialogue.
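+
+ A small sketch of such a mapping is shown below; the wrapper functions and the dispatch rule (preferring the faster explainer when many instances are requested) are illustrative assumptions rather than recommendations.
+
+ ```python
+ def lime_style_importance(instance_ids):
+     # Hypothetical wrapper around a LIME-style explainer [73]: often faster per instance.
+     return f"LIME-style importances for {len(instance_ids)} instances"
+
+ def shap_style_importance(instance_ids):
+     # Hypothetical wrapper around a SHAP-style explainer [63]: slower, more robust tooling.
+     return f"SHAP-style importances for {len(instance_ids)} instances"
+
+ def choose_explainer(intent, instance_ids):
+     if intent == "feature_importance":
+         # Illustrative policy: trade robustness for speed on large requests.
+         return lime_style_importance if len(instance_ids) > 20 else shap_style_importance
+     raise ValueError(f"no explainer registered for intent {intent!r}")
+
+ ids = list(range(5))
+ explainer = choose_explainer("feature_importance", ids)
+ print(explainer(ids))
+ ```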
213
+
214
+ Ultimately it is the responsibility of the explainability dialogue system designer to make decisions regarding which explanations to provide in the dialogue. This decision is not straightforward because explanations within the same categories (i.e., feature importance, counterfactual explanations, global decision rules) make differing assumptions and often provide different results. The designer will need to consider the applications of the system, technical implementations of the explanations, and fundamental trade-offs between the different types of explanations to make an informed decision around what explanations to provide.
215
+
216
+ # 4.2.2 Shortcomings of Current Explanations
217
+
218
+ Though explanations are beneficial for informing users why a model makes decisions, current techniques suffer from several shortcomings. If used in explainability dialogue, these shortcomings will likely translate into the system, potentially causing issues by providing misleading explanations or causing significant system slowdowns. These shortcomings include that explanations are unstable [33, 79, 25, 3, 5], meaning that slight perturbations to instances can result in significant changes to the explanation. In addition, it is difficult to set the hyperparameter values of explanations (the number of perturbations in LIME [73] & SHAP [63] or the kernel width in LIME). These hyperparameter choices can have significant effects on the explanation. Further, they are inconsistent [55]—rerunning explanations can lead to different results. In addition, there are few metrics to determine the quality of the explanations, making it challenging to decide when to disregard generated explanations. Finally, specific explanations (e.g., LIME & SHAP) are highly time-intensive to generate because they rely on repeatedly querying the model [20]. Designers of explainability dialogue systems must consider the different limitations of current explanation techniques and evaluate how they will affect the system's performance.
221
+
222
+ # 4.2.3 Opportunities For Improving Explanations
223
+
224
+ There are several opportunities to improve explanations to ensure they are satisfactory for an explainability dialogue. Designers of explainability dialogues should focus on developing consistent explanations and ensuring that the same query repeated does not return different results. In addition, designers of such systems should consider how to generate confidence or accuracy metrics for explanations. There is some work in this direction in the form of Bayesian Local Explanations by Slack et al. [80] that generate local explanations along with associated confidence values. However, extensive further work is needed to determine confidence metrics for additional types of explanations beyond local explanations. Further, designers should develop techniques to set the explanation hyperparameters without requiring intervention from users. This direction also motivates developing hyperparameter-free explanations. System designers could more easily incorporate hyperparameter-free explanations in dialogue systems to eliminate the need to set hyperparameters throughout the dialogue. Finally, researchers should consider developing explanations that are more accurate in the first place, making them less error-prone from the start. Overall, there is considerable opportunity for developing novel explainability techniques that make explanations more compatible with dialogues.
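+
+ As a crude illustration of the consistency checks discussed above, one can re-run a stochastic explainer several times and report the per-feature spread; `noisy_explainer` below is a hypothetical stand-in for a perturbation-based method, not an actual algorithm from the literature.
+
+ ```python
+ import random
+ import statistics
+
+ def noisy_explainer(instance, seed):
+     # Pretend importances vary run to run because of random perturbations.
+     rng = random.Random(seed)
+     base = {"smoking_history": 0.45, "age": 0.30, "lives_in_city": 0.10}
+     return {feature: weight + rng.gauss(0, 0.05) for feature, weight in base.items()}
+
+ def explanation_spread(instance, runs=10):
+     # Mean and standard deviation of each feature's importance across repeated runs.
+     samples = [noisy_explainer(instance, seed) for seed in range(runs)]
+     return {
+         feature: (statistics.mean(s[feature] for s in samples),
+                   statistics.stdev(s[feature] for s in samples))
+         for feature in samples[0]
+     }
+
+ print(explanation_spread(instance=None))
+ ```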
225
+
226
+ # 4.3 Interface Considerations
227
+
228
+ An interface to the explainability dialogue system should facilitate users providing text inputs and rapidly receiving responses from the back-end system in a manner that is straightforward to understand. Depending on the application, it could be possible to have either a text-based interface or a spoken-word dialogue system. For operational applications where users may be unable to take the time to type out sentences, a spoken-word system may be appropriate. However, adding a step to convert spoken words into text adds further complexity and room for error. A text-based explainability dialogue system will likely suffice in many scientific, engineering, or corporate applications. Also, because explanations often involve visual components, it should be possible for the interface to display images, plots, and tables. Finally, the designers should build an interface that is easily accessible for users, such as a web application compatible with desktop computers, tablets, and cell phones. Altogether, the interface for the dialogue system should be easy to use, both in terms of its accessibility and presentation.
229
+
230
+ # 4.4 Scalability Considerations
231
+
232
+ Ideally, an explainability dialogue system should yield explanations without lag time from when the user provides input. However, there are several technical considerations that influence the response time and scalability of such a system. These considerations can be divided between the NLP and explainability components of the dialogue system. First, we discuss NLP scalability considerations. Afterward, we cover explainability concerns.
235
+
236
+ Designers will likely use LLMs in the explainability dialogue system to understand complex natural language inputs and yield rich text responses. Because LLMs have many parameters, GPU acceleration is necessary. GPUs can increase the burden of running and scaling the system due to their cost, limited availability, and the difficulty of maintaining them. Further, querying parameter-intensive LLMs can be time-consuming, even with GPU acceleration, slowing down the system's response time. Finally, it may be necessary to use state-of-the-art LLMs to achieve acceptable performance at explainability dialogue. System designers cannot easily run these models on available hardware because of their high parameter counts, and designers may have to use costly APIs provided by private companies (e.g., GPT-3 [15]). Designers will need to carefully weigh NLP modeling choices with hardware availability when designing dialogue systems so that they are not a bottleneck in scaling and running explainability dialogue systems.
237
+
238
+ In addition, there are potential scalability issues with the explanations used in the system. Various explanations have prolonged run times, which could adversely affect the system's response time. For example, model-agnostic feature importance explanations such as LIME [73] and SHAP [63] are notoriously slow due to their repeated querying of the black-box model [80]. If used to generate feature importance explanations in an explainability dialogue, such explanations could lead to long response times between when the user provides input and the system responds, especially if the black-box model is complex or the users ask for many explanations. Though explanations have slow runtimes, there are potential technical solutions. For instance, the system could generate and cache explanations in the background during the conversation. System designers could further accelerate explanation generation through parallelism. If users request explanations that are cached, the system can use them in the dialogue immediately. Of course, introducing caching and parallelism further increases the system's complexity and resource requirements. Overall, system designers will need to consider how best to handle slow explanation run times to ensure real-time explainability dialogue, given the system's expected needs and resource constraints.
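+
+ A minimal sketch of the caching idea is below, keyed on the instance id and explanation type, with a sleep standing in for a slow, model-querying explainer:
+
+ ```python
+ import time
+ from functools import lru_cache
+
+ @lru_cache(maxsize=10_000)
+ def cached_explanation(instance_id, explanation_type):
+     time.sleep(0.5)  # stand-in for a slow explanation that repeatedly queries the model
+     return f"{explanation_type} explanation for instance {instance_id}"
+
+ start = time.time()
+ cached_explanation(10, "feature_importance")  # computed and cached
+ first_call = time.time() - start
+
+ start = time.time()
+ cached_explanation(10, "feature_importance")  # served immediately from the cache
+ second_call = time.time() - start
+
+ print(f"first call: {first_call:.2f}s, cached call: {second_call:.4f}s")
+ ```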
239
+
240
+ # 5 Related Work
241
+
242
+ **Interpretability Techniques** Work on machine learning explanations includes two main directions: inherently interpretable models and post hoc explanations. Researchers have proposed models that are inherently interpretable—i.e., those models that are interpretable by design. Inherently interpretable methods include decision lists and sets [53, 7], additive models [84, 62], and prototype-based models [47, 18, 57]. However, inherently interpretable models constrain model designers to specific models that may lack sufficient expressiveness for complex tasks. Consequently, there has been considerable recent interest in post hoc explanations.
243
+
244
+ On the other hand, post hoc explanations provide explanations for machine learning models that have already been trained, allowing greater flexibility in the modeling process. There are several different types of post hoc explanations. These include model-agnostic methods that do not rely on access to model internals such as LIME [73] & SHAP [63, 21], BayesLIME & BayesSHAP [80], partial dependency plots [30], and permutation feature importance [14]. Also, there are post hoc explanations that assume access to model internals (e.g., gradient access) [81, 75, 82, 76]. There are also global post hoc explanations. These methods summarize the model's decision logic across part of or the entire domain into interpretable rules or decision trees [11, 53, 48]. Last, there has been considerable recent interest in counterfactual explanations that describe changes to an instance that will result in different model predictions [87, 85, 70, 86, 10, 43, 44]. Model providers can leverage counterfactual explanations to provide individuals adversely affected by model decisions with recourse. Though there are numerous works on developing explanations, few works have considered interactive explanations in the form of dialogue.
247
+
248
+ **HCI Analyses of Explanations** Within the human-computer interaction (HCI) literature, several studies examine how model designers design, build, and correct ML models, finding that interpretability and interactivity are critical for iterating on ML models [1, 27, 41, 12]. Fails and Olsen [29] first propose the term "interactive machine learning" for systems where users train ML models and correct their predictions. Numerous interactive machine learning systems exist, including general purpose systems like Orange [22] and application-specific platforms such as Abstracker for citation review [88]. Additional works study to what extent ML explainability techniques help data scientists and find that data scientists trust explanations too much or do not use them correctly [45]. Further works evaluate the interpretability of certain classes of machine learning models and determine that humans have an easier time simulating models with fewer features, fewer parameters, and access to the model's internals. However, people still struggle to decide when to trust the model predictions, even if they can simulate the model [78, 69]. Furthermore, a growing set of literature adopts application-based evaluation and examines the impact of explanations on human-AI decision making [52, 50, 35, 34, 99, 68, 91, 49]. In particular, Liu et al. [60] study the effect of interactive explanations that allow users to change the input and observe the differences in the output. To the best of our knowledge, there is little work in allowing interaction through natural language dialogue for explainability.
249
+
250
+ **ML Analyses of Explanations** There have been several critical analyses of explanations from within the ML literature. Foremost, Rudin [74] makes the case that post hoc explanations are inherently unfaithful to the model. Instead, model designers should build inherently interpretable models. Other works examine the robustness of explanations from both theoretical and empirical perspectives [32, 56, 17, 33, 4, 96, 51]. Alvarez-Melis and Jaakkola [5] show that explanations are unstable and small perturbations can lead to drastically different explanations. Zafar et al. [96] demonstrate that models differing only in their initialization can have distinct and sometimes contradictory explanations. Further works demonstrate that malicious adversaries can manipulate explanations, demonstrating their unreliability [6, 89, 24]. For instance, Slack et al. [79] show adversaries can design models that fool LIME [73] & SHAP [63] explanations.
251
+
252
+ **Dialogue Systems** There has been considerable work in developing dialogue systems in the past decades [8, 92, 19, 31]. Currently, state-of-the-art approaches rely on deep learning systems to understand user inputs and to generate responses [64]. Work in dialogue systems can be divided into task-oriented dialogues and open-domain dialogues. Task-oriented dialogue systems address a particular problem, such as scheduling an appointment or making a reservation. Open-domain dialogues do not solve a particular task and instead attempt to "chit-chat" with users. We focus on task-oriented dialogues because they are most relevant to an explainability dialogue system. There are several different approaches to task-oriented dialogue systems [13, 93, 28, 101, 72, 16, 67, 95]. Certain systems use separate modules for language understanding, state tracking, planning, and response generation to accomplish task-oriented dialogues [40, 39, 46, 77]. Other systems use fully end-to-end approaches for task-oriented dialogue [54, 90, 37]. There are several trade-offs between modular and end-to-end approaches. With modular approaches, it is difficult to propagate errors in the system response to all the system modules [64, 54]. Instead, designers must carefully design each of the modules. However, end-to-end approaches require sufficient data in order to have high response quality, making it difficult to use them in data-scarce settings [9, 42]. Further, when errors do occur, it is often more difficult to debug end-to-end dialogue systems. Designers of explainability dialogues will need to carefully consider the availability of data for such systems when deciding which approaches to pursue.
253
+
254
+ # 6 Discussion & Conclusions
255
+
256
+ Natural language dialogues are a promising approach for facilitating interactive explanations of machine learning models. Language is an ideal medium to engage with machine learning models because of its flexibility and accessibility, enabling anyone to understand ML models. Consequently, explainability dialogue systems could enable rich interactions with models through complex, high-level queries and in-depth, contextualized responses consisting of both text and visual artifacts. While explainability dialogues are especially appealing for domain experts or users with limited machine learning knowledge, these systems are still valuable to ML experts because of their ease of use and capacity to rapidly facilitate understanding models in many ways. As a result, explainability dialogues could serve a vital role in enabling model understanding for any stakeholder in an ML model.
257
+
258
+ While there are numerous advantages of natural language as a solution to interactive explanations, it is important to understand its limitations in various application scenarios. First, different domains may need specific interactions to appropriately engage with the models and data. For example, users will likely ask questions to an explainability dialogue system for images that reference specific parts of the image such as, "is the nose of the dog an important feature" or "is the model using the upper right-hand side of the image?" It is challenging to understand what parts of the image the user refers to with natural language. However, it could be possible to develop multimodal models capable of handling such requests. To better suit these domains, it could also be possible to incorporate other types of interactions, such as clicking on parts of an image, into the dialogue system interface.
259
+
260
+ Similarly, some system responses may be difficult to communicate in natural language. For example, it is quite challenging to communicate uncertainty in natural language, making it difficult for a dialogue system to explain uncertainty. One solution is to use plots or other visuals to present difficult-to-communicate concepts. Designers can easily include these in an explainability dialogue, as in Figure 1.
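+
+ For instance, a simple bar chart with error bars (the values below are made up purely for illustration) can convey both importances and their uncertainty at a glance:
+
+ ```python
+ import matplotlib.pyplot as plt
+
+ features = ["smoking_history", "age", "lives_in_city"]
+ importances = [0.42, 0.31, 0.12]
+ uncertainty = [0.05, 0.04, 0.09]  # e.g., spread across repeated explanation runs
+
+ fig, ax = plt.subplots()
+ ax.barh(features, importances, xerr=uncertainty, capsize=4)
+ ax.set_xlabel("Estimated feature importance")
+ ax.set_title("Feature importance with uncertainty")
+ plt.tight_layout()
+ plt.show()
+ ```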
261
+
262
+ In addition, domain experts engaging in multiple lines of inquiry with a machine learning model may wish to pick up early threads, drop recent ones, or switch between many different threads in their inquiry. However, a natural language dialogue is a linear interaction, where the system and users take turns responding. This structure makes it challenging to have multiple threads in the conversation. However, it is possible to include a multi-threaded conversation feature, much like Threads in the Slack messaging application or Replies in iMessage.
263
+
264
+ Finally, language can often be vague. Questions are often under-specified, forcing respondents to assume what the questioner wants. Often, it is necessary to ask follow-up questions. Also, different people may have completely different intentions when they ask the same thing. This ambiguity makes it difficult to understand what the questioner wants even though the system fully comprehends the text provided. A natural language dialogue system will likely need to follow up on underspecified questions. When users cannot specify what they want in words, it may be necessary for the system to provide access to a command line or coding interface to request what they want.
265
+
266
+ Overall, explainability dialogue systems deserve considerable attention from the research community. Such systems could revolutionize how domain experts and users interface with machine learning models, facilitating the safer use of machine learning models. We encourage the research community to develop explainability dialogue systems and enable more accessible model understanding.
267
+
268
+ # References
269
+
270
+ [1] A. Abdul, J. Vermeulen, D. Wang, B. Y. Lim, and M. Kankanhalli. Trends and Trajectories for Explainable, Accountable and Intelligible Systems: An HCI Research Agenda, page 1-18. Association for Computing Machinery, New York, NY, USA, 2018. ISBN 9781450356206. URL https://doi.org/10.1145/3173574.3174156.
271
+
272
+ [2] A. Abid, A. Abdalla, A. Abid, D. Khan, A. Alfozan, and J. Zou. Gradio: Hassle-free sharing and testing of ml models in the wild. arXiv preprint arXiv:1906.02569, 2019.
273
+ [3] J. Adebayo, J. Gilmer, M. Muelly, I. Goodfellow, M. Hardt, and B. Kim. Sanity checks for saliency maps. In Advances in Neural Information Processing Systems, pages 9505-9515, 2018.
274
+ [4] S. Agarwal, S. Jabbari, C. Agarwal, S. Upadhyay, S. Wu, and H. Lakkaraju. Towards the unification and robustness of perturbation and gradient based explanations. In M. Meila and T. Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pages 110-119. PMLR, 18-24 Jul 2021. URL https://proceedings.mlr.press/v139/agarwal21c.html.
275
+ [5] D. Alvarez-Melis and T. S. Jaakkola. On the robustness of interpretability methods. ICML Workshop on Human Interpretability in Machine Learning, 2018.
276
+ [6] C. Anders, P. Pasliev, A.-K. Dombrowski, K.-R. Müller, and P. Kessel. Fairwashing explanations with off-manifold detergent. In H. D. III and A. Singh, editors, Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 314-323. PMLR, 13-18 Jul 2020. URL http://proceedings.mlr.press/v119/anders20a.html.
277
+ [7] E. Angelino, N. Larus-Stone, D. Alabi, M. Seltzer, and C. Rudin. Learning certifiably optimal rule lists for categorical data. arXiv preprint arXiv:1704.01701, 2017.
278
+ [8] S. Arora, K. Batra, and S. Singh. Dialogue system: A brief review. *ArXiv*, abs/1306.4134, 2013.
279
+ [9] A. Balakrishnan, J. Rao, K. Upasani, M. White, and R. Subba. Constrained decoding for neural NLG from compositional representations in task-oriented dialogue. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 831-844, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1080. URL https://aclanthology.org/P19-1080.
280
+ [10] S. Barocas, A. D. Selbst, and M. Raghavan. The hidden assumptions behind counterfactual explanations and principal reasons. In Proceedings of the 2020 Conference on Fairness, Accountability, and Transparency, FAT* '20, page 80-89, New York, NY, USA, 2020. Association for Computing Machinery. ISBN 9781450369367. doi: 10.1145/3351095.3372830. URL https://doi.org/10.1145/3351095.3372830.
281
+ [11] O. Bastani, C. Kim, and H. Bastani. Interpretability via model extraction. FAT/ML Workshop 2017, 2017.
282
+ [12] V. Bellotti and K. Edwards. Intelligibility and accountability: Human considerations in context-aware systems. Hum.-Comput. Interact., 16(2):193-212, dec 2001. ISSN 0737-0024. doi: 10.1207/S15327051HCI16234_05. URL https://doi.org/10.1207/S15327051HCI16234_05.
283
+ [13] A. Bordes, Y.-L. Boureau, and J. Weston. Learning end-to-end goal-oriented dialog. *ICLR*, 2017.
284
+ [14] L. Breiman. Random forests. Machine learning, 45(1):5-32, 2001.
285
+ [15] T. Brown, B. Mann, N. Ryder, M. Subbiah, J. D. Kaplan, P. Dhariwal, A. Neelakantan, P. Shyam, G. Sastry, A. Askell, S. Agarwal, A. Herbert-Voss, G. Krueger, T. Henighan, R. Child, A. Ramesh, D. Ziegler, J. Wu, C. Winter, C. Hesse, M. Chen, E. Sigler, M. Litwin, S. Gray, B. Chess, J. Clark, C. Berner, S. McCandlish, A. Radford, I. Sutskever, and D. Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper/2020/file/1457c0d6bcbd4967418bfb8ac142f64a-Paper.pdf.
288
+ [16] B. Byrne, K. Krishnamoorthi, C. Sankar, A. Neelakantan, B. Goodrich, D. Duckworth, S. Yavuz, A. Dubey, K.-Y. Kim, and A. Cedilnik. Taskmaster-1: Toward a realistic and diverse dialog dataset. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4516–4525, Hong Kong, China, Nov. 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1459. URL https://aclanthology.org/D19-1459.
289
+ [17] P. Chalasani, J. Chen, A. R. Chowdhury, X. Wu, and S. Jha. Concise explanations of neural networks using adversarial training. In H. D. III and A. Singh, editors, Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 1383-1391. PMLR, 13-18 Jul 2020. URL https://proceedings.mlr.press/v119/chalasani20a.html.
290
+ [18] C. Chen, O. Li, D. Tao, A. Barnett, C. Rudin, and J. K. Su. This looks like that: Deep learning for interpretable image recognition. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. URL https://proceedings.neurips.cc/paper/2019/file/adb7ee2DCF142b0e11888e72b43fcb75-Paper.pdf.
291
+ [19] H. Chen, X. Liu, D. Yin, and J. Tang. A survey on dialogue systems: Recent advances and new frontiers. SIGKDD Explor. Newsl., 19(2):25-35, nov 2017. ISSN 1931-0145. doi: 10.1145/3166054.3166058. URL https://doi.org/10.1145/3166054.3166058.
292
+ [20] J. Chen, L. Song, M. J. Wainwright, and M. I. Jordan. L-shapley and c-shapley: Efficient model interpretation for structured data. In International Conference on Learning Representations, 2019.
293
+ [21] I. Covert and S.-I. Lee. Improving kernelshap: Practical shapley value estimation using linear regression. In A. Banerjee and K. Fukumizu, editors, Proceedings of The 24th International Conference on Artificial Intelligence and Statistics, volume 130 of Proceedings of Machine Learning Research, pages 3457-3465. PMLR, 13-15 Apr 2021. URL https://proceedings.mlr.press/v130/covert21a.html.
294
+ [22] J. Demšar, B. Zupan, G. Leban, and T. Curk. Orange: From experimental machine learning to interactive data mining. In J.-F. Boulicaut, F. Esposito, F. Giannotti, and D. Pedreschi, editors, Knowledge Discovery in Databases: PKDD 2004, pages 537-539, Berlin, Heidelberg, 2004. Springer Berlin Heidelberg. ISBN 978-3-540-30116-5.
295
+ [23] J. Devlin, M.-W. Chang, K. Lee, and K. Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota, June 2019. Association for Computational Linguistics. doi: 10.18653/v1/N19-1423. URL https://aclanthology.org/N19-1423.
296
+ [24] B. Dimanov, U. Bhatt, M. Jamnik, and A. Weller. You shouldn't trust me: Learning models which conceal unfairness from multiple explanation methods. In SafeAI@AAAI, 2020.
297
+
298
+ [25] A.-K. Dombrowski, M. Alber, C. J. Anders, M. Ackermann, K.-R. Müller, and P. Kessel. Explanations can be manipulated and geometry is to blame. arXiv preprint arXiv:1906.07983, 2019.
299
+ [26] F. Doshi-Velez and B. Kim. Towards a rigorous science of interpretable machine learning. arXiv preprint arXiv:1702.08608, 2017.
300
+ [27] P. Dourish. Algorithms and their others: Algorithmic culture in context. *Big Data & Society*, 3 (2):2053951716665128, 2016. doi: 10.1177/2053951716665128. URL https://doi.org/10.1177/2053951716665128.
301
+ [28] L. El Asri, H. Schulz, S. Sharma, J. Zumer, J. Harris, E. Fine, R. Mehrotra, and K. Suleman. Frames: a corpus for adding memory to goal-oriented dialogue systems. In Proceedings of the 18th Annual SIGdial Meeting on Discourse and Dialogue, pages 207-219, Saarbrücken, Germany, Aug. 2017. Association for Computational Linguistics. doi: 10.18653/v1/W17-5526. URL https://aclanthology.org/W17-5526.
302
+ [29] J. A. Fails and D. R. Olsen. Interactive machine learning. In Proceedings of the 8th International Conference on Intelligent User Interfaces, IUI '03, page 39-45, New York, NY, USA, 2003. Association for Computing Machinery. ISBN 1581135866. doi: 10.1145/604045.604056. URL https://doi.org/10.1145/604045.604056.
303
+ [30] J. H. Friedman. Greedy function approximation: a gradient boosting machine. Annals of statistics, pages 1189-1232, 2001.
304
+ [31] J. Gao, M. Galley, and L. Li. Neural approaches to conversational AI. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 2-7, Melbourne, Australia, July 2018. Association for Computational Linguistics. doi: 10.18653/v1/P18-5002. URL https://aclanthology.org/P18-5002.
305
+ [32] D. Garreau and U. von Luxburg. Looking deeper into lime. arXiv preprint arXiv:2008.11092, 2020.
306
+ [33] A. Ghorbani, A. Abid, and J. Zou. Interpretation of neural networks is fragile. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 3681-3688, 2019.
307
+ [34] B. Green and Y. Chen. Disparate interactions: An algorithm-in-the-loop analysis of fairness in risk assessments. In Proceedings of the Conference on Fairness, Accountability, and Transparency, pages 90-99. ACM, 2019.
308
+ [35] B. Green and Y. Chen. The principles and limits of algorithm-in-the-loop decision making. Proceedings of the ACM on Human-Computer Interaction, 3(CSCW):50, 2019.
309
+ [36] R. Guidotti, A. Monreale, S. Ruggieri, F. Turini, F. Giannotti, and D. Pedreschi. A survey of methods for explaining black box models. ACM computing surveys (CSUR), 51(5):1-42, 2018.
310
+ [37] D. Ham, J.-G. Lee, Y. Jang, and K.-E. Kim. End-to-end neural pipeline for goal-oriented dialogue systems using GPT-2. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 583–592, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.54. URL https://aclanthology.org/2020.acl-main.54.
311
+ [38] H. He and J. D. Choi. Establishing Strong Baselines for the New Decade: Sequence Tagging, Syntactic and Semantic Parsing with BERT. In Proceedings of the 33rd International Florida Artificial Intelligence Research Society Conference, FLAIRS'20, pages 228-233, 2020. URL https://www.flairs-33.info. Best Student Paper Candidate.
312
+
313
+ [39] M. Henderson. Machine learning for dialog state tracking: A review. 2015.
314
+ [40] M. Henderson, B. Thomson, and S. Young. Deep neural network approach for the dialog state tracking challenge. In Proceedings of the SIGDIAL 2013 Conference, pages 467-471, Metz, France, Aug. 2013. Association for Computational Linguistics. URL https://aclanthology.org/W13-4073.
315
+ [41] F. Hohman, A. Head, R. Caruana, R. DeLine, and S. M. Drucker. Gamut: A Design Probe to Understand How Data Scientists Understand Machine Learning Models, page 1-13. Association for Computing Machinery, New York, NY, USA, 2019. ISBN 9781450359702. URL https://doi.org/10.1145/3290605.3300809.
316
+ [42] M. Kale and A. Rastogi. Template guided text generation for task oriented dialogue. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6505-6520, Online, Nov. 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.emnlp-main.527.
317
+ [43] A.-H. Karimi*, J. von Kügelgen*, B. Schölkopf, and I. Valera. Algorithmic recourse under imperfect causal knowledge: a probabilistic approach. In Advances in Neural Information Processing Systems 33, Dec. 2020. *equal contribution.
318
+ [44] A.-H. Karimi, B. Schölkopf, and I. Valera. Algorithmic recourse: from counterfactual explanations to interventions. In 4th Conference on Fairness, Accountability, and Transparency (ACM FAccT), Mar. 2021.
319
+ [45] H. Kaur, H. Nori, S. Jenkins, R. Caruana, H. Wallach, and J. Wortman Vaughan. Interpreting interpretability: Understanding data scientists' use of interpretability tools for machine learning. In CHI, April 2020.
320
+ [46] A. Kim, H. Song, and S. Park. A two-step neural dialog state tracker for task-oriented dialog processing. Computational Intelligence and Neuroscience, 2018, 2018. ISSN 1687-5265. doi: 10.1155/2018/5798684.
321
+ [47] B. Kim, C. Rudin, and J. Shah. The bayesian case model: A generative approach for case-based reasoning and prototype classification. arXiv preprint arXiv:1503.01161, 2015.
322
+ [48] B. Kim, M. Wattenberg, J. Gilmer, C. Cai, J. Wexler, F. Viegas, and R. Sayres. Interpretability beyond feature attribution: Quantitative testing with concept activation vectors (TCAV). In J. Dy and A. Krause, editors, Proceedings of the 35th International Conference on Machine Learning, volume 80 of Proceedings of Machine Learning Research, pages 2668-2677, Stockholm, Sweden, 10-15 Jul 2018. PMLR.
323
+ [49] I. Lage, E. Chen, J. He, M. Narayanan, B. Kim, S. Gershman, and F. Doshi-Velez. An evaluation of the human-interpretability of explanation. arXiv preprint arXiv:1902.00006, 2019.
324
+ [50] V. Lai and C. Tan. On human predictions with explanations and predictions of machine learning models: A case study on deception detection. In Proceedings of the Conference on Fairness, Accountability, and Transparency, pages 29-38, 2019.
325
+
326
+ [51] V. Lai, J. Z. Cai, and C. Tan. Many faces of feature importance: Comparing built-in and post-hoc feature importance in text classification. In Proceedings of EMNLP, 2019.
327
+ [52] V. Lai, H. Liu, and C. Tan. "why is'chicago'deceptive?" towards building model-driven tutorials for humans. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pages 1-13, 2020.
328
+ [53] H. Lakkaraju, S. H. Bach, and J. Leskovec. Interpretable decision sets: A joint framework for description and prediction. In Knowledge Discovery and Data mining (KDD), 2016.
329
+ [54] H. Le, D. Sahoo, C. Liu, N. Chen, and S. C. Hoi. UniConv: A unified conversational neural architecture for multi-domain task-oriented dialogues. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1860-1877, Online, Nov. 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.emnlp-main.146. URL https://aclanthology.org/2020.emnlp-main.146.
330
+ [55] E. Lee, D. Braines, M. Stiffler, A. Hudler, and D. Harborne. Developing the sensitivity of lime for better machine learning explanation. In Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications, volume 11006, page 1100610. International Society for Optics and Photonics, 2019.
331
+ [56] A. Levine, S. Singla, and S. Feizi. Certifiably robust interpretation in deep learning. arXiv preprint arXiv:1905.12105, 2019.
332
+ [57] O. Li, H. Liu, C. Chen, and C. Rudin. Deep learning for case-based reasoning through prototypes: A neural network that explains its predictions. In AAAI, 2018.
333
+ [58] Q. V. Liao, D. Gruen, and S. Miller. Questioning the ai: informing design practices for explainable ai user experiences. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems, pages 1-15, 2020.
334
+ [59] Z. C. Lipton. The mythos of model interpretability: In machine learning, the concept of interpretability is both important and slippery. Queue, 16(3):31-57, 2018.
335
+ [60] H. Liu, V. Lai, and C. Tan. Understanding the effect of out-of-distribution examples and interactive explanations on human-ai decision making. arXiv preprint arXiv:2101.05303, 2021.
336
+ [61] R. Logan, N. F. Liu, M. E. Peters, M. Gardner, and S. Singh. Barack's wife hillary: Using knowledge graphs for fact-aware language modeling. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5962-5971, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1598. URL https://aclanthology.org/P19-1598.
337
+ [62] Y. Lou, R. Caruana, J. Gehrke, and G. Hooker. Accurate intelligible models with pairwise interactions. In KDD, 2013.
338
+ [63] S. M. Lundberg and S.-I. Lee. A unified approach to interpreting model predictions. In Advances in Neural Information Processing Systems, pages 4765-4774, 2017.
339
+ [64] J. Ni, T. Young, V. Pandelea, F. Xue, V. A. K. Adiga, and E. Cambria. Recent advances in deep learning based dialogue systems: A systematic survey. ArXiv, abs/2105.04387, 2021.
340
+
341
+ [65] O. Oluwatobi and E. Mueller. DLGNet: A transformer-based model for dialogue response generation. In Proceedings of the 2nd Workshop on Natural Language Processing for Conversational AI, pages 54-62, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.nlp4convai-1.7. URL https://aclanthology.org/2020.nlp4convai-1.7.
342
+ [66] T. Paek and E. Horvitz. Conversation as action under uncertainty. In Proceedings of the Sixteenth Conference on Uncertainty in Artificial Intelligence, UAI'00, page 455-464, San Francisco, CA, USA, 2000. Morgan Kaufmann Publishers Inc. ISBN 1558607099.
343
+ [67] D. Peskov, N. Clarke, J. Krone, B. Fodor, Y. Zhang, A. Youssef, and M. Diab. Multi-domain goal-oriented dialogues (MultiDoGO): Strategies toward curating and annotating large scale dialogue data. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4526-4536, Hong Kong, China, Nov. 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1460. URL https://aclanthology.org/D19-1460.
344
+ [68] F. Poursabzi-Sangdeh, D. G. Goldstein, J. M. Hofman, J. W. Vaughan, and H. Wallach. Manipulating and measuring model interpretability. arXiv preprint arXiv:1802.07810, 2018.
345
+ [69] F. Poursabzi-Sangdeh, D. G. Goldstein, J. M. Hofman, J. W. Vaughan, and H. M. Wallach. Manipulating and measuring model interpretability. Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems, 2021.
346
+ [70] R. Poyiadzi, K. Sokol, R. Santos-Rodriguez, T. De Bie, and P. Flach. Face: feasible and actionable counterfactual explanations. In Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society, pages 344-350, 2020.
347
+ [71] C. Raffel, N. Shazeer, A. Roberts, K. Lee, S. Narang, M. Matena, Y. Zhou, W. Li, and P. J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67, 2020. URL http://jmlr.org/papers/v21/20-074.html.
348
+ [72] A. Rastogi, X. Zang, S. Sunkara, R. Gupta, and P. Khaitan. Towards scalable multi-domain conversational agents: The schema-guided dialogue dataset. arXiv preprint arXiv:1909.05855, 2019.
349
+ [73] M. T. Ribeiro, S. Singh, and C. Guestrin. "Why should I trust you?" Explaining the predictions of any classifier. In Knowledge Discovery and Data mining (KDD), 2016.
350
+ [74] C. Rudin. Stop explaining black box machine learning models for high stakes decisions and use interpretable models instead. Nature Machine Intelligence, 1(5):206, 2019.
351
+ [75] R. R. Selvaraju, M. Cogswell, A. Das, R. Vedantam, D. Parikh, and D. Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In ICCV, 2017.
352
+ [76] K. Simonyan, A. Vedaldi, and A. Zisserman. Deep inside convolutional networks: Visualising image classification models and saliency maps. In Workshop at International Conference on Learning Representations, 2014.
353
+ [77] K. Singla, Z. Chen, D. Atkins, and S. Narayanan. Towards end-2-end learning for predicting behavior codes from spoken utterances in psychotherapy conversations. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 3797–3803, Online, July 2020. Association for Computational Linguistics. doi: 10.18653/v1/2020.acl-main.351. URL https://aclanthology.org/2020.acl-main.351.
354
+
355
+ [78] D. Slack, S. A. Friedler, C. E. Scheidegger, and C. D. Roy. Assessing the local interpretability of machine learning models. Workshop on Human-Centric Machine Learning, NeurIPS, 2019.
356
+ [79] D. Slack, S. Hilgard, E. Jia, S. Singh, and H. Lakkaraju. Fooling lime and shap: Adversarial attacks on post hoc explanation methods. Conference on Artificial Intelligence, Ethics, and Society (AIES), 2020.
357
+ [80] D. Slack, S. Hilgard, S. Singh, and H. Lakkaraju. Reliable post hoc explanations: Modeling uncertainty in explainability. In Neural Information Processing Systems (NeurIPS), 2021.
358
+ [81] D. Smilkov, N. Thorat, B. Kim, F. Viégas, and M. Wattenberg. Smoothgrad: removing noise by adding noise. Workshop on Visualization for Deep Learning, ICML, 2017.
359
+ [82] M. Sundararajan, A. Taly, and Q. Yan. Axiomatic attribution for deep networks. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pages 3319–3328. JMLR.org, 2017.
360
+ [83] S. Tan, R. Caruana, G. Hooker, and Y. Lou. Distill-and-compare: Auditing black-box models using transparent model distillation. In Proceedings of the 2018 AAAI/ACM Conference on AI, Ethics, and Society, pages 303-310, 2018.
361
+ [84] B. Ustun, S. Traca, and C. Rudin. Supersparse linear integer models for interpretable classification. arXiv preprint arXiv:1306.6677, 2013.
362
+ [85] B. Ustun, A. Spangher, and Y. Liu. Actionable recourse in linear classification. In Proceedings of the Conference on Fairness, Accountability, and Transparency, FAT* '19, pages 10-19, 2019. ISBN 978-1-4503-6125-5.
363
+ [86] A. Van Looveren and J. Klaise. Interpretable Counterfactual Explanations Guided by Prototypes. arXiv, art. arXiv:1907.02584, July 2019.
364
+ [87] S. Wachter, B. Mittelstadt, and C. Russell. Counterfactual explanations without opening the black box: Automated decisions and the gdpr. Harv. JL & Tech., 31:841, 2017.
365
+ [88] B. C. Wallace, K. Small, C. E. Brodley, J. Lau, and T. A. Trikalinos. Deploying an interactive machine learning system in an evidence-based practice center: Abstrackr. In Proceedings of the 2nd ACM SIGHIT International Health Informatics Symposium, IHI '12, page 819-824, New York, NY, USA, 2012. Association for Computing Machinery. ISBN 9781450307819. doi: 10.1145/2110363.2110464. URL https://doi.org/10.1145/2110363.2110464.
366
+ [89] J. Wang, J. Tuyls, E. Wallace, and S. Singh. Gradient-based Analysis of NLP Models is Manipulable. In Findings of the Association for Computational Linguistics: EMNLP (EMNLP Findings), page 247-258, 2020.
367
+ [90] W. Wang, J. Zhang, Q. Li, M.-Y. Hwang, C. Zong, and Z. Li. Incremental learning from scratch for task-oriented dialogue systems. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3710-3720, Florence, Italy, July 2019. Association for Computational Linguistics. doi: 10.18653/v1/P19-1361. URL https://aclanthology.org/P19-1361.
368
+ [91] X. Wang and M. Yin. Are explanations helpful? A comparative study of the effects of explanations in AI-assisted decision-making. In 26th International Conference on Intelligent User Interfaces, 2021.
369
+
370
+ [92] X. Wang and C. Yuan. Recent advances on human-computer dialogue. CAAI Transactions on Intelligence Technology, 1(4):303-312, 2016. ISSN 2468-2322. doi: https://doi.org/10.1016/j.trit.2016.12.004. URL https://www.sciencedirect.com/science/article/pii/S2468232216301081.
371
+ [93] J. Williams, A. Raux, D. Ramachandran, and A. Black. The dialog state tracking challenge. In Proceedings of the SIGDIAL 2013 Conference, pages 404-413, Metz, France, Aug. 2013. Association for Computational Linguistics. URL https://aclanthology.org/W13-4065.
372
+ [94] T. Yu, R. Zhang, K. Yang, M. Yasunaga, D. Wang, Z. Li, J. Ma, I. Li, Q. Yao, S. Roman, Z. Zhang, and D. Radev. Spider: A large-scale human-labeled dataset for complex and cross-domain semantic parsing and text-to-SQL task. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, Brussels, Belgium, 2018. Association for Computational Linguistics.
373
+ [95] T. Yu, R. Zhang, H. Er, S. Li, E. Xue, B. Pang, X. V. Lin, Y. C. Tan, T. Shi, Z. Li, Y. Jiang, M. Yasunaga, S. Shim, T. Chen, A. Fabbri, Z. Li, L. Chen, Y. Zhang, S. Dixit, V. Zhang, C. Xiong, R. Socher, W. Lasecki, and D. Radev. CoSQL: A conversational text-to-SQL challenge towards cross-domain natural language interfaces to databases. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 1962-1979, Hong Kong, China, Nov. 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1204. URL https://aclanthology.org/D19-1204.
374
+ [96] M. B. Zafar, M. Donini, D. Slack, C. Archambeau, S. Das, and K. Kenthapadi. On the lack of robust interpretability of neural text classifiers. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 3730-3740, Online, Aug. 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.findings-acl.327. URL https://aclanthology.org/2021.findings-acl.327.
375
+ [97] M. Zaib, Q. Z. Sheng, and W. Emma Zhang. A short survey of pre-trained language models for conversational ai-a new age in nlp. In Proceedings of the Australasian Computer Science Week Multiconference, ACSW '20, New York, NY, USA, 2020. Association for Computing Machinery. ISBN 9781450376976. doi: 10.1145/3373017.3373028. URL https://doi.org/10.1145/3373017.3373028.
376
+ [98] Y. Zhang, S. Sun, M. Galley, Y.-C. Chen, C. Brockett, X. Gao, J. Gao, J. J. Liu, and B. Dolan. Dialogpt: Large-scale generative pre-training for conversational response generation. In arXiv:1911.00536, November 2019. URL https://www.microsoft.com/en-us/research/publication/dialogpt-large-scale-generative-pre-training-for-conversational-response-generation/.
377
+ [99] Y. Zhang, Q. V. Liao, and R. K. Bellamy. Effect of confidence and explanation on accuracy and trust calibration in ai-assisted decision making. arXiv preprint arXiv:2001.02114, 2020.
378
+ [100] V. Zhong, C. Xiong, and R. Socher. Seq2sql: Generating structured queries from natural language using reinforcement learning. CoRR, abs/1709.00103, 2017.
379
+ [101] Q. Zhu, K. Huang, Z. Zhang, X. Zhu, and M. Huang. CrossWOZ: A large-scale Chinese cross-domain task-oriented dialogue dataset. Transactions of the Association for Computational Linguistics, 8:281-295, 2020. doi: 10.1162/tacl_a_00314. URL https://aclanthology.org/2020.tacl-1.19.
2202.01xxx/2202.01875/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:551d93a9f10756e18e59e6bc79d2895ba0aa04d142df95a49790621d973e3c97
3
+ size 21222
2202.01xxx/2202.01875/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01938/0bdc1864-0877-4963-a9de-68c2b5f8ab9e_content_list.json ADDED
@@ -0,0 +1,1574 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "CFP-SLAM: A Real-time Visual SLAM Based on Coarse-to-Fine Probability in Dynamic Environments",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 124,
8
+ 95,
9
+ 872,
10
+ 145
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Xinggang Hu $^{1}$ , Yunzhou Zhang $^{1*}$ , Zhenzhong Cao $^{1}$ , Rong Ma $^{2}$ , Yanmin Wu $^{3}$ , Zhiqiang Deng $^{1}$ , Wenkai Sun $^{1}$",
17
+ "bbox": [
18
+ 274,
19
+ 165,
20
+ 692,
21
+ 200
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—The dynamic factors in the environment will lead to the decline of camera localization accuracy due to the violation of the static environment assumption of SLAM algorithm. Recently, some related works generally use the combination of semantic constraints and geometric constraints to deal with dynamic objects, but problems can still be raised, such as poor real-time performance, easy to treat people as rigid bodies, and poor performance in low dynamic scenes. In this paper, a dynamic scene-oriented visual SLAM algorithm based on object detection and coarse-to-fine static probability named CFP-SLAM is proposed. The algorithm combines semantic constraints and geometric constraints to calculate the static probability of objects, keypoints and map points, and takes them as weights to participate in camera pose estimation. Extensive evaluations show that our approach can achieve almost the best results in high dynamic and low dynamic scenarios compared to the state-of-the-art dynamic SLAM methods, and shows quite high real-time ability.",
28
+ "bbox": [
29
+ 71,
30
+ 224,
31
+ 488,
32
+ 441
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "I. INTRODUCTION",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 200,
42
+ 450,
43
+ 361,
44
+ 465
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Simultaneous localization and mapping (SLAM) is the key technology for autonomous navigation of mobile robots, and it is widely applied in the fields of autopilot, UAV and augmented reality (AR). SLAM system is based on environmental static assumption [1], and dynamic factors will bring wrong observation data to the system, making it difficult to establish various geometric constraints on which SLAM system works, and reducing the accuracy and robustness of SLAM system. The abnormal point processing mechanism of RANSAC (Random Sample Consensus) algorithm can solve the influence of certain abnormal points in static or slightly dynamic environment. However, when dynamic objects occupy most of the camera view, RANSAC algorithm has little effect.",
51
+ "bbox": [
52
+ 71,
53
+ 470,
54
+ 488,
55
+ 667
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "With the development of deep learning technology, some advanced researchers have used semantic constraints to solve the visual SLAM problem in dynamic environment recent years. The general approach is to take the semantic information obtained from object detection [2], [3] or semantic segmentation [4]-[12] as a priori and eliminate the dynamic objects in the environment combined with geometric constraints. Semantic",
62
+ "bbox": [
63
+ 71,
64
+ 667,
65
+ 488,
66
+ 773
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "segmentation can provide a fine pixel level object mask, but its real-time performance is poor. The improvement of segmentation accuracy and robustness often comes at the cost of huge computational cost. Even so, the segmentation boundary of the object can not be extremely accurate and can not completely cover the moving object [12]. Object detection can circumvent the problems above, but there are a large amount of background point clouds in the box of objects, and some complex cases will be missed easily [3]. In addition, there are two common problems with current schemes: 1) All dynamic objects are treated as high dynamic attributes, which leads to poor performance in low dynamic scene. 2) As non-rigid objects, human bodies often perform partial movement. Directly eliminating the human body as a whole object will reduce the constraint of keypoints and introduce a negative effect on accuracy of localization.",
73
+ "bbox": [
74
+ 504,
75
+ 223,
76
+ 926,
77
+ 465
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "For the above problems, we propose CFP-SLAM, which is a high-performance high-efficiency visual SLAM system based on object detection and static probability in indoor dynamic environments. On the basis of ORB-SLAM2 [13], CFP-SLAM uses YOLOv5 to obtain semantic information, uses extended Kalman filter (EKF) and Hungarian algorithm to compensate missed detection, calculates the static probability of objects to distinguish high dynamic objects from low dynamic objects, and distinguishes foreground points and background points of object detection results based on DBSCAN (Density-Based Spatial Clustering of Applications with Noise) algorithm. Established on a variety of constraints, a two-stage calculation method of the static probability of keypoints from coarse to fine is designed. The static probability of keypoints is used as a weight to participate in the camera pose optimization. Considering the needs of different scenarios, we provide a lower-performance version to improve the real-time performance without calculating the static probability of objects.",
84
+ "bbox": [
85
+ 504,
86
+ 464,
87
+ 924,
88
+ 736
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Extensive experiments are conducted on public datasets. Compared with state-of-the-art dynamic SLAM methods, our approach achieves the highest localization accuracy in almost all low dynamics and high dynamic scenarios. The main contributions of this paper are as follows:",
95
+ "bbox": [
96
+ 504,
97
+ 736,
98
+ 924,
99
+ 811
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "aside_text",
105
+ "text": "arXiv:2202.01938v2 [cs.RO] 25 Feb 2022",
106
+ "bbox": [
107
+ 22,
108
+ 263,
109
+ 57,
110
+ 705
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "page_footnote",
116
+ "text": "*The corresponding author of this paper.",
117
+ "bbox": [
118
+ 88,
119
+ 786,
120
+ 310,
121
+ 799
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "page_footnote",
127
+ "text": "$^{1}$ Xinggang Hu, Yunzhou Zhang, Zhenzhong Cao, Zhiqiang Deng and Wenkai Sun are with College of Information Science and Engineering, Northeastern University, Shenyang 110819, China (Email: zhangyunzhou@mail.neu.edu.cn).",
128
+ "bbox": [
129
+ 75,
130
+ 799,
131
+ 488,
132
+ 844
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "page_footnote",
138
+ "text": "$^{2}$ Rong Ma is with Beijing Simulation Center, China (Email: mar_buaa@163.com).",
139
+ "bbox": [
140
+ 75,
141
+ 845,
142
+ 486,
143
+ 868
144
+ ],
145
+ "page_idx": 0
146
+ },
147
+ {
148
+ "type": "page_footnote",
149
+ "text": "$^{3}$ Yanmin Wu is with School of Electronic and Computer Engineering, Peking University, Shenzhen, China.",
150
+ "bbox": [
151
+ 75,
152
+ 868,
153
+ 486,
154
+ 890
155
+ ],
156
+ "page_idx": 0
157
+ },
158
+ {
159
+ "type": "page_footnote",
160
+ "text": "This work was supported by National Natural Science Foundation of China (No. 61973066), Major Science and Technology Projects of Liaoning Province(No.2021JH1/10400049), Fundation of Key Laboratory of Equipment Reliability(No.WD2C20205500306), Fundation of Key Laboratory of Aerospace System Simulation(No.6142002200301).",
161
+ "bbox": [
162
+ 75,
163
+ 890,
164
+ 488,
165
+ 948
166
+ ],
167
+ "page_idx": 0
168
+ },
169
+ {
170
+ "type": "page_footnote",
171
+ "text": "- Compensating missed detection based on EKF and Hungarian algorithm, while using DBSCAN clustering algorithm to distinguish the foreground points and background points of box.",
172
+ "bbox": [
173
+ 522,
174
+ 811,
175
+ 921,
176
+ 872
177
+ ],
178
+ "page_idx": 0
179
+ },
180
+ {
181
+ "type": "page_footnote",
182
+ "text": "- The distinction of object dynamic attributes. Based on the YOLOv5 object detection and geometric constraints, the object motion attributes are divided into high dynamics and low dynamics, which are provided to the subsequent methods as a priori information for processing with",
183
+ "bbox": [
184
+ 524,
185
+ 873,
186
+ 923,
187
+ 948
188
+ ],
189
+ "page_idx": 0
190
+ },
191
+ {
192
+ "type": "text",
193
+ "text": "different strategies, so as to improve the robustness and adaptability of SLAM system.",
194
+ "bbox": [
195
+ 102,
196
+ 74,
197
+ 488,
198
+ 103
199
+ ],
200
+ "page_idx": 1
201
+ },
202
+ {
203
+ "type": "text",
204
+ "text": "- The static probability of keypoints from coarse to fine. A two-stage static probability of keypoints calculation method based on the static probability of object, the DBSCAN clustering algorithm, the epipolar constraints and the projection constraints is proposed to solve the problem of false deletion of static keypoints caused by non-rigid body local motion.",
205
+ "bbox": [
206
+ 89,
207
+ 104,
208
+ 488,
209
+ 210
210
+ ],
211
+ "page_idx": 1
212
+ },
213
+ {
214
+ "type": "text",
215
+ "text": "II. RELATED WORK",
216
+ "text_level": 1,
217
+ "bbox": [
218
+ 207,
219
+ 219,
220
+ 354,
221
+ 232
222
+ ],
223
+ "page_idx": 1
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "A. Dynamic SLAM without Priori Semantic Information",
228
+ "text_level": 1,
229
+ "bbox": [
230
+ 71,
231
+ 237,
232
+ 455,
233
+ 252
234
+ ],
235
+ "page_idx": 1
236
+ },
237
+ {
238
+ "type": "text",
239
+ "text": "When there is no semantic information as the priori, using reliable constraints to find the correct feature matching relationship is the basic method to deal with dynamic SLAM problem. Li et al. [14] propose a static weighting method of keyframe edge points, and integrated into the IAICP method to reduce tracking error. Sun et al. [15] roughly detect the motion of moving objects based on self motion compensation image difference, and enhance the motion detection by tracking the motion using particle filter. Then, they [16] propose a novel RGB-D data-based on-line motion removal approach, and build and update the foreground model incrementally. StaticFusion [17] simultaneously estimates the camera motion as well as a probabilistic static/dynamic segmentation of the current RGB-D image pair. DMS-SLAM [18] uses GMS [19] to eliminate mismatched points. Kim et al. [20] propose a dense visual mileage calculation method based on background model to estimate the nonparametric background model from depth scene. Dai et al. [21] distinguishes dynamic and static map points based on feature correlation. Flowfusion [22] uses optical flow residuals to highlight dynamic regions in rgbd point clouds. Because there is no need for deep learning networks to provide semantic priors, the above methods are usually fast in dealing with dynamic factors, but lack of accuracy.",
240
+ "bbox": [
241
+ 71,
242
+ 257,
243
+ 488,
244
+ 619
245
+ ],
246
+ "page_idx": 1
247
+ },
248
+ {
249
+ "type": "text",
250
+ "text": "B. Dynamic SLAM Based on Semantic Constraints",
251
+ "text_level": 1,
252
+ "bbox": [
253
+ 71,
254
+ 628,
255
+ 419,
256
+ 642
257
+ ],
258
+ "page_idx": 1
259
+ },
260
+ {
261
+ "type": "text",
262
+ "text": "Semantic segmentation or object detection can provide a steady and reliable priority constraint for dynamic SLAM. Detect-SLAM [2] detects objects in keyframes and propagates the motion probability of keypoints in real time to eliminate the influence of dynamic objects in SLAM. DS-SLAM [4] uses SegNet [23] to obtain semantic information, combines sparse optical flow and motion consistency detection to judge people's dynamic and static attributes. Dyna-SLAM [5] combines mask R-CNN [24] and multi view geometry to process moving objects. Brasch et al. [6] present monocular SLAM approach for highly dynamic environments which models dynamic outliers with a joint probabilistic model based on semantic prior information predicted by a CNN. With the help of the initial segmentation results, Wang et al. [7] extract the accurate pose from the rough pose by identifying and processing the moving object and possible moving object respectively, and further help to make up for the error and boundary inaccuracy of the segmentation area. Dynamic-SLAM [3] compensates SSD for missed detection based on the speed invariance of adjacent frames, and eliminates dynamic",
263
+ "bbox": [
264
+ 71,
265
+ 646,
266
+ 488,
267
+ 949
268
+ ],
269
+ "page_idx": 1
270
+ },
271
+ {
272
+ "type": "text",
273
+ "text": "objects combined with selective tracking algorithm. SaDSLAM [8] extracts static feature points from objects judged as dynamic based on semantic by verifying whether the inter frame feature points meet the epipolar constraints. Vincent et al. [9] perform semantic segmentation of object instances in the image, and use EKF to identify, track and remove dynamic objects from the scene. DP-SLAM [10] combines the results of geometric constraints and semantic segmentation, the dynamic keypoints are tracked in the Bayesian probability estimation framework. Ji et al. [11] only perform semantic segmentation on keyframes, cluster the depth map and identifies moving objects combined with re-projection error to remove known and unknown dynamic objects. Blitz-SLAM [12] repairs the mask of BlitzNet [25] based on depth information, and classifies static and dynamic matching points in potential dynamic areas using epipolar constraints. Generally, the above methods can accurately eliminate dynamic objects in the environment, but it is difficult to give consideration to both localization accuracy and real-time, and the performance is generally poor in low dynamic scenes.",
274
+ "bbox": [
275
+ 504,
276
+ 74,
277
+ 924,
278
+ 377
279
+ ],
280
+ "page_idx": 1
281
+ },
282
+ {
283
+ "type": "text",
284
+ "text": "III. SYSTEM OVERVIEW",
285
+ "text_level": 1,
286
+ "bbox": [
287
+ 625,
288
+ 385,
289
+ 800,
290
+ 398
291
+ ],
292
+ "page_idx": 1
293
+ },
294
+ {
295
+ "type": "text",
296
+ "text": "A. Definition of Variables",
297
+ "text_level": 1,
298
+ "bbox": [
299
+ 504,
300
+ 405,
301
+ 686,
302
+ 420
303
+ ],
304
+ "page_idx": 1
305
+ },
306
+ {
307
+ "type": "text",
308
+ "text": "In this paper, common variables are defined as follows:",
309
+ "bbox": [
310
+ 522,
311
+ 422,
312
+ 900,
313
+ 438
314
+ ],
315
+ "page_idx": 1
316
+ },
317
+ {
318
+ "type": "list",
319
+ "sub_type": "text",
320
+ "list_items": [
321
+ "$F_{k}$ -FrameK.",
322
+ "- $K$ - The intrinsic matrix of a pinhole camera model.",
323
+ "- $T_{k,w} \\in R^{4 \\times 4}$ - The transformation from world frame to camera frame $\\mathbf{K}$ , which is composed of a rotation $R_{k,w} \\in R^{3 \\times 3}$ and a translation $t_{k,w} \\in R^{3 \\times 1}$ .",
324
+ "- $P_{i}^{k}$ - The keypoint with ID $i$ in $F_{k}$ . Its pixel coordinate is $P_{i w}^{k} = \\left[u_{i}^{k}, v_{i}^{k}\\right]^{T}$ , camera coordinate is $P_{i k}^{k} = \\left[X_{i k}^{k}, Y_{i k}^{k}, Z_{i k}^{k}\\right]^{T}$ , world coordinate is $P_{i w}^{k} = \\left[X_{i w}^{k}, Y_{i w}^{k}, Z_{i w}^{k}\\right]^{T}$ . $(\\cdot)$ is the form of homogeneous coordinates in each coordinate system.",
325
+ "- $P_{i^*}^{k - 1}$ - The keypoint with ID $i^*$ in $F_{k - 1}$ which forms a matching relationship with $P_{i}^{k}$ .",
326
+ "- $O_{i+}^{k}$ - The static probability of potential moving object with ID $i^{+}$ . $P_{i}^{k}$ is the extracted keypoint on the object.",
327
+ "- $O_{Th}$ - The threshold to distinguish whether the object motion attribute is high dynamic or low dynamic.",
328
+ "- $K_{i}^{k}$ - The static probability of $P_{i}^{k}$ , which is in the update state and participates in camera pose optimization.",
329
+ "- $K_{i}^{Dk}, K_{i}^{Tk}, K_{i}^{Fk}$ - The static probability of $P_{i}^{k}$ obtained by the DBSCAN clustering algorithm, the projection constraints and the epipolar constraints respectively.",
330
+ "- $M_{i - }^{k}$ - The static probability of the map point forming a matching relationship with $P_{i}^{k}$ ."
331
+ ],
332
+ "bbox": [
333
+ 522,
334
+ 441,
335
+ 921,
336
+ 797
337
+ ],
338
+ "page_idx": 1
339
+ },
340
+ {
341
+ "type": "text",
342
+ "text": "B. System Architecture",
343
+ "text_level": 1,
344
+ "bbox": [
345
+ 506,
346
+ 808,
347
+ 666,
348
+ 823
349
+ ],
350
+ "page_idx": 1
351
+ },
352
+ {
353
+ "type": "text",
354
+ "text": "The overview of CFP-SLAM is demonstrated in Fig.1. Based on ORB-SLAM2 [13], we design a complete static probability calculation and update framework of keypoints based on multiple constraints to deal with the influence of moving objects in dynamic environment. The system obtains semantic information based on YOLOv5, compensates for missed detection based on EKF and Hungarian algorithm, and then the box between adjacent frames is associated. In",
355
+ "bbox": [
356
+ 504,
357
+ 827,
358
+ 923,
359
+ 949
360
+ ],
361
+ "page_idx": 1
362
+ },
363
+ {
364
+ "type": "image",
365
+ "img_path": "images/da155394ba58c7e9876e7c943df7fae3c5177943673b05a76d36a65e18717ed5.jpg",
366
+ "image_caption": [
367
+ "Fig. 1. The overview of CFP-SLAM. The green portion and the purple portion are the input and output modules of the system respectively. The yellow portion is the semantic module, including object detection, missed detection compensation, and data association. The orange portion and the blue portion are static probability calculation modules for two stages of keypoints, respectively. In the first stage, the rough static probability of keypoints is calculated based on the static probability of objects and the results of DBSCAN clustering. In the second stage, based on the epipolar constraint and projection constraint, and considering the static probability of the object and the data association result of the box, the accurate static probability of feature points is calculated. During the whole process, the static probability of the map points is maintained and updated, and together with the static probability of the keypoints will be used as weight to participate in pose optimization."
368
+ ],
369
+ "image_footnote": [],
370
+ "bbox": [
371
+ 84,
372
+ 70,
373
+ 906,
374
+ 327
375
+ ],
376
+ "page_idx": 2
377
+ },
378
+ {
379
+ "type": "text",
380
+ "text": "$F_{k}$ , only calculate and update the static probability of the keypoints inside the potential moving object box. Firstly, the static probability of potential moving object $O_{i+}^{k}$ is obtained by using the optical flow and the epipolar constraints, and the object is divided into high dynamic object and low dynamic object. Initialize $K_{i}^{k}$ as the static probability of the object to which the keypoint belongs. Then, foreground points and background points is distinguished and the $K_{i}^{Dk}$ is calculated by using the DBSCAN clustering results, and the $K_{i}^{k}$ is updated to estimate the camera pose in the first stage to obtain $T_{k,w}$ . Next, $K_{i}^{Tk}, K_{i}^{Fk}$ are obtained by using the projection constraints and the epipolar constraints, $K_{i}^{k}$ and $M_{i-}^{k}$ are updated to participate in camera pose optimization as weights to obtain a more accurate $T_{k,w}$ .",
381
+ "bbox": [
382
+ 71,
383
+ 429,
384
+ 488,
385
+ 642
386
+ ],
387
+ "page_idx": 2
388
+ },
389
+ {
390
+ "type": "text",
391
+ "text": "IV. SPECIFIC IMPLEMENTATION",
392
+ "text_level": 1,
393
+ "bbox": [
394
+ 166,
395
+ 652,
396
+ 395,
397
+ 665
398
+ ],
399
+ "page_idx": 2
400
+ },
401
+ {
402
+ "type": "text",
403
+ "text": "A. Missed Detection Compensation Algorithm",
404
+ "text_level": 1,
405
+ "bbox": [
406
+ 71,
407
+ 672,
408
+ 388,
409
+ 686
410
+ ],
411
+ "page_idx": 2
412
+ },
413
+ {
414
+ "type": "text",
415
+ "text": "When processing dynamic objects, if the semantic information as a priori is suddenly missing in some frames, on the one hand, the subsequent methods based on semantic priors will not be able to process dynamic objects. On the other hand, the sudden emergence of dynamic objects in high dynamic scenes will lead to a sharp increase in the number of keypoints incorrectly matched between adjacent frames, which leads to the loss of tracking in SLAM system in high dynamic scenario. Therefore, stable and accurate semantic information is critical.",
416
+ "bbox": [
417
+ 71,
418
+ 691,
419
+ 488,
420
+ 825
421
+ ],
422
+ "page_idx": 2
423
+ },
424
+ {
425
+ "type": "text",
426
+ "text": "In order to solve the missed detection problem of YOLOv5, we introduce EKF and Hungarian algorithm to compensate the missed detection of potential moving objects. EKF is used to predict the boxes of potential moving objects in $F_{k}$ , while the Hungarian algorithm is used to correlate the predicted boxes with the boxes detected by YOLOv5. If the predicted box does not find a matching detected box, it could be considered that $F_{k}$ has missed detection, and the prediction result of EKF",
427
+ "bbox": [
428
+ 71,
429
+ 827,
430
+ 490,
431
+ 948
432
+ ],
433
+ "page_idx": 2
434
+ },
435
+ {
436
+ "type": "text",
437
+ "text": "is adopted to compensate the missed detection result. After missed detection compensation, EKF and Hungarian algorithm are used again for inter frame data association of boxes.",
438
+ "bbox": [
439
+ 504,
440
+ 429,
441
+ 921,
442
+ 474
443
+ ],
444
+ "page_idx": 2
445
+ },
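The missed-detection compensation described in the two entries above can be illustrated with a small sketch. This is an editor's illustration, not code from the paper or from this dataset: the `[x1, y1, x2, y2]` box format, the IoU gating threshold of 0.3, and the helper names are assumptions, and the EKF prediction step is taken as given.

```python
# Illustrative sketch (assumed details, not the paper's implementation):
# pair EKF-predicted boxes with YOLOv5 detections via the Hungarian algorithm;
# any prediction left without a plausible detection is kept as a compensated box.
import numpy as np
from scipy.optimize import linear_sum_assignment

def iou(a, b):
    """Intersection-over-union of two boxes given as [x1, y1, x2, y2]."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def compensate_missed_detections(predicted, detected, iou_thresh=0.3):
    """Return detections plus any EKF prediction that no detection explains."""
    if not predicted:
        return list(detected)
    if not detected:
        return list(predicted)                 # every prediction fills a miss
    cost = np.array([[1.0 - iou(p, d) for d in detected] for p in predicted])
    rows, cols = linear_sum_assignment(cost)   # Hungarian assignment
    matched = {r for r, c in zip(rows, cols) if 1.0 - cost[r, c] >= iou_thresh}
    out = list(detected)
    for i, p in enumerate(predicted):
        if i not in matched:                   # missed detection: keep prediction
            out.append(p)
    return out
```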
446
+ {
447
+ "type": "text",
448
+ "text": "B. Static Probability of Objects",
449
+ "text_level": 1,
450
+ "bbox": [
451
+ 504,
452
+ 483,
453
+ 723,
454
+ 498
455
+ ],
456
+ "page_idx": 2
457
+ },
458
+ {
459
+ "type": "text",
460
+ "text": "When calculating the static probability of each potential moving object, we use the idea of DS-SLAM [4] for reference to solve the fundamental matrix $LF_{k,k - 1}$ and get the polar error $Ld_{i}^{F_{k,k - 1}}$ . We use the epipolar constraints and chi-square distribution to test the epipolar error. Since the pixel coordinates of the matching point pair obtained by the optical flow tracking have $k = 2$ degrees of freedom, if they are assumed to follow the Gauss Distribution $N(0,1)$ , then according to the chi-square distribution:",
461
+ "bbox": [
462
+ 504,
463
+ 501,
464
+ 923,
465
+ 638
466
+ ],
467
+ "page_idx": 2
468
+ },
469
+ {
470
+ "type": "equation",
471
+ "text": "\n$$\n\\operatorname {c h s q} (x; k) = \\left\\{ \\begin{array}{c} \\frac {x ^ {(k / 2 - 1)} e ^ {- x / 2}}{2 ^ {k / 2} \\Gamma \\left(\\frac {k}{2}\\right)}, x > 0 \\\\ 0, x \\leq 0 \\end{array} \\right. \\tag {1}\n$$\n",
472
+ "text_format": "latex",
473
+ "bbox": [
474
+ 584,
475
+ 647,
476
+ 921,
477
+ 689
478
+ ],
479
+ "page_idx": 2
480
+ },
481
+ {
482
+ "type": "text",
483
+ "text": "The definition of the function $\\Gamma(v)$ is:",
484
+ "bbox": [
485
+ 522,
486
+ 695,
487
+ 785,
488
+ 710
489
+ ],
490
+ "page_idx": 2
491
+ },
492
+ {
493
+ "type": "equation",
494
+ "text": "\n$$\n\\Gamma (v) = \\int_ {0} ^ {\\infty} e ^ {- t} t ^ {v - 1} d t, \\operatorname {R e} v > 0 \\tag {2}\n$$\n",
495
+ "text_format": "latex",
496
+ "bbox": [
497
+ 599,
498
+ 712,
499
+ 921,
500
+ 744
501
+ ],
502
+ "page_idx": 2
503
+ },
504
+ {
505
+ "type": "text",
506
+ "text": "The single estimation result of $O_{i^{+}}^{k}$ can be obtained:",
507
+ "bbox": [
508
+ 522,
509
+ 750,
510
+ 880,
511
+ 767
512
+ ],
513
+ "page_idx": 2
514
+ },
515
+ {
516
+ "type": "equation",
517
+ "text": "\n$$\n\\left(O _ {i ^ {+}} ^ {k}\\right) _ {m} = \\operatorname {c h s q} \\left(\\left(L d _ {i} ^ {F _ {k, k - 1}}\\right) ^ {2}; 2\\right) \\tag {3}\n$$\n",
518
+ "text_format": "latex",
519
+ "bbox": [
520
+ 594,
521
+ 775,
522
+ 921,
523
+ 809
524
+ ],
525
+ "page_idx": 2
526
+ },
527
+ {
528
+ "type": "text",
529
+ "text": "After all estimation results are obtained by using all optical flow point pairs belonging to the object, all estimation results are sorted from small to large. Let the number of all estimation results be $M$ , and take the average value of $(O_{i+}^{k})_{m}$ at 0.1M, 0.2M, 0.3M position after ranking as the estimated value of object static probability $O_{i+}^{k}$ .",
530
+ "bbox": [
531
+ 504,
532
+ 813,
533
+ 921,
534
+ 902
535
+ ],
536
+ "page_idx": 2
537
+ },
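Read literally, Eqs. (1)-(3) and the averaging rule above amount to the following small computation. This is an editor's sketch of the equations as printed, with `epipolar_errors` assumed to hold the per-pair epipolar distances of one object's optical-flow matches (at least one pair is assumed).

```python
# Illustrative sketch of Eqs. (1)-(3) as printed: score each optical-flow pair
# of an object with the chi-square density (k = 2) of its squared epipolar
# error, sort the scores, and average the values at the 0.1M, 0.2M and 0.3M
# positions of the sorted list.
import math

def chsq(x, k=2):
    """Chi-square density of Eq. (1)."""
    if x <= 0:
        return 0.0
    return x ** (k / 2 - 1) * math.exp(-x / 2) / (2 ** (k / 2) * math.gamma(k / 2))

def object_static_probability(epipolar_errors):
    """Aggregate the per-pair scores of Eq. (3) into one value per object."""
    scores = sorted(chsq(d * d, k=2) for d in epipolar_errors)
    m = len(scores)
    picked = [scores[min(int(r * m), m - 1)] for r in (0.1, 0.2, 0.3)]
    return sum(picked) / len(picked)
```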
538
+ {
539
+ "type": "text",
540
+ "text": "According to the calculation result of the static probability of the object and the real motion of the object, and taking into account that the negative effect of the dynamic point is",
541
+ "bbox": [
542
+ 504,
543
+ 904,
544
+ 921,
545
+ 948
546
+ ],
547
+ "page_idx": 2
548
+ },
549
+ {
550
+ "type": "text",
551
+ "text": "generally greater than the positive effect of the increase of the static constraint when the camera pose is estimated, we set $O_{Th} = 0.9$ , the object motion attributes are divided into high dynamic and low dynamic, which are provided to the subsequent methods as a priori information for processing with different strategies. The static probability of all keypoints in the box of the potential moving object is initialized to $O_{i^{+}}^{k}$ and the static probability of other keypoints is initialized to 1.0.",
552
+ "bbox": [
553
+ 71,
554
+ 74,
555
+ 491,
556
+ 209
557
+ ],
558
+ "page_idx": 3
559
+ },
560
+ {
561
+ "type": "text",
562
+ "text": "C. Static Probability of Keypoints in the First Stage",
563
+ "text_level": 1,
564
+ "bbox": [
565
+ 73,
566
+ 220,
567
+ 429,
568
+ 236
569
+ ],
570
+ "page_idx": 3
571
+ },
572
+ {
573
+ "type": "text",
574
+ "text": "1) DBSCAN Density Clustering Algorithm: Compared with semantic segmentation methods, object detection technology has great advantages in real-time, but it can not provide accurate object mask. In the indoor dynamic SLAM scene, this problem leads to numerous static backgrounds in the boxes classified as people, and the false deletion of static keypoints will reduce the constraints of camera pose optimization and reduce the accuracy of camera pose estimation. We noticed that people as the foreground as a non-rigid body, his depth has a good continuity, and usually has a large fault with the background depth. To this end, we use the DBSCAN density clustering algorithm to distinguish between the foreground and background points of boxes classified as people.",
575
+ "bbox": [
576
+ 71,
577
+ 243,
578
+ 490,
579
+ 439
580
+ ],
581
+ "page_idx": 3
582
+ },
583
+ {
584
+ "type": "text",
585
+ "text": "We adaptively determine $eps$ (the neighborhood radius of DBSCAN density clustering algorithm) and $minPts$ (the threshold of the number of samples in the neighborhood). After clustering, the one with the lowest average value of samples in cluster $\\mathbf{C} = \\{C_1,C_2,\\dots ,C_k\\}$ is taken as the foreground points of box.",
586
+ "bbox": [
587
+ 71,
588
+ 439,
589
+ 488,
590
+ 529
591
+ ],
592
+ "page_idx": 3
593
+ },
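The depth-based foreground/background split described above can be sketched as follows. It is an illustration only: `eps` and `min_samples` stand in for the adaptively chosen values mentioned in the text, and scikit-learn's DBSCAN is used in place of whatever implementation the authors rely on.

```python
# Illustrative sketch: cluster the depths of the keypoints inside a "person"
# box with DBSCAN and take the cluster with the smallest mean depth (the
# nearest cluster) as the foreground of the box.
import numpy as np
from sklearn.cluster import DBSCAN

def foreground_mask(depths, eps=0.1, min_samples=8):
    """depths: 1-D array of keypoint depths inside one detected box.
    Returns a boolean mask, True for foreground (object) keypoints."""
    d = np.asarray(depths, dtype=float).reshape(-1, 1)
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(d)
    mask = np.zeros(len(d), dtype=bool)
    valid = labels >= 0                        # drop DBSCAN noise points
    if not valid.any():
        return mask
    clusters = np.unique(labels[valid])
    means = [d[labels == c].mean() for c in clusters]
    nearest = clusters[int(np.argmin(means))]  # lowest mean depth = foreground
    mask[labels == nearest] = True
    return mask
```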
594
+ {
595
+ "type": "text",
596
+ "text": "After getting the DBSCAN clustering results, we adopt a soft strategy to further estimate the static probability of background points in the box of a potential moving object. Obviously, the static probability of background points must be greater than that of the object, and it is positively correlated with the static probability of the object. Specifies that the static probability of background points derived from the DBSCAN cluster is:",
597
+ "bbox": [
598
+ 71,
599
+ 530,
600
+ 488,
601
+ 650
602
+ ],
603
+ "page_idx": 3
604
+ },
605
+ {
606
+ "type": "equation",
607
+ "text": "\n$$\nK _ {i} ^ {D k} = \\left\\{ \\begin{array}{c} \\frac {1 - O _ {T h}}{\\left(O _ {T h}\\right) ^ {4}} \\left(K _ {i} ^ {k}\\right) ^ {3} + 1, O _ {i +} ^ {k} \\leq O _ {T h} \\\\ \\frac {1}{K _ {i} ^ {k}}, \\quad O _ {i +} ^ {k} > O _ {T h} \\end{array} \\right. \\tag {4}\n$$\n",
608
+ "text_format": "latex",
609
+ "bbox": [
610
+ 130,
611
+ 656,
612
+ 488,
613
+ 699
614
+ ],
615
+ "page_idx": 3
616
+ },
617
+ {
618
+ "type": "text",
619
+ "text": "Considering that the static probability estimation of keypoints has not been strictly calculated at each point, in other words, the static probability of the keypoints is coarse at present, and the camera pose estimation is vulnerable to dynamic points, we set the static probability of all foreground points in the box of high dynamic objects to 0.",
620
+ "bbox": [
621
+ 71,
622
+ 702,
623
+ 488,
624
+ 792
625
+ ],
626
+ "page_idx": 3
627
+ },
628
+ {
629
+ "type": "text",
630
+ "text": "2) First Stage Pose Optimization: Update the static probability of keypoints:",
631
+ "bbox": [
632
+ 71,
633
+ 792,
634
+ 488,
635
+ 823
636
+ ],
637
+ "page_idx": 3
638
+ },
639
+ {
640
+ "type": "equation",
641
+ "text": "\n$$\nK _ {i} ^ {k} = K _ {i} ^ {k} \\times K _ {i} ^ {D k} \\tag {5}\n$$\n",
642
+ "text_format": "latex",
643
+ "bbox": [
644
+ 217,
645
+ 824,
646
+ 488,
647
+ 842
648
+ ],
649
+ "page_idx": 3
650
+ },
651
+ {
652
+ "type": "text",
653
+ "text": "When initializing the SLAM system, map points will be created. At this time, the static probability of map point $M_{i}^{k}$ will be initialized to the static probability of corresponding keypoint $K_{i}^{k}$ . In the frame after initialization, $K_{i}^{k}$ and $M_{i}^{k}$ are used as weights to optimize the camera pose, and the camera pose estimation value $T_{k,w}$ in the first stage is obtained. Then, the static probability of $P_{i}^{k}$ , which has a matching relation",
654
+ "bbox": [
655
+ 71,
656
+ 843,
657
+ 490,
658
+ 949
659
+ ],
660
+ "page_idx": 3
661
+ },
662
+ {
663
+ "type": "text",
664
+ "text": "with the keypoints in $F_{k - 1}$ , is calculated precisely based on the projection constraints and the epipolar constraints.",
665
+ "bbox": [
666
+ 504,
667
+ 74,
668
+ 923,
669
+ 104
670
+ ],
671
+ "page_idx": 3
672
+ },
673
+ {
674
+ "type": "text",
675
+ "text": "D. Static Probability of Keypoints in the Second Stage",
676
+ "text_level": 1,
677
+ "bbox": [
678
+ 504,
679
+ 119,
680
+ 879,
681
+ 136
682
+ ],
683
+ "page_idx": 3
684
+ },
685
+ {
686
+ "type": "text",
687
+ "text": "1) Static Probability Based on the Projection Constraints: Convert the $P_{i^*}^{k-1}$ from the pixel coordinate to the camera coordinate:",
688
+ "bbox": [
689
+ 504,
690
+ 143,
691
+ 923,
692
+ 188
693
+ ],
694
+ "page_idx": 3
695
+ },
696
+ {
697
+ "type": "equation",
698
+ "text": "\n$$\nP _ {i _ {k - 1} ^ {*}} ^ {k - 1} = \\frac {1}{K} Z _ {i _ {k - 1} ^ {*}} ^ {k - 1} \\widetilde {P _ {i _ {u \\nu}} ^ {k - 1}} \\tag {6}\n$$\n",
699
+ "text_format": "latex",
700
+ "bbox": [
701
+ 635,
702
+ 195,
703
+ 921,
704
+ 224
705
+ ],
706
+ "page_idx": 3
707
+ },
708
+ {
709
+ "type": "text",
710
+ "text": "Transform and project $P_{i_{k-1}^*}^{k-1}$ to $F_k$ , and the Euclidean distance between the projection point and $P_i^k$ is:",
711
+ "bbox": [
712
+ 504,
713
+ 236,
714
+ 921,
715
+ 272
716
+ ],
717
+ "page_idx": 3
718
+ },
719
+ {
720
+ "type": "equation",
721
+ "text": "\n$$\nd _ {i} ^ {T} = \\left\\| \\right. P _ {i _ {u v}} ^ {k} - \\left| \\frac {1}{\\left| T _ {k , k - 1} \\widetilde {P _ {i _ {k - 1} ^ {*}}} ^ {k - 1} \\right| _ {Z}} K \\right| T _ {k, k - 1} \\widetilde {P _ {i _ {k - 1} ^ {*}}} \\left. \\right| _ {X Y Z} \\Bigg | _ {u \\nu} \\left. \\right\\| _ {2} \\tag {7}\n$$\n",
722
+ "text_format": "latex",
723
+ "bbox": [
724
+ 511,
725
+ 279,
726
+ 921,
727
+ 362
728
+ ],
729
+ "page_idx": 3
730
+ },
731
+ {
732
+ "type": "text",
733
+ "text": "Where function $|P|_{Z}$ represents the z-axis coordinate of point $P$ , and $|P|_{XYZ}$ represents the non-homogeneous coordinate form of point $P$ . On the premise that the camera pose $T_{k,w}$ is relatively accurate, the greater $d_{i}^{T}$ , the greater the possibility that $P_{i}^{k}$ and $P_{i^{*}}^{k-1}$ are mismatched. Based on this principle, we design a static probability model based on the projection constraints. After sorting the $d_{i}^{T}$ of all keypoints outside the box of the dynamic object in $F_{k}$ from small to large, take $d_{i}^{T}$ at the truncated position of 0.8 as the adaptive threshold $D_{Th}^{T}$ of the projection error, and obtain the minimum value $d_{min}^{T}$ of $d_{i}^{T}$ . We use the Sigmoid function form to measure the static probability of keypoints of the matching relationship in the box:",
734
+ "bbox": [
735
+ 504,
736
+ 363,
737
+ 923,
738
+ 560
739
+ ],
740
+ "page_idx": 3
741
+ },
742
+ {
743
+ "type": "equation",
744
+ "text": "\n$$\nK _ {i} ^ {T k} = \\frac {1}{1 + e ^ {\\left(d _ {i} ^ {T} - D _ {T h} ^ {T}\\right) \\times \\frac {5}{D _ {T h} ^ {T} - d _ {\\operatorname* {m i n}} ^ {T}}}} \\tag {8}\n$$\n",
745
+ "text_format": "latex",
746
+ "bbox": [
747
+ 593,
748
+ 574,
749
+ 921,
750
+ 616
751
+ ],
752
+ "page_idx": 3
753
+ },
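Equation (8) and the adaptive threshold described above reduce to a few lines. The sketch below is the editor's reading, not the paper's code: the 0.8-quantile rule and the factor 5 follow the text, while the guards against empty input and a zero denominator are added assumptions.

```python
# Illustrative sketch of Eq. (8): map a reprojection error d_i^T to a static
# probability with a sigmoid whose threshold D_Th^T is the 0.8-quantile of the
# errors of keypoints lying outside dynamic-object boxes.
import math

def projection_static_probability(d_i, errors_outside_boxes):
    errs = sorted(errors_outside_boxes)                      # assumed non-empty
    d_th = errs[min(int(0.8 * len(errs)), len(errs) - 1)]    # adaptive D_Th^T
    d_min = errs[0]                                          # d_min^T
    scale = 5.0 / max(d_th - d_min, 1e-9)                    # slope term of Eq. (8)
    return 1.0 / (1.0 + math.exp((d_i - d_th) * scale))
```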
754
+ {
755
+ "type": "text",
756
+ "text": "For a pair of matching points, the satisfaction of the projection constraints is not only related to whether the corresponding spatial points strictly meet the static environment assumption, but also directly related to the number of constraints when solving the pose matrix and whether the pose matrix itself is correctly solved. Therefore, the statistical confidence $C_s^{Tk}$ and calculation confidence $C_c^{Tk}$ of the pose matrix are introduced:",
757
+ "bbox": [
758
+ 504,
759
+ 625,
760
+ 921,
761
+ 744
762
+ ],
763
+ "page_idx": 3
764
+ },
765
+ {
766
+ "type": "equation",
767
+ "text": "\n$$\nC _ {S} ^ {T k} = \\frac {1}{1 + e ^ {- N _ {B A} + 0 . 5 T h _ {B A}}} \\tag {9}\n$$\n",
768
+ "text_format": "latex",
769
+ "bbox": [
770
+ 614,
771
+ 758,
772
+ 921,
773
+ 789
774
+ ],
775
+ "page_idx": 3
776
+ },
777
+ {
778
+ "type": "equation",
779
+ "text": "\n$$\nC _ {C} ^ {T k} = 1 - \\frac {\\sum d _ {i} ^ {T}}{N _ {T} \\times D _ {T h} ^ {T}} \\tag {10}\n$$\n",
780
+ "text_format": "latex",
781
+ "bbox": [
782
+ 630,
783
+ 805,
784
+ 921,
785
+ 842
786
+ ],
787
+ "page_idx": 3
788
+ },
789
+ {
790
+ "type": "text",
791
+ "text": "Where $N_{BA}$ is the number of interior points obtained by participating in the last camera pose solution, and threshold $Th_{BA}$ is the minimum number of interior points required to participate in the camera pose solution, $N_{T}$ and $\\sum d_i^T$ respectively represent the number of all sample points and the sum of $d_i^T$ satisfying $d_i^T < D_{Th}^T$ .",
792
+ "bbox": [
793
+ 504,
794
+ 849,
795
+ 923,
796
+ 943
797
+ ],
798
+ "page_idx": 3
799
+ },
800
+ {
801
+ "type": "text",
802
+ "text": "2) Static Probability Based on the Epipolar Constraints: Based on the camera pose estimation $T_{k,w}$ in the first stage, a more accurate fundamental matrix can be calculated:",
803
+ "bbox": [
804
+ 71,
805
+ 74,
806
+ 488,
807
+ 118
808
+ ],
809
+ "page_idx": 4
810
+ },
811
+ {
812
+ "type": "equation",
813
+ "text": "\n$$\nF _ {k, k - 1} = \\mathrm {K} ^ {- \\mathrm {T}} \\left(t _ {k, k - 1}\\right) ^ {\\wedge} R _ {k, k - 1} \\mathrm {K} ^ {- 1} \\tag {11}\n$$\n",
814
+ "text_format": "latex",
815
+ "bbox": [
816
+ 151,
817
+ 128,
818
+ 488,
819
+ 148
820
+ ],
821
+ "page_idx": 4
822
+ },
823
+ {
824
+ "type": "text",
825
+ "text": "The pole line $l_i^k = \\left[A_i^k, B_i^k, C_i^k\\right]^T$ corresponding to $P_i^k$ is:",
826
+ "bbox": [
827
+ 88,
828
+ 155,
829
+ 488,
830
+ 176
831
+ ],
832
+ "page_idx": 4
833
+ },
834
+ {
835
+ "type": "equation",
836
+ "text": "\n$$\nl _ {i} ^ {k} = F _ {k, k - 1} \\widetilde {P _ {i _ {u v} ^ {*}} ^ {k - 1}} \\tag {12}\n$$\n",
837
+ "text_format": "latex",
838
+ "bbox": [
839
+ 218,
840
+ 181,
841
+ 488,
842
+ 208
843
+ ],
844
+ "page_idx": 4
845
+ },
846
+ {
847
+ "type": "text",
848
+ "text": "Then the polar error $d_i^F$ is:",
849
+ "bbox": [
850
+ 89,
851
+ 212,
852
+ 277,
853
+ 231
854
+ ],
855
+ "page_idx": 4
856
+ },
857
+ {
858
+ "type": "equation",
859
+ "text": "\n$$\nd _ {i} ^ {F} = \\frac {\\left| \\left(\\widetilde {P _ {i _ {\\mathrm {u v}}} ^ {k}}\\right) ^ {T} l _ {i} ^ {k} \\right|}{\\sqrt {\\left(A _ {i} ^ {k}\\right) ^ {2} + \\left(B _ {i} ^ {k}\\right) ^ {2}}} \\tag {13}\n$$\n",
860
+ "text_format": "latex",
861
+ "bbox": [
862
+ 194,
863
+ 238,
864
+ 488,
865
+ 299
866
+ ],
867
+ "page_idx": 4
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "Similar to the projection constraints, we calculate static probability and confidence based on the epipolar constraints to obtain $K_{i}^{Fk}$ , the statistical confidence $C_s^{Fk}$ and calculation confidence $C_c^{Fk}$ of the fundamental matrix.",
872
+ "bbox": [
873
+ 71,
874
+ 305,
875
+ 488,
876
+ 364
877
+ ],
878
+ "page_idx": 4
879
+ },
880
+ {
881
+ "type": "text",
882
+ "text": "It should be noted that, as Eq.11 mentioned, the fundamental matrix can not be obtained when the camera translation is not large enough. Therefore, when the camera translation is less than the set threshold $t_{Th}$ , skip the calculation of static probability and confidence based on the epipolar constraints, that is:",
883
+ "bbox": [
884
+ 71,
885
+ 366,
886
+ 488,
887
+ 455
888
+ ],
889
+ "page_idx": 4
890
+ },
891
+ {
892
+ "type": "equation",
893
+ "text": "\n$$\nK _ {i} ^ {F k} = 0, C _ {S} ^ {F k} = C _ {C} ^ {F k} = 0 \\quad \\text {s . t .} \\| t _ {k, k - 1} \\| _ {2} \\leq t _ {T h} \\tag {14}\n$$\n",
894
+ "text_format": "latex",
895
+ "bbox": [
896
+ 94,
897
+ 464,
898
+ 488,
899
+ 484
900
+ ],
901
+ "page_idx": 4
902
+ },
903
+ {
904
+ "type": "text",
905
+ "text": "3) Second Stage Pose Optimization: After calculating the static probability of the keypoints based on the projection constraints and the epipolar constraints, we update the static probability of $P_{i}^{k}$ which matches the keypoints in $F_{k - 1}$ for the second time. When the object is in high dynamics, the negative impact of dynamic points on camera pose estimation is generally greater than the positive impact of the increase in the number of static point constraints, which is just the opposite when the object is in low dynamics. This is because ORB-SLAM2 has certain outlier suppression strategies, which can suppress dynamic disturbances in low dynamics, but does not work in high dynamics. So, when $O_{i + }^{k}\\leq O_{Th}$ ,",
906
+ "bbox": [
907
+ 71,
908
+ 491,
909
+ 490,
910
+ 674
911
+ ],
912
+ "page_idx": 4
913
+ },
914
+ {
915
+ "type": "equation",
916
+ "text": "\n$$\nK _ {i} ^ {k} = \\left\\{ \\begin{array}{c} K _ {i} ^ {T k} \\times K _ {i} ^ {F k}, \\| t _ {k, k - 1} \\| _ {2} > t _ {T h} \\\\ K _ {i} ^ {T k}, \\| t _ {k, k - 1} \\| _ {2} \\leq t _ {T h} \\end{array} \\right. \\tag {15}\n$$\n",
917
+ "text_format": "latex",
918
+ "bbox": [
919
+ 138,
920
+ 681,
921
+ 488,
922
+ 718
923
+ ],
924
+ "page_idx": 4
925
+ },
926
+ {
927
+ "type": "text",
928
+ "text": "when $O_{i^{+}}^{k} > O_{Th}$",
929
+ "bbox": [
930
+ 89,
931
+ 723,
932
+ 220,
933
+ 741
934
+ ],
935
+ "page_idx": 4
936
+ },
937
+ {
938
+ "type": "equation",
939
+ "text": "\n$$\nK _ {i} ^ {k} = \\frac {K _ {i} ^ {T k} \\times C _ {s} ^ {T k} C _ {c} ^ {T k}}{C _ {s} ^ {T k} C _ {c} ^ {T k} + C _ {s} ^ {F k} C _ {c} ^ {F k}} + \\frac {K _ {i} ^ {F k} \\times C _ {s} ^ {F k} C _ {c} ^ {F k}}{C _ {s} ^ {T k} C _ {c} ^ {T k} + C _ {s} ^ {F k} C _ {c} ^ {F k}} \\tag {16}\n$$\n",
940
+ "text_format": "latex",
941
+ "bbox": [
942
+ 86,
943
+ 750,
944
+ 488,
945
+ 787
946
+ ],
947
+ "page_idx": 4
948
+ },
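Eqs. (14)-(16) combine the two per-keypoint probabilities according to the object's dynamic attribute. The sketch below is illustrative only: `c_t` and `c_f` stand for the confidence products C_s*C_c of the projection and epipolar terms, and the default values of `translation` and `t_th` are placeholders, not values from the paper.

```python
# Illustrative sketch of Eqs. (14)-(16): fuse the projection-based (k_t) and
# epipolar-based (k_f) static probabilities of one keypoint. For high-dynamic
# objects (O <= O_Th) the product of Eq. (15) is used; for low-dynamic objects
# the confidence-weighted average of Eq. (16) is used instead.
def fuse_static_probability(k_t, k_f, c_t, c_f, o_obj, o_th=0.9,
                            translation=1.0, t_th=0.01):
    """c_t / c_f are the products C_s * C_c of the respective confidences."""
    enough_motion = translation > t_th
    if o_obj <= o_th:                          # high-dynamic object, Eq. (15)
        return k_t * k_f if enough_motion else k_t
    if not enough_motion:                      # Eq. (14): epipolar term unusable
        k_f, c_f = 0.0, 0.0
    total = c_t + c_f                          # low-dynamic object, Eq. (16)
    return (k_t * c_t + k_f * c_f) / total if total > 0 else k_t
```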
949
+ {
950
+ "type": "text",
951
+ "text": "After missed detection compensation, we use EKF and Hungarian algorithm to correlate the boxes of potential moving objects between adjacent frames. It is easy to know that if the association result of a box in $F_{k}$ is not found in $F_{k-1}$ , even if there is a matching relationship between the foreground points in the box, it is generally a false matching, so let $K_{i}^{k} = 0$ in this case. For $P_{i}^{k}$ that does not match the keypoints in $F_{k-1}$ , according to the results of DBSCAN clustering, if $P_{i}^{k}$ belongs to the foreground points, let $K_{i}^{k} = 0$ , else let $K_{i}^{k} = M_{i-}^{k}$ . After the second estimation result of $K_{i}^{k}$ is obtained, $M_{i-}^{k}$ is updated. When $M_{i-}^{k} < 0.3$ , delete the map point. Then $K_{i}^{k}$ and $M_{i-}^{k}$ are used as weights to participate in the second stage of camera pose optimization. When there is a big difference between $K_{i}^{k}$ and $M_{i-}^{k}$ , it can be considered that $K_{i}^{k}$ and $M_{i-}^{k}$ are mismatched and do not participate in optimization.",
952
+ "bbox": [
953
+ 504,
954
+ 74,
955
+ 924,
956
+ 301
957
+ ],
958
+ "page_idx": 4
959
+ },
960
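For the box association step described above, a common realisation of "EKF + Hungarian algorithm" is to match (possibly EKF-predicted) boxes between adjacent frames by solving an assignment problem over an IoU cost. The sketch below assumes axis-aligned boxes given as (x1, y1, x2, y2) tuples and a hypothetical `min_iou` gate; it is illustrative only and not the paper's code.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def iou(a, b):
    """Intersection over union of two boxes given as (x1, y1, x2, y2)."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def associate_boxes(boxes_prev, boxes_cur, min_iou=0.3):
    """Return (i_prev, j_cur) matches between adjacent frames via the Hungarian algorithm."""
    if not boxes_prev or not boxes_cur:
        return []
    cost = np.array([[1.0 - iou(p, c) for c in boxes_cur] for p in boxes_prev])
    rows, cols = linear_sum_assignment(cost)          # minimise total (1 - IoU)
    return [(i, j) for i, j in zip(rows, cols) if 1.0 - cost[i, j] >= min_iou]
```

Boxes in the current frame left unmatched by this step correspond to the case in the text above where the keypoint static probability is set to zero.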
+ {
961
+ "type": "text",
962
+ "text": "V. EXPERIMENTS AND RESULTS",
963
+ "text_level": 1,
964
+ "bbox": [
965
+ 598,
966
+ 311,
967
+ 830,
968
+ 324
969
+ ],
970
+ "page_idx": 4
971
+ },
972
+ {
973
+ "type": "text",
974
+ "text": "In this section, we test the performance of the proposed algorithm in 8 dynamic sequences of the TUM RGB-D dataset [26], including 4 low dynamic sequences (fr3/s for short) and 4 high dynamic sequences (fr3/w for short), and the camera includes 4 kinds of motion: static, xyz, halfsphere and rpy. The indicators used to evaluate the accuracy are the Absolute Trajectory Error (ATE) and the Relative Pose Error (RPE). ATE represents the global consistency of trajectory. RPE includes translation drift and rotation drift. The Root-Mean-Square-Error (RMSE) and Standard Deviation (S.D.) of both are used to represent the robustness and stability of the system [12]. Firstly, we show the effect of missed detection compensation and DBSCAN clustering, then compare our method with some of the most advanced methods, then design a series of ablation experiments to test the impact of each module, and finally carry out real-time analysis. All the experiments are performed on a computer with Intel i7 CPU, 3060 GPU, and 16GB memory.",
975
+ "bbox": [
976
+ 504,
977
+ 330,
978
+ 923,
979
+ 602
980
+ ],
981
+ "page_idx": 4
982
+ },
983
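As a reminder of how the reported statistics are formed, the RMSE and S.D. of the ATE can be computed from per-frame translational errors of time-associated, aligned trajectories. The sketch below assumes the trajectories are already associated by timestamp and aligned (e.g., with Horn's method), which the standard TUM evaluation tooling normally handles; it is only meant to show what the two numbers summarise.

```python
import numpy as np

def ate_rmse_sd(gt_xyz, est_xyz):
    """ATE statistics for aligned, time-associated (N, 3) position arrays."""
    err = np.linalg.norm(gt_xyz - est_xyz, axis=1)    # per-frame translational error
    rmse = float(np.sqrt(np.mean(err ** 2)))
    sd = float(np.std(err))
    return rmse, sd
```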
+ {
984
+ "type": "text",
985
+ "text": "A. Missed Detection Compensation and DBSCAN Clustering",
986
+ "text_level": 1,
987
+ "bbox": [
988
+ 504,
989
+ 612,
990
+ 921,
991
+ 628
992
+ ],
993
+ "page_idx": 4
994
+ },
995
+ {
996
+ "type": "text",
997
+ "text": "In the dynamic SLAM scene, the motion of the object, the incomplete appearance of the object to be detected in the camera field of view, the blurred image and the singular angle of view caused by camera rotation all bring severe challenges to the object detection, very easy to cause miss detection, even will lead to continuous frame miss detection. Fig.2(a)-(d) and Fig.2(e) show the results of missed detection compensation of object detection in the above four cases and six consecutive frames, respectively. Fig.3 shows the DBSCAN clustering results after missed detection compensation. We",
998
+ "bbox": [
999
+ 504,
1000
+ 632,
1001
+ 923,
1002
+ 784
1003
+ ],
1004
+ "page_idx": 4
1005
+ },
1006
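One simple way to realise the missed detection compensation illustrated in Fig. 2 is to keep a constant-velocity Kalman filter per tracked box and use its prediction whenever the detector returns nothing for that object; with a linear motion model the EKF reduces to a plain Kalman filter. The state layout (cx, cy, w, h, vx, vy) and the noise values below are assumptions for illustration, not the paper's parameters.

```python
import numpy as np

class BoxEKF:
    """Constant-velocity Kalman filter over a box state (cx, cy, w, h, vx, vy).

    predict() supplies a box for frames in which the detector misses the object;
    update() refines the state when a detection is available again."""

    def __init__(self, box, q=1.0, r=10.0):
        cx, cy, w, h = box
        self.x = np.array([cx, cy, w, h, 0.0, 0.0], dtype=float)
        self.P = np.eye(6) * 10.0
        self.F = np.eye(6)
        self.F[0, 4] = 1.0              # cx += vx
        self.F[1, 5] = 1.0              # cy += vy
        self.H = np.zeros((4, 6))
        self.H[:4, :4] = np.eye(4)      # we observe (cx, cy, w, h) only
        self.Q = np.eye(6) * q
        self.R = np.eye(4) * r

    def predict(self):
        self.x = self.F @ self.x
        self.P = self.F @ self.P @ self.F.T + self.Q
        return self.x[:4]               # predicted (cx, cy, w, h)

    def update(self, box):
        z = np.asarray(box, dtype=float)
        y = z - self.H @ self.x
        S = self.H @ self.P @ self.H.T + self.R
        K = self.P @ self.H.T @ np.linalg.inv(S)
        self.x = self.x + K @ y
        self.P = (np.eye(6) - K @ self.H) @ self.P
```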
+ {
1007
+ "type": "image",
1008
+ "img_path": "images/ff1c128d6060c985c297fe42728df4a5ea51389605249d24213710281f93db46.jpg",
1009
+ "image_caption": [
1010
+ "(a)"
1011
+ ],
1012
+ "image_footnote": [],
1013
+ "bbox": [
1014
+ 73,
1015
+ 797,
1016
+ 158,
1017
+ 895
1018
+ ],
1019
+ "page_idx": 4
1020
+ },
1021
+ {
1022
+ "type": "image",
1023
+ "img_path": "images/ae834cf0f6c63fa0e7dc213482997505c1fea05ec74c87aacc7e4794b21b1adf.jpg",
1024
+ "image_caption": [
1025
+ "(b)"
1026
+ ],
1027
+ "image_footnote": [],
1028
+ "bbox": [
1029
+ 160,
1030
+ 797,
1031
+ 243,
1032
+ 895
1033
+ ],
1034
+ "page_idx": 4
1035
+ },
1036
+ {
1037
+ "type": "image",
1038
+ "img_path": "images/0427ddbf5b50ee8fc050748aa0f0184808fc468cffcc04cde63405a5271317c3.jpg",
1039
+ "image_caption": [
1040
+ "(c)",
1041
+ "Fig. 2. Missed detection and the results of missed detection compensation in the following cases: (a) The rapid motion of the object. (b) The incomplete appearance of the object to be detected in the camera field of view. (c) The blurred image. (d) The singular angle of view caused by camera rotation. (e) Continuous frame miss detection."
1042
+ ],
1043
+ "image_footnote": [],
1044
+ "bbox": [
1045
+ 245,
1046
+ 797,
1047
+ 330,
1048
+ 895
1049
+ ],
1050
+ "page_idx": 4
1051
+ },
1052
+ {
1053
+ "type": "image",
1054
+ "img_path": "images/40465acd668c315979a165895cb55a3666db26e5e967cee37939dafed3bf6ec6.jpg",
1055
+ "image_caption": [
1056
+ "(d)"
1057
+ ],
1058
+ "image_footnote": [],
1059
+ "bbox": [
1060
+ 331,
1061
+ 797,
1062
+ 415,
1063
+ 895
1064
+ ],
1065
+ "page_idx": 4
1066
+ },
1067
+ {
1068
+ "type": "image",
1069
+ "img_path": "images/b47b37538225d2e83e23b798d40329bd8daef0367cc597ad249a8b48fcd8c9d5.jpg",
1070
+ "image_caption": [
1071
+ "一"
1072
+ ],
1073
+ "image_footnote": [],
1074
+ "bbox": [
1075
+ 416,
1076
+ 797,
1077
+ 501,
1078
+ 895
1079
+ ],
1080
+ "page_idx": 4
1081
+ },
1082
+ {
1083
+ "type": "image",
1084
+ "img_path": "images/ca42b56ae6fd6070b60ad474a7a62727b097a101c88fdc00b874a0e3efb5e7c0.jpg",
1085
+ "image_caption": [
1086
+ "(e)"
1087
+ ],
1088
+ "image_footnote": [],
1089
+ "bbox": [
1090
+ 501,
1091
+ 797,
1092
+ 609,
1093
+ 895
1094
+ ],
1095
+ "page_idx": 4
1096
+ },
1097
+ {
1098
+ "type": "image",
1099
+ "img_path": "images/187fa9b67dc9c5fbeec0440a020315a87daa9a4e0f1f7489e3e61ccd6790173d.jpg",
1100
+ "image_caption": [],
1101
+ "image_footnote": [],
1102
+ "bbox": [
1103
+ 611,
1104
+ 797,
1105
+ 750,
1106
+ 895
1107
+ ],
1108
+ "page_idx": 4
1109
+ },
1110
+ {
1111
+ "type": "text",
1112
+ "text": "(e)",
1113
+ "bbox": [
1114
+ 660,
1115
+ 896,
1116
+ 674,
1117
+ 906
1118
+ ],
1119
+ "page_idx": 4
1120
+ },
1121
+ {
1122
+ "type": "image",
1123
+ "img_path": "images/d433b01f04df1c7c67175d3f265d49c00c5e339fa012d7d8cfd9b8aa5523bade.jpg",
1124
+ "image_caption": [],
1125
+ "image_footnote": [],
1126
+ "bbox": [
1127
+ 750,
1128
+ 797,
1129
+ 831,
1130
+ 895
1131
+ ],
1132
+ "page_idx": 4
1133
+ },
1134
+ {
1135
+ "type": "image",
1136
+ "img_path": "images/244acb79e6241bdcf3cf4a65cc2d1bc730a11ab9eb22f8182784bfb2d3b198e8.jpg",
1137
+ "image_caption": [],
1138
+ "image_footnote": [],
1139
+ "bbox": [
1140
+ 831,
1141
+ 797,
1142
+ 916,
1143
+ 895
1144
+ ],
1145
+ "page_idx": 4
1146
+ },
1147
+ {
1148
+ "type": "image",
1149
+ "img_path": "images/6abd595501a021b81a916629174dc23a3d7d1c4b52c8bf1d33b7b8956ab91b28.jpg",
1150
+ "image_caption": [
1151
+ "Fig. 3. Effect of DBSCAN density clustering algorithm in two consecutive frames. The top set of images is taken every 8 frames, and the bottom set of images is taken every 4 frames. The images contain three common states of movement: sitting in a chair, slow motion and fast motion. After clustering, the foreground and background points are shown in red and green respectively."
1152
+ ],
1153
+ "image_footnote": [],
1154
+ "bbox": [
1155
+ 76,
1156
+ 68,
1157
+ 919,
1158
+ 200
1159
+ ],
1160
+ "page_idx": 5
1161
+ },
1162
+ {
1163
+ "type": "text",
1164
+ "text": "select two consecutive frames to show the clustering effect. The foreground points are marked with red and the background points are marked with green. The upper image group contains two people sitting on the chair and moving slowly respectively, and the people in the lower image group are in the fast walking state. It is worth noting from Fig.3 that many keypoints are extracted from the edge of the person, which is generally the part with the highest dynamic attributes. However, semantic segmentation is difficult to accurately judge the boundary of objects [12], which leads to the misjudgment of dynamic and static attributes of keypoints. We use DBSCAN algorithm to cluster keypoints based on depth information, which can well avoid this problem. The experimental results fully show the effectiveness and robustness of the missed detection compensation algorithm and clustering algorithm.",
1165
+ "bbox": [
1166
+ 71,
1167
+ 255,
1168
+ 488,
1169
+ 483
1170
+ ],
1171
+ "page_idx": 5
1172
+ },
1173
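A minimal sketch of depth-based DBSCAN splitting of the keypoints inside a detection box into foreground (the detected person) and background, in the spirit of Fig. 3; the `eps`/`min_samples` values and the nearest-cluster-is-foreground heuristic are assumptions for the sketch, not the paper's settings.

```python
import numpy as np
from sklearn.cluster import DBSCAN

def split_foreground(depths, eps=0.1, min_samples=5):
    """Cluster keypoint depths (metres) inside a detection box with DBSCAN and
    label the nearest dense cluster as foreground; returns a boolean mask."""
    d = np.asarray(depths, dtype=float).reshape(-1, 1)
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(d)
    valid = labels >= 0                              # -1 means noise in DBSCAN
    if not np.any(valid):
        return np.zeros(len(d), dtype=bool)          # no dense cluster found
    means = {l: d[labels == l].mean() for l in set(labels[valid])}
    fg_label = min(means, key=means.get)             # closest cluster = foreground
    return labels == fg_label
```

Because the split is driven by depth rather than by a segmentation mask, keypoints on the person's silhouette edge fall with the person's depth cluster, which is the property the paragraph above relies on.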
+ {
1174
+ "type": "text",
1175
+ "text": "B. Comparison with State-of-the-arts",
1176
+ "text_level": 1,
1177
+ "bbox": [
1178
+ 73,
1179
+ 489,
1180
+ 326,
1181
+ 505
1182
+ ],
1183
+ "page_idx": 5
1184
+ },
1185
+ {
1186
+ "type": "text",
1187
+ "text": "We contrast with ORB-SLAM2 [13] and forth most advanced dynamic SLAM methods, including DS-SLAM [4], Dyna-SLAM [5], Blitz-SLAM [12] and TRS [11]. Like our method, these algorithms are all improved based on ORB-",
1188
+ "bbox": [
1189
+ 71,
1190
+ 508,
1191
+ 488,
1192
+ 570
1193
+ ],
1194
+ "page_idx": 5
1195
+ },
1196
+ {
1197
+ "type": "text",
1198
+ "text": "SLAM2. Without calculating the static probability of the object, we provide a lower performance version of the algorithm in this paper with higher real-time performance, which is called CFP-SLAM $^{-}$ . The quantitative comparison results are shown in Tables I, II and III, in which the best results are highlighted in bold and the second-best are underlined. The data of DS-SLAM, Dyna-SLAM, Blitz-SLAM and TRS comes from the source literature, / indicates that the corresponding data is not provided in the source literature. The experimental results show that, unlike other dynamic SLAM algorithms, which only have advantages over ORB-SLAM2 in high dynamic scenarios, this algorithm can achieve almost the best results in high dynamic and low dynamic scenarios. Even the low-performance version we provide shows better performance than other algorithms. In rpy sequences, on the one hand, the epipolar constraints cannot be used, on the other hand, the large change of camera angle leads to insufficient feature matching, so our method performs slightly worse. The ATE and RPE plots of our algorithm on 8 sequences are shown in Fig.4.",
1199
+ "bbox": [
1200
+ 504,
1201
+ 255,
1202
+ 924,
1203
+ 558
1204
+ ],
1205
+ "page_idx": 5
1206
+ },
1207
+ {
1208
+ "type": "image",
1209
+ "img_path": "images/2fa6d7ad37630c63a897a09f659b769b268329f0273af69d60d0315e42c24442.jpg",
1210
+ "image_caption": [
1211
+ "(1) s/xyz"
1212
+ ],
1213
+ "image_footnote": [],
1214
+ "bbox": [
1215
+ 81,
1216
+ 594,
1217
+ 178,
1218
+ 705
1219
+ ],
1220
+ "page_idx": 5
1221
+ },
1222
+ {
1223
+ "type": "image",
1224
+ "img_path": "images/348266f84dfd5e413286dd35b997165aa4961552e24f64843d1a5255d2f22d49.jpg",
1225
+ "image_caption": [
1226
+ "(2) $\\mathrm{s / hs}$"
1227
+ ],
1228
+ "image_footnote": [],
1229
+ "bbox": [
1230
+ 187,
1231
+ 595,
1232
+ 282,
1233
+ 705
1234
+ ],
1235
+ "page_idx": 5
1236
+ },
1237
+ {
1238
+ "type": "image",
1239
+ "img_path": "images/b1eabac0e68b499317a8f0b5fe276e08fdcf621d5ac263e5bcd6e8345a3b9bf2.jpg",
1240
+ "image_caption": [
1241
+ "(3) s/static"
1242
+ ],
1243
+ "image_footnote": [],
1244
+ "bbox": [
1245
+ 289,
1246
+ 595,
1247
+ 385,
1248
+ 707
1249
+ ],
1250
+ "page_idx": 5
1251
+ },
1252
+ {
1253
+ "type": "image",
1254
+ "img_path": "images/8b00b35846db96edfae173a0095f3649ef611cc95adeb2f7cd40c75b84d25472.jpg",
1255
+ "image_caption": [
1256
+ "(4) s/rpy"
1257
+ ],
1258
+ "image_footnote": [],
1259
+ "bbox": [
1260
+ 393,
1261
+ 595,
1262
+ 488,
1263
+ 707
1264
+ ],
1265
+ "page_idx": 5
1266
+ },
1267
+ {
1268
+ "type": "image",
1269
+ "img_path": "images/5cdf1d68258e15e403fcb1547006337fbf974f5d402103fdcaf94578734daf45.jpg",
1270
+ "image_caption": [
1271
+ "(5) w/xyz"
1272
+ ],
1273
+ "image_footnote": [],
1274
+ "bbox": [
1275
+ 496,
1276
+ 595,
1277
+ 591,
1278
+ 707
1279
+ ],
1280
+ "page_idx": 5
1281
+ },
1282
+ {
1283
+ "type": "image",
1284
+ "img_path": "images/b5315d7d3d888d3489640b43b6d81d2676315ec3a42c64157b57639b7d8e015d.jpg",
1285
+ "image_caption": [
1286
+ "(6) w/hs",
1287
+ "Fig. 4. ATE and RPE from CFP-SLAM."
1288
+ ],
1289
+ "image_footnote": [],
1290
+ "bbox": [
1291
+ 601,
1292
+ 595,
1293
+ 696,
1294
+ 707
1295
+ ],
1296
+ "page_idx": 5
1297
+ },
1298
+ {
1299
+ "type": "image",
1300
+ "img_path": "images/f54e3add35c00d7baf106a28f0ee521980f8d13b8f24376e0c5e6ca2f398daf7.jpg",
1301
+ "image_caption": [
1302
+ "(7) w/static"
1303
+ ],
1304
+ "image_footnote": [],
1305
+ "bbox": [
1306
+ 705,
1307
+ 595,
1308
+ 800,
1309
+ 707
1310
+ ],
1311
+ "page_idx": 5
1312
+ },
1313
+ {
1314
+ "type": "image",
1315
+ "img_path": "images/ebb61adcbc4fb1813a6b49dd665c1637a510b31492d3cd6f14fd09fa182120e0.jpg",
1316
+ "image_caption": [
1317
+ "(8) w/ry"
1318
+ ],
1319
+ "image_footnote": [],
1320
+ "bbox": [
1321
+ 808,
1322
+ 595,
1323
+ 901,
1324
+ 707
1325
+ ],
1326
+ "page_idx": 5
1327
+ },
1328
+ {
1329
+ "type": "table",
1330
+ "img_path": "images/f440ffa3a80427629c22b755bea89cee3a34573c68e742ac342122ace144c3fe.jpg",
1331
+ "table_caption": [
1332
+ "TABLEI RESULTS OF METRICS ABSOLUTE TRAJECTORY ERROR (ATE)"
1333
+ ],
1334
+ "table_footnote": [],
1335
+ "table_body": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">ORB-SLAM2</td><td colspan=\"2\">Dyna-SLAM</td><td colspan=\"2\">DS-SLAM</td><td colspan=\"2\">Blitz-SLAM</td><td colspan=\"2\">TRS</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0092</td><td>0.0047</td><td>0.0127</td><td>0.0060</td><td>/</td><td>/</td><td>0.0148</td><td>0.0069</td><td>0.0117</td><td>/</td><td>0.0129</td><td>0.0068</td><td>0.0090</td><td>0.0042</td></tr><tr><td>fr3/s/half</td><td>0.0192</td><td>0.0110</td><td>0.0186</td><td>0.0086</td><td>/</td><td>/</td><td>0.0160</td><td>0.0076</td><td>0.0172</td><td>/</td><td>0.0159</td><td>0.0072</td><td>0.0147</td><td>0.0069</td></tr><tr><td>fr3/s/static</td><td>0.0087</td><td>0.0042</td><td>/</td><td>/</td><td>0.0065</td><td>0.0033</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0061</td><td>0.0029</td><td>0.0053</td><td>0.0027</td></tr><tr><td>fr3/s/rpy</td><td>0.0195</td><td>0.0124</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0244</td><td>0.0175</td><td>0.0253</td><td>0.0154</td></tr><tr><td>fr3/w/xyz</td><td>0.7214</td><td>0.2560</td><td>0.0164</td><td>0.0086</td><td>0.0247</td><td>0.0161</td><td>0.0153</td><td>0.0078</td><td>0.0194</td><td>/</td><td>0.0149</td><td>0.0077</td><td>0.0141</td><td>0.0072</td></tr><tr><td>fr3/w/half</td><td>0.4667</td><td>0.2601</td><td>0.0296</td><td>0.0157</td><td>0.0303</td><td>0.0159</td><td>0.0256</td><td>0.0126</td><td>0.0290</td><td>/</td><td>0.0235</td><td>0.0114</td><td>0.0237</td><td>0.0114</td></tr><tr><td>fr3/w/static</td><td>0.3872</td><td>0.1636</td><td>0.0068</td><td>0.0032</td><td>0.0081</td><td>0.0036</td><td>0.0102</td><td>0.0052</td><td>0.0111</td><td>/</td><td>0.0069</td><td>0.0032</td><td>0.0066</td><td>0.0030</td></tr><tr><td>fr3/w/rpy</td><td>0.7842</td><td>0.4005</td><td>0.0354</td><td>0.0190</td><td>0.4442</td><td>0.2350</td><td>0.0356</td><td>0.0220</td><td>0.0371</td><td>/</td><td>0.0411</td><td>0.0257</td><td>0.0368</td><td>0.0230</td></tr></table>",
1336
+ "bbox": [
1337
+ 76,
1338
+ 784,
1339
+ 924,
1340
+ 940
1341
+ ],
1342
+ "page_idx": 5
1343
+ },
1344
+ {
1345
+ "type": "table",
1346
+ "img_path": "images/e8b9041c175c6dfa08641e39d3434ad1835f0007dffef71c9733d61d835c10b0.jpg",
1347
+ "table_caption": [
1348
+ "TABLE II RESULTS OF METRIC TRANSLATIONAL DRIFT (RPE)"
1349
+ ],
1350
+ "table_footnote": [],
1351
+ "table_body": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">ORB-SLAM2</td><td colspan=\"2\">Dyna-SLAM</td><td colspan=\"2\">DS-SLAM</td><td colspan=\"2\">Blitz-SLAM</td><td colspan=\"2\">TRS</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0117</td><td>0.0060</td><td>0.0142</td><td>0.0073</td><td>/</td><td>/</td><td>0.0144</td><td>0.0071</td><td>0.0166</td><td>/</td><td>0.0149</td><td>0.0081</td><td>0.0114</td><td>0.0055</td></tr><tr><td>fr3/s/half</td><td>0.0231</td><td>0.0163</td><td>0.0239</td><td>0.0120</td><td>/</td><td>/</td><td>0.0165</td><td>0.0073</td><td>0.0259</td><td>/</td><td>0.0214</td><td>0.0099</td><td>0.0162</td><td>0.0079</td></tr><tr><td>fr3/s/static</td><td>0.0090</td><td>0.0043</td><td>/</td><td>/</td><td>0.0078</td><td>0.0038</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0078</td><td>0.0034</td><td>0.0072</td><td>0.0035</td></tr><tr><td>fr3/s/rpy</td><td>0.0245</td><td>0.0144</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0322</td><td>0.0217</td><td>0.0316</td><td>0.0186</td></tr><tr><td>fr3/w/xyz</td><td>0.3944</td><td>0.2964</td><td>0.0217</td><td>0.0119</td><td>0.0333</td><td>0.0229</td><td>0.0197</td><td>0.0096</td><td>0.0234</td><td>/</td><td>0.0196</td><td>0.0099</td><td>0.0190</td><td>0.0097</td></tr><tr><td>fr3/w/half</td><td>0.3480</td><td>0.2859</td><td>0.0284</td><td>0.0149</td><td>0.0297</td><td>0.0152</td><td>0.0253</td><td>0.0123</td><td>0.0423</td><td>/</td><td>0.0274</td><td>0.0130</td><td>0.0259</td><td>0.0128</td></tr><tr><td>fr3/w/static</td><td>0.2349</td><td>0.2151</td><td>0.0089</td><td>0.0044</td><td>0.0102</td><td>0.0048</td><td>0.0129</td><td>0.0069</td><td>0.0117</td><td>/</td><td>0.0092</td><td>0.0043</td><td>0.0089</td><td>0.0040</td></tr><tr><td>fr3/w/rpy</td><td>0.4582</td><td>0.3447</td><td>0.0448</td><td>0.0262</td><td>0.1503</td><td>0.1168</td><td>0.0473</td><td>0.0283</td><td>0.0471</td><td>/</td><td>0.0540</td><td>0.0350</td><td>0.0500</td><td>0.0306</td></tr></table>",
1352
+ "bbox": [
1353
+ 75,
1354
+ 99,
1355
+ 923,
1356
+ 256
1357
+ ],
1358
+ "page_idx": 6
1359
+ },
1360
+ {
1361
+ "type": "table",
1362
+ "img_path": "images/8f9ffd8befbfa84588cbbdb2490c699aa23b8ffc5ac5bba46ba54d394deb978f.jpg",
1363
+ "table_caption": [
1364
+ "TABLE III RESULTS OF METRIC ROTATIONAL DRIFT (RPE)"
1365
+ ],
1366
+ "table_footnote": [],
1367
+ "table_body": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">ORB-SLAM2</td><td colspan=\"2\">Dyna-SLAM</td><td colspan=\"2\">DS-SLAM</td><td colspan=\"2\">Blitz-SLAM</td><td colspan=\"2\">TRS</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.4890</td><td>0.2713</td><td>0.5042</td><td>0.2651</td><td>/</td><td>/</td><td>0.5024</td><td>0.2634</td><td>0.5968</td><td>/</td><td>0.5126</td><td>0.2793</td><td>0.4875</td><td>0.2640</td></tr><tr><td>fr3/s/half</td><td>0.6015</td><td>0.2924</td><td>0.7045</td><td>0.3488</td><td>/</td><td>/</td><td>0.5981</td><td>0.2739</td><td>0.7891</td><td>/</td><td>0.7697</td><td>0.3718</td><td>0.5917</td><td>0.2834</td></tr><tr><td>fr3/s/static</td><td>0.2850</td><td>0.1241</td><td>/</td><td>/</td><td>0.2735</td><td>0.1215</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.2749</td><td>0.1192</td><td>0.2654</td><td>0.1183</td></tr><tr><td>fr3/s/rpy</td><td>0.7772</td><td>0.3999</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.8303</td><td>0.4653</td><td>0.7410</td><td>0.3665</td></tr><tr><td>fr3/w/xyz</td><td>7.7846</td><td>5.8335</td><td>0.6284</td><td>0.3848</td><td>0.8266</td><td>0.5826</td><td>0.6132</td><td>0.3348</td><td>0.6368</td><td>/</td><td>0.6204</td><td>0.3850</td><td>0.6023</td><td>0.3719</td></tr><tr><td>fr3/w/half</td><td>7.2138</td><td>5.8299</td><td>0.7842</td><td>0.4012</td><td>0.8142</td><td>0.4101</td><td>0.7879</td><td>0.3751</td><td>0.9650</td><td>/</td><td>0.7853</td><td>0.3821</td><td>0.7575</td><td>0.3743</td></tr><tr><td>fr3/w/static</td><td>4.1856</td><td>3.8077</td><td>0.2612</td><td>0.1259</td><td>0.2690</td><td>0.1182</td><td>0.3038</td><td>0.1437</td><td>0.2872</td><td>/</td><td>0.2535</td><td>0.1130</td><td>0.2527</td><td>0.1051</td></tr><tr><td>fr3/w/rpy</td><td>8.8923</td><td>6.6658</td><td>0.9894</td><td>0.5701</td><td>3.0042</td><td>2.3065</td><td>1.0841</td><td>0.6668</td><td>1.0587</td><td>/</td><td>1.0521</td><td>0.5577</td><td>1.1084</td><td>0.6722</td></tr></table>",
1368
+ "bbox": [
1369
+ 75,
1370
+ 297,
1371
+ 923,
1372
+ 452
1373
+ ],
1374
+ "page_idx": 6
1375
+ },
1376
+ {
1377
+ "type": "table",
1378
+ "img_path": "images/04ccfb7fcc552f819804c1482f97e7b9c5d40ffaddcfb5a56764f25c48d5b52f.jpg",
1379
+ "table_caption": [
1380
+ "TABLE IV RESULTS OF METRICS ABSOLUTE TRAJECTORY ERROR (ATE) WITH DIFFERENT CONFIGURATIONS"
1381
+ ],
1382
+ "table_footnote": [],
1383
+ "table_body": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">CFP-SLAM</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">W/O-MDC</td><td colspan=\"2\">W/O-DBS</td><td colspan=\"2\">W/O-KSP</td><td colspan=\"2\">Only-YOLO</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0090</td><td>0.0042</td><td>0.0129</td><td>0.0068</td><td>0.0123</td><td>0.0066</td><td>0.0130</td><td>0.0060</td><td>0.0142</td><td>0.0063</td><td>0.0174</td><td>0.0079</td></tr><tr><td>fr3/s/half</td><td>0.0147</td><td>0.0069</td><td>0.0159</td><td>0.0072</td><td>0.0150</td><td>0.0074</td><td>0.0305</td><td>0.0179</td><td>0.0201</td><td>0.0089</td><td>0.0281</td><td>0.0158</td></tr><tr><td>fr3/s/static</td><td>0.0053</td><td>0.0027</td><td>0.0061</td><td>0.0029</td><td>0.0055</td><td>0.0025</td><td>0.0064</td><td>0.0030</td><td>0.0062</td><td>0.0030</td><td>0.0064</td><td>0.0027</td></tr><tr><td>fr3/s/rpy</td><td>0.0253</td><td>0.0154</td><td>0.0244</td><td>0.0175</td><td>0.0237</td><td>0.0149</td><td>0.0297</td><td>0.0205</td><td>0.0287</td><td>0.0195</td><td>0.0460</td><td>0.0332</td></tr><tr><td>fr3/w/xyz</td><td>0.0141</td><td>0.0072</td><td>0.0149</td><td>0.0077</td><td>0.0158</td><td>0.0079</td><td>0.0159</td><td>0.0081</td><td>0.0154</td><td>0.0076</td><td>0.0165</td><td>0.0082</td></tr><tr><td>fr3/w/half</td><td>0.0237</td><td>0.0114</td><td>0.0235</td><td>0.0114</td><td>0.0258</td><td>0.0134</td><td>0.0274</td><td>0.0137</td><td>0.0307</td><td>0.0151</td><td>0.0310</td><td>0.0165</td></tr><tr><td>fr3/w/static</td><td>0.0066</td><td>0.0030</td><td>0.0069</td><td>0.0032</td><td>0.0070</td><td>0.0031</td><td>0.0078</td><td>0.0033</td><td>0.0076</td><td>0.0033</td><td>0.0073</td><td>0.0032</td></tr><tr><td>fr3/w/rpy</td><td>0.0368</td><td>0.0230</td><td>0.0411</td><td>0.0257</td><td>0.1910</td><td>0.1594</td><td>0.0749</td><td>0.0536</td><td>0.0405</td><td>0.0211</td><td>0.0456</td><td>0.0312</td></tr></table>",
1384
+ "bbox": [
1385
+ 122,
1386
+ 494,
1387
+ 874,
1388
+ 648
1389
+ ],
1390
+ "page_idx": 6
1391
+ },
1392
+ {
1393
+ "type": "text",
1394
+ "text": "C. Ablation Experiment",
1395
+ "text_level": 1,
1396
+ "bbox": [
1397
+ 73,
1398
+ 660,
1399
+ 238,
1400
+ 674
1401
+ ],
1402
+ "page_idx": 6
1403
+ },
1404
+ {
1405
+ "type": "text",
1406
+ "text": "In order to prove the function of each module of our algorithm, We design a series of ablation experiments, and the experimental results are shown in Table IV. Among them, CFP-SLAM: The algorithm of this paper; CFP-SLAM $^{-}$ : Do not use static probability of objects; W/O-MDC: Without missed detection compensation; W/O-DBS: Without DBSCAN clustering; W/O-KSP: Without the static probability of keypoints, that is, all the foreground points after missed detection compensation and DBSCAN clustering are directly eliminated; Only-YOLO: Directly eliminate all keypoints in the box with human category.",
1407
+ "bbox": [
1408
+ 71,
1409
+ 688,
1410
+ 488,
1411
+ 854
1412
+ ],
1413
+ "page_idx": 6
1414
+ },
1415
+ {
1416
+ "type": "text",
1417
+ "text": "The experimental results show that CFP-SLAM $^{-}$ shows worse performance in low dynamic scenes, because we cannot distinguish between high dynamic objects and low dynamic objects, so all objects are processed according to high dynamic. W/O-MDC is almost unaffected in low dynamic scenes, but the performance is very poor in high dynamic scenes,",
1418
+ "bbox": [
1419
+ 71,
1420
+ 857,
1421
+ 488,
1422
+ 950
1423
+ ],
1424
+ "page_idx": 6
1425
+ },
1426
+ {
1427
+ "type": "text",
1428
+ "text": "especially in w/ropy, when the camera and objects are moving violently. In fact, the tracking is often lost in w/xyz, w/half and w/ropy because of missed detection. W/O-DBS and W/O-KSP show general performance in all sequences, which illustrates the effectiveness of DBSCAN clustering and the limitation of dealing with non-rigid bodies with partial motion as a whole, respectively. Only-YOLO encounters difficulties in initialization due to insufficient features in almost all sequences, and tracking is lost in some sequences.",
1429
+ "bbox": [
1430
+ 504,
1431
+ 660,
1432
+ 924,
1433
+ 796
1434
+ ],
1435
+ "page_idx": 6
1436
+ },
1437
+ {
1438
+ "type": "text",
1439
+ "text": "D. Real-time Analysis",
1440
+ "text_level": 1,
1441
+ "bbox": [
1442
+ 506,
1443
+ 808,
1444
+ 661,
1445
+ 821
1446
+ ],
1447
+ "page_idx": 6
1448
+ },
1449
+ {
1450
+ "type": "text",
1451
+ "text": "Real-time performance is one of the important evaluation indexes of SLAM system. We test the average running time of each module, as shown in Table V. EKF represents the missed detection compensation and data association of boxes module, OSP represents the static probability calculation module of objects, and KSP represents the static probability calculation module of keypoints based on the epipolar constraints and the projection constraints. Semantic threads based on YOLOv5s",
1452
+ "bbox": [
1453
+ 504,
1454
+ 828,
1455
+ 924,
1456
+ 950
1457
+ ],
1458
+ "page_idx": 6
1459
+ },
1460
+ {
1461
+ "type": "text",
1462
+ "text": "run in parallel with ORB feature extraction. The results show that the average processing time per frame for the main threads of CFP-SLAM and CFP-SLAM $^{-}$ is 42.7 ms and 24.77 ms, that is, the running speed reaches 23 Fps and 40 Fps respectively. Compared with the SLAM system based on semantic segmentation, it can better meet the real-time requirements while ensure the accuracy.",
1463
+ "bbox": [
1464
+ 71,
1465
+ 74,
1466
+ 491,
1467
+ 181
1468
+ ],
1469
+ "page_idx": 7
1470
+ },
1471
+ {
1472
+ "type": "table",
1473
+ "img_path": "images/9d22fe17f065f72bc0d386a52d2a9139bbe69efd866771a4319a1b59d1fc92e5.jpg",
1474
+ "table_caption": [
1475
+ "TABLEV THE AVERAGE RUNNING TIME OF EACH MODULE."
1476
+ ],
1477
+ "table_footnote": [],
1478
+ "table_body": "<table><tr><td>Methods</td><td>YOLO</td><td>EKF</td><td>OSP</td><td>DBSCAN</td><td>KSP</td><td>Tracking</td></tr><tr><td>CFP-SLAM</td><td>12.44</td><td>0.07</td><td>17.93</td><td>1.76</td><td>3.66</td><td>42.7</td></tr><tr><td>CFP-SLAM-</td><td>12.44</td><td>0.07</td><td>/</td><td>1.76</td><td>3.66</td><td>24.77</td></tr></table>",
1479
+ "bbox": [
1480
+ 86,
1481
+ 223,
1482
+ 475,
1483
+ 262
1484
+ ],
1485
+ "page_idx": 7
1486
+ },
1487
+ {
1488
+ "type": "text",
1489
+ "text": "VI. CONCLUSION",
1490
+ "text_level": 1,
1491
+ "bbox": [
1492
+ 215,
1493
+ 279,
1494
+ 346,
1495
+ 292
1496
+ ],
1497
+ "page_idx": 7
1498
+ },
1499
+ {
1500
+ "type": "text",
1501
+ "text": "In this paper, we propose a dynamic scene-oriented visual SLAM algorithm based on YOLOv5s and coarse-to-fine static probability. After missed detection compensation and keypoints clustering, the static probabilities of objects, keypoints and map points are calculated and updated as weights to participate in pose optimization. Extensive evaluation shows that our algorithm achieves the highest accuracy of localization in almost all low dynamic and high dynamic scenes, and has quite high real-time performance. In the future, we intend to build a lightweight plane and object map containing only static environment for robot navigation and augmented reality.",
1502
+ "bbox": [
1503
+ 71,
1504
+ 301,
1505
+ 490,
1506
+ 467
1507
+ ],
1508
+ "page_idx": 7
1509
+ },
1510
+ {
1511
+ "type": "text",
1512
+ "text": "REFERENCES",
1513
+ "text_level": 1,
1514
+ "bbox": [
1515
+ 233,
1516
+ 479,
1517
+ 328,
1518
+ 492
1519
+ ],
1520
+ "page_idx": 7
1521
+ },
1522
+ {
1523
+ "type": "list",
1524
+ "sub_type": "ref_text",
1525
+ "list_items": [
1526
+ "[1] M. R. U. Saputra, A. Markham, and N. Trigoni, “Visual slam and structure from motion in dynamic environments: A survey,” ACM Computing Surveys (CSUR), vol. 51, no. 2, pp. 1–36, 2018.",
1527
+ "[2] F. Zhong, S. Wang, Z. Zhang, and Y. Wang, \"Detect-slam: Making object detection and slam mutually beneficial,\" in 2018 IEEE Winter Conference on Applications of Computer Vision (WACV). IEEE, 2018, pp. 1001-1010.",
1528
+ "[3] L. Xiao, J. Wang, X. Qiu, Z. Rong, and X. Zou, \"Dynamic-slam: Semantic monocular visual localization and mapping based on deep learning in dynamic environment,\" Robotics and Autonomous Systems, vol. 117, pp. 1-16, 2019.",
1529
+ "[4] C. Yu, Z. Liu, X.-J. Liu, F. Xie, Y. Yang, Q. Wei, and Q. Fei, \"Dsslam: A semantic visual slam towards dynamic environments,\" in 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2018, pp. 1168-1174.",
1530
+ "[5] B. Bescos, J. M. Fácil, J. Civera, and J. Neira, “Dynoslam: Tracking, mapping, and inpainting in dynamic scenes,” IEEE Robotics and Automation Letters, vol. 3, no. 4, pp. 4076–4083, 2018.",
1531
+ "[6] N. Brasch, A. Bozic, J. Lallemand, and F. Tombari, \"Semantic monocular slam for highly dynamic environments,\" in 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2018, pp. 393-400.",
1532
+ "[7] K. Wang, Y. Lin, L. Wang, L. Han, M. Hua, X. Wang, S. Lian, and B. Huang, “A unified framework for mutual improvement of slam and semantic segmentation,” in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 5224–5230.",
1533
+ "[8] X. Yuan and S. Chen, \"Sad-slam: A visual slam based on semantic and depth information,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 4930-4935.",
1534
+ "[9] J. Vincent, M. Labbe, J.-S. Lauzon, F. Grondin, P.-M. Comtois-Rivet, and F. Michaud, \"Dynamic object tracking and masking for visual slam,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 4974-4979.",
1535
+ "[10] A. Li, J. Wang, M. Xu, and Z. Chen, “Dp-slam: A visual slam with moving probability towards dynamic environments,” Information Sciences, vol. 556, pp. 128-142, 2021.",
1536
+ "[11] T. Ji, C. Wang, and L. Xie, \"Towards real-time semantic rgb-d slam in dynamic environments,\" in 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2021, pp. 11 175-11 181."
1537
+ ],
1538
+ "bbox": [
1539
+ 76,
1540
+ 503,
1541
+ 488,
1542
+ 948
1543
+ ],
1544
+ "page_idx": 7
1545
+ },
1546
+ {
1547
+ "type": "list",
1548
+ "sub_type": "ref_text",
1549
+ "list_items": [
1550
+ "[12] Y. Fan, Q. Zhang, Y. Tang, S. Liu, and H. Han, \"Blitz-slam: A semantic slam in dynamic environments,\" Pattern Recognition, vol. 121, p. 108225, 2022.",
1551
+ "[13] R. Mur-Artal and J. D. Tardós, \"Orb-slam2: An open-source slam system for monocular, stereo, and rgb-d cameras,\" IEEE transactions on robotics, vol. 33, no. 5, pp. 1255-1262, 2017.",
1552
+ "[14] S. Li and D. Lee, \"Rgb-d slam in dynamic environments using static point weighting,\" IEEE Robotics and Automation Letters, vol. 2, no. 4, pp. 2263-2270, 2017.",
1553
+ "[15] Y. Sun, M. Liu, and M. Q.-H. Meng, \"Improving rgb-d slam in dynamic environments: A motion removal approach,\" Robotics and Autonomous Systems, vol. 89, pp. 110-122, 2017.",
1554
+ "[16] ——, “Motion removal for reliable rgb-d slam in dynamic environments,” Robotics and Autonomous Systems, vol. 108, pp. 115–128, 2018.",
1555
+ "[17] R. Scona, M. Jaimez, Y. R. Petillot, M. Fallon, and D. Cremers, \"Staticfusion: Background reconstruction for dense rgb-d slam in dynamic environments,\" in 2018 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2018, pp. 3849-3856.",
1556
+ "[18] G. Liu, W. Zeng, B. Feng, and F. Xu, \"Dms-slam: A general visual slam system for dynamic scenes with multiple sensors,\" Sensors, vol. 19, no. 17, p. 3714, 2019.",
1557
+ "[19] J. Bian, W.-Y. Lin, Y. Matsushita, S.-K. Yeung, T.-D. Nguyen, and M.-M. Cheng, \"Gms: Grid-based motion statistics for fast, ultra-robust feature correspondence,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 4181-4190.",
1558
+ "[20] D.-H. Kim and J.-H. Kim, \"Effective background model-based rgb-d dense visual odometry in a dynamic environment,\" IEEE Transactions on Robotics, vol. 32, no. 6, pp. 1565-1573, 2016.",
1559
+ "[21] W. Dai, Y. Zhang, P. Li, Z. Fang, and S. Scherer, \"Rgb-d slam in dynamic environments using point correlations,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020.",
1560
+ "[22] T. Zhang, H. Zhang, Y. Li, Y. Nakamura, and L. Zhang, \"Flowfusion: Dynamic dense rgb-d slam based on optical flow,\" in 2020 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2020, pp. 7322-7328.",
1561
+ "[23] V. Badrinarayanan, A. Kendall, and R. Cipolla, \"Segnet: A deep convolutional encoder-decoder architecture for image segmentation,\" IEEE transactions on pattern analysis and machine intelligence, vol. 39, no. 12, pp. 2481-2495, 2017.",
1562
+ "[24] K. He, G. Gkioxari, P. Dollár, and R. Girshick, “Mask r-cnn,” in Proceedings of the IEEE international conference on computer vision, 2017, pp. 2961–2969.",
1563
+ "[25] N. Dvornik, K. Shmelkov, J. Mairal, and C. Schmid, \"Blitznet: A real-time deep network for scene understanding,\" in Proceedings of the IEEE international conference on computer vision, 2017, pp. 4154-4162.",
1564
+ "[26] J. Sturm, N. Engelhard, F. Endres, W. Burgard, and D. Cremers, “A benchmark for the evaluation of rgb-d slam systems,” in 2012 IEEE/RSJ international conference on intelligent robots and systems. IEEE, 2012, pp. 573–580."
1565
+ ],
1566
+ "bbox": [
1567
+ 509,
1568
+ 75,
1569
+ 921,
1570
+ 632
1571
+ ],
1572
+ "page_idx": 7
1573
+ }
1574
+ ]
2202.01xxx/2202.01938/0bdc1864-0877-4963-a9de-68c2b5f8ab9e_model.json ADDED
@@ -0,0 +1,2086 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.264,
8
+ 0.058,
9
+ 0.707
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2202.01938v2 [cs.RO] 25 Feb 2022"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.125,
18
+ 0.097,
19
+ 0.873,
20
+ 0.146
21
+ ],
22
+ "angle": 0,
23
+ "content": "CFP-SLAM: A Real-time Visual SLAM Based on Coarse-to-Fine Probability in Dynamic Environments"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.275,
29
+ 0.166,
30
+ 0.694,
31
+ 0.201
32
+ ],
33
+ "angle": 0,
34
+ "content": "Xinggang Hu\\(^{1}\\), Yunzhou Zhang\\(^{1*}\\), Zhenzhong Cao\\(^{1}\\), Rong Ma\\(^{2}\\), Yanmin Wu\\(^{3}\\), Zhiqiang Deng\\(^{1}\\), Wenkai Sun\\(^{1}\\)"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.072,
40
+ 0.225,
41
+ 0.49,
42
+ 0.442
43
+ ],
44
+ "angle": 0,
45
+ "content": "Abstract—The dynamic factors in the environment will lead to the decline of camera localization accuracy due to the violation of the static environment assumption of SLAM algorithm. Recently, some related works generally use the combination of semantic constraints and geometric constraints to deal with dynamic objects, but problems can still be raised, such as poor real-time performance, easy to treat people as rigid bodies, and poor performance in low dynamic scenes. In this paper, a dynamic scene-oriented visual SLAM algorithm based on object detection and coarse-to-fine static probability named CFP-SLAM is proposed. The algorithm combines semantic constraints and geometric constraints to calculate the static probability of objects, keypoints and map points, and takes them as weights to participate in camera pose estimation. Extensive evaluations show that our approach can achieve almost the best results in high dynamic and low dynamic scenarios compared to the state-of-the-art dynamic SLAM methods, and shows quite high real-time ability."
46
+ },
47
+ {
48
+ "type": "title",
49
+ "bbox": [
50
+ 0.202,
51
+ 0.452,
52
+ 0.362,
53
+ 0.466
54
+ ],
55
+ "angle": 0,
56
+ "content": "I. INTRODUCTION"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.072,
62
+ 0.472,
63
+ 0.49,
64
+ 0.669
65
+ ],
66
+ "angle": 0,
67
+ "content": "Simultaneous localization and mapping (SLAM) is the key technology for autonomous navigation of mobile robots, and it is widely applied in the fields of autopilot, UAV and augmented reality (AR). SLAM system is based on environmental static assumption [1], and dynamic factors will bring wrong observation data to the system, making it difficult to establish various geometric constraints on which SLAM system works, and reducing the accuracy and robustness of SLAM system. The abnormal point processing mechanism of RANSAC (Random Sample Consensus) algorithm can solve the influence of certain abnormal points in static or slightly dynamic environment. However, when dynamic objects occupy most of the camera view, RANSAC algorithm has little effect."
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.072,
73
+ 0.669,
74
+ 0.49,
75
+ 0.775
76
+ ],
77
+ "angle": 0,
78
+ "content": "With the development of deep learning technology, some advanced researchers have used semantic constraints to solve the visual SLAM problem in dynamic environment recent years. The general approach is to take the semantic information obtained from object detection [2], [3] or semantic segmentation [4]-[12] as a priori and eliminate the dynamic objects in the environment combined with geometric constraints. Semantic"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.505,
84
+ 0.224,
85
+ 0.928,
86
+ 0.466
87
+ ],
88
+ "angle": 0,
89
+ "content": "segmentation can provide a fine pixel level object mask, but its real-time performance is poor. The improvement of segmentation accuracy and robustness often comes at the cost of huge computational cost. Even so, the segmentation boundary of the object can not be extremely accurate and can not completely cover the moving object [12]. Object detection can circumvent the problems above, but there are a large amount of background point clouds in the box of objects, and some complex cases will be missed easily [3]. In addition, there are two common problems with current schemes: 1) All dynamic objects are treated as high dynamic attributes, which leads to poor performance in low dynamic scene. 2) As non-rigid objects, human bodies often perform partial movement. Directly eliminating the human body as a whole object will reduce the constraint of keypoints and introduce a negative effect on accuracy of localization."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.505,
95
+ 0.465,
96
+ 0.925,
97
+ 0.737
98
+ ],
99
+ "angle": 0,
100
+ "content": "For the above problems, we propose CFP-SLAM, which is a high-performance high-efficiency visual SLAM system based on object detection and static probability in indoor dynamic environments. On the basis of ORB-SLAM2 [13], CFP-SLAM uses YOLOv5 to obtain semantic information, uses extended Kalman filter (EKF) and Hungarian algorithm to compensate missed detection, calculates the static probability of objects to distinguish high dynamic objects from low dynamic objects, and distinguishes foreground points and background points of object detection results based on DBSCAN (Density-Based Spatial Clustering of Applications with Noise) algorithm. Established on a variety of constraints, a two-stage calculation method of the static probability of keypoints from coarse to fine is designed. The static probability of keypoints is used as a weight to participate in the camera pose optimization. Considering the needs of different scenarios, we provide a lower-performance version to improve the real-time performance without calculating the static probability of objects."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.505,
106
+ 0.737,
107
+ 0.925,
108
+ 0.812
109
+ ],
110
+ "angle": 0,
111
+ "content": "Extensive experiments are conducted on public datasets. Compared with state-of-the-art dynamic SLAM methods, our approach achieves the highest localization accuracy in almost all low dynamics and high dynamic scenarios. The main contributions of this paper are as follows:"
112
+ },
113
+ {
114
+ "type": "page_footnote",
115
+ "bbox": [
116
+ 0.089,
117
+ 0.787,
118
+ 0.312,
119
+ 0.8
120
+ ],
121
+ "angle": 0,
122
+ "content": "*The corresponding author of this paper."
123
+ },
124
+ {
125
+ "type": "page_footnote",
126
+ "bbox": [
127
+ 0.076,
128
+ 0.8,
129
+ 0.489,
130
+ 0.845
131
+ ],
132
+ "angle": 0,
133
+ "content": "\\(^{1}\\)Xinggang Hu, Yunzhou Zhang, Zhenzhong Cao, Zhiqiang Deng and Wenkai Sun are with College of Information Science and Engineering, Northeastern University, Shenyang 110819, China (Email: zhangyunzhou@mail.neu.edu.cn)."
134
+ },
135
+ {
136
+ "type": "page_footnote",
137
+ "bbox": [
138
+ 0.076,
139
+ 0.846,
140
+ 0.488,
141
+ 0.869
142
+ ],
143
+ "angle": 0,
144
+ "content": "\\(^{2}\\)Rong Ma is with Beijing Simulation Center, China (Email: mar_buaa@163.com)."
145
+ },
146
+ {
147
+ "type": "page_footnote",
148
+ "bbox": [
149
+ 0.076,
150
+ 0.869,
151
+ 0.488,
152
+ 0.891
153
+ ],
154
+ "angle": 0,
155
+ "content": "\\(^{3}\\)Yanmin Wu is with School of Electronic and Computer Engineering, Peking University, Shenzhen, China."
156
+ },
157
+ {
158
+ "type": "page_footnote",
159
+ "bbox": [
160
+ 0.076,
161
+ 0.891,
162
+ 0.49,
163
+ 0.949
164
+ ],
165
+ "angle": 0,
166
+ "content": "This work was supported by National Natural Science Foundation of China (No. 61973066), Major Science and Technology Projects of Liaoning Province(No.2021JH1/10400049), Fundation of Key Laboratory of Equipment Reliability(No.WD2C20205500306), Fundation of Key Laboratory of Aerospace System Simulation(No.6142002200301)."
167
+ },
168
+ {
169
+ "type": "list",
170
+ "bbox": [
171
+ 0.076,
172
+ 0.787,
173
+ 0.49,
174
+ 0.949
175
+ ],
176
+ "angle": 0,
177
+ "content": null
178
+ },
179
+ {
180
+ "type": "page_footnote",
181
+ "bbox": [
182
+ 0.523,
183
+ 0.813,
184
+ 0.922,
185
+ 0.873
186
+ ],
187
+ "angle": 0,
188
+ "content": "- Compensating missed detection based on EKF and Hungarian algorithm, while using DBSCAN clustering algorithm to distinguish the foreground points and background points of box."
189
+ },
190
+ {
191
+ "type": "page_footnote",
192
+ "bbox": [
193
+ 0.525,
194
+ 0.874,
195
+ 0.924,
196
+ 0.949
197
+ ],
198
+ "angle": 0,
199
+ "content": "- The distinction of object dynamic attributes. Based on the YOLOv5 object detection and geometric constraints, the object motion attributes are divided into high dynamics and low dynamics, which are provided to the subsequent methods as a priori information for processing with"
200
+ },
201
+ {
202
+ "type": "list",
203
+ "bbox": [
204
+ 0.523,
205
+ 0.813,
206
+ 0.924,
207
+ 0.949
208
+ ],
209
+ "angle": 0,
210
+ "content": null
211
+ }
212
+ ],
213
+ [
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.104,
218
+ 0.075,
219
+ 0.489,
220
+ 0.104
221
+ ],
222
+ "angle": 0,
223
+ "content": "different strategies, so as to improve the robustness and adaptability of SLAM system."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.09,
229
+ 0.106,
230
+ 0.49,
231
+ 0.211
232
+ ],
233
+ "angle": 0,
234
+ "content": "- The static probability of keypoints from coarse to fine. A two-stage static probability of keypoints calculation method based on the static probability of object, the DBSCAN clustering algorithm, the epipolar constraints and the projection constraints is proposed to solve the problem of false deletion of static keypoints caused by non-rigid body local motion."
235
+ },
236
+ {
237
+ "type": "title",
238
+ "bbox": [
239
+ 0.208,
240
+ 0.22,
241
+ 0.355,
242
+ 0.233
243
+ ],
244
+ "angle": 0,
245
+ "content": "II. RELATED WORK"
246
+ },
247
+ {
248
+ "type": "title",
249
+ "bbox": [
250
+ 0.072,
251
+ 0.238,
252
+ 0.456,
253
+ 0.253
254
+ ],
255
+ "angle": 0,
256
+ "content": "A. Dynamic SLAM without Priori Semantic Information"
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.072,
262
+ 0.258,
263
+ 0.49,
264
+ 0.62
265
+ ],
266
+ "angle": 0,
267
+ "content": "When there is no semantic information as the priori, using reliable constraints to find the correct feature matching relationship is the basic method to deal with dynamic SLAM problem. Li et al. [14] propose a static weighting method of keyframe edge points, and integrated into the IAICP method to reduce tracking error. Sun et al. [15] roughly detect the motion of moving objects based on self motion compensation image difference, and enhance the motion detection by tracking the motion using particle filter. Then, they [16] propose a novel RGB-D data-based on-line motion removal approach, and build and update the foreground model incrementally. StaticFusion [17] simultaneously estimates the camera motion as well as a probabilistic static/dynamic segmentation of the current RGB-D image pair. DMS-SLAM [18] uses GMS [19] to eliminate mismatched points. Kim et al. [20] propose a dense visual mileage calculation method based on background model to estimate the nonparametric background model from depth scene. Dai et al. [21] distinguishes dynamic and static map points based on feature correlation. Flowfusion [22] uses optical flow residuals to highlight dynamic regions in rgbd point clouds. Because there is no need for deep learning networks to provide semantic priors, the above methods are usually fast in dealing with dynamic factors, but lack of accuracy."
268
+ },
269
+ {
270
+ "type": "title",
271
+ "bbox": [
272
+ 0.073,
273
+ 0.629,
274
+ 0.421,
275
+ 0.643
276
+ ],
277
+ "angle": 0,
278
+ "content": "B. Dynamic SLAM Based on Semantic Constraints"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.072,
284
+ 0.647,
285
+ 0.49,
286
+ 0.95
287
+ ],
288
+ "angle": 0,
289
+ "content": "Semantic segmentation or object detection can provide a steady and reliable priority constraint for dynamic SLAM. Detect-SLAM [2] detects objects in keyframes and propagates the motion probability of keypoints in real time to eliminate the influence of dynamic objects in SLAM. DS-SLAM [4] uses SegNet [23] to obtain semantic information, combines sparse optical flow and motion consistency detection to judge people's dynamic and static attributes. Dyna-SLAM [5] combines mask R-CNN [24] and multi view geometry to process moving objects. Brasch et al. [6] present monocular SLAM approach for highly dynamic environments which models dynamic outliers with a joint probabilistic model based on semantic prior information predicted by a CNN. With the help of the initial segmentation results, Wang et al. [7] extract the accurate pose from the rough pose by identifying and processing the moving object and possible moving object respectively, and further help to make up for the error and boundary inaccuracy of the segmentation area. Dynamic-SLAM [3] compensates SSD for missed detection based on the speed invariance of adjacent frames, and eliminates dynamic"
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.505,
295
+ 0.075,
296
+ 0.925,
297
+ 0.378
298
+ ],
299
+ "angle": 0,
300
+ "content": "objects combined with selective tracking algorithm. SaDSLAM [8] extracts static feature points from objects judged as dynamic based on semantic by verifying whether the inter frame feature points meet the epipolar constraints. Vincent et al. [9] perform semantic segmentation of object instances in the image, and use EKF to identify, track and remove dynamic objects from the scene. DP-SLAM [10] combines the results of geometric constraints and semantic segmentation, the dynamic keypoints are tracked in the Bayesian probability estimation framework. Ji et al. [11] only perform semantic segmentation on keyframes, cluster the depth map and identifies moving objects combined with re-projection error to remove known and unknown dynamic objects. Blitz-SLAM [12] repairs the mask of BlitzNet [25] based on depth information, and classifies static and dynamic matching points in potential dynamic areas using epipolar constraints. Generally, the above methods can accurately eliminate dynamic objects in the environment, but it is difficult to give consideration to both localization accuracy and real-time, and the performance is generally poor in low dynamic scenes."
301
+ },
302
+ {
303
+ "type": "title",
304
+ "bbox": [
305
+ 0.627,
306
+ 0.386,
307
+ 0.802,
308
+ 0.4
309
+ ],
310
+ "angle": 0,
311
+ "content": "III. SYSTEM OVERVIEW"
312
+ },
313
+ {
314
+ "type": "title",
315
+ "bbox": [
316
+ 0.506,
317
+ 0.406,
318
+ 0.687,
319
+ 0.421
320
+ ],
321
+ "angle": 0,
322
+ "content": "A. Definition of Variables"
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.523,
328
+ 0.424,
329
+ 0.901,
330
+ 0.439
331
+ ],
332
+ "angle": 0,
333
+ "content": "In this paper, common variables are defined as follows:"
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.523,
339
+ 0.443,
340
+ 0.644,
341
+ 0.456
342
+ ],
343
+ "angle": 0,
344
+ "content": "\\(F_{k}\\) -FrameK."
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.523,
350
+ 0.457,
351
+ 0.899,
352
+ 0.471
353
+ ],
354
+ "angle": 0,
355
+ "content": "- \\( K \\) - The intrinsic matrix of a pinhole camera model."
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.523,
361
+ 0.473,
362
+ 0.922,
363
+ 0.517
364
+ ],
365
+ "angle": 0,
366
+ "content": "- \\( T_{k,w} \\in R^{4 \\times 4} \\) - The transformation from world frame to camera frame \\( \\mathbf{K} \\), which is composed of a rotation \\( R_{k,w} \\in R^{3 \\times 3} \\) and a translation \\( t_{k,w} \\in R^{3 \\times 1} \\)."
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.523,
372
+ 0.518,
373
+ 0.922,
374
+ 0.6
375
+ ],
376
+ "angle": 0,
377
+ "content": "- \\( P_{i}^{k} \\) - The keypoint with ID \\( i \\) in \\( F_{k} \\). Its pixel coordinate is \\( P_{i w}^{k} = \\left[u_{i}^{k}, v_{i}^{k}\\right]^{T} \\), camera coordinate is \\( P_{i k}^{k} = \\left[X_{i k}^{k}, Y_{i k}^{k}, Z_{i k}^{k}\\right]^{T} \\), world coordinate is \\( P_{i w}^{k} = \\left[X_{i w}^{k}, Y_{i w}^{k}, Z_{i w}^{k}\\right]^{T} \\). \\( (\\cdot) \\) is the form of homogeneous coordinates in each coordinate system."
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.523,
383
+ 0.601,
384
+ 0.922,
385
+ 0.631
386
+ ],
387
+ "angle": 0,
388
+ "content": "- \\( P_{i^*}^{k - 1} \\) - The keypoint with ID \\( i^* \\) in \\( F_{k - 1} \\) which forms a matching relationship with \\( P_{i}^{k} \\)."
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.523,
394
+ 0.632,
395
+ 0.922,
396
+ 0.661
397
+ ],
398
+ "angle": 0,
399
+ "content": "- \\( O_{i+}^{k} \\) - The static probability of potential moving object with ID \\( i^{+} \\). \\( P_{i}^{k} \\) is the extracted keypoint on the object."
400
+ },
401
+ {
402
+ "type": "text",
403
+ "bbox": [
404
+ 0.523,
405
+ 0.662,
406
+ 0.922,
407
+ 0.692
408
+ ],
409
+ "angle": 0,
410
+ "content": "- \\( O_{Th} \\) - The threshold to distinguish whether the object motion attribute is high dynamic or low dynamic."
411
+ },
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.523,
416
+ 0.692,
417
+ 0.922,
418
+ 0.721
419
+ ],
420
+ "angle": 0,
421
+ "content": "- \\( K_{i}^{k} \\) - The static probability of \\( P_{i}^{k} \\), which is in the update state and participates in camera pose optimization."
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.523,
427
+ 0.721,
428
+ 0.922,
429
+ 0.767
430
+ ],
431
+ "angle": 0,
432
+ "content": "- \\( K_{i}^{Dk}, K_{i}^{Tk}, K_{i}^{Fk} \\) - The static probability of \\( P_{i}^{k} \\) obtained by the DBSCAN clustering algorithm, the projection constraints and the epipolar constraints respectively."
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.523,
438
+ 0.767,
439
+ 0.922,
440
+ 0.798
441
+ ],
442
+ "angle": 0,
443
+ "content": "- \\( M_{i - }^{k} \\) - The static probability of the map point forming a matching relationship with \\( P_{i}^{k} \\)."
444
+ },
445
+ {
446
+ "type": "list",
447
+ "bbox": [
448
+ 0.523,
449
+ 0.443,
450
+ 0.922,
451
+ 0.798
452
+ ],
453
+ "angle": 0,
454
+ "content": null
455
+ },
456
+ {
457
+ "type": "title",
458
+ "bbox": [
459
+ 0.507,
460
+ 0.809,
461
+ 0.667,
462
+ 0.824
463
+ ],
464
+ "angle": 0,
465
+ "content": "B. System Architecture"
466
+ },
467
+ {
468
+ "type": "text",
469
+ "bbox": [
470
+ 0.505,
471
+ 0.828,
472
+ 0.924,
473
+ 0.95
474
+ ],
475
+ "angle": 0,
476
+ "content": "The overview of CFP-SLAM is demonstrated in Fig.1. Based on ORB-SLAM2 [13], we design a complete static probability calculation and update framework of keypoints based on multiple constraints to deal with the influence of moving objects in dynamic environment. The system obtains semantic information based on YOLOv5, compensates for missed detection based on EKF and Hungarian algorithm, and then the box between adjacent frames is associated. In"
477
+ }
478
+ ],
479
+ [
480
+ {
481
+ "type": "image",
482
+ "bbox": [
483
+ 0.086,
484
+ 0.071,
485
+ 0.907,
486
+ 0.328
487
+ ],
488
+ "angle": 0,
489
+ "content": null
490
+ },
491
+ {
492
+ "type": "image_caption",
493
+ "bbox": [
494
+ 0.071,
495
+ 0.334,
496
+ 0.925,
497
+ 0.415
498
+ ],
499
+ "angle": 0,
500
+ "content": "Fig. 1. The overview of CFP-SLAM. The green portion and the purple portion are the input and output modules of the system respectively. The yellow portion is the semantic module, including object detection, missed detection compensation, and data association. The orange portion and the blue portion are static probability calculation modules for two stages of keypoints, respectively. In the first stage, the rough static probability of keypoints is calculated based on the static probability of objects and the results of DBSCAN clustering. In the second stage, based on the epipolar constraint and projection constraint, and considering the static probability of the object and the data association result of the box, the accurate static probability of feature points is calculated. During the whole process, the static probability of the map points is maintained and updated, and together with the static probability of the keypoints will be used as weight to participate in pose optimization."
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.072,
506
+ 0.43,
507
+ 0.49,
508
+ 0.643
509
+ ],
510
+ "angle": 0,
511
+ "content": "\\(F_{k}\\), only calculate and update the static probability of the keypoints inside the potential moving object box. Firstly, the static probability of potential moving object \\(O_{i+}^{k}\\) is obtained by using the optical flow and the epipolar constraints, and the object is divided into high dynamic object and low dynamic object. Initialize \\(K_{i}^{k}\\) as the static probability of the object to which the keypoint belongs. Then, foreground points and background points is distinguished and the \\(K_{i}^{Dk}\\) is calculated by using the DBSCAN clustering results, and the \\(K_{i}^{k}\\) is updated to estimate the camera pose in the first stage to obtain \\(T_{k,w}\\). Next, \\(K_{i}^{Tk}, K_{i}^{Fk}\\) are obtained by using the projection constraints and the epipolar constraints, \\(K_{i}^{k}\\) and \\(M_{i-}^{k}\\) are updated to participate in camera pose optimization as weights to obtain a more accurate \\(T_{k,w}\\)."
512
+ },
513
+ {
514
+ "type": "title",
515
+ "bbox": [
516
+ 0.167,
517
+ 0.653,
518
+ 0.396,
519
+ 0.666
520
+ ],
521
+ "angle": 0,
522
+ "content": "IV. SPECIFIC IMPLEMENTATION"
523
+ },
524
+ {
525
+ "type": "title",
526
+ "bbox": [
527
+ 0.072,
528
+ 0.673,
529
+ 0.389,
530
+ 0.687
531
+ ],
532
+ "angle": 0,
533
+ "content": "A. Missed Detection Compensation Algorithm"
534
+ },
535
+ {
536
+ "type": "text",
537
+ "bbox": [
538
+ 0.072,
539
+ 0.692,
540
+ 0.49,
541
+ 0.827
542
+ ],
543
+ "angle": 0,
544
+ "content": "When processing dynamic objects, if the semantic information as a priori is suddenly missing in some frames, on the one hand, the subsequent methods based on semantic priors will not be able to process dynamic objects. On the other hand, the sudden emergence of dynamic objects in high dynamic scenes will lead to a sharp increase in the number of keypoints incorrectly matched between adjacent frames, which leads to the loss of tracking in SLAM system in high dynamic scenario. Therefore, stable and accurate semantic information is critical."
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.072,
550
+ 0.828,
551
+ 0.491,
552
+ 0.949
553
+ ],
554
+ "angle": 0,
555
+ "content": "In order to solve the missed detection problem of YOLOv5, we introduce EKF and Hungarian algorithm to compensate the missed detection of potential moving objects. EKF is used to predict the boxes of potential moving objects in \\( F_{k} \\), while the Hungarian algorithm is used to correlate the predicted boxes with the boxes detected by YOLOv5. If the predicted box does not find a matching detected box, it could be considered that \\( F_{k} \\) has missed detection, and the prediction result of EKF"
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.505,
561
+ 0.43,
562
+ 0.923,
563
+ 0.476
564
+ ],
565
+ "angle": 0,
566
+ "content": "is adopted to compensate the missed detection result. After missed detection compensation, EKF and Hungarian algorithm are used again for inter frame data association of boxes."
567
+ },
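A minimal sketch (not the authors' implementation) of the association step just described: boxes predicted for the current frame by a per-object Kalman/EKF filter are matched to YOLOv5 detections with the Hungarian algorithm on an IoU cost, and unmatched predictions are kept as compensation for missed detections. The box format, the `iou()` helper and the IoU threshold are illustrative assumptions.

```python
# Hypothetical sketch of missed-detection compensation via Hungarian matching.
import numpy as np
from scipy.optimize import linear_sum_assignment

def compensate_missed_detections(predicted, detected, iou, iou_min=0.3):
    if not predicted:
        return list(detected)
    if not detected:
        return list(predicted)                      # every prediction counts as a miss
    cost = np.array([[1.0 - iou(p, d) for d in detected] for p in predicted])
    rows, cols = linear_sum_assignment(cost)        # Hungarian assignment
    matched = {r for r, c in zip(rows, cols) if cost[r, c] <= 1.0 - iou_min}
    missed = [p for i, p in enumerate(predicted) if i not in matched]
    return list(detected) + missed                  # detections + predicted (compensated) boxes
```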
568
+ {
569
+ "type": "title",
570
+ "bbox": [
571
+ 0.506,
572
+ 0.484,
573
+ 0.725,
574
+ 0.499
575
+ ],
576
+ "angle": 0,
577
+ "content": "B. Static Probability of Objects"
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.505,
583
+ 0.502,
584
+ 0.924,
585
+ 0.64
586
+ ],
587
+ "angle": 0,
588
+ "content": "When calculating the static probability of each potential moving object, we use the idea of DS-SLAM [4] for reference to solve the fundamental matrix \\(LF_{k,k - 1}\\) and get the polar error \\(Ld_{i}^{F_{k,k - 1}}\\). We use the epipolar constraints and chi-square distribution to test the epipolar error. Since the pixel coordinates of the matching point pair obtained by the optical flow tracking have \\(k = 2\\) degrees of freedom, if they are assumed to follow the Gauss Distribution \\(N(0,1)\\), then according to the chi-square distribution:"
589
+ },
590
+ {
591
+ "type": "equation",
592
+ "bbox": [
593
+ 0.585,
594
+ 0.648,
595
+ 0.923,
596
+ 0.69
597
+ ],
598
+ "angle": 0,
599
+ "content": "\\[\n\\operatorname {c h s q} (x; k) = \\left\\{ \\begin{array}{c} \\frac {x ^ {(k / 2 - 1)} e ^ {- x / 2}}{2 ^ {k / 2} \\Gamma \\left(\\frac {k}{2}\\right)}, x > 0 \\\\ 0, x \\leq 0 \\end{array} \\right. \\tag {1}\n\\]"
600
+ },
601
+ {
602
+ "type": "text",
603
+ "bbox": [
604
+ 0.523,
605
+ 0.696,
606
+ 0.786,
607
+ 0.711
608
+ ],
609
+ "angle": 0,
610
+ "content": "The definition of the function \\(\\Gamma(v)\\) is:"
611
+ },
612
+ {
613
+ "type": "equation",
614
+ "bbox": [
615
+ 0.601,
616
+ 0.713,
617
+ 0.923,
618
+ 0.746
619
+ ],
620
+ "angle": 0,
621
+ "content": "\\[\n\\Gamma (v) = \\int_ {0} ^ {\\infty} e ^ {- t} t ^ {v - 1} d t, \\operatorname {R e} v > 0 \\tag {2}\n\\]"
622
+ },
623
+ {
624
+ "type": "text",
625
+ "bbox": [
626
+ 0.523,
627
+ 0.751,
628
+ 0.881,
629
+ 0.768
630
+ ],
631
+ "angle": 0,
632
+ "content": "The single estimation result of \\(O_{i^{+}}^{k}\\) can be obtained:"
633
+ },
634
+ {
635
+ "type": "equation",
636
+ "bbox": [
637
+ 0.596,
638
+ 0.776,
639
+ 0.923,
640
+ 0.81
641
+ ],
642
+ "angle": 0,
643
+ "content": "\\[\n\\left(O _ {i ^ {+}} ^ {k}\\right) _ {m} = \\operatorname {c h s q} \\left(\\left(L d _ {i} ^ {F _ {k, k - 1}}\\right) ^ {2}; 2\\right) \\tag {3}\n\\]"
644
+ },
645
+ {
646
+ "type": "text",
647
+ "bbox": [
648
+ 0.505,
649
+ 0.814,
650
+ 0.923,
651
+ 0.904
652
+ ],
653
+ "angle": 0,
654
+ "content": "After all estimation results are obtained by using all optical flow point pairs belonging to the object, all estimation results are sorted from small to large. Let the number of all estimation results be \\( M \\), and take the average value of \\( (O_{i+}^{k})_{m} \\) at 0.1M, 0.2M, 0.3M position after ranking as the estimated value of object static probability \\( O_{i+}^{k} \\)."
655
+ },
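A small sketch, under the assumption that Eq.(3) is applied literally with the chi-square density of Eq.(1), of how the object static probability could be aggregated from the per-point-pair estimates; the function name and use of scipy are illustrative, and how the raw density values are scaled is not specified in this excerpt.

```python
# Hypothetical sketch of Eqs. (1)-(3) plus the averaging rule above: each
# optical-flow point pair yields chsq((Ld_i)^2; 2), the results are sorted in
# ascending order, and the values at the 0.1M, 0.2M and 0.3M positions are
# averaged to give O_{i+}^k. scipy's chi2.pdf is the chsq density of Eq. (1).
import numpy as np
from scipy.stats import chi2

def object_static_probability(epipolar_errors):
    errs = np.asarray(epipolar_errors, dtype=float)
    singles = np.sort(chi2.pdf(errs ** 2, df=2))   # Eq. (3), sorted ascending
    M = len(singles)
    idx = [int(0.1 * M), int(0.2 * M), int(0.3 * M)]
    return float(np.mean(singles[idx]))            # averaged estimate O_{i+}^k
```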
656
+ {
657
+ "type": "text",
658
+ "bbox": [
659
+ 0.505,
660
+ 0.905,
661
+ 0.923,
662
+ 0.949
663
+ ],
664
+ "angle": 0,
665
+ "content": "According to the calculation result of the static probability of the object and the real motion of the object, and taking into account that the negative effect of the dynamic point is"
666
+ }
667
+ ],
668
+ [
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.072,
673
+ 0.075,
674
+ 0.493,
675
+ 0.21
676
+ ],
677
+ "angle": 0,
678
+ "content": "generally greater than the positive effect of the increase of the static constraint when the camera pose is estimated, we set \\( O_{Th} = 0.9 \\), the object motion attributes are divided into high dynamic and low dynamic, which are provided to the subsequent methods as a priori information for processing with different strategies. The static probability of all keypoints in the box of the potential moving object is initialized to \\( O_{i^{+}}^{k} \\) and the static probability of other keypoints is initialized to 1.0."
679
+ },
680
+ {
681
+ "type": "title",
682
+ "bbox": [
683
+ 0.074,
684
+ 0.222,
685
+ 0.43,
686
+ 0.237
687
+ ],
688
+ "angle": 0,
689
+ "content": "C. Static Probability of Keypoints in the First Stage"
690
+ },
691
+ {
692
+ "type": "text",
693
+ "bbox": [
694
+ 0.072,
695
+ 0.244,
696
+ 0.491,
697
+ 0.44
698
+ ],
699
+ "angle": 0,
700
+ "content": "1) DBSCAN Density Clustering Algorithm: Compared with semantic segmentation methods, object detection technology has great advantages in real-time, but it can not provide accurate object mask. In the indoor dynamic SLAM scene, this problem leads to numerous static backgrounds in the boxes classified as people, and the false deletion of static keypoints will reduce the constraints of camera pose optimization and reduce the accuracy of camera pose estimation. We noticed that people as the foreground as a non-rigid body, his depth has a good continuity, and usually has a large fault with the background depth. To this end, we use the DBSCAN density clustering algorithm to distinguish between the foreground and background points of boxes classified as people."
701
+ },
702
+ {
703
+ "type": "text",
704
+ "bbox": [
705
+ 0.073,
706
+ 0.44,
707
+ 0.49,
708
+ 0.53
709
+ ],
710
+ "angle": 0,
711
+ "content": "We adaptively determine \\(eps\\) (the neighborhood radius of DBSCAN density clustering algorithm) and \\(minPts\\) (the threshold of the number of samples in the neighborhood). After clustering, the one with the lowest average value of samples in cluster \\(\\mathbf{C} = \\{C_1,C_2,\\dots ,C_k\\}\\) is taken as the foreground points of box."
712
+ },
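A minimal sketch of the foreground/background split described above, assuming the clustering is run on keypoint depths and the nearest cluster is taken as the person; fixed `eps` and `min_samples` values are used here only for illustration, whereas the paper chooses them adaptively.

```python
# Hypothetical sketch: cluster the depths of keypoints inside a "person" box
# with DBSCAN and take the cluster with the smallest mean depth as foreground.
import numpy as np
from sklearn.cluster import DBSCAN

def foreground_mask(depths, eps=0.1, min_samples=5):
    d = np.asarray(depths, dtype=float).reshape(-1, 1)
    labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(d)
    clusters = [c for c in np.unique(labels) if c != -1]     # -1 marks noise
    if not clusters:
        return np.zeros(len(d), dtype=bool)
    nearest = min(clusters, key=lambda c: d[labels == c].mean())
    return labels == nearest        # True for foreground keypoints
```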
713
+ {
714
+ "type": "text",
715
+ "bbox": [
716
+ 0.072,
717
+ 0.531,
718
+ 0.49,
719
+ 0.651
720
+ ],
721
+ "angle": 0,
722
+ "content": "After getting the DBSCAN clustering results, we adopt a soft strategy to further estimate the static probability of background points in the box of a potential moving object. Obviously, the static probability of background points must be greater than that of the object, and it is positively correlated with the static probability of the object. Specifies that the static probability of background points derived from the DBSCAN cluster is:"
723
+ },
724
+ {
725
+ "type": "equation",
726
+ "bbox": [
727
+ 0.131,
728
+ 0.657,
729
+ 0.49,
730
+ 0.7
731
+ ],
732
+ "angle": 0,
733
+ "content": "\\[\nK _ {i} ^ {D k} = \\left\\{ \\begin{array}{c} \\frac {1 - O _ {T h}}{\\left(O _ {T h}\\right) ^ {4}} \\left(K _ {i} ^ {k}\\right) ^ {3} + 1, O _ {i +} ^ {k} \\leq O _ {T h} \\\\ \\frac {1}{K _ {i} ^ {k}}, \\quad O _ {i +} ^ {k} > O _ {T h} \\end{array} \\right. \\tag {4}\n\\]"
734
+ },
735
+ {
736
+ "type": "text",
737
+ "bbox": [
738
+ 0.072,
739
+ 0.703,
740
+ 0.49,
741
+ 0.794
742
+ ],
743
+ "angle": 0,
744
+ "content": "Considering that the static probability estimation of keypoints has not been strictly calculated at each point, in other words, the static probability of the keypoints is coarse at present, and the camera pose estimation is vulnerable to dynamic points, we set the static probability of all foreground points in the box of high dynamic objects to 0."
745
+ },
746
+ {
747
+ "type": "text",
748
+ "bbox": [
749
+ 0.073,
750
+ 0.794,
751
+ 0.49,
752
+ 0.824
753
+ ],
754
+ "angle": 0,
755
+ "content": "2) First Stage Pose Optimization: Update the static probability of keypoints:"
756
+ },
757
+ {
758
+ "type": "equation",
759
+ "bbox": [
760
+ 0.218,
761
+ 0.825,
762
+ 0.489,
763
+ 0.843
764
+ ],
765
+ "angle": 0,
766
+ "content": "\\[\nK _ {i} ^ {k} = K _ {i} ^ {k} \\times K _ {i} ^ {D k} \\tag {5}\n\\]"
767
+ },
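A hedged sketch of the first-stage update implied by Eqs. (4)-(5) and the rule for high-dynamic foreground points; the function name and scalar interface are assumptions for illustration, with O_Th = 0.9 as stated in the paper.

```python
# Hypothetical sketch of the first-stage update, Eqs. (4)-(5): background
# points in a box receive K_i^Dk derived from the object's static probability,
# foreground points of high-dynamic objects are forced to 0, and the keypoint
# probability is then updated multiplicatively.
O_TH = 0.9

def first_stage_update(K_i, O_obj, is_foreground):
    if is_foreground:
        # Foreground point: zeroed if the object is high dynamic, else kept.
        return 0.0 if O_obj <= O_TH else K_i
    if O_obj <= O_TH:                                   # Eq. (4), first branch
        K_dbs = (1.0 - O_TH) / (O_TH ** 4) * K_i ** 3 + 1.0
    else:                                               # Eq. (4), second branch
        K_dbs = 1.0 / K_i
    return K_i * K_dbs                                  # Eq. (5)
```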
768
+ {
769
+ "type": "text",
770
+ "bbox": [
771
+ 0.072,
772
+ 0.844,
773
+ 0.491,
774
+ 0.95
775
+ ],
776
+ "angle": 0,
777
+ "content": "When initializing the SLAM system, map points will be created. At this time, the static probability of map point \\( M_{i}^{k} \\) will be initialized to the static probability of corresponding keypoint \\( K_{i}^{k} \\). In the frame after initialization, \\( K_{i}^{k} \\) and \\( M_{i}^{k} \\) are used as weights to optimize the camera pose, and the camera pose estimation value \\( T_{k,w} \\) in the first stage is obtained. Then, the static probability of \\( P_{i}^{k} \\), which has a matching relation"
778
+ },
779
+ {
780
+ "type": "text",
781
+ "bbox": [
782
+ 0.506,
783
+ 0.075,
784
+ 0.924,
785
+ 0.106
786
+ ],
787
+ "angle": 0,
788
+ "content": "with the keypoints in \\(F_{k - 1}\\), is calculated precisely based on the projection constraints and the epipolar constraints."
789
+ },
790
+ {
791
+ "type": "title",
792
+ "bbox": [
793
+ 0.506,
794
+ 0.121,
795
+ 0.88,
796
+ 0.137
797
+ ],
798
+ "angle": 0,
799
+ "content": "D. Static Probability of Keypoints in the Second Stage"
800
+ },
801
+ {
802
+ "type": "text",
803
+ "bbox": [
804
+ 0.506,
805
+ 0.144,
806
+ 0.924,
807
+ 0.189
808
+ ],
809
+ "angle": 0,
810
+ "content": "1) Static Probability Based on the Projection Constraints: Convert the \\( P_{i^*}^{k-1} \\) from the pixel coordinate to the camera coordinate:"
811
+ },
812
+ {
813
+ "type": "equation",
814
+ "bbox": [
815
+ 0.636,
816
+ 0.196,
817
+ 0.923,
818
+ 0.226
819
+ ],
820
+ "angle": 0,
821
+ "content": "\\[\nP _ {i _ {k - 1} ^ {*}} ^ {k - 1} = \\frac {1}{K} Z _ {i _ {k - 1} ^ {*}} ^ {k - 1} \\widetilde {P _ {i _ {u \\nu}} ^ {k - 1}} \\tag {6}\n\\]"
822
+ },
823
+ {
824
+ "type": "text",
825
+ "bbox": [
826
+ 0.506,
827
+ 0.237,
828
+ 0.923,
829
+ 0.273
830
+ ],
831
+ "angle": 0,
832
+ "content": "Transform and project \\( P_{i_{k-1}^*}^{k-1} \\) to \\( F_k \\), and the Euclidean distance between the projection point and \\( P_i^k \\) is:"
833
+ },
834
+ {
835
+ "type": "equation",
836
+ "bbox": [
837
+ 0.512,
838
+ 0.28,
839
+ 0.923,
840
+ 0.363
841
+ ],
842
+ "angle": 0,
843
+ "content": "\\[\nd _ {i} ^ {T} = \\left\\| \\right. P _ {i _ {u v}} ^ {k} - \\left| \\frac {1}{\\left| T _ {k , k - 1} \\widetilde {P _ {i _ {k - 1} ^ {*}}} ^ {k - 1} \\right| _ {Z}} K \\right| T _ {k, k - 1} \\widetilde {P _ {i _ {k - 1} ^ {*}}} \\left. \\right| _ {X Y Z} \\Bigg | _ {u \\nu} \\left. \\right\\| _ {2} \\tag {7}\n\\]"
844
+ },
845
+ {
846
+ "type": "text",
847
+ "bbox": [
848
+ 0.505,
849
+ 0.364,
850
+ 0.924,
851
+ 0.561
852
+ ],
853
+ "angle": 0,
854
+ "content": "Where function \\( |P|_{Z} \\) represents the z-axis coordinate of point \\( P \\), and \\( |P|_{XYZ} \\) represents the non-homogeneous coordinate form of point \\( P \\). On the premise that the camera pose \\( T_{k,w} \\) is relatively accurate, the greater \\( d_{i}^{T} \\), the greater the possibility that \\( P_{i}^{k} \\) and \\( P_{i^{*}}^{k-1} \\) are mismatched. Based on this principle, we design a static probability model based on the projection constraints. After sorting the \\( d_{i}^{T} \\) of all keypoints outside the box of the dynamic object in \\( F_{k} \\) from small to large, take \\( d_{i}^{T} \\) at the truncated position of 0.8 as the adaptive threshold \\( D_{Th}^{T} \\) of the projection error, and obtain the minimum value \\( d_{min}^{T} \\) of \\( d_{i}^{T} \\). We use the Sigmoid function form to measure the static probability of keypoints of the matching relationship in the box:"
855
+ },
856
+ {
857
+ "type": "equation",
858
+ "bbox": [
859
+ 0.594,
860
+ 0.575,
861
+ 0.922,
862
+ 0.617
863
+ ],
864
+ "angle": 0,
865
+ "content": "\\[\nK _ {i} ^ {T k} = \\frac {1}{1 + e ^ {\\left(d _ {i} ^ {T} - D _ {T h} ^ {T}\\right) \\times \\frac {5}{D _ {T h} ^ {T} - d _ {\\operatorname* {m i n}} ^ {T}}}} \\tag {8}\n\\]"
866
+ },
867
+ {
868
+ "type": "text",
869
+ "bbox": [
870
+ 0.506,
871
+ 0.625,
872
+ 0.923,
873
+ 0.745
874
+ ],
875
+ "angle": 0,
876
+ "content": "For a pair of matching points, the satisfaction of the projection constraints is not only related to whether the corresponding spatial points strictly meet the static environment assumption, but also directly related to the number of constraints when solving the pose matrix and whether the pose matrix itself is correctly solved. Therefore, the statistical confidence \\( C_s^{Tk} \\) and calculation confidence \\( C_c^{Tk} \\) of the pose matrix are introduced:"
877
+ },
878
+ {
879
+ "type": "equation",
880
+ "bbox": [
881
+ 0.616,
882
+ 0.76,
883
+ 0.922,
884
+ 0.79
885
+ ],
886
+ "angle": 0,
887
+ "content": "\\[\nC _ {S} ^ {T k} = \\frac {1}{1 + e ^ {- N _ {B A} + 0 . 5 T h _ {B A}}} \\tag {9}\n\\]"
888
+ },
889
+ {
890
+ "type": "equation",
891
+ "bbox": [
892
+ 0.632,
893
+ 0.806,
894
+ 0.922,
895
+ 0.843
896
+ ],
897
+ "angle": 0,
898
+ "content": "\\[\nC _ {C} ^ {T k} = 1 - \\frac {\\sum d _ {i} ^ {T}}{N _ {T} \\times D _ {T h} ^ {T}} \\tag {10}\n\\]"
899
+ },
900
+ {
901
+ "type": "text",
902
+ "bbox": [
903
+ 0.506,
904
+ 0.851,
905
+ 0.924,
906
+ 0.944
907
+ ],
908
+ "angle": 0,
909
+ "content": "Where \\(N_{BA}\\) is the number of interior points obtained by participating in the last camera pose solution, and threshold \\(Th_{BA}\\) is the minimum number of interior points required to participate in the camera pose solution, \\(N_{T}\\) and \\(\\sum d_i^T\\) respectively represent the number of all sample points and the sum of \\(d_i^T\\) satisfying \\(d_i^T < D_{Th}^T\\)."
910
+ }
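A hedged sketch of Eqs. (8)-(10) as stated above: the Sigmoid-shaped static probability from the projection error, and the statistical and calculation confidences of the pose matrix. Function names and the array-based interface are assumptions for illustration.

```python
# Hypothetical sketch of Eqs. (8)-(10). d_all are the projection errors of
# keypoints outside dynamic-object boxes, used only to set the adaptive
# threshold (0.8 truncation position) and the minimum error.
import numpy as np

def projection_static_probability(d_T, d_all):
    d_sorted = np.sort(np.asarray(d_all, dtype=float))
    D_th = d_sorted[int(0.8 * len(d_sorted))]            # adaptive threshold D_Th^T
    d_min = d_sorted[0]
    return 1.0 / (1.0 + np.exp((d_T - D_th) * 5.0 / (D_th - d_min)))   # Eq. (8)

def pose_confidences(N_BA, Th_BA, d_inliers, D_th):
    C_s = 1.0 / (1.0 + np.exp(-N_BA + 0.5 * Th_BA))      # Eq. (9)
    C_c = 1.0 - np.sum(d_inliers) / (len(d_inliers) * D_th)   # Eq. (10)
    return C_s, C_c
```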
911
+ ],
912
+ [
913
+ {
914
+ "type": "text",
915
+ "bbox": [
916
+ 0.073,
917
+ 0.075,
918
+ 0.49,
919
+ 0.119
920
+ ],
921
+ "angle": 0,
922
+ "content": "2) Static Probability Based on the Epipolar Constraints: Based on the camera pose estimation \\( T_{k,w} \\) in the first stage, a more accurate fundamental matrix can be calculated:"
923
+ },
924
+ {
925
+ "type": "equation",
926
+ "bbox": [
927
+ 0.152,
928
+ 0.13,
929
+ 0.49,
930
+ 0.15
931
+ ],
932
+ "angle": 0,
933
+ "content": "\\[\nF _ {k, k - 1} = \\mathrm {K} ^ {- \\mathrm {T}} \\left(t _ {k, k - 1}\\right) ^ {\\wedge} R _ {k, k - 1} \\mathrm {K} ^ {- 1} \\tag {11}\n\\]"
934
+ },
935
+ {
936
+ "type": "text",
937
+ "bbox": [
938
+ 0.089,
939
+ 0.156,
940
+ 0.489,
941
+ 0.178
942
+ ],
943
+ "angle": 0,
944
+ "content": "The pole line \\( l_i^k = \\left[A_i^k, B_i^k, C_i^k\\right]^T \\) corresponding to \\( P_i^k \\) is:"
945
+ },
946
+ {
947
+ "type": "equation",
948
+ "bbox": [
949
+ 0.219,
950
+ 0.182,
951
+ 0.49,
952
+ 0.209
953
+ ],
954
+ "angle": 0,
955
+ "content": "\\[\nl _ {i} ^ {k} = F _ {k, k - 1} \\widetilde {P _ {i _ {u v} ^ {*}} ^ {k - 1}} \\tag {12}\n\\]"
956
+ },
957
+ {
958
+ "type": "text",
959
+ "bbox": [
960
+ 0.09,
961
+ 0.213,
962
+ 0.279,
963
+ 0.232
964
+ ],
965
+ "angle": 0,
966
+ "content": "Then the polar error \\(d_i^F\\) is:"
967
+ },
968
+ {
969
+ "type": "equation",
970
+ "bbox": [
971
+ 0.195,
972
+ 0.239,
973
+ 0.489,
974
+ 0.3
975
+ ],
976
+ "angle": 0,
977
+ "content": "\\[\nd _ {i} ^ {F} = \\frac {\\left| \\left(\\widetilde {P _ {i _ {\\mathrm {u v}}} ^ {k}}\\right) ^ {T} l _ {i} ^ {k} \\right|}{\\sqrt {\\left(A _ {i} ^ {k}\\right) ^ {2} + \\left(B _ {i} ^ {k}\\right) ^ {2}}} \\tag {13}\n\\]"
978
+ },
979
+ {
980
+ "type": "text",
981
+ "bbox": [
982
+ 0.072,
983
+ 0.306,
984
+ 0.49,
985
+ 0.366
986
+ ],
987
+ "angle": 0,
988
+ "content": "Similar to the projection constraints, we calculate static probability and confidence based on the epipolar constraints to obtain \\( K_{i}^{Fk} \\), the statistical confidence \\( C_s^{Fk} \\) and calculation confidence \\( C_c^{Fk} \\) of the fundamental matrix."
989
+ },
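A minimal sketch of Eqs. (11)-(13): the fundamental matrix is built from the relative pose and the point-to-epipolar-line distance is measured. The helper names and the `[u, v]` pixel interface are assumptions for illustration.

```python
# Hypothetical sketch of Eqs. (11)-(13). K is the 3x3 intrinsic matrix, R and
# t the relative rotation and translation from F_{k-1} to F_k, and p_prev /
# p_cur the matched pixel coordinates in F_{k-1} and F_k.
import numpy as np

def skew(t):
    return np.array([[0.0, -t[2], t[1]],
                     [t[2], 0.0, -t[0]],
                     [-t[1], t[0], 0.0]])

def epipolar_error(K, R, t, p_prev, p_cur):
    K_inv = np.linalg.inv(K)
    F = K_inv.T @ skew(t) @ R @ K_inv                   # Eq. (11)
    l = F @ np.array([p_prev[0], p_prev[1], 1.0])       # Eq. (12), line [A, B, C]
    x = np.array([p_cur[0], p_cur[1], 1.0])
    return abs(x @ l) / np.hypot(l[0], l[1])            # Eq. (13)
```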
990
+ {
991
+ "type": "text",
992
+ "bbox": [
993
+ 0.072,
994
+ 0.367,
995
+ 0.49,
996
+ 0.456
997
+ ],
998
+ "angle": 0,
999
+ "content": "It should be noted that, as Eq.11 mentioned, the fundamental matrix can not be obtained when the camera translation is not large enough. Therefore, when the camera translation is less than the set threshold \\( t_{Th} \\), skip the calculation of static probability and confidence based on the epipolar constraints, that is:"
1000
+ },
1001
+ {
1002
+ "type": "equation",
1003
+ "bbox": [
1004
+ 0.095,
1005
+ 0.465,
1006
+ 0.49,
1007
+ 0.485
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": "\\[\nK _ {i} ^ {F k} = 0, C _ {S} ^ {F k} = C _ {C} ^ {F k} = 0 \\quad \\text {s . t .} \\| t _ {k, k - 1} \\| _ {2} \\leq t _ {T h} \\tag {14}\n\\]"
1011
+ },
1012
+ {
1013
+ "type": "text",
1014
+ "bbox": [
1015
+ 0.072,
1016
+ 0.492,
1017
+ 0.491,
1018
+ 0.675
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": "3) Second Stage Pose Optimization: After calculating the static probability of the keypoints based on the projection constraints and the epipolar constraints, we update the static probability of \\( P_{i}^{k} \\) which matches the keypoints in \\( F_{k - 1} \\) for the second time. When the object is in high dynamics, the negative impact of dynamic points on camera pose estimation is generally greater than the positive impact of the increase in the number of static point constraints, which is just the opposite when the object is in low dynamics. This is because ORB-SLAM2 has certain outlier suppression strategies, which can suppress dynamic disturbances in low dynamics, but does not work in high dynamics. So, when \\( O_{i + }^{k}\\leq O_{Th} \\),"
1022
+ },
1023
+ {
1024
+ "type": "equation",
1025
+ "bbox": [
1026
+ 0.139,
1027
+ 0.683,
1028
+ 0.49,
1029
+ 0.719
1030
+ ],
1031
+ "angle": 0,
1032
+ "content": "\\[\nK _ {i} ^ {k} = \\left\\{ \\begin{array}{c} K _ {i} ^ {T k} \\times K _ {i} ^ {F k}, \\| t _ {k, k - 1} \\| _ {2} > t _ {T h} \\\\ K _ {i} ^ {T k}, \\| t _ {k, k - 1} \\| _ {2} \\leq t _ {T h} \\end{array} \\right. \\tag {15}\n\\]"
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "bbox": [
1037
+ 0.09,
1038
+ 0.724,
1039
+ 0.221,
1040
+ 0.742
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "when \\(O_{i^{+}}^{k} > O_{Th}\\)"
1044
+ },
1045
+ {
1046
+ "type": "equation",
1047
+ "bbox": [
1048
+ 0.087,
1049
+ 0.751,
1050
+ 0.489,
1051
+ 0.788
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "\\[\nK _ {i} ^ {k} = \\frac {K _ {i} ^ {T k} \\times C _ {s} ^ {T k} C _ {c} ^ {T k}}{C _ {s} ^ {T k} C _ {c} ^ {T k} + C _ {s} ^ {F k} C _ {c} ^ {F k}} + \\frac {K _ {i} ^ {F k} \\times C _ {s} ^ {F k} C _ {c} ^ {F k}}{C _ {s} ^ {T k} C _ {c} ^ {T k} + C _ {s} ^ {F k} C _ {c} ^ {F k}} \\tag {16}\n\\]"
1055
+ },
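A hedged sketch of the second-stage combination described by Eqs. (15)-(16); the function name and scalar interface are assumptions for illustration.

```python
# Hypothetical sketch of Eqs. (15)-(16): for a high-dynamic object the two
# probabilities are multiplied (or only the projection term is used when the
# camera translation is too small); for a low-dynamic object they are fused
# by a confidence-weighted average.
def second_stage_probability(K_T, K_F, O_obj, O_th, trans_norm, t_th,
                             Cs_T, Cc_T, Cs_F, Cc_F):
    if O_obj <= O_th:                                   # high dynamic, Eq. (15)
        return K_T * K_F if trans_norm > t_th else K_T
    w_T = Cs_T * Cc_T                                   # low dynamic, Eq. (16)
    w_F = Cs_F * Cc_F
    return (K_T * w_T + K_F * w_F) / (w_T + w_F)
```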
1056
+ {
1057
+ "type": "text",
1058
+ "bbox": [
1059
+ 0.506,
1060
+ 0.075,
1061
+ 0.925,
1062
+ 0.302
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "After missed detection compensation, we use EKF and Hungarian algorithm to correlate the boxes of potential moving objects between adjacent frames. It is easy to know that if the association result of a box in \\( F_{k} \\) is not found in \\( F_{k-1} \\), even if there is a matching relationship between the foreground points in the box, it is generally a false matching, so let \\( K_{i}^{k} = 0 \\) in this case. For \\( P_{i}^{k} \\) that does not match the keypoints in \\( F_{k-1} \\), according to the results of DBSCAN clustering, if \\( P_{i}^{k} \\) belongs to the foreground points, let \\( K_{i}^{k} = 0 \\), else let \\( K_{i}^{k} = M_{i-}^{k} \\). After the second estimation result of \\( K_{i}^{k} \\) is obtained, \\( M_{i-}^{k} \\) is updated. When \\( M_{i-}^{k} < 0.3 \\), delete the map point. Then \\( K_{i}^{k} \\) and \\( M_{i-}^{k} \\) are used as weights to participate in the second stage of camera pose optimization. When there is a big difference between \\( K_{i}^{k} \\) and \\( M_{i-}^{k} \\), it can be considered that \\( K_{i}^{k} \\) and \\( M_{i-}^{k} \\) are mismatched and do not participate in optimization."
1066
+ },
1067
+ {
1068
+ "type": "title",
1069
+ "bbox": [
1070
+ 0.599,
1071
+ 0.312,
1072
+ 0.831,
1073
+ 0.325
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "V. EXPERIMENTS AND RESULTS"
1077
+ },
1078
+ {
1079
+ "type": "text",
1080
+ "bbox": [
1081
+ 0.505,
1082
+ 0.331,
1083
+ 0.924,
1084
+ 0.603
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "In this section, we test the performance of the proposed algorithm in 8 dynamic sequences of the TUM RGB-D dataset [26], including 4 low dynamic sequences (fr3/s for short) and 4 high dynamic sequences (fr3/w for short), and the camera includes 4 kinds of motion: static, xyz, halfsphere and rpy. The indicators used to evaluate the accuracy are the Absolute Trajectory Error (ATE) and the Relative Pose Error (RPE). ATE represents the global consistency of trajectory. RPE includes translation drift and rotation drift. The Root-Mean-Square-Error (RMSE) and Standard Deviation (S.D.) of both are used to represent the robustness and stability of the system [12]. Firstly, we show the effect of missed detection compensation and DBSCAN clustering, then compare our method with some of the most advanced methods, then design a series of ablation experiments to test the impact of each module, and finally carry out real-time analysis. All the experiments are performed on a computer with Intel i7 CPU, 3060 GPU, and 16GB memory."
1088
+ },
1089
+ {
1090
+ "type": "title",
1091
+ "bbox": [
1092
+ 0.505,
1093
+ 0.613,
1094
+ 0.922,
1095
+ 0.629
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "A. Missed Detection Compensation and DBSCAN Clustering"
1099
+ },
1100
+ {
1101
+ "type": "text",
1102
+ "bbox": [
1103
+ 0.505,
1104
+ 0.633,
1105
+ 0.924,
1106
+ 0.785
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "In the dynamic SLAM scene, the motion of the object, the incomplete appearance of the object to be detected in the camera field of view, the blurred image and the singular angle of view caused by camera rotation all bring severe challenges to the object detection, very easy to cause miss detection, even will lead to continuous frame miss detection. Fig.2(a)-(d) and Fig.2(e) show the results of missed detection compensation of object detection in the above four cases and six consecutive frames, respectively. Fig.3 shows the DBSCAN clustering results after missed detection compensation. We"
1110
+ },
1111
+ {
1112
+ "type": "image",
1113
+ "bbox": [
1114
+ 0.075,
1115
+ 0.798,
1116
+ 0.16,
1117
+ 0.896
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": null
1121
+ },
1122
+ {
1123
+ "type": "image_caption",
1124
+ "bbox": [
1125
+ 0.107,
1126
+ 0.897,
1127
+ 0.124,
1128
+ 0.907
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": "(a)"
1132
+ },
1133
+ {
1134
+ "type": "image",
1135
+ "bbox": [
1136
+ 0.161,
1137
+ 0.799,
1138
+ 0.245,
1139
+ 0.896
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": null
1143
+ },
1144
+ {
1145
+ "type": "image_caption",
1146
+ "bbox": [
1147
+ 0.197,
1148
+ 0.897,
1149
+ 0.212,
1150
+ 0.907
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "(b)"
1154
+ },
1155
+ {
1156
+ "type": "image",
1157
+ "bbox": [
1158
+ 0.246,
1159
+ 0.799,
1160
+ 0.331,
1161
+ 0.896
1162
+ ],
1163
+ "angle": 0,
1164
+ "content": null
1165
+ },
1166
+ {
1167
+ "type": "image_caption",
1168
+ "bbox": [
1169
+ 0.295,
1170
+ 0.897,
1171
+ 0.309,
1172
+ 0.907
1173
+ ],
1174
+ "angle": 0,
1175
+ "content": "(c)"
1176
+ },
1177
+ {
1178
+ "type": "image",
1179
+ "bbox": [
1180
+ 0.332,
1181
+ 0.799,
1182
+ 0.416,
1183
+ 0.896
1184
+ ],
1185
+ "angle": 0,
1186
+ "content": null
1187
+ },
1188
+ {
1189
+ "type": "image_caption",
1190
+ "bbox": [
1191
+ 0.365,
1192
+ 0.897,
1193
+ 0.381,
1194
+ 0.907
1195
+ ],
1196
+ "angle": 0,
1197
+ "content": "(d)"
1198
+ },
1199
+ {
1200
+ "type": "image",
1201
+ "bbox": [
1202
+ 0.418,
1203
+ 0.799,
1204
+ 0.503,
1205
+ 0.896
1206
+ ],
1207
+ "angle": 0,
1208
+ "content": null
1209
+ },
1210
+ {
1211
+ "type": "image_caption",
1212
+ "bbox": [
1213
+ 0.445,
1214
+ 0.897,
1215
+ 0.461,
1216
+ 0.907
1217
+ ],
1218
+ "angle": 0,
1219
+ "content": "一"
1220
+ },
1221
+ {
1222
+ "type": "image",
1223
+ "bbox": [
1224
+ 0.503,
1225
+ 0.799,
1226
+ 0.61,
1227
+ 0.896
1228
+ ],
1229
+ "angle": 0,
1230
+ "content": null
1231
+ },
1232
+ {
1233
+ "type": "image_caption",
1234
+ "bbox": [
1235
+ 0.585,
1236
+ 0.897,
1237
+ 0.677,
1238
+ 0.907
1239
+ ],
1240
+ "angle": 0,
1241
+ "content": "(e)"
1242
+ },
1243
+ {
1244
+ "type": "image",
1245
+ "bbox": [
1246
+ 0.612,
1247
+ 0.799,
1248
+ 0.75,
1249
+ 0.896
1250
+ ],
1251
+ "angle": 0,
1252
+ "content": null
1253
+ },
1254
+ {
1255
+ "type": "image_caption",
1256
+ "bbox": [
1257
+ 0.661,
1258
+ 0.897,
1259
+ 0.676,
1260
+ 0.907
1261
+ ],
1262
+ "angle": 0,
1263
+ "content": "(e)"
1264
+ },
1265
+ {
1266
+ "type": "image",
1267
+ "bbox": [
1268
+ 0.751,
1269
+ 0.799,
1270
+ 0.833,
1271
+ 0.896
1272
+ ],
1273
+ "angle": 0,
1274
+ "content": null
1275
+ },
1276
+ {
1277
+ "type": "image",
1278
+ "bbox": [
1279
+ 0.833,
1280
+ 0.799,
1281
+ 0.917,
1282
+ 0.896
1283
+ ],
1284
+ "angle": 0,
1285
+ "content": null
1286
+ },
1287
+ {
1288
+ "type": "image_caption",
1289
+ "bbox": [
1290
+ 0.072,
1291
+ 0.913,
1292
+ 0.924,
1293
+ 0.949
1294
+ ],
1295
+ "angle": 0,
1296
+ "content": "Fig. 2. Missed detection and the results of missed detection compensation in the following cases: (a) The rapid motion of the object. (b) The incomplete appearance of the object to be detected in the camera field of view. (c) The blurred image. (d) The singular angle of view caused by camera rotation. (e) Continuous frame miss detection."
1297
+ }
1298
+ ],
1299
+ [
1300
+ {
1301
+ "type": "image",
1302
+ "bbox": [
1303
+ 0.078,
1304
+ 0.069,
1305
+ 0.921,
1306
+ 0.201
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": null
1310
+ },
1311
+ {
1312
+ "type": "image_caption",
1313
+ "bbox": [
1314
+ 0.072,
1315
+ 0.203,
1316
+ 0.924,
1317
+ 0.24
1318
+ ],
1319
+ "angle": 0,
1320
+ "content": "Fig. 3. Effect of DBSCAN density clustering algorithm in two consecutive frames. The top set of images is taken every 8 frames, and the bottom set of images is taken every 4 frames. The images contain three common states of movement: sitting in a chair, slow motion and fast motion. After clustering, the foreground and background points are shown in red and green respectively."
1321
+ },
1322
+ {
1323
+ "type": "text",
1324
+ "bbox": [
1325
+ 0.072,
1326
+ 0.256,
1327
+ 0.49,
1328
+ 0.484
1329
+ ],
1330
+ "angle": 0,
1331
+ "content": "select two consecutive frames to show the clustering effect. The foreground points are marked with red and the background points are marked with green. The upper image group contains two people sitting on the chair and moving slowly respectively, and the people in the lower image group are in the fast walking state. It is worth noting from Fig.3 that many keypoints are extracted from the edge of the person, which is generally the part with the highest dynamic attributes. However, semantic segmentation is difficult to accurately judge the boundary of objects [12], which leads to the misjudgment of dynamic and static attributes of keypoints. We use DBSCAN algorithm to cluster keypoints based on depth information, which can well avoid this problem. The experimental results fully show the effectiveness and robustness of the missed detection compensation algorithm and clustering algorithm."
1332
+ },
1333
+ {
1334
+ "type": "title",
1335
+ "bbox": [
1336
+ 0.074,
1337
+ 0.491,
1338
+ 0.328,
1339
+ 0.506
1340
+ ],
1341
+ "angle": 0,
1342
+ "content": "B. Comparison with State-of-the-arts"
1343
+ },
1344
+ {
1345
+ "type": "text",
1346
+ "bbox": [
1347
+ 0.072,
1348
+ 0.51,
1349
+ 0.49,
1350
+ 0.571
1351
+ ],
1352
+ "angle": 0,
1353
+ "content": "We contrast with ORB-SLAM2 [13] and forth most advanced dynamic SLAM methods, including DS-SLAM [4], Dyna-SLAM [5], Blitz-SLAM [12] and TRS [11]. Like our method, these algorithms are all improved based on ORB-"
1354
+ },
1355
+ {
1356
+ "type": "text",
1357
+ "bbox": [
1358
+ 0.505,
1359
+ 0.256,
1360
+ 0.925,
1361
+ 0.559
1362
+ ],
1363
+ "angle": 0,
1364
+ "content": "SLAM2. Without calculating the static probability of the object, we provide a lower performance version of the algorithm in this paper with higher real-time performance, which is called CFP-SLAM\\(^{-}\\). The quantitative comparison results are shown in Tables I, II and III, in which the best results are highlighted in bold and the second-best are underlined. The data of DS-SLAM, Dyna-SLAM, Blitz-SLAM and TRS comes from the source literature, / indicates that the corresponding data is not provided in the source literature. The experimental results show that, unlike other dynamic SLAM algorithms, which only have advantages over ORB-SLAM2 in high dynamic scenarios, this algorithm can achieve almost the best results in high dynamic and low dynamic scenarios. Even the low-performance version we provide shows better performance than other algorithms. In rpy sequences, on the one hand, the epipolar constraints cannot be used, on the other hand, the large change of camera angle leads to insufficient feature matching, so our method performs slightly worse. The ATE and RPE plots of our algorithm on 8 sequences are shown in Fig.4."
1365
+ },
1366
+ {
1367
+ "type": "image",
1368
+ "bbox": [
1369
+ 0.083,
1370
+ 0.595,
1371
+ 0.179,
1372
+ 0.707
1373
+ ],
1374
+ "angle": 0,
1375
+ "content": null
1376
+ },
1377
+ {
1378
+ "type": "image_caption",
1379
+ "bbox": [
1380
+ 0.109,
1381
+ 0.711,
1382
+ 0.159,
1383
+ 0.723
1384
+ ],
1385
+ "angle": 0,
1386
+ "content": "(1) s/xyz"
1387
+ },
1388
+ {
1389
+ "type": "image",
1390
+ "bbox": [
1391
+ 0.188,
1392
+ 0.596,
1393
+ 0.283,
1394
+ 0.707
1395
+ ],
1396
+ "angle": 0,
1397
+ "content": null
1398
+ },
1399
+ {
1400
+ "type": "image_caption",
1401
+ "bbox": [
1402
+ 0.217,
1403
+ 0.711,
1404
+ 0.258,
1405
+ 0.722
1406
+ ],
1407
+ "angle": 0,
1408
+ "content": "(2) \\( \\mathrm{s / hs} \\)"
1409
+ },
1410
+ {
1411
+ "type": "image",
1412
+ "bbox": [
1413
+ 0.29,
1414
+ 0.596,
1415
+ 0.387,
1416
+ 0.708
1417
+ ],
1418
+ "angle": 0,
1419
+ "content": null
1420
+ },
1421
+ {
1422
+ "type": "image_caption",
1423
+ "bbox": [
1424
+ 0.314,
1425
+ 0.711,
1426
+ 0.369,
1427
+ 0.722
1428
+ ],
1429
+ "angle": 0,
1430
+ "content": "(3) s/static"
1431
+ },
1432
+ {
1433
+ "type": "image",
1434
+ "bbox": [
1435
+ 0.395,
1436
+ 0.596,
1437
+ 0.49,
1438
+ 0.708
1439
+ ],
1440
+ "angle": 0,
1441
+ "content": null
1442
+ },
1443
+ {
1444
+ "type": "image_caption",
1445
+ "bbox": [
1446
+ 0.422,
1447
+ 0.711,
1448
+ 0.466,
1449
+ 0.723
1450
+ ],
1451
+ "angle": 0,
1452
+ "content": "(4) s/rpy"
1453
+ },
1454
+ {
1455
+ "type": "image",
1456
+ "bbox": [
1457
+ 0.498,
1458
+ 0.596,
1459
+ 0.593,
1460
+ 0.708
1461
+ ],
1462
+ "angle": 0,
1463
+ "content": null
1464
+ },
1465
+ {
1466
+ "type": "image_caption",
1467
+ "bbox": [
1468
+ 0.523,
1469
+ 0.711,
1470
+ 0.575,
1471
+ 0.723
1472
+ ],
1473
+ "angle": 0,
1474
+ "content": "(5) w/xyz"
1475
+ },
1476
+ {
1477
+ "type": "image",
1478
+ "bbox": [
1479
+ 0.602,
1480
+ 0.596,
1481
+ 0.697,
1482
+ 0.708
1483
+ ],
1484
+ "angle": 0,
1485
+ "content": null
1486
+ },
1487
+ {
1488
+ "type": "image_caption",
1489
+ "bbox": [
1490
+ 0.63,
1491
+ 0.711,
1492
+ 0.674,
1493
+ 0.722
1494
+ ],
1495
+ "angle": 0,
1496
+ "content": "(6) w/hs"
1497
+ },
1498
+ {
1499
+ "type": "image",
1500
+ "bbox": [
1501
+ 0.706,
1502
+ 0.596,
1503
+ 0.801,
1504
+ 0.708
1505
+ ],
1506
+ "angle": 0,
1507
+ "content": null
1508
+ },
1509
+ {
1510
+ "type": "image_caption",
1511
+ "bbox": [
1512
+ 0.725,
1513
+ 0.711,
1514
+ 0.785,
1515
+ 0.723
1516
+ ],
1517
+ "angle": 0,
1518
+ "content": "(7) w/static"
1519
+ },
1520
+ {
1521
+ "type": "image",
1522
+ "bbox": [
1523
+ 0.81,
1524
+ 0.596,
1525
+ 0.903,
1526
+ 0.708
1527
+ ],
1528
+ "angle": 0,
1529
+ "content": null
1530
+ },
1531
+ {
1532
+ "type": "image_caption",
1533
+ "bbox": [
1534
+ 0.834,
1535
+ 0.711,
1536
+ 0.883,
1537
+ 0.723
1538
+ ],
1539
+ "angle": 0,
1540
+ "content": "(8) w/ry"
1541
+ },
1542
+ {
1543
+ "type": "image_caption",
1544
+ "bbox": [
1545
+ 0.382,
1546
+ 0.729,
1547
+ 0.614,
1548
+ 0.742
1549
+ ],
1550
+ "angle": 0,
1551
+ "content": "Fig. 4. ATE and RPE from CFP-SLAM."
1552
+ },
1553
+ {
1554
+ "type": "table_caption",
1555
+ "bbox": [
1556
+ 0.295,
1557
+ 0.753,
1558
+ 0.702,
1559
+ 0.781
1560
+ ],
1561
+ "angle": 0,
1562
+ "content": "TABLEI RESULTS OF METRICS ABSOLUTE TRAJECTORY ERROR (ATE)"
1563
+ },
1564
+ {
1565
+ "type": "table",
1566
+ "bbox": [
1567
+ 0.077,
1568
+ 0.785,
1569
+ 0.925,
1570
+ 0.941
1571
+ ],
1572
+ "angle": 0,
1573
+ "content": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">ORB-SLAM2</td><td colspan=\"2\">Dyna-SLAM</td><td colspan=\"2\">DS-SLAM</td><td colspan=\"2\">Blitz-SLAM</td><td colspan=\"2\">TRS</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0092</td><td>0.0047</td><td>0.0127</td><td>0.0060</td><td>/</td><td>/</td><td>0.0148</td><td>0.0069</td><td>0.0117</td><td>/</td><td>0.0129</td><td>0.0068</td><td>0.0090</td><td>0.0042</td></tr><tr><td>fr3/s/half</td><td>0.0192</td><td>0.0110</td><td>0.0186</td><td>0.0086</td><td>/</td><td>/</td><td>0.0160</td><td>0.0076</td><td>0.0172</td><td>/</td><td>0.0159</td><td>0.0072</td><td>0.0147</td><td>0.0069</td></tr><tr><td>fr3/s/static</td><td>0.0087</td><td>0.0042</td><td>/</td><td>/</td><td>0.0065</td><td>0.0033</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0061</td><td>0.0029</td><td>0.0053</td><td>0.0027</td></tr><tr><td>fr3/s/rpy</td><td>0.0195</td><td>0.0124</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0244</td><td>0.0175</td><td>0.0253</td><td>0.0154</td></tr><tr><td>fr3/w/xyz</td><td>0.7214</td><td>0.2560</td><td>0.0164</td><td>0.0086</td><td>0.0247</td><td>0.0161</td><td>0.0153</td><td>0.0078</td><td>0.0194</td><td>/</td><td>0.0149</td><td>0.0077</td><td>0.0141</td><td>0.0072</td></tr><tr><td>fr3/w/half</td><td>0.4667</td><td>0.2601</td><td>0.0296</td><td>0.0157</td><td>0.0303</td><td>0.0159</td><td>0.0256</td><td>0.0126</td><td>0.0290</td><td>/</td><td>0.0235</td><td>0.0114</td><td>0.0237</td><td>0.0114</td></tr><tr><td>fr3/w/static</td><td>0.3872</td><td>0.1636</td><td>0.0068</td><td>0.0032</td><td>0.0081</td><td>0.0036</td><td>0.0102</td><td>0.0052</td><td>0.0111</td><td>/</td><td>0.0069</td><td>0.0032</td><td>0.0066</td><td>0.0030</td></tr><tr><td>fr3/w/rpy</td><td>0.7842</td><td>0.4005</td><td>0.0354</td><td>0.0190</td><td>0.4442</td><td>0.2350</td><td>0.0356</td><td>0.0220</td><td>0.0371</td><td>/</td><td>0.0411</td><td>0.0257</td><td>0.0368</td><td>0.0230</td></tr></table>"
1574
+ }
1575
+ ],
1576
+ [
1577
+ {
1578
+ "type": "table_caption",
1579
+ "bbox": [
1580
+ 0.328,
1581
+ 0.069,
1582
+ 0.671,
1583
+ 0.096
1584
+ ],
1585
+ "angle": 0,
1586
+ "content": "TABLE II RESULTS OF METRIC TRANSLATIONAL DRIFT (RPE)"
1587
+ },
1588
+ {
1589
+ "type": "table",
1590
+ "bbox": [
1591
+ 0.076,
1592
+ 0.1,
1593
+ 0.924,
1594
+ 0.257
1595
+ ],
1596
+ "angle": 0,
1597
+ "content": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">ORB-SLAM2</td><td colspan=\"2\">Dyna-SLAM</td><td colspan=\"2\">DS-SLAM</td><td colspan=\"2\">Blitz-SLAM</td><td colspan=\"2\">TRS</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0117</td><td>0.0060</td><td>0.0142</td><td>0.0073</td><td>/</td><td>/</td><td>0.0144</td><td>0.0071</td><td>0.0166</td><td>/</td><td>0.0149</td><td>0.0081</td><td>0.0114</td><td>0.0055</td></tr><tr><td>fr3/s/half</td><td>0.0231</td><td>0.0163</td><td>0.0239</td><td>0.0120</td><td>/</td><td>/</td><td>0.0165</td><td>0.0073</td><td>0.0259</td><td>/</td><td>0.0214</td><td>0.0099</td><td>0.0162</td><td>0.0079</td></tr><tr><td>fr3/s/static</td><td>0.0090</td><td>0.0043</td><td>/</td><td>/</td><td>0.0078</td><td>0.0038</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0078</td><td>0.0034</td><td>0.0072</td><td>0.0035</td></tr><tr><td>fr3/s/rpy</td><td>0.0245</td><td>0.0144</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0322</td><td>0.0217</td><td>0.0316</td><td>0.0186</td></tr><tr><td>fr3/w/xyz</td><td>0.3944</td><td>0.2964</td><td>0.0217</td><td>0.0119</td><td>0.0333</td><td>0.0229</td><td>0.0197</td><td>0.0096</td><td>0.0234</td><td>/</td><td>0.0196</td><td>0.0099</td><td>0.0190</td><td>0.0097</td></tr><tr><td>fr3/w/half</td><td>0.3480</td><td>0.2859</td><td>0.0284</td><td>0.0149</td><td>0.0297</td><td>0.0152</td><td>0.0253</td><td>0.0123</td><td>0.0423</td><td>/</td><td>0.0274</td><td>0.0130</td><td>0.0259</td><td>0.0128</td></tr><tr><td>fr3/w/static</td><td>0.2349</td><td>0.2151</td><td>0.0089</td><td>0.0044</td><td>0.0102</td><td>0.0048</td><td>0.0129</td><td>0.0069</td><td>0.0117</td><td>/</td><td>0.0092</td><td>0.0043</td><td>0.0089</td><td>0.0040</td></tr><tr><td>fr3/w/rpy</td><td>0.4582</td><td>0.3447</td><td>0.0448</td><td>0.0262</td><td>0.1503</td><td>0.1168</td><td>0.0473</td><td>0.0283</td><td>0.0471</td><td>/</td><td>0.0540</td><td>0.0350</td><td>0.0500</td><td>0.0306</td></tr></table>"
1598
+ },
1599
+ {
1600
+ "type": "table_caption",
1601
+ "bbox": [
1602
+ 0.342,
1603
+ 0.266,
1604
+ 0.655,
1605
+ 0.293
1606
+ ],
1607
+ "angle": 0,
1608
+ "content": "TABLE III RESULTS OF METRIC ROTATIONAL DRIFT (RPE)"
1609
+ },
1610
+ {
1611
+ "type": "table",
1612
+ "bbox": [
1613
+ 0.076,
1614
+ 0.298,
1615
+ 0.924,
1616
+ 0.453
1617
+ ],
1618
+ "angle": 0,
1619
+ "content": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">ORB-SLAM2</td><td colspan=\"2\">Dyna-SLAM</td><td colspan=\"2\">DS-SLAM</td><td colspan=\"2\">Blitz-SLAM</td><td colspan=\"2\">TRS</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.4890</td><td>0.2713</td><td>0.5042</td><td>0.2651</td><td>/</td><td>/</td><td>0.5024</td><td>0.2634</td><td>0.5968</td><td>/</td><td>0.5126</td><td>0.2793</td><td>0.4875</td><td>0.2640</td></tr><tr><td>fr3/s/half</td><td>0.6015</td><td>0.2924</td><td>0.7045</td><td>0.3488</td><td>/</td><td>/</td><td>0.5981</td><td>0.2739</td><td>0.7891</td><td>/</td><td>0.7697</td><td>0.3718</td><td>0.5917</td><td>0.2834</td></tr><tr><td>fr3/s/static</td><td>0.2850</td><td>0.1241</td><td>/</td><td>/</td><td>0.2735</td><td>0.1215</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.2749</td><td>0.1192</td><td>0.2654</td><td>0.1183</td></tr><tr><td>fr3/s/rpy</td><td>0.7772</td><td>0.3999</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.8303</td><td>0.4653</td><td>0.7410</td><td>0.3665</td></tr><tr><td>fr3/w/xyz</td><td>7.7846</td><td>5.8335</td><td>0.6284</td><td>0.3848</td><td>0.8266</td><td>0.5826</td><td>0.6132</td><td>0.3348</td><td>0.6368</td><td>/</td><td>0.6204</td><td>0.3850</td><td>0.6023</td><td>0.3719</td></tr><tr><td>fr3/w/half</td><td>7.2138</td><td>5.8299</td><td>0.7842</td><td>0.4012</td><td>0.8142</td><td>0.4101</td><td>0.7879</td><td>0.3751</td><td>0.9650</td><td>/</td><td>0.7853</td><td>0.3821</td><td>0.7575</td><td>0.3743</td></tr><tr><td>fr3/w/static</td><td>4.1856</td><td>3.8077</td><td>0.2612</td><td>0.1259</td><td>0.2690</td><td>0.1182</td><td>0.3038</td><td>0.1437</td><td>0.2872</td><td>/</td><td>0.2535</td><td>0.1130</td><td>0.2527</td><td>0.1051</td></tr><tr><td>fr3/w/rpy</td><td>8.8923</td><td>6.6658</td><td>0.9894</td><td>0.5701</td><td>3.0042</td><td>2.3065</td><td>1.0841</td><td>0.6668</td><td>1.0587</td><td>/</td><td>1.0521</td><td>0.5577</td><td>1.1084</td><td>0.6722</td></tr></table>"
1620
+ },
1621
+ {
1622
+ "type": "table_caption",
1623
+ "bbox": [
1624
+ 0.174,
1625
+ 0.464,
1626
+ 0.825,
1627
+ 0.491
1628
+ ],
1629
+ "angle": 0,
1630
+ "content": "TABLE IV RESULTS OF METRICS ABSOLUTE TRAJECTORY ERROR (ATE) WITH DIFFERENT CONFIGURATIONS"
1631
+ },
1632
+ {
1633
+ "type": "table",
1634
+ "bbox": [
1635
+ 0.124,
1636
+ 0.495,
1637
+ 0.875,
1638
+ 0.65
1639
+ ],
1640
+ "angle": 0,
1641
+ "content": "<table><tr><td rowspan=\"2\">Sequences</td><td colspan=\"2\">CFP-SLAM</td><td colspan=\"2\">CFP-SLAM-</td><td colspan=\"2\">W/O-MDC</td><td colspan=\"2\">W/O-DBS</td><td colspan=\"2\">W/O-KSP</td><td colspan=\"2\">Only-YOLO</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0090</td><td>0.0042</td><td>0.0129</td><td>0.0068</td><td>0.0123</td><td>0.0066</td><td>0.0130</td><td>0.0060</td><td>0.0142</td><td>0.0063</td><td>0.0174</td><td>0.0079</td></tr><tr><td>fr3/s/half</td><td>0.0147</td><td>0.0069</td><td>0.0159</td><td>0.0072</td><td>0.0150</td><td>0.0074</td><td>0.0305</td><td>0.0179</td><td>0.0201</td><td>0.0089</td><td>0.0281</td><td>0.0158</td></tr><tr><td>fr3/s/static</td><td>0.0053</td><td>0.0027</td><td>0.0061</td><td>0.0029</td><td>0.0055</td><td>0.0025</td><td>0.0064</td><td>0.0030</td><td>0.0062</td><td>0.0030</td><td>0.0064</td><td>0.0027</td></tr><tr><td>fr3/s/rpy</td><td>0.0253</td><td>0.0154</td><td>0.0244</td><td>0.0175</td><td>0.0237</td><td>0.0149</td><td>0.0297</td><td>0.0205</td><td>0.0287</td><td>0.0195</td><td>0.0460</td><td>0.0332</td></tr><tr><td>fr3/w/xyz</td><td>0.0141</td><td>0.0072</td><td>0.0149</td><td>0.0077</td><td>0.0158</td><td>0.0079</td><td>0.0159</td><td>0.0081</td><td>0.0154</td><td>0.0076</td><td>0.0165</td><td>0.0082</td></tr><tr><td>fr3/w/half</td><td>0.0237</td><td>0.0114</td><td>0.0235</td><td>0.0114</td><td>0.0258</td><td>0.0134</td><td>0.0274</td><td>0.0137</td><td>0.0307</td><td>0.0151</td><td>0.0310</td><td>0.0165</td></tr><tr><td>fr3/w/static</td><td>0.0066</td><td>0.0030</td><td>0.0069</td><td>0.0032</td><td>0.0070</td><td>0.0031</td><td>0.0078</td><td>0.0033</td><td>0.0076</td><td>0.0033</td><td>0.0073</td><td>0.0032</td></tr><tr><td>fr3/w/rpy</td><td>0.0368</td><td>0.0230</td><td>0.0411</td><td>0.0257</td><td>0.1910</td><td>0.1594</td><td>0.0749</td><td>0.0536</td><td>0.0405</td><td>0.0211</td><td>0.0456</td><td>0.0312</td></tr></table>"
1642
+ },
1643
+ {
1644
+ "type": "title",
1645
+ "bbox": [
1646
+ 0.074,
1647
+ 0.661,
1648
+ 0.24,
1649
+ 0.675
1650
+ ],
1651
+ "angle": 0,
1652
+ "content": "C. Ablation Experiment"
1653
+ },
1654
+ {
1655
+ "type": "text",
1656
+ "bbox": [
1657
+ 0.072,
1658
+ 0.689,
1659
+ 0.49,
1660
+ 0.856
1661
+ ],
1662
+ "angle": 0,
1663
+ "content": "In order to prove the function of each module of our algorithm, We design a series of ablation experiments, and the experimental results are shown in Table IV. Among them, CFP-SLAM: The algorithm of this paper; CFP-SLAM\\(^{-}\\): Do not use static probability of objects; W/O-MDC: Without missed detection compensation; W/O-DBS: Without DBSCAN clustering; W/O-KSP: Without the static probability of keypoints, that is, all the foreground points after missed detection compensation and DBSCAN clustering are directly eliminated; Only-YOLO: Directly eliminate all keypoints in the box with human category."
1664
+ },
1665
+ {
1666
+ "type": "text",
1667
+ "bbox": [
1668
+ 0.072,
1669
+ 0.858,
1670
+ 0.49,
1671
+ 0.951
1672
+ ],
1673
+ "angle": 0,
1674
+ "content": "The experimental results show that CFP-SLAM\\(^{-}\\) shows worse performance in low dynamic scenes, because we cannot distinguish between high dynamic objects and low dynamic objects, so all objects are processed according to high dynamic. W/O-MDC is almost unaffected in low dynamic scenes, but the performance is very poor in high dynamic scenes,"
1675
+ },
1676
+ {
1677
+ "type": "text",
1678
+ "bbox": [
1679
+ 0.505,
1680
+ 0.661,
1681
+ 0.925,
1682
+ 0.797
1683
+ ],
1684
+ "angle": 0,
1685
+ "content": "especially in w/ropy, when the camera and objects are moving violently. In fact, the tracking is often lost in w/xyz, w/half and w/ropy because of missed detection. W/O-DBS and W/O-KSP show general performance in all sequences, which illustrates the effectiveness of DBSCAN clustering and the limitation of dealing with non-rigid bodies with partial motion as a whole, respectively. Only-YOLO encounters difficulties in initialization due to insufficient features in almost all sequences, and tracking is lost in some sequences."
1686
+ },
1687
+ {
1688
+ "type": "title",
1689
+ "bbox": [
1690
+ 0.507,
1691
+ 0.809,
1692
+ 0.663,
1693
+ 0.823
1694
+ ],
1695
+ "angle": 0,
1696
+ "content": "D. Real-time Analysis"
1697
+ },
1698
+ {
1699
+ "type": "text",
1700
+ "bbox": [
1701
+ 0.505,
1702
+ 0.829,
1703
+ 0.925,
1704
+ 0.951
1705
+ ],
1706
+ "angle": 0,
1707
+ "content": "Real-time performance is one of the important evaluation indexes of SLAM system. We test the average running time of each module, as shown in Table V. EKF represents the missed detection compensation and data association of boxes module, OSP represents the static probability calculation module of objects, and KSP represents the static probability calculation module of keypoints based on the epipolar constraints and the projection constraints. Semantic threads based on YOLOv5s"
1708
+ }
1709
+ ],
1710
+ [
1711
+ {
1712
+ "type": "text",
1713
+ "bbox": [
1714
+ 0.072,
1715
+ 0.075,
1716
+ 0.493,
1717
+ 0.182
1718
+ ],
1719
+ "angle": 0,
1720
+ "content": "run in parallel with ORB feature extraction. The results show that the average processing time per frame for the main threads of CFP-SLAM and CFP-SLAM\\(^{-}\\) is 42.7 ms and 24.77 ms, that is, the running speed reaches 23 Fps and 40 Fps respectively. Compared with the SLAM system based on semantic segmentation, it can better meet the real-time requirements while ensure the accuracy."
1721
+ },
1722
+ {
1723
+ "type": "table_caption",
1724
+ "bbox": [
1725
+ 0.115,
1726
+ 0.193,
1727
+ 0.446,
1728
+ 0.22
1729
+ ],
1730
+ "angle": 0,
1731
+ "content": "TABLEV THE AVERAGE RUNNING TIME OF EACH MODULE."
1732
+ },
1733
+ {
1734
+ "type": "table",
1735
+ "bbox": [
1736
+ 0.088,
1737
+ 0.224,
1738
+ 0.477,
1739
+ 0.263
1740
+ ],
1741
+ "angle": 0,
1742
+ "content": "<table><tr><td>Methods</td><td>YOLO</td><td>EKF</td><td>OSP</td><td>DBSCAN</td><td>KSP</td><td>Tracking</td></tr><tr><td>CFP-SLAM</td><td>12.44</td><td>0.07</td><td>17.93</td><td>1.76</td><td>3.66</td><td>42.7</td></tr><tr><td>CFP-SLAM-</td><td>12.44</td><td>0.07</td><td>/</td><td>1.76</td><td>3.66</td><td>24.77</td></tr></table>"
1743
+ },
1744
+ {
1745
+ "type": "title",
1746
+ "bbox": [
1747
+ 0.217,
1748
+ 0.28,
1749
+ 0.347,
1750
+ 0.293
1751
+ ],
1752
+ "angle": 0,
1753
+ "content": "VI. CONCLUSION"
1754
+ },
1755
+ {
1756
+ "type": "text",
1757
+ "bbox": [
1758
+ 0.072,
1759
+ 0.302,
1760
+ 0.491,
1761
+ 0.468
1762
+ ],
1763
+ "angle": 0,
1764
+ "content": "In this paper, we propose a dynamic scene-oriented visual SLAM algorithm based on YOLOv5s and coarse-to-fine static probability. After missed detection compensation and keypoints clustering, the static probabilities of objects, keypoints and map points are calculated and updated as weights to participate in pose optimization. Extensive evaluation shows that our algorithm achieves the highest accuracy of localization in almost all low dynamic and high dynamic scenes, and has quite high real-time performance. In the future, we intend to build a lightweight plane and object map containing only static environment for robot navigation and augmented reality."
1765
+ },
1766
+ {
1767
+ "type": "title",
1768
+ "bbox": [
1769
+ 0.234,
1770
+ 0.481,
1771
+ 0.33,
1772
+ 0.493
1773
+ ],
1774
+ "angle": 0,
1775
+ "content": "REFERENCES"
1776
+ },
1777
+ {
1778
+ "type": "ref_text",
1779
+ "bbox": [
1780
+ 0.082,
1781
+ 0.504,
1782
+ 0.49,
1783
+ 0.538
1784
+ ],
1785
+ "angle": 0,
1786
+ "content": "[1] M. R. U. Saputra, A. Markham, and N. Trigoni, “Visual slam and structure from motion in dynamic environments: A survey,” ACM Computing Surveys (CSUR), vol. 51, no. 2, pp. 1–36, 2018."
1787
+ },
1788
+ {
1789
+ "type": "ref_text",
1790
+ "bbox": [
1791
+ 0.082,
1792
+ 0.538,
1793
+ 0.49,
1794
+ 0.584
1795
+ ],
1796
+ "angle": 0,
1797
+ "content": "[2] F. Zhong, S. Wang, Z. Zhang, and Y. Wang, \"Detect-slam: Making object detection and slam mutually beneficial,\" in 2018 IEEE Winter Conference on Applications of Computer Vision (WACV). IEEE, 2018, pp. 1001-1010."
1798
+ },
1799
+ {
1800
+ "type": "ref_text",
1801
+ "bbox": [
1802
+ 0.082,
1803
+ 0.584,
1804
+ 0.49,
1805
+ 0.629
1806
+ ],
1807
+ "angle": 0,
1808
+ "content": "[3] L. Xiao, J. Wang, X. Qiu, Z. Rong, and X. Zou, \"Dynamic-slam: Semantic monocular visual localization and mapping based on deep learning in dynamic environment,\" Robotics and Autonomous Systems, vol. 117, pp. 1-16, 2019."
1809
+ },
1810
+ {
1811
+ "type": "ref_text",
1812
+ "bbox": [
1813
+ 0.082,
1814
+ 0.629,
1815
+ 0.49,
1816
+ 0.675
1817
+ ],
1818
+ "angle": 0,
1819
+ "content": "[4] C. Yu, Z. Liu, X.-J. Liu, F. Xie, Y. Yang, Q. Wei, and Q. Fei, \"Dsslam: A semantic visual slam towards dynamic environments,\" in 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2018, pp. 1168-1174."
1820
+ },
1821
+ {
1822
+ "type": "ref_text",
1823
+ "bbox": [
1824
+ 0.082,
1825
+ 0.675,
1826
+ 0.49,
1827
+ 0.709
1828
+ ],
1829
+ "angle": 0,
1830
+ "content": "[5] B. Bescos, J. M. Fácil, J. Civera, and J. Neira, “Dynoslam: Tracking, mapping, and inpainting in dynamic scenes,” IEEE Robotics and Automation Letters, vol. 3, no. 4, pp. 4076–4083, 2018."
1831
+ },
1832
+ {
1833
+ "type": "ref_text",
1834
+ "bbox": [
1835
+ 0.082,
1836
+ 0.709,
1837
+ 0.49,
1838
+ 0.755
1839
+ ],
1840
+ "angle": 0,
1841
+ "content": "[6] N. Brasch, A. Bozic, J. Lallemand, and F. Tombari, \"Semantic monocular slam for highly dynamic environments,\" in 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2018, pp. 393-400."
1842
+ },
1843
+ {
1844
+ "type": "ref_text",
1845
+ "bbox": [
1846
+ 0.082,
1847
+ 0.755,
1848
+ 0.49,
1849
+ 0.8
1850
+ ],
1851
+ "angle": 0,
1852
+ "content": "[7] K. Wang, Y. Lin, L. Wang, L. Han, M. Hua, X. Wang, S. Lian, and B. Huang, “A unified framework for mutual improvement of slam and semantic segmentation,�� in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 5224–5230."
1853
+ },
1854
+ {
1855
+ "type": "ref_text",
1856
+ "bbox": [
1857
+ 0.082,
1858
+ 0.8,
1859
+ 0.49,
1860
+ 0.835
1861
+ ],
1862
+ "angle": 0,
1863
+ "content": "[8] X. Yuan and S. Chen, \"Sad-slam: A visual slam based on semantic and depth information,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 4930-4935."
1864
+ },
1865
+ {
1866
+ "type": "ref_text",
1867
+ "bbox": [
1868
+ 0.082,
1869
+ 0.835,
1870
+ 0.49,
1871
+ 0.88
1872
+ ],
1873
+ "angle": 0,
1874
+ "content": "[9] J. Vincent, M. Labbe, J.-S. Lauzon, F. Grondin, P.-M. Comtois-Rivet, and F. Michaud, \"Dynamic object tracking and masking for visual slam,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 4974-4979."
1875
+ },
1876
+ {
1877
+ "type": "ref_text",
1878
+ "bbox": [
1879
+ 0.077,
1880
+ 0.88,
1881
+ 0.49,
1882
+ 0.914
1883
+ ],
1884
+ "angle": 0,
1885
+ "content": "[10] A. Li, J. Wang, M. Xu, and Z. Chen, “Dp-slam: A visual slam with moving probability towards dynamic environments,” Information Sciences, vol. 556, pp. 128-142, 2021."
1886
+ },
1887
+ {
1888
+ "type": "ref_text",
1889
+ "bbox": [
1890
+ 0.077,
1891
+ 0.914,
1892
+ 0.49,
1893
+ 0.949
1894
+ ],
1895
+ "angle": 0,
1896
+ "content": "[11] T. Ji, C. Wang, and L. Xie, \"Towards real-time semantic rgb-d slam in dynamic environments,\" in 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2021, pp. 11 175-11 181."
1897
+ },
1898
+ {
1899
+ "type": "list",
1900
+ "bbox": [
1901
+ 0.077,
1902
+ 0.504,
1903
+ 0.49,
1904
+ 0.949
1905
+ ],
1906
+ "angle": 0,
1907
+ "content": null
1908
+ },
1909
+ {
1910
+ "type": "ref_text",
1911
+ "bbox": [
1912
+ 0.51,
1913
+ 0.077,
1914
+ 0.923,
1915
+ 0.111
1916
+ ],
1917
+ "angle": 0,
1918
+ "content": "[12] Y. Fan, Q. Zhang, Y. Tang, S. Liu, and H. Han, \"Blitz-slam: A semantic slam in dynamic environments,\" Pattern Recognition, vol. 121, p. 108225, 2022."
1919
+ },
1920
+ {
1921
+ "type": "ref_text",
1922
+ "bbox": [
1923
+ 0.51,
1924
+ 0.112,
1925
+ 0.923,
1926
+ 0.145
1927
+ ],
1928
+ "angle": 0,
1929
+ "content": "[13] R. Mur-Artal and J. D. Tardós, \"Orb-slam2: An open-source slam system for monocular, stereo, and rgb-d cameras,\" IEEE transactions on robotics, vol. 33, no. 5, pp. 1255-1262, 2017."
1930
+ },
1931
+ {
1932
+ "type": "ref_text",
1933
+ "bbox": [
1934
+ 0.51,
1935
+ 0.145,
1936
+ 0.923,
1937
+ 0.179
1938
+ ],
1939
+ "angle": 0,
1940
+ "content": "[14] S. Li and D. Lee, \"Rgb-d slam in dynamic environments using static point weighting,\" IEEE Robotics and Automation Letters, vol. 2, no. 4, pp. 2263-2270, 2017."
1941
+ },
1942
+ {
1943
+ "type": "ref_text",
1944
+ "bbox": [
1945
+ 0.51,
1946
+ 0.179,
1947
+ 0.923,
1948
+ 0.214
1949
+ ],
1950
+ "angle": 0,
1951
+ "content": "[15] Y. Sun, M. Liu, and M. Q.-H. Meng, \"Improving rgb-d slam in dynamic environments: A motion removal approach,\" Robotics and Autonomous Systems, vol. 89, pp. 110-122, 2017."
1952
+ },
1953
+ {
1954
+ "type": "ref_text",
1955
+ "bbox": [
1956
+ 0.51,
1957
+ 0.214,
1958
+ 0.923,
1959
+ 0.236
1960
+ ],
1961
+ "angle": 0,
1962
+ "content": "[16] ——, “Motion removal for reliable rgb-d slam in dynamic environments,” Robotics and Autonomous Systems, vol. 108, pp. 115–128, 2018."
1963
+ },
1964
+ {
1965
+ "type": "ref_text",
1966
+ "bbox": [
1967
+ 0.51,
1968
+ 0.236,
1969
+ 0.923,
1970
+ 0.281
1971
+ ],
1972
+ "angle": 0,
1973
+ "content": "[17] R. Scona, M. Jaimez, Y. R. Petillot, M. Fallon, and D. Cremers, \"Staticfusion: Background reconstruction for dense rgb-d slam in dynamic environments,\" in 2018 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2018, pp. 3849-3856."
1974
+ },
1975
+ {
1976
+ "type": "ref_text",
1977
+ "bbox": [
1978
+ 0.51,
1979
+ 0.281,
1980
+ 0.923,
1981
+ 0.316
1982
+ ],
1983
+ "angle": 0,
1984
+ "content": "[18] G. Liu, W. Zeng, B. Feng, and F. Xu, \"Dms-slam: A general visual slam system for dynamic scenes with multiple sensors,\" Sensors, vol. 19, no. 17, p. 3714, 2019."
1985
+ },
1986
+ {
1987
+ "type": "ref_text",
1988
+ "bbox": [
1989
+ 0.51,
1990
+ 0.316,
1991
+ 0.923,
1992
+ 0.361
1993
+ ],
1994
+ "angle": 0,
1995
+ "content": "[19] J. Bian, W.-Y. Lin, Y. Matsushita, S.-K. Yeung, T.-D. Nguyen, and M.-M. Cheng, \"Gms: Grid-based motion statistics for fast, ultra-robust feature correspondence,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 4181-4190."
1996
+ },
1997
+ {
1998
+ "type": "ref_text",
1999
+ "bbox": [
2000
+ 0.51,
2001
+ 0.361,
2002
+ 0.923,
2003
+ 0.395
2004
+ ],
2005
+ "angle": 0,
2006
+ "content": "[20] D.-H. Kim and J.-H. Kim, \"Effective background model-based rgb-d dense visual odometry in a dynamic environment,\" IEEE Transactions on Robotics, vol. 32, no. 6, pp. 1565-1573, 2016."
2007
+ },
2008
+ {
2009
+ "type": "ref_text",
2010
+ "bbox": [
2011
+ 0.51,
2012
+ 0.395,
2013
+ 0.923,
2014
+ 0.429
2015
+ ],
2016
+ "angle": 0,
2017
+ "content": "[21] W. Dai, Y. Zhang, P. Li, Z. Fang, and S. Scherer, \"Rgb-d slam in dynamic environments using point correlations,\" IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020."
2018
+ },
2019
+ {
2020
+ "type": "ref_text",
2021
+ "bbox": [
2022
+ 0.51,
2023
+ 0.429,
2024
+ 0.923,
2025
+ 0.473
2026
+ ],
2027
+ "angle": 0,
2028
+ "content": "[22] T. Zhang, H. Zhang, Y. Li, Y. Nakamura, and L. Zhang, \"Flowfusion: Dynamic dense rgb-d slam based on optical flow,\" in 2020 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2020, pp. 7322-7328."
2029
+ },
2030
+ {
2031
+ "type": "ref_text",
2032
+ "bbox": [
2033
+ 0.51,
2034
+ 0.473,
2035
+ 0.923,
2036
+ 0.519
2037
+ ],
2038
+ "angle": 0,
2039
+ "content": "[23] V. Badrinarayanan, A. Kendall, and R. Cipolla, \"Segnet: A deep convolutional encoder-decoder architecture for image segmentation,\" IEEE transactions on pattern analysis and machine intelligence, vol. 39, no. 12, pp. 2481-2495, 2017."
2040
+ },
2041
+ {
2042
+ "type": "ref_text",
2043
+ "bbox": [
2044
+ 0.51,
2045
+ 0.519,
2046
+ 0.923,
2047
+ 0.553
2048
+ ],
2049
+ "angle": 0,
2050
+ "content": "[24] K. He, G. Gkioxari, P. Dollár, and R. Girshick, “Mask r-cnn,” in Proceedings of the IEEE international conference on computer vision, 2017, pp. 2961–2969."
2051
+ },
2052
+ {
2053
+ "type": "ref_text",
2054
+ "bbox": [
2055
+ 0.51,
2056
+ 0.553,
2057
+ 0.923,
2058
+ 0.587
2059
+ ],
2060
+ "angle": 0,
2061
+ "content": "[25] N. Dvornik, K. Shmelkov, J. Mairal, and C. Schmid, \"Blitznet: A real-time deep network for scene understanding,\" in Proceedings of the IEEE international conference on computer vision, 2017, pp. 4154-4162."
2062
+ },
2063
+ {
2064
+ "type": "ref_text",
2065
+ "bbox": [
2066
+ 0.51,
2067
+ 0.587,
2068
+ 0.923,
2069
+ 0.633
2070
+ ],
2071
+ "angle": 0,
2072
+ "content": "[26] J. Sturm, N. Engelhard, F. Endres, W. Burgard, and D. Cremers, “A benchmark for the evaluation of rgb-d slam systems,” in 2012 IEEE/RSJ international conference on intelligent robots and systems. IEEE, 2012, pp. 573–580."
2073
+ },
2074
+ {
2075
+ "type": "list",
2076
+ "bbox": [
2077
+ 0.51,
2078
+ 0.077,
2079
+ 0.923,
2080
+ 0.633
2081
+ ],
2082
+ "angle": 0,
2083
+ "content": null
2084
+ }
2085
+ ]
2086
+ ]
2202.01xxx/2202.01938/0bdc1864-0877-4963-a9de-68c2b5f8ab9e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6ba38932e4a9520c239f139b439f1886a562802e10090a197f62300ff8614c8
3
+ size 10292921
2202.01xxx/2202.01938/full.md ADDED
@@ -0,0 +1,330 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # CFP-SLAM: A Real-time Visual SLAM Based on Coarse-to-Fine Probability in Dynamic Environments
2
+
3
+ Xinggang Hu $^{1}$ , Yunzhou Zhang $^{1*}$ , Zhenzhong Cao $^{1}$ , Rong Ma $^{2}$ , Yanmin Wu $^{3}$ , Zhiqiang Deng $^{1}$ , Wenkai Sun $^{1}$
4
+
5
+ Abstract—Dynamic factors in the environment violate the static environment assumption of SLAM and degrade camera localization accuracy. Recent works generally combine semantic constraints and geometric constraints to deal with dynamic objects, but problems remain, such as poor real-time performance, treating people as rigid bodies, and poor performance in low dynamic scenes. In this paper, a dynamic scene-oriented visual SLAM algorithm based on object detection and coarse-to-fine static probability, named CFP-SLAM, is proposed. The algorithm combines semantic constraints and geometric constraints to calculate the static probability of objects, keypoints and map points, and uses them as weights in camera pose estimation. Extensive evaluations show that our approach achieves almost the best results in both high dynamic and low dynamic scenarios compared with state-of-the-art dynamic SLAM methods, while maintaining high real-time performance.
6
+
7
+ # I. INTRODUCTION
8
+
9
+ Simultaneous localization and mapping (SLAM) is a key technology for autonomous navigation of mobile robots, and it is widely applied in autonomous driving, UAVs and augmented reality (AR). SLAM systems rely on the static environment assumption [1]; dynamic factors introduce erroneous observations, make it difficult to establish the geometric constraints on which SLAM depends, and reduce the accuracy and robustness of the system. The outlier rejection mechanism of the RANSAC (Random Sample Consensus) algorithm can handle a limited number of outliers in static or slightly dynamic environments. However, when dynamic objects occupy most of the camera view, RANSAC has little effect.
10
+
11
+ With the development of deep learning, researchers have in recent years used semantic constraints to address the visual SLAM problem in dynamic environments. The general approach is to take the semantic information obtained from object detection [2], [3] or semantic segmentation [4]-[12] as a prior and to eliminate the dynamic objects in the environment in combination with geometric constraints. Semantic
12
+
13
+ segmentation can provide a fine pixel-level object mask, but its real-time performance is poor. Improving segmentation accuracy and robustness often comes at a huge computational cost. Even so, the segmentation boundary cannot be perfectly accurate and cannot completely cover the moving object [12]. Object detection can circumvent the problems above, but the detected boxes contain a large number of background points, and some complex cases are easily missed [3]. In addition, there are two common problems with current schemes: 1) All dynamic objects are treated as highly dynamic, which leads to poor performance in low dynamic scenes. 2) As non-rigid objects, human bodies often move only partially. Directly eliminating the human body as a whole reduces the keypoint constraints and degrades localization accuracy.
14
+
15
+ To address the above problems, we propose CFP-SLAM, a high-performance, high-efficiency visual SLAM system for indoor dynamic environments based on object detection and static probability. Built on ORB-SLAM2 [13], CFP-SLAM uses YOLOv5 to obtain semantic information, uses an extended Kalman filter (EKF) and the Hungarian algorithm to compensate for missed detections, calculates the static probability of objects to distinguish high dynamic objects from low dynamic objects, and separates foreground points from background points inside the detected boxes with the DBSCAN (Density-Based Spatial Clustering of Applications with Noise) algorithm. Built on a variety of constraints, a two-stage, coarse-to-fine calculation method for the static probability of keypoints is designed, and the static probability of keypoints is used as a weight in camera pose optimization. Considering the needs of different scenarios, we also provide a lighter version that skips the static probability of objects to further improve real-time performance.
16
+
17
+ Extensive experiments are conducted on public datasets. Compared with state-of-the-art dynamic SLAM methods, our approach achieves the highest localization accuracy in almost all low dynamic and high dynamic scenarios. The main contributions of this paper are as follows:
18
+
19
+ - The static probability of objects. The static probability of potential moving objects is estimated to distinguish high dynamic objects from low dynamic objects, which are then handled with different strategies, so as to improve the robustness and adaptability of the SLAM system.
20
+
21
+ - The static probability of keypoints from coarse to fine. A two-stage static probability of keypoints calculation method based on the static probability of object, the DBSCAN clustering algorithm, the epipolar constraints and the projection constraints is proposed to solve the problem of false deletion of static keypoints caused by non-rigid body local motion.
22
+
23
+ # II. RELATED WORK
24
+
25
+ # A. Dynamic SLAM without Prior Semantic Information
26
+
27
+ When there is no semantic information as a prior, using reliable constraints to find correct feature matches is the basic way to deal with the dynamic SLAM problem. Li et al. [14] propose a static weighting method for keyframe edge points and integrate it into the IAICP method to reduce tracking error. Sun et al. [15] roughly detect the motion of moving objects based on ego-motion-compensated image differencing, and enhance the motion detection by tracking the motion with a particle filter. They later [16] propose a novel online motion removal approach for RGB-D data that builds and updates the foreground model incrementally. StaticFusion [17] simultaneously estimates the camera motion and a probabilistic static/dynamic segmentation of the current RGB-D image pair. DMS-SLAM [18] uses GMS [19] to eliminate mismatched points. Kim et al. [20] propose a background-model-based dense visual odometry method that estimates a nonparametric background model from the depth scenes. Dai et al. [21] distinguish dynamic and static map points based on feature correlation. FlowFusion [22] uses optical flow residuals to highlight dynamic regions in RGB-D point clouds. Because no deep learning network is needed to provide semantic priors, the above methods are usually fast in dealing with dynamic factors, but they lack accuracy.
28
+
29
+ # B. Dynamic SLAM Based on Semantic Constraints
30
+
31
+ Semantic segmentation or object detection can provide a steady and reliable prior constraint for dynamic SLAM. Detect-SLAM [2] detects objects in keyframes and propagates the motion probability of keypoints in real time to eliminate the influence of dynamic objects in SLAM. DS-SLAM [4] uses SegNet [23] to obtain semantic information and combines sparse optical flow with motion consistency detection to judge the dynamic and static attributes of people. Dyna-SLAM [5] combines Mask R-CNN [24] and multi-view geometry to process moving objects. Brasch et al. [6] present a monocular SLAM approach for highly dynamic environments which models dynamic outliers with a joint probabilistic model based on semantic prior information predicted by a CNN. With the help of the initial segmentation results, Wang et al. [7] refine a rough pose into an accurate one by identifying and processing moving and potentially moving objects separately, which further helps to compensate for errors and boundary inaccuracies of the segmented regions. Dynamic-SLAM [3] compensates SSD for missed detections based on the speed invariance between adjacent frames, and eliminates dynamic
32
+
33
+ objects with a selective tracking algorithm. SaD-SLAM [8] extracts static feature points from objects judged as dynamic by semantics, by verifying whether the inter-frame feature points satisfy the epipolar constraints. Vincent et al. [9] perform semantic segmentation of object instances in the image and use an EKF to identify, track and remove dynamic objects from the scene. DP-SLAM [10] combines the results of geometric constraints and semantic segmentation, and tracks dynamic keypoints in a Bayesian probability estimation framework. Ji et al. [11] perform semantic segmentation only on keyframes, cluster the depth map, and identify moving objects with the help of re-projection errors to remove known and unknown dynamic objects. Blitz-SLAM [12] repairs the mask of BlitzNet [25] based on depth information, and classifies static and dynamic matching points in potentially dynamic areas using epipolar constraints. Generally, the above methods can accurately eliminate dynamic objects in the environment, but it is difficult for them to achieve both localization accuracy and real-time performance, and their performance is generally poor in low dynamic scenes.
34
+
35
+ # III. SYSTEM OVERVIEW
36
+
37
+ # A. Definition of Variables
38
+
39
+ In this paper, common variables are defined as follows:
40
+
41
+ - $F_{k}$ - Frame $k$.
42
+ - $K$ - The intrinsic matrix of a pinhole camera model.
43
+ - $T_{k,w} \in R^{4 \times 4}$ - The transformation from world frame to camera frame $\mathbf{K}$ , which is composed of a rotation $R_{k,w} \in R^{3 \times 3}$ and a translation $t_{k,w} \in R^{3 \times 1}$ .
44
+ - $P_{i}^{k}$ - The keypoint with ID $i$ in $F_{k}$ . Its pixel coordinate is $P_{i_{uv}}^{k} = \left[u_{i}^{k}, v_{i}^{k}\right]^{T}$ , camera coordinate is $P_{i_{k}}^{k} = \left[X_{i_{k}}^{k}, Y_{i_{k}}^{k}, Z_{i_{k}}^{k}\right]^{T}$ , world coordinate is $P_{i_{w}}^{k} = \left[X_{i_{w}}^{k}, Y_{i_{w}}^{k}, Z_{i_{w}}^{k}\right]^{T}$ . $\widetilde{(\cdot)}$ denotes the homogeneous-coordinate form in each coordinate system.
45
+ - $P_{i^*}^{k - 1}$ - The keypoint with ID $i^*$ in $F_{k - 1}$ which forms a matching relationship with $P_{i}^{k}$ .
46
+ - $O_{i+}^{k}$ - The static probability of potential moving object with ID $i^{+}$ . $P_{i}^{k}$ is the extracted keypoint on the object.
47
+ - $O_{Th}$ - The threshold to distinguish whether the object motion attribute is high dynamic or low dynamic.
48
+ - $K_{i}^{k}$ - The static probability of $P_{i}^{k}$ , which is in the update state and participates in camera pose optimization.
49
+ - $K_{i}^{Dk}, K_{i}^{Tk}, K_{i}^{Fk}$ - The static probability of $P_{i}^{k}$ obtained by the DBSCAN clustering algorithm, the projection constraints and the epipolar constraints respectively.
50
+ - $M_{i - }^{k}$ - The static probability of the map point forming a matching relationship with $P_{i}^{k}$ .
51
+
52
+ # B. System Architecture
53
+
54
+ The overview of CFP-SLAM is shown in Fig. 1. Based on ORB-SLAM2 [13], we design a complete framework for calculating and updating the static probability of keypoints from multiple constraints to deal with the influence of moving objects in dynamic environments. The system obtains semantic information with YOLOv5, compensates for missed detections with EKF and the Hungarian algorithm, and then associates the boxes between adjacent frames. In
55
+
56
+ ![](images/da155394ba58c7e9876e7c943df7fae3c5177943673b05a76d36a65e18717ed5.jpg)
57
+ Fig. 1. The overview of CFP-SLAM. The green portion and the purple portion are the input and output modules of the system respectively. The yellow portion is the semantic module, including object detection, missed detection compensation, and data association. The orange portion and the blue portion are static probability calculation modules for two stages of keypoints, respectively. In the first stage, the rough static probability of keypoints is calculated based on the static probability of objects and the results of DBSCAN clustering. In the second stage, based on the epipolar constraint and projection constraint, and considering the static probability of the object and the data association result of the box, the accurate static probability of feature points is calculated. During the whole process, the static probability of the map points is maintained and updated, and together with the static probability of the keypoints will be used as weight to participate in pose optimization.
58
+
59
+ $F_{k}$ , we only calculate and update the static probability of the keypoints inside the potential moving object boxes. Firstly, the static probability of each potential moving object $O_{i+}^{k}$ is obtained using optical flow and the epipolar constraints, and the object is classified as high dynamic or low dynamic. $K_{i}^{k}$ is initialized to the static probability of the object to which the keypoint belongs. Then, foreground points and background points are distinguished and $K_{i}^{Dk}$ is calculated from the DBSCAN clustering results, and $K_{i}^{k}$ is updated to estimate the camera pose in the first stage, yielding $T_{k,w}$ . Next, $K_{i}^{Tk}$ and $K_{i}^{Fk}$ are obtained using the projection constraints and the epipolar constraints, and $K_{i}^{k}$ and $M_{i-}^{k}$ are updated and used as weights in camera pose optimization to obtain a more accurate $T_{k,w}$ .
60
+
61
+ # IV. SPECIFIC IMPLEMENTATION
62
+
63
+ # A. Missed Detection Compensation Algorithm
64
+
65
+ When processing dynamic objects, if the semantic prior is suddenly missing in some frames, then on the one hand the subsequent methods based on semantic priors cannot process the dynamic objects; on the other hand, the sudden appearance of dynamic objects in high dynamic scenes sharply increases the number of incorrectly matched keypoints between adjacent frames, which causes the SLAM system to lose tracking in high dynamic scenarios. Therefore, stable and accurate semantic information is critical.
66
+
67
+ In order to solve the missed detection problem of YOLOv5, we introduce EKF and Hungarian algorithm to compensate the missed detection of potential moving objects. EKF is used to predict the boxes of potential moving objects in $F_{k}$ , while the Hungarian algorithm is used to correlate the predicted boxes with the boxes detected by YOLOv5. If the predicted box does not find a matching detected box, it could be considered that $F_{k}$ has missed detection, and the prediction result of EKF
68
+
69
+ is adopted to compensate the missed detection result. After missed detection compensation, EKF and Hungarian algorithm are used again for inter frame data association of boxes.
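+
+ To make this step concrete, the following is a minimal illustrative sketch (not the authors' implementation) of associating EKF-predicted boxes with YOLOv5 detections via the Hungarian algorithm and adopting unmatched predictions as compensated boxes. The IoU-based cost, the matching threshold and the function names are assumptions; the paper does not specify them.
+
+ ```python
+ import numpy as np
+ from scipy.optimize import linear_sum_assignment
+
+ def iou(a, b):
+     """Intersection-over-union of two boxes given as [x1, y1, x2, y2]."""
+     x1, y1 = max(a[0], b[0]), max(a[1], b[1])
+     x2, y2 = min(a[2], b[2]), min(a[3], b[3])
+     inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
+     union = ((a[2] - a[0]) * (a[3] - a[1])
+              + (b[2] - b[0]) * (b[3] - b[1]) - inter)
+     return inter / union if union > 0 else 0.0
+
+ def associate_and_compensate(predicted_boxes, detected_boxes, iou_thresh=0.3):
+     """Match EKF-predicted boxes against YOLO detections (Hungarian algorithm).
+     Predictions left without a matching detection are treated as missed
+     detections, and the EKF prediction is kept as the compensated box."""
+     if len(predicted_boxes) == 0:
+         return list(detected_boxes)
+     if len(detected_boxes) == 0:
+         return list(predicted_boxes)      # every prediction compensates a miss
+     cost = np.array([[1.0 - iou(p, d) for d in detected_boxes]
+                      for p in predicted_boxes])
+     rows, cols = linear_sum_assignment(cost)
+     matched = {r for r, c in zip(rows, cols) if 1.0 - cost[r, c] >= iou_thresh}
+     boxes = list(detected_boxes)
+     boxes += [p for i, p in enumerate(predicted_boxes) if i not in matched]
+     return boxes
+ ```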
70
+
71
+ # B. Static Probability of Objects
72
+
73
+ When calculating the static probability of each potential moving object, we follow the idea of DS-SLAM [4] to solve the fundamental matrix $LF_{k,k - 1}$ and obtain the epipolar error $Ld_{i}^{F_{k,k - 1}}$ . We use the epipolar constraints and the chi-square distribution to test the epipolar error. Since the pixel coordinates of a matching point pair obtained by optical flow tracking have $k = 2$ degrees of freedom, if the errors are assumed to follow the Gaussian distribution $N(0,1)$ , then according to the chi-square distribution:
74
+
75
+ $$
76
+ \operatorname{chsq}(x; k) = \left\{ \begin{array}{ll} \frac{x^{k/2 - 1} e^{-x/2}}{2^{k/2} \Gamma\left(\frac{k}{2}\right)}, & x > 0 \\ 0, & x \leq 0 \end{array} \right. \tag{1}
77
+ $$
78
+
79
+ The definition of the function $\Gamma(v)$ is:
80
+
81
+ $$
82
+ \Gamma(v) = \int_{0}^{\infty} e^{-t} t^{v-1} \, dt, \quad \operatorname{Re} v > 0 \tag{2}
83
+ $$
84
+
85
+ The single estimation result of $O_{i^{+}}^{k}$ can be obtained:
86
+
87
+ $$
88
+ \left(O_{i^{+}}^{k}\right)_{m} = \operatorname{chsq}\left(\left(Ld_{i}^{F_{k,k-1}}\right)^{2}; 2\right) \tag{3}
89
+ $$
90
+
91
+ After the estimates are obtained from all optical flow point pairs belonging to the object, they are sorted in ascending order. Let the number of estimates be $M$ ; the average of the values of $(O_{i+}^{k})_{m}$ at the $0.1M$ , $0.2M$ and $0.3M$ positions of the ranking is taken as the estimated object static probability $O_{i+}^{k}$ .
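+
+ The following sketch illustrates Eqs. (1)-(3) and the order-statistic averaging, assuming the epipolar errors of the optical-flow point pairs belonging to one object are already available; the function name and the use of SciPy are illustrative choices rather than the authors' code.
+
+ ```python
+ import numpy as np
+ from scipy.stats import chi2
+
+ def object_static_probability(epipolar_errors):
+     """epipolar_errors: epipolar errors Ld_i of the optical-flow point pairs
+     that belong to one potential moving object."""
+     errs = np.asarray(epipolar_errors, dtype=float)
+     # Eq. (3): chi-square density (k = 2) evaluated at the squared error.
+     # Note: chi2.sf(errs**2, 2) is a proportional alternative (= 2 * pdf)
+     # that keeps the value in [0, 1]; which one the authors use is unclear.
+     estimates = np.sort(chi2.pdf(errs ** 2, df=2))
+     m = len(estimates)
+     # Average of the estimates at the 0.1M, 0.2M and 0.3M ranking positions.
+     idx = [int(0.1 * m), int(0.2 * m), int(0.3 * m)]
+     return float(np.mean(estimates[idx]))
+ ```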
92
+
93
+ According to the calculation result of the static probability of the object and the real motion of the object, and taking into account that the negative effect of the dynamic point is
94
+
95
+ generally greater than the positive effect of additional static constraints when the camera pose is estimated, we set $O_{Th} = 0.9$ . The object motion attributes are divided into high dynamic and low dynamic and are provided to the subsequent modules as prior information for processing with different strategies. The static probability of all keypoints in the box of a potential moving object is initialized to $O_{i^{+}}^{k}$ , and the static probability of all other keypoints is initialized to 1.0.
96
+
97
+ # C. Static Probability of Keypoints in the First Stage
98
+
99
+ 1) DBSCAN Density Clustering Algorithm: Compared with semantic segmentation methods, object detection has great advantages in real-time performance, but it cannot provide an accurate object mask. In indoor dynamic SLAM scenes, this means that the boxes classified as people contain a large amount of static background, and falsely deleting static keypoints reduces the constraints available for camera pose optimization and thus the accuracy of camera pose estimation. We observe that a person, as the foreground and a non-rigid body, has good depth continuity and usually a large depth discontinuity with respect to the background. To this end, we use the DBSCAN density clustering algorithm to distinguish between the foreground and background points of boxes classified as people.
100
+
101
+ We adaptively determine $eps$ (the neighborhood radius of the DBSCAN density clustering algorithm) and $minPts$ (the threshold on the number of samples in the neighborhood). After clustering, the cluster with the lowest average depth among $\mathbf{C} = \{C_1,C_2,\dots ,C_k\}$ is taken as the foreground points of the box.
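+
+ A minimal sketch of the depth-based foreground/background separation is given below. The paper does not state its exact adaptive rule for $eps$ and $minPts$, so the heuristics and the function name here are placeholder assumptions; only the rule that the lowest-depth cluster is taken as the foreground comes from the text.
+
+ ```python
+ import numpy as np
+ from sklearn.cluster import DBSCAN
+
+ def split_foreground(depths, eps=None, min_pts=None):
+     """depths: depth values of the keypoints inside a 'person' box.
+     Returns a boolean mask that is True for foreground (person) keypoints."""
+     d = np.asarray(depths, dtype=float).reshape(-1, 1)
+     # Placeholder heuristics; the paper determines eps and minPts adaptively.
+     eps = eps if eps is not None else max(0.05, 0.1 * float(np.median(d)))
+     min_pts = min_pts if min_pts is not None else max(4, len(d) // 20)
+     labels = DBSCAN(eps=eps, min_samples=min_pts).fit_predict(d)
+     clusters = [c for c in np.unique(labels) if c != -1]   # drop DBSCAN noise
+     if not clusters:
+         return np.zeros(len(d), dtype=bool)
+     # The cluster with the lowest mean depth is taken as the foreground.
+     fg = min(clusters, key=lambda c: float(d[labels == c].mean()))
+     return labels == fg
+ ```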
102
+
103
+ After obtaining the DBSCAN clustering results, we adopt a soft strategy to further estimate the static probability of background points in the box of a potential moving object. Obviously, the static probability of background points must be greater than that of the object, and it is positively correlated with the static probability of the object. The static probability of background points derived from the DBSCAN clustering is specified as:
104
+
105
+ $$
106
+ K _ {i} ^ {D k} = \left\{ \begin{array}{c} \frac {1 - O _ {T h}}{\left(O _ {T h}\right) ^ {4}} \left(K _ {i} ^ {k}\right) ^ {3} + 1, O _ {i +} ^ {k} \leq O _ {T h} \\ \frac {1}{K _ {i} ^ {k}}, \quad O _ {i +} ^ {k} > O _ {T h} \end{array} \right. \tag {4}
107
+ $$
108
+
109
+ Considering that the static probability of each keypoint has not yet been strictly calculated, in other words, that the static probability of the keypoints is still coarse at this stage, and that camera pose estimation is vulnerable to dynamic points, we set the static probability of all foreground points in the boxes of high dynamic objects to 0.
110
+
111
+ 2) First Stage Pose Optimization: Update the static probability of keypoints:
112
+
113
+ $$
114
+ K _ {i} ^ {k} = K _ {i} ^ {k} \times K _ {i} ^ {D k} \tag {5}
115
+ $$
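+
+ Putting Eq. (4) and Eq. (5) together, the coarse first-stage update of a keypoint's static probability can be sketched as follows; the branch handling of foreground points is an illustrative reading of the text above, not the authors' code.
+
+ ```python
+ def coarse_keypoint_probability(k_i, obj_static_prob, is_foreground, o_th=0.9):
+     """Sketch of the coarse first-stage update of K_i^k (Eqs. (4)-(5)).
+     k_i is initialized to the static probability of the enclosing object."""
+     high_dynamic = obj_static_prob <= o_th
+     if is_foreground:
+         # Foreground points of high dynamic objects are suppressed entirely;
+         # foreground points of low dynamic objects keep their initial value.
+         return 0.0 if high_dynamic else k_i
+     # Background points: Eq. (4) soft probability, then the update of Eq. (5).
+     if high_dynamic:
+         k_d = (1.0 - o_th) / (o_th ** 4) * (k_i ** 3) + 1.0
+     else:
+         k_d = 1.0 / k_i
+     return k_i * k_d
+ ```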
116
+
117
+ When initializing the SLAM system, map points will be created. At this time, the static probability of map point $M_{i}^{k}$ will be initialized to the static probability of corresponding keypoint $K_{i}^{k}$ . In the frame after initialization, $K_{i}^{k}$ and $M_{i}^{k}$ are used as weights to optimize the camera pose, and the camera pose estimation value $T_{k,w}$ in the first stage is obtained. Then, the static probability of $P_{i}^{k}$ , which has a matching relation
118
+
119
+ with the keypoints in $F_{k - 1}$ , is calculated precisely based on the projection constraints and the epipolar constraints.
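+
+ The role of the static probabilities as weights can be illustrated with a simple weighted reprojection-error minimization. ORB-SLAM2 performs this optimization inside its g2o-based bundle adjustment; the SciPy least-squares formulation below is only an illustrative stand-in, and the robust-loss parameters are assumptions.
+
+ ```python
+ import numpy as np
+ from scipy.optimize import least_squares
+ from scipy.spatial.transform import Rotation as R
+
+ def project(K, rvec, t, pts_w):
+     """Project Nx3 world points into the image with pose (rvec, t)."""
+     Xc = R.from_rotvec(rvec).apply(pts_w) + t          # world -> camera
+     uvw = (K @ Xc.T).T
+     return uvw[:, :2] / uvw[:, 2:3]
+
+ def estimate_pose(K, pts_w, pts_uv, weights, rvec0, t0):
+     """pts_w: Nx3 map points, pts_uv: Nx2 observations,
+     weights: per-point static probabilities in [0, 1] used as weights."""
+     w = np.sqrt(np.repeat(weights, 2))                 # weight u and v residuals
+
+     def residuals(x):
+         err = project(K, x[:3], x[3:], pts_w) - pts_uv
+         return w * err.ravel()
+
+     x0 = np.hstack([rvec0, t0])
+     sol = least_squares(residuals, x0, loss="huber", f_scale=2.0)
+     return sol.x[:3], sol.x[3:]
+ ```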
120
+
121
+ # D. Static Probability of Keypoints in the Second Stage
122
+
123
+ 1) Static Probability Based on the Projection Constraints: Convert the $P_{i^*}^{k-1}$ from the pixel coordinate to the camera coordinate:
124
+
125
+ $$
126
+ P_{i^{*}_{k-1}}^{k-1} = Z_{i^{*}_{k-1}}^{k-1} K^{-1} \widetilde{P_{i^{*}_{uv}}^{k-1}} \tag{6}
127
+ $$
128
+
129
+ Transform and project $P_{i_{k-1}^*}^{k-1}$ to $F_k$ , and the Euclidean distance between the projection point and $P_i^k$ is:
130
+
131
+ $$
132
+ d_{i}^{T} = \left\| P_{i_{uv}}^{k} - \left. \left( \frac{1}{\left| T_{k,k-1} \widetilde{P_{i^{*}_{k-1}}^{k-1}} \right|_{Z}} K \left| T_{k,k-1} \widetilde{P_{i^{*}_{k-1}}^{k-1}} \right|_{XYZ} \right) \right|_{uv} \right\|_{2} \tag{7}
133
+ $$
134
+
135
+ where $|P|_{Z}$ denotes the z-axis coordinate of point $P$ , and $|P|_{XYZ}$ denotes the non-homogeneous coordinate form of point $P$ . Provided that the camera pose $T_{k,w}$ is relatively accurate, the greater $d_{i}^{T}$ , the more likely it is that $P_{i}^{k}$ and $P_{i^{*}}^{k-1}$ are mismatched. Based on this principle, we design a static probability model based on the projection constraints. After sorting the $d_{i}^{T}$ of all keypoints outside the dynamic object boxes in $F_{k}$ in ascending order, the value at the 0.8 quantile position is taken as the adaptive threshold $D_{Th}^{T}$ of the projection error, and the minimum value $d_{min}^{T}$ of $d_{i}^{T}$ is obtained. We use a sigmoid function to measure the static probability of matched keypoints inside the box:
136
+
137
+ $$
138
+ K _ {i} ^ {T k} = \frac {1}{1 + e ^ {\left(d _ {i} ^ {T} - D _ {T h} ^ {T}\right) \times \frac {5}{D _ {T h} ^ {T} - d _ {\operatorname* {m i n}} ^ {T}}}} \tag {8}
139
+ $$
140
+
141
+ For a pair of matching points, the satisfaction of the projection constraints is not only related to whether the corresponding spatial points strictly meet the static environment assumption, but also directly related to the number of constraints when solving the pose matrix and whether the pose matrix itself is correctly solved. Therefore, the statistical confidence $C_s^{Tk}$ and calculation confidence $C_c^{Tk}$ of the pose matrix are introduced:
142
+
143
+ $$
144
+ C _ {S} ^ {T k} = \frac {1}{1 + e ^ {- N _ {B A} + 0 . 5 T h _ {B A}}} \tag {9}
145
+ $$
146
+
147
+ $$
148
+ C _ {C} ^ {T k} = 1 - \frac {\sum d _ {i} ^ {T}}{N _ {T} \times D _ {T h} ^ {T}} \tag {10}
149
+ $$
150
+
151
+ where $N_{BA}$ is the number of inliers obtained in the last camera pose solution, the threshold $Th_{BA}$ is the minimum number of inliers required for solving the camera pose, and $N_{T}$ and $\sum d_i^T$ respectively denote the number of all sample points and the sum of the $d_i^T$ satisfying $d_i^T < D_{Th}^T$ .
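+
+ A sketch of Eqs. (7)-(10) is given below. The 4x4 relative pose input, the reading of $N_T$ as the number of all sample points, and the function names are assumptions made for illustration; the adaptive 0.8-quantile threshold and the sigmoid form follow the text.
+
+ ```python
+ import numpy as np
+
+ def projection_errors(K, T_prev_to_cur, pts_cam_prev, pts_uv_cur):
+     """pts_cam_prev: Nx3 back-projected keypoints of frame k-1 (camera coords),
+     pts_uv_cur: Nx2 matched pixel coordinates in frame k. Returns d_i^T (Eq. 7)."""
+     Xc = (T_prev_to_cur[:3, :3] @ pts_cam_prev.T).T + T_prev_to_cur[:3, 3]
+     uv = (K @ Xc.T).T
+     uv = uv[:, :2] / uv[:, 2:3]
+     return np.linalg.norm(uv - pts_uv_cur, axis=1)
+
+ def projection_static_probability(d_in_box, d_out_box, n_ba, th_ba):
+     """d_in_box: projection errors of matched keypoints inside the object box;
+     d_out_box: projection errors of keypoints outside dynamic-object boxes."""
+     d_in_box = np.asarray(d_in_box, dtype=float)
+     d_out_box = np.asarray(d_out_box, dtype=float)
+     d_th = float(np.quantile(d_out_box, 0.8))        # adaptive threshold D_Th^T
+     d_min = float(d_out_box.min())
+     # Eq. (8): sigmoid-shaped static probability of the in-box keypoints.
+     k_t = 1.0 / (1.0 + np.exp((d_in_box - d_th) * 5.0 / (d_th - d_min)))
+     # Eq. (9): statistical confidence of the pose matrix.
+     c_s = 1.0 / (1.0 + np.exp(-n_ba + 0.5 * th_ba))
+     # Eq. (10): calculation confidence; N_T read here as the number of all
+     # samples, with the sum over errors below the threshold (assumption).
+     below = d_out_box[d_out_box < d_th]
+     c_c = 1.0 - below.sum() / (len(d_out_box) * d_th)
+     return k_t, c_s, c_c
+ ```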
152
+
153
+ 2) Static Probability Based on the Epipolar Constraints: Based on the camera pose estimation $T_{k,w}$ in the first stage, a more accurate fundamental matrix can be calculated:
154
+
155
+ $$
156
+ F _ {k, k - 1} = \mathrm {K} ^ {- \mathrm {T}} \left(t _ {k, k - 1}\right) ^ {\wedge} R _ {k, k - 1} \mathrm {K} ^ {- 1} \tag {11}
157
+ $$
158
+
159
+ The epipolar line $l_i^k = \left[A_i^k, B_i^k, C_i^k\right]^T$ corresponding to $P_i^k$ is:
160
+
161
+ $$
162
+ l _ {i} ^ {k} = F _ {k, k - 1} \widetilde {P _ {i _ {u v} ^ {*}} ^ {k - 1}} \tag {12}
163
+ $$
164
+
165
+ Then the epipolar error $d_i^F$ is:
166
+
167
+ $$
168
+ d _ {i} ^ {F} = \frac {\left| \left(\widetilde {P _ {i _ {\mathrm {u v}}} ^ {k}}\right) ^ {T} l _ {i} ^ {k} \right|}{\sqrt {\left(A _ {i} ^ {k}\right) ^ {2} + \left(B _ {i} ^ {k}\right) ^ {2}}} \tag {13}
169
+ $$
170
+
171
+ Similar to the projection constraints, we calculate static probability and confidence based on the epipolar constraints to obtain $K_{i}^{Fk}$ , the statistical confidence $C_s^{Fk}$ and calculation confidence $C_c^{Fk}$ of the fundamental matrix.
172
+
173
+ It should be noted that, as implied by Eq. (11), the fundamental matrix cannot be reliably obtained when the camera translation is not large enough. Therefore, when the camera translation is smaller than the set threshold $t_{Th}$ , the calculation of static probability and confidence based on the epipolar constraints is skipped, that is:
174
+
175
+ $$
176
+ K _ {i} ^ {F k} = 0, C _ {S} ^ {F k} = C _ {C} ^ {F k} = 0 \quad \text {s . t .} \| t _ {k, k - 1} \| _ {2} \leq t _ {T h} \tag {14}
177
+ $$
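+
+ The epipolar term of Eqs. (11)-(14) can be sketched as follows; the translation threshold value and the function names are illustrative assumptions, and turning the resulting errors into $K_i^{Fk}$ with its confidences mirrors the projection case above.
+
+ ```python
+ import numpy as np
+
+ def skew(t):
+     """Skew-symmetric matrix of a 3-vector, i.e. the (t)^ operator."""
+     return np.array([[0.0, -t[2], t[1]],
+                      [t[2], 0.0, -t[0]],
+                      [-t[1], t[0], 0.0]])
+
+ def epipolar_errors(K, R_rel, t_rel, uv_prev, uv_cur):
+     """uv_prev/uv_cur: Nx2 matched pixel coordinates in frames k-1 and k."""
+     F = np.linalg.inv(K).T @ skew(t_rel) @ R_rel @ np.linalg.inv(K)   # Eq. (11)
+     prev_h = np.hstack([uv_prev, np.ones((len(uv_prev), 1))])
+     cur_h = np.hstack([uv_cur, np.ones((len(uv_cur), 1))])
+     lines = (F @ prev_h.T).T                                          # Eq. (12)
+     num = np.abs(np.sum(cur_h * lines, axis=1))
+     den = np.sqrt(lines[:, 0] ** 2 + lines[:, 1] ** 2)
+     return num / den                                                  # Eq. (13)
+
+ def maybe_epipolar_errors(K, R_rel, t_rel, uv_prev, uv_cur, t_th=0.01):
+     # Eq. (14): skip the epipolar term when the translation is too small.
+     # The threshold value t_th is an assumption; the paper does not give it.
+     if np.linalg.norm(t_rel) <= t_th:
+         return None
+     return epipolar_errors(K, R_rel, t_rel, uv_prev, uv_cur)
+ ```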
178
+
179
+ 3) Second Stage Pose Optimization: After calculating the static probability of the keypoints based on the projection constraints and the epipolar constraints, we update the static probability of the $P_{i}^{k}$ that match keypoints in $F_{k - 1}$ a second time. When an object is highly dynamic, the negative impact of dynamic points on camera pose estimation is generally greater than the positive impact of additional static point constraints, and the opposite holds when the object is lowly dynamic. This is because ORB-SLAM2 has certain outlier suppression strategies which can handle dynamic disturbances in low dynamic cases but do not work in high dynamic cases. So, when $O_{i + }^{k}\leq O_{Th}$ ,
180
+
181
+ $$
182
+ K _ {i} ^ {k} = \left\{ \begin{array}{c} K _ {i} ^ {T k} \times K _ {i} ^ {F k}, \| t _ {k, k - 1} \| _ {2} > t _ {T h} \\ K _ {i} ^ {T k}, \| t _ {k, k - 1} \| _ {2} \leq t _ {T h} \end{array} \right. \tag {15}
183
+ $$
184
+
185
+ when $O_{i^{+}}^{k} > O_{Th}$
186
+
187
+ $$
188
+ K _ {i} ^ {k} = \frac {K _ {i} ^ {T k} \times C _ {s} ^ {T k} C _ {c} ^ {T k}}{C _ {s} ^ {T k} C _ {c} ^ {T k} + C _ {s} ^ {F k} C _ {c} ^ {F k}} + \frac {K _ {i} ^ {F k} \times C _ {s} ^ {F k} C _ {c} ^ {F k}}{C _ {s} ^ {T k} C _ {c} ^ {T k} + C _ {s} ^ {F k} C _ {c} ^ {F k}} \tag {16}
189
+ $$
190
+
191
+ After missed detection compensation, we use EKF and the Hungarian algorithm to associate the boxes of potential moving objects between adjacent frames. If a box in $F_{k}$ has no associated box in $F_{k-1}$ , then even if the foreground points inside it have matches, these are generally false matches, so we set $K_{i}^{k} = 0$ in this case. For a $P_{i}^{k}$ that has no match among the keypoints in $F_{k-1}$ , according to the DBSCAN clustering results, if $P_{i}^{k}$ belongs to the foreground points we set $K_{i}^{k} = 0$ , otherwise $K_{i}^{k} = M_{i-}^{k}$ . After the second estimate of $K_{i}^{k}$ is obtained, $M_{i-}^{k}$ is updated, and the map point is deleted when $M_{i-}^{k} < 0.3$ . Then $K_{i}^{k}$ and $M_{i-}^{k}$ are used as weights in the second stage of camera pose optimization. When there is a large difference between $K_{i}^{k}$ and $M_{i-}^{k}$ , the keypoint and the map point are considered mismatched and are excluded from the optimization.
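+
+ The second-stage fusion of the two cues (Eqs. (15)-(16)) then reduces to a few lines; the function below is an illustrative sketch with assumed argument names.
+
+ ```python
+ def fuse_static_probability(k_t, k_f, c_s_t, c_c_t, c_s_f, c_c_f,
+                             obj_static_prob, o_th=0.9, translation_ok=True):
+     """Second-stage update of K_i^k from the projection term (k_t) and the
+     epipolar term (k_f) with their confidences."""
+     if obj_static_prob <= o_th:               # high dynamic object, Eq. (15)
+         return k_t * k_f if translation_ok else k_t
+     # Low dynamic object, Eq. (16): confidence-weighted average of both cues.
+     w_t, w_f = c_s_t * c_c_t, c_s_f * c_c_f
+     den = max(w_t + w_f, 1e-12)               # guard against zero confidences
+     return (k_t * w_t + k_f * w_f) / den
+ ```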
192
+
193
+ # V. EXPERIMENTS AND RESULTS
194
+
195
+ In this section, we test the performance of the proposed algorithm in 8 dynamic sequences of the TUM RGB-D dataset [26], including 4 low dynamic sequences (fr3/s for short) and 4 high dynamic sequences (fr3/w for short), and the camera includes 4 kinds of motion: static, xyz, halfsphere and rpy. The indicators used to evaluate the accuracy are the Absolute Trajectory Error (ATE) and the Relative Pose Error (RPE). ATE represents the global consistency of trajectory. RPE includes translation drift and rotation drift. The Root-Mean-Square-Error (RMSE) and Standard Deviation (S.D.) of both are used to represent the robustness and stability of the system [12]. Firstly, we show the effect of missed detection compensation and DBSCAN clustering, then compare our method with some of the most advanced methods, then design a series of ablation experiments to test the impact of each module, and finally carry out real-time analysis. All the experiments are performed on a computer with Intel i7 CPU, 3060 GPU, and 16GB memory.
196
+
197
+ # A. Missed Detection Compensation and DBSCAN Clustering
198
+
199
+ In dynamic SLAM scenes, object motion, incomplete appearance of the object in the camera field of view, blurred images, and unusual viewing angles caused by camera rotation all pose severe challenges to object detection, making missed detections very likely, even over consecutive frames. Fig. 2(a)-(d) and Fig. 2(e) show the results of missed detection compensation in the above four cases and over six consecutive frames, respectively. Fig. 3 shows the DBSCAN clustering results after missed detection compensation. We
200
+
201
+ ![](images/ff1c128d6060c985c297fe42728df4a5ea51389605249d24213710281f93db46.jpg)
202
+ (a)
203
+
204
+ ![](images/ae834cf0f6c63fa0e7dc213482997505c1fea05ec74c87aacc7e4794b21b1adf.jpg)
205
+ (b)
206
+
207
+ ![](images/0427ddbf5b50ee8fc050748aa0f0184808fc468cffcc04cde63405a5271317c3.jpg)
208
+ (c)
209
+ Fig. 2. Missed detection and the results of missed detection compensation in the following cases: (a) The rapid motion of the object. (b) The incomplete appearance of the object to be detected in the camera field of view. (c) The blurred image. (d) The singular angle of view caused by camera rotation. (e) Continuous frame miss detection.
210
+
211
+ ![](images/40465acd668c315979a165895cb55a3666db26e5e967cee37939dafed3bf6ec6.jpg)
212
+ (d)
213
+
214
+ ![](images/b47b37538225d2e83e23b798d40329bd8daef0367cc597ad249a8b48fcd8c9d5.jpg)
215
+
216
+
217
+ ![](images/ca42b56ae6fd6070b60ad474a7a62727b097a101c88fdc00b874a0e3efb5e7c0.jpg)
218
+ (e)
219
+
220
+ ![](images/187fa9b67dc9c5fbeec0440a020315a87daa9a4e0f1f7489e3e61ccd6790173d.jpg)
221
+
222
+ (e)
223
+
224
+ ![](images/d433b01f04df1c7c67175d3f265d49c00c5e339fa012d7d8cfd9b8aa5523bade.jpg)
225
+
226
+ ![](images/244acb79e6241bdcf3cf4a65cc2d1bc730a11ab9eb22f8182784bfb2d3b198e8.jpg)
227
+
228
+ ![](images/6abd595501a021b81a916629174dc23a3d7d1c4b52c8bf1d33b7b8956ab91b28.jpg)
229
+ Fig. 3. Effect of DBSCAN density clustering algorithm in two consecutive frames. The top set of images is taken every 8 frames, and the bottom set of images is taken every 4 frames. The images contain three common states of movement: sitting in a chair, slow motion and fast motion. After clustering, the foreground and background points are shown in red and green respectively.
230
+
231
+ select two consecutive frames to show the clustering effect. The foreground points are marked in red and the background points in green. The upper image group contains two people, one sitting on a chair and one moving slowly, while the people in the lower image group are walking quickly. It is worth noting from Fig. 3 that many keypoints are extracted on the edges of the person, which is generally the part with the highest dynamic attributes. However, it is difficult for semantic segmentation to accurately delineate object boundaries [12], which leads to misjudging the dynamic and static attributes of keypoints. We cluster keypoints with the DBSCAN algorithm based on depth information, which avoids this problem. The experimental results demonstrate the effectiveness and robustness of the missed detection compensation and clustering algorithms.
232
+
233
+ # B. Comparison with State-of-the-arts
234
+
235
+ We compare with ORB-SLAM2 [13] and four state-of-the-art dynamic SLAM methods, namely DS-SLAM [4], Dyna-SLAM [5], Blitz-SLAM [12] and TRS [11]. Like our method, these algorithms are all improved based on ORB-
236
+
237
+ SLAM2. By skipping the calculation of the object static probability, we also provide a lower-accuracy version of our algorithm with higher real-time performance, called CFP-SLAM $^{-}$ . The quantitative comparison results are shown in Tables I, II and III, in which the best results are highlighted in bold and the second-best are underlined. The data of DS-SLAM, Dyna-SLAM, Blitz-SLAM and TRS come from their original papers; / indicates that the corresponding data is not provided there. The experimental results show that, unlike other dynamic SLAM algorithms, which only have advantages over ORB-SLAM2 in high dynamic scenarios, our algorithm achieves almost the best results in both high dynamic and low dynamic scenarios. Even the lower-accuracy version outperforms the other algorithms. In the rpy sequences, on the one hand the epipolar constraints cannot be used, and on the other hand the large change of camera angle leads to insufficient feature matches, so our method performs slightly worse. The ATE and RPE plots of our algorithm on the 8 sequences are shown in Fig. 4.
238
+
239
+ ![](images/2fa6d7ad37630c63a897a09f659b769b268329f0273af69d60d0315e42c24442.jpg)
240
+ (1) s/xyz
241
+
242
+ ![](images/348266f84dfd5e413286dd35b997165aa4961552e24f64843d1a5255d2f22d49.jpg)
243
+ (2) $\mathrm{s / hs}$
244
+
245
+ ![](images/b1eabac0e68b499317a8f0b5fe276e08fdcf621d5ac263e5bcd6e8345a3b9bf2.jpg)
246
+ (3) s/static
247
+
248
+ ![](images/8b00b35846db96edfae173a0095f3649ef611cc95adeb2f7cd40c75b84d25472.jpg)
249
+ (4) s/rpy
250
+
251
+ ![](images/5cdf1d68258e15e403fcb1547006337fbf974f5d402103fdcaf94578734daf45.jpg)
252
+ (5) w/xyz
253
+
254
+ ![](images/b5315d7d3d888d3489640b43b6d81d2676315ec3a42c64157b57639b7d8e015d.jpg)
255
+ (6) w/hs
256
+ Fig. 4. ATE and RPE from CFP-SLAM.
257
+
258
+ ![](images/f54e3add35c00d7baf106a28f0ee521980f8d13b8f24376e0c5e6ca2f398daf7.jpg)
259
+ (7) w/static
260
+
261
+ ![](images/ebb61adcbc4fb1813a6b49dd665c1637a510b31492d3cd6f14fd09fa182120e0.jpg)
262
+ (8) w/ry
263
+
264
+ TABLE I RESULTS OF METRIC ABSOLUTE TRAJECTORY ERROR (ATE)
265
+
266
+ <table><tr><td rowspan="2">Sequences</td><td colspan="2">ORB-SLAM2</td><td colspan="2">Dyna-SLAM</td><td colspan="2">DS-SLAM</td><td colspan="2">Blitz-SLAM</td><td colspan="2">TRS</td><td colspan="2">CFP-SLAM-</td><td colspan="2">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0092</td><td>0.0047</td><td>0.0127</td><td>0.0060</td><td>/</td><td>/</td><td>0.0148</td><td>0.0069</td><td>0.0117</td><td>/</td><td>0.0129</td><td>0.0068</td><td>0.0090</td><td>0.0042</td></tr><tr><td>fr3/s/half</td><td>0.0192</td><td>0.0110</td><td>0.0186</td><td>0.0086</td><td>/</td><td>/</td><td>0.0160</td><td>0.0076</td><td>0.0172</td><td>/</td><td>0.0159</td><td>0.0072</td><td>0.0147</td><td>0.0069</td></tr><tr><td>fr3/s/static</td><td>0.0087</td><td>0.0042</td><td>/</td><td>/</td><td>0.0065</td><td>0.0033</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0061</td><td>0.0029</td><td>0.0053</td><td>0.0027</td></tr><tr><td>fr3/s/rpy</td><td>0.0195</td><td>0.0124</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0244</td><td>0.0175</td><td>0.0253</td><td>0.0154</td></tr><tr><td>fr3/w/xyz</td><td>0.7214</td><td>0.2560</td><td>0.0164</td><td>0.0086</td><td>0.0247</td><td>0.0161</td><td>0.0153</td><td>0.0078</td><td>0.0194</td><td>/</td><td>0.0149</td><td>0.0077</td><td>0.0141</td><td>0.0072</td></tr><tr><td>fr3/w/half</td><td>0.4667</td><td>0.2601</td><td>0.0296</td><td>0.0157</td><td>0.0303</td><td>0.0159</td><td>0.0256</td><td>0.0126</td><td>0.0290</td><td>/</td><td>0.0235</td><td>0.0114</td><td>0.0237</td><td>0.0114</td></tr><tr><td>fr3/w/static</td><td>0.3872</td><td>0.1636</td><td>0.0068</td><td>0.0032</td><td>0.0081</td><td>0.0036</td><td>0.0102</td><td>0.0052</td><td>0.0111</td><td>/</td><td>0.0069</td><td>0.0032</td><td>0.0066</td><td>0.0030</td></tr><tr><td>fr3/w/rpy</td><td>0.7842</td><td>0.4005</td><td>0.0354</td><td>0.0190</td><td>0.4442</td><td>0.2350</td><td>0.0356</td><td>0.0220</td><td>0.0371</td><td>/</td><td>0.0411</td><td>0.0257</td><td>0.0368</td><td>0.0230</td></tr></table>
267
+
268
+ TABLE II RESULTS OF METRIC TRANSLATIONAL DRIFT (RPE)
269
+
270
+ <table><tr><td rowspan="2">Sequences</td><td colspan="2">ORB-SLAM2</td><td colspan="2">Dyna-SLAM</td><td colspan="2">DS-SLAM</td><td colspan="2">Blitz-SLAM</td><td colspan="2">TRS</td><td colspan="2">CFP-SLAM-</td><td colspan="2">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0117</td><td>0.0060</td><td>0.0142</td><td>0.0073</td><td>/</td><td>/</td><td>0.0144</td><td>0.0071</td><td>0.0166</td><td>/</td><td>0.0149</td><td>0.0081</td><td>0.0114</td><td>0.0055</td></tr><tr><td>fr3/s/half</td><td>0.0231</td><td>0.0163</td><td>0.0239</td><td>0.0120</td><td>/</td><td>/</td><td>0.0165</td><td>0.0073</td><td>0.0259</td><td>/</td><td>0.0214</td><td>0.0099</td><td>0.0162</td><td>0.0079</td></tr><tr><td>fr3/s/static</td><td>0.0090</td><td>0.0043</td><td>/</td><td>/</td><td>0.0078</td><td>0.0038</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0078</td><td>0.0034</td><td>0.0072</td><td>0.0035</td></tr><tr><td>fr3/s/rpy</td><td>0.0245</td><td>0.0144</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.0322</td><td>0.0217</td><td>0.0316</td><td>0.0186</td></tr><tr><td>fr3/w/xyz</td><td>0.3944</td><td>0.2964</td><td>0.0217</td><td>0.0119</td><td>0.0333</td><td>0.0229</td><td>0.0197</td><td>0.0096</td><td>0.0234</td><td>/</td><td>0.0196</td><td>0.0099</td><td>0.0190</td><td>0.0097</td></tr><tr><td>fr3/w/half</td><td>0.3480</td><td>0.2859</td><td>0.0284</td><td>0.0149</td><td>0.0297</td><td>0.0152</td><td>0.0253</td><td>0.0123</td><td>0.0423</td><td>/</td><td>0.0274</td><td>0.0130</td><td>0.0259</td><td>0.0128</td></tr><tr><td>fr3/w/static</td><td>0.2349</td><td>0.2151</td><td>0.0089</td><td>0.0044</td><td>0.0102</td><td>0.0048</td><td>0.0129</td><td>0.0069</td><td>0.0117</td><td>/</td><td>0.0092</td><td>0.0043</td><td>0.0089</td><td>0.0040</td></tr><tr><td>fr3/w/rpy</td><td>0.4582</td><td>0.3447</td><td>0.0448</td><td>0.0262</td><td>0.1503</td><td>0.1168</td><td>0.0473</td><td>0.0283</td><td>0.0471</td><td>/</td><td>0.0540</td><td>0.0350</td><td>0.0500</td><td>0.0306</td></tr></table>
271
+
272
+ TABLE III RESULTS OF METRIC ROTATIONAL DRIFT (RPE)
273
+
274
+ <table><tr><td rowspan="2">Sequences</td><td colspan="2">ORB-SLAM2</td><td colspan="2">Dyna-SLAM</td><td colspan="2">DS-SLAM</td><td colspan="2">Blitz-SLAM</td><td colspan="2">TRS</td><td colspan="2">CFP-SLAM-</td><td colspan="2">CFP-SLAM</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.4890</td><td>0.2713</td><td>0.5042</td><td>0.2651</td><td>/</td><td>/</td><td>0.5024</td><td>0.2634</td><td>0.5968</td><td>/</td><td>0.5126</td><td>0.2793</td><td>0.4875</td><td>0.2640</td></tr><tr><td>fr3/s/half</td><td>0.6015</td><td>0.2924</td><td>0.7045</td><td>0.3488</td><td>/</td><td>/</td><td>0.5981</td><td>0.2739</td><td>0.7891</td><td>/</td><td>0.7697</td><td>0.3718</td><td>0.5917</td><td>0.2834</td></tr><tr><td>fr3/s/static</td><td>0.2850</td><td>0.1241</td><td>/</td><td>/</td><td>0.2735</td><td>0.1215</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.2749</td><td>0.1192</td><td>0.2654</td><td>0.1183</td></tr><tr><td>fr3/s/rpy</td><td>0.7772</td><td>0.3999</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>/</td><td>0.8303</td><td>0.4653</td><td>0.7410</td><td>0.3665</td></tr><tr><td>fr3/w/xyz</td><td>7.7846</td><td>5.8335</td><td>0.6284</td><td>0.3848</td><td>0.8266</td><td>0.5826</td><td>0.6132</td><td>0.3348</td><td>0.6368</td><td>/</td><td>0.6204</td><td>0.3850</td><td>0.6023</td><td>0.3719</td></tr><tr><td>fr3/w/half</td><td>7.2138</td><td>5.8299</td><td>0.7842</td><td>0.4012</td><td>0.8142</td><td>0.4101</td><td>0.7879</td><td>0.3751</td><td>0.9650</td><td>/</td><td>0.7853</td><td>0.3821</td><td>0.7575</td><td>0.3743</td></tr><tr><td>fr3/w/static</td><td>4.1856</td><td>3.8077</td><td>0.2612</td><td>0.1259</td><td>0.2690</td><td>0.1182</td><td>0.3038</td><td>0.1437</td><td>0.2872</td><td>/</td><td>0.2535</td><td>0.1130</td><td>0.2527</td><td>0.1051</td></tr><tr><td>fr3/w/rpy</td><td>8.8923</td><td>6.6658</td><td>0.9894</td><td>0.5701</td><td>3.0042</td><td>2.3065</td><td>1.0841</td><td>0.6668</td><td>1.0587</td><td>/</td><td>1.0521</td><td>0.5577</td><td>1.1084</td><td>0.6722</td></tr></table>
275
+
276
+ TABLE IV RESULTS OF METRICS ABSOLUTE TRAJECTORY ERROR (ATE) WITH DIFFERENT CONFIGURATIONS
277
+
278
+ <table><tr><td rowspan="2">Sequences</td><td colspan="2">CFP-SLAM</td><td colspan="2">CFP-SLAM-</td><td colspan="2">W/O-MDC</td><td colspan="2">W/O-DBS</td><td colspan="2">W/O-KSP</td><td colspan="2">Only-YOLO</td></tr><tr><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td><td>RMSE</td><td>S.D.</td></tr><tr><td>fr3/s/xyz</td><td>0.0090</td><td>0.0042</td><td>0.0129</td><td>0.0068</td><td>0.0123</td><td>0.0066</td><td>0.0130</td><td>0.0060</td><td>0.0142</td><td>0.0063</td><td>0.0174</td><td>0.0079</td></tr><tr><td>fr3/s/half</td><td>0.0147</td><td>0.0069</td><td>0.0159</td><td>0.0072</td><td>0.0150</td><td>0.0074</td><td>0.0305</td><td>0.0179</td><td>0.0201</td><td>0.0089</td><td>0.0281</td><td>0.0158</td></tr><tr><td>fr3/s/static</td><td>0.0053</td><td>0.0027</td><td>0.0061</td><td>0.0029</td><td>0.0055</td><td>0.0025</td><td>0.0064</td><td>0.0030</td><td>0.0062</td><td>0.0030</td><td>0.0064</td><td>0.0027</td></tr><tr><td>fr3/s/rpy</td><td>0.0253</td><td>0.0154</td><td>0.0244</td><td>0.0175</td><td>0.0237</td><td>0.0149</td><td>0.0297</td><td>0.0205</td><td>0.0287</td><td>0.0195</td><td>0.0460</td><td>0.0332</td></tr><tr><td>fr3/w/xyz</td><td>0.0141</td><td>0.0072</td><td>0.0149</td><td>0.0077</td><td>0.0158</td><td>0.0079</td><td>0.0159</td><td>0.0081</td><td>0.0154</td><td>0.0076</td><td>0.0165</td><td>0.0082</td></tr><tr><td>fr3/w/half</td><td>0.0237</td><td>0.0114</td><td>0.0235</td><td>0.0114</td><td>0.0258</td><td>0.0134</td><td>0.0274</td><td>0.0137</td><td>0.0307</td><td>0.0151</td><td>0.0310</td><td>0.0165</td></tr><tr><td>fr3/w/static</td><td>0.0066</td><td>0.0030</td><td>0.0069</td><td>0.0032</td><td>0.0070</td><td>0.0031</td><td>0.0078</td><td>0.0033</td><td>0.0076</td><td>0.0033</td><td>0.0073</td><td>0.0032</td></tr><tr><td>fr3/w/rpy</td><td>0.0368</td><td>0.0230</td><td>0.0411</td><td>0.0257</td><td>0.1910</td><td>0.1594</td><td>0.0749</td><td>0.0536</td><td>0.0405</td><td>0.0211</td><td>0.0456</td><td>0.0312</td></tr></table>
279
+
280
+ # C. Ablation Experiment
281
+
282
+ In order to verify the function of each module of our algorithm, we design a series of ablation experiments, and the results are shown in Table IV. Here, CFP-SLAM: the full algorithm of this paper; CFP-SLAM $^{-}$ : without the static probability of objects; W/O-MDC: without missed detection compensation; W/O-DBS: without DBSCAN clustering; W/O-KSP: without the static probability of keypoints, i.e., all foreground points obtained after missed detection compensation and DBSCAN clustering are directly eliminated; Only-YOLO: directly eliminate all keypoints in boxes of the person category.
283
+
284
+ The experimental results show that CFP-SLAM $^{-}$ performs worse in low dynamic scenes, because without the object static probability we cannot distinguish high dynamic objects from low dynamic objects, so all objects are processed as high dynamic. W/O-MDC is almost unaffected in low dynamic scenes, but its performance is very poor in high dynamic scenes,
285
+
286
+ especially in w/rpy, where both the camera and the objects move violently. In fact, tracking is often lost in w/xyz, w/half and w/rpy because of missed detections. W/O-DBS and W/O-KSP show mediocre performance in all sequences, which illustrates the effectiveness of DBSCAN clustering and the limitation of treating partially moving non-rigid bodies as a whole, respectively. Only-YOLO has difficulty initializing due to insufficient features in almost all sequences, and tracking is lost in some sequences.
287
+
288
+ # D. Real-time Analysis
289
+
290
+ Real-time performance is one of the important evaluation indexes of SLAM system. We test the average running time of each module, as shown in Table V. EKF represents the missed detection compensation and data association of boxes module, OSP represents the static probability calculation module of objects, and KSP represents the static probability calculation module of keypoints based on the epipolar constraints and the projection constraints. Semantic threads based on YOLOv5s
291
+
292
+ run in parallel with ORB feature extraction. The results show that the average processing time per frame of the main thread is 42.7 ms for CFP-SLAM and 24.77 ms for CFP-SLAM $^{-}$ , i.e., running speeds of about 23 FPS and 40 FPS respectively. Compared with SLAM systems based on semantic segmentation, our system better meets real-time requirements while maintaining accuracy.
293
+
294
+ TABLE V THE AVERAGE RUNNING TIME OF EACH MODULE (ms).
295
+
296
+ <table><tr><td>Methods</td><td>YOLO</td><td>EKF</td><td>OSP</td><td>DBSCAN</td><td>KSP</td><td>Tracking</td></tr><tr><td>CFP-SLAM</td><td>12.44</td><td>0.07</td><td>17.93</td><td>1.76</td><td>3.66</td><td>42.7</td></tr><tr><td>CFP-SLAM-</td><td>12.44</td><td>0.07</td><td>/</td><td>1.76</td><td>3.66</td><td>24.77</td></tr></table>
297
+
298
+ # VI. CONCLUSION
299
+
300
+ In this paper, we propose a dynamic scene-oriented visual SLAM algorithm based on YOLOv5s and coarse-to-fine static probability. After missed detection compensation and keypoints clustering, the static probabilities of objects, keypoints and map points are calculated and updated as weights to participate in pose optimization. Extensive evaluation shows that our algorithm achieves the highest accuracy of localization in almost all low dynamic and high dynamic scenes, and has quite high real-time performance. In the future, we intend to build a lightweight plane and object map containing only static environment for robot navigation and augmented reality.
301
+
302
+ # REFERENCES
303
+
304
+ [1] M. R. U. Saputra, A. Markham, and N. Trigoni, “Visual slam and structure from motion in dynamic environments: A survey,” ACM Computing Surveys (CSUR), vol. 51, no. 2, pp. 1–36, 2018.
305
+ [2] F. Zhong, S. Wang, Z. Zhang, and Y. Wang, "Detect-slam: Making object detection and slam mutually beneficial," in 2018 IEEE Winter Conference on Applications of Computer Vision (WACV). IEEE, 2018, pp. 1001-1010.
306
+ [3] L. Xiao, J. Wang, X. Qiu, Z. Rong, and X. Zou, "Dynamic-slam: Semantic monocular visual localization and mapping based on deep learning in dynamic environment," Robotics and Autonomous Systems, vol. 117, pp. 1-16, 2019.
307
+ [4] C. Yu, Z. Liu, X.-J. Liu, F. Xie, Y. Yang, Q. Wei, and Q. Fei, "DS-SLAM: A semantic visual slam towards dynamic environments," in 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2018, pp. 1168-1174.
308
+ [5] B. Bescos, J. M. Fácil, J. Civera, and J. Neira, “DynaSLAM: Tracking, mapping, and inpainting in dynamic scenes,” IEEE Robotics and Automation Letters, vol. 3, no. 4, pp. 4076–4083, 2018.
309
+ [6] N. Brasch, A. Bozic, J. Lallemand, and F. Tombari, "Semantic monocular slam for highly dynamic environments," in 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2018, pp. 393-400.
310
+ [7] K. Wang, Y. Lin, L. Wang, L. Han, M. Hua, X. Wang, S. Lian, and B. Huang, “A unified framework for mutual improvement of slam and semantic segmentation,” in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 5224–5230.
311
+ [8] X. Yuan and S. Chen, "Sad-slam: A visual slam based on semantic and depth information," in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 4930-4935.
312
+ [9] J. Vincent, M. Labbe, J.-S. Lauzon, F. Grondin, P.-M. Comtois-Rivet, and F. Michaud, "Dynamic object tracking and masking for visual slam," in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 4974-4979.
313
+ [10] A. Li, J. Wang, M. Xu, and Z. Chen, “Dp-slam: A visual slam with moving probability towards dynamic environments,” Information Sciences, vol. 556, pp. 128-142, 2021.
314
+ [11] T. Ji, C. Wang, and L. Xie, "Towards real-time semantic rgb-d slam in dynamic environments," in 2021 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2021, pp. 11175-11181.
315
+
316
+ [12] Y. Fan, Q. Zhang, Y. Tang, S. Liu, and H. Han, "Blitz-slam: A semantic slam in dynamic environments," Pattern Recognition, vol. 121, p. 108225, 2022.
317
+ [13] R. Mur-Artal and J. D. Tardós, "Orb-slam2: An open-source slam system for monocular, stereo, and rgb-d cameras," IEEE transactions on robotics, vol. 33, no. 5, pp. 1255-1262, 2017.
318
+ [14] S. Li and D. Lee, "Rgb-d slam in dynamic environments using static point weighting," IEEE Robotics and Automation Letters, vol. 2, no. 4, pp. 2263-2270, 2017.
319
+ [15] Y. Sun, M. Liu, and M. Q.-H. Meng, "Improving rgb-d slam in dynamic environments: A motion removal approach," Robotics and Autonomous Systems, vol. 89, pp. 110-122, 2017.
320
+ [16] ——, “Motion removal for reliable rgb-d slam in dynamic environments,” Robotics and Autonomous Systems, vol. 108, pp. 115–128, 2018.
321
+ [17] R. Scona, M. Jaimez, Y. R. Petillot, M. Fallon, and D. Cremers, "Staticfusion: Background reconstruction for dense rgb-d slam in dynamic environments," in 2018 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2018, pp. 3849-3856.
322
+ [18] G. Liu, W. Zeng, B. Feng, and F. Xu, "Dms-slam: A general visual slam system for dynamic scenes with multiple sensors," Sensors, vol. 19, no. 17, p. 3714, 2019.
323
+ [19] J. Bian, W.-Y. Lin, Y. Matsushita, S.-K. Yeung, T.-D. Nguyen, and M.-M. Cheng, "Gms: Grid-based motion statistics for fast, ultra-robust feature correspondence," in Proceedings of the IEEE conference on computer vision and pattern recognition, 2017, pp. 4181-4190.
324
+ [20] D.-H. Kim and J.-H. Kim, "Effective background model-based rgb-d dense visual odometry in a dynamic environment," IEEE Transactions on Robotics, vol. 32, no. 6, pp. 1565-1573, 2016.
325
+ [21] W. Dai, Y. Zhang, P. Li, Z. Fang, and S. Scherer, "Rgb-d slam in dynamic environments using point correlations," IEEE Transactions on Pattern Analysis and Machine Intelligence, 2020.
326
+ [22] T. Zhang, H. Zhang, Y. Li, Y. Nakamura, and L. Zhang, "Flowfusion: Dynamic dense rgb-d slam based on optical flow," in 2020 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2020, pp. 7322-7328.
327
+ [23] V. Badrinarayanan, A. Kendall, and R. Cipolla, "Segnet: A deep convolutional encoder-decoder architecture for image segmentation," IEEE transactions on pattern analysis and machine intelligence, vol. 39, no. 12, pp. 2481-2495, 2017.
328
+ [24] K. He, G. Gkioxari, P. Dollár, and R. Girshick, “Mask r-cnn,” in Proceedings of the IEEE international conference on computer vision, 2017, pp. 2961–2969.
329
+ [25] N. Dvornik, K. Shmelkov, J. Mairal, and C. Schmid, "Blitznet: A real-time deep network for scene understanding," in Proceedings of the IEEE international conference on computer vision, 2017, pp. 4154-4162.
330
+ [26] J. Sturm, N. Engelhard, F. Endres, W. Burgard, and D. Cremers, “A benchmark for the evaluation of rgb-d slam systems,” in 2012 IEEE/RSJ international conference on intelligent robots and systems. IEEE, 2012, pp. 573–580.
2202.01xxx/2202.01938/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e822963610e65b178df8ccf6dc18b1788c79b0b053f8b9ca664d7e20a5bf8260
3
+ size 836200
2202.01xxx/2202.01938/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01971/dd56d8c1-727f-4bf1-ae09-4c34d53467db_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01971/dd56d8c1-727f-4bf1-ae09-4c34d53467db_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01971/dd56d8c1-727f-4bf1-ae09-4c34d53467db_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45600aa7e7e4686d529785307f44fa91d4a0b310a5081b3bcbcb1e04fb2da866
3
+ size 1114795
2202.01xxx/2202.01971/full.md ADDED
@@ -0,0 +1,468 @@
 
1
+ # Aggregation Service for Federated Learning: An Efficient, Secure, and More Resilient Realization
2
+
3
+ Yifeng Zheng, Shangqi Lai, Yi Liu, Xingliang Yuan, Xun Yi, and Cong Wang, Fellow, IEEE
4
+
5
+ Abstract—Federated learning has recently emerged as a paradigm promising the benefits of harnessing rich data from diverse sources to train high quality models, with the salient features that training datasets never leave local devices. Only model updates are locally computed and shared for aggregation to produce a global model. While federated learning greatly alleviates the privacy concerns as opposed to learning with centralized data, sharing model updates still poses privacy risks. In this paper, we present a system design which offers efficient protection of individual model updates throughout the learning procedure, allowing clients to only provide obscured model updates while a cloud server can still perform the aggregation. Our federated learning system first departs from prior works by supporting lightweight encryption and aggregation, and resilience against drop-out clients with no impact on their participation in future rounds. Meanwhile, prior work largely overlooks bandwidth efficiency optimization in the ciphertext domain and the support of security against an actively adversarial cloud server, which we also fully explore in this paper and provide effective and efficient mechanisms. Extensive experiments over several benchmark datasets (MNIST, CIFAR-10, and CelebA) show our system achieves accuracy comparable to the plaintext baseline, with practical performance.
6
+
7
+ Index Terms—Federated learning, secure aggregation, privacy, quantization, computation integrity
8
+
9
+ # 1 INTRODUCTION
10
+
11
+ Federated learning has rapidly emerged as a fascinating machine learning paradigm [1] which allows models to be trained on data dispersed over a number of mobile devices while each client can keep its dataset locally. Clients never share the raw datasets, and instead only periodically share model updates locally trained with their datasets, which are then aggregated by a coordinating server to produce a global model. Federated learning thus promises reaping the benefits of harnessing rich data from diverse sources to train high quality models, without clients being worried about the security and privacy risks of centralizing their raw data to a single place for training as in conventional practice.
12
+
13
+ While sharing model updates instead of raw datasets has greatly alleviated the privacy concerns as opposed to learning with centralized data, it still entails risks of private information leakage [2]. Protection of individual model updates is thus still necessary. Additively homomorphic encryption, which allows addition of private plaintext values to be securely performed over their ciphertexts, offers a feasible solution. Some recent works using this technique for building privacy-preserving federated learning systems have been presented [3], [4], [5], [6]. However, homomorphic encryption is expensive and also poses additional trust
14
+
15
+ assumptions (e.g., sharing a private key across data holders) in the context of federated learning (see Section 2 for more discussion). A different work is due to Bonawitz et al. [7], which is free of expensive cryptography and allows lightweight encryption and aggregation.
16
+
17
+ When designing secure federated learning systems, resilience against drop-out clients failing to submit model updates for aggregation is a practical requirement as some clients may face issues like poor network connections, energy constraints, or temporary unavailability [8]. The work [7] provides mechanisms to handle drop-out clients, yet it undesirably prevents drop-out clients from directly and safely engaging in any future rounds of aggregation unless a new key setup is re-conducted. Therefore, how to achieve secure and lightweight processing in federated learning while maintaining practical drop-out resilience with no impact on drop-out clients' participation in any future rounds remains to be fully explored.
18
+
19
+ In addition, most prior works largely overlook the communication efficiency aspect of federated learning in the encrypted domain, which could suffer from ciphertext size expansion or loss of precision in training caused by adapting plaintext processing for compatibility with cryptographic processing. Furthermore, most of them only provide semi-honest security against a passively adversarial server, assuming the server faithfully conducts the designated processing, and they are not resilient to an active adversary that may compromise the integrity of the processing at the server side.
20
+
21
+ In light of the above observations, in this paper, we present a new system design enabling federated learning services with lightweight secure and resilient aggregation. Our system enables clients holding proprietary datasets to only provide obscured model updates while aggregation can still be supported at a cloud server to produce a global
22
+
23
+ model. By newly adapting a cherry-picked aggregation protocol for federated learning that we identified from the literature [9], [10], our system promises practical efficiency on encrypting the model updates at the client as well as aggregating the obscured model updates at the cloud server. No expensive cryptographic operations are required throughout the federated learning procedure. Meanwhile, our system is drop-out resilient and outperforms the best prior work [7] in the sense that the secret keys of drop-out clients are kept private so they still can directly participate at a later stage without the need for a new key setup.
24
+
25
+ With the above new secure design point for federated learning as a basis, we explore and present refinements in terms of boosted communication efficiency and stronger security. We start with consideration on the communication side, a known bottleneck for federated learning, due to various reasons like large sized models, limited client uplink bandwidth, and large numbers of participating clients [1]. We thus explore the potential of compressing model updates before secure aggregation so as to reduce the communication overheads in our system.
26
+
27
+ In the literature, various kinds of techniques for compressing the model updates have been proposed, such as sparsification, subsampling, and quantization [8]. Our observation is that quantization delicately represents the (fractional) values in a model update as integers, which is also the type of data required for secure aggregation. So our insight is to integrate the advancements in quantization with secure aggregation. However, most of existing quantization schemes are not secure aggregation friendly, as de-quantization requiring computation beyond summation has to be conducted before the quantized model updates can be aggregated. We make an observation that a newly developed quantization technique [6] (originally tailored for homomorphic encryption) can suit our purpose as no de-quantization is required before aggregation. Building on this new technique, our system allows clients to provide obscured quantized model updates, achieving a reduction in the communication overhead. To correctly integrate this quantization technique and make it function well, we address some practical considerations in our system including the prevention of overflow in aggregating quantized values and the identification of negative values in de-quantization.
28
+
29
+ Apart from the boosted communication efficiency side, we also investigate mechanisms to make our system more resilient, achieving stronger security against an actively adversarial cloud server, beyond the semi-honest security setting commonly assumed. The goal here is to ensure that the processing for the federated learning service is correctly enforced at the cloud server. Aiming for a pragmatic solution, we resort to the emerging techniques of hardware-assisted trusted execution environments (TEEs) to shield the computation integrity at the cloud server side throughout the federated learning procedure. As a practical instantiation, our system makes use of the increasingly popular Intel SGX [11], [12]. We do not rely on the confidentiality guarantee which is originally targeted by trusted hardware, due to the emergence of various side-channel attacks [13]. This is different from most existing works which assume both confidentiality and integrity when using trusted hardware for secure computation. We give the abstract functionality
30
+
31
+ assumed out of trusted hardware for our federated learning system, and further present a concrete protocol that renders federated learning with secure aggregation with computation integrity against an actively adversarial server.
32
+
33
+ We conduct extensive experiments over the popular benchmark datasets (MNIST, CIFAR-10, and CelebA), and different deep neural network models, with 21,840 parameters, 23,272,266 parameters, and 13,962,562 parameters, respectively. We evaluate the accuracy evolution over varying rounds and demonstrate that our system shares similar behavior with the plaintext baseline and achieves comparable accuracy, even when quantization is applied to the model updates (which achieves up to a $4\times$ reduction in communication). The client-side security cost in encryption as well as in dealing with drop-out is thoroughly examined, which is on the order of a few seconds for the MNIST model and of a few minutes for the larger CIFAR-10 and CelebA models.
34
+
35
+ Our evaluation on the server-side aggregation demonstrates that our system with security against an actively adversarial cloud server incurs almost no overhead over the semi-honest adversary setting. We also make a performance comparison with the state-of-the-art [7] (which exposes the secret keys of drop-out clients). The results demonstrate that our system is much more efficient (up to $39\times$) under zero client drop-out and only incurs a small computation overhead (limited to $2.3\times$) under varying drop-out rates. Besides, our system (with a semi-honest cloud server) requires less server computation as the drop-out rates and number of participating clients increase.
36
+
37
+ We highlight our contributions as follows:
38
+
39
+ - We present a new system design enabling federated learning services with lightweight secure and resilient aggregation. Compared to prior work, our system can handle client drop-out while keeping their secret keys confidential, so their direct participation in future rounds is not affected.
40
+ - We explore quantization-based model compression and newly make unique integration with secure aggregation to boost the communication efficiency in our system. We also present practical mechanisms to make our system more resilient, achieving stronger security against an actively adversarial server.
41
+ - We conduct extensive experiments over multiple real-world datasets and extensively evaluate the accuracy, client-side, and server-side performance. The results demonstrate that our system achieves accuracy comparable to the plaintext baseline, with practical performance.
42
+
43
+ The rest of this paper is organized as follows. Section 2 discusses the related work. Section 3 introduces some preliminaries. Section 4 gives a system overview. Section 5 presents the design of secure aggregation in our system. Section 6 shows how to integrate quantization to boost the communication efficiency. Section 7 gives the design of endowing our system with integrity. Section 8 presents the experiments. Section 9 concludes the whole paper.
44
+
45
+ # 2 RELATED WORK
46
+
47
+ Our research is related to the line of work on federated learning with secure aggregation to protect the individual
48
+
49
+ model updates. To the best of our knowledge, none of existing works tackle exactly the same problem as our work, i.e., federated learning with secure and lightweight aggregation, drop-out resilience, and computation integrity against an adversarial server.
50
+
51
+ Some works rely on expensive homomorphic encryption [4], [5], [6], incurring high performance overheads. The use of homomorphic encryption also requires all clients to either share a common secret key [5], [6], or hold secret key shares which have to be generated via expensive multi-party protocols or distributed by a trusted third party (TTP) [4]. Xu et al. [3] propose a scheme using functional encryption, requiring a TTP for setting up keys as well as getting involved in the learning process. Our system is free of such a TTP.
52
+
53
+ In [7], Bonawitz et al. propose a lightweight encryption scheme supporting secure aggregation for federated learning. Their scheme can handle drop-out clients in an aggregation round, but would reveal their secret keys. When the scheme is used in federated learning, drop-out clients in a certain round are thus not able to directly and safely participate in any future rounds unless a new key setup is conducted. We note that the design of [7] has a double-masking mechanism, which is to prevent the server from learning the data of users who are too late in sending their masked vectors and are assumed to drop-out by the server (so the server recovers their secret keys).
54
+
55
+ There are some follow-up works [14], [15], [16] that attempt to improve [7] from the performance aspect, yet they require additional assumptions regarding client topology information [14], [16] or a delicate privacy-efficiency balance [15]. Specifically, the work of So et al. [14] applies a multi-group circular strategy for model aggregation, which partitions clients into communication groups and operates under a multi-group communication structure that relies on the topology of users and leads to a number of execution stages that have to be conducted sequentially across the groups. Each client in a group has to communicate with every client in the next group. The work of Choi et al. [16] has to appropriately organize clients in a graph structure based on the specific topology information and uses the graph to represent how public keys and secret shares are assigned to the other clients, so each client is aware of its neighboring clients. The work of Kadhe et al. [15] constructs a multi-secret sharing scheme and leverages it to design a secure aggregation scheme, which needs to delicately balance the trade-off between the number of secrets, the privacy threshold, and drop-out tolerance. Meanwhile, the resilience against client drop-out is restricted to some pre-set constant fraction of clients.
56
+
57
+ The most related prior work thus is [7]. Compared with [7], our system also allows lightweight encryption for the clients but does not reveal the secret keys of drop-out clients in any round. As a result, drop-out or non-selected clients in a certain round can still directly and safely participate in future rounds in our system. It is noted that the protocol in [7] consists of multiple rounds (including key setup), which all need to be executed over the clients selected in each iteration when applied to federated learning. Given the possibility of client drop-out, it may not be realistic to assume that each selected client in an iteration will reliably complete all of those interactions for
58
+
59
+ TABLE 1 Comparison of federated learning systems with secure aggregation. "L": lightweight cryptography. "R": resilience against drop-out clients, so that secure aggregation can still be correctly accomplished. "K": keys kept secret in handling drop-outs, so that drop-out clients' secret keys are not exposed. "S": security for computation integrity against the server.
60
+ "C": compression and security co-design for high communication efficiency while ensuring security. "I": individual keys without a TTP.
61
+
62
+ <table><tr><td>System</td><td>L</td><td>R</td><td>K</td><td>S</td><td>C</td><td>I</td></tr><tr><td>Xu et al. [3]</td><td>✘</td><td>✓</td><td>✓</td><td>✘</td><td>✘</td><td>✘</td></tr><tr><td>Phong et al. [5]</td><td>✘</td><td>✘</td><td>✓</td><td>✘</td><td>✘</td><td>✘</td></tr><tr><td>Truex et al. [4]</td><td>✘</td><td>✓</td><td>✓</td><td>✘</td><td>✘</td><td>✘</td></tr><tr><td>Zhang et al. [6]</td><td>✘</td><td>✓</td><td>✓</td><td>✘</td><td>✓</td><td>✘</td></tr><tr><td>Mandal et al. [17]</td><td>✘</td><td>✓</td><td>✓</td><td>✘</td><td>✘</td><td>✘</td></tr><tr><td>Bonawitz et al. [7]</td><td>✓</td><td>✓</td><td>✘</td><td>✘</td><td>✘</td><td>✓</td></tr><tr><td>This work</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
63
+
64
+ key setup. A new key setup in each iteration of federated learning thus might hinder that iteration from proceeding normally. Indeed, once the number of dropped clients (with all interaction rounds considered in an iteration of federated learning) exceeds a threshold, the current iteration of federated learning has to abort and start over. In contrast, our protocol has minimal interactions among the selected clients and the server in each iteration of federated learning. In fact, as opposed to plaintext-domain federated learning, our protocol just needs one additional round of interaction for handling client drop-out upon secure aggregation. It is further worth noting that compared with existing works, our system ambitiously and newly provides assurance on computation integrity against the cloud server.
65
+
66
+ In an independent work, Mandal et al. [17] consider a special scenario where they hide the model from the clients and rely on homomorphic encryption to build specific federated linear/logistic regression models. Our system follows most prior works with a focus on protecting individual model updates and is general for training any model. We note that some previous works [3], [4] also integrate orthogonal differential privacy techniques by adding calibrated noise to local model updates, leading to an inherent trade-off between accuracy and privacy with delicate parameter tuning, which is complementary to our system. Table 1 summarizes the comparison with the most related works discussed above.
67
+
68
+ Our work is also relevant to prior work on building hardware-assisted security applications. Most of existing works assume confidentiality and integrity guarantees from the trusted hardware (e.g., Intel SGX) for secure computation (e.g., [18], [19], [20], [21], [22], to just list a few). Given that the confidentiality guarantees may face threats from various side-channel attacks, a trending practice is to relax the trust assumptions and only assume integrity. A few works have been presented in different contexts including zero-knowledge proofs [13], multi-party machine learning inference [23], client-side checking of model training [24], and aggregation over blockchain-stored data under homomorphic encryption [25]. Inspired by this trend, our system only assumes minimally trusted hardware with integrity guarantees, and explores a new design point for enforcing server-side computation correctness throughout the federated learning procedure.
69
+
70
+ TABLE 2 Key Notations
71
+
72
<table><tr><td>Notation</td><td>Description</td></tr><tr><td>$\mathbf{w}$</td><td>Aggregate model</td></tr><tr><td>$\mathbf{w}_k^t$</td><td>Model update from client $\mathcal{C}_k$ in round $t$</td></tr><tr><td>$(SK_i, PK_i)$</td><td>Key pair of client $\mathcal{C}_i$ in secure aggregation</td></tr><tr><td>$CK_{i,j}$</td><td>Shared key computed from $SK_i$ and $PK_j$</td></tr><tr><td>$\mathcal{T}$</td><td>Set of selected clients in each round</td></tr><tr><td>$\mathcal{T}_o$</td><td>Set of online clients in each round</td></tr><tr><td>$\mathcal{T}_d$</td><td>Set of drop-out clients in each round</td></tr><tr><td>$\mathbf{r}_k$</td><td>The vector of blinding factors of client $\mathcal{C}_k$</td></tr><tr><td>$Q_r$</td><td>The $r$-bit quantizer used in our system</td></tr><tr><td>$Q_r^{-1}$</td><td>The $r$-bit de-quantizer used in our system</td></tr><tr><td>$(sk_{\mathcal{C}_i}, pk_{\mathcal{C}_i})$</td><td>Signing key pair of client $\mathcal{C}_i$</td></tr></table>
73
+
74
+ # 3 PRELIMINARIES
75
+
76
+ # 3.1 Federated Learning
77
+
78
+ Federated learning enables multiple data owners to jointly solve an optimization problem which could be formulated as: $\min \sum_{s=1}^{S} \frac{1}{S} \cdot L(\mathbf{w}, \mathcal{D}_s)$ , where $S$ is the number of data owners, $L(\mathbf{w}, \mathcal{D}_s)$ is a loss function capturing how well the parameters $\mathbf{w}$ (treated as a flattened vector) model the local dataset $\mathcal{D}_s$ . During the learning procedure, each data owner only shares a locally trained model update. The model updates are typically aggregated by a server and used to update the global model. This is an iterative procedure and runs in multiple rounds.
79
+
80
+ Each round proceeds through the following steps: (1) A fraction of the data owners (say $K$ data owners) is selected by the server and a current global model $\mathbf{w}$ is sent to these data owners. (2) Each selected data owner then performs training over its local dataset, for which any optimizers could be used, though stochastic gradient descent (SGD) is the most popular one. With SGD, the $k$ -th selected data owner updates the local model parameters via $\mathbf{w}_k \gets \mathbf{w} - \eta \cdot \nabla L(\mathbf{w};\beta)$ , where $\beta$ is a batch randomly sampled from the local dataset and $\eta$ is the learning rate. A full iteration over the whole local dataset is referred to as an epoch, and the local training could be performed over multiple epochs. (3) Once the local training is done, an individual model update $\mathbf{w}_k$ is shared for aggregation: $\mathbf{w} = \frac{1}{K}\sum_{k = 1}^{K}\mathbf{w}_k$ , which produces an updated global model for next round. In Table 2, we provide a summary of the main notations used in this paper.
81
+
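To make the round structure concrete, the following is a minimal plaintext sketch of the procedure described above (illustrative only, not the paper's implementation; the helper names `local_sgd` and `aggregate` and the toy quadratic loss are assumptions):

```python
import numpy as np

def local_sgd(w, batches, grad, lr=0.1, epochs=1):
    """Client-side step (2): run SGD over the local batches starting from the global w."""
    w = w.copy()
    for _ in range(epochs):
        for beta in batches:
            w = w - lr * grad(w, beta)   # w <- w - eta * grad L(w; beta)
    return w

def aggregate(updates):
    """Server-side step (3): plain averaging of the K submitted model updates."""
    return np.mean(updates, axis=0)      # w <- (1/K) * sum_k w_k

# Toy example with loss L(w; beta) = ||w - beta||^2 / 2, so grad L = w - beta.
grad = lambda w, beta: w - beta
client_batches = [[np.array([1.0, 2.0])], [np.array([3.0, 4.0])]]  # one batch per client
w = np.zeros(2)
for _ in range(5):                        # a few federated rounds
    w = aggregate([local_sgd(w, b, grad) for b in client_batches])
```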
82
+ # 3.2 Transparent Enclave
83
+
84
+ Trusted hardware techniques (e.g., Intel SGX [11], [12]) allow the creation of protected memory regions called enclaves which are isolated from the rest of a host's software, including the operating system. Trusted hardware is designed with the aim of offering confidentiality and integrity assurance. Yet, it has been recently shown that the confidentiality guarantee may be compromised by a series of side-channel attacks [13]. The goal of achieving confidentiality with trusted hardware thus remains elusive so far. A recent trend on building trusted hardware based security applications [13], [23] is thus to only leverage the computational integrity. By relying on only the integrity, it
85
+
86
+ ![](images/d2f9383d7ef04d6475957ca9fca90ddb1a1dbe52412c20d72a69de42b150d707.jpg)
87
+ Fig. 1. Overview of our system architecture.
88
+
89
+ is assumed that the internal states (i.e., all the code and data) of enclaves are transparent to the host (or the corrupted party). Only secure code attestation and secure signing functionality are required. Enclaves with such minimal trust assumption are referred to as transparent enclaves [13].
90
+
91
+ # 4 SYSTEM OVERVIEW
92
+
93
+ # 4.1 Architecture
94
+
95
+ Fig. 1 illustrates our system architecture, which involves three actors: the service requester (or requester for short), clients, and the secure federated learning service which bridges the requester and clients. The requester wants to harness the power of federated learning and releases a task via the secure federated learning service. Such service could be hosted on the cloud, and run by a cloud server. Here the cloud server is an abstraction and can be implemented by an actual server or a cluster of servers. The clients are data owners interested in working on the federated learning task and sign up at the cloud server. All participants in the system agree in advance on a common machine learning model architecture and common learning objective. The clients may receive rewards from the requester for the participation.
96
+
97
+ To realize the secure federated learning service, our first aim is that in each round the cloud server should be able to perform aggregation of the individual model updates without seeing them in the clear. So, we will craft our design such that each client selected in a round can just provide an obscured model update, which still effectively allows aggregation. For high efficiency, our system will preclude the use of expensive homomorphic encryption and only rely on lightweight cryptography. Considering that communication is a known bottleneck in federated learning, our system will also cherry-pick and integrate appropriate quantization techniques to compress the model updates, so that a client can just share an obscured and quantized model update.
98
+
99
+ In addition to the confidentiality protection of individual model updates in each round of federated learning, our system also ambitiously aims to efficiently ensure that the federated learning service is correctly provisioned by the cloud server. While theoretically (expensive) cryptography-based verifiable computation techniques may help [26], we aim for a pragmatic solution and observe the trending practice is to leverage hardware-assisted trusted execution environments (TEE) [13]. Our system follows this trend and
100
+
101
+ only assumes a minimally trusted hardware with integrity guarantees. Specifically, we will rely on a transparent enclave to shield the computation integrity at the cloud server.
102
+
103
+ # 4.2 Threat Model
104
+
105
+ Our system for secure federated learning considers an adversarial cloud server. It could be semi-honest, which means the cloud server will honestly follow the protocol for the federated learning service, yet is curious about individual model updates so as to infer clients' local datasets. The semi-honest threat model is commonly adopted in privacy-preserving data-centric services in cloud computing [27], [28]. For this setting our system aims to maintain the confidentiality of individual model updates. The cloud server is only allowed to learn an aggregate model update and global model. Beyond the semi-honest adversary setting, a stronger threat model will also be considered where the cloud server may not correctly follow the designated computation. For this setting, our system further aims to enforce correct computation at the cloud server besides the confidentiality of individual model updates.
106
+
107
+ Collusion between the cloud server and a subset of selected clients in each round is also likely, and for such case our system aims to maintain the confidentiality of model updates of honest clients. Here, we are not concerned with the extreme (and also hardly realistic given that clients are many and geographically distributed) case that there is only one honest client and all other clients are compromised by the cloud server. Note that the assumption of at least one honest client is necessary in the problem of secure data aggregation and also (either explicitly or implicitly assumed) in existing works [7], [10], [29], [30]. It is obvious that for any practical secure aggregation schemes that securely compute the sum, if the aggregator colludes with all but one of the clients, the data of the colluding clients can be surely removed from the sum and the honest client's data will be revealed. Therefore, like existing works, our system does not provide data protection against the extreme colluding case.
108
+
109
+ It is noted that our system currently does not protect against possible attacks on the aggregate model updates and global model, which can be directly mitigated by having clients add noise as per complementary differential privacy techniques [4]. Adversarial attacks like poisoning attacks [31] and backdoor attacks [32] are complementary research areas and outside the scope of this work.
110
+
111
+ # 5 EMPOWERING FEDERATED LEARNING WITH SECURE AND EFFICIENT AGGREGATION
112
+
113
+ # 5.1 Overview
114
+
115
+ Our system is aimed at a federated learning service with secure aggregation that is free of heavy cryptography. Meanwhile, our system should be able to work without a trusted third party for setting up keys for secure aggregation and/or assistance in the learning procedure. All these practical requirements preclude the use of expensive homomorphic encryption, which is also confronted with key distribution issues as mentioned before. Recall that although the secure aggregation scheme in [7] allows lightweight encryption and aggregation without a trusted
116
+
117
+ third party, it reveals the secret keys of drop-out clients in an aggregation round, which consequently prevents them from direct participation in future rounds unless a new key setup is conducted in a subsequent round.
118
+
119
+ In our system, we propose to efficiently protect the confidentiality of individual model updates in federated learning with a cherry-picked low overhead cryptographic aggregation protocol [10]. We resort to this protocol due to the observations that it does not involve heavy cryptographic operations (only lightweight hashing and arithmetic operations are needed), does not require a trusted third party, and will not reveal the secret keys of drop-out clients. With this protocol as a basis, we craft a thorough design for federated learning with secure aggregation of individual model updates. Later in Section 6 and Section 7, we will work through the refinements that promise boosted communication efficiency and security.
120
+
121
+ # 5.2 Protocol
122
+
123
+ We now present the protocol for federated learning with secure aggregation of model updates. Note that as required by the cryptographic computation, the values in the vector representing the local model update should be integers, which, however, could be fractional values in practice. We adopt a common scaling factor trick where a large enough scaling factor is used to scale up a fractional value into an integer. Specifically, given a fractional value $v$ and a scaling factor $L$ , we can obtain an integer representation of $v$ as $\overline{v} = \lfloor v \cdot L \rfloor$ . The approximate $v$ can be later reconstructed as $\overline{v} / L$ [33]. Applying such a trick in our context, we only need to scale down the aggregate model update. To ensure that the scaling operation does not compromise the quality of the computation result, the message space for the scaled integers should be large enough (say $2^{32}$ or $2^{64}$ ). Our protocol, as shown in Algorithm 1, proceeds as follows.
124
+
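As a minimal numerical illustration of the scaling-factor trick just described (the choice $L = 2^{16}$ is arbitrary and only for this example), before the protocol steps below:

```python
import math

L = 2**16                                    # example scaling factor
def encode(v):  return math.floor(v * L)     # v_bar = floor(v * L)
def decode(vb): return vb / L                # approximate recovery v_bar / L

vals = [0.12345, -0.5, 3.1]
total = sum(encode(v) for v in vals)         # sums of scaled integers add up directly
assert abs(decode(total) - sum(vals)) < 1e-3
```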
125
+ Initialization. Suppose that there are $S$ clients in total in the system. We write $\mathcal{S}$ to denote the set of clients, in which each client is uniquely indexed by an integer $i\in [1,S]$. For initialization, each client generates its own key pair and leverages the cloud server as a central hub for distribution of public keys. Let $\mathbb{G}$ be a cyclic group of prime order $p$, with generator $g$. Also, let $\mathsf{H}:\{0,1\}^{*}\to \mathbb{Z}_{p}$ be a cryptographic hash function mapping arbitrary-length strings to integers in $\mathbb{Z}_p$. Each client $\mathcal{C}_i$ generates a private key $SK_{i} = x_{i}\in \mathbb{Z}_{p}$ and a public key $PK_{i} = g^{x_{i}}\in \mathbb{G}$, and uploads $PK_{i}$ to the cloud server. Then, each client $\mathcal{C}_i$ downloads the other clients' public keys and computes $CK_{i,j} = \mathsf{H}((PK_j)^{x_i})$, where $i,j\in \mathcal{S}$ and $j\neq i$.
126
+
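As a rough illustration of this key setup, the sketch below derives the pairwise keys $CK_{i,j}$ with a toy Diffie-Hellman group (the group parameters and the SHA-256 hash are assumptions for demonstration, not parameters mandated by the paper):

```python
import hashlib
import secrets

# Toy group parameters for illustration; a deployment would use a standard large group.
p = 2**61 - 1      # a Mersenne prime
g = 3

def keygen():
    x = secrets.randbelow(p - 2) + 1          # SK_i = x_i
    return x, pow(g, x, p)                    # PK_i = g^{x_i}

def shared_key(sk_i, pk_j):
    # CK_{i,j} = H((PK_j)^{x_i}); both endpoints derive the same key.
    return hashlib.sha256(str(pow(pk_j, sk_i, p)).encode()).digest()

sk1, pk1 = keygen()
sk2, pk2 = keygen()
assert shared_key(sk1, pk2) == shared_key(sk2, pk1)
```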
127
+ Secure federated learning. Without loss of generality, we describe here the secure aggregation process for federated learning in one round. The cloud server first randomly selects a fraction $\eta$ of the clients. We write $\mathcal{T}$ to denote the set of selected clients. This set $\mathcal{T}$ and the current global model vector $\mathbf{w}$ ($\mathbf{w}$ is an initialized model when it is the first round) are sent to the selected clients. Each selected client $\mathcal{C}_k$ $(k\in \mathcal{T})$ then performs local training and produces a model update $\mathbf{w}_k$. An obscured model update is then generated based on blinding factors. In particular, the client first generates a blinding factor for each element in the model update vector $\mathbf{w}_k$ and produces a vector $\mathbf{r}_k$ of blinding factors.
128
+
129
+ Algorithm 1 Our Design for Secure Federated Learning
130
+ 1: Initialization:
131
+ 2: for each client $C_i$ do
132
+ 3: $SK_{i} = x_{i}\in \mathbb{Z}_{p}$
133
+ 4: $PK_{i} = g^{x_{i}}\in \mathbb{G}$
134
+ 5: Upload $PK_{i}$ to the cloud server.
135
+ 6: for each $j\in S,j\neq i$ do
136
+ 7: Download $PK_{j}$ from the cloud server.
137
+ 8: Compute $CK_{i,j} = \mathsf{H}((PK_j)^{x_i})$
138
+ 9: end for
139
+ 10: end for
140
+ 11: Cloud server executes:
141
+ 12: Initialize $\mathbf{w}^0$
142
+ 13: for each round $t = 1,2,\dots$ do
143
+ 14: Select a fraction of the clients and produce the set $\mathcal{T}^t$
144
+ 15: for each client $C_k$ in $\mathcal{T}^t$ do
145
+ 16: $\mathbf{w}'_k^t\gets \mathrm{ClientUpdate}(\mathbf{w}^{t - 1},\mathcal{T}^t)$
146
+ 17: end for
147
+ 18: $\mathbf{w}'\gets \sum_{k\in \mathcal{T}^t}\mathbf{w}'_k^t$ mod $M$
148
+ 19: $\mathbf{w}^t\gets \frac{1}{|\mathcal{T}^t|}\cdot \mathbf{w}'$
149
+ 20: end for
150
+ 21: ClientUpdate $(\mathbf{w}^{t - 1},\mathcal{T}^t)$
151
+ 22: $\mathbf{w}\gets \mathbf{w}^{t - 1}$
152
+ 23: Split the dataset $D_{k}$ into batches $\mathcal{B}$
153
+ 24: for each local epoch $e$ from 1 to $E$ do
154
+ 25: for each batch $\beta$ in $\mathcal{B}$ do
155
+ 26: $\mathbf{w}\gets \mathbf{w} - \eta \cdot \nabla L(\mathbf{w};\beta)$. // $\eta$ is the learning rate.
156
+ 27: end for
157
+ 28: end for
158
+ 29: $\mathbf{w}_k^t\gets \mathbf{w}$
159
+ 30: Generate the vector $\mathbf{r}_k^t$ of blinding factors. Each element $\mathbf{r}_k^t(b) = \sum_{n\in \mathcal{T}^t,n\neq k}(-1)^{k > n}\mathsf{H}(CK_{k,n}||b||t)$
160
+ 31: Compute $\mathbf{w}'_k^t\gets \mathbf{w}_k^t +\mathbf{r}_k^t$ mod $M$
161
+ 32: Return $\mathbf{w}'_k^t$ to the cloud server.
162
+
163
+ The basic idea for supporting correct aggregation over obscured model updates is that if a client adds randomness to its input for blinding while another client subtracts that randomness from its input, the randomness will be canceled out when the summation of clients' obscured inputs is formed. In particular, each blinding factor $\mathbf{r}_k(b)$ for the $b$ -th element in $\mathbf{w}_k$ is generated as $\mathbf{r}_k(b) = \sum_{n \in \mathcal{T}, n \neq k} (-1)^{k > n} \mathsf{H}(CK_{k,n}||b||t)$ , where $(-1)^{k > n} = -1$ if $k > n$ and 1 otherwise, and $t$ is a round counter. It can be observed that the sum of all blinding factors $\sum_{k \in \mathcal{T}} \mathbf{r}_k(b)$ is 0 as the blinding factors are canceled out when the sum over the set $\mathcal{T}$ of selected clients is formed.
164
+
165
+ Given above, with a model update $\mathbf{w}_k$ where each element is assumed to lie in a message space $M$ (say $M = 2^{32}$ ), each client $\mathcal{C}_k$ generates a vector $\mathbf{r}_k$ of blinding factors as introduced above, and computes $\mathbf{w}_k' = \mathbf{w}_k + \mathbf{r}_k \bmod M$ , where the modulo operation is performed element-wise. Upon receiving the obscured model updates, the cloud server computes $\sum_{k \in \mathcal{T}} \mathbf{w}_k' \bmod M = \sum_{k \in \mathcal{T}} (\mathbf{w}_k + \mathbf{r}_k) \bmod M$ and obtains $\sum_{k \in \mathcal{T}} \mathbf{w}_k \bmod M$ as $\sum_{k \in \mathcal{T}} \mathbf{r}_k = 0$ .
166
+
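The cancellation can be checked with a small self-contained sketch (the hash-based `prf` stands in for $\mathsf{H}(CK_{k,n}||b||t)$, and all parameter values are toy choices, not values from the paper):

```python
import hashlib

M, D, t = 2**32, 4, 7          # message space, update length, round counter
T = [1, 2, 3]                  # indices of the selected clients

def prf(k, n, b, t):
    # Stand-in for H(CK_{k,n} || b || t); the key is symmetric in (k, n).
    key = f"{min(k, n)}-{max(k, n)}|{b}|{t}"
    return int.from_bytes(hashlib.sha256(key.encode()).digest()[:4], "big")

def mask(k, w):
    # w'_k(b) = w_k(b) + sum_{n != k} (-1)^{k>n} H(CK_{k,n}||b||t) mod M
    return [(w[b] + sum((-1 if k > n else 1) * prf(k, n, b, t)
                        for n in T if n != k)) % M for b in range(D)]

updates = {k: [10 * k + b for b in range(D)] for k in T}       # toy w_k
agg = [sum(mask(k, updates[k])[b] for k in T) % M for b in range(D)]
plain = [sum(updates[k][b] for k in T) % M for b in range(D)]
assert agg == plain                                            # masks cancel out
```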
167
+ # 5.3 Security Guarantees
168
+
169
+ We now provide a security analysis for the above design.
170
+
171
+ Definition 1. (Computational Diffie-Hellman (CDH) Problem). Consider a cyclic group $\mathbb{G}$ of prime order $p$ with generator $g$ . The CDH problem is hard if, for any probabilistic polynomial time algorithm $\mathcal{A}$ and random $a$ and $b$ drawn from $\mathbb{Z}_p$ : $Pr[\mathcal{A}(\mathbb{G}; p; g; g^a; g^b) = g^{ab}]$ is negligible.
172
+
173
+ Theorem 1. Given the hardness of the CDH problem, our system ensures that the cloud server only learns the aggregate model update without knowing individual model updates. In case of collusion between the cloud server and a subset of clients, the model updates of honest clients are still protected.
174
+
175
+ Proof. Each value in the model update of the client $\mathcal{C}_k$ is masked by a unique blinding factor generated by the secret keys $\{CK_{k,n}:\mathsf{H}((g^{x_n})^{x_k})\}$ . So we need to show that the cloud server is oblivious to the secret key $CK_{k,n}$ . In our system, the cloud server only has access to the public key $g^{x_i}$ of each client in $\mathcal{S}$ , as per the system setup. The CDH problem ensures that given $g^a$ and $g^b$ , it is computationally hard to compute the value $g^{ab}$ . So, given access to the public keys $g^{x_k}$ and $g^{x_n}$ , the cloud server is not able to infer the secret key $CK_{k,n} = \mathsf{H}((g^{x_n})^{x_k})$ for the generation of blinding factors. Next, we analyze the case of passive collusion between a subset of selected clients with the cloud server, where the corrupted clients share all their secret materials with the cloud server.
176
+
177
+ Without loss of generality, we give the proof for a certain honest client denoted by $\mathcal{C}_i$ . Let $\mathcal{E}$ denote the set of clients participating in the same aggregation round $t$ , $\mathcal{E}_h$ denote the set of honest clients, and $\mathcal{E}_c$ the subset of clients colluding with the cloud server. Recall that in our system the data submitted by the honest client $\mathcal{C}_i$ is $\mathbf{w}_i(b) + \mathbf{r}_i(b) = \mathbf{w}_i(b) + \sum_{n\in \mathcal{E}, n\neq i} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t)$ , for the $b$ -th element in $\mathbf{w}_i$ . Given $\mathcal{E}_h$ and $\mathcal{E}_c$ , $\sum_{n\in \mathcal{E}, n\neq i} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t)$ can be expressed as two parts: $\sum_{n\in \mathcal{E}_h, n\neq i} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t)$ and $\sum_{n\in \mathcal{E}_c} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t)$ . So what the cloud server receives from the (honest) client is $\mathbf{w}_i + \sum_{n\in \mathcal{E}_h, n\neq i} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t) + \sum_{n\in \mathcal{E}_c} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t)$ . As the set $\mathcal{E}_c$ of clients colludes with the cloud server, they are able to reveal to the cloud server the part $\sum_{n\in \mathcal{E}_c} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t)$ . So given the collusion with the set $\mathcal{E}_c$ of clients, the cloud server can obtain $\mathbf{w}_i + \sum_{n\in \mathcal{E}_h, n\neq i} (-1)^{i > n} \mathsf{H}(CK_{i,n}||b||t)$ ultimately. It can be seen that the honest client $\mathcal{C}_i$ 's model update is still protected by blinding factors generated from the mutual secret keys established between this honest client and other honest clients in $\mathcal{E}_h$ . That said, the cloud server still observes a randomly masked model update from the honest client $\mathcal{C}_i$ . This completes the proof.
178
+
179
+ # 5.4 Practical Considerations
180
+
181
+ Fault tolerance. It is likely that a selected client in a round may not actually participate (i.e., submitting a model update), due to reasons like poor network connections, energy constraints, or temporary unavailability. We call such clients
182
+
183
+ Algorithm 2 Fault Tolerance against Client Drop-out
184
+ 1: Cloud server sends $\mathcal{T}_d$ to the clients in $\mathcal{T}_o$ .
185
+ 2: for each client $\mathcal{C}_k$ in $\mathcal{T}_o$ do
186
+ 3: Initialize a vector $\mathbf{q}_k$ of size $|\mathbf{w}_k^{\prime}|$ .
187
+ 4: for the $b$ -th element in $\mathbf{q}_k$ do
188
+ 5: $\mathbf{q}_k(b) \gets \sum_{n \in \mathcal{T}_d} (-1)^{k > n} \mathsf{H}(CK_{k,n}||b||t) \mod M$ .
189
+ 6: end for
190
+ 7: Send $\mathbf{q}_k$ to the cloud server.
191
+ 8: end for
192
+ 9: Cloud server computes $\mathbf{q} = \sum_{k \in \mathcal{T}_o} \mathbf{q}_k \mod M$ .
193
+ 10: Cloud server computes $(\sum_{k \in \mathcal{T}_o} \mathbf{w}_k^{\prime}) - \mathbf{q} \mod M$ .
194
+
195
+ drop-out clients. In such case, the cloud server would not be able to directly obtain the correct aggregation result, because the sum of the blinding factors is not zero. Therefore, our system should be able to handle the drop-out clients in a certain round and ensure that the obscured model updates of the responding clients can still be correctly aggregated.
196
+
197
+ We assume that the selected clients in a round which have managed to submit the (obscured) model updates will be able to stay online for potential assistance. The rationale behind such an assumption is that clients will typically train and participate when their devices are charging and on an unmetered network [1], [34]. In practice, adequate incentive mechanisms could also be developed to further motivate clients' active participation.
198
+
199
+ To handle drop-out clients, the main insight is that the blinding factors of the responding clients should be eventually canceled out. Let $\mathcal{T}_o$ denote the set of responding clients who have submitted the (masked) model updates and are able to stay online for potential assistance, and $\mathcal{T}_d$ the set of drop-out clients who fail to submit the masked model updates. At a high level, each online client $\mathcal{C}_k\in \mathcal{T}_o$ generates a vector of blinding factors based on the shared keys $\{CK_{k,n}\}_{n\in \mathcal{T}_d}$ with the set of drop-out clients. This vector of blinding factors allows the server to eliminate the blinding factors corresponding to the drop-out clients from each masked model update, and thus to recover the correct aggregate model update. We show in Algorithm 2 the fault tolerance mechanism for handling drop-out clients.
200
+
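A self-contained toy sketch of this recovery step, mirroring Algorithm 2 (the hash-based `prf` stand-in and all parameter values are illustrative assumptions):

```python
import hashlib

M, D, t = 2**32, 4, 7
T, dropped = [1, 2, 3], [3]                  # client 3 fails to respond
online = [k for k in T if k not in dropped]

def prf(k, n, b, t):
    key = f"{min(k, n)}-{max(k, n)}|{b}|{t}"
    return int.from_bytes(hashlib.sha256(key.encode()).digest()[:4], "big")

def mask(k, w):                              # masking as in Section 5.2
    return [(w[b] + sum((-1 if k > n else 1) * prf(k, n, b, t)
                        for n in T if n != k)) % M for b in range(D)]

def recovery_share(k):                       # q_k in Algorithm 2
    return [sum((-1 if k > n else 1) * prf(k, n, b, t) for n in dropped) % M
            for b in range(D)]

updates = {k: [10 * k + b for b in range(D)] for k in T}
masked = {k: mask(k, updates[k]) for k in online}
q = [sum(recovery_share(k)[b] for k in online) % M for b in range(D)]
agg = [(sum(masked[k][b] for k in online) - q[b]) % M for b in range(D)]
assert agg == [sum(updates[k][b] for k in online) % M for b in range(D)]
```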
201
+ Client dynamics. After the initial system setup, new clients may join and some clients may be revoked later. We now introduce how our system can support such client dynamics. When a new client $\mathcal{C}_i$ joins, it generates a secret key $x_i$ and uploads its public key $PK_i = g^{x_i}$ to the cloud server, which then broadcasts it to other current clients in the system. Upon receiving $PK_i$, each client $\mathcal{C}_j$ computes $CK_{j,i} = \mathsf{H}((PK_i)^{x_j})$ to ensure that its blinding factors will be correctly generated for later use. On the other hand, when a client $\mathcal{C}_i$ leaves the system, the cloud server only needs to inform each client $\mathcal{C}_j$ to discard the key $CK_{j,i}$.
202
+
203
+ Group management. In each round of federated learning, the client-side computation complexity in secure aggregation scales linearly in the size of the set $\mathcal{T}$, as generating the blinding factors requires each client to perform $O(|\mathcal{T}|)$ hashing operations. When $|\mathcal{T}|$ becomes quite large, it would be beneficial to improve the client-side efficiency. One immediate optimization is that in each round, the selected clients can be divided into groups with sizes smaller than $|\mathcal{T}|$. During the process of secure aggregation, within-group
204
+
205
+ aggregation is first performed at the cloud server, followed by cross-group aggregation.
206
+
207
+ If the selected clients are divided into $s$ groups of equal size, the computation complexity is reduced to $O(|\mathcal{T}| / s)$ for each client participating in a round of federated learning. For client grouping, different guidelines might be adopted, such as geographical proximity. Note that group management also benefits fault tolerance. This is because each group is independent, so faults in a certain group will not affect other groups. Only the clients in the same group will be involved in handling the faults. The impact of faults on the overall system is thus effectively mitigated. One trade-off here is that the cloud server learns aggregated model updates from clients in independent groups. Yet, individual clients' model updates in each group are still protected. The group size can be set in advance according to the agreement between the cloud server and the clients.
208
+
209
+ # 6 LEVERAGING QUANTIZATION FOR BOOSTED COMMUNICATION EFFICIENCY
210
+
211
+ Communication could be a bottleneck for federated learning [8], because the trained machine learning models are usually of large sizes, clients may have limited uplink bandwidth, and the number of clients could be large [1]. As in each round a selected client communicates the model update to the cloud server, reducing the model update size could be highly beneficial to improve the communication efficiency of the whole system. In the literature, researchers have studied various kinds of techniques for compressing model updates such as sparsification, subsampling, and quantization [8]. Among these techniques, our observation is that quantization delicately represents the values in a model update as integers, exhibiting compatibility with cryptographic aggregation. Therefore, we choose to leverage the advancements in quantization to support secure aggregation while achieving a reduction in communication cost, and thus kill two birds with one stone.
212
+
213
+ # 6.1 Quantization Technique
214
+
215
+ Quantization-based communication efficiency optimization for distributed machine learning has received considerable traction in recent years (e.g., [35], [36], [37], to just list a few). However, most existing quantization techniques are not secure-aggregation friendly, as they require a de-quantization operation, which demands computation beyond the summation supported by secure aggregation, to be performed before the aggregation. What we need here is thus a quantization scheme which can support aggregation directly over quantized values.
216
+
217
+ We make an observation that a newly developed quantization technique [6] (originally tailored for homomorphic encryption) suits our purpose and has good compatibility with secure aggregation in our system. This technique quantizes fractional values into $r$ -bit signed integers. In order to enable the values with opposite signs to be canceled out during summation, it proposes to make the quantized range symmetrical with respect to the range of the fractional values. Specifically, the $r$ -bit quantizer $Q_{r}$ that we integrate in our system works as follows. We start with
218
+
219
+ Algorithm 3 Secure Federated Learning with Quantization
220
+ 1: Cloud server executes:
221
+ 2: Initialize $\mathbf{w}^0$ .
222
+ 3: for each round $t = 1,2,\dots$ do
223
+ 4: Select a fraction of the clients and produce the set $\mathcal{T}^t$.
224
+ 5: for each client $\mathcal{C}_k$ in $\mathcal{T}^t$ do
225
+ 6: $\mathbf{w}_{k}^{\prime t}\gets \mathrm{ClientUpdate}(\mathbf{w}^{t - 1},\mathcal{T}^t)$ .
226
+ 7: end for
227
+ 8: $\Delta^t\gets \sum_{k\in \mathcal{T}^t}\mathbf{w}_{k}^{\prime t}$ mod $2^{r}$ .
228
+ 9: $\mathbf{w}^t = \mathbf{w}^{t - 1} + \frac{1}{|\mathcal{T}^t|}\cdot Q_r^{-1}(\Delta^t)$ .
229
+ 10: end for
230
+ 11: ClientUpdate(w $^{t - 1}$ , $\mathcal{T}^t$ ):
231
+ 12: Produce a locally trained model $\mathbf{w}_k^t$ as in Algorithm 1 (steps 22-29).
232
+ 13: $\Delta_k^t = \mathbf{w}_k^t -\mathbf{w}^{t - 1}$ .
233
+ 14: Generate the vector $\mathbf{r}_k^t$ of blinding factors.
234
+ 15: Compute $\mathbf{w}_{k}^{\prime t}\gets Q_{r}(\Delta_{k}^{t}) + \mathbf{r}_{k}^{t}$ mod $2^{r}$ .
235
+ 16: Return $\mathbf{w}_{k}^{\prime t}$ to the cloud server.
236
+
237
+ introducing some notations. For any scalar $v \in \mathbb{R}$ , we write $\operatorname{sgn}(v) \in \{-1, 1\}$ to denote the sign of $v$ , with $\operatorname{sgn}(0) = 1$ . We write $\operatorname{abs}(v)$ to denote the absolute value of $v$ , and $\operatorname{round}(v)$ to denote standard rounding over $v$ . Given a value $v$ in the range $[-B, B]$ , the quantizer $Q_r$ quantizes it to an integer in $[-(2^{r-1}-1), 2^{r-1}-1]$ via:
238
+
239
+ $$
240
+ Q_r(v) = \operatorname{sgn}(v) \cdot \operatorname{round}\left(\operatorname{abs}(v) \cdot (2^{r-1} - 1) / B\right) \tag{1}
241
+ $$
242
+
243
+ Given a quantized value $u = Q_r(v)$ , the de-quantized value is computed as follows:
244
+
245
+ $$
246
+ Q_r^{-1}(u) = \operatorname{sgn}(u) \cdot \left(\operatorname{abs}(u) \cdot B / (2^{r-1} - 1)\right) \tag{2}
247
+ $$
248
+
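A small sketch of this quantizer and de-quantizer (Eqs. (1)-(2)); the parameter values are examples only, and `np.sign(0) = 0` differs from the paper's $\operatorname{sgn}(0) = 1$, but both give $Q_r(0) = 0$:

```python
import numpy as np

def quantize(v, r=8, B=1.0):
    """Eq. (1): map values in [-B, B] to integers in [-(2^{r-1}-1), 2^{r-1}-1]."""
    v = np.clip(v, -B, B)
    return (np.sign(v) * np.round(np.abs(v) * (2**(r - 1) - 1) / B)).astype(int)

def dequantize(u, r=8, B=1.0):
    """Eq. (2): map the quantized integers back to the original range."""
    return np.sign(u) * (np.abs(u) * B / (2**(r - 1) - 1))

delta = np.array([0.31, -0.82, 0.05])
q = quantize(delta)                            # e.g. [39, -104, 6]
print(np.max(np.abs(dequantize(q) - delta)))   # small quantization error
```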
249
+ # 6.2 Secure Federated Learning with Quantization
250
+
251
+ We adapt the above quantization technique for our system to simultaneously achieve lightweight cryptographic aggregation and high communication efficiency. Such delicate integration leads to our refined protocol for secure federated learning, which is shown in Algorithm 3. Compared with the process shown in Algorithm 1, the main difference is that in each round, after training a local model, each selected client sends an obscured quantized model update to the cloud server. The cloud server then performs aggregation over these obscured quantized model updates and produces an updated global model.
252
+
253
+ Note that following prior works [35], [38], the quantization in our system is performed over the difference between a locally trained model $\mathbf{w}_k^t$ and the current global model $\mathbf{w}^{t - 1}$, i.e., $\Delta_k^t = \mathbf{w}_k^t -\mathbf{w}^{t - 1}$, as the model difference is more amenable to compression, in contrast to the locally trained model. Note that the quantizer requires the input to be bounded in a range $[-B,B]$. This can be achieved by clipping the values in $\Delta_k^t$ based on the threshold $B$, which can be pre-set based on a public calibration dataset [39]. Values greater than $B$ are set to $B$, and to $-B$ if they are smaller than $-B$. In what follows, we further address some practical considerations.
254
+
255
+ Preventing overflow in aggregating quantized values. As the aggregation is performed over the quantized values from multiple clients, preventing overflow is crucially important. We note that this can be achieved via applying a
256
+
257
+ 1: Each client $\mathcal{C}_i$ generates the public key $g^{x_i}$ and signature $\sigma_{\mathcal{C}_i} = \mathrm{Sign}_{sk_{\mathcal{C}_i}}(g^{x_i})$ .
258
+ 2: Each client $\mathcal{C}_i$ sends $g^{x_i}$ and $\sigma_{\mathcal{C}_i}$ to $\mathcal{F}_{\mathrm{TEE}}$; they are also revealed to the cloud server.
259
+ 3: The cloud server invokes $\mathcal{F}_{\mathrm{TEE}}$ on ("Compute", $(\{g^{x_i}\}_{i\in S},\{\sigma_{\mathcal{C}_i}\}_{i\in S}))$ , and receives $\perp$ or updated state containing (ctr, $\{g^{x_j}\}_{j\in S,j\neq i},\sigma$ ) to be sent to each client $\mathcal{C}_i$ , where $\operatorname{ctr} = 0$ .
260
+ 4: Each client $\mathcal{C}_i$ runs $\mathrm{Verify}_{vk_{\mathrm{T}}}((\mathrm{ctr},\{g^{x_j}\}_{j\in S,j\neq i}),\sigma)$ upon receiving $(\mathrm{ctr}, \{g^{x_j}\}_{j\in S,j\neq i},\sigma)$.
261
+ 5: Each client continues to produce the keys used for blinding factor generation if the check passes, or aborts otherwise.
262
+
263
+ Fig. 2. Key setup in security-hardened federated learning in our system.
264
+
265
+ scaling mechanism on the input range for quantization [6]. In particular, to prevent overflow when the aggregation is performed over $c$ clients, we can set the input range for quantization as $[-c \cdot B, c \cdot B]$ , in contrast with the original range $[-B, B]$ . The intuition here is that by scaling the input range, each quantized value is scaled down $c$ times so overflow can be prevented when $c$ quantized values are aggregated.
266
+
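A brief self-contained check of this range-scaling idea, restating the quantizer from Section 6.1 ($c$, $r$, and $B$ are example values, not values from the paper):

```python
import numpy as np

def quantize(v, r, B):    # Eq. (1), as sketched in Section 6.1
    v = np.clip(v, -B, B)
    return (np.sign(v) * np.round(np.abs(v) * (2**(r - 1) - 1) / B)).astype(int)

def dequantize(u, r, B):  # Eq. (2)
    return np.sign(u) * np.abs(u) * B / (2**(r - 1) - 1)

r, B, c = 8, 1.0, 10                                      # c clients aggregate
delta = np.array([0.31, -0.82, 0.05])
agg = sum(quantize(delta, r, c * B) for _ in range(c))    # widened range [-cB, cB]
assert np.all(np.abs(agg) <= 2**(r - 1) - 1)              # stays within r bits
mean_update = dequantize(agg, r, c * B) / c               # close to delta
```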
267
+ Identifying negative values in de-quantization. It is noted that as the aggregate model update $\Delta^t$ is derived modulo $2^r$, the cloud server needs to know which values in $\Delta^t$ are in fact negative before performing the de-quantization. This can be achieved as follows. The cloud server checks if a value $a$ in $\Delta^t$ is greater than $2^{r - 1} - 1$. If yes, its raw value can be obtained via the transformation $a' = a - 2^r$. Otherwise, it is non-negative. After this check and the transformation applied to values deemed to be negative, the cloud server can then proceed to the de-quantization.
268
+
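This check amounts to the usual modular-to-signed conversion, sketched below:

```python
def to_signed(a, r):
    """Map a value computed modulo 2**r back to its signed representation."""
    return a - 2**r if a > 2**(r - 1) - 1 else a

r = 8
assert to_signed((5 - 9) % 2**r, r) == -4      # a negative aggregate survives mod 2^r
assert to_signed(17, r) == 17                  # non-negative values pass through
```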
269
+ # 7 HARDENING SECURE FEDERATED LEARNING
270
+
271
+ In our design above, we assume a passively adversarial cloud server that faithfully performs the designated computation. We now present a practical design which can provide security against an actively adversarial cloud server with minimal performance overhead (as demonstrated in the experiments later), ensuring the correctness of computation at the cloud server. We aim for a pragmatic solution and thus follow the trend of using trusted hardware, with only a minimal assumption on its computation integrity assurance.
272
+
273
+ # 7.1 Abstract Functionality from Trusted Hardware
274
+
275
+ Inspired by the prior work [13], [23], we define the abstract functionality $\mathcal{F}_{\mathrm{TEE}}$ assumed out of the trusted hardware, which we use to harden the secure federated learning service in our system. The functionality is parameterized by a secure signing scheme $\{\mathrm{Sign}_{sk},\mathrm{Verify}_{vk}\}$ with a key pair $(sk,vk)$ . Let $\mathrm{Sign}_{sk}(m)$ denote signing a message $m$ and $\mathrm{Verify}_{vk}(\sigma ,m)$ denote verifying a signature $\sigma$ on $m$ . The functionality allows users to provide a program prog using the "install" command. It then returns $\alpha = \mathrm{Sign}_{sk}(\mathrm{Hash}(\mathrm{prog}))$ as a token for this program, which allows public integrity verification given the program prog and the signature verification key $vk$ . Subsequent invocation
276
+
277
+ of $\mathcal{F}_{\mathrm{TEE}}$ runs prog on given inputs inp and fresh randomness rd using the "Compute" command, and produces some output outp. The program prog could be stateful and may receive successive inputs in different rounds as indexed by a counter ctr and produce corresponding output $\mathrm{outp}_{\mathrm{ctr}}$ .
278
+
279
+ Let $\text{state}_{\text{ctr}}$ be an internal state maintained by $\mathcal{F}_{\mathrm{TEE}}$ . Upon the initialization, $\text{state}_{0}$ is empty (i.e., $\text{state}_{0} = \varepsilon$). On each "Compute" command for the ctr-th round, the functionality produces the output $\text{outp}_{\text{ctr}}$ and a signature $\sigma_{\text{ctr}}$ on $(\text{outp}_{\text{ctr}}, \text{ctr})$ . Then, $\text{state}_{\text{ctr}-1}$ is updated to $\text{state}_{\text{ctr}}$ by adding the tuple $\{\text{ctr}, \text{inp}_{\text{ctr}}, \text{outp}_{\text{ctr}}, \sigma_{\text{ctr}}, \text{rnd}_{\text{ctr}}\}$ . The updated state $\text{state}_{\text{ctr}}$ is always given to the host of the trusted hardware. This reflects the assumption that the internal state of the trusted hardware can be observed by the host. Note that for the output from the functionality, any party with the public key $vk$ can verify the integrity. In short, we only assume that the functionality can conduct computation and produce signed outputs.
280
+
281
+ # 7.2 Security-Hardened Federated Learning
282
+
283
+ Given the functionality $\mathcal{F}_{\mathrm{TEE}}$, we now describe how to make the above federated learning service secure against a cloud server that may not correctly conduct the designated processing. The main idea is to have every message sent by the (TEE-enabled) cloud server in the semi-honest protocol be computed by $\mathcal{F}_{\mathrm{TEE}}$, which uses the signing key pair $(sk_{\mathrm{T}}, vk_{\mathrm{T}})$. These messages can be verified by any party that knows $vk_{\mathrm{T}}$. We assume that every participant in the system learns $vk_{\mathrm{T}}$ in a reliable manner; later we will show how this can be achieved via the remote attestation mechanism of the widely popular trusted hardware Intel SGX. We also assume that each client has a signing key pair $(sk_{\mathcal{C}_i}, pk_{\mathcal{C}_i})$, where the public key $pk_{\mathcal{C}_i}$ is bound to the client's identity $\mathcal{C}_i$.
284
+
285
+ We now describe our protocol for security-hardened federated learning. First, the requester sends the code prog to the cloud server. The cloud server invokes $\mathcal{F}_{\mathrm{TEE}}$ on ("Install", prog) to receive $(\mathrm{state}_0, \alpha = \mathrm{Sign}_{sk_{\mathrm{T}}}(\mathrm{Hash}(\mathrm{prog})))$, where the token $\alpha$ is also sent to the requester and all clients in the system. The requester and each client run $\mathrm{Verify}_{vk_{\mathrm{T}}}(\alpha, \mathrm{Hash}(\mathrm{prog}))$ and abort if the check fails. After $\mathcal{F}_{\mathrm{TEE}}$ is initialized, the key setup for secure aggregation is conducted as shown in Fig. 2. Once the key setup is done, secure federated learning proceeds as shown in Fig. 3.
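+
+ On the requester/client side, the corresponding check of the install token is a one-liner; a sketch continuing the ToyTEE model above, assuming $vk_{\mathrm{T}}$ and the program source have been distributed reliably:
+
+ ```python
+ import hashlib
+ from cryptography.exceptions import InvalidSignature
+
+ def verify_install(vk_T, alpha: bytes, prog_src: str) -> bool:
+     """Check alpha = Sign_sk(Hash(prog)); the caller aborts on failure."""
+     try:
+         vk_T.verify(alpha, hashlib.sha256(prog_src.encode()).digest())
+         return True
+     except InvalidSignature:
+         return False
+ ```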
286
+
287
+ # 7.3 Realization of the Ideal Functionality $\mathcal{F}_{\mathrm{TEE}}$
288
+
289
+ The above functionality $\mathcal{F}_{\mathrm{TEE}}$ can potentially be realized via any trusted hardware technique that provides code attestation and signing. As an instantiation, we resort to Intel SGX due to its wide integration in commodity processors. SGX enables the execution of programs in secure enclaves that are isolated from all other applications on the same host. It also provides attestation mechanisms that assure the integrity of the program loaded into the enclave once it has been attested. In particular, a signed attestation report can be generated for the loaded program, which anyone can verify using a public key corresponding to Intel's report signing key. The signing key pair $(sk_{\mathrm{T}},vk_{\mathrm{T}})$ of the functionality $\mathcal{F}_{\mathrm{TEE}}$ can be generated inside the enclave, and the verification key $vk_{\mathrm{T}}$ can be part of the payload
290
+
291
+ 1: At the beginning of round ctr, the cloud server invokes $\mathcal{F}_{\mathrm{TEE}}$ on ("Compute", ctr) and receives (ctr, $\mathcal{T}_{\mathrm{ctr}}, \mathbf{w}^{\mathrm{ctr}-1}, \sigma$ ) as the updated state, which is also sent to each client $\mathcal{C}_k$ in $\mathcal{T}_{\mathrm{ctr}}$ .
292
+ 2: Each selected client in $\mathcal{T}_{\mathrm{ctr}}$ runs $\mathrm{Verify}_{vk_{\mathrm{T}}}(\sigma, (\mathrm{ctr},\mathcal{T}_{\mathrm{ctr}},\mathbf{w}^{\mathrm{ctr} - 1}))$ and aborts if the verification fails.
293
+ 3: Each selected client $\mathcal{C}_k$ does local training and produces an obscured (quantized) model update $\mathbf{w}'_k$ .
294
+ 4: The tuple $(\mathrm{ctr},\mathcal{C}_k,\mathbf{w}'_k,\sigma_{\mathcal{C}_k})$ is sent to $\mathcal{F}_{\mathrm{TEE}}$ and also revealed to the cloud server.
295
+ 5: The cloud server invokes $\mathcal{F}_{\mathrm{TEE}}$ on ("Compute", ctr, $\{\mathcal{C}_k,\mathbf{w}_k',\sigma_{\mathcal{C}_k}\}_{k\in \mathcal{T}_{\mathrm{ctr}}})$ . It receives $(\mathrm{ctr} + 1,\mathcal{T}_{\mathrm{ctr} + 1},\mathbf{w}^{\mathrm{ctr}},\sigma)$ for the next round, or $\perp$ if the verification of any selected client's signature fails.
296
+
297
+ Fig. 3. Aggregation in our security-hardened federated learning design.
298
+
299
+ of the signed attestation report so that it is made public reliably. This verification key $vk_{\mathsf{T}}$ can then be used to verify the signed outputs from the functionality throughout the computation procedure in federated learning.
300
+
301
+ # 8 EXPERIMENTS
302
+
303
+ # 8.1 Setup
304
+
305
+ We implement the client in Python and use PyTorch for model training. For the federated learning service, we use C++ since the official SGX SDK only provides C++ interfaces. We adopt Thrift [40] for cross-language communication: the federated learning service is implemented as a daemon on a Thrift server that exposes an interface invoked by a Thrift-generated Python service client. Each client uses the service client to connect to the server and upload its model update. The daemon passes the model update to the enclave and gets back the global model and its signature as the result. In our experiments, we avoid expensive EPC paging in the enclave by temporarily storing all received (obscured) model updates and their signatures in the untrusted memory of the host server; aggregation in the enclave is then performed by reading in and verifying the (obscured) model updates one by one.
306
+
307
+ We use three popular datasets in our experiments: the commonly used MNIST and CIFAR-10 datasets, and the CelebA dataset from the LEAF federated learning benchmark framework [41]. The MNIST dataset contains images of handwritten digits 0-9, with 60,000 training examples and 10,000 testing examples. The CIFAR-10 dataset contains 50,000 training color images and 10,000 testing color images across 10 classes. The CelebA dataset contains 200,288 celebrity images, each with 40 attribute annotations; in this paper we use it for the application of gender classification. For MNIST, we use a relatively simple CNN model with two 5x5 convolution layers, a dropout layer, and two fully connected layers (21,840 parameters in total). For CIFAR-10, we rely on the popular AlexNet [42], which has a more sophisticated structure and a larger size but uses fewer convolution kernels (23,272,266 parameters in total). For CelebA, we use ResNet-18 [43], a popular CNN model that is 18 layers deep (13,962,562 parameters in total). Hereafter, to facilitate
308
+
309
+ ![](images/12756b617eb667ecfacdf9392802a30b10cb91324b92188d89795acddea03149.jpg)
310
+ Fig. 4. Effect of the scaling factor $L$ on accuracy in the proposed secure federated learning design over different datasets.
311
+
312
+ ![](images/cbb7e6dfdb95649bd7e3c01ed35864e79afc2fd20006b275b2b2f7cc314e6b34.jpg)
313
+
314
+ ![](images/c7054d0dff01085aaebd2299d530dbb27851b15a5ed2bac2adecd2d113b2c004.jpg)
315
+
316
+ presentation, we will refer to the models over the different datasets as the MNIST model, the CIFAR model, and the CelebA model, respectively. All evaluations are conducted on an SGX-enabled server equipped with an Intel Xeon E-2288G 3.70GHz CPU (8 cores), 128GB RAM, and three RTX 3090 GPUs.
317
+
318
+ # 8.2 Accuracy Evaluation
319
+
320
+ We evaluate three cases: (i) plaintext federated learning with no privacy protection for model updates; (ii) our basic secure protocol without quantization, where a scaling factor is used to scale model parameters into integers for cryptographic computation; and (iii) our secure protocol extended with quantization.
321
+
322
+ For the MNIST and CIFAR-10 datasets, we randomly shuffle the training examples and evenly distribute them across 100 clients, so each client receives 600 examples for MNIST and 500 examples for CIFAR-10. For CelebA, we divide the dataset into a training set and a test set with a 70/30 ratio and distribute the training set across the clients, with each client holding about 2002 images. This way of partitioning is referred to as the IID data distribution [1]. The fraction of clients selected in each round is set to $10\%$ . Each selected client performs local training over 5 epochs, with a learning rate of 0.01 and a batch size of 10. The quantization threshold $B$ is set to 0.5 for the MNIST model and to 0.1 for the CIFAR and CelebA models, respectively. For our secure protocol extended with quantization, we examine quantization bit widths of 8 and 16.
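+
+ For reference, a minimal sketch of this IID partitioning at the index level (dataset loading is omitted; names are illustrative):
+
+ ```python
+ import numpy as np
+
+ def iid_partition(num_examples: int, num_clients: int, seed: int = 0):
+     """Shuffle example indices and split them evenly across clients."""
+     rng = np.random.default_rng(seed)
+     return np.array_split(rng.permutation(num_examples), num_clients)
+
+ # MNIST: 60,000 training examples over 100 clients -> 600 indices per client.
+ client_indices = iid_partition(60000, 100)
+ assert all(len(s) == 600 for s in client_indices)
+ ```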
323
+
324
+ To start, we evaluate the effect of varying scaling factors on model accuracy in our secure federated learning design; the results are shown in Fig. 4. We observe that the use of an appropriate scaling factor does not adversely affect accuracy as the number of rounds grows: as long as a sufficiently large scaling factor is used, the model accuracy is maintained with respect to the plaintext baseline. Such accuracy preservation is also consistent with the literature that uses the scaling-factor trick for cryptographic computation (e.g., [44], [45], [46], to list a few). Hereafter, we set the scaling factor to $10^{7}$ in all remaining experiments.
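+
+ The scaling trick itself amounts to a fixed-point conversion; a minimal sketch with the factor $10^{7}$ adopted here (int64 is used in the sketch only to leave headroom during summation):
+
+ ```python
+ import numpy as np
+
+ SCALE = 10 ** 7  # scaling factor adopted in our experiments
+
+ def to_fixed_point(update):
+     """Scale fractional model parameters into integers for the
+     cryptographic aggregation protocol."""
+     return np.round(np.asarray(update) * SCALE).astype(np.int64)
+
+ def from_fixed_point(aggregate):
+     """Undo the scaling once the aggregate has been computed."""
+     return np.asarray(aggregate, dtype=np.float64) / SCALE
+ ```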
325
+
326
+ In Fig. 5, we show the evolution of the testing accuracy under a varying number of rounds over the MNIST, CIFAR, and CelebA datasets, respectively. Note that the legend "Plaintext FL" refers to the plaintext federated learning setting where the raw values of model updates in 32-bit floating-point representation are used. The legend
327
+
328
+ TABLE 3 Size of Model Updates (in MB)
329
+
330
+ <table><tr><td>Setting</td><td>MNIST</td><td>CIFAR</td><td>CelebA</td></tr><tr><td>Plaintext FL</td><td>0.083</td><td>88.78</td><td>53.26</td></tr><tr><td>Sec. FL (scaling)</td><td>0.083</td><td>88.78</td><td>53.26</td></tr><tr><td>Sec. FL (16-bit quantization)</td><td>0.042</td><td>44.49</td><td>26.63</td></tr><tr><td>Sec. FL (8-bit quantization)</td><td>0.021</td><td>22.19</td><td>13.32</td></tr></table>
331
+
332
+ "scaling" refers to the setting where a scaling factor $(10^{7})$ is used to scale up the fractional values in model updates into 32-bit integers to support computation in the cryptographic aggregation protocol. The legend "8/16-bit quantization" refers to the setting where a 8-/16-bit quantizer is applied over the fractional values in model updates, leading to 8-/16-bit integers for supporting communication efficiency optimization in the ciphertext domain. It can be observed that our secure protocols share similar behavior with the baseline and achieve comparable accuracy. The unique integration of quantization (with adequate bit width) does not adversely affect the quality of the trained model either.
333
+
334
+ In addition to the IID setting, following the seminal work [1] on federated learning, we further examine a non-IID setting over the MNIST dataset. The non-IID data distribution is set up by sorting the training images according to their digit labels, dividing them into shards of size 300, and randomly distributing two shards to each client. We show the accuracy evaluation results in Fig. 6, which demonstrate behavior similar to, and accuracy comparable with, the plaintext baseline. Note that data heterogeneity is a property of the federated learning paradigm itself and is independent of our security design; it has no bearing on client dropout. Client dropout only affects the actual number of model updates aggregated to produce the global model, and our system ensures the correctness of secure aggregation even in the case of client dropout.
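+
+ A sketch of this label-sorted sharding (60,000 MNIST examples give 200 shards of 300; with two shards per client, each of the 100 clients holds examples of only a few digits):
+
+ ```python
+ import numpy as np
+
+ def non_iid_partition(labels, num_clients=100, shard_size=300,
+                       shards_per_client=2, seed=0):
+     """Sort example indices by label, cut them into shards, and hand each
+     client `shards_per_client` randomly chosen shards."""
+     rng = np.random.default_rng(seed)
+     order = np.argsort(labels, kind="stable")  # group indices by digit label
+     shards = [order[i:i + shard_size] for i in range(0, len(order), shard_size)]
+     pick = rng.permutation(len(shards))
+     return [np.concatenate([shards[j] for j in
+                             pick[i * shards_per_client:(i + 1) * shards_per_client]])
+             for i in range(num_clients)]
+ ```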
335
+
336
+ # 8.3 Performance Evaluation
337
+
338
+ # 8.3.1 Client-Side Performance
339
+
340
+ We first examine the client-side computation performance. Recall that encrypting a model update requires generating blinding factors, each of which takes $O(|\mathcal{T}|)$ hashing operations, where $|\mathcal{T}|$ is the number of clients selected in a round. The encryption cost thus scales with $|\mathcal{T}|$ (and, inherently, with the size of the model update). We show the client's
341
+
342
+ ![](images/7d31c2ffd85bd3a53d19c0f6d4055239054289a680cd00b49a5160116daab68b.jpg)
343
+ Fig. 5. Accuracy evolution of the models over different datasets.
344
+
345
+ ![](images/a7ee379480339399c6678d9290526d983519220ea76264d04c5eecdda812f7b2.jpg)
346
+
347
+ ![](images/e5e4ad54b5c9a78e69b7619c55f9390ee34c462d61a5b6c6b03d1d7eb966d1b9.jpg)
348
+
349
+ ![](images/324069b890ec535ee7c8b13865a2dfc16054e236efd8d4efee7055dccb0d0474.jpg)
350
+ Fig. 6. MNIST model accuracy evolution under the non-IID setting.
351
+
352
+ running time for encrypting (quantized) model updates under a varying fraction of clients selected per round, over the different models, in Fig. 7. For the smallest model (MNIST), the encryption cost is on the order of a few seconds. For the CIFAR and CelebA models, the running times are on the order of minutes due to their substantially larger model sizes (23,272,266 and 13,962,562 parameters, as opposed to 21,840 parameters for the MNIST model). However, it is worth noting that our system can flexibly support client grouping to largely limit a client's computation complexity (i.e., making it independent of the fraction), as demonstrated in Fig. 7, where the selected clients are grouped with a group size of 10.
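+
+ To illustrate where the $O(|\mathcal{T}|)$ hashing cost comes from, the sketch below derives one hash-based blinding term per selected peer. This is a generic pairwise-masking illustration: the `shared_keys` map (pairwise secrets from the key setup) and the mask-derivation details are assumptions for illustration, not the exact construction of our aggregation protocol.
+
+ ```python
+ import hashlib
+ import numpy as np
+
+ def blinding_mask(my_id, peer_ids, shared_keys, round_ctr, dim, modulus=2 ** 32):
+     """Derive an additive mask from per-peer shared keys and the round counter;
+     the +/- convention makes the masks of each pair cancel in the aggregate."""
+     mask = np.zeros(dim, dtype=np.int64)
+     for peer in peer_ids:                    # O(|T|) hash invocations
+         if peer == my_id:
+             continue
+         seed = hashlib.sha256(shared_keys[frozenset((my_id, peer))]
+                               + round_ctr.to_bytes(4, "big")).digest()
+         rng = np.random.default_rng(int.from_bytes(seed, "big"))
+         pairwise = rng.integers(0, modulus, size=dim, dtype=np.int64)
+         sign = 1 if my_id < peer else -1
+         mask = (mask + sign * pairwise) % modulus
+     return mask
+ ```
+
+ In such a scheme, encrypting an update amounts to adding the mask to the (quantized) update modulo the same modulus; grouping the selected clients bounds $|\mathcal{T}|$ within each group, which is why grouping caps the per-client cost.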
353
+
354
+ We also examine the computation cost of a client in the recovery phase for handling dropouts, with the results shown in Fig. 8 under varying dropout rates over the different models. As expected, the running time of the client scales linearly with the dropout rate. The results also show that client grouping can greatly reduce this cost.
355
+
356
+ In Table 3, we report the size of a model update (i.e., the size of the model update parameters) for the plaintext case and for our secure design under the settings of scaling, 16-bit quantization, and 8-bit quantization, respectively. Our secure protocol with scaling incurs no overhead on the model update size, as the bit precision remains the same. With quantization, our secure protocol achieves a $4 \times$ reduction under 8-bit quantization and a $2 \times$ reduction under 16-bit quantization in the size of the model update transferred in a round. Recall that our system exhibits accuracy evolution similar to the baseline as the number of rounds varies. Given a target number of rounds, our system with quantization can thus reduce the communication by $2 \times$ or $4 \times$ , with comparable
357
+
358
+ TABLE 4 Performance Complexity Comparison
359
+
360
+ <table><tr><td>Approach</td><td>Client Computation</td><td>Server Computation</td></tr><tr><td>Ours</td><td>O(mn + md)</td><td>O(m(n - d))</td></tr><tr><td>[7]</td><td>O(n<sup>2</sup> + mn)</td><td>O(m(n - d) + md(n - d))</td></tr></table>
361
+
362
+ accuracy to the plaintext baseline.
363
+
364
+ # 8.3.2 Cloud Server-Side Performance
365
+
366
+ We now examine the cost of securely aggregating model updates to produce an updated global model, under the semi-honest adversary setting and the active-adversary setting respectively. The results are plotted in Fig. 9. As expected, the computation cost scales linearly with the fraction of selected clients. Notably, our protocol with security against an actively adversarial cloud server (i.e., with computation integrity against the cloud server) incurs almost no overhead over the semi-honest setting ($1\times$ , $1.005\times$ , and $1.013\times$ over the MNIST, CIFAR, and CelebA models respectively). Such minimal performance overhead is achieved because no EPC paging is required.
367
+
368
+ # 8.3.3 Comparison with Prior Work
369
+
370
+ We now compare our design with the most closely related prior work [7], which also avoids heavy cryptographic operations. Firstly, we compare the computational complexity on the client side and on the cloud server side. Suppose the dimension of each model update vector is $m$ , the number of clients selected in a round of the federated learning procedure is $n$ , and the number of dropped clients is $d$ . Overall, the secure aggregation approach in our system incurs $O(mn + md)$ computation on the client side and $O(m(n - d))$ computation on the cloud server side. In comparison, according to [7], their scheme incurs $O(n^2 + mn)$ computation on the client side and $O(m(n - d) + md(n - d))$ on the cloud server side. Table 4 summarizes this comparison of asymptotic computational complexity.
371
+
372
+ We note that the work [7] does not present experiments on real machine learning tasks. For an empirical performance comparison with [7], we test their scheme over the MNIST model and measure the client-side and server-side runtime costs under varying dropout rates and fractions of selected clients per round. Table 5 gives the comparison of the client-side cost. Our design is (up to $39 \times$ ) more efficient than
373
+
374
+ ![](images/e879d3715e317792a856bb3d38f9b652e99307fad9fb04ca1dfd78e16e8c9b92.jpg)
375
+ Fig. 7. Client's encryption cost with varying fraction of selected clients per round, over different models.
376
+
377
+ ![](images/ba806b63e57cf3a5fda3c881e01bb2575153e7a53098f44ddc560c59dd4f38f0.jpg)
378
+
379
+ ![](images/494709378a945c6d34ce6392b344da82a512cd71d59db15165e2d59b3deeef8b.jpg)
380
+
381
+ ![](images/db78801800a422b9245c18824d7176d49e6ba77326123975e45fa8775ee595f5.jpg)
382
+ Fig. 8. Client's computation cost in dealing with different dropout rates, over different models.
383
+
384
+ ![](images/8d93e29a711524abe206c055ad10f9a7480e892bb79874d0b7f0ff4a2b6e7578.jpg)
385
+
386
+ ![](images/5cd3f061656724827dcd9be606fe318f5eb8df1a125990decd4664098c6decdd.jpg)
387
+
388
+ ![](images/7a9c634b0b61dcdfef46ddb4d600239416628130c8952223ab664088b4147cdc.jpg)
389
+ Fig. 9. Cost of aggregating model updates at the cloud server, under different adversary settings and over different models.
390
+
391
+ ![](images/b114c99892f35a8ada9de57e2df7b8baf8fc6bab5e2f0b1bd77b2fb29cbfc355.jpg)
392
+
393
+ ![](images/8feb58b3892ba2ee058ad18a72005b7f8f4539248ef7f095eafcf698f7985dd3.jpg)
394
+
395
+ TABLE 5 Client Cost Comparison with Prior Work [7]
396
+
397
+ <table><tr><td rowspan="2">Dropouts</td><td rowspan="2">Client Cost (ms)</td><td colspan="5">Fraction</td></tr><tr><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td><td>0.5</td></tr><tr><td rowspan="2">0%</td><td>Ours</td><td>0.95</td><td>1.09</td><td>1.2</td><td>1.35</td><td>1.49</td></tr><tr><td>[7]</td><td>12.05</td><td>23.38</td><td>34.98</td><td>47.16</td><td>58.43</td></tr><tr><td rowspan="2">10%</td><td>Ours</td><td>16</td><td>28</td><td>42</td><td>56</td><td>69</td></tr><tr><td>[7]</td><td>11.98</td><td>23.42</td><td>35.04</td><td>46.91</td><td>58.32</td></tr><tr><td rowspan="2">20%</td><td>Ours</td><td>28</td><td>59</td><td>82</td><td>108</td><td>134</td></tr><tr><td>[7]</td><td>11.87</td><td>23.42</td><td>35.77</td><td>47.02</td><td>58.3</td></tr></table>
398
+
399
+ the work [7] when the dropout rate is zero. As the dropout rate increases, our design has a higher client cost (limited to $2.3 \times$ ), since each online client assists in the recovery by computing blinding factors whose number scales with the number of dropped clients.
400
+
401
+ TABLE 6 Server Cost Comparison with Prior Work [7]
402
+
403
+ <table><tr><td rowspan="2">Dropouts</td><td rowspan="2">Server Cost (ms)</td><td colspan="5">Fraction</td></tr><tr><td>0.1</td><td>0.2</td><td>0.3</td><td>0.4</td><td>0.5</td></tr><tr><td rowspan="2">0%</td><td>Ours</td><td>22</td><td>43</td><td>64</td><td>84</td><td>104</td></tr><tr><td>[7]</td><td>5.74</td><td>11.26</td><td>16.32</td><td>22.59</td><td>29.66</td></tr><tr><td rowspan="2">10%</td><td>Ours</td><td>38</td><td>79</td><td>115</td><td>153</td><td>196</td></tr><tr><td>[7]</td><td>13.53</td><td>41.15</td><td>83.53</td><td>141.98</td><td>220.5</td></tr><tr><td rowspan="2">20%</td><td>Ours</td><td>35</td><td>75</td><td>111</td><td>148</td><td>189</td></tr><tr><td>[7]</td><td>19.83</td><td>64.17</td><td>138.89</td><td>238.24</td><td>366.59</td></tr></table>
404
+
405
+ Regarding the server-side cost, it is observed that as the dropout rate and the fraction of selected clients increase, our design (under the semi-honest adversary setting) consumes less computation than [7]. This is because the server in their scheme
406
+
407
+ needs to reconstruct secret keys from the collected secret shares and re-compute the masks to be subtracted from the aggregate sum over the online clients. We emphasize that, unlike [7], our system does not reveal the secret keys of dropped clients, so their direct participation in future rounds is not affected. Meanwhile, our system can also provide much stronger security (computation integrity) against the server with minimal overhead, as demonstrated above.
408
+
409
+ # 9 CONCLUSION
410
+
411
+ In this paper, we present a system design for federated learning that allows clients to provide obscured model updates while aggregation is still supported. Our system departs from prior works by building on a carefully chosen cryptographic aggregation protocol, which offers lightweight encryption and aggregation as well as the ability to handle dropped clients without exposing their secret keys. For higher communication efficiency, our system also adapts recent advancements in quantization techniques to compress individual model updates. Furthermore, our system provides security beyond the common semi-honest adversary setting, ensuring computation integrity at the cloud server. We conduct an extensive evaluation over popular benchmark datasets, and the results validate the practical performance of our system.
412
+
413
+ # ACKNOWLEDGEMENT
414
+
415
+ This work was supported in part by the Guangdong Basic and Applied Basic Research Foundation under Grant 2021A1515110027, in part by the Shenzhen High-Level Talents Research Start-up Fund, in part by the Australian Research Council (ARC) Discovery Projects under Grants DP200103308 and DP180103251, in part by a Monash-Data61 collaborative research project (Data61 CRP43), in part by the Research Grants Council of Hong Kong under Grants CityU 11217819, 11217620, 11218521, N_CityU139/21, R6021-20F, and RFS2122-1S04, in part by Shenzhen Municipality Science and Technology Innovation Commission under Grant SGDX20201103093004019, and in part by the National Natural Science Foundation of China under Grant 61572412.
416
+
417
+ # REFERENCES
418
+
419
+ [1] B. McMahan, E. Moore, D. Ramage, S. Hampson, and B. A. y Arcas, "Communication-efficient learning of deep networks from decentralized data," in Proc. of AISTATS, A. Singh and X. J. Zhu, Eds., 2017.
420
+ [2] L. Melis, C. Song, E. D. Cristofaro, and V. Shmatikov, "Exploiting unintended feature leakage in collaborative learning," in Proc. of IEEE S&P, 2019.
421
+ [3] R. Xu, N. Baracaldo, Y. Zhou, A. Anwar, and H. Ludwig, "HybridAlpha: An efficient approach for privacy-preserving federated learning," in Proc. of AISec, 2019.
422
+ [4] S. Truex, N. Baracaldo, A. Anwar, T. Steinke, H. Ludwig, R. Zhang, and Y. Zhou, "A hybrid approach to privacy-preserving federated learning," in Proc. of AISec, 2019.
423
+ [5] L. T. Phong, Y. Aono, T. Hayashi, L. Wang, and S. Moriai, "Privacy-preserving deep learning via additively homomorphic encryption," IEEE Transactions on Information Forensics and Security, vol. 13, no. 5, pp. 1333-1345, 2018.
424
+ [6] C. Zhang, S. Li, J. Xia, W. Wang, F. Yan, and Y. Liu, "Batchcrypt: Efficient homomorphic encryption for cross-silo federated learning," in Proc. of USENIX ATC, 2020.
425
+
426
+ [7] K. Bonawitz, V. Ivanov, B. Kreuter, A. Marcedone, H. B. McMahan, S. Patel, D. Ramage, A. Segal, and K. Seth, "Practical secure aggregation for privacy-preserving machine learning," in Proc. of ACM CCS, 2017.
427
+ [8] T. Li, A. K. Sahu, A. Talwalkar, and V. Smith, "Federated learning: Challenges, methods, and future directions," IEEE Signal Processing Magazine, vol. 37, no. 3, pp. 50-60, 2020.
428
+ [9] K. Kursawe, G. Danezis, and M. Kohlweiss, "Privacy-friendly aggregation for the smart-grid," in Proc. of PETS, 2011, pp. 175-191.
429
+ [10] L. Melis, G. Danezis, and E. D. Cristofaro, "Efficient private statistics with succinct sketches," in Proc. of NDSS, 2016, pp. 1-15.
430
+ [11] F. McKeen, I. Alexandrovich, A. Berenzon, C. V. Rozas, H. Shafi, V. Shanbhogue, and U. R. Savagaonkar, "Innovative instructions and software model for isolated execution," in Proc. of Workshop on Hardware and Architectural Support for Security and Privacy (HASP), 2013.
431
+ [12] Intel, "Intel software guard extensions," Online at: https://software.intel.com/en-us/sgx, 2020.
432
+ [13] F. Tramèr, F. Zhang, H. Lin, J. Hubaux, A. Juels, and E. Shi, "Sealed-glass proofs: Using transparent enclaves to prove and sell knowledge," in Proc. of IEEE EuroS&P, 2017.
433
+ [14] J. So, B. Guler, and A. S. Avestimehr, "Turbo-aggregate: Breaking the quadratic aggregation barrier in secure federated learning," CoRR, vol. abs/2002.04156, 2020.
434
+ [15] S. Kadhe, N. Rajaraman, O. O. Koyluoglu, and K. Ramchandran, "Fastsecagg: Scalable secure aggregation for privacy-preserving federated learning," CoRR, vol. abs/2009.11248, 2020.
435
+ [16] B. Choi, J. Sohn, D. Han, and J. Moon, "Communication-computation efficient secure aggregation for federated learning," CoRR, vol. abs/2012.05433, 2020.
436
+ [17] K. Mandal and G. Gong, "Privfl: Practical privacy-preserving federated regressions on high-dimensional data over mobile networks," in Proc. of CCSW, 2019.
437
+ [18] F. Schuster, M. Costa, C. Fournet, C. Gkantsidis, M. Peinado, G. Mainar-Ruiz, and M. Russinovich, "VC3: trustworthy data analytics in the cloud using SGX," in Proc. of IEEE S&P, 2015.
438
+ [19] O. Ohrimenko, F. Schuster, C. Fournet, A. Mehta, S. Nowozin, K. Vaswani, and M. Costa, "Oblivious multi-party machine learning on trusted processors," in Proc. of USENIX Security, 2016.
439
+ [20] R. Bahmani, M. Barbosa, F. Brasser, B. Portela, A. Sadeghi, G. Scerri, and B. Warinschi, "Secure multiparty computation from SGX," in Proc. of FC, 2017.
440
+ [21] J. I. Choi, D. J. Tian, G. Hernandez, C. Patton, B. Mood, T. Shrimpton, K. R. B. Butler, and P. Traynor, "A hybrid approach to secure function evaluation using SGX," in Proc. of AsiaCCS, 2019.
441
+ [22] F. Tramèr and D. Boneh, "Slalom: Fast, verifiable and private execution of neural networks in trusted hardware," in Proc. of ICLR, 2019.
442
+ [23] N. Kumar, M. Rathee, N. Chandran, D. Gupta, A. Rastogi, and R. Sharma, "Cryptflow: Secure tensorflow inference," Proc. of IEEE S&P, 2020.
443
+ [24] X. Zhang, F. Li, Z. Zhang, Q. Li, C. Wang, and J. Wu, "Enabling execution assurance of federated learning at untrusted participants," in Proc. of IEEE INFOCOM, 2020.
444
+ [25] H. Duan, Y. Zheng, Y. Du, A. Zhou, C. Wang, and M. H. Au, "Aggregating crowd wisdom via blockchain: A private, correct, and robust realization," in Proc. of PerCom, 2019.
445
+ [26] G. Xu, H. Li, S. Liu, K. Yang, and X. Lin, "Verifynet: Secure and verifiable federated learning," IEEE Transactions on Information Forensics and Security, vol. 15, pp. 911-926, 2020.
446
+ [27] Y. Zheng, H. Duan, X. Tang, C. Wang, and J. Zhou, "Denoising in the dark: Privacy-preserving deep neural network-based image denoising," IEEE Transactions on Dependable and Secure Computing, vol. 18, no. 3, pp. 1261-1275, 2021.
447
+ [28] X. Liu, Y. Zheng, X. Yuan, and X. Yi, "Medisc: Towards secure and lightweight deep learning as a medical diagnostic service," in Proc. of ESORICS, 2021.
448
+ [29] Q. Li, G. Cao, and T. F. L. Porta, "Efficient and privacy-aware data aggregation in mobile sensing," IEEE Transactions on Dependable and Secure Computing, vol. 11, no. 2, pp. 115-129, 2014.
449
+ [30] Y. Zhang, Q. Chen, and S. Zhong, "Efficient and privacy-preserving min and $k$ th min computations in mobile sensing systems," IEEE Transactions on Dependable and Secure Computing, vol. 14, no. 1, pp. 9-21, 2017.
450
+ [31] M. Fang, X. Cao, J. Jia, and N. Z. Gong, "Local model poisoning attacks to byzantine-robust federated learning," in Proc. of USENIX Security, 2020.
451
+
452
+ [32] E. Bagdasaryan, A. Veit, Y. Hua, D. Estrin, and V. Shmatikov, "How to backdoor federated learning," in Proc. of AISTATS, 2020.
453
+ [33] C. Wang, K. Ren, J. Wang, and Q. Wang, "Harnessing the cloud for securely outsourcing large-scale systems of linear equations," IEEE Transactions on Parallel and Distributed Systems, vol. 24, no. 6, pp. 1172-1181, 2013.
454
+ [34] K. Bonawitz, H. Eichner, W. Grieskamp, D. Huba, A. Ingerman, V. Ivanov, C. Kiddon, J. Konecný, S. Mazzocchi, B. McMahan, T. V. Overveldt, D. Petrou, D. Ramage, and J. Roselander, "Towards federated learning at scale: System design," in Proc. of MLSys, 2019.
455
+ [35] A. Reisizadeh, A. Mokhtari, H. Hassani, A. Jadbabaie, and R. Pedarsani, "FedPAQ: A communication-efficient federated learning method with periodic averaging and quantization," in Proc. of AISTATS, 2020.
456
+ [36] D. Alistarh, D. Grubic, J. Li, R. Tomioka, and M. Vojnovic, "QSGD: communication-efficient SGD via gradient quantization and encoding," in Proc. of NeurIPS, 2017.
457
+ [37] N. Shlezinger, M. Chen, Y. C. Eldar, H. V. Poor, and S. Cui, "Federated learning with quantization constraints," in Proc. of IEEE ICASSP, 2020.
458
+ [38] J. Konecny, H. B. McMahan, F. X. Yu, P. Richtárik, A. T. Suresh, and D. Bacon, "Federated learning: Strategies for improving communication efficiency," in Proc. of NIPS Workshop on Private Multi-Party
459
+
460
+ Machine Learning, 2016.
461
+ [39] R. Shokri and V. Shmatikov, "Privacy-preserving deep learning," in Proc. of ACM CCS, 2015.
462
+ [40] Apache, "Thrift," Online at: https://thrift.apache.org, 2020.
463
+ [41] S. Caldas, S. M. K. Duddu, P. Wu, T. Li, J. Konečný, H. B. McMahan, V. Smith, and A. Talwalkar, "LEAF: A benchmark for federated settings," in Proc. of the Workshop on Federated Learning for Data Privacy and Confidentiality, 2019.
464
+ [42] A. Krizhevsky, I. Sutskever, and G. E. Hinton, "Imagenet classification with deep convolutional neural networks," in Proc. of NIPS, 2012.
465
+ [43] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proc. of IEEE CVPR, 2016.
466
+ [44] Y. Zheng, H. Duan, X. Yuan, and C. Wang, "Privacy-aware and efficient mobile crowdsensing with truth discovery," IEEE Trans. Dependable Secur. Comput., vol. 17, no. 1, pp. 121-133, 2020.
467
+ [45] J. Liu, M. Juuti, Y. Lu, and N. Asokan, "Oblivious neural network predictions via minionn transformations," in Proc. of ACM CCS, 2017.
468
+ [46] Y. Zheng, H. Duan, and C. Wang, "Learning the truth privately and confidently: Encrypted confidence-aware truth discovery in mobile crowdsensing," IEEE Transactions on Information Forensics and Security, vol. 13, no. 10, pp. 2475-2489, 2018.
2202.01xxx/2202.01971/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf3b104f231251d6e6eddce84f42ba4ccf36222b0dd0f8898aea64f1523d738b
3
+ size 531913
2202.01xxx/2202.01971/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.01xxx/2202.01993/731e1a69-ebc4-471a-a367-2cb8de686eb2_content_list.json ADDED
The diff for this file is too large to render. See raw diff