SlowGuess committed
Commit 7d292ae · verified · 1 Parent(s): 1ce99a8

Add Batch 957ff90d-2bf5-4c23-a55d-4de3c72e6d89

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50)
  1. .gitattributes +64 -0
  2. 2303.03xxx/2303.03111/9c2e044b-04ac-4072-9177-7db879c96bf8_content_list.json +0 -0
  3. 2303.03xxx/2303.03111/9c2e044b-04ac-4072-9177-7db879c96bf8_model.json +0 -0
  4. 2303.03xxx/2303.03111/9c2e044b-04ac-4072-9177-7db879c96bf8_origin.pdf +3 -0
  5. 2303.03xxx/2303.03111/full.md +0 -0
  6. 2303.03xxx/2303.03111/images.zip +3 -0
  7. 2303.03xxx/2303.03111/layout.json +0 -0
  8. 2303.03xxx/2303.03169/0b3ca306-3c4a-4fd3-93aa-1734cec9c777_content_list.json +0 -0
  9. 2303.03xxx/2303.03169/0b3ca306-3c4a-4fd3-93aa-1734cec9c777_model.json +0 -0
  10. 2303.03xxx/2303.03169/0b3ca306-3c4a-4fd3-93aa-1734cec9c777_origin.pdf +3 -0
  11. 2303.03xxx/2303.03169/full.md +472 -0
  12. 2303.03xxx/2303.03169/images.zip +3 -0
  13. 2303.03xxx/2303.03169/layout.json +0 -0
  14. 2303.03xxx/2303.03192/c02e4a23-d7da-4df7-9fea-f5f09ba71fcf_content_list.json +0 -0
  15. 2303.03xxx/2303.03192/c02e4a23-d7da-4df7-9fea-f5f09ba71fcf_model.json +0 -0
  16. 2303.03xxx/2303.03192/c02e4a23-d7da-4df7-9fea-f5f09ba71fcf_origin.pdf +3 -0
  17. 2303.03xxx/2303.03192/full.md +0 -0
  18. 2303.03xxx/2303.03192/images.zip +3 -0
  19. 2303.03xxx/2303.03192/layout.json +0 -0
  20. 2303.03xxx/2303.03199/7e08506b-fc50-4f5d-a51c-9647c02cc0e2_content_list.json +0 -0
  21. 2303.03xxx/2303.03199/7e08506b-fc50-4f5d-a51c-9647c02cc0e2_model.json +0 -0
  22. 2303.03xxx/2303.03199/7e08506b-fc50-4f5d-a51c-9647c02cc0e2_origin.pdf +3 -0
  23. 2303.03xxx/2303.03199/full.md +589 -0
  24. 2303.03xxx/2303.03199/images.zip +3 -0
  25. 2303.03xxx/2303.03199/layout.json +0 -0
  26. 2303.03xxx/2303.03202/52833c8e-2380-4398-9065-8291ff12fc58_content_list.json +1955 -0
  27. 2303.03xxx/2303.03202/52833c8e-2380-4398-9065-8291ff12fc58_model.json +0 -0
  28. 2303.03xxx/2303.03202/52833c8e-2380-4398-9065-8291ff12fc58_origin.pdf +3 -0
  29. 2303.03xxx/2303.03202/full.md +379 -0
  30. 2303.03xxx/2303.03202/images.zip +3 -0
  31. 2303.03xxx/2303.03202/layout.json +0 -0
  32. 2303.03xxx/2303.03226/49c0fa96-6cd5-4100-9cb9-f0db954478b0_content_list.json +0 -0
  33. 2303.03xxx/2303.03226/49c0fa96-6cd5-4100-9cb9-f0db954478b0_model.json +0 -0
  34. 2303.03xxx/2303.03226/49c0fa96-6cd5-4100-9cb9-f0db954478b0_origin.pdf +3 -0
  35. 2303.03xxx/2303.03226/full.md +648 -0
  36. 2303.03xxx/2303.03226/images.zip +3 -0
  37. 2303.03xxx/2303.03226/layout.json +0 -0
  38. 2303.03xxx/2303.03278/fb3d2e2e-3027-4b46-be1a-86d15f2ff2a2_content_list.json +0 -0
  39. 2303.03xxx/2303.03278/fb3d2e2e-3027-4b46-be1a-86d15f2ff2a2_model.json +0 -0
  40. 2303.03xxx/2303.03278/fb3d2e2e-3027-4b46-be1a-86d15f2ff2a2_origin.pdf +3 -0
  41. 2303.03xxx/2303.03278/full.md +469 -0
  42. 2303.03xxx/2303.03278/images.zip +3 -0
  43. 2303.03xxx/2303.03278/layout.json +0 -0
  44. 2303.03xxx/2303.03283/c2e1acdb-42dd-441d-a39c-3ad4cbc8d300_content_list.json +0 -0
  45. 2303.03xxx/2303.03283/c2e1acdb-42dd-441d-a39c-3ad4cbc8d300_model.json +0 -0
  46. 2303.03xxx/2303.03283/c2e1acdb-42dd-441d-a39c-3ad4cbc8d300_origin.pdf +3 -0
  47. 2303.03xxx/2303.03283/full.md +0 -0
  48. 2303.03xxx/2303.03283/images.zip +3 -0
  49. 2303.03xxx/2303.03283/layout.json +0 -0
  50. 2303.03xxx/2303.03297/788a8595-a785-4c27-915c-cbf6cafb8054_content_list.json +1351 -0
.gitattributes CHANGED
@@ -10275,3 +10275,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10275
  2303.12xxx/2303.12942/6d3251e1-c196-4d1b-adeb-07d86e14a3fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
10276
  2303.13xxx/2303.13528/37a9c1a1-8fd6-48d2-a347-6445772408ff_origin.pdf filter=lfs diff=lfs merge=lfs -text
10277
  2304.12xxx/2304.12202/f6a14670-e490-4f01-a284-ce934e0edc05_origin.pdf filter=lfs diff=lfs merge=lfs -text
10278
+ 2303.03xxx/2303.03111/9c2e044b-04ac-4072-9177-7db879c96bf8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10279
+ 2303.03xxx/2303.03169/0b3ca306-3c4a-4fd3-93aa-1734cec9c777_origin.pdf filter=lfs diff=lfs merge=lfs -text
10280
+ 2303.03xxx/2303.03192/c02e4a23-d7da-4df7-9fea-f5f09ba71fcf_origin.pdf filter=lfs diff=lfs merge=lfs -text
10281
+ 2303.03xxx/2303.03199/7e08506b-fc50-4f5d-a51c-9647c02cc0e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10282
+ 2303.03xxx/2303.03202/52833c8e-2380-4398-9065-8291ff12fc58_origin.pdf filter=lfs diff=lfs merge=lfs -text
10283
+ 2303.03xxx/2303.03226/49c0fa96-6cd5-4100-9cb9-f0db954478b0_origin.pdf filter=lfs diff=lfs merge=lfs -text
10284
+ 2303.03xxx/2303.03278/fb3d2e2e-3027-4b46-be1a-86d15f2ff2a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10285
+ 2303.03xxx/2303.03283/c2e1acdb-42dd-441d-a39c-3ad4cbc8d300_origin.pdf filter=lfs diff=lfs merge=lfs -text
10286
+ 2303.03xxx/2303.03297/788a8595-a785-4c27-915c-cbf6cafb8054_origin.pdf filter=lfs diff=lfs merge=lfs -text
10287
+ 2303.03xxx/2303.03301/cdf09166-2694-449b-8d76-a5197b871344_origin.pdf filter=lfs diff=lfs merge=lfs -text
10288
+ 2303.03xxx/2303.03323/18feb719-1507-4ccd-a8fe-e62bcfcf6e18_origin.pdf filter=lfs diff=lfs merge=lfs -text
10289
+ 2303.03xxx/2303.03361/0c404385-0b74-44ee-8576-428acb9bf822_origin.pdf filter=lfs diff=lfs merge=lfs -text
10290
+ 2303.03xxx/2303.03363/6dc829fb-b2e3-4ea3-b3e1-dacf8fc33c8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10291
+ 2303.03xxx/2303.03366/b6ea3a25-64a7-4cff-b0b1-e22c539ce0d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10292
+ 2303.03xxx/2303.03367/1ba66ffc-1cc9-40cc-ba1e-003f74dfbc09_origin.pdf filter=lfs diff=lfs merge=lfs -text
10293
+ 2303.03xxx/2303.03369/ca804dbb-c855-44e3-b49b-066e5d233f12_origin.pdf filter=lfs diff=lfs merge=lfs -text
10294
+ 2303.03xxx/2303.03373/810c67f4-8f7a-4bbe-b601-1c2161794908_origin.pdf filter=lfs diff=lfs merge=lfs -text
10295
+ 2303.03xxx/2303.03376/bbadfbf9-d00f-44c1-b6ad-4c2d23f1e417_origin.pdf filter=lfs diff=lfs merge=lfs -text
10296
+ 2303.03xxx/2303.03378/97d75d40-2274-4bb0-87c7-52bc355a56d5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10297
+ 2303.03xxx/2303.03381/e23034e0-f524-4bb6-af3c-cc3357f970b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
10298
+ 2303.03xxx/2303.03384/ece2be84-5063-49dc-8188-a3ad5bacd376_origin.pdf filter=lfs diff=lfs merge=lfs -text
10299
+ 2303.03xxx/2303.03428/36dad390-69f6-4677-a123-44ead98ad5d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10300
+ 2303.03xxx/2303.03446/246a60a4-4443-41d4-8d67-d0990a7eaac9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10301
+ 2303.03xxx/2303.03476/b73e8675-eb35-461a-806e-e960e75e840b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10302
+ 2303.03xxx/2303.03480/1da0a42b-0262-4426-aa05-9da2e821bd7d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10303
+ 2303.03xxx/2303.03486/c8deb93a-c895-4530-bc8a-36cdea34df4e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10304
+ 2303.03xxx/2303.03543/54caeed7-3f19-43aa-aa11-413c0ccb8e73_origin.pdf filter=lfs diff=lfs merge=lfs -text
10305
+ 2303.03xxx/2303.03548/e99c3551-aa02-40fb-baba-94f3f950b7a9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10306
+ 2303.03xxx/2303.03577/aa1534eb-d905-48b4-9194-ee4c7a56dc20_origin.pdf filter=lfs diff=lfs merge=lfs -text
10307
+ 2303.03xxx/2303.03581/a4b9e8ce-d2e8-48d3-b661-db728528d60a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10308
+ 2303.03xxx/2303.03595/5789a3c8-75cf-49c4-a757-252156f9089a_origin.pdf filter=lfs diff=lfs merge=lfs -text
10309
+ 2303.03xxx/2303.03667/c1fa6fe4-90d5-40df-933f-3dbe0e512f89_origin.pdf filter=lfs diff=lfs merge=lfs -text
10310
+ 2303.03xxx/2303.03697/7d325bc1-20f0-46d8-9875-b4ef0d35ec01_origin.pdf filter=lfs diff=lfs merge=lfs -text
10311
+ 2303.03xxx/2303.03729/c4b4f0ab-0ea2-4561-a2cd-61721921d995_origin.pdf filter=lfs diff=lfs merge=lfs -text
10312
+ 2303.03xxx/2303.03751/ed0310e6-1ce0-4eac-bc28-23ba2a4fa4c6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10313
+ 2303.03xxx/2303.03757/a7bbbb55-a41e-49b7-97cb-14308a2ac9e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10314
+ 2303.03xxx/2303.03758/44b01a2c-1b0d-4ab8-bdb3-0a27b8d4a394_origin.pdf filter=lfs diff=lfs merge=lfs -text
10315
+ 2303.03xxx/2303.03770/eb24f24e-4cb1-4377-896a-b00353e487dc_origin.pdf filter=lfs diff=lfs merge=lfs -text
10316
+ 2303.03xxx/2303.03836/38d755ce-29dc-4f43-bd23-413ec7f93a73_origin.pdf filter=lfs diff=lfs merge=lfs -text
10317
+ 2303.03xxx/2303.03846/5187a911-0e5e-4781-8e66-903864175468_origin.pdf filter=lfs diff=lfs merge=lfs -text
10318
+ 2303.03xxx/2303.03909/a54ab1f2-0ef9-4790-9629-f37dc340d9cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
10319
+ 2303.03xxx/2303.03915/b20069b0-97ba-433a-8e67-38a0e5da54c4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10320
+ 2303.03xxx/2303.03916/4336b9a3-192e-4860-bc0f-12d9a3f3f7f6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10321
+ 2303.03xxx/2303.03926/bc771add-a100-4d37-a1c4-ca24eb1d4ae7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10322
+ 2303.03xxx/2303.03932/56cd044c-ba38-4c36-b0a4-0410c378a544_origin.pdf filter=lfs diff=lfs merge=lfs -text
10323
+ 2303.03xxx/2303.03953/89d892a5-43b5-4546-a1cc-908947c3e93f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10324
+ 2303.03xxx/2303.03981/5058e458-d869-41ce-98dc-c44c2d30025d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10325
+ 2303.03xxx/2303.03982/cfe9850a-d9d7-4ca3-a1b4-b35c05223378_origin.pdf filter=lfs diff=lfs merge=lfs -text
10326
+ 2303.03xxx/2303.03988/8a314ac5-d7a5-40e7-b692-0fba95b46a5d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10327
+ 2303.03xxx/2303.03991/e7e3f5e1-566d-4a1d-97d0-fb824613dae9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10328
+ 2303.04xxx/2303.04003/95f62f45-8bd0-477e-93aa-dff3c8372394_origin.pdf filter=lfs diff=lfs merge=lfs -text
10329
+ 2303.04xxx/2303.04048/f58dab73-2e83-4731-a67c-abffd4d44cc5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10330
+ 2303.04xxx/2303.04084/56c97a2a-061e-491d-806a-c74638fabbd5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10331
+ 2303.04xxx/2303.04116/0a3e5566-3960-4009-a031-0a0f28511109_origin.pdf filter=lfs diff=lfs merge=lfs -text
10332
+ 2303.04xxx/2303.04129/900d8a35-fd44-4418-8c00-c1640a8ebad1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10333
+ 2303.04xxx/2303.04132/9cf42710-f157-47b0-8299-b7931722c904_origin.pdf filter=lfs diff=lfs merge=lfs -text
10334
+ 2303.04xxx/2303.04137/89033ed8-8c50-4e20-b0be-c94eff31ff64_origin.pdf filter=lfs diff=lfs merge=lfs -text
10335
+ 2303.04xxx/2303.04150/43e19c47-009a-413d-8a65-bd9d3c55fedf_origin.pdf filter=lfs diff=lfs merge=lfs -text
10336
+ 2303.04xxx/2303.04222/1ba1615e-72a9-4d6f-bd13-d97c372e2795_origin.pdf filter=lfs diff=lfs merge=lfs -text
10337
+ 2303.04xxx/2303.04226/71a49169-2797-48f7-a8fd-be99078aa652_origin.pdf filter=lfs diff=lfs merge=lfs -text
10338
+ 2303.04xxx/2303.04810/938c6b3c-b567-4cc6-9ed9-176e201c7516_origin.pdf filter=lfs diff=lfs merge=lfs -text
10339
+ 2303.05xxx/2303.05352/237ee661-b897-47ba-a300-7d6e3f8f9a0b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10340
+ 2303.06xxx/2303.06020/088ceb7e-b004-425f-adb2-2a1046197d4b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10341
+ 2303.08xxx/2303.08046/e51c469b-8a18-496b-adec-d8d9ff25f3d9_origin.pdf filter=lfs diff=lfs merge=lfs -text
2303.03xxx/2303.03111/9c2e044b-04ac-4072-9177-7db879c96bf8_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03111/9c2e044b-04ac-4072-9177-7db879c96bf8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03111/9c2e044b-04ac-4072-9177-7db879c96bf8_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abd8d89ba4655108875ec07f16ccf2eb75631154547bef73c8e2c7b467fa0262
3
+ size 985931
2303.03xxx/2303.03111/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03111/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5dc7405f11fdb00558a3bf8dea5b585b8fbc24a7b3dcd05eb673b5a606064191
3
+ size 1323607
2303.03xxx/2303.03111/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03169/0b3ca306-3c4a-4fd3-93aa-1734cec9c777_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03169/0b3ca306-3c4a-4fd3-93aa-1734cec9c777_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03169/0b3ca306-3c4a-4fd3-93aa-1734cec9c777_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fe64baae87b8177121f6a240edfa73b2481f996e792809affc8795e3a5d3e67b
3
+ size 266217
2303.03xxx/2303.03169/full.md ADDED
@@ -0,0 +1,472 @@
1
+ # A UNIFIED ALGEBRAIC PERSPECTIVE ON LIPSCHITZ NEURAL NETWORKS
2
+
3
+ Alexandre Araujo\*<sup>1</sup>, Aaron Havens\*<sup>2</sup>, Blaise Delattre<sup>3,4</sup>, Alexandre Allauzen<sup>3,5</sup> and Bin Hu<sup>2</sup>
4
+
5
+ <sup>1</sup> INRIA, Ecole Normale Supérieure, CNRS, PSL University, Paris, France
6
+ $^{2}$ CSL & ECE, University of Illinois Urbana-Champaign, IL, USA
7
+ <sup>3</sup> Miles Team, LAMSADE, Université Paris-Dauphine, PSL University, Paris, France
8
+ <sup>4</sup> Foxstream, Vaulx-en-Velin, France
9
+ <sup>5</sup> ESPCI PSL, Paris, France
10
+
11
+ # ABSTRACT
12
+
13
+ Important research efforts have focused on the design and training of neural networks with a controlled Lipschitz constant. The goal is to increase and sometimes guarantee the robustness against adversarial attacks. Recent promising techniques draw inspiration from different backgrounds to design 1-Lipschitz neural networks, just to name a few: convex potential layers derive from the discretization of continuous dynamical systems, and Almost-Orthogonal-Layer proposes a tailored method for matrix rescaling. However, it is now important to consider the recent and promising contributions in the field under a common theoretical lens to better design new and improved layers. This paper introduces a novel algebraic perspective unifying various types of 1-Lipschitz neural networks, including the ones previously mentioned, along with methods based on orthogonality and spectral methods. Interestingly, we show that many existing techniques can be derived and generalized via finding analytical solutions of a common semidefinite programming (SDP) condition. We also prove that AOL biases the scaled weight towards the set of orthogonal matrices in a precise mathematical sense. Moreover, our algebraic condition, combined with the Gershgorin circle theorem, readily leads to new and diverse parameterizations for 1-Lipschitz network layers. Our approach, called SDP-based Lipschitz Layers (SLL), allows us to design non-trivial yet efficient generalizations of convex potential layers. Finally, a comprehensive set of experiments on image classification shows that SLLs outperform previous approaches on certified robust accuracy. Code is available at github.com/araujoalexandre/Lipschitz-SLL-Networks.
14
+
15
+ (10/26/2023): An erratum is added in Appendix D. This is an updated version that fixes an implementation issue in the previous version. Due to that issue, the numerical results in our original ICLR paper are not accurate. We elaborate on the issue and provide a fix in Appendix D.
16
+
17
+ # 1 INTRODUCTION
18
+
19
+ The robustness of deep neural networks is nowadays a great challenge for establishing confidence in their decisions in real-life applications. Addressing this challenge requires guarantees on the stability of the prediction with respect to adversarial attacks. In this context, the Lipschitz constant of neural networks is a key property at the core of many recent advances. Along with the margin of the classifier, this property allows us to certify the robustness against worst-case adversarial perturbations. This certification is based on a sphere of stability within which the decision remains the same for any perturbation (Tsuzuku et al., 2018).
20
+
21
+ The design of 1-Lipschitz layers provides a successful approach to enforce this property for the whole neural network. For this purpose, many different techniques have been devised, such as spectral normalization (Miyato et al., 2018; Farnia et al., 2019), orthogonal parameterization (Trockman et al., 2021; Li et al., 2019; Singla et al., 2021; Yu et al., 2022; Xu et al., 2022), Convex Potential Layers (CPL) (Meunier et al., 2022), and Almost-Orthogonal-Layers (AOL) (Prach et al., 2022). While all these techniques share the same goal, their motivations and derivations can greatly differ, delivering different solutions. Nevertheless, a raw experimental comparison fails to provide real insight into their respective performance, soundness, and, in the end, their possible complementarity. Therefore, a question acts as a barrier to an in-depth analysis and future development:
22
+
23
+ # Are there common principles underlying the development of 1-Lipschitz layers?
24
+
25
+ In this paper, we propose a novel perspective to answer this question based on a unified Semidefinite Programming (SDP) approach. We introduce a common algebraic condition underlying various types of methods like spectral normalization, orthogonality-based methods, AOL, and CPL. Our key insight is that this condition can be formulated as a unifying and simple SDP problem, and that the development of 1-Lipschitz architectures systematically arises from finding "analytical solutions" of this SDP. Our main contributions are summarized as follows.
26
+
27
+ - We provide a unifying algebraic perspective for 1-Lipschitz network layers by showing that existing techniques such as spectral normalization, orthogonal parameterization, AOL, and CPL can all be recast as a solution of the same simple SDP condition (Theorem 1 and related discussions). Consequently, any new analytical solutions of our proposed SDP condition will immediately lead to new 1-Lipschitz network structures.
28
+ - Built upon the above algebraic viewpoint, we give a rigorous mathematical interpretation for AOL explaining how this method promotes "almost orthogonality" in training (Theorem 2).
29
+ - Based on our SDPs, a new family of 1-Lipschitz network structures termed SDP-based Lipschitz layers (SLL) is developed. Specifically, we apply the Gershgorin circle theorem to obtain new SDP solutions, leading to non-trivial extensions of CPL (Theorem 3). We also derive new SDP conditions to characterize SLL in a very general form (Theorem 4).
30
+ - Finally, we show, by a comprehensive set of experiments, that our new SDP-based Lipschitz layers outperform previous approaches on certified robust accuracy.
31
+
32
+ Our work is inspired by Fazlyab et al. (2019) that develops SDP conditions for numerical estimation of Lipschitz constants of given neural networks. A main difference is that we focus on "analytical SDP solutions" which can be used to characterize 1-Lipschitz network structures.
33
+
34
+ # 2 RELATED WORK
35
+
36
+ In recent years, certified methods have been central to the development of trustworthy machine learning and especially for deep learning. Randomized Smoothing (Cohen et al., 2019; Salman et al., 2019) is one of the first defenses to offer provable robustness guarantees. The method extends a given classifier by introducing random noise to enhance the robustness of the classifier. Although this method offers an interesting level of certified robustness, it suffers from important downsides such as the high computational cost of inference and some impossibility results from an information-theoretic perspective (Yang et al., 2020; Kumar et al., 2020).
37
+
38
+ Another approach to certify the robustness of a classifier is to control its Lipschitz constant (Hein et al., 2017; Tsuzuku et al., 2018). The main idea is to derive a certified radius in the feature space by upper bounding the margin of the classifier. See Proposition 1 of Tsuzuku et al. (2018) for more details. This radius, along with the Lipschitz constant of the network, can certify the robustness. In order to reduce the Lipschitz constant and obtain a non-trivial certified accuracy, Tsuzuku et al. (2018) and Leino et al. (2021) both upper bound the margin via computing a bound on the global Lipschitz constant; however, these bounds have proved to be loose. Instead of upper bounding the global Lipschitz constant, Huang et al. (2021b) leverages local information to get tighter bounds on the Lipschitz constant. On the other hand, other works, instead of upper bounding the local or global Lipschitz constant, devised neural network architectures that are provably 1-Lipschitz. One of the first approaches in this direction consists of normalizing each layer with its
39
+
40
+ spectral norm (Miyato et al., 2018; Farnia et al., 2019). Each layer is, by construction, 1-Lipschitz. Later, a body of research replaces the normalized weight matrix by an orthogonal matrix. This improves upon spectral normalization by also preserving gradient norms (Li et al., 2019; Trockman et al., 2021; Singla et al., 2021; Yu et al., 2022; Xu et al., 2022). These methods constrain the parameters by orthogonality during training. Specifically, the Cayley transform can be used to constrain the weights (Trockman et al., 2021) and, in a similar fashion, SOC (Singla et al., 2021) parameterizes its layers with the exponential of a skew-symmetric matrix, which makes them orthogonal. To reduce cost, Trockman et al. (2021), Yu et al. (2022), and Xu et al. (2022) orthogonalize their convolutional kernels in the Fourier domain.
41
+
42
+ More recently, a work by Meunier et al. (2022) has studied Lipschitz networks from a dynamical system perspective. Starting from the continuous view of a residual network, they showed that the parameterization with the Cayley transform (Trockman et al., 2021) and SOC (Singla et al., 2021) correspond respectively to two specific discretization schemes of the continuous flow. Furthermore, a new layer is derived from convex potential flows to ensure the 1-Lipschitz property<sup>1</sup>:
43
+
44
+ $$
45
+ z = x - \frac {2}{\| W \| _ {2} ^ {2}} W \sigma \left(W ^ {\top} x + b\right), \tag {1}
46
+ $$
47
+
48
+ where $\| W\| _2$ is the spectral norm of the weight matrix $W$ and $\sigma$ is the ReLU activation function. In general, the training of orthogonal layers can be expensive. The Cayley approach involves a matrix inversion, and the implementation of SOC requires either an SVD or an iterative Taylor expansion. The CPL approach can be more efficient, although the computation of $\| W\| _2$ is still needed.
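For concreteness, here is a minimal PyTorch sketch (ours, not from the paper) of a dense CPL block implementing Eq. (1); the class name `CPLLinear` and the exact spectral-norm computation are illustrative choices, and practical implementations typically estimate $\|W\|_2$ with power iteration instead.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CPLLinear(nn.Module):
    """Dense Convex Potential Layer, Eq. (1): z = x - (2/||W||_2^2) W relu(W^T x + b)."""

    def __init__(self, dim: int):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(dim, dim) / dim ** 0.5)
        self.bias = nn.Parameter(torch.zeros(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # Exact spectral norm for clarity; power iteration is the usual practical choice.
        sn_sq = torch.linalg.matrix_norm(self.weight, ord=2) ** 2
        pre_act = F.relu(x @ self.weight + self.bias)   # sigma(W^T x + b) for row-vector x
        return x - (2.0 / sn_sq) * pre_act @ self.weight.T

layer = CPLLinear(16)
x, y = torch.randn(2, 16), torch.randn(2, 16)
# The layer is non-expansive, so pairwise distances should not increase.
print(torch.norm(layer(x) - layer(y)) <= torch.norm(x - y) + 1e-5)
```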
49
+
50
+ A recent work, the Almost-Orthogonal-Layer (AOL) (Prach et al., 2022), came up with a middle ground: a new normalization which makes the layer 1-Lipschitz by favoring orthogonality. The fully-connected AOL layer is defined as $z = WDx + b$, where $D$ is a diagonal matrix given by:
51
+
52
+ $$
53
+ D = \operatorname{diag}\left(\sum_{j} |W^{\top}W|_{ij}\right)^{-\frac{1}{2}} \tag{2}
54
+ $$
55
+
56
+ They demonstrated that this layer is 1-Lipschitz and empirically showed that, after training, the Jacobian of the layer (with respect to $x$) is almost orthogonal, hence facilitating training.
57
+
58
+ Another source of inspiration is the application of convex programs for robustness certification of neural networks (Wong et al., 2018; Raghunathan et al., 2018; Fazlyab et al., 2019; Revay et al., 2020; Fazlyab et al., 2020; Wang et al., 2022). The most relevant work is Fazlyab et al. (2019), which leverages the quadratic constraint approach from control theory (Megretski et al., 1997) to formulate SDPs for estimating the global Lipschitz constant of neural networks numerically. It is possible to solve such SDPs numerically for training relatively small Lipschitz networks (Pauli et al., 2021). However, due to the restrictions of existing SDP solvers, scalability has been one issue when deploying such approaches to deep learning problems with large data sets. Our focus is on the design of Lipschitz network structures, and we avoid the scalability issue via solving SDPs analytically.
59
+
60
+ # 3 BACKGROUND
61
+
62
+ Notation. The $n \times n$ identity matrix and the $n \times n$ zero matrix are denoted as $I_{n}$ and $0_{n}$ , respectively. The subscripts will be omitted when the dimension is clear from the context. When a matrix $P$ is negative semidefinite (definite), we will use the notation $P \preceq (\prec)0$ . When a matrix $P$ is positive semidefinite (definite), we will use the notation $P \succeq (\succ)0$ . Let $e_{i}$ denote the vector whose $i$ -entry is 1 and all other entries are 0. Given a collection of scalars $\{a_{i}\}_{i=1}^{n}$ , we use the notation $\mathrm{diag}(a_{i})$ to denote the $n \times n$ diagonal matrix whose $(i,i)$ -th entry is $a_{i}$ . For a matrix $A$ , the following notations $A^{\mathsf{T}}$ , $\|A\|_{2}$ , $\mathrm{tr}(A)$ , $\sigma_{\min}(A)$ , $\|A\|_{F}$ , and $\rho(A)$ stand for its transpose, largest singular value, trace, smallest singular value, Frobenius norm, and spectral radius, respectively.
63
+
64
+ Lipschitz functions. A function $f: \mathbb{R}^n \to \mathbb{R}^m$ is $L$ -Lipschitz with respect to the $\ell_2$ norm iff it satisfies $\| f(x) - f(y) \| \leq L \| x - y \|$ for all $x, y \in \mathbb{R}^n$ , where $\| \cdot \|$ stands for the $\ell_2$ norm. An important fact is that the robustness of a neural network can be certified based on its Lipschitz constant
65
+
66
+ (Tsuzuku et al., 2018). In this paper, we are interested in the case where $L = 1$. Specifically, we consider the training of 1-Lipschitz neural networks. If each layer of a neural network is 1-Lipschitz, then the entire neural network is also 1-Lipschitz. The Lipschitz constant also satisfies the triangle inequality, and hence convex combinations preserve the 1-Lipschitz property.
67
+
68
+ Matrix cones: Positive semidefiniteness and diagonal dominance. Let $\mathbf{S}^n$ denote the set of all $n\times n$ real symmetric matrices. Let $\mathbf{S}_+^n\subset \mathbf{S}^n$ be the set of all $n\times n$ symmetric positive semidefinite matrices. It is well known that $\mathbf{S}_+^n$ is a closed, pointed convex cone in $\mathbf{S}^n$. With the trace inner product, $\mathbf{S}_+^n$ is also self-dual. Consider two symmetric matrices $A$ and $B$ such that $A\succeq B\in \mathbf{S}^{n}$; then we have $A - B\in \mathbf{S}_{+}^{n}$, and $\operatorname{tr}(A - B)$ provides a distance measure between $A$ and $B$. In addition, we have $\| A - B\| _F\leq \operatorname{tr}(A - B)$. Finally, the set of all $n\times n$ real symmetric diagonally dominant matrices with non-negative diagonal entries is represented by $\mathbf{D}^n$. It is known that $\mathbf{D}^n$ forms a closed, pointed, full cone (Barker et al., 1975). Based on the Gershgorin circle theorem (Horn et al., 2012), we know $\mathbf{D}^n\subset \mathbf{S}_+^n$. It is also known that $\mathbf{D}^n$ is smaller than $\mathbf{S}_+^n$ (Barker et al., 1975). For any $A\in \mathbf{D}^n$, we have $A_{ii}\geq \sum_{j:j\neq i}|A_{ij}|$. It is important to require $A_{ii}\geq 0$: without this requirement, the set of real symmetric diagonally dominant matrices does not form a convex cone.
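The following NumPy sketch (illustrative, not part of the paper) checks these two facts numerically: a symmetric diagonally dominant matrix with non-negative diagonal is positive semidefinite (Gershgorin), and $\|A - B\|_F \leq \operatorname{tr}(A - B)$ whenever $A - B \in \mathbf{S}_+^n$.

```python
import numpy as np

rng = np.random.default_rng(0)

# A symmetric matrix made diagonally dominant with non-negative diagonal entries.
M = rng.standard_normal((5, 5))
A = (M + M.T) / 2
np.fill_diagonal(A, np.abs(A).sum(axis=1))   # A_ii = sum_j |A_ij| >= sum_{j != i} |A_ij|
print("diagonally dominant => PSD:", np.linalg.eigvalsh(A).min() >= -1e-10)

# ||A - B||_F <= tr(A - B) whenever A - B is positive semidefinite.
C = rng.standard_normal((5, 5))
B = A - C @ C.T                              # A - B = C C^T is PSD by construction
D = A - B
print("||A - B||_F <= tr(A - B):", np.linalg.norm(D, "fro") <= np.trace(D) + 1e-10)
```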
69
+
70
+ # 4 AN ALGEBRAIC UNIFICATION OF 1-LIPSCHITZ LAYERS
71
+
72
+ In this section, we present a unified algebraic perspective for various 1-Lipschitz layers (Spectral Normalization, Orthogonalization, AOL, and CPL) via developing a common SDP condition characterizing the Lipschitz property. Built upon our algebraic viewpoint, we also present a new mathematical interpretation explaining how AOL promotes orthogonality in training.
73
+
74
+ # 4.1 THE UNIFYING ALGEBRAIC CONDITION
75
+
76
+ First, we present an algebraic condition which can be used to unify the developments of existing techniques such as spectral normalization (SN), AOL, and CPL. Our main theorem is formalized below.
77
+
78
+ Theorem 1. For any weight matrix $W \in \mathbb{R}^{m \times n}$ , if there exists a nonsingular diagonal matrix $T$ such that $W^{\mathrm{T}}W - T \preceq 0$ , then the two following statements hold true.
79
+
80
+ 1. The mapping $g(x) = WT^{-\frac{1}{2}}x + b$ is 1-Lipschitz.
81
+ 2. The mapping $h(x) = x - 2WT^{-1}\sigma (W^{\top}x + b)$ is 1-Lipschitz if $\sigma$ is ReLU, tanh or sigmoid.
82
+
83
+ The proof of the above theorem and some related control-theoretic interpretations are provided in the appendix. This theorem allows us to design different 1-Lipschitz layers just with various choices of $T$ , in two important cases: for a linear transformation with Statement 1, as well as for a residual and non-linear block with Statement 2. Moreover, for any given weight matrix $W$ , the condition $W^{\mathrm{T}}W \preceq T$ is linear in $T$ , and hence can be viewed as an SDP condition with decision variable $T$ . To emphasize the significance of this theorem, we propose to derive existing methods used for designing 1-Lipschitz layers by choosing specific $T$ for the SDP condition $W^{\mathrm{T}}W \preceq T$ . The 1-Lipschitz property is then automatically obtained.
84
+
85
+ - Spectral Normalization (SN) corresponds to an almost trivial choice if we notice that $W^{\mathsf{T}}W \preceq \| W^{\mathsf{T}}W\|_{2}I \preceq \| W\|_{2}^{2}I$ . Hence with $T = \| W\|_{2}^{2}I$ , we build the SN layer $g(x) = WT^{-\frac{1}{2}}x + b = \frac{1}{\|W\|_2} Wx + b$ .
86
+ - The Orthogonality-based parameterization is obtained by setting $T = I$ and enforcing the equality $W^{\top}W = T = I$ . Then obviously $g(x) = Wx + b$ is 1-Lipschitz.
87
+ - AOL formula can be derived by letting $T = \mathrm{diag}(\sum_{j=1}^{n} |W^{\mathsf{T}}W|_{ij})$ . With this choice, we have $T - W^{\mathsf{T}}W \in \mathbf{D}^{n} \subset \mathbf{S}_{+}^{n}$ , hence $W^{\mathsf{T}}W \preceq T$ . Then Statement 1 in Theorem 1 implies that the AOL layer, written as $g(x) = WT^{-\frac{1}{2}}x + b$ , is 1-Lipschitz.
88
+
89
+ - CPL follows the same SN choice $T = \| W\| _2^2 I$ , but with Statement 2 of Theorem 1. Hence we derive a different function $h(x) = x - \frac{2}{\|W\|_2^2} W\sigma (W^\top x + b)$ which is also 1-Lipschitz.
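As a sanity check of the choices of $T$ listed above, the following NumPy sketch (ours) instantiates the SN and AOL scalings for a random $W$ and verifies both the SDP condition $W^{\mathsf{T}}W \preceq T$ and the resulting bound $\|WT^{-1/2}\|_2 \leq 1$ from Statement 1.

```python
import numpy as np

rng = np.random.default_rng(1)
W = rng.standard_normal((8, 6))
G = W.T @ W                                         # Gram matrix W^T W

T_sn = (np.linalg.norm(W, 2) ** 2) * np.eye(6)      # Spectral Normalization: T = ||W||_2^2 I
T_aol = np.diag(np.abs(G).sum(axis=1))              # AOL: T = diag(sum_j |W^T W|_ij)

for name, T in [("SN", T_sn), ("AOL", T_aol)]:
    psd_ok = np.linalg.eigvalsh(T - G).min() >= -1e-9      # SDP condition W^T W <= T
    scaled = W @ np.diag(1.0 / np.sqrt(np.diag(T)))        # W T^{-1/2}
    lip_ok = np.linalg.norm(scaled, 2) <= 1 + 1e-9          # Statement 1: 1-Lipschitz map
    print(f"{name}: W^T W <= T: {psd_ok}, ||W T^(-1/2)||_2 <= 1: {lip_ok}")
```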
90
+
91
+ The above discussion illustrates the benefit of expressing all these methods within the same theoretical framework, offering us a new tool to characterize the similarity between different methods. For instance, SN and CPL share the same choice of $T = \| W\| _2^2 I$ . The difference between them is which statement is used. Hence CPL can be viewed as the "residual version" of SN. Clearly, the residual network structure allows CPL to address the gradient vanishing issue more efficiently than SN. With the same approach, we can readily infer from our unified algebraic condition what are the "residual" counterparts for orthogonality-based parameterization and AOL. For orthogonality-based parameterization, if we enforce $W^{\mathsf{T}}W = T = I$ via methods such as SOC and ECO, then the function $h(x) = x - 2W\sigma (W^{\mathsf{T}}x + b)$ is 1-Lipschitz (by Statement 2 in Theorem 1). Finally, if we choose $T = \mathrm{diag}\left(\sum_{j = 1}^{n}|W^{\mathsf{T}}W|_{ij}\right)$ , then the function $h(x) = x - 2W\operatorname {diag}\left(\sum_{j = 1}^{n}|W^{\mathsf{T}}W|_{ij}\right)^{-1}\sigma (W^{\mathsf{T}}x + b)$ is also 1-Lipschitz. Therefore it is straightforward to create new classes of 1-Lipschitz network structures from existing ones.
92
+
93
+ Another important consequence of Theorem 1 is about new layer development. Any new nonsingular diagonal solution $T$ for the SDP condition $W^{\mathsf{T}}W - T \preceq 0$ immediately leads to new 1-Lipschitz network structures in the form of $g(x) = WT^{-\frac{1}{2}}x + b$ or $h(x) = x - 2WT^{-1}\sigma (W^{\mathsf{T}}x + b)$ . Therefore, the developments of 1-Lipschitz network structures can be reformulated as finding analytical solutions of the matrix inequality $W^{\mathsf{T}}W \preceq T$ with nonsingular diagonal $T$ . As a matter of fact, the Gershgorin circle theorem can help to improve the existing choices of $T$ in a systematic way. In Section 5, we will discuss such new choices of $T$ and related applications to improve CPL. At this point, it is worth noticing that to develop deep Lipschitz networks, it is important to have analytical formulas of $T$ . The analytical formula of $T$ will enable a fast computation of $WT^{-\frac{1}{2}}$ or $WT^{-1}$ .
94
+
95
+ Theorem 1 is powerful in building a connection between 1-Lipschitz network layers and the algebraic condition $W^{\top}W \preceq T$ . Next, we will look closer at this algebraic condition and provide a new mathematical interpretation explaining how AOL generates "almost orthogonal" weights.
96
+
97
+ Remark 1. The proof of Statement 2 in Theorem 1 relies on (Fazlyab et al., 2019, Lemma 1), which requires the activation function $\sigma$ to be slope-restricted on $[0,1]$ . Therefore, Statement 2 cannot be applied to the case with $\sigma$ being the GroupSort activation function (Anil et al., 2019). In contrast, Statement 1 can be used to build neural networks with any activation functions which are 1-Lipschitz.
98
+
99
+ # 4.2 A NEW MATHEMATICAL INTERPRETATION FOR AOL
100
+
101
+ In Prach et al. (2022), it is observed that AOL can learn "almost orthogonal" weights and hence overcome the gradient vanishing issue. As a matter of fact, the choice of $T$ used in AOL is optimal in a specific mathematical sense, as formalized in the next theorem.
102
+
103
+ Theorem 2. Given any $W \in \mathbb{R}^{m \times n}$ which does not have zero columns, define the set $\mathbf{T} = \{T : T \text{ is nonsingular diagonal, and } T - W^{\top}W \in \mathbf{D}^{n}\}$ . Then the choice of $T$ for the AOL method actually satisfies
104
+
105
+ $$
106
+ T = \operatorname{diag}\left(\sum_{j=1}^{n} |W^{\mathsf{T}}W|_{ij}\right) = \underset{T \in \mathbf{T}}{\arg\min}\; \operatorname{tr}\left(I - T^{-\frac{1}{2}} W^{\mathsf{T}} W T^{-\frac{1}{2}}\right) = \underset{T \in \mathbf{T}}{\arg\min}\; \left\| T^{-\frac{1}{2}} W^{\mathsf{T}} W T^{-\frac{1}{2}} - I \right\|_{F}.
107
+ $$
108
+
109
+ We defer the proof for the above result to the appendix. Here we provide some interpretations. Obviously, the quantity $\| T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}} - I\|_{F}$ provides a measure for the distance between the scaled weight matrix $WT^{-\frac{1}{2}}$ and the set of $n\times n$ orthogonal matrices. If $\| T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}} - I\|_{F} = 0$, then the scaled weight $WT^{-\frac{1}{2}}$ is orthogonal. If $\| T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}} - I\|_{F}$ is small, it means that $WT^{-\frac{1}{2}}$ is "almost orthogonal" and close to the set of orthogonal matrices. Since we require $W^{\mathsf{T}}W - T\preceq 0$, we know that $I - T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}}$ is a positive semidefinite matrix, and its trace provides an alternative metric quantifying the distance between $WT^{-\frac{1}{2}}$ and the set of orthogonal matrices. Importantly, we have the following inequality:
110
+
111
+ $$
112
+ \left\| T^{-\frac{1}{2}} W^{\mathsf{T}} W T^{-\frac{1}{2}} - I \right\|_{F} \leq \operatorname{tr}\left(I - T^{-\frac{1}{2}} W^{\mathsf{T}} W T^{-\frac{1}{2}}\right).
113
+ $$
114
+
115
+ If $\mathrm{tr}(I - T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}})$ is small, then $\| T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}} - I\|_{F}$ is also small, and $WT^{-\frac{1}{2}}$ is close to the set of orthogonal matrices. Therefore, one interpretation for Theorem 2 is that among all the nonsingular diagonal scaling matrices $T$ satisfying $T - W^{\mathsf{T}}W\in \mathbf{D}^{n}$ , the choice of $T$ used in AOL makes the scaled weight matrix $WT^{-\frac{1}{2}}$ the closest to the set of orthogonal matrices. This provides a new mathematical explanation of how AOL can generate "almost orthogonal" weights.
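The following NumPy sketch (illustrative only) probes the claim of Theorem 2 empirically: starting from the AOL choice of $T$, random diagonal perturbations that remain in the feasible set $\mathbf{T}$ should not decrease the trace objective.

```python
import numpy as np

rng = np.random.default_rng(2)
W = rng.standard_normal((8, 6))
G = W.T @ W
n = G.shape[0]

def feasible(T):
    """T nonsingular diagonal with T - W^T W diagonally dominant, non-negative diagonal."""
    A = T - G
    off = np.abs(A).sum(axis=1) - np.abs(np.diag(A))
    return np.all(np.diag(T) > 0) and np.all(np.diag(A) >= off - 1e-12)

def orth_gap(T):
    """tr(I - T^{-1/2} W^T W T^{-1/2}): distance of W T^{-1/2} from orthogonality."""
    s = 1.0 / np.sqrt(np.diag(T))
    return np.trace(np.eye(n) - s[:, None] * G * s[None, :])

T_aol = np.diag(np.abs(G).sum(axis=1))
best = orth_gap(T_aol)

beaten = False
for _ in range(2000):
    # Random positive diagonal perturbations of the AOL scaling.
    T = np.diag(np.diag(T_aol) * np.exp(0.3 * rng.standard_normal(n)))
    if feasible(T) and orth_gap(T) < best - 1e-9:
        beaten = True
print("no feasible perturbation beats the AOL choice:", not beaten)
```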
116
+
117
+ One potential issue for AOL is that $\mathbf{D}^n$ is typically much smaller than $\mathbf{S}_+^n$, and the condition $T - W^{\mathsf{T}}W\in \mathbf{D}^{n}$ may be too conservative compared to the original condition $T - W^{\mathsf{T}}W\in \mathbf{S}_{+}^{n}$ in Theorem 1. If we denote the set $\hat{\mathbf{T}} = \{T:T$ is nonsingular diagonal, and $T - W^{\mathsf{T}}W\in \mathbf{S}_{+}^{n}\}$, then, since $\mathbf{T}\subset \hat{\mathbf{T}}$, we have $\min_{T\in \hat{\mathbf{T}}}\mathrm{tr}(I - T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}})\leq \min_{T\in \mathbf{T}}\mathrm{tr}(I - T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}})$ and $\min_{T\in \hat{\mathbf{T}}}\| T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}} - I\| _F\leq \min_{T\in \mathbf{T}}\| T^{-\frac{1}{2}}W^{\mathsf{T}}WT^{-\frac{1}{2}} - I\| _F$. This leads to interesting alternative choices of $T$ which can further promote orthogonality:
118
+
119
+ $$
120
+ T = \underset{T \in \hat{\mathbf{T}}}{\arg\min} \| T^{-\frac{1}{2}} W^{\top} W T^{-\frac{1}{2}} - I \|_{F} \quad \text{or} \quad T = \underset{T \in \hat{\mathbf{T}}}{\arg\min} \operatorname{tr}\left(I - T^{-\frac{1}{2}} W^{\top} W T^{-\frac{1}{2}}\right) \tag{3}
121
+ $$
122
+
123
+ Although (3) may be solved as a convex program on small toy examples, it is not practical to use such a choice of $T$ for large-scale problems. It is our hope that the theoretical discussion above will inspire more future research on developing new practical choices of $T$ for promoting orthogonality.
124
+
125
+ # 5 EXTENSIONS OF CPL: THE POWER OF GERSHGORIN CIRCLE THEOREM
126
+
127
+ In this section, we extend the original CPL layer (1) to a new family of 1-Lipschitz network structures via providing new analytical solutions to our condition $W^{\mathsf{T}}W \preceq T$. We term this general family of layers SDP-based Lipschitz layers (SLL), since the condition $W^{\mathsf{T}}W \preceq T$ can be viewed as an SDP for the decision variable $T$. First, we extend the existing CPL (Eq. (1)) via applying more general choices of $T$ with Theorem 1. From the discussion after Theorem 1, we already know that we can use the choice $T = \mathrm{diag}(\sum_{j=1}^{n} |W^{\mathsf{T}}W|_{ij})$ to replace the original choice $T = \|W\|_2^2 I$. In this section, we will strengthen CPL via an even more general choice of $T$, which is based on a special version of the Gershgorin circle theorem. Specifically, we will apply (Horn et al., 2012, Corollary 6.1.6) to show the following result.
128
+
129
+ Theorem 3. Let $W$ be the weight matrix. Suppose $T$ is a nonsingular diagonal matrix. If there exists some diagonal matrix $Q$ with all positive diagonal entries such that $(T - QW^{\mathsf{T}}WQ^{-1})$ is a real diagonally dominant matrix with diagonal entries being all positive, then $T \succeq W^{\mathsf{T}}W$ , and the function $h(x) = x - 2WT^{-1}\sigma(W^{\mathsf{T}}x + b)$ is 1-Lipschitz for $\sigma$ being ReLU, tanh or sigmoid.
130
+
131
+ We defer the proof of this result to the appendix. If we choose $Q = I$, the above theorem just recovers the choice of $T$ used in AOL, i.e. $T = \mathrm{diag}(\sum_{j=1}^{n} |W^{\mathsf{T}}W|_{ij})$. However, it is expected that the use of a more general $Q$ will allow us to train a less conservative 1-Lipschitz neural network due to the increased expressivity brought by these extra variables. We will present numerical results to demonstrate this. We also emphasize that $(T - QW^{\mathsf{T}}WQ^{-1})$ is typically not a symmetric matrix and hence is not in $\mathbf{D}^n$ even when it only has non-negative eigenvalues. However, this does not affect our proof of the positive semidefiniteness of $(T - W^{\mathsf{T}}W)$.
132
+
133
+ Application of Theorem 3. We can parameterize $Q^{-1} = \mathrm{diag}(q_i)$ with $q_{i} > 0$ . Then the $(i,j)$ -th entry of $QW^{\top}WQ^{-1}$ is equal to $(W^{\top}W)_{ij}q_j / q_i$ . Hence we can just set the diagonal entry of $T$ as
134
+
135
+ $$
136
+ T_{ii} = \sum_{j=1}^{n} \left| (W^{\top}W)_{ij} \, q_{j} / q_{i} \right| = \sum_{j=1}^{n} |W^{\top}W|_{ij} \frac{q_{j}}{q_{i}}. \tag{4}
137
+ $$
138
+
139
+ This leads to our new choice of $T = \mathrm{diag}(\sum_{j=1}^{n} |W^{\mathsf{T}} W|_{ij} q_j / q_i)$ . Notice that the layer function $h(x) = x - 2WT^{-1}\sigma(W^{\mathsf{T}}x + b)$ has a residual network structure. Hence it is expected that vanishing gradient will not be an issue. Therefore, we can simultaneously optimize the training loss over $W$ and $\{q_i\}$ . We will present a numerical study to demonstrate that such a training approach will allow us to generate competitive results on training certifiably robust classifiers.
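A short NumPy sketch (ours) of Eq. (4): for a random $W$ and arbitrary positive $\{q_i\}$ (parameterized here as exponentials of free values), the resulting $T$ indeed satisfies $T \succeq W^{\mathsf{T}}W$, as guaranteed by Theorem 3.

```python
import numpy as np

rng = np.random.default_rng(3)
W = rng.standard_normal((8, 6))
Gabs = np.abs(W.T @ W)                     # |W^T W|, entrywise
q = np.exp(rng.standard_normal(6))         # arbitrary q_i > 0

# Eq. (4): T_ii = sum_j |W^T W|_ij * q_j / q_i.
T = np.diag((Gabs * (q[None, :] / q[:, None])).sum(axis=1))

# Theorem 3: T >= W^T W, so h(x) = x - 2 W T^{-1} sigma(W^T x + b) is 1-Lipschitz.
print(np.linalg.eigvalsh(T - W.T @ W).min() >= -1e-9)
```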
140
+
141
+ Table 1: This table presents the natural and provable accuracy, as well as the number of parameters and training time, of several concurrent works and our SLL networks on the CIFAR10 dataset. All results for SLL networks are averaged over 3 training runs.
142
+
143
+ <table><tr><td rowspan="2">Models</td><td rowspan="2">Natural Accuracy</td><td colspan="4">Provable Accuracy (ε)</td><td rowspan="2">Number of Parameters</td><td rowspan="2">Time by Epoch (s)</td></tr><tr><td>36/255</td><td>72/255</td><td>108/255</td><td>1</td></tr><tr><td>GloRo (Leino et al., 2021)</td><td>77.0</td><td>58.4</td><td>-</td><td>-</td><td>-</td><td>8M</td><td>6</td></tr><tr><td>Local-Lip-B (Huang et al., 2021b)</td><td>77.4</td><td>60.7</td><td>39.0</td><td>20.4</td><td>-</td><td>2.3M</td><td>8</td></tr><tr><td>Cayley Large (Trockman et al., 2021)</td><td>74.6</td><td>61.4</td><td>46.4</td><td>32.1</td><td>-</td><td>21M</td><td>30</td></tr><tr><td>SOC 20 (Singla et al., 2021)</td><td>78.0</td><td>62.7</td><td>46.0</td><td>30.3</td><td>-</td><td>27M</td><td>52</td></tr><tr><td>SOC+ 20 (Singla et al., 2022b)</td><td>76.3</td><td>62.6</td><td>48.7</td><td>36.0</td><td>-</td><td>27M</td><td>52</td></tr><tr><td>CPL XL (Meunier et al., 2022)</td><td>78.5</td><td>64.4</td><td>48.0</td><td>33.0</td><td>-</td><td>236M</td><td>163</td></tr><tr><td>AOL Large (Prach et al., 2022)</td><td>71.6</td><td>64.0</td><td>56.4</td><td>49.0</td><td>23.7</td><td>136M</td><td>64</td></tr><tr><td>SLL Small</td><td>71.2</td><td>62.6</td><td>53.8</td><td>45.3</td><td>20.4</td><td>41M</td><td>20</td></tr><tr><td>SLL Medium</td><td>72.2</td><td>64.3</td><td>56.0</td><td>48.3</td><td>23.9</td><td>78M</td><td>35</td></tr><tr><td>SLL Large</td><td>72.7</td><td>65.0</td><td>57.3</td><td>49.7</td><td>25.4</td><td>118M</td><td>55</td></tr><tr><td>SLL X-Large</td><td>73.3</td><td>65.8</td><td>58.4</td><td>51.3</td><td>27.3</td><td>236M</td><td>105</td></tr></table>
144
+
145
+ SDP conditions for more general network structures. It is also worth mentioning that the SDP condition in Theorem 1 can be generalized to address the following more general structure:
146
+
147
+ $$
148
+ h (x) = H x + G \sigma \left(W ^ {\mathsf {T}} x + b\right), \tag {5}
149
+ $$
150
+
151
+ where $H$ and $G$ will be determined by the weight $W$ in some manner, and the matrix dimensions are assumed to be compatible. If we choose $H = I$ and $G = -2WT^{-1}$ , then (5) reduces to the residual network structure considered in Theorem 1. There are many other choices of $(H,G)$ which can also ensure (5) to be 1-Lipschitz. Our last theoretical result is a new SDP condition which generalizes Theorem 1 and provides a more comprehensive characterization of such choices of $(H,G)$ .
152
+
153
+ Theorem 4. Let $n$ be the number of neurons. For any non-negative scalars $\{\lambda_i\}_{i=1}^n$, define
154
+
155
+ $$
156
+ \Lambda = \operatorname{diag}\left(\lambda_{1}, \lambda_{2}, \dots, \lambda_{n}\right). \tag{6}
157
+ $$
158
+
159
+ Suppose the activation function $\sigma$ is ReLU or tanh or sigmoid. If there exist non-negative scalars $\{\lambda_i\}_{i=1}^n$ such that the following matrix inequality holds
160
+
161
+ $$
162
+ \left[ \begin{array}{c c} I - H ^ {\mathsf {T}} H & - H ^ {\mathsf {T}} G - W \Lambda \\ - G ^ {\mathsf {T}} H - \Lambda W ^ {\mathsf {T}} & 2 \Lambda - G ^ {\mathsf {T}} G \end{array} \right] \succeq 0 \tag {7}
163
+ $$
164
+
165
+ then the network layer (5) is 1-Lipschitz, i.e., $\| h(x) - h(y)\| \leq \| x - y\|$ for all $(x,y)$ .
166
+
167
+ The above theorem can be proved via modifying the argument used in Fazlyab et al. (2019, Theorem $1$ ) and we defer the detailed proof to the appendix. On one hand, if we choose $H = 0$ , then our condition (7) reduces to a variant of Theorem 1 in Fazlyab et al. (2019). On the other hand, for residual network structure with $H = I$ , we can choose $T = 2\Lambda^{-1}$ and $G = -W\Lambda = -2WT^{-1}$ to reduce (7) to our original algebraic condition $T \succeq W^{\mathsf{T}}W$ . Therefore, Theorem 4 provides a connection between the SDP condition in Fazlyab et al. (2019) and our proposed simple algebraic condition in Theorem 1. It is possible to obtain new 1-Lipschitz network layers via providing new analytical solutions to (7). It is our hope that our proposed SDP condition (7) can lead to many more 1-Lipschitz network structures in the future.
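To make the reduction concrete, the sketch below (ours) forms the block matrix of condition (7) for the residual choice $H = I$, $\Lambda = 2T^{-1}$, $G = -W\Lambda$, with $T$ taken from the AOL/Gershgorin solution, and checks numerically that it is positive semidefinite.

```python
import numpy as np

rng = np.random.default_rng(4)
p, n = 8, 6
W = rng.standard_normal((p, n))

# Residual choice: H = I, T from the Gershgorin/AOL solution, Lambda = 2 T^{-1}, G = -W Lambda.
T = np.diag(np.abs(W.T @ W).sum(axis=1))
Lam = 2.0 * np.linalg.inv(T)
H = np.eye(p)
G = -W @ Lam

# Block matrix of condition (7).
M = np.block([
    [np.eye(p) - H.T @ H, -H.T @ G - W @ Lam],
    [-G.T @ H - Lam @ W.T, 2.0 * Lam - G.T @ G],
])
print("condition (7) holds (M is PSD):", np.linalg.eigvalsh(M).min() >= -1e-9)
```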
168
+
169
+ # 6 EXPERIMENTS
170
+
171
+ In this section, we present a comprehensive set of experiments with 1-Lipschitz neural networks based on our proposed SDP-based Lipschitz Layer. More specifically, we build 1-Lipschitz neural networks based on the following layer:
172
+
173
+ $$
174
+ h(x) = x - 2 W \operatorname{diag}\left(\sum_{j=1}^{n} \left| W^{\mathsf{T}} W \right|_{ij} q_{j} / q_{i}\right)^{-1} \sigma\left(W^{\mathsf{T}} x + b\right), \tag{8}
175
+ $$
176
+
177
+ Table 2: This table presents the natural and provable accuracy of several concurrent works and our SLL networks on the CIFAR100 and TinyImageNet datasets. SLL results are averaged over 3 training runs.
178
+
179
+ <table><tr><td rowspan="2">Datasets</td><td rowspan="2">Models</td><td rowspan="2">Natural Accuracy</td><td colspan="4">Provable Accuracy (ε)</td></tr><tr><td>36/255</td><td>72/255</td><td>108/255</td><td>1</td></tr><tr><td rowspan="9">CIFAR100</td><td>Cayley Large (Trockman et al., 2021)</td><td>43.3</td><td>29.2</td><td>18.8</td><td>11.0</td><td>-</td></tr><tr><td>SOC 20 (Singla et al., 2021)</td><td>48.3</td><td>34.4</td><td>22.7</td><td>14.2</td><td>-</td></tr><tr><td>SOC+ 20 (Singla et al., 2022b)</td><td>47.8</td><td>34.8</td><td>23.7</td><td>15.8</td><td>-</td></tr><tr><td>CPL XL (Meunier et al., 2022)</td><td>47.8</td><td>33.4</td><td>20.9</td><td>12.6</td><td>-</td></tr><tr><td>AOL Large (Prach et al., 2022)</td><td>43.7</td><td>33.7</td><td>26.3</td><td>20.7</td><td>7.8</td></tr><tr><td>SLL Small</td><td>44.9</td><td>34.7</td><td>26.8</td><td>20.9</td><td>8.1</td></tr><tr><td>SLL Medium</td><td>46.0</td><td>35.5</td><td>27.9</td><td>22.2</td><td>9.1</td></tr><tr><td>SLL Large</td><td>46.4</td><td>36.2</td><td>28.4</td><td>22.7</td><td>9.6</td></tr><tr><td>SLL X-Large</td><td>46.5</td><td>36.5</td><td>29.0</td><td>23.3</td><td>10.4</td></tr><tr><td rowspan="6">TinyImageNet</td><td>GloRo (Leino et al., 2021)</td><td>35.5</td><td>22.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Local-Lip-B (+MaxMin) (Huang et al., 2021b)</td><td>36.9</td><td>23.4</td><td>12.7</td><td>6.1</td><td>0.0</td></tr><tr><td>SLL Small</td><td>26.6</td><td>19.5</td><td>14.2</td><td>10.4</td><td>2.9</td></tr><tr><td>SLL Medium</td><td>30.4</td><td>22.3</td><td>15.9</td><td>11.6</td><td>3.0</td></tr><tr><td>SLL Large</td><td>31.3</td><td>23.0</td><td>16.9</td><td>12.3</td><td>3.3</td></tr><tr><td>SLL X-Large</td><td>32.1</td><td>23.2</td><td>16.8</td><td>12.0</td><td>3.2</td></tr></table>
180
+
181
+ where $W$ is a parameter matrix, either dense or a convolution, $\{q_i\}$ forms a diagonal scaling matrix as described by Theorem 3, and $\sigma(\cdot)$ is the ReLU nonlinearity. We use the same architectures proposed by Meunier et al. (2022) with small, medium, large and x-large sizes. The architecture consists of several Conv-SLL and Linear-SLL layers. For CIFAR-100, we use the Last Layer Normalization proposed by Singla et al. (2022b), which improves the certified accuracy when the number of classes becomes large. Note that the layer presented in Equation (8) can be easily implemented with convolutions following the same scaling as in Prach et al. (2022); a minimal dense sketch is given below. Our experiments focus on the impact of the Lipschitz layer structure on certified robustness. This complements a recent study on other aspects (e.g. projection pooling) of robust networks (Singla et al., 2022a).
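Below is a minimal dense PyTorch sketch (ours; the released repository is the reference implementation, and convolutional layers require the additional scaling mentioned above) of the SLL layer in Eq. (8), with $\{q_i\}$ parameterized through their logarithms so they stay positive.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SLLDense(nn.Module):
    """Dense SDP-based Lipschitz Layer, Eq. (8):
    h(x) = x - 2 W diag(sum_j |W^T W|_ij q_j / q_i)^{-1} relu(W^T x + b)."""

    def __init__(self, dim: int, hidden: int):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(dim, hidden) / dim ** 0.5)   # W
        self.bias = nn.Parameter(torch.zeros(hidden))                        # b
        self.log_q = nn.Parameter(torch.zeros(hidden))                       # q_i = exp(log_q_i) > 0

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        w, q = self.weight, torch.exp(self.log_q)
        gram_abs = (w.T @ w).abs()                             # |W^T W|
        t = (gram_abs * (q[None, :] / q[:, None])).sum(dim=1)  # diagonal of T, Eq. (4)
        pre_act = F.relu(x @ w + self.bias)                    # sigma(W^T x + b)
        return x - 2.0 * (pre_act / t) @ w.T                   # x - 2 W T^{-1} sigma(.)

layer = SLLDense(dim=16, hidden=32)
x, y = torch.randn(4, 16), torch.randn(4, 16)
print(torch.norm(layer(x) - layer(y)) <= torch.norm(x - y) + 1e-5)  # 1-Lipschitz check
```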
182
+
183
+ # Details on the architectures & Hyper-parameters.
184
+
185
+ Table 3 describes the details of our Small, Medium, Large and X-Large architectures. We train our networks with a batch size of 256 over 1000 epochs, with data augmentation. We use the Adam optimizer (Kingma et al., 2014) with a 0.01 learning rate, parameters $\beta_{1}$ and $\beta_{2}$ equal to 0.5 and 0.9 respectively, and no weight decay. We use a piecewise triangular learning rate scheduler to decay the learning rate during training.
186
+
187
+ Table 3: The SLL architectures used for the experiments are inspired by Meunier et al. (2022).
188
+
189
+ <table><tr><td></td><td>S</td><td>M</td><td>L</td><td>XL</td></tr><tr><td>Conv-SLL</td><td>20</td><td>30</td><td>90</td><td>120</td></tr><tr><td>Channels</td><td>45</td><td>60</td><td>60</td><td>70</td></tr><tr><td>Linear-SLL</td><td>7</td><td>10</td><td>15</td><td>15</td></tr><tr><td>Linear Features</td><td>2048</td><td>2048</td><td>4096</td><td>4096</td></tr></table>
190
+
191
+ We use the cross-entropy loss as in Prach et al. (2022), with a temperature of 0.25 and an offset value of $\frac{3}{2}\sqrt{2}$.
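For reference, here is a sketch (ours) of the optimizer setup matching the stated hyper-parameters; the exact piecewise triangular schedule used in the experiments is not specified here, so the `triangular` function and the stand-in model below are only rough placeholders.

```python
import torch

# Stand-in model; in practice this is an SLL network built from layers like SLLDense above.
model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))

epochs = 1000
optimizer = torch.optim.Adam(model.parameters(), lr=0.01, betas=(0.5, 0.9), weight_decay=0.0)

# Rough piecewise triangular schedule: linear warm-up, then linear decay to zero.
def triangular(epoch: int, peak: int = 50) -> float:
    if epoch < peak:
        return (epoch + 1) / peak
    return max(0.0, (epochs - epoch) / (epochs - peak))

scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=triangular)
```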
192
+
193
+ Results in terms of Natural and Certified Accuracy on CIFAR10/100. First, we evaluate our networks (SLL) on CIFAR10 and CIFAR100 and compare the results against recent 1-Lipschitz neural network structures: Cayley, SOC, $\mathrm{SOC+}$, CPL and AOL. We also compare SLL with two other Lipschitz training approaches (Leino et al., 2021; Huang et al., 2021b), which do not guarantee prescribed global Lipschitz bounds during the training stage. Table 1 presents the natural and certified accuracy with different radii of certification on CIFAR10. For a fair comparison, the number of parameters and training time per epoch for each method are also added to Table 1. Results on CIFAR100 are included in Table 2. We can see that our approach outperforms existing 1-Lipschitz architectures including AOL and CPL on certified accuracy for all values of $\varepsilon$. We also observe that SLL-based 1-Lipschitz neural networks offer a good trade-off among previous approaches with respect to natural and certified accuracy. A detailed comparison is given below.
194
+
195
+ Advantages of SLL over Cayley/SOC. In general, it is difficult to compare the expressive power of non-residual and residual networks. Hence we do not claim that, with the same model size, SLL is more expressive than Cayley or SOC, which are not residual networks in the first place. However, we believe that the current choice of $T$ in SLL is very easy to calculate and hence leads to a scalable
196
+
197
+ Table 5: This table describes the empirical robustness of our SLL-based classifiers on the CIFAR10 and CIFAR100 datasets. The empirical robustness is measured with AutoAttack. All results are the average of 3 models.
198
+
199
+ <table><tr><td rowspan="2">Models</td><td colspan="4">CIFAR10 - AutoAttack (ε)</td><td colspan="4">CIFAR100 - AutoAttack (ε)</td></tr><tr><td>36/255</td><td>72/255</td><td>108/255</td><td>1</td><td>36/255</td><td>72/255</td><td>108/255</td><td>1</td></tr><tr><td>SLL Small</td><td>68.1</td><td>62.5</td><td>56.8</td><td>35.0</td><td>40.7</td><td>35.2</td><td>30.4</td><td>17.0</td></tr><tr><td>SLL Medium</td><td>69.1</td><td>63.8</td><td>58.4</td><td>37.0</td><td>41.5</td><td>36.4</td><td>31.5</td><td>17.9</td></tr><tr><td>SLL Large</td><td>69.8</td><td>64.5</td><td>59.1</td><td>37.9</td><td>42.1</td><td>37.1</td><td>32.6</td><td>18.7</td></tr><tr><td>SLL X-Large</td><td>70.3</td><td>65.4</td><td>60.2</td><td>39.4</td><td>42.7</td><td>37.8</td><td>33.2</td><td>19.5</td></tr></table>
200
+
201
+ approach that allows us to train very large models in a reasonable amount of time. For illustrative purposes, consider the comparison between SLL and Cayley in Table 1. We can see that SLL Small has more parameters than Cayley Large (41M vs. 21M) while being faster to train. Indeed, the Cayley approach involves computing an expensive orthogonal projection (with a matrix inverse), while SOC requires the computation of several convolutions at training and inference (from 6 to 12) to compute the exponential of a convolution up to a desired precision. Hence the training time per epoch for Cayley Large and SOC is actually longer than for SLL Small. While being faster to train, SLL Small still outperforms Cayley Large and SOC for all three values of $\varepsilon$. In general, we think it is fair to claim that our approach is more scalable than previous approaches based on orthogonal layers, and allows the use of larger networks, which leads to improvements in certified robustness.
202
+
203
+ Advantages of SLL over AOL/CPL. With careful tuning of the offset value, SLL outperforms AOL for all values of $\varepsilon$. We experiment with several offset values: $\sqrt{2}$, $\frac{3}{2}\sqrt{2}$ and $2\sqrt{2}$. The detailed results for all these offset values are deferred to Table 6 in the appendix. In general, the offset value offers a trade-off between natural accuracy and robustness; thus, by choosing the offset value properly, SLL Large already achieves better results than AOL Large (notice that the training time per epoch for these two is roughly the same). SLL X-Large improves even further. We can also see that SLL Large outperforms CPL XL for all values of $\varepsilon$ while being faster to train. For larger values of $\varepsilon$, the gain of SLL over CPL is remarkable (over $10\%$).
204
+
205
+ Results on TinyImageNet. We have also implemented SLL on TinyImageNet (see Table 2). Previously, other 1-Lipschitz network structures including SOC, Cayley, AOL, and CPL had not been tested on TinyImageNet, and the state-of-the-art approach on TinyImageNet is the local Lipschitz bound approach (Huang et al., 2021a). We can see that SLL significantly outperforms this local Lipschitz approach for larger values of $\varepsilon$ (while generating similar results for the small $\varepsilon$ case).
206
+
207
+ Table 4: Inference time for Local-Lip-B and SLL X-Large on the full TinyImageNet validation set with 4 GPUs.
208
+
209
+ <table><tr><td>Models</td><td>Inference Time</td></tr><tr><td>Local-Lip-B</td><td>41 min</td></tr><tr><td>SLL X-Large</td><td>8 sec</td></tr></table>
210
+
211
+ Notice that the local Lipschitz approach (Huang et al., 2021a) is quite different from other 1-Lipschitz network methods in the sense that it has no guarantees on the Lipschitz constant of the resultant network and hence does not generate 1-Lipschitz networks in the first place. Furthermore, given that this approach does not guarantee a Lipschitz bound during training, a lot more computation needs to be performed during inference, making the certification process very time consuming. Table 4 reports the inference time on TinyImageNet for this local Lipschitz approach and SLL X-Large.
212
+
213
+ Results on Empirical Robustness. We also provide results on the empirical robustness of our approach against an ensemble of diverse parameter-free attacks (i.e., AutoAttack) developed by Croce et al. (2020b). Table 5 reports the empirical robust accuracy for different levels of perturbation. AutoAttack is a strong empirical attack consisting of an ensemble of several known attacks: $\mathrm{APGD}_{\mathrm{CE}}$, $\mathrm{APGD}_{\mathrm{DLR}}$, FAB (Croce et al., 2020a) and Square (Andriushchenko et al., 2020). We observe that the measured robustness is high and well above the certified radius. Indeed, on CIFAR10, we observe a robustness "gain" of up to $4.5\%$, $9.6\%$, $14.1\%$ and $21.7\%$ for the $36/255$, $72/255$, $108/255$ and $1$ $\varepsilon$-perturbations, respectively.
214
+
215
+ # 7 CONCLUSION
216
+
217
+ In this paper, we present a unifying framework for designing Lipschitz layers. Based on a novel algebraic perspective, we identify a common SDP condition underlying the developments of spectral normalization, orthogonality-based methods, AOL, and CPL. Furthermore, we have shown that AOL and CPL can be re-derived and generalized using our theoretical framework. From this analysis, we introduce a family of SDP-based Lipschitz layers (SLL) that outperforms previous work. In the future, it will be interesting to investigate more expressive structures of $T$ and to extend our contributions to address multi-layer neural networks.
218
+
219
+ # ACKNOWLEDGMENTS
220
+
221
+ This work was performed using HPC resources from GENCI-IDRIS (Grant 2021-AD011013259) and funded by the French National Research Agency (ANR SPEEDD-20-CE23-0025). A. Havens and B. Hu are generously supported by the NSF award CAREER-2048168. We also thank Kai Hu for emailing us about an issue in our previous implementation of SLL, which is now addressed in Appendix D.
222
+
223
+ # REFERENCES
224
+
225
+ Maksym Andriushchenko, Francesco Croce, Nicolas Flammarion, and Matthias Hein. Square attack: a query-efficient black-box adversarial attack via random search. In European Conference on Computer Vision, 2020.
226
+ Cem Anil, James Lucas, and Roger Grosse. Sorting out lipschitz function approximation. In International Conference on Machine Learning, 2019.
227
+ George Barker and David Carlson. Cones of diagonally dominant matrices. Pacific Journal of Mathematics, 57(1):15-32, 1975.
228
+ Jeremy Cohen, Elan Rosenfeld, and Zico Kolter. Certified adversarial robustness via randomized smoothing. In International Conference on Machine Learning, 2019.
229
+ Francesco Croce and Matthias Hein. Minimally distorted adversarial examples with a fast adaptive boundary attack. In International Conference on Machine Learning. PMLR, 2020a.
230
+ Francesco Croce and Matthias Hein. Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In International Conference on Machine Learning, 2020b.
231
+ Farzan Farnia, Jesse Zhang, and David Tse. Generalizable adversarial training via spectral normalization. In International Conference on Learning Representations, 2019.
232
+ Mahyar Fazlyab, Alexander Robey, Hamed Hassani, Manfred Morari, and George Pappas. Efficient and accurate estimation of lipschitz constants for deep neural networks. Advances in Neural Information Processing Systems, 32, 2019.
233
+ Mahyar Fazlyab, Manfred Morari, and George J Pappas. Safety verification and robustness analysis of neural networks via quadratic constraints and semidefinite programming. IEEE Transactions on Automatic Control, 2020.
234
+ Matthias Hein and Maksym Andriushchenko. Formal guarantees on the robustness of a classifier against adversarial manipulation. Advances in neural information processing systems, 30, 2017.
235
+ R.A. Horn and C.R. Johnson. Matrix Analysis. Cambridge University Press, 2012. ISBN 9781139788885.
236
+ Kai Hu, Klas Leino, Zifan Wang, and Matt Fredrikson. A recipe for improved certifiable robustness: Capacity and data. arXiv preprint arXiv:2310.02513, 2023.
237
+ Yujia Huang, Huan Zhang, Yuanyuan Shi, J Zico Kolter, and Anima Anandkumar. Training certifiably robust neural networks with efficient local lipschitz bounds. In Advances in Neural Information Processing Systems, 2021a.
238
+
239
+ Yujia Huang, Huan Zhang, Yuanyuan Shi, J Zico Kolter, and Anima Anandkumar. Training certifiably robust neural networks with efficient local lipschitz bounds. Advances in Neural Information Processing Systems, 34:22745-22757, 2021b.
240
+ Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. In International Conference on Learning Representations, 2014.
241
+ Aounon Kumar, Alexander Levine, Tom Goldstein, and Soheil Feizi. Curse of dimensionality on randomized smoothing for certifiable robustness. In International Conference on Machine Learning, 2020.
242
+ Klas Leino, Zifan Wang, and Matt Fredrikson. Globally-robust neural networks. In International Conference on Machine Learning, 2021.
243
+ Qiyang Li, Saminul Haque, Cem Anil, James Lucas, Roger B Grosse, and Joern-Henrik Jacobsen. Preventing gradient attenuation in lipschitz constrained convolutional networks. In Advances in Neural Information Processing Systems, 2019.
244
+ A. Lur'e and V. Postnikov. On the theory of stability of control systems. Applied mathematics and mechanics, 8(3):246-248, 1944.
245
+ A. Megretski and A. Rantzer. System analysis via integral quadratic constraints. IEEE Transactions on Automatic Control, 42:819-830, 1997.
246
+ Laurent Meunier, Blaise Delattre, Alexandre Araujo, and Alexandre Allauzen. A dynamical system perspective for lipschitz neural networks. In International Conference on Machine Learning, 2022.
247
+ Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. Spectral normalization for generative adversarial networks. In International Conference on Learning Representations, 2018.
248
+ Patricia Pauli, Anne Koch, Julian Berberich, Paul Kohler, and Frank Allgower. Training robust neural networks using lipschitz bounds. IEEE Control Systems Letters, 6:121-126, 2021.
249
+ Bernd Prach and Christoph H Lampert. Almost-orthogonal layers for efficient general-purpose lipschitz networks. In Computer Vision-ECCV 2022: 17th European Conference, 2022.
250
+ Aditi Raghunathan, Jacob Steinhardt, and Percy S Liang. Semidefinite relaxations for certifying robustness to adversarial examples. Advances in neural information processing systems, 31, 2018.
251
+ Max Revay, Ruigang Wang, and Ian R Manchester. Lipschitz bounded equilibrium networks. arXiv preprint arXiv:2010.01732, 2020.
252
+ Hadi Salman, Jerry Li, Ilya Razenshteyn, Pengchuan Zhang, Huan Zhang, Sebastien Bubeck, and Greg Yang. Provably robust deep learning via adversarially trained smoothed classifiers. In Advances in Neural Information Processing Systems, 2019.
253
+ Sahil Singla and Soheil Feizi. Skew orthogonal convolutions. In International Conference on Machine Learning, 2021.
254
+ Sahil Singla and Soheil Feizi. Improved techniques for deterministic l2 robustness. In Advances in Neural Information Processing Systems, 2022a.
255
+ Sahil Singla, Surbhi Singla, and Soheil Feizi. Improved deterministic l2 robustness on CIFAR-10 and CIFAR-100. In International Conference on Learning Representations, 2022b.
256
+ Asher Trockman and J Zico Kolter. Orthogonalizing convolutional layers with the cayley transform. In International Conference on Learning Representations, 2021.
257
+ Yusuke Tsuzuku, Issei Sato, and Masashi Sugiyama. Lipschitz-margin training: Scalable certification of perturbation invariance for deep neural networks. In Advances in Neural Information Processing Systems, 2018.
258
+
259
+ Zi Wang, Gautam Prakriya, and Somesh Jha. A quantitative geometric approach to neural-network smoothness. In Advances in Neural Information Processing Systems, 2022.
260
+ Eric Wong and Zico Kolter. Provable defenses against adversarial examples via the convex outer adversarial polytope. In International Conference on Machine Learning, pp. 5286-5295. PMLR, 2018.
261
+ Xiaojun Xu, Linyi Li, and Bo Li. LOT: Layer-wise orthogonal training on improving l2 certified robustness. In Advances in Neural Information Processing Systems, 2022.
262
+ Greg Yang, Tony Duan, J Edward Hu, Hadi Salman, Ilya Razenshteyn, and Jerry Li. Randomized smoothing of all shapes and sizes. In International Conference on Machine Learning, 2020.
263
+ Tan Yu, Jun Li, Yunfeng Cai, and Ping Li. Constructing orthogonal convolutions in an explicit manner. In International Conference on Learning Representations, 2022.
264
+
265
+ # A PROOFS
266
+
267
+ In this section, we present the proofs for the theorems presented in our paper.
268
+
269
+ # A.1 PROOF OF THEOREM 1
270
+
271
+ To prove the first statement in Theorem 1, notice that we have
272
+
273
+ $$
274
+ \left\| g (x) - g (y) \right\| ^ {2} = \left\| W T ^ {- \frac {1}{2}} (x - y) \right\| ^ {2} = (x - y) ^ {\top} T ^ {- \frac {1}{2}} W ^ {\top} W T ^ {- \frac {1}{2}} (x - y).
275
+ $$
276
+
277
+ Based on our algebraic condition $W^{\top}W\preceq T$ , we immediately have
278
+
279
+ $$
280
+ \left\| g (x) - g (y) \right\| ^ {2} \leq (x - y) ^ {\mathrm {T}} T ^ {- \frac {1}{2}} T T ^ {- \frac {1}{2}} (x - y) = \| x - y \| ^ {2}.
281
+ $$
282
+
283
+ Therefore, Statement 1 is true.
284
+
285
+ To prove Statement 2 in Theorem 1, we need to use the property of the nonlinear activation function $\sigma$ . Notice that the condition $W^{\mathsf{T}}W\preceq T$ ensures that all the diagonal entries of the nonsingular matrix $T$ are positive. Therefore, $T^{-1}$ is also a diagonal matrix whose diagonal entries are all positive. For all the three activation functions listed in the above theorem, $\sigma$ is slope-restricted on [0, 1], and the following inequality holds for any $\{x',y'\}$ (Fazlyab et al., 2019, Lemma 1):
286
+
287
+ $$
288
+ \left[ \begin{array}{c} x ^ {\prime} - y ^ {\prime} \\ \sigma (x ^ {\prime}) - \sigma (y ^ {\prime}) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} 0 & - T ^ {- 1} \\ - T ^ {- 1} & 2 T ^ {- 1} \end{array} \right] \left[ \begin{array}{c} x ^ {\prime} - y ^ {\prime} \\ \sigma (x ^ {\prime}) - \sigma (y ^ {\prime}) \end{array} \right] \leq 0.
289
+ $$
290
+
291
+ We can set $x' = W^{\top}x + b$ and $y' = W^{\top}y + b$ , and the above inequality becomes
292
+
293
+ $$
294
+ \left[ \begin{array}{c} W ^ {\mathsf {T}} (x - y) \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} 0 & - T ^ {- 1} \\ - T ^ {- 1} & 2 T ^ {- 1} \end{array} \right] \left[ \begin{array}{c} W ^ {\mathsf {T}} (x - y) \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] \leq 0.
295
+ $$
296
+
297
+ We can rewrite the above inequality as
298
+
299
+ $$
300
+ \left[ \begin{array}{c} x - y \\ \sigma \left(W ^ {\top} x + b\right) - \sigma \left(W ^ {\top} y + b\right) \end{array} \right] ^ {\top} \left[ \begin{array}{c c} 0 & - W T ^ {- 1} \\ - T ^ {- 1} W ^ {\top} & 2 T ^ {- 1} \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma \left(W ^ {\top} x + b\right) - \sigma \left(W ^ {\top} y + b\right) \end{array} \right] \leq 0. \tag {9}
301
+ $$
302
+
303
+ Now we can apply the following argument:
304
+
305
+ $$
306
+ \begin{array}{l} \left\| h (x) - h (y) \right\| ^ {2} \\ = \left\| x - y - 2 \left(W T ^ {- 1} \sigma (W ^ {\mathsf {T}} x + b) - W T ^ {- 1} \sigma (W ^ {\mathsf {T}} y + b)\right) \right\| ^ {2} \\ = \left[ \begin{array}{c} x - y \\ 2 W T ^ {- 1} \left(\sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b)\right) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} I & - I \\ - I & I \end{array} \right] \left[ \begin{array}{c} x - y \\ 2 W T ^ {- 1} \left(\sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b)\right) \end{array} \right] \\ = \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} I & - 2 W T ^ {- 1} \\ - 2 T ^ {- 1} W ^ {\mathsf {T}} & 4 T ^ {- 1} W ^ {\mathsf {T}} W T ^ {- 1} \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] \\ \leq \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} I & - 2 W T ^ {- 1} \\ - 2 T ^ {- 1} W ^ {\mathsf {T}} & 4 T ^ {- 1} \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right], \\ \end{array}
307
+ $$
308
+
309
+ where the last step follows from the fact that our condition $W^{\mathsf{T}}W \preceq T$ implies $T^{-1}W^{\mathsf{T}}WT^{-1} \preceq T^{-1}$ . Finally, we can combine the above inequality with (9) to show
310
+
311
+ $$
312
+ \begin{array}{l} \| h (x) - h (y) \| ^ {2} \leq \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} I & 0 \\ 0 & 0 \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] \\ = \| x - y \| ^ {2}, \\ \end{array}
313
+ $$
314
+
315
+ which is the desired conclusion. Our proof is complete.
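
As a sanity check (ours, not part of the original proof), the condition of Theorem 1 and the resulting Lipschitz bounds can also be verified numerically. The sketch below uses an AOL-style choice of $T$ purely to obtain a feasible example:

```python
# Numerical sanity check of Theorem 1 (illustrative sketch, not from the paper):
# draw a random W, pick a feasible diagonal T with W^T W <= T, and verify that
# g(x) = W T^{-1/2} x and h(x) = x - 2 W T^{-1} relu(W^T x + b) are 1-Lipschitz.
import numpy as np

rng = np.random.default_rng(0)
m, n = 8, 5                       # W in R^{m x n}; g maps R^n -> R^m, h maps R^m -> R^m
W = rng.standard_normal((m, n))
b = rng.standard_normal(n)

WtW = W.T @ W
T_diag = np.abs(WtW).sum(axis=1)  # a feasible (AOL-style) diagonal T
assert np.linalg.eigvalsh(np.diag(T_diag) - WtW).min() >= -1e-9  # W^T W <= T holds

relu = lambda z: np.maximum(z, 0.0)
g = lambda x: W @ (x / np.sqrt(T_diag))
h = lambda x: x - 2.0 * W @ (relu(W.T @ x + b) / T_diag)

for _ in range(1000):
    x, y = rng.standard_normal(n), rng.standard_normal(n)
    assert np.linalg.norm(g(x) - g(y)) <= np.linalg.norm(x - y) + 1e-9
    u, v = rng.standard_normal(m), rng.standard_normal(m)
    assert np.linalg.norm(h(u) - h(v)) <= np.linalg.norm(u - v) + 1e-9
```
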
316
+
317
+ # A.2 PROOF OF THEOREM 2
318
+
319
+ Since $T$ is nonsingular diagonal and $T - W^{\mathsf{T}}W\in \mathbf{D}^{n}$ , we must have $T_{ii}\geq \sum_{j}|W^{\mathsf{T}}W|_{ij}$ . Given the following key relation:
320
+
321
+ $$
322
+ \mathrm {t r} (I - T ^ {- \frac {1}{2}} W ^ {\mathsf {T}} W T ^ {- \frac {1}{2}}) = \sum_ {i} \left(1 - \frac {| W ^ {\mathsf {T}} W | _ {i i}}{T _ {i i}}\right),
323
+ $$
324
+
325
+ it becomes clear that we need to choose the smallest value of $T_{ii}$ for all $i$ to minimize $\mathrm{tr}(I - T^{-\frac{1}{2}}W^{\top}WT^{-\frac{1}{2}})$ . Therefore the choice of $T$ for AOL minimizes $\mathrm{tr}(I - T^{-\frac{1}{2}}W^{\top}WT^{-\frac{1}{2}})$ over $T \in \mathbf{T}$ . The proof for the last equality in Theorem 2 is similar. Let us denote $X = I - T^{-\frac{1}{2}}W^{\top}WT^{-\frac{1}{2}}$ . For any $(i,j)$ , the quantity $X_{ij}^{2}$ is always monotone non-decreasing in $T_{ii}$ and $T_{jj}$ . To minimize $\| X \|_F$ , we just need to choose the smallest value for all $T_{ii}$ under the constraint $T_{ii} \geq \sum_j |W^{\top}W|_{ij}$ . This completes the proof.
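
The optimality argument can be illustrated numerically as well. The following sketch (ours, not from the paper) compares the trace objective for the AOL choice of $T$ with an inflated, still feasible, choice:

```python
# Illustration of Theorem 2 (sketch, not from the paper): the AOL choice
# T_ii = sum_j |W^T W|_ij minimizes tr(I - T^{-1/2} W^T W T^{-1/2}) over feasible T;
# any entrywise larger diagonal stays feasible but yields a larger trace.
import numpy as np

rng = np.random.default_rng(1)
W = rng.standard_normal((6, 4))
WtW = W.T @ W

def trace_objective(t_diag):
    X = np.eye(len(t_diag)) - WtW / np.sqrt(np.outer(t_diag, t_diag))
    return np.trace(X)

t_aol = np.abs(WtW).sum(axis=1)      # AOL choice of the diagonal of T
t_inflated = 1.5 * t_aol             # still satisfies T >= W^T W, but larger
assert trace_objective(t_aol) <= trace_objective(t_inflated) + 1e-12
print(trace_objective(t_aol), trace_objective(t_inflated))
```
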
326
+
327
+ # A.3 THE GERSHGORIN CIRCLE THEOREM AND PROOF OF THEOREM 3
328
+
329
+ Before stating the proof of Theorem 3, we will state the Gershgorin circle theorem, a useful result from matrix analysis which locates the eigenvalues of a real (or complex) matrix (Horn et al., 2012, Theorem 6.1.1).
330
+
331
+ Theorem 5 (Gershgorin). Let $A \in \mathbb{R}^{n \times n}$ and define the $n$ Gershgorin discs of $A$ by
332
+
333
+ $$
334
+ \left\{z \in \mathbb {C}: | z - A _ {i i} | \leq \sum_ {j \neq i} | A _ {i j} | \right\}, \quad i \in \{1, \dots , n \}.
335
+ $$
336
+
337
+ Then the eigenvalues of $A$ are contained in the union of Gershgorin discs
338
+
339
+ $$
340
+ \bigcup_ {i = 1} ^ {n} \left\{z \in \mathbb {C}: | z - A _ {i i} | \leq \sum_ {j \neq i} | A _ {i j} | \right\}
341
+ $$
342
+
343
+ A useful consequence of this theorem is that whenever $A$ is diagonally dominant (i.e. $|A_{ii}| \geq \sum_{j \neq i} |A_{ij}|$ ) with positive diagonal entries, every eigenvalue of $A$ has a non-negative real part; in particular, if $A$ is symmetric or similar to a symmetric matrix, all of its eigenvalues are real and non-negative. With this fact, we now proceed to the proof of Theorem 3.
344
+
345
+ Proof of Theorem 3. Given a nonsingular matrix $Q$ , clearly the eigenvalues of $Q(T - W^{\mathsf{T}}W)Q^{-1}$ and $(T - W^{\mathsf{T}}W)$ are the same. If $Q(T - W^{\mathsf{T}}W)Q^{-1}$ is diagonally dominant and only has positive diagonal entries, then we can apply the Gershgorin circle theorem (Horn et al., 2012, Corollary 6.1.6) to show that all the eigenvalues of $Q(T - W^{\mathsf{T}}W)Q^{-1}$ (which equals $T - QW^{\mathsf{T}}WQ^{-1}$ , since the diagonal matrices $Q$ and $T$ commute) are non-negative. Therefore, we know that all the eigenvalues of $(T - W^{\mathsf{T}}W)$ are non-negative. Since $(T - W^{\mathsf{T}}W)$ is symmetric, we have $T \succeq W^{\mathsf{T}}W$ . Then we can apply Theorem 1 to reach our desired conclusion.
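
To make the role of the scaling matrix $Q$ concrete, the sketch below (ours, not from the paper) uses the weighted choice $T_{ii} = \sum_{j} |W^{\mathsf{T}}W|_{ij}\, q_j / q_i$ with an arbitrary positive vector $q$ (the choice used by SLL) and checks both the diagonal dominance of the similarity-transformed matrix and the resulting positive semidefiniteness of $T - W^{\mathsf{T}}W$ :

```python
# Illustration of Theorem 3 (sketch, not from the paper): with
# T_ii = sum_j |W^T W|_ij * q_j / q_i and Q = diag(1/q), the matrix
# Q (T - W^T W) Q^{-1} is (weakly) diagonally dominant, hence T - W^T W >= 0.
import numpy as np

rng = np.random.default_rng(2)
W = rng.standard_normal((6, 4))
q = rng.uniform(0.5, 2.0, size=4)     # arbitrary positive scaling vector
WtW = W.T @ W

ratio = q[None, :] / q[:, None]       # ratio[i, j] = q_j / q_i
T_diag = (np.abs(WtW) * ratio).sum(axis=1)

M = np.diag(1.0 / q) @ (np.diag(T_diag) - WtW) @ np.diag(q)
off_diag_sums = np.abs(M).sum(axis=1) - np.abs(np.diag(M))
print(np.all(np.diag(M) >= off_diag_sums - 1e-9))                 # diagonal dominance
print(np.linalg.eigvalsh(np.diag(T_diag) - WtW).min() >= -1e-9)   # PSD as claimed
```
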
346
+
347
+ # A.4 PROOF OF THEOREM 4
348
+
349
+ A detailed proof for Theorem 4 is presented here. Our proof is based on modifying the arguments used in (Fazlyab et al., 2019, Theorem 1), and mainly relies on the quadratic constraint technique developed in the control field (Megretski et al., 1997).
350
+
351
+ First, notice that (7) is equivalent to the following condition:
352
+
353
+ $$
354
+ \left[ \begin{array}{l l} H ^ {\top} H & H ^ {\top} G \\ G ^ {\top} H & G ^ {\top} G \end{array} \right] \preceq \left[ \begin{array}{c c} I & - W \Lambda \\ - \Lambda W ^ {\top} & 2 \Lambda \end{array} \right]. \tag {10}
355
+ $$
356
+
357
+ Suppose (10) holds. Next we will show that $h(x) = Hx + G\sigma(W^{\top}x + b)$ is 1-Lipschitz.
358
+
359
+ For all the three activation functions listed in the above theorem, $\sigma$ is slope-restricted on $[0,1]$ , and the following inequality holds for any $\{x',y'\}$ (Fazlyab et al., 2019, Lemma 1):
360
+
361
+ $$
362
+ \left[ \begin{array}{c} x ^ {\prime} - y ^ {\prime} \\ \sigma (x ^ {\prime}) - \sigma (y ^ {\prime}) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} 0 & - \Lambda \\ - \Lambda & 2 \Lambda \end{array} \right] \left[ \begin{array}{c} x ^ {\prime} - y ^ {\prime} \\ \sigma (x ^ {\prime}) - \sigma (y ^ {\prime}) \end{array} \right] \leq 0.
363
+ $$
364
+
365
+ Table 6: Additional results for CIFAR10 and CIFAR100 datasets with different offset values.
366
+
367
+ <table><tr><td rowspan="3">Offset</td><td rowspan="3">Models</td><td colspan="5">CIFAR10</td><td colspan="5">CIFAR100</td></tr><tr><td rowspan="2">Natural Accuracy</td><td colspan="4">Provable Accuracy (ε)</td><td rowspan="2">Natural Accuracy</td><td colspan="4">Provable Accuracy (ε)</td></tr><tr><td>36 255</td><td>72 255</td><td>108 255</td><td>1</td><td>36 255</td><td>72 255</td><td>108 255</td><td>1</td></tr><tr><td rowspan="4">√2</td><td>SLL small</td><td>73.3</td><td>63.7</td><td>53.8</td><td>44.5</td><td>15.3</td><td>46.7</td><td>35.2</td><td>26.4</td><td>20.1</td><td>5.9</td></tr><tr><td>SLL medium</td><td>74.0</td><td>64.7</td><td>54.9</td><td>45.3</td><td>16.0</td><td>47.2</td><td>36.1</td><td>27.1</td><td>20.7</td><td>6.5</td></tr><tr><td>SLL large</td><td>74.6</td><td>65.3</td><td>55.2</td><td>45.8</td><td>16.2</td><td>47.9</td><td>36.7</td><td>27.9</td><td>21.3</td><td>6.7</td></tr><tr><td>SLL xlarge</td><td>75.3</td><td>65.7</td><td>55.8</td><td>46.1</td><td>16.3</td><td>48.3</td><td>37.2</td><td>28.3</td><td>21.8</td><td>6.9</td></tr><tr><td rowspan="4">3/2√2</td><td>SLL small</td><td>71.2</td><td>62.6</td><td>53.8</td><td>45.3</td><td>20.4</td><td>44.9</td><td>34.7</td><td>26.8</td><td>20.9</td><td>8.1</td></tr><tr><td>SLL medium</td><td>72.2</td><td>64.3</td><td>56.0</td><td>48.3</td><td>23.9</td><td>46.0</td><td>35.5</td><td>27.9</td><td>22.2</td><td>9.1</td></tr><tr><td>SLL large</td><td>72.7</td><td>65.0</td><td>57.3</td><td>49.7</td><td>25.4</td><td>46.4</td><td>36.2</td><td>28.4</td><td>22.7</td><td>9.6</td></tr><tr><td>SLL xlarge</td><td>73.3</td><td>65.8</td><td>58.4</td><td>51.3</td><td>27.3</td><td>46.5</td><td>36.5</td><td>29.0</td><td>23.3</td><td>10.4</td></tr><tr><td rowspan="4">2√2</td><td>SLL small</td><td>70.0</td><td>61.5</td><td>53.4</td><td>45.7</td><td>22.7</td><td>44.6</td><td>34.5</td><td>26.5</td><td>21.0</td><td>8.6</td></tr><tr><td>SLL medium</td><td>70.8</td><td>63.1</td><td>55.4</td><td>48.3</td><td>25.8</td><td>45.4</td><td>35.5</td><td>27.9</td><td>22.1</td><td>9.8</td></tr><tr><td>SLL large</td><td>71.4</td><td>63.9</td><td>56.7</td><td>49.8</td><td>27.8</td><td>45.9</td><td>36.0</td><td>28.2</td><td>22.7</td><td>10.3</td></tr><tr><td>SLL xlarge</td><td>71.6</td><td>64.6</td><td>57.7</td><td>50.8</td><td>29.6</td><td>46.1</td><td>36.3</td><td>29.0</td><td>23.6</td><td>11.0</td></tr></table>
368
+
369
+ We can set $x' = W^{\top}x + b$ and $y' = W^{\top}y + b$ , and the above inequality becomes
370
+
371
+ $$
372
+ \left[ \begin{array}{c} W ^ {\mathsf {T}} (x - y) \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} 0 & - \Lambda \\ - \Lambda & 2 \Lambda \end{array} \right] \left[ \begin{array}{c} W ^ {\mathsf {T}} (x - y) \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] \leq 0.
373
+ $$
374
+
375
+ We can rewrite the above inequality as
376
+
377
+ $$
378
+ \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\top} x + b) - \sigma (W ^ {\top} y + b) \end{array} \right] ^ {\top} \left[ \begin{array}{c c} 0 & - W \Lambda \\ - \Lambda W ^ {\top} & 2 \Lambda \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\top} x + b) - \sigma (W ^ {\top} y + b) \end{array} \right] \leq 0. \tag {11}
379
+ $$
380
+
381
+ Now we can apply the following argument:
382
+
383
+ $$
384
+ \begin{array}{l} \left\| h (x) - h (y) \right\| ^ {2} = \left\| H (x - y) + \left(G \sigma (W ^ {\intercal} x + b) - G \sigma (W ^ {\intercal} y + b)\right) \right\| ^ {2} \\ = \left[ \begin{array}{c} H (x - y) \\ G (\sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b)) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} I & I \\ I & I \end{array} \right] \left[ \begin{array}{c} H (x - y) \\ G (\sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b)) \end{array} \right] \\ = \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} H ^ {\mathsf {T}} H & H ^ {\mathsf {T}} G \\ G ^ {\mathsf {T}} H & G ^ {\mathsf {T}} G \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] \\ \leq \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} I & - W \Lambda \\ - \Lambda W ^ {\mathsf {T}} & 2 \Lambda \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right], \\ \end{array}
385
+ $$
386
+
387
+ where the last step follows from the condition (10). Finally, we can combine the above inequality with (11) to show
388
+
389
+ $$
390
+ \begin{array}{l} \| h (x) - h (y) \| ^ {2} \leq \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] ^ {\mathsf {T}} \left[ \begin{array}{c c} I & 0 \\ 0 & 0 \end{array} \right] \left[ \begin{array}{c} x - y \\ \sigma (W ^ {\mathsf {T}} x + b) - \sigma (W ^ {\mathsf {T}} y + b) \end{array} \right] \\ = \| x - y \| ^ {2}, \\ \end{array}
391
+ $$
392
+
393
+ which is the desired conclusion.
394
+
395
+ # B ADDITIONAL RESULTS
396
+
397
+ In this section, we present some additional results and discuss the effect of the offset value on training. The choice of the offset value affects the performance of SLL significantly: larger offset values lead to a decrease in natural accuracy and an increase in certified robust accuracy. The details are documented in Table 6.
398
+
399
+ # C FURTHER DISCUSSIONS
400
+
401
+ In this section, we provide some extra discussions on control-theoretic interpretations and possible extensions of our main results.
402
+
403
+ # C.1 CONTROL-THEORETIC INTERPRETATIONS FOR OUR MAIN RESULTS
404
+
405
+ Our work is inspired by the quadratic constraint approach (Megretski et al., 1997) and the Lur'e system theory (Lur'e et al., 1944) developed in the control community. Specifically, the general network layer structure (5) can be viewed as a Lur'e system, which is a feedback interconnection of a linear dynamical system and a static nonlinearity. In this section, we try to make this connection more transparent.
406
+
407
+ Specifically, we can denote $x' = h(x)$ and rewrite (5) as follows
408
+
409
+ $$
410
+ \begin{array}{l} x ^ {\prime} = H x + G w \\ v = W ^ {\intercal} x + b \\ w = \sigma (v) \\ \end{array}
411
+ $$
412
+
413
+ which is exactly a shifted version of the Lur'e system. Therefore, it is not surprising that one can tailor the Lur'e system theory to study the properties of (5). As a matter of fact, the previous developments in Fazlyab et al. (2019) and Revay et al. (2020) were based on similar ideas. The main difference is that our paper requires solving SDPs analytically. In the controls literature, the formulated SDP conditions are typically solved numerically.
414
+
415
+ # C.2 A VARIANT OF THEOREM 1
416
+
417
+ When discussing AOL and SLL, our main paper makes the assumption that all the columns of $W$ have at least one non-zero entry such that (2) is well defined. To drop this assumption, we can use the following variant of Theorem 1.
418
+
419
+ Theorem 6. For any weight matrix $W \in \mathbb{R}^{m \times n}$ , if there exists a diagonal matrix $\Gamma \in \mathbf{S}^n$ such that $\Gamma W^{\mathrm{T}} W \Gamma \preceq \Gamma$ , then the two following statements hold true.
420
+
421
+ 1. The mapping $g(x) = W\Gamma^{\frac{1}{2}}x + b$ is 1-Lipschitz.
422
+ 2. The mapping $h(x) = x - 2W\Gamma \sigma (W^{\top}x + b)$ is 1-Lipschitz if $\sigma$ is ReLU, tanh or sigmoid.
423
+
424
+ The proof is omitted here, since we can use exactly the same argument as before. If $\Gamma$ happens to be nonsingular, then we can set $T = \Gamma^{-1}$ , and the above theorem exactly reduces to Theorem 1. However, the above result allows $\Gamma$ to be singular. This is useful for designing AOL and SLL in the case where $W$ has some zero columns. Suppose the $(i_0,j)$ -entry of $W^{\mathsf{T}}W$ is equal to 0 for all $j$ . Then we can set the $(i_0,i_0)$ -th entry of $\Gamma$ as 0 and still use (2) or (4) for other entries. It is straightforward to verify that the resultant $\Gamma$ is still a feasible solution to $\Gamma W^{\mathsf{T}}W\Gamma \preceq \Gamma$ , and then we can implement AOL or SLL accordingly.
425
+
426
+ # C.3 A VARIANT OF THEOREM 3
427
+
428
+ We can also modify Theorem 3 for the non-residual network layer case. The following variant of Theorem 3 is useful.
429
+
430
+ Theorem 7. Let $W$ be the weight matrix. Suppose $T$ is a nonsingular diagonal matrix. If there exists some diagonal matrix $Q$ with all positive diagonal entries such that $(T - QW^{\mathsf{T}}WQ^{-1})$ is a real diagonally dominant matrix with diagonal entries being all positive, then $T \succeq W^{\mathsf{T}}W$ , and the function $g(x) = WT^{-\frac{1}{2}}x + b$ is 1-Lipschitz.
431
+
432
+ The proof is trivial and hence omitted. Based on the above result, it is possible that one can use (4) to construct a non-residual layer that can still improve upon AOL.
433
+
434
+ Table 7: This table presents the natural and corrected provable accuracy of our SLL networks on the CIFAR10 and CIFAR100 datasets. Results for SLL networks are averaged over 3 training runs.
435
+
436
+ <table><tr><td rowspan="2">Datasets</td><td rowspan="2">Training</td><td rowspan="2">Models</td><td rowspan="2">Natural Accuracy</td><td colspan="4">Provable Accuracy (ε)</td></tr><tr><td>36 255</td><td>72 255</td><td>108 255</td><td>1</td></tr><tr><td rowspan="8">CIFAR10</td><td rowspan="4">Surrogate Training</td><td>SLL Small</td><td>71.3</td><td>62.7</td><td>53.8</td><td>45.4</td><td>20.4</td></tr><tr><td>SLL Medium</td><td>72.0</td><td>63.6</td><td>54.7</td><td>46.4</td><td>21.0</td></tr><tr><td>SLL Large</td><td>72.6</td><td>64.1</td><td>55.4</td><td>46.9</td><td>21.3</td></tr><tr><td>SLL X-Large</td><td>73.2</td><td>64.6</td><td>55.8</td><td>47.3</td><td>21.5</td></tr><tr><td rowspan="4">Exponential Scaling</td><td>SLL Small</td><td>71.5</td><td>62.8</td><td>53.7</td><td>45.2</td><td>19.4</td></tr><tr><td>SLL Medium</td><td>72.2</td><td>63.7</td><td>54.7</td><td>46.1</td><td>20.1</td></tr><tr><td>SLL Large</td><td>72.6</td><td>64.2</td><td>55.1</td><td>46.6</td><td>20.3</td></tr><tr><td>SLL X-Large</td><td>73.3</td><td>64.8</td><td>55.7</td><td>47.1</td><td>20.6</td></tr><tr><td rowspan="8">CIFAR100</td><td rowspan="4">Surrogate Training</td><td>SLL Small</td><td>45.8</td><td>34.7</td><td>26.5</td><td>20.4</td><td>7.2</td></tr><tr><td>SLL Medium</td><td>46.5</td><td>35.6</td><td>27.3</td><td>21.1</td><td>7.7</td></tr><tr><td>SLL Large</td><td>46.9</td><td>36.2</td><td>27.9</td><td>21.6</td><td>7.9</td></tr><tr><td>SLL X-Large</td><td>47.6</td><td>36.5</td><td>28.2</td><td>21.8</td><td>8.2</td></tr><tr><td rowspan="4">Exponential Scaling</td><td>SLL Small</td><td>45.8</td><td>34.8</td><td>26.5</td><td>20.2</td><td>7.2</td></tr><tr><td>SLL Medium</td><td>46.8</td><td>35.8</td><td>27.3</td><td>21.0</td><td>7.7</td></tr><tr><td>SLL Large</td><td>47.2</td><td>36.2</td><td>27.8</td><td>21.5</td><td>7.9</td></tr><tr><td>SLL X-Large</td><td>47.8</td><td>36.7</td><td>28.3</td><td>22.2</td><td>8.3</td></tr></table>
437
+
438
+ # D ERRATUM - CORRECTION OF SLL IMPLEMENTATION
439
+
440
+ The authors of Hu et al. (2023) discovered an issue in the original implementation of the SLL layers. We are grateful to them for bringing this to our attention. Now we elaborate on this issue and provide the corrected implementation of SLL.
441
+
442
+ Recall that our paper proposed the following SLL layer to build 1-Lipschitz networks:
443
+
444
+ $$
445
+ h (x) = x - 2 W \operatorname {d i a g} \left(\sum_ {j = 1} ^ {n} | W ^ {\mathsf {T}} W | _ {i j} \frac {q _ {j}}{q _ {i}}\right) ^ {- 1} \sigma \left(W ^ {\mathsf {T}} x + b\right), \tag {12}
446
+ $$
447
+
448
+ where $\sigma (\cdot)$ is the ReLU nonlinearity function. The parameters of this layer consist of $W$ , $\{q_i\}_{i = 1}^n$ and $b$ . One can prove that this layer is 1-Lipschitz. However, the division by $q_{i}$ in the ratio $q_{j} / q_{i}$ can make the training process unstable when some $q_i$ become very small. Previously, SLL was implemented as follows:
449
+
450
+ $$
451
+ h (x) = x - 2 W \operatorname {d i a g} \left(\sum_ {j = 1} ^ {n} | W ^ {\mathsf {T}} W | _ {i j} \frac {q _ {j}}{q _ {i} + \varepsilon}\right) ^ {- 1} \sigma \left(W ^ {\mathsf {T}} x + b\right), \tag {13}
452
+ $$
453
+
454
+ where $\varepsilon = 10^{-6}$ is added to the denominator to avoid dividing by 0. However, the layer in Equation (13) is no longer guaranteed to be 1-Lipschitz due to the presence of $\varepsilon$ .
455
+
456
+ There are several ways to fix the above issue. For example, we can still use (13) for training, and then substitute the resulting values of $(W,\{q_i\},b)$ into (12) for evaluating the certified robust accuracy. In other words, we can use (13) as a surrogate for stable training of (12). The results of surrogate training are presented in Table 7. Another way to address the above issue is to use exponential scaling:
457
+
458
+ $$
459
+ h (x) = x - 2 W \operatorname {d i a g} \left(\sum_ {j = 1} ^ {n} | W ^ {\mathsf {T}} W | _ {i j} \frac {\exp (q _ {j})}{\exp (q _ {i})}\right) ^ {- 1} \sigma \left(W ^ {\mathsf {T}} x + b\right), \tag {14}
460
+ $$
461
+
462
+ The training of the above layer is stable, and the resultant network is indeed 1-Lipschitz. Table 7 also presents the corrected results obtained using the above exponential scaling form. We can see that the results from surrogate training in Table 7 are similar to those obtained using the exponential scaling. We can also observe from Table 7 that SLL outperforms AOL on CIFAR100. For CIFAR10 with $\varepsilon = \frac{36}{255}$ , SLL still outperforms AOL. However, for
463
+
464
+ CIFAR10 with $\varepsilon = \frac{72}{255}$ or $\frac{108}{255}$ , AOL achieves better results than SLL. The Github repo https://github.com/araujoalexandre/Lipschitz-SLL-Networks has been corrected accordingly.
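
For concreteness, a minimal dense (fully connected) version of the corrected layer (14) can be sketched in PyTorch as follows. This is an illustrative sketch only, with hypothetical class and variable names; the authors' repository implements the convolutional variants and further details:

```python
# Minimal dense SLL layer using the exponential scaling of equation (14).
# Illustrative sketch only; not the official implementation from the
# Lipschitz-SLL-Networks repository (which handles the convolutional case).
import torch
import torch.nn as nn
import torch.nn.functional as F

class SLLDense(nn.Module):
    def __init__(self, dim_in: int, dim_hidden: int):
        super().__init__()
        self.W = nn.Parameter(torch.randn(dim_in, dim_hidden) / dim_in ** 0.5)
        self.q = nn.Parameter(torch.zeros(dim_hidden))  # only used through exp(q), so unconstrained
        self.b = nn.Parameter(torch.zeros(dim_hidden))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # T_ii = sum_j |W^T W|_ij * exp(q_j) / exp(q_i); exp(q_i) > 0, so the
        # division cannot blow up and the layer stays 1-Lipschitz (equation (14)).
        wtw_abs = (self.W.t() @ self.W).abs()
        t = (wtw_abs * torch.exp(self.q)[None, :]).sum(dim=1) / torch.exp(self.q)
        return x - 2.0 * (F.relu(x @ self.W + self.b) / t) @ self.W.t()

# Quick empirical check of the 1-Lipschitz property.
layer = SLLDense(16, 32)
x, y = torch.randn(4, 16), torch.randn(4, 16)
with torch.no_grad():
    assert torch.linalg.norm(layer(x) - layer(y)) <= torch.linalg.norm(x - y) + 1e-5
```

In the surrogate-training variant described above, one would instead compute the diagonal of $T$ with the ratio $q_j / (q_i + \varepsilon)$ during training, as in (13), and re-evaluate the trained weights with (12) at certification time.
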
465
+
466
+ Finally, it is worth mentioning that Kai Hu's email also pointed out that the following layer is 1-Lipschitz and can be trained in a stable manner:
467
+
468
+ $$
469
+ h (x) = x - 2 W \operatorname {d i a g} \left(\sum_ {j = 1} ^ {n} | W ^ {\mathsf {T}} W | _ {i j} \frac {q _ {j} + \varepsilon}{q _ {i} + \varepsilon}\right) ^ {- 1} \sigma (W ^ {\mathsf {T}} x + b).
470
+ $$
471
+
472
+ In comparison to our current results in Table 7, the certified robustness results from the above variant (reported in Kai Hu's email) are worse on CIFAR100 and similar on CIFAR10.
2303.03xxx/2303.03169/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f5686e2e2816e39c0d91be4a507e4eaa05fe90b122a432c01da41455426fb22
3
+ size 673944
2303.03xxx/2303.03169/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03192/c02e4a23-d7da-4df7-9fea-f5f09ba71fcf_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03192/c02e4a23-d7da-4df7-9fea-f5f09ba71fcf_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03192/c02e4a23-d7da-4df7-9fea-f5f09ba71fcf_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:37d75a9d1a67810ec1bfdfb63d3ebdf7bcf92fe8dd123d811812c565a9ced4c3
3
+ size 3659741
2303.03xxx/2303.03192/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03192/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e6b69dc9c1e613226bd99566c3123b928bac358ba3a82c5b463bd5e41aed9154
3
+ size 946690
2303.03xxx/2303.03192/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03199/7e08506b-fc50-4f5d-a51c-9647c02cc0e2_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03199/7e08506b-fc50-4f5d-a51c-9647c02cc0e2_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03199/7e08506b-fc50-4f5d-a51c-9647c02cc0e2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45df404891ed15fd898b0980f50684682f81e1775baeb3169ca4c37c4575e266
3
+ size 2182212
2303.03xxx/2303.03199/full.md ADDED
@@ -0,0 +1,589 @@
1
+ # Choice Over Control: How Users Write with Large Language Models using Diegetic and Non-Diegetic Prompting
2
+
3
+ Hai Dang
4
+
5
+ hai.dang@uni-bayreuth.de
6
+
7
+ University of Bayreuth
8
+
9
+ Bayreuth, Bavaria, Germany
10
+
11
+ Florian Lehmann
12
+
13
+ florian.lehmann@uni-bayreuth.de
14
+
15
+ University of Bayreuth
16
+
17
+ Bayreuth, Bavaria, Germany
18
+
23
+ Sven Goller
24
+
25
+ sven.goller@uni-bayreuth.de
26
+
27
+ University of Bayreuth
28
+
29
+ Bayreuth, Bavaria, Germany
30
+
31
+ Daniel Buschek
32
+
33
+ daniel.buschek@uni-bayreuth.de
34
+
35
+ University of Bayreuth
36
+
37
+ Bayreuth, Bavaria, Germany
38
+
+ [Figure 1 panel text omitted: column labels "Only diegetic prompts" (suggestions are guided only by the text written so far) and "Also with non-diegetic prompts" (users can optionally write explicit instructions to the AI); the rows show the single- and multiple-suggestion UIs, each panel with an example draft and generated continuations.]
69
+ Figure 1: Overview of our four UI variants, showing the user's written text (black font, i.e. a diegetic prompt), the suggestions (text highlighted in green, and options in the list), and a popup text box that allows users to input an instruction as a zero-shot prompt to the system (i.e. a non-diegetic prompt).
70
+
71
+ # ABSTRACT
72
+
73
+ We propose a conceptual perspective on prompts for Large Language Models (LLMs) that distinguishes between (1) diegetic prompts (part of the narrative, e.g. "Once upon a time, I saw a fox ..."), and (2) non-diegetic prompts (external, e.g. "Write about the adventures of the fox"). With this lens, we study how 129 crowd workers on Prolific write short texts with different user interfaces (1 vs 3 suggestions, with/out non-diegetic prompts; implemented with GPT-3): When the interface offered multiple suggestions and provided an option for non-diegetic prompting, participants preferred choosing from multiple suggestions over controlling them via non-diegetic prompts. When participants provided non-diegetic prompts it was to ask for inspiration, topics or facts. Single suggestions in particular were guided both with diegetic and non-diegetic information.
74
+
85
+ This work informs human-AI interaction with generative models by revealing that (1) writing non-diegetic prompts requires effort, (2) people combine diegetic and non-diegetic prompting, and (3) they use their draft (i.e. diegetic information) and suggestion timing to strategically guide LLMs.
86
+
87
+ # CCS CONCEPTS
88
+
89
+ - Human-centered computing $\rightarrow$ Empirical studies in HCI; Text input; • Computing methodologies $\rightarrow$ Natural language generation.
90
+
91
+ # KEYWORDS
92
+
93
+ Large language models, Co-creative systems, Human-AI collaboration, User-centric natural language generation
94
+
95
+ # ACM Reference Format:
96
+
97
+ Hai Dang, Sven Goller, Florian Lehmann, and Daniel Buschek. 2023. Choice Over Control: How Users Write with Large Language Models using Diegetic and Non-Diegetic Prompting. In CHI '23: ACM Conference on Human Factors in Computing Systems, April 23–April 28, 2023, Hamburg, Germany. ACM, New York, NY, USA, 17 pages. https://doi.org/10.1145/3544548.3580969
98
+
99
+ # 1 INTRODUCTION
100
+
101
+ When writing collaboratively, people coordinate and inspire each other through what they write in the draft itself and through communication beyond it. In this paper, we examine related mechanisms for human-AI co-writing.
102
+
103
+ Input text provided to a Large Language Model (LLM) as a basis for generating text is referred to as a "prompt". Providing a few examples of inputs and outputs in such a text prompt can help the model solve a task [5, 50]. This is called few-shot learning. For example, an LLM can be prompted to translate from English to French with a few examples of English sentences and corresponding translations, followed by the English sentence to be translated. By completing this text the LLM then (ideally) translates that sentence. This affords user control: Users can define tasks and delegate them to an LLM ad-hoc. Going further, zero-shot learning prompts the LLM with an instruction without examples (e.g. Translate 'The weather is nice' to French). This is a harder task but from a Human-Computer Interaction (HCI) point of view it frees users from thinking of specific examples when instructing the AI system.
104
+
105
+ We introduce the terms diegetic prompting and non-diegetic prompting<sup>1</sup> to frame a new perspective on how users influence an LLM in their writing process. A diegetic prompt is part of the users' narrative. For example, when the user writes about a vacation in South East Asia, the story as written so far forms the diegetic prompt. In contrast, a non-diegetic prompt is an explicit instruction to the LLM (e.g. "suggest activities to do in Singapore"). Crucially, this instruction is not a part of the resulting document (e.g. travel blog); it only serves to guide the LLM's text generation.
106
+
107
+ Technically, there may not be a difference between diegetic and non-diegetic prompts for the LLM – both types are received by the model as text input strings. However, from an HCI perspective, this distinction allows us to identify patterns in the perception and interaction of users writing with LLMs. With this new distinction, in this paper we address the research question: How do users write with Large Language Models using diegetic and non-diegetic prompting?
108
+
109
+ Concretely, we propose and compare four UI variants (Figure 1) that allow people to write with these types of prompts, plus a baseline UI without suggestions. We conducted a remote study with 129 crowd workers on Prolific, each writing five stories. We investigate the influence of two independent variables on users' writing behavior, namely INSTRUCTION with two levels ( $i_{no}$ , $i_{yes}$ ) and NUMBER of suggestions with three levels (baseline: $s_0$ ; $s_1$ , $s_3$ ).
110
+
111
+ Users overall prefer choosing from multiple suggestions over controlling them via non-diegetic prompts. They use non-diegetic prompts to ask the LLM for inspiration, topics or facts. Non-diegetic prompts increase effort, for learning how to formulate them and switching between diegetic and non-diegetic writing. Users also prefer the UI with multiple suggestions over seeing single ones, yet allowing them to provide non-diegetic prompts reduces the gap in acceptance rates by boosting it for the UI with single suggestions. Moreover, single suggestions are triggered later in sentences, and less frequently at transition words and to start sentences. Together with people's comments, this indicates that writers consider diegetic information to guide LLMs. We discuss implications for LLMs and interaction design.
112
+
113
+ We contribute a new conceptual lens on prompting that distinguishes diegetic and non-diegetic ways in which users can influence LLMs, and a new UI design to combine text continuation suggestions with zero-shot prompt input.
114
+
115
+ # 2 RELATED WORK
116
+
117
+ We relate our work to prompting in Natural Language Processing (NLP) and writing interfaces in Human-Computer Interaction (HCI). Moreover, we present our proposed concept of diegetic and non-diegetic prompting by locating it in existing user interfaces for writing and prompting.
118
+
119
+ # 2.1 Prompting in Large Language Models
120
+
121
+ Language models are trained to predict the next word given the previous words in the text. One primary advantage of Deep Learning-based LLMs is that they can solve several natural language processing tasks without being specifically trained on those. This can be done via text prompts written in natural language [5]. Zhao et al. [50] show that providing a few examples of inputs and outputs can help to steer the model. However, optimizing prompts is not trivial and requires extensive experience [28].
122
+
123
+ 2.1.1 Prompt Engineering. Related work in prompt engineering has proposed several methods to improve prompts: For example, paraphrasing prompts can lead to better model outputs [20, 23, 27, 49]. Another approach involves constructing prompt templates to increase the accuracy for probing knowledge [33], for translation tasks [5], or for text classification tasks [39]. However, optimized prompts constructed in the process of prompt engineering are usually not meant to be consumed by humans; rather, they are designed for LLMs to most effectively perform a task [28]. In contrast, in our study, we explore how non-expert users write and use (zero-shot) prompts when writing with an LLM.
124
+
125
+ 2.1.2 Prompting Interfaces. Several interactive systems have been proposed to enable users to work more effectively with prompts: For example, AI Chains by Wu et al. [47] allows users to combine multiple prompt primitives and their outputs to form a chain of prompts that can solve complex language processing tasks. In another study, they introduce an interface for visually programming these chains [46]. Similarly, PromptMaker [22] allows users to prototype new AI functionalities using language prompts. Strobelt et al. [42] developed a prompt programming environment to allow users to experiment with prompt variations and visualize prompt performance. Story Centaur by Swanson et al. [43] supports users in creating few-shot examples for creative writing. Using our terminology, these projects focused on non-diegetic prompts as a main output of interaction. In contrast, we integrate non-diegetic prompts into a text editor, with a focus on writing. Concretely, we combine a UI for phrase suggestions with a UI for zero-shot prompt inputs to an LLM, and analyze how users make use of these during their writing.
126
+
127
+ # 2.2 Writing Interfaces for LLMs
128
+
129
+ Here we give a brief overview of key design factors for user interfaces that involve LLMs and text generation.
130
+
131
+ 2.2.1 Scope of Suggestions. Earlier work mainly focused on single word suggestions [11, 12, 18, 34]. This scope favours performance metrics, such as reducing key-strokes, while longer phrase suggestions [6, 26, 37] are perceived more as new ideas for writing [1]. We focus on such phrase suggestions in this paper.
132
+
133
+ 2.2.2 Display of Suggestions. Single text suggestions can be shown inline [4, 8, 17, 48], whereas multiple suggestions are shown as pop-up lists of about three to six entries [6, 26]. Beyond that, Singh et al. [41] evaluated how writers use suggestions displayed as images and sound. Moreover, Bhat et al. [4] used a pop-up text box to show suggestions for insertions in the middle of sentences. We follow these design choices (Figure 1) and show single suggestions inline and multiple ones in a pop-up list. We add a pop-up text field for entering non-diegetic prompts.
134
+
135
+ 2.2.3 Implicit vs. Explicit Trigger. In writing interfaces, suggestions can be triggered explicitly or implicitly. Related work showed suggestions automatically after short inactivity [4, 6] or gated by a utility function [24]. Alternatively, recent work has also explored designs in which users explicitly request suggestions with a hotkey [7, 17, 26, 41, 48]. We also use this design with an explicit request key to better understand how and when users request suggestions.
136
+
137
+ # 2.3 Diegetic and Non-Diegetic Prompting in Existing Writing Interfaces
138
+
139
+ Here we apply the proposed lens to analyse how existing systems use diegetic and non-diegetic information in their writing interfaces. Traditionally, systems mainly use diegetic information, that is, they predict text based (only) on the preceding text [11, 12, 18, 34]. Some also added other information (e.g. hand posture, body movement [15, 16]). These show early examples of non-diegetic input to the language model. In this work, we focus on textual diegetic and non-diegetic information.
140
+
141
+ From a technical perspective, for recent systems that use LLMs to generate text suggestions, there might be no difference between the user's text draft (i.e. diegetic text) and other text inputs to the language model (e.g. instructions to the model, i.e. non-diegetic text). Therefore, systems in which the UI did not afford text prompts explicitly made the implicit choice of only using diegetic information as their input to the LLM [6, 7, 26, 41].
142
+
143
+ In contrast, writing interfaces that indeed allow users to explicitly enter prompts often use a mix of diegetic and non-diegetic information. Gero et al. [14] propose "sparks", i.e. sentences generated from LLMs to inspire new ideas for scientific writing. The user-provided prompts to generate these sparks are not part of the final outcome text, thus they are non-diegetic. Similarly, other systems (e.g. Wordcraft [48], LaMPost [17]) allowed users to select a part of the written text and modify it via predefined functionality (internally these functionalities also use prompting: e.g. a button for "rewrite selection" + text entry field for prompt). The selected text in this example is diegetic information while the prompt template and user-provided prompts are non-diegetic information. Related, we include the entire user written text draft as diegetic information and allow users to provide non-diegetic custom text prompts to further guide the LLM.
144
+
145
+ # 3 INTERACTION CONCEPT
146
+
147
+ Here we describe our UI and interaction concept (also see Figure 1 and Figure 8): It closely integrates diegetic and non-diegetic prompting in the same UI; users can use both types without having to take their hands off the keyboard.
148
+
149
+ # 3.1 Inline (Single) Suggestions (Figure 1 top row)
150
+
151
+ When a user requests a new suggestion (TAB), a preview of the suggestion appears after the current caret position in the text editor. Users can press TAB repeatedly to get new suggestions. The suggestion preview is visually highlighted in green to indicate that it is not part of the text yet. We decided on this design instead of e.g. a greyed out suggestion text (as e.g. used in Google's SmartCompose [8]) because pilot tests showed that grey text can be difficult to read for some people and makes readability more dependent on screen brightness settings, which we cannot control in an online study. If the suggestion is accepted (ENTER), the preview style (green background) is removed and the suggested text becomes part of the text document. Alternatively, the user can cancel the current suggestion preview by pressing Esc or by continuing to type without confirming the suggestion. When the suggestion is cancelled in one of these ways, the previewed suggestion is removed from the text editor.
152
+
153
+ # 3.2 Multiple Suggestions (Figure 1 bottom row)
154
+
155
+ Our system follows current practices for multiple suggestions (see Section 2.2) and shows each phrase suggestion as a separate item in a list of three. Again, users can press TAB to get suggestions (and repeatedly to get new ones). Users can use the $\uparrow$ UP/ $\downarrow$ DOWN keys to navigate this list and confirm a suggestion with ENTER. Selection via mouse is also possible.
156
+
157
+ # 3.3 Pop-up Textbox for Non-Diegetic Prompts (Figure 1 right column)
158
+
159
+ In the study, we described non-diegetic prompts as "instructions to the AI". Users can request suggestions with $\boxed{\mathrm{TAB}}$ as before. Additionally, they can enter an instruction by typing in a popup box that appears above the caret position. Thus, users have the option to input an instruction but are not forced to do so to request suggestions. Input focus is automatically switched from the text editor to the pop-up textbox when requesting suggestions so users can type instructions directly after pressing $\boxed{\mathrm{TAB}}$ . Users can submit the instruction with $\boxed{\mathrm{TAB}}$ or $\boxed{\mathrm{Enter}}$ . They can then press $\boxed{\mathrm{Enter}}$ again to accept the selected suggestion. Alternatively, they can revise their instruction to update the suggestions.
160
+
161
+ # 4 PROTOTYPE IMPLEMENTATION
162
+
163
+ Here we provide details about the web prototype used in the study. For screenshots, see Figure 1 and Appendix A.
164
+
165
+ # 4.1 Web System
166
+
167
+ The prototype was implemented with ReactJS $^2$ and CKEditor $^5$ . Each suggestion request from the client was passed to and parsed by a backend server which used FastApi $^4$ as a lightweight webserver. The server forwarded these requests to OpenAI's text-davinci-edit-001 model along with the entire written text as well as an instruction for the suggestion model (see Section 3). We chose this model and API because it is reportedly trained specifically to take in a given text as well as a (separate) instruction relating to the text.
168
+
169
+ # 4.2 Language Model Prompts
170
+
171
+ We used two default prompt prefixes to retrieve sentence completions from GPT-3 (text-davinci-edit-001): (1) Complete the sentence. (2) Complete the sentence and <user Instruction>. The system automatically used (1) when there was no option for the participants to provide explicit instructions to the AI, or when users did not provide an instruction. When they did write instructions, these were appended to (2). For instance, if the user wrote the instruction: 'suggest colors', the resulting full instruction sent to the model was: 'Complete the sentence and suggest colors'. During the pre-study we experimented with other default instructions such as: 'Continue' or 'Continue the text', as well as more complex ones, but found them to be less suitable (e.g. produced longer text or less consistent). We applied a post processing step to trim the model's output and display only the generated continuation.
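
To illustrate how such a request might be assembled, the sketch below uses the legacy (pre-1.0) openai Python client and its Edits endpoint; the function names, sampling parameters and trimming heuristic are our own assumptions and this is not the authors' actual backend code:

```python
# Hedged sketch of a suggestion request to text-davinci-edit-001 using the
# legacy (pre-1.0) openai Python client. Function names, parameters and the
# trimming heuristic are assumptions for illustration, not the study's backend.
from typing import List, Optional
import openai

def build_instruction(user_instruction: Optional[str]) -> str:
    # Default prefix (1) without a user instruction, prefix (2) with one.
    if user_instruction:
        return f"Complete the sentence and {user_instruction}"
    return "Complete the sentence."

def request_suggestions(draft: str, user_instruction: Optional[str] = None, n: int = 3) -> List[str]:
    response = openai.Edit.create(
        model="text-davinci-edit-001",
        input=draft,                                      # diegetic prompt: the text written so far
        instruction=build_instruction(user_instruction),  # optional non-diegetic prompt
        n=n,
        temperature=0.7,                                  # assumed value
    )
    # The edit endpoint returns the full edited text; keep only the continuation
    # (a simple trimming heuristic standing in for the paper's post-processing).
    return [choice["text"][len(draft):].strip() for choice in response["choices"]]

# e.g. request_suggestions("I was planning to travel to", "suggest activities to do in Singapore")
```
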
172
+
173
+ # 4.3 Information Box
174
+
175
+ For the user study, we implemented an information box (Figure 8 in Appendix A) which explains the different features of the current text editor setup. Concretely, it showed an image that demonstrates the usage of the UI as well as an explanation of the available action keys.
176
+
177
+ # 5 METHOD
178
+
179
+ We used the following methods, in line with related studies on human-AI writing (e.g. cf. [6, 26]).
180
+
181
+ # 5.1 Questionnaires
182
+
183
+ To assess participants' backgrounds, an initial questionnaire asked about demographics and experience with writing features and language models. Participants also filled in one questionnaire after each UI variant (see Figure 3) to give subjective feedback per UI. To extend on this with overall feedback, a final questionnaire asked for (optional) open comments on changes to the system and experiences with suggestions and instructions.
184
+
185
+ # 5.2 Interaction Logging
186
+
187
+ To analyze interaction behaviour in detail, we logged interaction events, i.e. key and mouse events, during the writing tasks (see Appendix A). Each event included a timestamp, task id, and the current text in the editor. Depending on the event it included information
188
+
189
+ such as the suggestion trigger position in the text or the instruction to the AI.
190
+
191
+ # 5.3 Coding of Open Questions
192
+
193
+ We analysed the open comments from the final questionnaire in an approach adopting coding steps from Grounded Theory [10, 29], in order to identify and report on the emerging aspects: First, two researchers inductively proposed codes for the data of 20 people. They then compared and clustered these codes to develop a common codebook. Then, they coded the first 20 plus 32 more participants and checked each other's codings, with slight adjustments to the codebook. Finally, one researcher coded the remaining data and another one checked this coding. Throughout the process, disagreements were resolved via discussion.
194
+
195
+ # 5.4 Evaluation of User Written Text
196
+
197
+ We used LanguageTool<sup>5</sup>, a multilingual grammar and spell-checker, to count the number of grammar and spelling mistakes. To evaluate the degree to which participants engaged with the selected writing prompts during the user study (cf. Section 6), three researchers independently reviewed the stories and provided comments on their connection to the prompts. Finally, one researcher reviewed all comments to ensure consistency.
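
For reference, error counts of this kind can be reproduced in Python with the language_tool_python wrapper around LanguageTool; the wrapper choice is ours, as the paper does not specify its exact tooling beyond LanguageTool:

```python
# Hedged sketch: counting grammar and spelling issues with LanguageTool via the
# language_tool_python wrapper (illustrative; not the authors' evaluation script).
import language_tool_python

tool = language_tool_python.LanguageTool("en-US")

def count_issues(text: str) -> int:
    # Each returned match corresponds to one flagged grammar or spelling problem.
    return len(tool.check(text))

print(count_issues("This are a example sentense."))  # -> a small positive count
```
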
198
+
199
+ # 6 USER STUDY
200
+
201
+ # 6.1 Study Design
202
+
203
+ Our study uses a within-subject design with two independent variables: The NUMBER of (parallel) suggestions with two levels: one and three suggestions $(s_1, s_3)$ ; and the opportunity for INSTRUCTION with two levels $(i_{no}, i_{yes})$ . This results in four UI variants with suggestions. In addition, we included a baseline UI without any suggestions $(s_0)$ . The order of these five UIs was fully counterbalanced. As dependent variables we included interaction measures as well as questionnaire data.
204
+
205
+ # 6.2 Participants
206
+
207
+ We conducted a pre-study with 6 participants, with direct discussions for rich feedback, followed by our main study with 129 participants ($M = 71$, $F = 57$, $NB = 1$). We recruited on Prolific<sup>6</sup> and screened participants for written and spoken fluency in English, as well as access to a computer with a keyboard. Participants' reported ages ranged from 18 to 70, with a median age of 32. Following the platform's recommendations, participants were compensated with 8 £/h.
208
+
209
+ # 6.3 Procedure
210
+
211
+ The study started with a description page, including information about the collected data and GDPR, in line with our institute's regulations. After giving their consent, participants were directed to a page with an overview of the procedure and involved UI variants. Following this, people were guided through the five writing tasks in a counterbalanced order, and then to the final questionnaire. The study had an estimated duration of 45 minutes (actual mean was 45 minutes and 41 seconds).
212
+
213
+ 6.3.1 Topic Selection. For each task, participants first selected a writing topic. Repeated selections were allowed, as in related work [26], yet we asked them to choose at least two different topics overall. The topic order was also randomized and shown one at a time to encourage variety in the topic choices overall.
214
+
215
+ Gero et al. [13] suggested three tasks for writing support tools, including story writing and argumentative essay writing. We thus selected five topics for creative writing<sup>7</sup> and five topics for argumentative writing from the same source<sup>8</sup> as Lee et al. [26].
216
+
217
+ 6.3.2 Writing Task. Participants were told to write about the previously selected topic for five minutes and to finish their text with a clear ending. The description encouraged them to try out all features but also to write their own text. A timer was shown below the text editor. It was mentioned that the timer was not a hard cut-off, but served as a reminder of when to finish the task. We set a minimum time of 15 seconds before participants could submit their story, but people stayed close to the five minutes anyway (see Section 7.2). People filled in a questionnaire after each task (Section 5.1).
218
+
219
+ # 7 RESULTS
220
+
221
+ Here we present our study results. For statistical testing we use R [35], concretely (generalised) linear mixed-effects models (LMMs; using the packages lme4 [2] and lmerTest [25]). The models account for participants' individual differences, as well as for the type of their chosen topics (creative story writing, argumentative writing), via random intercepts. As fixed effects, the models include INSTRUCTION and NUMBER. Moreover, we use the R package multgee [44] to analyse the Likert results (i.e. ordinal data) with Generalized Estimating Equations (GEEs). We report significance at $p < 0.05$.
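The following R sketch summarises this modelling setup; the data frame and column names are illustrative, and the exact formulas used for each result in Table 1 may differ in detail.

```r
# Sketch of the analysis setup (column names are illustrative).
library(lme4)      # (generalised) linear mixed-effects models
library(lmerTest)  # p-values for LMMs
library(multgee)   # GEEs for ordinal (Likert) responses

# d: one row per writing task, with the fixed effects NUMBER and INSTRUCTION
# and random intercepts for participant and topic type.
m_time <- lmer(task_time_s ~ number * instruction +
                 (1 | participant) + (1 | topic_type), data = d)
summary(m_time)

# Likert ratings (ordinal), with participants as the clustering unit.
m_likert <- ordLORgee(rating ~ number * instruction,
                      data = likert_d, id = participant, repeated = task_index)
summary(m_likert)
exp(coef(m_likert))  # odds ratios, i.e. the "x" values reported in Section 7.5
```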
222
+
223
+ We define a suggestion session as continuous interaction with suggestions, from requesting them until cancellation or acceptance (e.g. a session might involve three subsequent "tab" presses to browse suggestions). Participants triggered 3097 suggestion sessions. The mean number of sessions per task with suggestions enabled was 6.47 (SD 4.00), comparable to related work [26].
224
+
225
+ # 7.1 Suggestion Acceptance
226
+
227
+ We define the acceptance rate as the number of accepted suggestions divided by the number of triggered suggestion sessions. We found considerable differences between the UIs (Means: $s_1 = 0.55$ , $s_3 = 0.74$ , $i_{no} = 0.59$ , $i_{yes} = 0.69$ ). The grand mean acceptance rate was 0.64 (SD 0.29). The mean for suggestion requests with a written instruction was in line with this (0.64, SD: 0.33). We fitted a generalised LMM on the acceptances as binomial data (i.e. for each shown suggestion we logged if it was accepted or not), summarized in Table 1 (row 1). Figure 2 (top left) shows the descriptive data. In summary, showing one suggestion (instead of three) significantly decreased the chance of acceptance, yet enabling users to write instructions significantly reduced this gap by increasing their acceptance (Mean rate of 0.45 for $s_1$ without instructions vs 0.65 with them).
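A sketch of this acceptance analysis is shown below, assuming a session-level data frame `sessions` with a 0/1 `accepted` column; the column names are illustrative.

```r
# Sketch: acceptance rates per UI and the binomial GLMM of Table 1 (row 1).
library(lme4)

# Mean acceptance rate per combination of NUMBER and INSTRUCTION.
aggregate(accepted ~ number + instruction, data = sessions, FUN = mean)

# Generalised LMM on acceptance as binomial data, with random intercepts
# for participant and topic type (cf. Section 7).
m_accept <- glmer(accepted ~ number * instruction +
                    (1 | participant) + (1 | topic_type),
                  data = sessions, family = binomial)
summary(m_accept)  # negative s1 effect, positive NUMBER x INSTRUCTION interaction
```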
228
+
229
+ # 7.2 Task Completion Time
230
+
231
+ We measured task time from starting the task to submitting it (Means: $s_0 = 281$ , $s_1 = 305$ , $s_3 = 306$ , $i_{no} = 303$ , $i_{yes} = 309$ ). As a fixed writing time was given, we do not expect large differences here. Indeed, an LMM fitted on this data for the suggestion UIs did not reveal significant effects (Table 1, row 2). Another such model compared the suggestion UIs against the baseline (Table 1, row 3): Here we found that writing with three suggestions took significantly longer than without suggestions. This is in line with the descriptive picture in Figure 2 (top center): Participants followed the task description of writing for five minutes, and writing with the suggestion UIs took slightly longer.
232
+
233
+ # 7.3 Text Length
234
+
235
+ In total, submitted texts contained 87,640 words, including text from accepted suggestions. The grand mean number of words per text was 134 words (SD 54). We fitted a generalised (Poisson) LMM on the word count data to compare the four tasks with suggestions (Table 1, row 4), and another such model to compare the suggestion UIs against the baseline without suggestions (Table 1, row 5). The results match the descriptive pattern visible in Figure 2 (top right): In summary, texts are significantly shorter when writing with single suggestions or with a UI allowing for instructions. However, writing with multiple suggestions leads to significantly longer texts. These differences are rather small, about 6-10 words (Means: $s_0 = 136$ , $s_1 = 130$ , $s_3 = 140$ , $i_{no} = 138$ , $i_{yes} = 131$ ).
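A corresponding sketch for the word-count models is given below; again, the data frame and column names are illustrative assumptions.

```r
# Sketch: word counts per text and the Poisson GLMM of Table 1 (rows 4-5).
library(lme4)

# texts: one row per submitted text (column names are illustrative).
texts$word_count <- sapply(strsplit(trimws(texts$final_text), "\\s+"), length)

m_length <- glmer(word_count ~ number * instruction +
                    (1 | participant) + (1 | topic_type),
                  data = texts, family = poisson)
summary(m_length)
```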
236
+
237
+ # 7.4 Moments of Suggestion Requests
238
+
239
+ We analysed at which moments participants requested suggestions.
240
+
241
+ 7.4.1 After Sentence vs Mid-sentence. We analysed how often suggestions were requested at the start of a new sentence (e.g. "Hello, world! [tab]") vs in the middle of a sentence (e.g. "Hello world, how [tab]"). We fitted a generalised LMM on the requests as binomial data (i.e. for each request we logged if it was at the beginning of a new sentence or not), summarized in Table 1 (row 6). Figure 2 (bottom left) shows the descriptive data. In summary, showing one suggestion (instead of three) significantly decreased the chance of requesting suggestions at the beginning of a new sentence (Means: $s_1 = 21.70\%$, $s_3 = 31.02\%$).
242
+
243
+ 7.4.2 Number of Words in Sentence. For the suggestion requests in the middle of sentences we further analysed after how many words in that sentence they were requested. We fitted an LMM on the mean numbers of words in sentences with suggestion requests per text, summarized in Table 1 (row 7). Figure 2 (bottom center) shows the descriptive data. In summary, showing one suggestion (instead of three) significantly increased the number of words in a sentence after which suggestions were requested – by about 1.5 words (Means: $s_1 = 10.93$ , $s_3 = 9.48$ ; i.e. a relative increase of $15.3\%$ ), while INSTRUCTION seemed to make no difference (Means: $i_{no} = 10.18$ , $i_{yes} = 10.34$ ). Note that 1-2 words later in a sentence is considerable because it may lead to very different constraints that users give to the system for possible continuations (e.g. “The...” vs “The man said...”).
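The classification of request moments in 7.4.1 and the word counts in 7.4.2 can be illustrated with the simple heuristics below; these are our own assumptions about how the logged text before the cursor can be analysed, not necessarily the exact rules used.

```r
# Sketch: classify a suggestion request as sentence-start vs mid-sentence,
# and count the words in the current (unfinished) sentence.
starts_new_sentence <- function(text_before_cursor) {
  t <- trimws(text_before_cursor)
  t == "" || grepl("[.!?]$", t)
}

words_in_current_sentence <- function(text_before_cursor) {
  # Take the text after the last sentence-ending punctuation mark.
  parts <- strsplit(text_before_cursor, "[.!?]")[[1]]
  if (length(parts) == 0) return(0L)
  current <- trimws(parts[length(parts)])
  if (!nzchar(current)) return(0L)
  length(strsplit(current, "\\s+")[[1]])
}

starts_new_sentence("Hello, world!")           # TRUE: request starts a sentence
words_in_current_sentence("Hello world, how")  # 3: request after three words
```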
244
+
245
+ 7.4.3 Words at the Suggestion Requests. We further analysed the type of words after which suggestions were requested. Concretely,
246
+
247
+ ![](images/b1430a93d1ca93ed30a2321bebb477c3e106eb79a4fafd201c9a246b60852627.jpg)
248
+
249
+ ![](images/ef35e43cf617da056d80078c323701ace8fed4cc89d375fe7aa503f0623490a5.jpg)
250
+
251
+ ![](images/baf757c813f32b8914534a8824a325f963e8541d8b1bd92c68e9f0024f05eb1b.jpg)
252
+
253
+ ![](images/e46347e37eeeda81c93915f9559ec12b8c1c3d806f2d422b284851c5c84b7aea.jpg)
254
+ Figure 2: Overview of the interaction metrics in our study. In summary, we observe: (1) Giving users the option to write instructions (i.e. non-diegetic prompts) increases the acceptance rate of suggestions for single suggestions, but not beyond that of multiple suggestions (top left). (2) Writing time did not vary much and texts were slightly shorter with single suggestions and instructions (top center/right). (3) Single suggestions were requested less often at the start of sentences (bottom left), about 1.5 words later in a sentence (bottom center), and less often after transition words (bottom right). See text for details.
255
+
256
+ ![](images/2444d3fee6e32eba5b99d133baa2d753b36855af1086c214dcc4b0ab42760a48.jpg)
257
+
258
+ ![](images/8160ec76ee0005f4b8f53b60711085992f2fcaa6f300cfb6a52a38ede22f8ee5.jpg)
259
+
260
+ we categorised these "trigger words" into transition words and other words, using online lists of English transition words<sup>9</sup>. For example, transition words mark causes (e.g. "because", "since"), opposites (e.g. "while", "despite"), effects (e.g. "therefore", "then"), and other aspects. We provide the full list we used in the project repository. We fitted a generalised LMM on the requests as binomial data (i.e. for each request we logged if it was after a transition word or not), summarized in Table 1 (row 8). Figure 2 (bottom right) shows the descriptive data. In summary, showing one suggestion (instead of three) significantly decreased the chance of requesting suggestions after a transition word (Means: $s_1 = 11.80\%$ , $s_3 = 14.03\%$ ), while INSTRUCTION seemed to make no (sig.) difference (Means: $i_{no} = 12.03\%$ , $i_{yes} = 13.32\%$ ).
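The sketch below illustrates this categorisation; the transition-word vector is a small illustrative subset of the full list released with the project, and the tokenisation is a simplifying assumption.

```r
# Sketch: was the suggestion requested directly after a transition word?
transition_words <- c("because", "since", "while", "despite",
                      "therefore", "then", "however", "although")

requested_after_transition_word <- function(text_before_cursor) {
  words <- strsplit(trimws(tolower(text_before_cursor)), "\\s+")[[1]]
  if (length(words) == 0) return(FALSE)
  last_word <- gsub("[[:punct:]]+$", "", words[length(words)])
  last_word %in% transition_words
}

requested_after_transition_word("I stayed home because")  # TRUE
requested_after_transition_word("The man said")           # FALSE
```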
261
+
262
+ # 7.5 Perception of the Tasks and UIs
263
+
264
+ We used Likert items to assess participants' perception after each writing task (Figure 3). Descriptively, suggestions received favourable ratings from the majority and had almost no perceived grammatical or factual errors. However, no UI was clearly "best" for everyone: Across questions and UI variants, there is a spread of opinions, including for the perceived usefulness of being able to write instructions (i.e. non-diegetic prompting). This spread fits with the different pros and cons and preferences that participants commented on (see Section 8). Here, we report on the results from our GEE analysis. Since we have 16 questions, we summarize this analysis according to the emerging bigger picture.
265
+
266
+ 7.5.1 Perceived Differences for NUMBER of Suggestions ($s_1$ vs $s_3$). Showing a single suggestion was rated worse than having a list of three suggestions. This was significant for several questions. The GEE model estimates that the odds of giving a higher rating with a single suggestion were "$x$" times the odds of that with the list of three suggestions, with $x$ as follows: Single suggestions were rated as significantly more distracting ($x = 1.95$, p<0.005), less helpful ($x = 0.60$, p=0.02), leading to more manual editing ($x = 1.91$, p<0.005), feeling less in control ($x = 0.67$, p=0.03), and providing less diverse suggestions ($x = 0.67$, p=0.04).
269
+
270
+ 7.5.2 Perceived Differences for INSTRUCTION ($i_{no}$ vs $i_{yes}$). We found a tradeoff in the perception of instructions: On the negative side, with the UIs that allowed users to enter instructions to the AI, participants reported manually editing suggestions significantly more ($x = 1.54$, $p = 0.02$) and rated these UIs as significantly more distracting ($x = 2.03$, $p < 0.0005$).
271
+
272
+ On the positive side, giving instructions was rated significantly better on being able to influence the suggested text ( $x = 1.92$ , $p < 0.001$ ). Descriptively, it was also rated better on feeling in control of the suggested text (see $Q_{10}$ in Figure 3), although this was not significant ( $x = 1.42$ , $p = 0.058$ ).
273
+
274
+ 7.5.3 Interactions of Number and Instruction. As mentioned in the previous two parts, both single suggestions and the ability to give instructions were perceived as significantly more distracting. However, there was also a significant negative interaction effect of INSTRUCTION and NUMBER on distraction. The increase in distraction for $s_1$ compared to $s_3$ was lower for $i_{yes}$ than for $i_{no}$ (which also matches the picture for $Q_2$ in Figure 3). This seems to be in line with the earlier finding for acceptance rates (Section 7.1): Possibly, finding more useful single suggestions with instructions reduced the otherwise perceived distraction of single suggestions and/or instructions. That said, note that for all suggestion UIs, the majority did not find them distracting. We return to the aspect of distraction in more detail when analysing the open feedback (Section 8).
275
+
276
+ <table><tr><td></td><td>Section</td><td>Aspect</td><td>Sig. pos. predictors</td><td>Sig. neg. predictors</td><td>Sig. interaction</td><td>Takeaway in words</td></tr><tr><td>1</td><td>7.1</td><td>Suggestion acceptance</td><td></td><td>s1(β=-1.26, SE=0.11, CI95%=[-1.48, -1.03], p&lt;.0001)</td><td>NUMBER * INSTRUCTION (β=0.72, SE=0.17, CI95%= [0.39, 1.05], p&lt;.0001)</td><td>Showing one suggestion (instead of three) decreases chance of acceptance; more so without instructions than with them.</td></tr><tr><td>2</td><td>7.2</td><td>Task time, comparing sugg. UIs</td><td></td><td></td><td></td><td>No sig. differences in task completion times were found between the four UIs with suggestions.</td></tr><tr><td>3</td><td>7.2</td><td>Task time, comparing sugg. UIs against baseline (no suggestions)</td><td>s3(β=24.59, SE=9.5, CI95%= [5.96, 43.22], p&lt;.01)</td><td></td><td></td><td>Writing with three suggestions took longer than without suggestions.</td></tr><tr><td>4</td><td>7.3</td><td>Text length, comparing sugg. UIs</td><td></td><td>s1(β=-0.08, SE=0.01, CI95%=[-0.10, -0.06], p&lt;.0001); iyes(β=-0.06, SE=0.01, CI95%=[-0.09, -0.04], p&lt;.0001)</td><td></td><td>Texts are slightly shorter when writing with single suggestions or with a UI allowing for instructions...</td></tr><tr><td>5</td><td>7.3</td><td>Text length, comparing sugg. UIs against baseline (no suggestions)</td><td>s3(β=0.05, SE=0.01, CI95%= [0.03, 0.07], p&lt;.0001)</td><td>s1(β=-0.03, SE=0.01, CI95%=[-0.05, -0.01], p&lt;.005); iyes(β=-0.05, SE=0.01, CI95%=[-0.07, -0.03], p&lt;.0001)</td><td></td><td>... also compared to the baseline. However, writing with multiple suggestions leads to slightly longer texts.</td></tr><tr><td>6</td><td>7.4.1</td><td>Requesting suggestions after sentence vs mid-sentence</td><td></td><td>s1(β=-0.83, SE=0.13, CI95%=[-1.08, -0.57], p&lt;.0001); iyes(β=-0.32, SE=0.14, CI95%=[-0.59, -0.05], p=0.018)</td><td></td><td>Showing one suggestion (instead of three) decreased the chance of requesting suggestions at the beginning of a new sentence.</td></tr><tr><td>7</td><td>7.4.2</td><td>Number of words in sentence at suggestion request</td><td>s1(β=1.55, SE=0.78, CI95%= [0.01, 3.08], p=0.049)</td><td></td><td></td><td>Showing one suggestion (instead of three) increased the number of words in a sentence after which suggestions were requested.</td></tr><tr><td>8</td><td>7.4.3</td><td>Type of words at suggestion request</td><td></td><td>s1(β=-0.37, SE=0.14, CI95%=[-0.65, -0.09], p=0.010)</td><td></td><td>Showing one suggestion (instead of three) decreased the chance of requesting suggestions after a transition word.</td></tr></table>
279
+
280
+ Table 1: Overview of the (generalised) LMM results and takeaways of the significant results. Empty cells indicate no significant results. See Sections 7.1 - 7.4 for details and Figure 2 for a descriptive overview of the data.
281
+
282
+ Finally, we also asked two questions that focused on the instructions directly ( $Q_{15}$ and $Q_{16}$ ) and thus could only be asked for those UIs with instructions (i.e. there's only a non-diegetic row in Figure 3 for $Q_{15}$ and $Q_{16}$ ). For these two questions, we found no significant differences between $s_1$ and $s_3$ .
283
+
284
+ # 7.6 Instruction Usage and Content
285
+
286
+ In total, participants used the non-diegetic prompting option to send 397 instructions to the system, with an average of 3.08 instructions per person (SD: 3.40). The mean instruction length was 14.20 characters (SD: 9.01) and 2.52 words (SD: 1.74). On average, participants had a ratio of 0.19 (SD: 0.17) of entering an instruction text when requesting suggestions, for those tasks that offered to do so. That is, about every fifth suggestion request used instructions.
287
+
288
+ We identified three main instruction "styles": The most common one (171 usages) was to use single keywords (or comma-separated lists of keywords). We also found an imperative style with 59 occurrences (e.g. starting the prompt text with "suggest", "give", "find", "describe"). In 12 cases, participants formulated a question (e.g. starting with a w-word like "what", "who" and so on, and/or ending with a "?"). Other cases included instructions consisting of multiple words to describe something (e.g. "somewhere in Italy"). Qualitatively, we found a range of approaches (Table 2).
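A heuristic along these lines is sketched below; the rules, verb list, and question-word list are illustrative assumptions and not the authors' exact coding scheme.

```r
# Sketch: heuristic classification of instruction "styles" (illustrative).
classify_instruction_style <- function(instruction) {
  text  <- trimws(tolower(instruction))
  words <- strsplit(text, "\\s+")[[1]]
  imperative_verbs <- c("suggest", "give", "find", "describe", "write", "make")
  question_words   <- c("what", "who", "where", "when", "why", "how")

  if (grepl("\\?$", text) || words[1] %in% question_words) {
    "question"
  } else if (words[1] %in% imperative_verbs) {
    "imperative"
  } else if (length(words) <= 2 || grepl(",", text)) {
    "keyword(s)"
  } else {
    "other (e.g. descriptive phrase)"
  }
}

sapply(c("suggest colors", "what is wrong with dad", "school",
         "somewhere in Italy"), classify_instruction_style)
```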
291
+
292
+ # 7.7 Evaluation of Text Quality
293
+
294
+ The mean number of spelling and grammar mistakes per word was 0.0025, which is comparable to values reported in previous research [26]. Approximately $3.5\%$ of all texts (23 out of 645) did not align with the selected topics. Of these, the majority (13 out of 23) were written for the category of "shapeshifter", which may have been misunderstood as a metaphor for a specific set of desired traits in a partner. Despite this potential misunderstanding, the majority of participants demonstrated attentiveness to the task and provided thoughtful reflections on the topic.
295
+
296
+ # 8 OPEN FEEDBACK
297
+
298
+ We analyzed the final feedback as described in Section 5. We structure this report by the emerging aspects.
299
+
300
+ # 8.1 Comments on Suggestions
301
+
302
+ The majority preferred multiple suggestions (75 people stated this preference vs 23 for single suggestions). Main reasons were higher chances of finding fitting suggestions (coded 36 times) and more
303
+
304
+ ![](images/f8ddf39de87ff13f44f813a8bc25ba58bcac447ba3ff7d6a9f0c7533b6a9765d.jpg)
305
+
306
+ ![](images/88fa33425fb6f5f9c95f986c8ceb767c1fb48787320922452d05b28f6cc47ab7.jpg)
307
+
308
+ ![](images/cc2d64ab0198cbad7fb51b6af2622f12224a1485662740fbd44de629e564f2d5.jpg)
309
+
310
+ ![](images/4ec2a0ec8ed09e1f4e61c3c802f57536b10a6e449a5febf50eedcb641d441cd2.jpg)
311
+
312
+ ![](images/3fb9fffff00a9963500ba569185a37b3414a8c7d0568679a9f8ad1253ba7bbd1.jpg)
313
+
314
+ ![](images/bb3f37bed0f04cf35fa40c1275ad84bc1eb6114212ec369dd39f090a48a6f35f.jpg)
315
+
316
+ ![](images/a1b9c9412530ccbb2d58d4c8c5e88da6a5ba43d766528dc33355838bccb555ea.jpg)
317
+
318
+ ![](images/9be2074ffe3e89928a2582b86a94a4c0a98f83b54b9642af5719ad535e08fdc4.jpg)
319
+
320
+ ![](images/5dc77adecb1ff9926e762710aadf9925f44d9bdbfa9ee232c09e35da7138fa41.jpg)
321
+
322
+ ![](images/39983bc707e21393ba8da3dc89c3c331176ba8ded7683003b170b44cbd065383.jpg)
323
+
324
+ ![](images/eaae4560b6ef2012dbcbe7771b91ef55b2597e139ec998e4dd6e3acc5e4f859a.jpg)
325
+
326
+ ![](images/334d6852c2fa2a3261c6e19716853a5d69758c3a3e0ceae6de65c8b39e5cd0b9.jpg)
327
+
328
+ ![](images/867f8ab7f5c4a8a00f778bfc6eb5c20fd9dc8baca080a32dcef19d006239d4b7.jpg)
329
+ Figure 3: Overview of the Likert results. These questions were asked after each writing task. Note that Q15 and Q16 relate to the instructions (i.e. non-diegetic prompts) and thus were only asked for the corresponding tasks.
330
+
331
+ ![](images/ae6074c3afd611034eaa04776e66bcb784a95a7dc2075b0f5b2eddc5da582277.jpg)
332
+
333
+ ![](images/ea2b10c4abcad2067e9fb06e2c7e7c9654f2811af746591a822015d52e2b9c0f.jpg)
334
+
335
+ ![](images/ef0613db1f84373c6eedb1bedb5b31ad8cabfebcc1c4bcf5bee24d4dd8dca049.jpg)
336
+
337
+ <table><tr><td>Approach</td><td>Examples</td></tr><tr><td>providing a topic</td><td>“school”, “book”, “retirement”, “zoo”, “event”</td></tr><tr><td>providing adjectives</td><td>“good”, “horrendous”, “bad”, “long, too much, insane”, “friendly”, “funny”, “scared”</td></tr><tr><td>request for inspiration</td><td>“give me a horror story”, “suggest a place”, “suggest the next step”, “things we do in the morning”, “suggest an activity for a middle aged man”</td></tr><tr><td>make idea more concrete</td><td>“suggest something disgusting”, “what is wrong with dad”, “suggest a cocktail”, “suggest a type of pistol”</td></tr><tr><td>request for variation</td><td>“another phrase”, “another action outside”, “suggest a different approach”, “anything”</td></tr><tr><td>request for writing help</td><td>“other words for stereotypical”, “find a synonym for valued”, “suggest a word for young people”, “another word for talent”</td></tr><tr><td>ask for opinion/advice</td><td>“are books good”, “what do i do next”</td></tr><tr><td>retrieve facts</td><td>“closest galaxy”, “side effect of anti ageing”, “a place on the Danube”</td></tr></table>
338
+
339
+ Table 2: An overview of the different approaches for writing non-diegetic prompts during the user study. Participants used single keywords to suggest topics and adjectives. Multiple-word prompts were often written in the imperative style, phrased as questions or phrased as incomplete sentences without a verb.
340
+
341
+ inspiration (coded 8 times). As $P_{38}$ wrote: "I found the multiple suggestions much more user friendly and also much more inspiring due to the multiple options."
342
+
343
+ Those preferring single suggestions found them more intuitive (coded 7 times), faster to work with (coded 3 times) or less distracting (coded 3 times): "I strongly preferred inline due to how intuitive they were to use." ($P_{113}$) Or: "I like seeing how the sentences actually looks in its actual place, and the inline suggestions allowed this." ($P_{109}$). Others liked not having to decide (coded 2 times) but noted that this might lead to choosing a less than optimal suggestion.
346
+
347
+ Fourteen participants reflected on benefits for both, such as: "On the one hand, the inline suggestions felt less cluttered and I could just press tab again if the first suggestion wasn't suitable. On the other hand, displaying multiple suggestions at once could lead me to a better suggestion when I might just have settled for the first one." $(P_{13})$
348
+
349
+ Participants commented on why and how to use suggestions, mentioning inspiration (coded 39 times), overcoming writer's block (coded 3 times), or finishing sentences (coded 7 times). For example: "I used the suggestions if they aligned with what I was writing or if I felt a little stuck with what to say next." $(P_{117})$ Or: "I tended to start with a vague idea of my own and see what ideas it had." $(P_{14})$ .
350
+
351
+ Eighteen participants explicitly commented on suggestion quality: Eight were negative ($P_{91}$: "[...] had to edit most of it."). Nine felt suggestions were hit or miss ($P_{130}$: "Sometimes [the suggestions] helped, sometimes it didn't."). Two left positive comments ($P_{27}$: "I was sceptical about whether the AI would align with my ideas or suggest phrasing that I would actually use but most often it did so and I was pleasantly surprised by the results."). $P_{109}$ further noted that "[instructions] helped the AI write more detailed and interesting sentences" when the direction of the sentence was known beforehand. Using "one or two words in the instructions to get better sentences", that participant continued that "[s]ometimes [it] worked, but quite often I just ended up writing my own sentences, or changing the suggested sentences substantially." (see Figure 5).
352
+
353
+ "I liked it better without [the instructions], just let the AI do its thing. That seems more human, that's how I share story telling with my grand children, we just take turns." (P34)
354
+
355
+ It was a normal Thursday morning when Matt Damon was kidnapped. It was like he just disappeared off the face of the earth. This caused a huge worldwide search. People from all over went to extreme lengths to try and find the beautiful actor and no one was willing to give up until he was safe. An old couple who were a huge fan of Matt, spent hours walking around places they'd never been to before in hopes they'd find him. Carrying around weapons just in case, they were putting their lives on the line for him. After a few hours when it was getting dark, they saw a sighting - they weren't sure what it was, it didn't look human. As they got closer, they realised it was Matt Damon in the flesh but he wasn't.. him. It looked like he had just morphed into a completely different person. Different features, different voice - the couple weren't sure whether to believe it was actually him. At the end of the day they decided they should bring it to the police, they were their only hope in finding out what happened, or to potentially get the old Matt back. To be continued..
356
+
357
358
+
359
+ ![](images/f943e0a0740630d0684309df4f7eb3ae6728340dfecaee442c81ef13434783d3.jpg)
360
+ Figure 4: Text sample of $P_{34}$ who took turns with the AI to write about the kidnapping of Matt Damon. The suggestions were taken verbatim and mostly requested at the start or in the middle of a sentence.
361
+ Figure 5: Text sample of $P_{109}$ who provided non-diegetic prompts to guide the LLM. For the last instruction ("train"), $P_{109}$ decided to then modify the topic to "zoo".
362
+
363
+ # 8.2 Comments on Instructions
364
+
365
+ Opinions diverged on instructions: 21 participants explicitly stated they preferred the UIs allowing for instructions, while 24 preferred those without them. 22 participants reflected on both pros and cons. The main reasons for using instructions were getting more suitable suggestions (coded 12 times) (e.g. $P_{27}$: "I found the instructions more helpful as I could guide the AI when needed"), inspiration for words (coded 29 times) (e.g. $P_{65}$: "[...] it gave me inspiration when i was stuck for words.") and delegating tasks like coming up with places, names or synonyms (coded 5 times). One person used the AI "[...] to get suggestions for and against the point I was trying to make." ($P_{85}$).
366
+
367
+ In contrast, some found it hard to write instructions (see Section 8.4 for details). Six participants described a trial-and-error approach to find out how to best write instructions.
368
+
369
+ It was also reported that coming up with instructions can disrupt the writing flow (coded 3 times) and thus reduce efficiency, or that it is not worth the effort. For example: "It made no difference, as i never felt the need to give it specific instructions. I felt it did a pretty good job of knowing what sort of suggestions I wanted." ($P_{38}$).
370
+
371
+ Some said writing with instructions felt less natural (coded 3 times): "I mostly enjoyed writing without the instructions. I felt more like I was 'one' with the AI and it felt like it was more of a team member with me than a piece of software. I think because it removed that feeling of using a computer to help me write I felt like the suggested writing was an extension of myself." $(P_{132})$ . And $P_{34}$ wrote: "I liked it better without [the instructions], just let the AI do its thing. That seems more human, that's how I share story telling with my grand children, we just take turns." (see Figure 4).
372
+
373
+ # 8.3 Control and Influence
374
+
375
+ Eleven participants commented on control and influencing suggestions. For example: "I prefer[r]ed multiple because - literally - there were multiple to choose from and that gave me a better feeling of control over the story." ($P_{59}$). Another commented: "I like the suggestion systems especially when I was able to provide guidance." ($P_{82}$). Overall, multiple suggestions and instructions were mentioned here as contributing to feeling in control, matching the Likert results on control and influence ($Q_{9}$ and $Q_{10}$ in Figure 3).
376
+
377
+ Moreover, participants commented on strategies around what we now call diegetic prompting in this paper. For example, some preferred influencing the suggestion with the diegetic approach: "I didn't have much success providing instructions, was having trouble thinking of suggestions quickly and instead focused on directing the topic towards a place were viable suggestions would be made without interactive input." $(P_{99})$ . Similarly, $P_{111}$ said: "Often it was just as difficult to think of the instruction as it would be to actually write something. It seemed just as easy to start writing what I wanted in order to push the AI in the direction I wanted it to go."
378
+
379
+ In contrast, some disliked diegetic prompting: "Without instructions was highly annoying, had to shape your lead-in sentences to get it to say something relevant. The instructions were intuitive and usually got it right." $(P_{78}$ , also see Figure 6).
380
+
381
+ Finally, others noticed influences on their own writing processes related to diegetic prompting: "[W]hen I was on my own I just rambled on but while working with the AI I was mentally setting up what I wrote to be able to ask for a suggestion at a point where the ideas could go in different directions, depending on what was suggested." ($P_{99}$, also see Figure 7). And similarly, $P_{9}$ wrote: "[I] noticed that the more time I spent the more my tendency was to find a way to write that would facilitate the suggestion to be meaningful and at the same time interesting to add to give more in-depth to my story."
384
+
385
+ # 8.4 Learnability
386
+
387
+ Several participants (33) touched on challenges of learnability and writing instructions: "I found coming up with suggestions [to the AI] difficult, really. Having to type the start of a sentence and then type what I wanted in a smaller box felt quite clunky and not worth the effort for what was generated. It felt much more fluid when the AI recognized what I wanted and completed the writing without needing suggestions." $(P_{29})$ . $P_{6}$ said: "I almost felt stressed trying to think of some instructions to give to the AI; it felt really hard to me. I'm glad that the option was there, but I guess I wasn't taking full advantage of it." Fittingly, 25 participants said they did not use instructions much because, for example, "[...] I wasn't very good of thinking of them." $(P_{2})$ . Some of the previous comments (Section 8.3) fit this aspect as well.
388
+
389
+ # 8.5 Distraction
390
+
391
+ Nine participants explicitly reflected on distraction. For example $P_{20}$ wrote: "I actually found the suggestions fairly distracting and not helpful. I tended to already know what I wanted to say so the chances of the suggestions aligning with my thoughts were fairly slim." $P_{43}$ perceived instructions in particular as distracting: "I feel like writing without instructions help me focus more and [I] am less distracted which allows my sentences to flow and be more natural. Instructions are good if [I] am stuck and need help."
392
+
393
+ # 8.6 Perception of the AI and Expectations
394
+
395
+ The comments indicate two fundamental views on the role of the AI: Some expected the system to serve efficiency. For example: "I think there is a lot of value in this system, but inputting instructions make it quite long winded and onerous, negating any benefits there may be. I preferred the multi suggestions without instruction." $(P_{26})$ . Also see the first quote on "alignment" above (Section 8.5). In contrast, others saw the system as serving inspiration (also see Section 8.2 and Section 7.6). They asked the AI for content suggestions or were curious to see in which direction the AI would take the story. This included feeling inspired by suggestions even without accepting them: "I was reading the suggestions either to use them or to just get ideas of what I was writing about" $(P_{67})$ .
396
+
397
+ # 9 DISCUSSION
398
+
399
+ # 9.1 Choice vs. Control
400
+
401
+ Our findings contribute to the literature on prompt-based interaction with generative systems for writing: Participants overall preferred choosing from multiple text suggestions presented to them, over actively writing instructions, in short creative and argumentative writing tasks. This is evident from highest acceptance rates with the multiple suggestions UI (Section 7.1), which were not improved through instructions, and from the qualitative feedback,
402
+
403
+ ![](images/027979544d7f7a680105d0b8ba6ac2e395e7abc6befc653e04cc74a2ccd8b317.jpg)
404
+ Figure 6: Text sample of $P_{78}$ who used non-diegetic prompting to retrieve a list of "school subjects". The accepted suggestion is highlighted in blue. Part of the accepted suggestions was later on deleted.
405
+
406
+ ![](images/66b9a7e424bae10f0ee00ee5d4af627b539fb6a7f56b4c2bc5136327112aaccd.jpg)
407
+ Figure 7: Text sample of $P_{99}$ who found it difficult to provide non-diegetic prompts and instead focused on guiding the suggestion through diegetic content, e.g. requesting suggestions after "...sending kids off to school when" (line 3). By setting the sentence up in this way before requesting a suggestion this participant guided the LLM to suggestions that "go in different directions" (cf. comment in the second yellow box).
408
+
409
+ where a clear majority favored multiple suggestions, while opinions were divided on instructions (Section 8).
410
+
411
+ However, giving users more control options in the UI by adding non-diegetic prompting partially mitigated the drawback of a lack of suggestion choice: Instructions increased acceptance rates for single suggestions – although these still did not reach the rate for multiple suggestions (Section 7.1). This indicates that the control offered by instructions was useful to guide single suggestions but not better than having a choice of three suggestions to begin with.
412
+
413
+ We discuss possible reasons: First, participants might satisfice [40], that is, accept a "good enough" suggestion rather than trying to "optimize" it via instructions. Suggestions might also already be good enough so that there is no need for instructions, as supported by some comments (Section 8.2). Second, a known usability principle is recognition over recall [31]: Users might find it easier to recognize a presented suggestion as suitable (or not), compared to coming up with an instruction and typing it in. Third, convenience might lead participants in the study to accept suggestions without instructions to get through the tasks quickly. However, participants accepted suggestions at a rate comparable with related work (with multiple suggestions and explicit request via tab key: $74\%$ here vs $72\%$ in [26]). For suggestions based on user instructions, our rate ($64\%$) is higher than in a related study design where users could enter requests in a sidebar ($17.6\%$ in [48]): This suggests that potential influences of the study setup do not necessarily work against instructions, or are less dominant than the effects of the UI design (e.g. sidebar vs integration at text cursor). Moreover, task times and the written texts, in combination with the comments, further support the conclusion that participants took the tasks seriously (see Section 7.7).
414
+
415
+ At the same time, instructions were indeed (situationally) useful: Participants commented on their benefits (Section 8.2), used them in every fifth suggestion request, and experimented with different styles (Section 7.6). Together, these findings motivate the HCI community to further explore the integration of choice and control via prompting. For example, future work could build on our conceptual lens to envision further UI designs that combine diegetic and non-diegetic prompting, and use our data as a benchmark in their evaluation.
416
+
417
+ # 9.2 Guiding Suggestions with Diegetic Prompts
418
+
419
+ Our results add to the literature on writing with AI by revealing that people specify more diegetic information to offset the lack of suggestion choice in UIs that display only a single suggestion. This is based on the first large-scale analysis of where in the text users request suggestions: Users wrote about 1.5 more words in the sentence before requesting single suggestions, compared to multiple ones. Moreover, single suggestions were requested less frequently to start a new sentence and to continue after a transition word. Possibly, receiving a single suggestion is less useful here, given that new sentences and transition words signal "openness" for potential changes to the direction of the narrative.
420
+
421
+ Currently, there is one other (small-scale) analysis of trigger moments ($N = 4$ in [7]). Thus, we encourage the community to analyze trigger moments whenever studying UIs with explicit suggestion triggers.
422
+
423
+ Fittingly, there has recently been high interest in interaction designs where users explicitly request suggestions (e.g. [7, 26, 41]). Our study explores this design space further by looking at how explicit requests interact with the number of suggestions: We contribute evidence that people consider when to request suggestions; in particular, for single suggestions they request them at points in their text that are expected to give clearer guidance to the text continuation system. Future work could examine whether this holds in other writing contexts and to what extent users actively think about when to request suggestions while writing. Based on people's comments, at least some strategically thought about what we term diegetic prompting (see Section 8.3).
424
+
425
+ As a related aspect, prior work focused on how people react to suggestions (e.g. evaluation fatigue [4], integrative leaps [41]). Complementing this, the above results indicate that there is also a proactive direction: Writers think about suggestions before seeing them. Future work could investigate this in more detail, in particular for UIs in which users explicitly request suggestions.
426
+
427
+ # 9.3 Challenges of Integrating Non-Diegetic Prompts
428
+
429
+ We extract two concrete challenges of interacting via non-diegetic prompts to guide future research and design.
430
+
431
+ 9.3.1 Non-Diegetic Prompts Interrupt the Writing Process. Writing involves multiple cognitive processes, such as coming up with a thought, turning it into words, and entering it [21]. Recently, Bhat et al. [4] studied (without non-diegetic prompts) how this is impacted by text suggestions. For example, writers need to evaluate displayed suggestions. Here, our study adds insights into the relative impact of diegetic vs non-diegetic prompts: Crucially, switching from diegetic writing to non-diegetic instructing forces writers to shift from thinking about their narrative or argument to thinking about instructions to the system. This is reflected in people's comments (Section 8.2, 8.3, 8.4) and the Likert results on distraction and problems with thinking of instructions ( $Q_{2}$ and $Q_{15}$ in Figure 3). In contrast, diegetic prompts do not require such shifts, although they still require engagement with displayed suggestions [4, 6].
432
+
433
+ 9.3.2 Non-Diegetic Prompts can be Hard to Write. Even after making that shift, writing effective non-diegetic prompts is difficult, adding to related findings in the literature [48]: Many participants struggled with this and recognised as much in their self-reflections (Section 8.2, 8.3, 8.4). More positively, the non-diegetic prompts collected in our study show how users experimented with different styles. These might evolve further with longer use. At the moment, none of these styles go beyond what would also be a meaningful comment to a human co-author.
434
+
435
+ # 9.4 Perceived Role of the AI
436
+
437
+ Here we discuss how users perceived the AI and support this discussion by reflecting on three writing processes as in the framework for analyzing writer-suggestion interactions by Bhat et al. [4]: (1) proposing new topics or ideas, (2) translating abstract thoughts or keywords into sentences, (3) transcribing (i.e. entering) words.
438
+
439
+ 9.4.1 Two Perspectives on the Main Role: Proposer vs Transcriber. Some people clearly saw the system as something that serves input efficiency (i.e. transcriber), whereas others saw it as providing inspiration (i.e. proposer). The former were more critical of the system, since for them it is only good if it is fast and predicts exactly what they want. Based on the qualitative feedback, we think that the chosen topic as well as participants' familiarity with the topic might have an influence on their writing mindset. For argumentative writing and, more generally, when people already had an opinion about a topic, they felt that the AI was distracting if it proposed something other than what participants had in mind. Future work may have a closer look at the influence of topic genre and prior knowledge about a topic on the perception of the role of the AI. Study designs should take this difference into account when choosing writing topics to calibrate metrics for performance or exploration.
442
+
443
+ 9.4.2 Non-diegetic Prompts Reflect Users' Perception of the AI. We can further discuss how the content of non-diegetic prompts reflects varying perceptions of the role of the AI: Considering the writing processes [4], non-diegetic prompts from our dataset show that users requested the AI to propose inspirational ideas. Sometimes users also only provided partial phrases or keywords, or asked for word choices, which puts the AI into the role of translating these abstract ideas into full sentences. At other times, they perceived the AI as a transcriber for input efficiency (Section 8).
444
+
445
+ Other non-diegetic prompts indicate influences on the perceived role beyond these writing processes: For example, people asked the AI for an opinion or advice, or to look up information. Thus, non-diegetic prompts may shift the perception of the AI's role towards that of a writing collaborator.
446
+
447
+ # 9.5 Limitations and Reflections on Methodology
448
+
449
+ People wrote for five minutes with each UI. Hence, they spent ten minutes in total with each individual UI feature across the writing tasks (single and multiple suggestions, with and without instructions). This is comparable to related work (e.g. 11 min [26], 4 min [6], 10-12 min [48]). Future studies should investigate long-term use, in particular to observe how non-diegetic prompts evolve as writers gain experience with a system.
450
+
451
+ We prototyped our system with GPT-3 via an API. We did not have access to the model directly and we do not claim to have identified the "best" settings for our specific usage of the model. We noticed two limitations: Sometimes, suggestions were repetitive (e.g. similar ones in one list) or repeated the instruction text (which seems unhelpful). Nevertheless, suggestions were rated highly overall (Section 7.5).
452
+
453
+ Potential changes to the model over time are beyond our control. This limits exact replicability for studies like this. We see a trend of limited direct access to state-of-the-art LLMs for parts of the academic community, which is not easy to resolve. On the positive side, our work shows that it is possible to construct and study in detail interactive applications built on existing models.
454
+
455
+ We chose an online setup in line with recent related work (e.g. [6, 26]) to collect logging data from interactions of many people. However, we could not observe people directly or ask questions at interesting moments in the interaction, except in our pre-study, which we used to refine our design. A small-N study with direct observation and think-aloud could complement our work, for example, to understand decision-making around triggering suggestions and writing non-diegetic prompts in more detail. Nevertheless, we received rich qualitative feedback as well (Section 8).
458
+
459
+ It is possible that the instruction styles (Section 7.6) are biased by the provided examples (Figure 8 in Appendix A). Our pre-study showed that such examples are needed to help people get started with this new feature. Nevertheless, people experimented beyond these examples (e.g. questions, writing help, advice, etc.; see Table 2).
460
+
461
+ With the pop-up box, we tested one way of integrating instructions. This UI element is motivated as a simple way of integrating instructions with the established design of a suggestion list (or inline suggestion). A similar pop-up is used in recent related work (not for instructions but for suggestions in the middle of sentences; cf. [4]). Other designs should be explored in the future.
462
+
463
+ Finally, we emphasize the importance of open writing tasks in HCI research. Historically, transcription tasks have dominated text entry research (cf. [45]). With the rising interest in human-AI co-creation, research on writing tools needs new tasks. These might not necessarily focus on measuring input speed but rather cover a range of topics, text types, and other aspects. Pragmatically, writing tasks from writer communities and custom tasks have been used in recent studies (e.g. [6, 26, 41, 48]), including ours. As a community, we should systematically evaluate and curate such writing tasks if they are to become a lasting key methodological component.
464
+
465
+ # 9.6 Beyond Writing: Diegetic and Non-diegetic Interaction in Generative Systems
466
+
467
+ We have studied diegetic and non-diegetic prompts to draft text (i.e. text to text). Here we reflect on this new perspective by discussing concrete examples of how other interactive generative systems use diegetic and non-diegetic prompting.
468
+
469
+ - Visual to Text Chung et al. [9] proposed a new story ideation tool that uses visual sketching to guide an LLM. Here, the sketch is translated into a text prompt. This interaction is non-diegetic.
470
+ - Text to Visual Recent text to image models allow users to generate images from text descriptions [30, 32, 36]. These are non-diegetic prompts, because they are not part of the visuals.
471
+ - Visual to Visual Bau et al. [3] show an example of "painting shapes" to guide image models: Users draw simple shapes such as a triangle to symbolize a mountain. The image model then translates these shapes into a high-fidelity rendering. Since the abstract shape is usually not part of the outcome we consider this interaction non-diegetic. On the other hand, Ha and Eck [19] enable users to start painting a part of an image (i.e. providing diegetic information) and let the system continue or finish the painting.
472
+
473
+ Differentiating these two perspectives therefore allows researchers to analyse users' intentions and behavior when interacting with, or designing, systems that use generative AI. As the following discussion shows, we can use this understanding to derive implications for the design of interactions with generative models.
474
+
475
+ # 9.7 Implications for LLMs and User Interfaces
476
+
477
+ In recent work by Schick et al. [38], their LLM "PEER" is explicitly trained to follow non-diegetic prompts related to text revision. Effectively, our study contributes the HCI counterpart - an investigation of a UI and interaction design to integrate an LLM in such a role into the writing process. Our results guide future work at this intersection of HCI and NLP in two concrete ways:
478
+
479
+ First, based on our collected non-diegetic prompts these LLMs should be trained to understand a broader range of inputs. For instance, PEER is trained on the imperative-style but we found the keyword-style to be more common. Alternatively, users need to be guided towards the supported style via the UI.
480
+
481
+ Second, while LLMs are rapidly improving, even the best model cannot eliminate cognitive costs and interaction costs of switching between diegetic and non-diegetic writing. This motivates further studies on interaction designs that require such switches and potential pathways to making them easier and more efficient.
482
+
483
+ # 10 CONCLUSION
484
+
485
+ Our new understanding highlights that people use two types of prompting to guide LLMs for text generation. While related work has presented systems that focused on non-diegetic prompts, our findings reveal that users additionally think about and shape their text to guide LLMs through diegetic information. With our UI design that allows for both types, using GPT-3, participants preferred choosing from multiple suggestions over writing instructions. We conclude by highlighting three key takeaways based on our results:
486
+
487
+ First, writing instructions to the AI requires effort, including switching between diegetic and non-diegetic writing. Second, people combine diegetic and non-diegetic prompting, as single suggestions benefitted from both. Third, writers use their draft (i.e. diegetic information) and suggestion timing to strategically guide LLMs, based on our analysis of when people request suggestions, as well as their self-reflection in comments.
488
+
489
+ We encourage future work to further analyze these prompt types to develop better writing tools and generalize to other domains (e.g. interaction with generative models for images). To facilitate this, we release our prototype and material on the study and analysis here:
490
+
491
+ https://osf.io/qwkaj
492
+
493
+ # ACKNOWLEDGMENTS
494
+
495
+ We thank Lukas Mecke for feedback on the manuscript. This project is funded by the Bavarian State Ministry of Science and the Arts and coordinated by the Bavarian Research Institute for Digital Transformation (bidt).
496
+
497
+ # REFERENCES
498
+
499
+ [1] Kenneth C. Arnold, Krysta Chauncey, and Krzysztof Z. Gajos. 2018. Sentiment Bias in Predictive Text Recommendations Results in Biased Writing. In Proceedings of the 44th Graphics Interface Conference (Toronto, Canada) (GI '18). Canadian Human-Computer Communications Society, Waterloo, CAN, 42-49. https://doi.org/10.20380/GI2018.07
500
+ [2] Douglas Bates, Martin Machler, Ben Bolker, and Steve Walker. 2015. Fitting Linear Mixed-Effects Models Using lme4. Journal of Statistical Software 67, 1 (2015), 1-48. https://doi.org/10.18637/jss.v067.i01
501
+ [3] David Bau, Jun-Yan Zhu, Hendrik Strobelt, Bolei Zhou, Joshua B. Tenenbaum, William T. Freeman, and Antonio Torralba. 2018. GAN Dissection: Visualizing and Understanding Generative Adversarial Networks. Technical Report arXiv:1811.10597. arXiv. https://doi.org/10.48550/arXiv.1811.10597 arXiv:1811.10597 [cs]
504
+ [4] Advait Bhat, Saaket Agashe, Niharika Mohile, Parth Oberoi, Ravi Jangir, and Anirudha Joshi. 2022. Studying writer-suggestion interaction: A qualitative study to understand writer interaction with aligned/misaligned next-phrase suggestion. https://doi.org/10.48550/ARXIV.2208.00636
505
+ [5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. In Advances in Neural Information Processing Systems, H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (Eds.), Vol. 33. Curran Associates, Inc., 1877-1901. https://proceedings.neurips.cc/paper/2020/file/1457c0d6bfcb4967418bf8ac142f64a-Paper.pdf
506
+ [6] Daniel Buschek, Martin Zurn, and Malin Eiband. 2021. The Impact of Multiple Parallel Phrase Suggestions on Email Input and Composition Behaviour of Native and Non-Native English Writers. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (Yokohama, Japan) (CHI '21). Association for Computing Machinery, New York, NY, USA, Article 732, 13 pages. https://doi.org/10.1145/3411764.3445372
507
+ [7] Alex Calderwood, Vivian Qiu, Katy Ilonka Gero, and Lydia B. Chilton. 2020. How Novelists Use Generative Language Models: An Exploratory User Study... In HAI-GEN+ user2agent@ IUI.
508
+ [8] Mia Xu Chen, Benjamin N. Lee, Gagan Bansal, Yuan Cao, Shuyuan Zhang, Justin Lu, Jackie Tsay, Yinan Wang, Andrew M. Dai, Zhifeng Chen, Timothy Sohn, and Yonghui Wu. 2019. Gmail SmartCompose: Real-Time Assisted Writing. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining (Anchorage, AK, USA) (KDD '19). Association for Computing Machinery, New York, NY, USA, 2287-2295. https://doi.org/10.1145/3292500.3330723
509
+ [9] John Joon Young Chung, Wooseok Kim, Kang Min Yoo, Hwaran Lee, Eytan Adar, and Minsuk Chang. 2022. TaleBrush: Sketching Stories with Generative Pretrained Language Models. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (New Orleans, LA, USA) (CHI '22). Association for Computing Machinery, New York, NY, USA. https://doi.org/10.1145/3491102.3501819
510
+ [10] Juliet M Corbin. 1990. Basics of qualitative research: Grounded theory procedures and techniques. Sage.
511
+ [11] Mark Dunlop and John Levine. 2012. Multidimensional Pareto Optimization of Touchscreen Keyboards for Speed, Familiarity and Improved Spell Checking. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (Austin, Texas, USA) (CHI '12). Association for Computing Machinery, New York, NY, USA, 2669-2678. https://doi.org/10.1145/2207676.2208659
512
+ [12] Andrew Fowler, Kurt Partridge, Ciprian Chelba, Xiaojun Bi, Tom Ouyang, and Shumin Zhai. 2015. Effects of Language Modeling and Its Personalization on Touchscreen Typing Performance. In Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems (Seoul, Republic of Korea) (CHI '15). Association for Computing Machinery, New York, NY, USA, 649-658. https://doi.org/10.1145/2702123.2702503
513
+ [13] Katy Gero, Alex Calderwood, Charlotte Li, and Lydia Chilton. 2022. A Design Space for Writing Support Tools Using a Cognitive Process Model of Writing. In Proceedings of the First Workshop on Intelligent and Interactive Writing Assistants (In2Writing 2022). Association for Computational Linguistics, Dublin, Ireland, 11-24. https://aclanthology.org/2022.in2writing-1.2
514
+ [14] Katy Ilonka Gero, Vivian Liu, and Lydia Chilton. 2022. Sparks: Inspiration for Science Writing Using Language Models. In Designing Interactive Systems Conference (Virtual Event, Australia) (DIS '22). Association for Computing Machinery, New York, NY, USA, 1002-1019. https://doi.org/10.1145/3532106.3533533
515
+ [15] Mayank Goel, Leah Findlater, and Jacob Wobbrock. 2012. WalkType: Using Accelerometer Data to Accomodate Situational Impairments in Mobile Touch Screen Text Entry. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (Austin, Texas, USA) (CHI '12). Association for Computing Machinery, New York, NY, USA, 2687-2696. https://doi.org/10.1145/2207676.2208662
516
+ [16] Mayank Goel, Alex Jansen, Travis Mandel, Shwetak N. Patel, and Jacob O. Wobbrock. 2013. ContextType: Using Hand Posture Information to Improve Mobile Touch Screen Text Entry. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (Paris, France) (CHI '13). Association for Computing Machinery, New York, NY, USA, 2795-2798. https://doi.org/10.1145/2470654.2481386
517
+ [17] Steven Goodman, Erin Buehler, Patrick Clary, Andy Coenen, Aaron Michael Donsbach, Tiffanie Horne, Michal Lahav, Bob MacDonald, Rain Bream Michaels, Ajit Narayanan, Mahima Pushkarna, Joel Christopher Riley, Alex Santana, Lei Shi, Rachel Sweeney, Phil Weaver, Ann Yuan, and Meredith Ringel Morris. 2022. LaMPost: Evaluation of an AI-assisted Writing Editor Prototype for Adults with Dyslexia. https://arxiv.org/abs/2207.02308
518
+
519
+ [18] Mitchell Gordon, Tom Ouyang, and Shumin Zhai. 2016. WatchWriter: Tap and Gesture Typing on a Smartwatch Miniature Keyboard with Statistical Decoding. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (San Jose, California, USA) (CHI '16). Association for Computing Machinery, New York, NY, USA, 3817-3821. https://doi.org/10.1145/2858036.2858242
520
+ [19] David Ha and Douglas Eck. 2017. A Neural Representation of Sketch Drawings. http://arxiv.org/abs/1704.03477 arXiv:1704.03477 [cs, stat].
521
+ [20] Adi Haviv, Jonathan Berant, and Amir Globerson. 2021. BERTese: Learning to Speak to BERT. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume. Association for Computational Linguistics, Online, 3618-3623. https://doi.org/10.18653/v1/2021.eacl-main.316
522
+ [21] John R. Hayes. 2012. Modeling and Remodeling Writing. Written Communication 29, 3 (July 2012), 369-388. https://doi.org/10.1177/0741088312451260
523
+ [22] Ellen Jiang, Kristen Olson, Edwin Toh, Alejandra Molina, Aaron Donsbach, Michael Terry, and Carrie J. Cai. 2022. PromptMaker: Prompt-based Prototyping with Large Language Models. In CHI Conference on Human Factors in Computing Systems Extended Abstracts. 1-8.
524
+ [23] Zhengbao Jiang, Frank F. Xu, Jun Araki, and Graham Neubig. 2020. How Can We Know What Language Models Know? Transactions of the Association for Computational Linguistics 8 (Dec. 2020), 423-438. https://doi.org/10.1162/tacl_a_00324
525
+ [24] Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufmann, Andrew Tomkins, Balint Miklos, Greg Corrado, Laszlo Lukacs, Marina Ganea, Peter Young, and Vivek Ramavajjala. 2016. Smart Reply: Automated Response Suggestion for Email. In Proceedings of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (San Francisco, California, USA) (KDD '16). Association for Computing Machinery, New York, NY, USA, 955-964. https://doi.org/10.1145/2939672.2939801
526
+ [25] Alexandra Kuznetsova, Per B. Brockhoff, and Rune H. B. Christensen. 2017. lmerTest Package: Tests in Linear Mixed Effects Models. Journal of Statistical Software 82, 13 (2017), 1-26. https://doi.org/10.18637/jss.v082.i13
527
+ [26] Mina Lee, Percy Liang, and Qian Yang. 2022. CoAuthor: Designing a Human-AI Collaborative Writing Dataset for Exploring Language Model Capabilities. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (New Orleans, LA, USA) (CHI '22). Association for Computing Machinery, New York, NY, USA, Article 388, 19 pages. https://doi.org/10.1145/3491102.3502030
528
+ [27] Jiachang Liu, Dinghan Shen, Yizhe Zhang, Bill Dolan, Lawrence Carin, and Weizhu Chen. 2021. What Makes Good In-Context Examples for GPT-3? http://arxiv.org/abs/2101.06804 arXiv:2101.06804 [cs].
529
+ [28] Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. 2021. Pre-train, Prompt, and Predict: A Systematic Survey of Prompting Methods in Natural Language Processing. arXiv:2107.13586 [cs] (July 2021). http://arxiv.org/abs/2107.13586 arXiv:2107.13586.
530
+ [29] Michael J. Muller and Sandra Kogan. 2012. Grounded Theory Method in Human-Computer Interaction and Computer-Supported Cooperative Work. In The Human-Computer Interaction Handbook (3rd ed.). CRC Press.
531
+ [30] Alex Nichol, Prafulla Dhariwal, Aditya Ramesh, Pranav Shyam, Pamela Mishkin, Bob McGrew, Ilya Sutskever, and Mark Chen. 2021. GLIDE: Towards Photorealistic Image Generation and Editing with Text-Guided Diffusion Models. (2021). https://doi.org/10.48550/ARXIV.2112.10741 Publisher: arXiv Version Number: 3.
532
+ [31] Jakob Nielsen. 1994. Enhancing the Explanatory Power of Usability Heuristics. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems (Boston, Massachusetts, USA) (CHI '94). Association for Computing Machinery, New York, NY, USA, 152-158. https://doi.org/10.1145/191666.191729
533
+ [32] Or Patashnik, Zongze Wu, Eli Shechtman, Daniel Cohen-Or, and Dani Lischinski. 2021. StyleCLIP: Text-Driven Manipulation of StyleGAN Imagery. (2021). https://doi.org/10.48550/ARXIV.2103.17249 Publisher: arXiv Version Number: 1.
534
+ [33] Fabio Petroni, Tim Rocktäschel, Patrick Lewis, Anton Bakhtin, Yuxiang Wu, Alexander H. Miller, and Sebastian Riedel. 2019. Language Models as Knowledge Bases? http://arxiv.org/abs/1909.01066 arXiv:1909.01066 [cs].
535
+ [34] Philip Quinn and Shumin Zhai. 2016. A Cost-Benefit Study of Text Entry Suggestion Interaction. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (San Jose, California, USA) (CHI '16). Association for Computing Machinery, New York, NY, USA, 83-88. https://doi.org/10.1145/2858036.2858305
536
+ [35] R Core Team. 2020. R: A Language and Environment for Statistical Computing. R Foundation for Statistical Computing, Vienna, Austria. https://www.R-project.org
537
+ [36] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. 2022. Hierarchical Text-Conditional Image Generation with CLIP Latents. (2022). https://doi.org/10.48550/ARXIV.2204.06125 Publisher: arXiv Version Number: 1.
538
+ [37] Melissa Roemmele and Andrew S. Gordon. 2015. Creative Help: A Story Writing Assistant. In Interactive Storytelling (Lecture Notes in Computer Science), Henrik Schoenau-Fog, Luis Emilio Bruni, Sandy Louchart, and Sarune Baceviciute (Eds.). Springer International Publishing, Cham, 81-92. https://doi.org/10.1007/978-3-319-27036-4_8
539
+
540
+ [38] Timo Schick, Jane Dwivedi-Yu, Zhengbao Jiang, Fabio Petroni, Patrick Lewis, Gautier Izacard, Qingfei You, Christoforos Nalmantis, Edouard Grave, and Sebastian Riedel. 2022. PEER: A Collaborative Language Model. https://doi.org/10.48550/ARXIV.2208.11663
541
+ [39] Timo Schick and Hinrich Schütze. 2021. Exploiting Cloze Questions for Few Shot Text Classification and Natural Language Inference. http://arxiv.org/abs/2001.07676 arXiv:2001.07676 [cs].
542
+ [40] Herbert Alexander Simon. 1996. The sciences of the artificial (3rd ed.). MIT Press, Cambridge, Mass.
543
+ [41] Nikhil Singh, Guillermo Bernal, Daria Savchenko, and Elena L. Glassman. 2022. Where to Hide a Stolen Elephant: Leaps in Creative Writing with Multimodal Machine Intelligence. ACM Trans. Comput.-Hum. Interact. (jan 2022). https://doi.org/10.1145/3511599 Just Accepted.
544
+ [42] Hendrik Strobelt, Albert Webson, Victor Sanh, Benjamin Hoover, Johanna Beyer, Hanspeter Pfister, and Alexander M. Rush. 2022. Interactive and Visual Prompt Engineering for Ad-hoc Task Adaptation with Large Language Models. http://arxiv.org/abs/2208.07852 arXiv:2208.07852 [cs].
545
+ [43] Ben Swanson, Kory Mathewson, Ben Pietrzak, Sherol Chen, and Monica Dinalescu. 2021. Story Centaur: Large Language Model Few Shot Learning as a Creative Writing Tool. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: System Demonstrations. Association for Computational Linguistics, Online, 244-256. https://doi.org/10.18653/v1/2021.eacl-demos.29
546
+ [44] Anestis Touloumis. 2015. R Package multgee: A Generalized Estimating Equations Solver for Multinomial Responses. Journal of Statistical Software 64, 8 (2015), 1-14. http://www.jstatsoft.org/v64/i08/
547
+ [45] Keith Vertanen and Per Ola Kristensson. 2014. Complementing Text Entry Evaluations with a Composition Task. ACM Trans. Comput.-Hum. Interact. 21, 2, Article 8 (feb 2014), 33 pages. https://doi.org/10.1145/2555691
548
+ [46] Tongshuang Wu, Ellen Jiang, Aaron Donsbach, Jeff Gray, Alejandra Molina, Michael Terry, and Carrie J. Cai. 2022. PromptChainer: Chaining Large Language Model Prompts through Visual Programming. http://arxiv.org/abs/2203.06566 Number: arXiv:2203.06566 arXiv:2203.06566 [cs].
549
+ [47] Tongshuang Wu, Michael Terry, and Carrie Jun Cai. 2022. AI Chains: Transparent and Controllable Human-AI Interaction by Chaining Large Language Model Prompts. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (New Orleans, LA, USA) (CHI '22). Association for Computing Machinery, New York, NY, USA, Article 385, 22 pages. https://doi.org/10.1145/3491102.3517582
550
+ [48] Ann Yuan, Andy Coenen, Emily Reif, and Daphne Ippolito. 2022. Wordcraft: Story Writing With Large Language Models. In 27th International Conference on Intelligent User Interfaces (Helsinki, Finland) (IUI '22). Association for Computing Machinery, New York, NY, USA, 841-852. https://doi.org/10.1145/3490099.3511105
551
+ [49] Weizhe Yuan, Graham Neubig, and Pengfei Liu. 2021. BARTScore: Evaluating Generated Text as Text Generation. http://arxiv.org/abs/2106.11520 arXiv:2106.11520 [cs].
552
+ [50] Tony Z. Zhao, Eric Wallace, Shi Feng, Dan Klein, and Sameer Singh. 2021. Calibrate Before Use: Improving Few-Shot Performance of Language Models. http://arxiv.org/abs/2102.09690 arXiv:2102.09690 [cs].
553
+
554
+ # A APPENDIX
555
+
556
+ In this appendix, we provide a table of logged events and additional screenshots.
557
+
558
+ <table><tr><td>No.</td><td>Interaction Event</td><td>Description</td></tr><tr><td>1</td><td>EVENT_CONFIRM_INSTRUCTION</td><td>User has confirmed instruction (Enter Key)</td></tr><tr><td>2</td><td>EVENT_DISABLE_INSTRUCTION</td><td>User has cancelled the instruction (ESC key or clicking outside the instruction box)</td></tr><tr><td>3</td><td>EVENT_OPEN_INSTRUCTION_BOX</td><td>User has triggered new suggestions in the “with instructions” writing setting (Tab Key)</td></tr><tr><td>4</td><td>EVENT_SELECT_NEXT_SUGGESTION</td><td>User has selected next suggestion (Down Arrow Key)</td></tr><tr><td>5</td><td>EVENT_SELECT_PREV_SUGGESTION</td><td>User has selected previous suggestion (Up Arrow Key)</td></tr><tr><td>6</td><td>EVENT_REQUEST_SUGGESTIONS</td><td>User has requested new suggestions (Tab Key)</td></tr><tr><td>7</td><td>EVENT_SYNTHESIS_RESPONSE</td><td>System returned suggestions</td></tr><tr><td>8</td><td>EVENT_CONFIRM_SYNTHESIS</td><td>User has selected and confirmed one suggestion (Enter Key or Mouse Selection)</td></tr><tr><td>9</td><td>EVENT_DISABLE_SYNTHESIS</td><td>User has cancelled the suggestions (ESC key or clicking outside the suggestion box)</td></tr><tr><td>10</td><td>EVENT_TASK_STATUS</td><td>Can be either “task started” or “task finished”</td></tr><tr><td>11</td><td>EVENT_KEYDOWN</td><td>User has pressed a key, e.g. “A” or “TAB”</td></tr></table>
559
+
560
+ Table 3: An overview of the interaction events logged in the user study.
561
+
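The event vocabulary above maps directly onto simple per-participant usage statistics. The sketch below is illustrative only: the list-of-event-names log format and the derived metrics are assumptions made for this example, not the study's actual analysis pipeline.

```python
# Aggregate a (hypothetical) stream of the Table 3 event names into usage statistics.
from collections import Counter

log = [
    "EVENT_TASK_STATUS",            # task started
    "EVENT_REQUEST_SUGGESTIONS",
    "EVENT_SYNTHESIS_RESPONSE",
    "EVENT_SELECT_NEXT_SUGGESTION",
    "EVENT_CONFIRM_SYNTHESIS",      # one suggestion accepted
    "EVENT_REQUEST_SUGGESTIONS",
    "EVENT_SYNTHESIS_RESPONSE",
    "EVENT_DISABLE_SYNTHESIS",      # suggestions dismissed
    "EVENT_TASK_STATUS",            # task finished
]

counts = Counter(log)
requests = counts["EVENT_REQUEST_SUGGESTIONS"]
accepted = counts["EVENT_CONFIRM_SYNTHESIS"]
print(f"requests: {requests}, accepted: {accepted}, "
      f"acceptance rate: {accepted / requests:.2f}")
```

In the same spirit, pairing each EVENT_REQUEST_SUGGESTIONS timestamp with the following EVENT_SYNTHESIS_RESPONSE would give a rough estimate of suggestion latency.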
562
+ ![](images/ee99c9bbb9deb2aa546308d931f3d76fe432e462f17e5a9cf56529682e1d14d3.jpg)
563
+ Figure 8: Screenshot of the writing interface. (Left Side) The info box describes the available functionalities in the current setting, (Top Middle) the selected topic, (Bottom Middle) the text editor with the current written text and an inline suggestion.
564
+
565
+ (Screenshot text from Figures 8 and 9: the 5-minute writing-task instructions, the example topics "Is Listening to a Book Just as Good as Reading It?" and "How Worried Should We Be About Screen Time During the Pandemic?", and the interface controls "I'M DONE WRITING", "SHOW PREVIOUS TOPIC", "SHOW NEXT TOPIC", and "I'M READY TO WRITE ABOUT THIS TOPIC".)
588
+
589
+ Figure 9: The topic selection panel. Users can browse through the topics and indicate that they are ready to write about the depicted topic.
2303.03xxx/2303.03199/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:265d45bb8a0a601cdd8359c06f05da53a28d5f30d9a9e3b39436523ef3ed44d1
3
+ size 995648
2303.03xxx/2303.03199/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03202/52833c8e-2380-4398-9065-8291ff12fc58_content_list.json ADDED
@@ -0,0 +1,1955 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Continuous Sign Language Recognition with Correlation Network",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 151,
8
+ 130,
9
+ 818,
10
+ 151
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Lianyu Hu, Liqing Gao, Zekang Liu, Wei Feng",
17
+ "bbox": [
18
+ 292,
19
+ 188,
20
+ 676,
21
+ 208
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "College of Intelligence and Computing, Tianjin University, Tianjin 300350, China",
28
+ "bbox": [
29
+ 161,
30
+ 208,
31
+ 805,
32
+ 224
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Code: https://github.com/hulianyuyy/CorrNet",
39
+ "bbox": [
40
+ 235,
41
+ 226,
42
+ 728,
43
+ 243
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Abstract",
50
+ "text_level": 1,
51
+ "bbox": [
52
+ 233,
53
+ 277,
54
+ 310,
55
+ 292
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Human body trajectories are a salient cue to identify actions in the video. Such body trajectories are mainly conveyed by hands and face across consecutive frames in sign language. However, current methods in continuous sign language recognition (CSLR) usually process frames independently, thus failing to capture cross-frame trajectories to effectively identify a sign. To handle this limitation, we propose correlation network (CorrNet) to explicitly capture and leverage body trajectories across frames to identify signs. In specific, a correlation module is first proposed to dynamically compute correlation maps between the current frame and adjacent frames to identify trajectories of all spatial patches. An identification module is then presented to dynamically emphasize the body trajectories within these correlation maps. As a result, the generated features are able to gain an overview of local temporal movements to identify a sign. Thanks to its special attention on body trajectories, CorrNet achieves new state-of-the-art accuracy on four large-scale datasets, i.e., PHOENIX14, PHOENIX14-T, CSL-Daily, and CSL. A comprehensive comparison with previous spatial-temporal reasoning methods verifies the effectiveness of CorrNet. Visualizations demonstrate the effects of CorrNet on emphasizing human body trajectories across adjacent frames.",
62
+ "bbox": [
63
+ 75,
64
+ 309,
65
+ 472,
66
+ 674
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1. Introduction",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 76,
76
+ 700,
77
+ 207,
78
+ 715
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Sign language is one of the most widely-used communication tools for the deaf community in their daily life. However, mastering this language is rather difficult and time-consuming for the hearing people, thus hindering direct communications between two groups. To relieve this problem, isolated sign language recognition tries to classify a video segment into an independent gloss<sup>1</sup>. Continuous sign language recognition (CSLR) progresses by sequentially translating images into a series of glosses to express a sentence, more prospective toward real-life deployment.",
85
+ "bbox": [
86
+ 75,
87
+ 726,
88
+ 468,
89
+ 878
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "image",
95
+ "img_path": "images/6521a2a95407e5b24af119b56bacf0eab481a1b1c157903c4dbc654f5d92dc9a.jpg",
96
+ "image_caption": [
97
+ "Left frame"
98
+ ],
99
+ "image_footnote": [],
100
+ "bbox": [
101
+ 506,
102
+ 276,
103
+ 624,
104
+ 368
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "image",
110
+ "img_path": "images/a4a99a216aa9e71c179468ae99c69d03722df054737f66d146142f52e8714d4c.jpg",
111
+ "image_caption": [
112
+ "Figure 1. Visualization of correlation maps with Grad-CAM [40]. It's observed that without extra supervision, our method could well attend to informative regions in adjacent left/right frames to identify human body trajectories."
113
+ ],
114
+ "image_footnote": [],
115
+ "bbox": [
116
+ 638,
117
+ 277,
118
+ 756,
119
+ 368
120
+ ],
121
+ "page_idx": 0
122
+ },
123
+ {
124
+ "type": "image",
125
+ "img_path": "images/6733366ac65c9528551bdd52128754a8eb15332325cb4409b32662eafda77cad.jpg",
126
+ "image_caption": [
127
+ "Right frame"
128
+ ],
129
+ "image_footnote": [],
130
+ "bbox": [
131
+ 800,
132
+ 277,
133
+ 888,
134
+ 368
135
+ ],
136
+ "page_idx": 0
137
+ },
138
+ {
139
+ "type": "text",
140
+ "text": "Human body trajectories are a salient cue to identify actions in human-centric video understanding [45]. In sign language, such trajectories are mainly conveyed by both manual components (hand/arm gestures), and non-manual components (facial expressions, head movements, and body postures) [11,36]. Especially, both hands move horizontally and vertically across consecutive frames quickly, with finger twisting and facial expressions to express a sign. To track and leverage such body trajectories is of great importance to understanding sign language.",
141
+ "bbox": [
142
+ 496,
143
+ 474,
144
+ 890,
145
+ 626
146
+ ],
147
+ "page_idx": 0
148
+ },
149
+ {
150
+ "type": "text",
151
+ "text": "However, current CSLR methods [5,7,17,34,35,37,55] usually process each frame separately, thus failing to exploit such critical cues in the early stage. Especially, they usually adopt a shared 2D CNN to capture spatial features for each frame independently. In this sense, frames are processed individually without interactions with adjacent neighbors, thus inhibited to identify and leverage cross-frame trajectories to express a sign. The generated features are thus not aware of local temporal patterns and fail to perceive the hand/face movements in expressing a sign. To handle this limitation, well-known 3D convolution [4] or its $(2 + 1)\\mathrm{D}$ variants [43, 50] are potential candidates to capture short-term temporal information to identify body trajectories. Other temporal methods like temporal shift [31] or temporal convolutions [32] can also attend to short-term temporal movements. However, it's hard for them to aggregate beneficial information from distant informative spatial regions due to their limited spatial-temporal receptive field.",
152
+ "bbox": [
153
+ 496,
154
+ 628,
155
+ 892,
156
+ 900
157
+ ],
158
+ "page_idx": 0
159
+ },
160
+ {
161
+ "type": "page_footnote",
162
+ "text": "Gloss is the atomic lexical unit to annotate sign languages.",
163
+ "bbox": [
164
+ 94,
165
+ 886,
166
+ 410,
167
+ 900
168
+ ],
169
+ "page_idx": 0
170
+ },
171
+ {
172
+ "type": "aside_text",
173
+ "text": "arXiv:2303.03202v3 [cs.CV] 18 Mar 2023",
174
+ "bbox": [
175
+ 22,
176
+ 258,
177
+ 57,
178
+ 705
179
+ ],
180
+ "page_idx": 0
181
+ },
182
+ {
183
+ "type": "page_number",
184
+ "text": "1",
185
+ "bbox": [
186
+ 480,
187
+ 924,
188
+ 488,
189
+ 936
190
+ ],
191
+ "page_idx": 0
192
+ },
193
+ {
194
+ "type": "text",
195
+ "text": "Besides, as their structures are fixed for each sample during inference, they may fail to dynamically deal with different samples to identify informative regions. To tackle these problems, we propose to explicitly compute correlation maps between adjacent frames to capture body trajectories, referred to as CorrNet. As shown in fig. 1, our approach dynamically attends to informative regions in adjacent left/right frames to capture body trajectories, without relying on extra supervision.",
196
+ "bbox": [
197
+ 80,
198
+ 90,
199
+ 467,
200
+ 224
201
+ ],
202
+ "page_idx": 1
203
+ },
204
+ {
205
+ "type": "text",
206
+ "text": "In specific, our CorrNet first employs a correlation module to compute correlation maps between the current frame and its adjacent frames to identify trajectories of all spatial patches. An identification module is then presented to dynamically identify and emphasize the body trajectories embodied within these correlation maps. This procedure doesn't rely on extra expensive supervision like body keypoints [54] or heatmaps [55], which could be end-to-end trained in a lightweight way. The resulting features are thus able to gain an overview of local temporal movements to identify a sign. Remarkably, CorrNet achieves new state-of-the-art accuracy on four large-scale datasets, i.e., PHOENIX14 [27], PHOENIX14-T [2], CSL-Daily [53], and CSL [24], thanks to its special attention on body trajectories. A comprehensive comparison with other spatial-temporal reasoning methods demonstrates the superiority of our method. Visualizations hopefully verify the effects of CorrNet on emphasizing human body trajectories across adjacent frames.",
207
+ "bbox": [
208
+ 80,
209
+ 227,
210
+ 467,
211
+ 513
212
+ ],
213
+ "page_idx": 1
214
+ },
215
+ {
216
+ "type": "text",
217
+ "text": "2. Related Work",
218
+ "text_level": 1,
219
+ "bbox": [
220
+ 80,
221
+ 532,
222
+ 215,
223
+ 547
224
+ ],
225
+ "page_idx": 1
226
+ },
227
+ {
228
+ "type": "text",
229
+ "text": "2.1. Continuous Sign Language Recognition",
230
+ "text_level": 1,
231
+ "bbox": [
232
+ 80,
233
+ 558,
234
+ 413,
235
+ 573
236
+ ],
237
+ "page_idx": 1
238
+ },
239
+ {
240
+ "type": "text",
241
+ "text": "Sign language recognition methods can be roughly categorized into isolated sign language recognition [19, 20, 44] and continuous sign language recognition [5, 7, 34, 35, 38] (CSLR), and we focus on the latter in this paper. CSLR tries to translate image frames into corresponding glosses in a weakly-supervised way: only sentence-level label is provided. Earlier methods [13, 14] in CSLR always employ hand-crafted features or HMM-based systems [16, 27-29] to perform temporal modeling and translate sentences step by step. HMM-based systems first employ a feature extractor to capture visual features and then adopt an HMM to perform long-term temporal modeling.",
242
+ "bbox": [
243
+ 80,
244
+ 582,
245
+ 467,
246
+ 763
247
+ ],
248
+ "page_idx": 1
249
+ },
250
+ {
251
+ "type": "text",
252
+ "text": "The recent success of convolutional neural networks (CNNs) and recurrent neural networks (RNNs) brings huge progress for CSLR. The widely used CTC loss [15] in recent CSLR methods [5, 7, 34, 35, 37, 38] enables training deep networks in an end-to-end manner by sequentially aligning target sentences with input frames. These CTC-based methods first rely on a feature extractor, i.e., 3D or 2D&1D CNN hybrids, to extract frame-wise features, and then adopt a LSTM for capturing long-term temporal de",
253
+ "bbox": [
254
+ 80,
255
+ 763,
256
+ 467,
257
+ 898
258
+ ],
259
+ "page_idx": 1
260
+ },
261
+ {
262
+ "type": "text",
263
+ "text": "pendencies. However, several methods [7,38] found in such conditions the feature extractor is not well-trained and then present an iterative training strategy to relieve this problem, but consume much more computations. Some recent studies [5,17,34] try to directly enhance the feature extractor by adding alignment losses [17,34] or adopt pseudo labels [5] in a lightweight way, alleviating the heavy computational burden. More recent works enhance CSLR by squeezing more representative temporal features [22] or dynamically emphasizing informative spatial regions [23].",
264
+ "bbox": [
265
+ 503,
266
+ 90,
267
+ 888,
268
+ 241
269
+ ],
270
+ "page_idx": 1
271
+ },
272
+ {
273
+ "type": "text",
274
+ "text": "Our method is designed to explicitly incorporate body trajectories to identify a sign, especially those from hands and face. Some previous methods have also explicitly leveraged the hand and face features for better recognition. For example, CNN-LSTM-HMM [26] employs a multi-stream HMM (including hands and face) to integrate multiple visual inputs to improve recognition accuracy. STMC [54] first utilizes a pose-estimation network to estimate human body keypoints and then sends cropped appearance regions (including hands and face) for information integration. More recently, $\\mathrm{C}^2\\mathrm{SLR}$ [55] leverages the preextracted pose keypoints as supervision to guide the model to explicitly focus on hand and face regions. Our method doesn't rely on additional cues like pre-extracted body keypoints [55] or multiple streams [26], which consume much more computations to leverage hand and face information. Instead, our model could be end-to-end trained to dynamically attend to body trajectories in a self-motivated way.",
275
+ "bbox": [
276
+ 503,
277
+ 243,
278
+ 888,
279
+ 513
280
+ ],
281
+ "page_idx": 1
282
+ },
283
+ {
284
+ "type": "text",
285
+ "text": "2.2. Applications of Correlation Operation",
286
+ "text_level": 1,
287
+ "bbox": [
288
+ 503,
289
+ 527,
290
+ 826,
291
+ 544
292
+ ],
293
+ "page_idx": 1
294
+ },
295
+ {
296
+ "type": "text",
297
+ "text": "Correlation operation has been widely used in various domains, especially optical flow estimation and video action recognition. Rocco et al. [39] used it to estimate the geometric transformation between two images, and Feichtenhofer et al. [12] applied it to capture object co-occurrences across time in tracking. For optical flow estimation, Deep matching [48] computes the correlation maps between image patches to find their dense correspondences. CNN-based methods like FlowNet [10] and PWC-Net [41] design a correlation layer to help perform multiplicative patch comparisons between two feature maps. For video action recognition, Zhao et al. [52] firstly employ a correlation layer to compute a cost volume to estimate the motion information. STCNet [9] considers spatial correlations and temporal correlations, respectively, inspired by SENet [21]. MFNet [30] explicitly estimates the approximation of optical flow based on fixed motion filters. Wang et al. [45] design a learnable correlation filter and replace 3D convolutions with the proposed filter to capture spatial-temporal information. Different from these methods that explicitly or implicitly estimate optical flow, the correlation operator in our method is used in combination with other operations to identify and track body trajectories across frames.",
298
+ "bbox": [
299
+ 503,
300
+ 553,
301
+ 888,
302
+ 898
303
+ ],
304
+ "page_idx": 1
305
+ },
306
+ {
307
+ "type": "page_number",
308
+ "text": "2",
309
+ "bbox": [
310
+ 480,
311
+ 925,
312
+ 488,
313
+ 935
314
+ ],
315
+ "page_idx": 1
316
+ },
317
+ {
318
+ "type": "image",
319
+ "img_path": "images/1bef9e6eccb64cd660f57d997c8de9b81dbbaf26bedc58dc89f29f8a9088cd81.jpg",
320
+ "image_caption": [
321
+ "Figure 2. An overview for our CorrNet. It first employs a feature extractor (2D CNN) to capture frame-wise features, and then adopts a 1D CNN and a BiLSTM to perform short-term and long-term temporal modeling, respectively, followed by a classifier to predict sentences. We place our proposed identification module and correlation module after each stage of the feature extractor to identify body trajectories across adjacent frames."
322
+ ],
323
+ "image_footnote": [],
324
+ "bbox": [
325
+ 80,
326
+ 89,
327
+ 467,
328
+ 319
329
+ ],
330
+ "page_idx": 2
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "3. Method",
335
+ "text_level": 1,
336
+ "bbox": [
337
+ 76,
338
+ 455,
339
+ 166,
340
+ 469
341
+ ],
342
+ "page_idx": 2
343
+ },
344
+ {
345
+ "type": "text",
346
+ "text": "3.1. Overview",
347
+ "text_level": 1,
348
+ "bbox": [
349
+ 76,
350
+ 481,
351
+ 186,
352
+ 494
353
+ ],
354
+ "page_idx": 2
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "As shown in fig. 2, the backbone of CSLR models consists of a feature extractor (2D CNN $^2$ ), a 1D CNN, a BiLSTM, and a classifier (a fully connected layer) to perform prediction. Given a sign language video with $T$ input frames $x = \\{x_{t}\\}_{t=1}^{T} \\in \\mathcal{R}^{T \\times 3 \\times H_{0} \\times W_{0}}$ , a CSLR model aims to translate the input video into a series of glosses $y = \\{y_{i}\\}_{i=1}^{N}$ to express a sentence, with $N$ denoting the length of the label sequence. Specifically, the feature extractor first processes input frames into frame-wise features $v = \\{v_{t}\\}_{t=1}^{T} \\in \\mathcal{R}^{T \\times d}$ . Then the 1D CNN and BiLSTM perform short-term and long-term temporal modeling based on these extracted visual representations, respectively. Finally, the classifier employs widely-used CTC loss [15] to predict the probability of target gloss sequence $p(y|x)$ .",
359
+ "bbox": [
360
+ 75,
361
+ 505,
362
+ 468,
363
+ 715
364
+ ],
365
+ "page_idx": 2
366
+ },
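To make this pipeline concrete, the following rough PyTorch sketch wires up a frame-wise 2D CNN, a 1D CNN, a two-layer BiLSTM, and a CTC-trained classifier. Every concrete choice here (the toy convolutional feature extractor, feature width, clip length, and gloss vocabulary size) is a placeholder assumption for illustration, not CorrNet's actual configuration.

```python
import torch
import torch.nn as nn

class CSLRBackbone(nn.Module):
    def __init__(self, num_glosses, d=512):
        super().__init__()
        # Toy frame-wise feature extractor (a real model would use e.g. an ImageNet-pretrained CNN).
        self.cnn2d = nn.Sequential(
            nn.Conv2d(3, d, kernel_size=7, stride=4, padding=3),
            nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        # Short-term temporal modeling over frame-wise features.
        self.cnn1d = nn.Sequential(
            nn.Conv1d(d, d, 5, padding=2), nn.MaxPool1d(2),
            nn.Conv1d(d, d, 5, padding=2), nn.MaxPool1d(2),
        )
        # Long-term temporal modeling and per-step gloss classification.
        self.bilstm = nn.LSTM(d, 1024, num_layers=2, bidirectional=True, batch_first=True)
        self.classifier = nn.Linear(2 * 1024, num_glosses + 1)   # +1 for the CTC blank

    def forward(self, frames):                                    # frames: (B, T, 3, H, W)
        B, T = frames.shape[:2]
        v = self.cnn2d(frames.flatten(0, 1)).flatten(1)           # (B*T, d) frame-wise features
        v = v.view(B, T, -1).transpose(1, 2)                      # (B, d, T)
        v = self.cnn1d(v).transpose(1, 2)                         # (B, T/4, d)
        h, _ = self.bilstm(v)
        return self.classifier(h).log_softmax(-1)                 # per-step gloss log-probs

model = CSLRBackbone(num_glosses=1295)
log_probs = model(torch.randn(2, 32, 3, 112, 112))                # two clips of 32 frames
targets = torch.randint(1, 1296, (2, 5))                          # 5 glosses per clip
loss = nn.CTCLoss(blank=0)(
    log_probs.transpose(0, 1), targets,
    input_lengths=torch.full((2,), log_probs.size(1), dtype=torch.long),
    target_lengths=torch.full((2,), 5, dtype=torch.long),
)
```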
367
+ {
368
+ "type": "text",
369
+ "text": "The CSLR model processes input frames independently, failing to incorporate interactions between consecutive frames. We present a correlation module and an identification module to identify body trajectories across adjacent frames. Fig. 2 shows an example of a common feature extractor consisting of multiple stages. The proposed two modules are placed after each stage, whose outputs are element-wisely multiplied and added into the original features via a learnable coefficient $\\alpha$ . $\\alpha$ controls the contribu",
370
+ "bbox": [
371
+ 75,
372
+ 717,
373
+ 467,
374
+ 852
375
+ ],
376
+ "page_idx": 2
377
+ },
378
+ {
379
+ "type": "image",
380
+ "img_path": "images/0331c421a90cb6bc7cbf7ba62b0b403de7486e5212ab84edf9d7e04b8db016e9.jpg",
381
+ "image_caption": [
382
+ "Figure 3. Illustration for the correlation operator. It computes affinities between a feature patch $p(i,j)$ in $x_{t}$ and patches $p_{t+1}(i',j') / p_{t-1}(i',j')$ in adjacent frame $x_{t+1} / x_{t-1}$ ."
383
+ ],
384
+ "image_footnote": [],
385
+ "bbox": [
386
+ 558,
387
+ 87,
388
+ 831,
389
+ 241
390
+ ],
391
+ "page_idx": 2
392
+ },
393
+ {
394
+ "type": "text",
395
+ "text": "tions of the proposed modules, and is initialized as zero to make the whole model keep its original behaviors. The correlation module computes correlation maps between consecutive frames to capture trajectories of all spatial patches. The identification module dynamically locates and emphasizes body trajectories embedded within these correlation maps. The outputs of correlation and identification modules are multiplied to enhance inter-frame correlations.",
396
+ "bbox": [
397
+ 496,
398
+ 314,
399
+ 890,
400
+ 435
401
+ ],
402
+ "page_idx": 2
403
+ },
404
+ {
405
+ "type": "text",
406
+ "text": "3.2. Correlation Module",
407
+ "text_level": 1,
408
+ "bbox": [
409
+ 500,
410
+ 443,
411
+ 689,
412
+ 455
413
+ ],
414
+ "page_idx": 2
415
+ },
416
+ {
417
+ "type": "text",
418
+ "text": "Sign language is mainly conveyed by both manual components (hand/arm gestures), and non-manual components (facial expressions, head movements, and body postures) [11, 36]. However, these informative body parts, e.g., hands or face, are misaligned in adjacent frames. We propose to compute correlation maps between adjacent frames to identify body trajectories.",
419
+ "bbox": [
420
+ 496,
421
+ 465,
422
+ 890,
423
+ 571
424
+ ],
425
+ "page_idx": 2
426
+ },
427
+ {
428
+ "type": "text",
429
+ "text": "Each frame could be represented as a 3D tensor $C \\times H \\times W$ , where $C$ is the number of channels and $H \\times W$ denotes spatial size. Given a feature patch $p_t(i,j)$ in current frame $x_t$ , we compute the affinity between patch $p(i,j)$ and another patch $p_{t+1}(i',j')$ in adjacent frame $x_{t+1}$ , where $(i,j)$ is the spatial location of the patch. To restrict the computation, the size of the feature patch could be reduced to a minimum, i.e., a pixel. The affinity between $p(i,j)$ and $p_{t+1}(i',j')$ is computed in a dot-product way as:",
430
+ "bbox": [
431
+ 496,
432
+ 571,
433
+ 890,
434
+ 709
435
+ ],
436
+ "page_idx": 2
437
+ },
438
+ {
439
+ "type": "equation",
440
+ "text": "\n$$\nA (i, j, i ^ {\\prime}, j ^ {\\prime}) = \\frac {1}{C} \\sum_ {c = 1} ^ {C} \\left(p _ {t} ^ {c} (i, j) \\cdot p _ {t + 1} ^ {c} \\left(i ^ {\\prime}, j ^ {\\prime}\\right)\\right). \\tag {1}\n$$\n",
441
+ "text_format": "latex",
442
+ "bbox": [
443
+ 544,
444
+ 715,
445
+ 890,
446
+ 757
447
+ ],
448
+ "page_idx": 2
449
+ },
450
+ {
451
+ "type": "text",
452
+ "text": "For the spatial location $(i,j)$ in $x_{t}, (i',j')$ is often restricted within a $K \\times K$ neighborhood in $x_{t+1}$ to relieve spatial misalignment. A visualization is given in fig. 3. Thus, for all pixels in $x_{t}$ , the correlation maps are a tensor of size $H \\times W \\times K \\times K$ . $K$ could be set as a smaller value to keep semantic consistency or as a bigger value to attend to distant informative regions.",
453
+ "bbox": [
454
+ 496,
455
+ 763,
456
+ 890,
457
+ 869
458
+ ],
459
+ "page_idx": 2
460
+ },
461
+ {
462
+ "type": "text",
463
+ "text": "Given the correlation map between a pixel and its neighbors in adjacent frame $x_{t + 1}$ , we constrain its range",
464
+ "bbox": [
465
+ 500,
466
+ 869,
467
+ 890,
468
+ 900
469
+ ],
470
+ "page_idx": 2
471
+ },
472
+ {
473
+ "type": "page_footnote",
474
+ "text": "2Here we only consider the feature extractor based on 2D CNN, because recent findings [1, 55] show 3D CNN can not provide as precise gloss boundaries as 2D CNN, and lead to lower accuracy.",
475
+ "bbox": [
476
+ 75,
477
+ 862,
478
+ 467,
479
+ 900
480
+ ],
481
+ "page_idx": 2
482
+ },
483
+ {
484
+ "type": "page_number",
485
+ "text": "3",
486
+ "bbox": [
487
+ 478,
488
+ 924,
489
+ 488,
490
+ 936
491
+ ],
492
+ "page_idx": 2
493
+ },
494
+ {
495
+ "type": "text",
496
+ "text": "into (0,1) to measure their semantic similarity by passing $A(i,j,i',j')$ through a sigmoid function. We further subtract 0.5 from the results, to emphasize informative regions with positive values, and suppress redundant areas with negative values as:",
497
+ "bbox": [
498
+ 76,
499
+ 90,
500
+ 468,
501
+ 165
502
+ ],
503
+ "page_idx": 3
504
+ },
505
+ {
506
+ "type": "equation",
507
+ "text": "\n$$\nA ^ {\\prime} (i, j, i ^ {\\prime}, j ^ {\\prime}) = \\operatorname {S i g m o i d} \\left(A (i, j, i ^ {\\prime}, j ^ {\\prime})\\right) - 0. 5 \\tag {2}\n$$\n",
508
+ "text_format": "latex",
509
+ "bbox": [
510
+ 120,
511
+ 178,
512
+ 468,
513
+ 195
514
+ ],
515
+ "page_idx": 3
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "After identifying the trajectories between adjacent frames, we incorporate these local temporal movements into the current frame $x_{t}$ . Specifically, for a pixel in $x_{t}$ , its trajectories are aggregated from its $K \\times K$ neighbors in adjacent frame $x_{t + 1}$ , by multiplying their features with the corresponding affinities as:",
520
+ "bbox": [
521
+ 76,
522
+ 207,
523
+ 468,
524
+ 297
525
+ ],
526
+ "page_idx": 3
527
+ },
528
+ {
529
+ "type": "equation",
530
+ "text": "\n$$\nT (i, j) = \\sum_ {i ^ {\\prime}, j ^ {\\prime}} A ^ {\\prime} \\left(i, j, i ^ {\\prime}, j ^ {\\prime}\\right) * x _ {t + 1} \\left(i ^ {\\prime}, j ^ {\\prime}\\right). \\tag {3}\n$$\n",
531
+ "text_format": "latex",
532
+ "bbox": [
533
+ 132,
534
+ 309,
535
+ 468,
536
+ 340
537
+ ],
538
+ "page_idx": 3
539
+ },
540
+ {
541
+ "type": "text",
542
+ "text": "In this sense, each pixel is able to be aware of its trajectories across consecutive frames. We aggregate bidirectional trajectories from both $x_{t-1}$ and $x_{t+1}$ , and attach a learnable coefficient $\\beta$ to measure the importance of bi-directions. Thus, eq. 3 could be updated as:",
543
+ "bbox": [
544
+ 76,
545
+ 353,
546
+ 468,
547
+ 428
548
+ ],
549
+ "page_idx": 3
550
+ },
551
+ {
552
+ "type": "equation",
553
+ "text": "\n$$\n\\begin{array}{l} T (i, j) = \\beta_ {1} \\cdot \\sum_ {i ^ {\\prime}, j ^ {\\prime}} A _ {t + 1} ^ {\\prime} \\left(i, j, i ^ {\\prime}, j ^ {\\prime}\\right) * x _ {t + 1} \\left(i ^ {\\prime}, j ^ {\\prime}\\right) + \\tag {4} \\\\ \\beta_ {2} \\cdot \\sum_ {i ^ {\\prime}, j ^ {\\prime}} A _ {t - 1} ^ {\\prime} (i, j, i ^ {\\prime}, j ^ {\\prime}) * x _ {t - 1} (i ^ {\\prime}, j ^ {\\prime}) \\\\ \\end{array}\n$$\n",
554
+ "text_format": "latex",
555
+ "bbox": [
556
+ 99,
557
+ 439,
558
+ 468,
559
+ 508
560
+ ],
561
+ "page_idx": 3
562
+ },
563
+ {
564
+ "type": "text",
565
+ "text": "where $\\beta_{1}$ and $\\beta_{2}$ are initialized as 0.5. This correlation calculation is repeated for each frame in a video to track body trajectories across the whole video.",
566
+ "bbox": [
567
+ 76,
568
+ 520,
569
+ 468,
570
+ 565
571
+ ],
572
+ "page_idx": 3
573
+ },
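The following is a minimal sketch of Eqs. (1)-(4) as described above, not the released CorrNet code: with the neighborhood K covering the full image, the correlation maps and the trajectory aggregation reduce to two tensor contractions per direction. The (T, C, H, W) layout and the treatment of the boundary frames are assumptions made for illustration.

```python
import torch

def correlation_trajectories(x, beta1=0.5, beta2=0.5):
    """x: (T, C, H, W) features of one clip; returns trajectory features T(x) of the same shape."""
    T_, C, H, W = x.shape
    flat = x.reshape(T_, C, H * W)                                      # (T, C, HW)
    # Eq. (1): channel-averaged affinities between every pixel of frame t and frame t+1 / t-1.
    aff_fwd = torch.einsum('tcp,tcq->tpq', flat[:-1], flat[1:]) / C     # (T-1, HW, HW)
    aff_bwd = torch.einsum('tcp,tcq->tpq', flat[1:], flat[:-1]) / C
    # Eq. (2): squash into (0, 1) and centre at zero.
    aff_fwd = torch.sigmoid(aff_fwd) - 0.5
    aff_bwd = torch.sigmoid(aff_bwd) - 0.5
    # Eqs. (3)-(4): aggregate neighbour features weighted by their affinities, in both directions.
    traj = torch.zeros_like(x)
    traj[:-1] += beta1 * torch.einsum('tpq,tcq->tcp', aff_fwd, flat[1:]).reshape(T_ - 1, C, H, W)
    traj[1:] += beta2 * torch.einsum('tpq,tcq->tcp', aff_bwd, flat[:-1]).reshape(T_ - 1, C, H, W)
    return traj  # boundary frames only receive one direction

traj = correlation_trajectories(torch.randn(4, 64, 14, 14))
```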
574
+ {
575
+ "type": "text",
576
+ "text": "3.3. Identification Module",
577
+ "text_level": 1,
578
+ "bbox": [
579
+ 76,
580
+ 575,
581
+ 279,
582
+ 589
583
+ ],
584
+ "page_idx": 3
585
+ },
586
+ {
587
+ "type": "text",
588
+ "text": "The correlation module computes correlation maps between each pixel with its $K \\times K$ neighbors in adjacent frames $x_{t-1}$ and $x_{t+1}$ . However, as not all regions are critical for expressing a sign, only those informative regions carrying body trajectories should be emphasized in the current frame $x_t$ . The trajectories of background or noise should be suppressed. We present an identification module to dynamically emphasize these informative spatial regions. Specifically, as informative regions like hand and face are misaligned in adjacent frames, the identification module leverages the closely correlated local spatial-temporal features to tackle the misalignment issue and locate informative regions.",
589
+ "bbox": [
590
+ 76,
591
+ 599,
592
+ 468,
593
+ 792
594
+ ],
595
+ "page_idx": 3
596
+ },
597
+ {
598
+ "type": "text",
599
+ "text": "As shown in fig. 4, the identification module first projects input features $x \\in \\mathcal{R}^{T \\times C \\times H \\times W}$ into $x_{r} \\in \\mathcal{R}^{T \\times C / r \\times H \\times W}$ with a $1 \\times 1 \\times 1$ convolution to decrease the computations, by a channel reduction factor $r$ as 16 by default.",
600
+ "bbox": [
601
+ 76,
602
+ 795,
603
+ 468,
604
+ 854
605
+ ],
606
+ "page_idx": 3
607
+ },
608
+ {
609
+ "type": "text",
610
+ "text": "As the informative regions, e.g., hands and face, are not exactly aligned in adjacent frames, it's necessary to consider a large spatial-temporal neighborhood to identify",
611
+ "bbox": [
612
+ 76,
613
+ 854,
614
+ 468,
615
+ 900
616
+ ],
617
+ "page_idx": 3
618
+ },
619
+ {
620
+ "type": "image",
621
+ "img_path": "images/af232ab19c1642040dd7a71df28ebb9578142e8cdfc504693896298d7b5f0f13.jpg",
622
+ "image_caption": [
623
+ "Figure 4. Illustration for our identification module."
624
+ ],
625
+ "image_footnote": [],
626
+ "bbox": [
627
+ 501,
628
+ 87,
629
+ 883,
630
+ 428
631
+ ],
632
+ "page_idx": 3
633
+ },
634
+ {
635
+ "type": "text",
636
+ "text": "these features. Instead of directly employing a large 3D spatial-temporal kernel, we present a multi-scale paradigm by decomposing it into parallel branches of progressive dilation rates to reduce required computations and increase the model capacity.",
637
+ "bbox": [
638
+ 496,
639
+ 478,
640
+ 890,
641
+ 553
642
+ ],
643
+ "page_idx": 3
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "Specifically, as shown in fig. 4, with a same small base convolution kernel of $K_{t} \\times K_{s} \\times K_{s}$ , we employ multiple convolutions with their dilation rates increasing along spatial and temporal dimensions concurrently. The spatial and temporal dilation rate range within $(1, N_{s})$ and $(1, N_{t})$ , respectively, resulting in total $N_{s} \\times N_{t}$ branches. Group convolutions are employed for each branch to reduce parameters and computations. Features from different branches are multiplied with learnable coefficients $\\{\\sigma_{1}, \\dots, \\sigma_{N_{s} \\times N_{t}}\\}$ to control their importance, and then added to mix information from branches of various spatial-temporal receptive fields as:",
648
+ "bbox": [
649
+ 496,
650
+ 554,
651
+ 890,
652
+ 732
653
+ ],
654
+ "page_idx": 3
655
+ },
656
+ {
657
+ "type": "equation",
658
+ "text": "\n$$\nx _ {m} = \\sum_ {i = 1} ^ {N _ {s}} \\sum_ {j = 1} ^ {N _ {t}} \\sigma_ {i, j} \\cdot \\operatorname {C o n v} _ {i, j} \\left(x _ {r}\\right) \\tag {5}\n$$\n",
659
+ "text_format": "latex",
660
+ "bbox": [
661
+ 584,
662
+ 731,
663
+ 890,
664
+ 773
665
+ ],
666
+ "page_idx": 3
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "where the group-wise convolution $\\mathrm{Conv}_{i,j}$ of different branches receives features of different spatial-temporal neighborhoods, with dilation rate $(j,i,i)$ .",
671
+ "bbox": [
672
+ 496,
673
+ 780,
674
+ 890,
675
+ 824
676
+ ],
677
+ "page_idx": 3
678
+ },
679
+ {
680
+ "type": "text",
681
+ "text": "After receiving features from a large spatial-temporal neighborhood, $x_{m}$ is sent into a $1 \\times 1 \\times 1$ convolution to project its channels back into $C$ . It then passes through a sigmoid function to generate attention maps $M \\in \\mathcal{R}^{T \\times C \\times H \\times W}$ with its values ranging within (0,1). Spe",
682
+ "bbox": [
683
+ 496,
684
+ 825,
685
+ 890,
686
+ 900
687
+ ],
688
+ "page_idx": 3
689
+ },
690
+ {
691
+ "type": "page_number",
692
+ "text": "4",
693
+ "bbox": [
694
+ 478,
695
+ 924,
696
+ 490,
697
+ 935
698
+ ],
699
+ "page_idx": 3
700
+ },
701
+ {
702
+ "type": "text",
703
+ "text": "cially, $M$ is further subtracted from a constant value of 0.5 to emphasize informative regions with positive values, and suppress redundant areas with negative values as:",
704
+ "bbox": [
705
+ 75,
706
+ 90,
707
+ 468,
708
+ 137
709
+ ],
710
+ "page_idx": 4
711
+ },
712
+ {
713
+ "type": "equation",
714
+ "text": "\n$$\nM = \\operatorname {S i g m o i d} \\left(\\operatorname {C o n v} _ {1 \\times 1 \\times 1} \\left(x _ {m}\\right)\\right) - 0. 5. \\tag {6}\n$$\n",
715
+ "text_format": "latex",
716
+ "bbox": [
717
+ 137,
718
+ 148,
719
+ 468,
720
+ 165
721
+ ],
722
+ "page_idx": 4
723
+ },
724
+ {
725
+ "type": "text",
726
+ "text": "Given the attention maps $M$ to identify informative regions, it's multiplied with the aggregated trajectories $T(x)$ by the correlation module to emphasize body trajectories and suppress others like background or noise. This refined trajectory information is finally incorporated into original spatial features $x$ via a residual connection as:",
727
+ "bbox": [
728
+ 75,
729
+ 176,
730
+ 468,
731
+ 266
732
+ ],
733
+ "page_idx": 4
734
+ },
735
+ {
736
+ "type": "equation",
737
+ "text": "\n$$\nx ^ {\\text {o u t}} = x + \\alpha T (x) \\cdot M. \\tag {7}\n$$\n",
738
+ "text_format": "latex",
739
+ "bbox": [
740
+ 189,
741
+ 277,
742
+ 468,
743
+ 294
744
+ ],
745
+ "page_idx": 4
746
+ },
747
+ {
748
+ "type": "text",
749
+ "text": "As stated before, $\\alpha$ is initialized as zero to keep the original spatial features.",
750
+ "bbox": [
751
+ 76,
752
+ 305,
753
+ 468,
754
+ 335
755
+ ],
756
+ "page_idx": 4
757
+ },
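A small PyTorch-style sketch of the identification module (Eqs. (5)-(6)) and the final combination (Eq. (7)) is given below. The group count, the initialization of the branch weights, and the (B, C, T, H, W) layout are assumptions for illustration rather than details taken from the official implementation.

```python
import torch
import torch.nn as nn

class IdentificationModule(nn.Module):
    def __init__(self, channels, r=16, Nt=4, Ns=3, Kt=3, Ks=3, groups=4):
        super().__init__()
        c = channels // r
        self.reduce = nn.Conv3d(channels, c, kernel_size=1)        # 1x1x1 channel reduction
        self.branches = nn.ModuleList()
        for j in range(1, Nt + 1):                                  # temporal dilation rates
            for i in range(1, Ns + 1):                              # spatial dilation rates
                pad = (j * (Kt - 1) // 2, i * (Ks - 1) // 2, i * (Ks - 1) // 2)
                self.branches.append(nn.Conv3d(c, c, (Kt, Ks, Ks), padding=pad,
                                               dilation=(j, i, i), groups=groups))
        self.sigma = nn.Parameter(torch.ones(len(self.branches)))  # per-branch weights
        self.expand = nn.Conv3d(c, channels, kernel_size=1)        # project back to C channels
        self.alpha = nn.Parameter(torch.zeros(1))                   # Eq. (7), initialized at zero

    def forward(self, x, traj):
        # x, traj: (B, C, T, H, W); traj is the correlation-module output T(x).
        xr = self.reduce(x)
        xm = sum(s * b(xr) for s, b in zip(self.sigma, self.branches))   # Eq. (5)
        M = torch.sigmoid(self.expand(xm)) - 0.5                          # Eq. (6)
        return x + self.alpha * traj * M                                  # Eq. (7)

out = IdentificationModule(64)(torch.randn(1, 64, 4, 14, 14), torch.randn(1, 64, 4, 14, 14))
```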
758
+ {
759
+ "type": "text",
760
+ "text": "4. Experiments",
761
+ "text_level": 1,
762
+ "bbox": [
763
+ 76,
764
+ 349,
765
+ 209,
766
+ 366
767
+ ],
768
+ "page_idx": 4
769
+ },
770
+ {
771
+ "type": "text",
772
+ "text": "4.1. Experimental Setup",
773
+ "text_level": 1,
774
+ "bbox": [
775
+ 76,
776
+ 373,
777
+ 266,
778
+ 390
779
+ ],
780
+ "page_idx": 4
781
+ },
782
+ {
783
+ "type": "text",
784
+ "text": "4.1.1 Datasets.",
785
+ "text_level": 1,
786
+ "bbox": [
787
+ 76,
788
+ 397,
789
+ 192,
790
+ 411
791
+ ],
792
+ "page_idx": 4
793
+ },
794
+ {
795
+ "type": "text",
796
+ "text": "PHOENIX14 [27] is recorded from a German weather forecast broadcast with nine actors before a clean background with a resolution of $210 \\times 260$ . It contains 6841 sentences with a vocabulary of 1295 signs, divided into 5672 training samples, 540 development (Dev) samples and 629 testing (Test) samples.",
797
+ "bbox": [
798
+ 75,
799
+ 420,
800
+ 468,
801
+ 511
802
+ ],
803
+ "page_idx": 4
804
+ },
805
+ {
806
+ "type": "text",
807
+ "text": "PHOENIX14-T [2] is available for both CSLR and sign language translation tasks. It contains 8247 sentences with a vocabulary of 1085 signs, split into 7096 training instances, 519 development (Dev) instances and 642 testing (Test) instances.",
808
+ "bbox": [
809
+ 75,
810
+ 511,
811
+ 468,
812
+ 585
813
+ ],
814
+ "page_idx": 4
815
+ },
816
+ {
817
+ "type": "text",
818
+ "text": "CSL-Daily [53] revolves around daily life, recorded indoors at 30 fps by 10 signers. It contains 20654 sentences, divided into 18401 training samples, 1077 development (Dev) samples and 1176 testing (Test) samples.",
819
+ "bbox": [
820
+ 75,
821
+ 585,
822
+ 468,
823
+ 647
824
+ ],
825
+ "page_idx": 4
826
+ },
827
+ {
828
+ "type": "text",
829
+ "text": "CSL [24] is collected in a laboratory environment by fifty signers, with a vocabulary size of 178 and 100 sentences. It contains 25000 videos, divided into training and testing sets by a ratio of 8:2.",
830
+ "bbox": [
831
+ 75,
832
+ 647,
833
+ 468,
834
+ 708
835
+ ],
836
+ "page_idx": 4
837
+ },
838
+ {
839
+ "type": "text",
840
+ "text": "4.1.2 Training details.",
841
+ "text_level": 1,
842
+ "bbox": [
843
+ 76,
844
+ 724,
845
+ 243,
846
+ 739
847
+ ],
848
+ "page_idx": 4
849
+ },
850
+ {
851
+ "type": "text",
852
+ "text": "For fair comparisons, we follow the same setting as state-of-the-art methods [34, 55] to prepare our model. We adopt ResNet18 [18] as the 2D CNN backbone with ImageNet [8] pretrained weights. The 1D CNN of state-of-the-art methods is set as a sequence of $\\{\\mathrm{K}5,\\mathrm{P}2,\\mathrm{K}5,\\mathrm{P}2\\}$ layers where $\\mathrm{K}\\sigma$ and $\\mathrm{P}\\sigma$ denotes a 1D convolutional layer and a pooling layer with kernel size of $\\sigma$ , respectively. A two-layer BiLSTM with hidden size 1024 is attached for long-term temporal modeling, followed by a fully connected layer for sentence prediction. We train our models for 40 epochs with",
853
+ "bbox": [
854
+ 75,
855
+ 750,
856
+ 468,
857
+ 900
858
+ ],
859
+ "page_idx": 4
860
+ },
861
+ {
862
+ "type": "table",
863
+ "img_path": "images/3ec60dd3b1185e845e84495f92b7fb3eec39aabbd9cc48287b56581c746779c7.jpg",
864
+ "table_caption": [],
865
+ "table_footnote": [],
866
+ "table_body": "<table><tr><td>Configurations</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>-</td><td>20.2</td><td>21.0</td></tr><tr><td>Nt=4, Ns=1</td><td>19.6</td><td>20.1</td></tr><tr><td>Nt=4, Ns=2</td><td>19.2</td><td>19.8</td></tr><tr><td>Nt=4, Ns=3</td><td>18.8</td><td>19.4</td></tr><tr><td>Nt=4, Ns=4</td><td>19.1</td><td>19.7</td></tr><tr><td>Nt=2, Ns=3</td><td>19.4</td><td>19.9</td></tr><tr><td>Nt=3, Ns=3</td><td>19.1</td><td>19.7</td></tr><tr><td>Nt=4, Ns=3</td><td>18.8</td><td>19.4</td></tr><tr><td>Nt=5, Ns=3</td><td>19.3</td><td>19.8</td></tr><tr><td>Kt=9, Ks=7</td><td>19.9</td><td>20.4</td></tr></table>",
867
+ "bbox": [
868
+ 566,
869
+ 88,
870
+ 826,
871
+ 260
872
+ ],
873
+ "page_idx": 4
874
+ },
875
+ {
876
+ "type": "text",
877
+ "text": "Table 1. Ablations for the multi-scale architecture of identification module on the PHOENIX14 dataset.",
878
+ "bbox": [
879
+ 500,
880
+ 270,
881
+ 890,
882
+ 297
883
+ ],
884
+ "page_idx": 4
885
+ },
886
+ {
887
+ "type": "text",
888
+ "text": "initial learning rate 0.001 which is divided by 5 at epoch 20 and 30. Adam [25] optimizer is adopted as default with weight decay 0.001 and batch size 2. All input frames are first resized to $256 \\times 256$ , and then randomly cropped to $224 \\times 224$ with $50\\%$ horizontal flipping and $20\\%$ temporal rescaling during training. During inference, a $224 \\times 224$ center crop is simply adopted. Following VAC [34], we employ the VE loss and VA loss for visual supervision, with weights 1.0 and 25.0, respectively. Our model is trained and evaluated upon a 3090 graphical card.",
889
+ "bbox": [
890
+ 498,
891
+ 324,
892
+ 890,
893
+ 474
894
+ ],
895
+ "page_idx": 4
896
+ },
897
+ {
898
+ "type": "text",
899
+ "text": "4.1.3 Evaluation Metric.",
900
+ "text_level": 1,
901
+ "bbox": [
902
+ 500,
903
+ 493,
904
+ 684,
905
+ 507
906
+ ],
907
+ "page_idx": 4
908
+ },
909
+ {
910
+ "type": "text",
911
+ "text": "We use Word Error Rate (WER) as the evaluation metric, which is defined as the minimal summation of the substitution, insertion, and deletion operations to convert the predicted sentence to the reference sentence, as:",
912
+ "bbox": [
913
+ 498,
914
+ 517,
915
+ 890,
916
+ 577
917
+ ],
918
+ "page_idx": 4
919
+ },
920
+ {
921
+ "type": "equation",
922
+ "text": "\n$$\n\\mathrm {W E R} = \\frac {\\# \\text {s u b} + \\# \\text {i n s} + \\# \\text {d e l}}{\\# \\text {r e f e r e n c e}}. \\tag {8}\n$$\n",
923
+ "text_format": "latex",
924
+ "bbox": [
925
+ 584,
926
+ 585,
927
+ 890,
928
+ 619
929
+ ],
930
+ "page_idx": 4
931
+ },
932
+ {
933
+ "type": "text",
934
+ "text": "Note that the lower the WER, the better the accuracy.",
935
+ "bbox": [
936
+ 500,
937
+ 627,
938
+ 808,
939
+ 641
940
+ ],
941
+ "page_idx": 4
942
+ },
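As a worked example of Eq. (8), WER is the word-level edit distance between the predicted and reference gloss sequences, normalized by the reference length; the gloss strings in the snippet are made up for illustration.

```python
# Standard dynamic-programming word error rate: substitutions + insertions + deletions, over reference length.
def wer(reference, hypothesis):
    r, h = reference.split(), hypothesis.split()
    d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
    for i in range(len(r) + 1):
        d[i][0] = i
    for j in range(len(h) + 1):
        d[0][j] = j
    for i in range(1, len(r) + 1):
        for j in range(1, len(h) + 1):
            cost = 0 if r[i - 1] == h[j - 1] else 1
            d[i][j] = min(d[i - 1][j] + 1,         # deletion
                          d[i][j - 1] + 1,         # insertion
                          d[i - 1][j - 1] + cost)  # substitution / match
    return d[len(r)][len(h)] / len(r)

print(wer("MORGEN NORD REGEN", "MORGEN REGEN WIND"))  # 2 edits / 3 reference words = 0.67
```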
943
+ {
944
+ "type": "text",
945
+ "text": "4.2. Ablation Study",
946
+ "text_level": 1,
947
+ "bbox": [
948
+ 500,
949
+ 650,
950
+ 653,
951
+ 666
952
+ ],
953
+ "page_idx": 4
954
+ },
955
+ {
956
+ "type": "text",
957
+ "text": "We report ablative results on both development (Dev) and testing (Test) sets of PHOENIX14 dataset.",
958
+ "bbox": [
959
+ 500,
960
+ 674,
961
+ 890,
962
+ 703
963
+ ],
964
+ "page_idx": 4
965
+ },
966
+ {
967
+ "type": "text",
968
+ "text": "Study on the multi-scale architecture of identification module. In tab. 1, without the identification module, our baseline achieves $20.2\\%$ and $21.0\\%$ WER on the Dev and Test Set, respectively. The base kernel size is set as $3 \\times 3 \\times 3$ for $K_{t} \\times K_{s} \\times K_{s}$ . When fixing $N_{t} = 4$ and varying spatial dilation rates to expand spatial receptive fields, it's observed that a larger $N_{s}$ consistently brings better accuracy. When $N_{s}$ reaches 3, it brings no more accuracy gain. We set $N_{s}$ as 3 by default and test the effects of $N_{t}$ . One can see that either increasing $N_{t}$ to 5 or decreasing $N_{t}$ to 2 or 3 achieves worse accuracy. We thus adopt $N_{t}$ as 4 by default. We also compare our proposed multi-scale architecture with a normal implementation of more parameters. The receptive field",
969
+ "bbox": [
970
+ 498,
971
+ 704,
972
+ 890,
973
+ 900
974
+ ],
975
+ "page_idx": 4
976
+ },
977
+ {
978
+ "type": "page_number",
979
+ "text": "5",
980
+ "bbox": [
981
+ 480,
982
+ 924,
983
+ 488,
984
+ 935
985
+ ],
986
+ "page_idx": 4
987
+ },
988
+ {
989
+ "type": "table",
990
+ "img_path": "images/43a56eba6eda887df1a7c2f57f15f468cf440ac774b73cdb3b6fd80b7f153b27.jpg",
991
+ "table_caption": [],
992
+ "table_footnote": [],
993
+ "table_body": "<table><tr><td>Configurations</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>-</td><td>20.2</td><td>21.0</td></tr><tr><td>K=3</td><td>19.6</td><td>20.4</td></tr><tr><td>K=5</td><td>19.4</td><td>20.2</td></tr><tr><td>K=7</td><td>19.2</td><td>20.0</td></tr><tr><td>K=9</td><td>19.1</td><td>19.8</td></tr><tr><td>K= H or W (Full image)</td><td>18.8</td><td>19.4</td></tr></table>",
994
+ "bbox": [
995
+ 107,
996
+ 87,
997
+ 439,
998
+ 198
999
+ ],
1000
+ "page_idx": 5
1001
+ },
1002
+ {
1003
+ "type": "table",
1004
+ "img_path": "images/15df3470110f8f67aeb6d5b48ffb7a852f6613baff071f3f6de1b695b57c05cf.jpg",
1005
+ "table_caption": [
1006
+ "Table 2. Ablations for the articulated area of correlation module on the PHOENIX14 dataset."
1007
+ ],
1008
+ "table_footnote": [],
1009
+ "table_body": "<table><tr><td>Correlation</td><td>Identification</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>X</td><td>X</td><td>20.2</td><td>21.0</td></tr><tr><td>✓</td><td>X</td><td>19.5</td><td>20.0</td></tr><tr><td>X</td><td>✓</td><td>19.4</td><td>19.9</td></tr><tr><td>✓</td><td>✓</td><td>18.8</td><td>19.4</td></tr></table>",
1010
+ "bbox": [
1011
+ 99,
1012
+ 253,
1013
+ 444,
1014
+ 340
1015
+ ],
1016
+ "page_idx": 5
1017
+ },
1018
+ {
1019
+ "type": "text",
1020
+ "text": "of the identification module with $N_{t} = 4$ , $N_{s} = 3$ is identical to a normal convolution with $K_{t} = 9$ and $K_{s} = 7$ . As shown in the bottom of tab. 1, although a normal convolution owns more parameters and computations than our proposed architecture, it still performs worse, verifying the effectiveness of our architecture.",
1021
+ "bbox": [
1022
+ 75,
1023
+ 409,
1024
+ 468,
1025
+ 500
1026
+ ],
1027
+ "page_idx": 5
1028
+ },
1029
+ {
1030
+ "type": "text",
1031
+ "text": "Study on the neighborhood $K$ of correlation module. In tab. 2, when $K$ is null, the correlation module is disabled. It's observed that a larger $K$ , i.e., more incorporated spatial-temporal neighbors, consistently brings better accuracy. The performance reaches the peak when $K$ equals $H$ or $W$ , i.e., the full image is incorporated. In this case, distant informative objects could be interacted to provide discriminative information. We set $K = H$ or $W$ by default.",
1032
+ "bbox": [
1033
+ 75,
1034
+ 502,
1035
+ 468,
1036
+ 625
1037
+ ],
1038
+ "page_idx": 5
1039
+ },
1040
+ {
1041
+ "type": "text",
1042
+ "text": "Effectiveness of two proposed modules. In tab. 3, we first notice that either only using the correlation module or identification module could already bring a notable accuracy boost, with $19.5\\%$ & $20.0\\%$ and $19.4\\%$ & $19.9\\%$ accuracy on the Dev and Test Sets, respectively. When combining both modules, the effectiveness is further activated with $18.8\\%$ & $19.4\\%$ accuracy on the Dev and Test Sets, respectively, which is adopted as the default setting.",
1043
+ "bbox": [
1044
+ 75,
1045
+ 625,
1046
+ 468,
1047
+ 744
1048
+ ],
1049
+ "page_idx": 5
1050
+ },
1051
+ {
1052
+ "type": "text",
1053
+ "text": "Effects of locations for CorrNet. Tab 4 ablates the locations of our proposed modules, which are placed after Stage 2, 3 or 4. It's observed that choosing any one of these locations could bring a notable accuracy boost, with $19.6\\%$ & $20.1\\%$ , $19.5\\%$ & $20.2\\%$ and $19.4\\%$ & $20.0\\%$ accuracy boost. When combining two or more locations, a larger accuracy gain is witnessed. The accuracy reaches the peak when proposed modules are placed after Stage 2, 3 and 4, with $18.8\\%$ & $19.4\\%$ accuracy, which is adopted by default.",
1054
+ "bbox": [
1055
+ 75,
1056
+ 747,
1057
+ 468,
1058
+ 883
1059
+ ],
1060
+ "page_idx": 5
1061
+ },
1062
+ {
1063
+ "type": "text",
1064
+ "text": "Generalizability of CorrNet. We deploy CorrNet upon",
1065
+ "bbox": [
1066
+ 96,
1067
+ 885,
1068
+ 468,
1069
+ 901
1070
+ ],
1071
+ "page_idx": 5
1072
+ },
1073
+ {
1074
+ "type": "table",
1075
+ "img_path": "images/bc2382995cc1f44405cb95c43e5d2e08f0c80ed048c4d63f158a000629cd55f3.jpg",
1076
+ "table_caption": [
1077
+ "Table 3. Ablations for the effectiveness of correlation module and identification module on the PHOENIX14 dataset."
1078
+ ],
1079
+ "table_footnote": [],
1080
+ "table_body": "<table><tr><td>Stage 2</td><td>Stage 3</td><td>Stage 4</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>X</td><td>X</td><td>X</td><td>20.2</td><td>21.0</td></tr><tr><td>✓</td><td>X</td><td>X</td><td>19.6</td><td>20.1</td></tr><tr><td>X</td><td>✓</td><td>X</td><td>19.5</td><td>20.2</td></tr><tr><td>X</td><td>X</td><td>✓</td><td>19.4</td><td>20.0</td></tr><tr><td>✓</td><td>✓</td><td>X</td><td>19.2</td><td>19.9</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>18.8</td><td>19.4</td></tr></table>",
1081
+ "bbox": [
1082
+ 521,
1083
+ 87,
1084
+ 870,
1085
+ 208
1086
+ ],
1087
+ "page_idx": 5
1088
+ },
1089
+ {
1090
+ "type": "table",
1091
+ "img_path": "images/3eae3651b903573af8e1940954cfa4d4241128bb8d457de0c4abe29fa69c5de4.jpg",
1092
+ "table_caption": [
1093
+ "Table 4. Ablations for the locations of CorrNet on the PHOENIX14 dataset."
1094
+ ],
1095
+ "table_footnote": [],
1096
+ "table_body": "<table><tr><td>Configurations</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>SqueezeNet [21]</td><td>22.2</td><td>22.6</td></tr><tr><td>w/ CorrNet</td><td>20.2</td><td>20.4</td></tr><tr><td>ShuffleNet V2 [33]</td><td>21.7</td><td>22.2</td></tr><tr><td>w/ CorrNet</td><td>19.7</td><td>20.2</td></tr><tr><td>GoogleNet [42]</td><td>21.4</td><td>21.5</td></tr><tr><td>w/ CorrNet</td><td>19.6</td><td>19.8</td></tr></table>",
1097
+ "bbox": [
1098
+ 550,
1099
+ 263,
1100
+ 841,
1101
+ 375
1102
+ ],
1103
+ "page_idx": 5
1104
+ },
1105
+ {
1106
+ "type": "table",
1107
+ "img_path": "images/6e81ec1d53801d671f60382b9dc5aee9dcd5d91d9d23d46796b45551c1b06484.jpg",
1108
+ "table_caption": [
1109
+ "Table 5. Ablations for the generalizability of CorrNet over multiple backbones on the PHOENIX14 dataset."
1110
+ ],
1111
+ "table_footnote": [],
1112
+ "table_body": "<table><tr><td>Methods</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>-</td><td>20.2</td><td>21.0</td></tr><tr><td>w/ SENet [21]</td><td>19.8</td><td>20.4</td></tr><tr><td>w/ CBAM [49]</td><td>19.7</td><td>20.2</td></tr><tr><td>w/ NLNet [47]</td><td>-</td><td>-</td></tr><tr><td>I3D [4]</td><td>22.6</td><td>22.9</td></tr><tr><td>R(2+1)D [43]</td><td>22.4</td><td>22.3</td></tr><tr><td>TSM [31]</td><td>19.9</td><td>20.5</td></tr><tr><td>CorrNet</td><td>18.8</td><td>19.4</td></tr></table>",
1113
+ "bbox": [
1114
+ 578,
1115
+ 431,
1116
+ 813,
1117
+ 571
1118
+ ],
1119
+ "page_idx": 5
1120
+ },
1121
+ {
1122
+ "type": "text",
1123
+ "text": "Table 6. Comparison with other methods of spatial-temporal attention or temporal reasoning on the PHOENIX14 dataset.",
1124
+ "bbox": [
1125
+ 498,
1126
+ 583,
1127
+ 890,
1128
+ 612
1129
+ ],
1130
+ "page_idx": 5
1131
+ },
1132
+ {
1133
+ "type": "text",
1134
+ "text": "multiple backbones, including SqueezeNet [21], ShuffleNet V2 [33] and GoogLeNet [42] to validate its generalizability in tab. 5. The proposed modules are placed after three spatial downsampling layers in SqueezeNet, ShuffleNet V2 and GoogLeNet, respectively. It's observed that our proposed model generalizes well upon different backbones, bringing $+2.0\\%$ & $+2.2\\%$ , $+2.0\\%$ & $+2.0\\%$ and $+1.8\\%$ & $+1.7\\%$ accuracy boost on the Dev and Test Sets, respectively.",
1135
+ "bbox": [
1136
+ 496,
1137
+ 641,
1138
+ 890,
1139
+ 763
1140
+ ],
1141
+ "page_idx": 5
1142
+ },
1143
+ {
1144
+ "type": "text",
1145
+ "text": "Comparisons with other spatial-temporal reasoning methods. Tab. 6 compares our approach with other methods of spatial-temporal reasoning ability. SENet [21] and CBAM [49] perform channel attention to emphasize key information. NLNet [47] employs non-local means to aggregate spatial-temporal information from other frames. I3D [4] and R(2+1)D [43] deploys 3D or 2D+1D convolutions to capture spatial-temporal features. TSM [31] adopts temporal shift operation to obtain features from ad",
1146
+ "bbox": [
1147
+ 496,
1148
+ 763,
1149
+ 890,
1150
+ 900
1151
+ ],
1152
+ "page_idx": 5
1153
+ },
1154
+ {
1155
+ "type": "page_number",
1156
+ "text": "6",
1157
+ "bbox": [
1158
+ 478,
1159
+ 924,
1160
+ 491,
1161
+ 936
1162
+ ],
1163
+ "page_idx": 5
1164
+ },
1165
+ {
1166
+ "type": "image",
1167
+ "img_path": "images/dc251d304930eca45fb08b488ba26fb982ac8fce8e3ff9e91561a0ee736e3de5.jpg",
1168
+ "image_caption": [],
1169
+ "image_footnote": [],
1170
+ "bbox": [
1171
+ 83,
1172
+ 89,
1173
+ 204,
1174
+ 183
1175
+ ],
1176
+ "page_idx": 6
1177
+ },
1178
+ {
1179
+ "type": "image",
1180
+ "img_path": "images/04a78753ff12682a48a0db3e0d0bbaf6363176106c7681694b581785828c53dd.jpg",
1181
+ "image_caption": [],
1182
+ "image_footnote": [],
1183
+ "bbox": [
1184
+ 218,
1185
+ 90,
1186
+ 338,
1187
+ 183
1188
+ ],
1189
+ "page_idx": 6
1190
+ },
1191
+ {
1192
+ "type": "image",
1193
+ "img_path": "images/d45fb699dffcc2ed46ac1c2a983a8dd26fa7d7359f2f3a418bf9a08522d8c5d3.jpg",
1194
+ "image_caption": [],
1195
+ "image_footnote": [],
1196
+ "bbox": [
1197
+ 351,
1198
+ 90,
1199
+ 472,
1200
+ 183
1201
+ ],
1202
+ "page_idx": 6
1203
+ },
1204
+ {
1205
+ "type": "image",
1206
+ "img_path": "images/5da1b21ffbb392ab568ab1af79cf610499198de4a74fc29e3dbce0b4a6a3e37c.jpg",
1207
+ "image_caption": [
1208
+ "Left"
1209
+ ],
1210
+ "image_footnote": [],
1211
+ "bbox": [
1212
+ 83,
1213
+ 191,
1214
+ 202,
1215
+ 286
1216
+ ],
1217
+ "page_idx": 6
1218
+ },
1219
+ {
1220
+ "type": "image",
1221
+ "img_path": "images/30d0bb73071782d3bf496a9d927acbe3045d10a94a01935355cf6dfca9b25ee4.jpg",
1222
+ "image_caption": [
1223
+ "Figure 5. Visualizations of correlation maps for correlation module. Based on correlation operators, each frame could especially attend to informative regions in adjacent left/right frames like hands and face (dark red areas)."
1224
+ ],
1225
+ "image_footnote": [],
1226
+ "bbox": [
1227
+ 218,
1228
+ 191,
1229
+ 338,
1230
+ 286
1231
+ ],
1232
+ "page_idx": 6
1233
+ },
1234
+ {
1235
+ "type": "image",
1236
+ "img_path": "images/ca37528798216d480189a605fb5cf371262105af88bc623c5687055369f6eaf9.jpg",
1237
+ "image_caption": [
1238
+ "Right"
1239
+ ],
1240
+ "image_footnote": [],
1241
+ "bbox": [
1242
+ 351,
1243
+ 191,
1244
+ 472,
1245
+ 286
1246
+ ],
1247
+ "page_idx": 6
1248
+ },
1249
+ {
1250
+ "type": "image",
1251
+ "img_path": "images/efe451a8062ace0ac58e80a5e5ecf1d020caddfc7db326532a140e628134a444.jpg",
1252
+ "image_caption": [],
1253
+ "image_footnote": [],
1254
+ "bbox": [
1255
+ 500,
1256
+ 92,
1257
+ 620,
1258
+ 185
1259
+ ],
1260
+ "page_idx": 6
1261
+ },
1262
+ {
1263
+ "type": "image",
1264
+ "img_path": "images/20cf0c20c8ae6e77d0dc647982930529a9371ec1615ab5069ed85f492d7026fe.jpg",
1265
+ "image_caption": [],
1266
+ "image_footnote": [],
1267
+ "bbox": [
1268
+ 635,
1269
+ 90,
1270
+ 754,
1271
+ 185
1272
+ ],
1273
+ "page_idx": 6
1274
+ },
1275
+ {
1276
+ "type": "image",
1277
+ "img_path": "images/2f07363275e37c9c5c7b6ff3c46974474904b852fc1d9f7c89561ca1b16b29e4.jpg",
1278
+ "image_caption": [],
1279
+ "image_footnote": [],
1280
+ "bbox": [
1281
+ 767,
1282
+ 90,
1283
+ 888,
1284
+ 185
1285
+ ],
1286
+ "page_idx": 6
1287
+ },
1288
+ {
1289
+ "type": "image",
1290
+ "img_path": "images/ecf58f29197304c3ff52c3db1bc3b33aeb1fba16e0f5c2d82bd08c3ad5e605fb.jpg",
1291
+ "image_caption": [
1292
+ "Left"
1293
+ ],
1294
+ "image_footnote": [],
1295
+ "bbox": [
1296
+ 500,
1297
+ 195,
1298
+ 619,
1299
+ 287
1300
+ ],
1301
+ "page_idx": 6
1302
+ },
1303
+ {
1304
+ "type": "image",
1305
+ "img_path": "images/e8e6ac5c8760f9d6f4c2f6d7404ed71773494c69609a11f8da43d4b7bbb83143.jpg",
1306
+ "image_caption": [],
1307
+ "image_footnote": [],
1308
+ "bbox": [
1309
+ 635,
1310
+ 196,
1311
+ 753,
1312
+ 287
1313
+ ],
1314
+ "page_idx": 6
1315
+ },
1316
+ {
1317
+ "type": "image",
1318
+ "img_path": "images/7d3aff697d02cece681cfd0909ffb265223c6da7239244165257f9093cbd334b.jpg",
1319
+ "image_caption": [
1320
+ "Right"
1321
+ ],
1322
+ "image_footnote": [],
1323
+ "bbox": [
1324
+ 767,
1325
+ 196,
1326
+ 888,
1327
+ 287
1328
+ ],
1329
+ "page_idx": 6
1330
+ },
1331
+ {
1332
+ "type": "table",
1333
+ "img_path": "images/b4ad453385cbcc04cddaf52eb397fb2b760608fe1db9b581cf65470645fdea18.jpg",
1334
+ "table_caption": [],
1335
+ "table_footnote": [],
1336
+ "table_body": "<table><tr><td>Methods</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>CNN+HMM+LSTM [26]</td><td>26.0</td><td>26.0</td></tr><tr><td>DNF [7]</td><td>23.1</td><td>22.9</td></tr><tr><td>STMC [54]</td><td>21.1</td><td>20.7</td></tr><tr><td>C²SLR [55]</td><td>20.5</td><td>20.4</td></tr><tr><td>CorrNet</td><td>18.8</td><td>19.4</td></tr></table>",
1337
+ "bbox": [
1338
+ 120,
1339
+ 364,
1340
+ 426,
1341
+ 462
1342
+ ],
1343
+ "page_idx": 6
1344
+ },
1345
+ {
1346
+ "type": "text",
1347
+ "text": "Table 7. Comparison with other methods that explicitly exploit hand and face features on the PHOENIX14 dataset.",
1348
+ "bbox": [
1349
+ 75,
1350
+ 472,
1351
+ 468,
1352
+ 500
1353
+ ],
1354
+ "page_idx": 6
1355
+ },
1356
+ {
1357
+ "type": "text",
1358
+ "text": "jacent frames. In the upper part of tab. 6, one can see CorrNet largely outperforms other attention-based methods, i.e., SENet, CBAM and NLNet, for its superior ability to identify and aggregate body trajectories. NLNet is out of memory due to its quadratic computational complexity with spatial-temporal size. In the bottom part of tab. 6, it's observed that I3D and $\\mathrm{R}(2 + 1)\\mathrm{D}$ even degrade accuracy, which may be attributed to their limited spatial-temporal receptive fields and increased training complexity. TSM slightly brings $0.3\\%$ & $0.3\\%$ accuracy boost. Our proposed approach surpasses these methods greatly, verifying its effectiveness in aggregating beneficial spatial-temporal information, from even distant spatial neighbors.",
1359
+ "bbox": [
1360
+ 75,
1361
+ 534,
1362
+ 468,
1363
+ 731
1364
+ ],
1365
+ "page_idx": 6
1366
+ },
1367
+ {
1368
+ "type": "text",
1369
+ "text": "Comparisons with previous methods equipped with hand or face features. Many previous CSLR methods explicitly leverage hand and face features for better recognition, like multiple input streams [26], human body keypoints [54, 55] and pre-extracted hand patches [7]. They require extra expensive pose-estimation networks like HRNet [46] or additional training stages. Our approach doesn't rely on extra supervision and could be end-to-end trained to dynamically attend to body trajectories like hand and face in a self-motivated way. Tab. 7 shows that our method outperforms these methods by a large margin.",
1370
+ "bbox": [
1371
+ 75,
1372
+ 734,
1373
+ 467,
1374
+ 902
1375
+ ],
1376
+ "page_idx": 6
1377
+ },
1378
+ {
1379
+ "type": "image",
1380
+ "img_path": "images/88b5c2ce168c86a54a8bed78df6db7ea2d244c3cef98d569b5f73a0427c6f040.jpg",
1381
+ "image_caption": [
1382
+ "Raw"
1383
+ ],
1384
+ "image_footnote": [],
1385
+ "bbox": [
1386
+ 557,
1387
+ 366,
1388
+ 630,
1389
+ 425
1390
+ ],
1391
+ "page_idx": 6
1392
+ },
1393
+ {
1394
+ "type": "image",
1395
+ "img_path": "images/7628ea62456ebb80c2835552151762c0b98fafecf3698a20b6309eec5dd29df4.jpg",
1396
+ "image_caption": [],
1397
+ "image_footnote": [],
1398
+ "bbox": [
1399
+ 642,
1400
+ 366,
1401
+ 717,
1402
+ 425
1403
+ ],
1404
+ "page_idx": 6
1405
+ },
1406
+ {
1407
+ "type": "image",
1408
+ "img_path": "images/fb00c1ec51cf04841582a9163b85263c8c7380038a5e8afaa2858c8c761f8653.jpg",
1409
+ "image_caption": [],
1410
+ "image_footnote": [],
1411
+ "bbox": [
1412
+ 728,
1413
+ 366,
1414
+ 803,
1415
+ 425
1416
+ ],
1417
+ "page_idx": 6
1418
+ },
1419
+ {
1420
+ "type": "image",
1421
+ "img_path": "images/12c3bc0ab54a97748cb81c206d49ac9e68e75429e88ae6b60bff38c30fcaa9ca.jpg",
1422
+ "image_caption": [],
1423
+ "image_footnote": [],
1424
+ "bbox": [
1425
+ 815,
1426
+ 366,
1427
+ 890,
1428
+ 425
1429
+ ],
1430
+ "page_idx": 6
1431
+ },
1432
+ {
1433
+ "type": "image",
1434
+ "img_path": "images/8ac3888f1e75298cf9af946019055584447819fd1bb3868f8a721db05509ac2a.jpg",
1435
+ "image_caption": [
1436
+ "Heatmap"
1437
+ ],
1438
+ "image_footnote": [],
1439
+ "bbox": [
1440
+ 558,
1441
+ 430,
1442
+ 630,
1443
+ 488
1444
+ ],
1445
+ "page_idx": 6
1446
+ },
1447
+ {
1448
+ "type": "image",
1449
+ "img_path": "images/928a75fb0f294e5a168cc27436e7459c7c926daeafe9893d64510aaddb57a2b4.jpg",
1450
+ "image_caption": [],
1451
+ "image_footnote": [],
1452
+ "bbox": [
1453
+ 642,
1454
+ 430,
1455
+ 717,
1456
+ 488
1457
+ ],
1458
+ "page_idx": 6
1459
+ },
1460
+ {
1461
+ "type": "image",
1462
+ "img_path": "images/b71394c5fe1116ecf20d3df69f4cacc1fcf41fd4a1690f8ac89d8c2308b0e26b.jpg",
1463
+ "image_caption": [],
1464
+ "image_footnote": [],
1465
+ "bbox": [
1466
+ 728,
1467
+ 430,
1468
+ 803,
1469
+ 488
1470
+ ],
1471
+ "page_idx": 6
1472
+ },
1473
+ {
1474
+ "type": "image",
1475
+ "img_path": "images/8b9b93ce62f81fe08ee6bababc44fd9a464a3536468c823742f8859891ed32c9.jpg",
1476
+ "image_caption": [],
1477
+ "image_footnote": [],
1478
+ "bbox": [
1479
+ 815,
1480
+ 430,
1481
+ 890,
1482
+ 488
1483
+ ],
1484
+ "page_idx": 6
1485
+ },
1486
+ {
1487
+ "type": "image",
1488
+ "img_path": "images/19feeeb2ee4473b4c19cfb98c4a0e3765e7424ba0ddabc2b35f17db5935dc1cb.jpg",
1489
+ "image_caption": [
1490
+ "Raw"
1491
+ ],
1492
+ "image_footnote": [],
1493
+ "bbox": [
1494
+ 557,
1495
+ 508,
1496
+ 630,
1497
+ 566
1498
+ ],
1499
+ "page_idx": 6
1500
+ },
1501
+ {
1502
+ "type": "image",
1503
+ "img_path": "images/3b14dab2714390bfd138e841d39c271d7a8390cc47b116e47ffbb6f1eecbc5cb.jpg",
1504
+ "image_caption": [],
1505
+ "image_footnote": [],
1506
+ "bbox": [
1507
+ 642,
1508
+ 508,
1509
+ 717,
1510
+ 566
1511
+ ],
1512
+ "page_idx": 6
1513
+ },
1514
+ {
1515
+ "type": "image",
1516
+ "img_path": "images/c8bef05374e98acbb4382a11705aa25c8ae64279938edab0cb39ab36988d64d5.jpg",
1517
+ "image_caption": [],
1518
+ "image_footnote": [],
1519
+ "bbox": [
1520
+ 728,
1521
+ 508,
1522
+ 803,
1523
+ 566
1524
+ ],
1525
+ "page_idx": 6
1526
+ },
1527
+ {
1528
+ "type": "image",
1529
+ "img_path": "images/e83d20fc59dd7758c3a3a95e29c511cd9a42b96604fc1b6c43abc92a1123e944.jpg",
1530
+ "image_caption": [],
1531
+ "image_footnote": [],
1532
+ "bbox": [
1533
+ 815,
1534
+ 508,
1535
+ 890,
1536
+ 566
1537
+ ],
1538
+ "page_idx": 6
1539
+ },
1540
+ {
1541
+ "type": "image",
1542
+ "img_path": "images/44adcc91c3fa0ebe30fcdd752315b99d7ed67606b6deb90431f3c910ffd47dfa.jpg",
1543
+ "image_caption": [
1544
+ "Heatmap"
1545
+ ],
1546
+ "image_footnote": [],
1547
+ "bbox": [
1548
+ 557,
1549
+ 571,
1550
+ 630,
1551
+ 630
1552
+ ],
1553
+ "page_idx": 6
1554
+ },
1555
+ {
1556
+ "type": "image",
1557
+ "img_path": "images/9d4705e64a227d166943293af643573f7cba4df85cf902735f5411cb3b5d0b35.jpg",
1558
+ "image_caption": [
1559
+ "Figure 6. Visualizations of heatmaps by Grad-CAM [40]. Top: raw frames; Bottom: heatmaps of our identification module. Our identification module could generally focus on the human body (light yellow areas) and especially pays attention to informative regions like hands and face (dark red areas) to track body trajectories."
1560
+ ],
1561
+ "image_footnote": [],
1562
+ "bbox": [
1563
+ 642,
1564
+ 571,
1565
+ 717,
1566
+ 630
1567
+ ],
1568
+ "page_idx": 6
1569
+ },
1570
+ {
1571
+ "type": "image",
1572
+ "img_path": "images/02198b03120a7fff44b713aba7c7cb59f003c952060fc2a9aea3f0ebb158d2a2.jpg",
1573
+ "image_caption": [],
1574
+ "image_footnote": [],
1575
+ "bbox": [
1576
+ 728,
1577
+ 571,
1578
+ 803,
1579
+ 630
1580
+ ],
1581
+ "page_idx": 6
1582
+ },
1583
+ {
1584
+ "type": "image",
1585
+ "img_path": "images/ca793193f752210d9577df0b8130e1f62c92caaee4d2f7736a3c52314e455510.jpg",
1586
+ "image_caption": [],
1587
+ "image_footnote": [],
1588
+ "bbox": [
1589
+ 815,
1590
+ 571,
1591
+ 890,
1592
+ 630
1593
+ ],
1594
+ "page_idx": 6
1595
+ },
1596
+ {
1597
+ "type": "text",
1598
+ "text": "4.3. Visualizations",
1599
+ "text_level": 1,
1600
+ "bbox": [
1601
+ 500,
1602
+ 753,
1603
+ 643,
1604
+ 768
1605
+ ],
1606
+ "page_idx": 6
1607
+ },
1608
+ {
1609
+ "type": "text",
1610
+ "text": "Visualizations for correlation module. Fig. 5 shows the correlation maps generated by our correlation module with adjacent frames. It's observed that the reference point could well attend to informative regions in adjacent left/right frame, e.g., hands or face, to track body trajectories in expressing a sign. Especially, they always focus on the moving body parts that play a major role in expressing signs. For example, the reference point (left hand) in the up-",
1611
+ "bbox": [
1612
+ 500,
1613
+ 779,
1614
+ 890,
1615
+ 901
1616
+ ],
1617
+ "page_idx": 6
1618
+ },
1619
+ {
1620
+ "type": "page_number",
1621
+ "text": "7",
1622
+ "bbox": [
1623
+ 478,
1624
+ 924,
1625
+ 490,
1626
+ 935
1627
+ ],
1628
+ "page_idx": 6
1629
+ },
1630
+ {
1631
+ "type": "table",
1632
+ "img_path": "images/994e3284dbde69e56ced18d741b2a3c8c69fc287e1bf57d32da17730f22091f2.jpg",
1633
+ "table_caption": [],
1634
+ "table_footnote": [],
1635
+ "table_body": "<table><tr><td rowspan=\"3\">Methods</td><td rowspan=\"3\">Backbone</td><td colspan=\"4\">PHOENIX14</td><td colspan=\"2\">PHOENIX14-T</td></tr><tr><td colspan=\"2\">Dev(%)</td><td colspan=\"2\">Test(%)</td><td rowspan=\"2\">Dev(%)</td><td rowspan=\"2\">Test(%)</td></tr><tr><td>del/ins</td><td>WER</td><td>del/ins</td><td>WER</td></tr><tr><td>SFL [35]</td><td>ResNet18</td><td>7.9/6.5</td><td>26.2</td><td>7.5/6.3</td><td>26.8</td><td>25.1</td><td>26.1</td></tr><tr><td>FCN [5]</td><td>Custom</td><td>-</td><td>23.7</td><td>-</td><td>23.9</td><td>23.3</td><td>25.1</td></tr><tr><td>CMA [37]</td><td>GoogLeNet</td><td>7.3/2.7</td><td>21.3</td><td>7.3/2.4</td><td>21.9</td><td>-</td><td>-</td></tr><tr><td>VAC [34]</td><td>ResNet18</td><td>7.9/2.5</td><td>21.2</td><td>8.4/2.6</td><td>22.3</td><td>-</td><td>-</td></tr><tr><td>SMKD [17]</td><td>ResNet18</td><td>6.8/2.5</td><td>20.8</td><td>6.3/2.3</td><td>21.0</td><td>20.8</td><td>22.4</td></tr><tr><td>TLP [22]</td><td>ResNet18</td><td>6.3/2.8</td><td>19.7</td><td>6.1/2.9</td><td>20.8</td><td>19.4</td><td>21.2</td></tr><tr><td>SEN [23]</td><td>ResNet18</td><td>5.8/2.6</td><td>19.5</td><td>7.3/4.0</td><td>21.0</td><td>19.3</td><td>20.7</td></tr><tr><td>SLT* [2]</td><td>GoogLeNet</td><td>-</td><td>-</td><td>-</td><td>-</td><td>24.5</td><td>24.6</td></tr><tr><td>CNN+LSTM+HMM* [26]</td><td>GoogLeNet</td><td>-</td><td>26.0</td><td>-</td><td>26.0</td><td>22.1</td><td>24.1</td></tr><tr><td>DNF* [7]</td><td>GoogLeNet</td><td>7.3/3.3</td><td>23.1</td><td>6.7/3.3</td><td>22.9</td><td>-</td><td>-</td></tr><tr><td>STMC* [54]</td><td>VGG11</td><td>7.7/3.4</td><td>21.1</td><td>7.4/2.6</td><td>20.7</td><td>19.6</td><td>21.0</td></tr><tr><td>C²SLR* [55]</td><td>ResNet18</td><td>-</td><td>20.5</td><td>-</td><td>20.4</td><td>20.2</td><td>20.4</td></tr><tr><td>CorrNet</td><td>ResNet18</td><td>5.6/2.8</td><td>18.8</td><td>5.7/2.3</td><td>19.4</td><td>18.9</td><td>20.5</td></tr></table>",
1636
+ "bbox": [
1637
+ 184,
1638
+ 87,
1639
+ 785,
1640
+ 334
1641
+ ],
1642
+ "page_idx": 7
1643
+ },
1644
+ {
1645
+ "type": "text",
1646
+ "text": "per left figure specially attends to the quickly moving right hand to capture sign information.",
1647
+ "bbox": [
1648
+ 75,
1649
+ 400,
1650
+ 468,
1651
+ 430
1652
+ ],
1653
+ "page_idx": 7
1654
+ },
1655
+ {
1656
+ "type": "text",
1657
+ "text": "Visualizations for identification module. Fig. 6 shows the heatmaps generated by our identification module. Our identification module could generally focus on the human body (light yellow areas). Especially, it pays major attention to regions like hands and face (dark red areas). These results show that our identification module could dynamically emphasize important areas in expressing a sign, e.g., hands and face, and suppress other regions.",
1658
+ "bbox": [
1659
+ 75,
1660
+ 431,
1661
+ 468,
1662
+ 551
1663
+ ],
1664
+ "page_idx": 7
1665
+ },
1666
+ {
1667
+ "type": "text",
1668
+ "text": "4.4. Comparison with State-of-the-Art Methods",
1669
+ "text_level": 1,
1670
+ "bbox": [
1671
+ 76,
1672
+ 560,
1673
+ 444,
1674
+ 575
1675
+ ],
1676
+ "page_idx": 7
1677
+ },
1678
+ {
1679
+ "type": "text",
1680
+ "text": "PHOENIX14 and PHOENIX14-T. Tab. 8 shows a comprehensive comparison between our CorrNet and other state-of-the-art methods. The entries notated with * indicate these methods utilize additional factors like face or hand features for better accuracy. We notice that CorrNet outperforms other state-of-the-art methods by a large margin upon both datasets, thanks to its special attention on body trajectories. Especially, CorrNet outperforms previous CSLR methods equipped with hand and faces acquired by heavy pose-estimation networks or pre-extracted heatmaps (notated with *), without additional expensive supervision.",
1681
+ "bbox": [
1682
+ 75,
1683
+ 583,
1684
+ 468,
1685
+ 750
1686
+ ],
1687
+ "page_idx": 7
1688
+ },
1689
+ {
1690
+ "type": "text",
1691
+ "text": "CSL-Daily. CSL-Daily is a recently released large-scale dataset with the largest vocabulary size (2k) among commonly-used CSLR datasets, with a wide content covering family life, social contact and so on. Tab. 9 shows that our CorrNet achieves new state-of-the-art accuracy upon this challenging dataset with notable progress, which generalizes well upon real-world scenarios.",
1692
+ "bbox": [
1693
+ 75,
1694
+ 750,
1695
+ 468,
1696
+ 853
1697
+ ],
1698
+ "page_idx": 7
1699
+ },
1700
+ {
1701
+ "type": "text",
1702
+ "text": "CSL. As shown in tab. 10, our CorrNet could achieve extremely superior accuracy (0.8% WER) upon this well-examined dataset, outperforming existing CSLR methods.",
1703
+ "bbox": [
1704
+ 75,
1705
+ 854,
1706
+ 468,
1707
+ 900
1708
+ ],
1709
+ "page_idx": 7
1710
+ },
1711
+ {
1712
+ "type": "table",
1713
+ "img_path": "images/e20b451a927b8824c133a8890a4480295815c7c702a7750488fbc8b81f450201.jpg",
1714
+ "table_caption": [
1715
+ "Table 8. Comparison with state-of-the-art methods on the PHOENIX14 and PHOENIX14-T datasets. * indicates extra clues such as face or hand features are included by additional networks or pre-extracted heatmaps."
1716
+ ],
1717
+ "table_footnote": [],
1718
+ "table_body": "<table><tr><td>Methods</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>LS-HAN [24]</td><td>39.0</td><td>39.4</td></tr><tr><td>TIN-Iterative [7]</td><td>32.8</td><td>32.4</td></tr><tr><td>Joint-SLRT [3]</td><td>33.1</td><td>32.0</td></tr><tr><td>FCN [5]</td><td>33.2</td><td>32.5</td></tr><tr><td>BN-TIN [53]</td><td>33.6</td><td>33.1</td></tr><tr><td>CorrNet</td><td>30.6</td><td>30.1</td></tr></table>",
1719
+ "bbox": [
1720
+ 578,
1721
+ 397,
1722
+ 815,
1723
+ 508
1724
+ ],
1725
+ "page_idx": 7
1726
+ },
1727
+ {
1728
+ "type": "table",
1729
+ "img_path": "images/6c8c6c35c9df3b56e4470d6d6c5ef6e3bad11964644939e27f0e44fe400d0c2d.jpg",
1730
+ "table_caption": [
1731
+ "Table 9. Comparison with state-of-the-art methods on the CSL-Daily dataset [53]."
1732
+ ],
1733
+ "table_footnote": [],
1734
+ "table_body": "<table><tr><td>Methods</td><td>WER(%)</td></tr><tr><td>LS-HAN [24]</td><td>17.3</td></tr><tr><td>SubUNet [6]</td><td>11.0</td></tr><tr><td>SF-Net [51]</td><td>3.8</td></tr><tr><td>FCN [5]</td><td>3.0</td></tr><tr><td>STMC [54]</td><td>2.1</td></tr><tr><td>VAC [34]</td><td>1.6</td></tr><tr><td>C²SLR [55]</td><td>0.9</td></tr><tr><td>CorrNet</td><td>0.8</td></tr></table>",
1735
+ "bbox": [
1736
+ 611,
1737
+ 561,
1738
+ 781,
1739
+ 704
1740
+ ],
1741
+ "page_idx": 7
1742
+ },
1743
+ {
1744
+ "type": "text",
1745
+ "text": "Table 10. Comparison with state-of-the-art methods on the CSL dataset [24].",
1746
+ "bbox": [
1747
+ 498,
1748
+ 713,
1749
+ 890,
1750
+ 742
1751
+ ],
1752
+ "page_idx": 7
1753
+ },
1754
+ {
1755
+ "type": "text",
1756
+ "text": "5. Conclusion",
1757
+ "text_level": 1,
1758
+ "bbox": [
1759
+ 500,
1760
+ 763,
1761
+ 617,
1762
+ 779
1763
+ ],
1764
+ "page_idx": 7
1765
+ },
1766
+ {
1767
+ "type": "text",
1768
+ "text": "This paper introduces a correlation module to capture trajectories between adjacent frames and an identification module to locate body regions. Comparisons with previous CSLR methods with spatial-temporal reasoning ability or equipped with hand and face features demonstrate the superiority of CorrNet. Visualizations show that CorrNet could generally attend to hand and face regions to capture body trajectories.",
1769
+ "bbox": [
1770
+ 496,
1771
+ 787,
1772
+ 890,
1773
+ 900
1774
+ ],
1775
+ "page_idx": 7
1776
+ },
1777
+ {
1778
+ "type": "page_number",
1779
+ "text": "8",
1780
+ "bbox": [
1781
+ 478,
1782
+ 924,
1783
+ 488,
1784
+ 935
1785
+ ],
1786
+ "page_idx": 7
1787
+ },
1788
+ {
1789
+ "type": "text",
1790
+ "text": "References",
1791
+ "text_level": 1,
1792
+ "bbox": [
1793
+ 78,
1794
+ 121,
1795
+ 173,
1796
+ 136
1797
+ ],
1798
+ "page_idx": 8
1799
+ },
1800
+ {
1801
+ "type": "list",
1802
+ "sub_type": "ref_text",
1803
+ "list_items": [
1804
+ "[1] Nikolas Adaloglou, Theocharis Chatzis, Ilias Papastratis, Andreas Stergioulas, Georgios Th Papadopoulos, Vassia Zacharopoulou, George J Xydopoulos, Klimnis Atzakas, Dimitris Papazachariou, and Petros Daras. A comprehensive study on deep learning-based methods for sign language recognition. IEEE Transactions on Multimedia, 24:1750-1762, 2021. 3",
1805
+ "[2] Necati Cihan Camgoz, Simon Hadfield, Oscar Koller, Hermann Ney, and Richard Bowden. Neural sign language translation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7784-7793, 2018. 2, 5, 8",
1806
+ "[3] Necati Cihan Camgoz, Oscar Koller, Simon Hadfield, and Richard Bowden. Sign language transformers: Joint end-to-end sign language recognition and translation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10023-10033, 2020. 8",
1807
+ "[4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 1, 6",
1808
+ "[5] Ka Leong Cheng, Zhaoyang Yang, Qifeng Chen, and Yu-Wing Tai. Fully convolutional networks for continuous sign language recognition. In ECCV, 2020. 1, 2, 8",
1809
+ "[6] Necati Cihan Camgoz, Simon Hadfield, Oscar Koller, and Richard Bowden. Subunets: End-to-end hand shape and continuous sign language recognition. In ICCV, 2017. 8",
1810
+ "[7] Runpeng Cui, Hu Liu, and Changshui Zhang. A deep neural framework for continuous sign language recognition by iterative training. TMM, 21(7):1880-1891, 2019. 1, 2, 7, 8",
1811
+ "[8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 5",
1812
+ "[9] Ali Diba, Mohsen Fayyaz, Vivek Sharma, M Mahdi Arzani, Rahman Yousefzadeh, Juergen Gall, and Luc Van Gool. Spatio-temporal channel correlation networks for action classification. In Proceedings of the European Conference on Computer Vision (ECCV), pages 284-299, 2018. 2",
1813
+ "[10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 2758-2766, 2015. 2",
1814
+ "[11] Philippe Dreuw, David Rybach, Thomas Deselaers, Morteza Zahedi, and Hermann Ney. Speech recognition techniques for a sign language recognition system. hand, 60:80, 2007. 1, 3",
1815
+ "[12] Christoph Feichtenhofer, Axel Pinz, and Andrew Zisserman. Detect to track and track to detect. In Proceedings of the IEEE international conference on computer vision, pages 3038-3046, 2017. 2"
1816
+ ],
1817
+ "bbox": [
1818
+ 78,
1819
+ 147,
1820
+ 470,
1821
+ 898
1822
+ ],
1823
+ "page_idx": 8
1824
+ },
1825
+ {
1826
+ "type": "list",
1827
+ "sub_type": "ref_text",
1828
+ "list_items": [
1829
+ "[13] William T Freeman and Michal Roth. Orientation histograms for hand gesture recognition. In International workshop on automatic face and gesture recognition, volume 12, pages 296-301. Zurich, Switzerland, 1995. 2",
1830
+ "[14] Wen Gao, Gaolin Fang, Debin Zhao, and Yiqiang Chen. A chinese sign language recognition system based on sofm/srn/hmm. Pattern Recognition, 37(12):2389-2402, 2004. 2",
1831
+ "[15] Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In Proceedings of the 23rd international conference on Machine learning, pages 369-376, 2006. 2, 3",
1832
+ "[16] Junwei Han, George Awad, and Alistair Sutherland. Modelling and segmenting subunits for sign language recognition based on hand motion analysis. Pattern Recognition Letters, 30(6):623-633, 2009. 2",
1833
+ "[17] Aiming Hao, Yuecong Min, and Xilin Chen. Self-mutual distillation learning for continuous sign language recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11303-11312, 2021. 1, 2, 8",
1834
+ "[18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5",
1835
+ "[19] Hezhen Hu, Weichao Zhao, Wengang Zhou, Yuechen Wang, and Houqiang Li. Signbert: Pre-training of hand-model-aware representation for sign language recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11087-11096, 2021. 2",
1836
+ "[20] Hezhen Hu, Wengang Zhou, and Houqiang Li. Hand-model-aware sign language recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 1558-1566, 2021. 2",
1837
+ "[21] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7132-7141, 2018. 2, 6",
1838
+ "[22] Lianyu Hu, Liqing Gao, Zekang Liu, and Wei Feng. Temporal lift pooling for continuous sign language recognition. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXV, pages 511-527. Springer, 2022. 2, 8",
1839
+ "[23] Lianyu Hu, Liqing Gao, Zekang Liu, and Wei Feng. Self-emphasizing network for continuous sign language recognition. In Thirty-seventh AAAI conference on artificial intelligence, 2023. 2, 8",
1840
+ "[24] Jie Huang, Wengang Zhou, Qilin Zhang, Houqiang Li, and Weiping Li. Video-based sign language recognition without temporal segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018. 2, 5, 8",
1841
+ "[25] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 5",
1842
+ "[26] Oscar Koller, Necati Cihan Camgoz, Hermann Ney, and Richard Bowden. Weakly supervised learning with multistream cnn-lstm-hmms to discover sequential parallelism in sign language videos. PAMI, 42(9):2306-2320, 2019. 2, 7, 8"
1843
+ ],
1844
+ "bbox": [
1845
+ 501,
1846
+ 92,
1847
+ 890,
1848
+ 898
1849
+ ],
1850
+ "page_idx": 8
1851
+ },
1852
+ {
1853
+ "type": "page_number",
1854
+ "text": "9",
1855
+ "bbox": [
1856
+ 478,
1857
+ 924,
1858
+ 491,
1859
+ 936
1860
+ ],
1861
+ "page_idx": 8
1862
+ },
1863
+ {
1864
+ "type": "list",
1865
+ "sub_type": "ref_text",
1866
+ "list_items": [
1867
+ "[27] Oscar Koller, Jens Forster, and Hermann Ney. Continuous sign language recognition: Towards large vocabulary statistical recognition systems handling multiple signers. Computer Vision and Image Understanding, 141:108-125, 2015. 2, 5",
1868
+ "[28] Oscar Koller, O Zargaran, Hermann Ney, and Richard Bowden. Deep sign: Hybrid cnn-hmm for continuous sign language recognition. In Proceedings of the British Machine Vision Conference 2016, 2016. 2",
1869
+ "[29] Oscar Koller, Sepehr Zargaran, and Hermann Ney. Re-sign: Re-aligned end-to-end sequence modelling with deep recurrent cnn-hmms. In CVPR, 2017. 2",
1870
+ "[30] Myunggi Lee, Seungeui Lee, Sungjoon Son, Gyutae Park, and Nojun Kwak. Motion feature network: Fixed motion filter for action recognition. In Proceedings of the European Conference on Computer Vision (ECCV), pages 387-403, 2018. 2",
1871
+ "[31] Ji Lin, Chuang Gan, and Song Han. Tsm: Temporal shift module for efficient video understanding. In Proceedings of the IEEE International Conference on Computer Vision, pages 7083-7093, 2019. 1, 6",
1872
+ "[32] Zhaoyang Liu, Donghao Luo, Yabiao Wang, Limin Wang, Ying Tai, Chengjie Wang, Jilin Li, Feiyue Huang, and Tong Lu. Teinet: Towards an efficient architecture for video recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 11669-11676, 2020. 1",
1873
+ "[33] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, and Jian Sun. Shufflenet v2: Practical guidelines for efficient cnn architecture design. In Proceedings of the European conference on computer vision (ECCV), pages 116-131, 2018. 6",
1874
+ "[34] Yuecong Min, Aiming Hao, Xiujuan Chai, and Xilin Chen. Visual alignment constraint for continuous sign language recognition. In ICCV, 2021. 1, 2, 5, 8",
1875
+ "[35] Zhe Niu and Brian Mak. Stochastic fine-grained labeling of multi-state sign glosses for continuous sign language recognition. In ECCV, 2020. 1, 2, 8",
1876
+ "[36] Sylvie CW Ong and Surendra Ranganath. Automatic sign language analysis: A survey and the future beyond lexical meaning. IEEE Transactions on Pattern Analysis & Machine Intelligence, 27(06):873-891, 2005. 1, 3",
1877
+ "[37] Junfu Pu, Wengang Zhou, Hezhen Hu, and Houqiang Li. Boosting continuous sign language recognition via cross modality augmentation. In ACM MM, 2020. 1, 2, 8",
1878
+ "[38] Junfu Pu, Wengang Zhou, and Houqiang Li. Iterative alignment network for continuous sign language recognition. In CVPR, 2019. 2",
1879
+ "[39] Ignacio Rocco, Relja Arandjelovic, and Josef Sivic. Convolutional neural network architecture for geometric matching. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6148-6157, 2017. 2",
1880
+ "[40] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 1, 7",
1881
+ "[41] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. Pwc-net: Cnns for optical flow using pyramid, warping, and"
1882
+ ],
1883
+ "bbox": [
1884
+ 78,
1885
+ 90,
1886
+ 468,
1887
+ 900
1888
+ ],
1889
+ "page_idx": 9
1890
+ },
1891
+ {
1892
+ "type": "list",
1893
+ "sub_type": "ref_text",
1894
+ "list_items": [
1895
+ "cost volume. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8934-8943, 2018. 2",
1896
+ "[42] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1-9, 2015. 6",
1897
+ "[43] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 1, 6",
1898
+ "[44] Anirudh Tunga, Sai Vidyaranya Nuthalapati, and Juan Wachs. Pose-based sign language recognition using gcn and bert. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 31-40, 2021. 2",
1899
+ "[45] Heng Wang, Du Tran, Lorenzo Torresani, and Matt Feiszli. Video modeling with correlation networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 352-361, 2020. 1, 2",
1900
+ "[46] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 7",
1901
+ "[47] Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. Non-local neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7794-7803, 2018. 6",
1902
+ "[48] Philippe Weinzaepfel, Jerome Revaud, Zaid Harchaoui, and Cordelia Schmid. Deepflow: Large displacement optical flow with deep matching. In Proceedings of the IEEE international conference on computer vision, pages 1385-1392, 2013. 2",
1903
+ "[49] Sanghyun Woo, Jongchan Park, Joon-Young Lee, and In So Kweon. Cbam: Convolutional block attention module. In Proceedings of the European conference on computer vision (ECCV), pages 3–19, 2018. 6",
1904
+ "[50] Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In Proceedings of the European conference on computer vision (ECCV), pages 305–321, 2018. 1",
1905
+ "[51] Zhaoyang Yang, Zhenmei Shi, Xiaoyong Shen, and Yu-Wing Tai. Sf-net: Structured feature network for continuous sign language recognition. arXiv preprint arXiv:1908.01341, 2019.8",
1906
+ "[52] Yue Zhao, Yuanjun Xiong, and Dahua Lin. Recognize actions by disentangling components of dynamics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6566-6575, 2018. 2",
1907
+ "[53] Hao Zhou, Wengang Zhou, Weizhen Qi, Junfu Pu, and Houqiang Li. Improving sign language translation with monolingual data by sign back-translation. In Proceedings"
1908
+ ],
1909
+ "bbox": [
1910
+ 503,
1911
+ 92,
1912
+ 890,
1913
+ 900
1914
+ ],
1915
+ "page_idx": 9
1916
+ },
1917
+ {
1918
+ "type": "page_number",
1919
+ "text": "10",
1920
+ "bbox": [
1921
+ 477,
1922
+ 924,
1923
+ 495,
1924
+ 936
1925
+ ],
1926
+ "page_idx": 9
1927
+ },
1928
+ {
1929
+ "type": "list",
1930
+ "sub_type": "ref_text",
1931
+ "list_items": [
1932
+ "of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1316-1325, 2021. 2, 5, 8",
1933
+ "[54] Hao Zhou, Wengang Zhou, Yun Zhou, and Houqiang Li. Spatial-temporal multi-cue network for continuous sign language recognition. In AAAI, 2020. 2, 7, 8",
1934
+ "[55] Ronglai Zuo and Brian Mak. C2slr: Consistency-enhanced continuous sign language recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5131-5140, 2022. 1, 2, 3, 5, 7, 8"
1935
+ ],
1936
+ "bbox": [
1937
+ 78,
1938
+ 90,
1939
+ 467,
1940
+ 219
1941
+ ],
1942
+ "page_idx": 10
1943
+ },
1944
+ {
1945
+ "type": "page_number",
1946
+ "text": "11",
1947
+ "bbox": [
1948
+ 477,
1949
+ 924,
1950
+ 491,
1951
+ 935
1952
+ ],
1953
+ "page_idx": 10
1954
+ }
1955
+ ]
2303.03xxx/2303.03202/52833c8e-2380-4398-9065-8291ff12fc58_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03202/52833c8e-2380-4398-9065-8291ff12fc58_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e10d3c9162df4a7ed1a5848d507d1d50ab0bfa19bb27a71c57424956f06b6faa
3
+ size 1376862
2303.03xxx/2303.03202/full.md ADDED
@@ -0,0 +1,379 @@
1
+ # Continuous Sign Language Recognition with Correlation Network
2
+
3
+ Lianyu Hu, Liqing Gao, Zekang Liu, Wei Feng
4
+
5
+ College of Intelligence and Computing, Tianjin University, Tianjin 300350, China
6
+
7
+ Code: https://github.com/hulianyuyy/CorrNet
8
+
9
+ # Abstract
10
+
11
+ Human body trajectories are a salient cue to identify actions in video. Such body trajectories are mainly conveyed by the hands and face across consecutive frames in sign language. However, current methods in continuous sign language recognition (CSLR) usually process frames independently, thus failing to capture cross-frame trajectories to effectively identify a sign. To handle this limitation, we propose the correlation network (CorrNet) to explicitly capture and leverage body trajectories across frames to identify signs. Specifically, a correlation module is first proposed to dynamically compute correlation maps between the current frame and adjacent frames to identify trajectories of all spatial patches. An identification module is then presented to dynamically emphasize the body trajectories within these correlation maps. As a result, the generated features are able to gain an overview of local temporal movements to identify a sign. Thanks to its special attention to body trajectories, CorrNet achieves new state-of-the-art accuracy on four large-scale datasets, i.e., PHOENIX14, PHOENIX14-T, CSL-Daily, and CSL. A comprehensive comparison with previous spatial-temporal reasoning methods verifies the effectiveness of CorrNet. Visualizations demonstrate the effects of CorrNet on emphasizing human body trajectories across adjacent frames.
12
+
13
+ # 1. Introduction
14
+
15
+ Sign language is one of the most widely-used communication tools for the deaf community in their daily life. However, mastering this language is rather difficult and time-consuming for hearing people, thus hindering direct communication between the two groups. To relieve this problem, isolated sign language recognition tries to classify a video segment into an independent gloss<sup>1</sup>. Continuous sign language recognition (CSLR) goes further by sequentially translating images into a series of glosses to express a sentence, which is more promising for real-life deployment.
16
+
17
+ ![](images/6521a2a95407e5b24af119b56bacf0eab481a1b1c157903c4dbc654f5d92dc9a.jpg)
18
+ Left frame
19
+
20
+ ![](images/a4a99a216aa9e71c179468ae99c69d03722df054737f66d146142f52e8714d4c.jpg)
21
+ Figure 1. Visualization of correlation maps with Grad-CAM [40]. It's observed that without extra supervision, our method could well attend to informative regions in adjacent left/right frames to identify human body trajectories.
22
+
23
+ ![](images/6733366ac65c9528551bdd52128754a8eb15332325cb4409b32662eafda77cad.jpg)
24
+ Right frame
25
+
26
+ Human body trajectories are a salient cue to identify actions in human-centric video understanding [45]. In sign language, such trajectories are mainly conveyed by both manual components (hand/arm gestures), and non-manual components (facial expressions, head movements, and body postures) [11,36]. Especially, both hands move horizontally and vertically across consecutive frames quickly, with finger twisting and facial expressions to express a sign. To track and leverage such body trajectories is of great importance to understanding sign language.
27
+
28
+ However, current CSLR methods [5,7,17,34,35,37,55] usually process each frame separately, thus failing to exploit such critical cues in the early stage. In particular, they usually adopt a shared 2D CNN to capture spatial features for each frame independently. In this sense, frames are processed individually without interacting with adjacent neighbors, and are thus unable to identify and leverage cross-frame trajectories to express a sign. The generated features are therefore not aware of local temporal patterns and fail to perceive the hand/face movements in expressing a sign. To handle this limitation, the well-known 3D convolution [4] or its $(2 + 1)\mathrm{D}$ variants [43, 50] are potential candidates to capture short-term temporal information to identify body trajectories. Other temporal methods like temporal shift [31] or temporal convolutions [32] can also attend to short-term temporal movements. However, it's hard for them to aggregate beneficial information from distant informative spatial regions due to their limited spatial-temporal receptive field.
29
+
30
+ Besides, as their structures are fixed for each sample during inference, they may fail to dynamically deal with different samples to identify informative regions. To tackle these problems, we propose to explicitly compute correlation maps between adjacent frames to capture body trajectories, referred to as CorrNet. As shown in fig. 1, our approach dynamically attends to informative regions in adjacent left/right frames to capture body trajectories, without relying on extra supervision.
31
+
32
+ Specifically, our CorrNet first employs a correlation module to compute correlation maps between the current frame and its adjacent frames to identify trajectories of all spatial patches. An identification module is then presented to dynamically identify and emphasize the body trajectories embodied within these correlation maps. This procedure doesn't rely on extra expensive supervision like body keypoints [54] or heatmaps [55], and can be trained end-to-end in a lightweight way. The resulting features are thus able to gain an overview of local temporal movements to identify a sign. Remarkably, CorrNet achieves new state-of-the-art accuracy on four large-scale datasets, i.e., PHOENIX14 [27], PHOENIX14-T [2], CSL-Daily [53], and CSL [24], thanks to its special attention to body trajectories. A comprehensive comparison with other spatial-temporal reasoning methods demonstrates the superiority of our method. Visualizations further verify the effects of CorrNet on emphasizing human body trajectories across adjacent frames.
33
+
34
+ # 2. Related Work
35
+
36
+ # 2.1. Continuous Sign Language Recognition
37
+
38
+ Sign language recognition methods can be roughly categorized into isolated sign language recognition [19, 20, 44] and continuous sign language recognition [5, 7, 34, 35, 38] (CSLR), and we focus on the latter in this paper. CSLR tries to translate image frames into corresponding glosses in a weakly-supervised way: only sentence-level label is provided. Earlier methods [13, 14] in CSLR always employ hand-crafted features or HMM-based systems [16, 27-29] to perform temporal modeling and translate sentences step by step. HMM-based systems first employ a feature extractor to capture visual features and then adopt an HMM to perform long-term temporal modeling.
39
+
40
+ The recent success of convolutional neural networks (CNNs) and recurrent neural networks (RNNs) brings huge progress for CSLR. The widely used CTC loss [15] in recent CSLR methods [5, 7, 34, 35, 37, 38] enables training deep networks in an end-to-end manner by sequentially aligning target sentences with input frames. These CTC-based methods first rely on a feature extractor, i.e., 3D or 2D&1D CNN hybrids, to extract frame-wise features, and then adopt an LSTM for capturing long-term temporal dependencies.
41
+
42
+ However, several methods [7,38] found that in such conditions the feature extractor is not well-trained, and present an iterative training strategy to relieve this problem, at the cost of much more computation. Some recent studies [5,17,34] try to directly enhance the feature extractor by adding alignment losses [17,34] or adopting pseudo labels [5] in a lightweight way, alleviating the heavy computational burden. More recent works enhance CSLR by squeezing more representative temporal features [22] or dynamically emphasizing informative spatial regions [23].
43
+
44
+ Our method is designed to explicitly incorporate body trajectories to identify a sign, especially those from the hands and face. Some previous methods have also explicitly leveraged hand and face features for better recognition. For example, CNN-LSTM-HMM [26] employs a multi-stream HMM (including hands and face) to integrate multiple visual inputs to improve recognition accuracy. STMC [54] first utilizes a pose-estimation network to estimate human body keypoints and then sends cropped appearance regions (including hands and face) for information integration. More recently, $\mathrm{C}^2\mathrm{SLR}$ [55] leverages pre-extracted pose keypoints as supervision to guide the model to explicitly focus on hand and face regions. Our method doesn't rely on additional cues like pre-extracted body keypoints [55] or multiple streams [26], which consume much more computation to leverage hand and face information. Instead, our model can be trained end-to-end to dynamically attend to body trajectories in a self-motivated way.
45
+
46
+ # 2.2. Applications of Correlation Operation
47
+
48
+ The correlation operation has been widely used in various domains, especially optical flow estimation and video action recognition. Rocco et al. [39] used it to estimate the geometric transformation between two images, and Feichtenhofer et al. [12] applied it to capture object co-occurrences across time in tracking. For optical flow estimation, Deep matching [48] computes correlation maps between image patches to find their dense correspondences. CNN-based methods like FlowNet [10] and PWC-Net [41] design a correlation layer to help perform multiplicative patch comparisons between two feature maps. For video action recognition, Zhao et al. [52] first employ a correlation layer to compute a cost volume to estimate motion information. STCNet [9] models spatial and temporal correlations separately, inspired by SENet [21]. MFNet [30] explicitly estimates an approximation of optical flow based on fixed motion filters. Wang et al. [45] design a learnable correlation filter and replace 3D convolutions with the proposed filter to capture spatial-temporal information. Different from these methods that explicitly or implicitly estimate optical flow, the correlation operator in our method is used in combination with other operations to identify and track body trajectories across frames.
49
+
50
+ ![](images/1bef9e6eccb64cd660f57d997c8de9b81dbbaf26bedc58dc89f29f8a9088cd81.jpg)
51
+ Figure 2. An overview for our CorrNet. It first employs a feature extractor (2D CNN) to capture frame-wise features, and then adopts a 1D CNN and a BiLSTM to perform short-term and long-term temporal modeling, respectively, followed by a classifier to predict sentences. We place our proposed identification module and correlation module after each stage of the feature extractor to identify body trajectories across adjacent frames.
52
+
53
+ # 3. Method
54
+
55
+ # 3.1. Overview
56
+
57
+ As shown in fig. 2, the backbone of CSLR models consists of a feature extractor (2D CNN $^2$ ), a 1D CNN, a BiLSTM, and a classifier (a fully connected layer) to perform prediction. Given a sign language video with $T$ input frames $x = \{x_{t}\}_{t=1}^{T} \in \mathcal{R}^{T \times 3 \times H_{0} \times W_{0}}$ , a CSLR model aims to translate the input video into a series of glosses $y = \{y_{i}\}_{i=1}^{N}$ to express a sentence, with $N$ denoting the length of the label sequence. Specifically, the feature extractor first processes input frames into frame-wise features $v = \{v_{t}\}_{t=1}^{T} \in \mathcal{R}^{T \times d}$ . Then the 1D CNN and BiLSTM perform short-term and long-term temporal modeling based on these extracted visual representations, respectively. Finally, the classifier employs widely-used CTC loss [15] to predict the probability of target gloss sequence $p(y|x)$ .
58
+
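+ To make the pipeline concrete, below is a minimal PyTorch-style sketch of such a CSLR backbone. The module sizes and the ResNet18 feature extractor are illustrative assumptions rather than the exact configuration used in the paper.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torchvision
+
+ class CSLRBackbone(nn.Module):
+     """Sketch of the CSLR pipeline: 2D CNN -> 1D CNN -> BiLSTM -> classifier."""
+     def __init__(self, num_glosses, d=512, hidden=1024):
+         super().__init__()
+         resnet = torchvision.models.resnet18(weights=None)
+         self.cnn2d = nn.Sequential(*list(resnet.children())[:-1])  # frame-wise spatial features
+         self.tconv = nn.Conv1d(d, d, kernel_size=5, padding=2)     # short-term temporal modeling
+         self.bilstm = nn.LSTM(d, hidden // 2, num_layers=2,
+                               bidirectional=True, batch_first=True)
+         self.classifier = nn.Linear(hidden, num_glosses + 1)       # +1 for the CTC blank token
+
+     def forward(self, x):                                  # x: (B, T, 3, H0, W0)
+         B, T = x.shape[:2]
+         v = self.cnn2d(x.flatten(0, 1)).flatten(1)          # (B*T, d) frame-wise features
+         v = v.view(B, T, -1).transpose(1, 2)                # (B, d, T)
+         v = self.tconv(v).transpose(1, 2)                   # (B, T, d)
+         h, _ = self.bilstm(v)                               # (B, T, hidden)
+         return self.classifier(h).log_softmax(-1)           # per-frame gloss log-probs
+ ```
+
+ The per-frame log-probabilities would then be fed to a CTC loss (e.g., `nn.CTCLoss`) together with the sentence-level gloss labels.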
59
+ The CSLR model processes input frames independently, failing to incorporate interactions between consecutive frames. We present a correlation module and an identification module to identify body trajectories across adjacent frames. Fig. 2 shows an example of a common feature extractor consisting of multiple stages. The two proposed modules are placed after each stage; their outputs are element-wise multiplied and then added into the original features via a learnable coefficient $\alpha$ .
60
+
61
+ ![](images/0331c421a90cb6bc7cbf7ba62b0b403de7486e5212ab84edf9d7e04b8db016e9.jpg)
62
+ Figure 3. Illustration for the correlation operator. It computes affinities between a feature patch $p(i,j)$ in $x_{t}$ and patches $p_{t+1}(i',j') / p_{t-1}(i',j')$ in adjacent frame $x_{t+1} / x_{t-1}$ .
63
+
64
+ $\alpha$ controls the contributions of the proposed modules, and is initialized as zero so that the whole model keeps its original behavior at the start of training. The correlation module computes correlation maps between consecutive frames to capture trajectories of all spatial patches. The identification module dynamically locates and emphasizes body trajectories embedded within these correlation maps. The outputs of the correlation and identification modules are multiplied to enhance inter-frame correlations.
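+
+ As a rough sketch of this insertion (with hypothetical `CorrelationModule` and `IdentificationModule` classes standing in for the two modules described below), each stage could be wrapped as follows; initializing `alpha` to zero keeps the stage equivalent to the plain backbone at the beginning of training.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class CorrBlock(nn.Module):
+     """Gates the element-wise product of the two module outputs into the stage features."""
+     def __init__(self, channels):
+         super().__init__()
+         self.corr = CorrelationModule(channels)       # hypothetical: cross-frame trajectories
+         self.ident = IdentificationModule(channels)   # hypothetical: emphasizes informative regions
+         self.alpha = nn.Parameter(torch.zeros(1))     # zero init -> original behavior at start
+
+     def forward(self, x):                             # x: (B, C, T, H, W) stage features
+         return x + self.alpha * (self.corr(x) * self.ident(x))
+ ```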
65
+
66
+ # 3.2. Correlation Module
67
+
68
+ Sign language is mainly conveyed by both manual components (hand/arm gestures), and non-manual components (facial expressions, head movements, and body postures) [11, 36]. However, these informative body parts, e.g., hands or face, are misaligned in adjacent frames. We propose to compute correlation maps between adjacent frames to identify body trajectories.
69
+
70
+ Each frame could be represented as a 3D tensor $C \times H \times W$ , where $C$ is the number of channels and $H \times W$ denotes the spatial size. Given a feature patch $p_t(i,j)$ in the current frame $x_t$ , we compute the affinity between $p_t(i,j)$ and another patch $p_{t+1}(i',j')$ in the adjacent frame $x_{t+1}$ , where $(i,j)$ is the spatial location of the patch. To restrict the computation, the size of the feature patch could be reduced to a minimum, i.e., a single pixel. The affinity between $p_t(i,j)$ and $p_{t+1}(i',j')$ is computed in a dot-product way as:
71
+
72
+ $$
73
+ A(i, j, i^{\prime}, j^{\prime}) = \frac{1}{C} \sum_{c=1}^{C} \left( p_{t}^{c}(i, j) \cdot p_{t+1}^{c}(i^{\prime}, j^{\prime}) \right). \tag{1}
74
+ $$
75
+
76
+ For a spatial location $(i,j)$ in $x_{t}$ , $(i^{\prime},j^{\prime})$ is often restricted within a $K \times K$ neighborhood in $x_{t+1}$ to relieve spatial misalignment. A visualization is given in fig. 3. Thus, for all pixels in $x_{t}$ , the correlation maps form a tensor of size $H \times W \times K \times K$ . $K$ could be set to a smaller value to keep semantic consistency, or to a larger value to attend to distant informative regions.
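+
+ As an illustration, the affinities of eq. 1 can be computed for all positions at once. The sketch below (assuming PyTorch tensors) uses the full image as the neighborhood, i.e., $K = H$ or $W$; a restricted $K \times K$ window would instead gather neighboring patches, e.g., with `unfold`.
+
+ ```python
+ import torch
+
+ def correlation_map(x_t, x_adj):
+     """Eq. 1: channel-averaged dot-product affinities between every position of the
+     current frame x_t and every position of an adjacent frame x_adj.
+     x_t, x_adj: (B, C, H, W) -> A: (B, H, W, H, W)."""
+     C = x_t.shape[1]
+     return torch.einsum('bchw,bcij->bhwij', x_t, x_adj) / C
+ ```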
77
+
78
+ Given the correlation map between a pixel and its neighbors in the adjacent frame $x_{t + 1}$ , we constrain its range into $(0, 1)$ to measure their semantic similarity by passing $A(i, j, i^{\prime}, j^{\prime})$ through a sigmoid function.
79
+
80
+ We further subtract 0.5 from the results to emphasize informative regions with positive values and suppress redundant areas with negative values:
81
+
82
+ $$
83
+ A^{\prime}(i, j, i^{\prime}, j^{\prime}) = \operatorname{Sigmoid}\left( A(i, j, i^{\prime}, j^{\prime}) \right) - 0.5 \tag{2}
84
+ $$
85
+
86
+ After identifying the trajectories between adjacent frames, we incorporate these local temporal movements into the current frame $x_{t}$ . Specifically, for a pixel in $x_{t}$ , its trajectories are aggregated from its $K \times K$ neighbors in adjacent frame $x_{t + 1}$ , by multiplying their features with the corresponding affinities as:
87
+
88
+ $$
89
+ T(i,j) = \sum_{i',j'} A'(i,j,i',j') * x_{t+1}(i',j'). \tag{3}
90
+ $$
91
+
92
+ In this way, each pixel becomes aware of its trajectories across consecutive frames. We aggregate bidirectional trajectories from both $x_{t-1}$ and $x_{t+1}$ , and attach learnable coefficients $\beta_1$ and $\beta_2$ to weigh the importance of the two directions. Thus, eq. 3 is updated as:
93
+
94
+ $$
95
+ \begin{aligned} T(i,j) ={} & \beta_{1} \cdot \sum_{i',j'} A^{\prime}_{t+1}(i,j,i',j') * x_{t+1}(i',j') \\ & + \beta_{2} \cdot \sum_{i',j'} A^{\prime}_{t-1}(i,j,i',j') * x_{t-1}(i',j') \end{aligned} \tag{4}
96
+ $$
97
+
98
+ where $\beta_{1}$ and $\beta_{2}$ are both initialized as 0.5. This correlation calculation is repeated for each frame to track body trajectories across the whole video.
99
+
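+ A minimal sketch of eqs. 2-4 (reusing the `correlation_map` helper above; per-frame tensors of shape $C \times H \times W$ and the 0.5 initialization of $\beta_1$ , $\beta_2$ are the only assumptions):
+
+ ```python
+ def aggregate_trajectories(x_t, x_prev, x_next, beta1=0.5, beta2=0.5):
+     """Bidirectional trajectory aggregation for one frame (eqs. 2-4)."""
+     A_next = torch.sigmoid(correlation_map(x_t, x_next)) - 0.5   # eq. (2)
+     A_prev = torch.sigmoid(correlation_map(x_t, x_prev)) - 0.5
+     # eq. (3): weight each neighbor's features by its (shifted) affinity and sum.
+     T_next = torch.einsum('hwuv,cuv->chw', A_next, x_next)
+     T_prev = torch.einsum('hwuv,cuv->chw', A_prev, x_prev)
+     return beta1 * T_next + beta2 * T_prev                       # eq. (4)
+ ```
+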
100
+ # 3.3. Identification Module
101
+
102
+ The correlation module computes correlation maps between each pixel and its $K \times K$ neighbors in the adjacent frames $x_{t-1}$ and $x_{t+1}$ . However, not all regions are critical for expressing a sign: only the informative regions carrying body trajectories should be emphasized in the current frame $x_t$ , while the trajectories of background or noise should be suppressed. We present an identification module to dynamically emphasize these informative spatial regions. Specifically, as informative regions like the hands and face are misaligned in adjacent frames, the identification module leverages closely correlated local spatial-temporal features to tackle the misalignment issue and locate informative regions.
103
+
104
+ As shown in fig. 4, the identification module first projects the input features $x \in \mathcal{R}^{T \times C \times H \times W}$ into $x_{r} \in \mathcal{R}^{T \times C/r \times H \times W}$ with a $1 \times 1 \times 1$ convolution to decrease the computations, where the channel reduction factor $r$ is 16 by default.
105
+
106
+ As the informative regions, e.g., the hands and face, are not exactly aligned in adjacent frames, it is necessary to consider a large spatial-temporal neighborhood to identify these features.
107
+
108
+ ![](images/af232ab19c1642040dd7a71df28ebb9578142e8cdfc504693896298d7b5f0f13.jpg)
109
+ Figure 4. Illustration for our identification module.
110
+
111
+ Instead of directly employing a large 3D spatial-temporal kernel, we adopt a multi-scale paradigm that decomposes it into parallel branches with progressive dilation rates, which reduces the required computations and increases the model capacity.
112
+
113
+ Specifically, as shown in fig. 4, with the same small base convolution kernel of size $K_{t} \times K_{s} \times K_{s}$ , we employ multiple convolutions whose dilation rates increase along the spatial and temporal dimensions concurrently. The spatial and temporal dilation rates range from 1 to $N_{s}$ and from 1 to $N_{t}$ , respectively, resulting in $N_{s} \times N_{t}$ branches in total. Group convolutions are employed in each branch to reduce parameters and computations. Features from different branches are multiplied with learnable coefficients $\{\sigma_{1}, \dots, \sigma_{N_{s} \times N_{t}}\}$ to control their importance, and are then added to mix information from branches with various spatial-temporal receptive fields:
114
+
115
+ $$
116
+ x_{m} = \sum_{i=1}^{N_{s}} \sum_{j=1}^{N_{t}} \sigma_{i,j} \cdot \mathrm{Conv}_{i,j}(x_{r}) \tag{5}
117
+ $$
118
+
119
+ where $\mathrm{Conv}_{i,j}$ is the group-wise convolution of branch $(i,j)$ with dilation rate $(j,i,i)$ , so that different branches receive features from spatial-temporal neighborhoods of different sizes.
120
+
121
+ After aggregating features from a large spatial-temporal neighborhood, $x_{m}$ is sent into a $1 \times 1 \times 1$ convolution to project its channels back to $C$ . It then passes through a sigmoid function to generate attention maps $M \in \mathcal{R}^{T \times C \times H \times W}$ whose values range within $(0,1)$ .
122
+
123
+ Specifically, a constant value of 0.5 is further subtracted from $M$ to emphasize informative regions with positive values and suppress redundant areas with negative values:
124
+
125
+ $$
126
+ M = \mathrm{Sigmoid}\left( \mathrm{Conv}_{1 \times 1 \times 1}(x_{m}) \right) - 0.5. \tag{6}
127
+ $$
128
+
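+ The sketch below (our illustration, not the authors' implementation; the number of convolution groups and the initialization of the $\sigma$ coefficients are assumptions) shows one way to realize eqs. 5-6 as a block of parallel dilated group convolutions:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class IdentificationModule(nn.Module):
+     """Parallel group convolutions with growing spatial/temporal dilation (eq. 5),
+     mixed by learnable coefficients and turned into attention maps M (eq. 6)."""
+
+     def __init__(self, channels, r=16, Ns=3, Nt=4, Kt=3, Ks=3, groups=4):
+         super().__init__()
+         c_r = channels // r
+         self.reduce = nn.Conv3d(channels, c_r, kernel_size=1)
+         self.branches = nn.ModuleList()
+         for i in range(1, Ns + 1):          # spatial dilation rate
+             for j in range(1, Nt + 1):      # temporal dilation rate
+                 pad = (j * (Kt - 1) // 2, i * (Ks - 1) // 2, i * (Ks - 1) // 2)
+                 self.branches.append(nn.Conv3d(c_r, c_r, (Kt, Ks, Ks), padding=pad,
+                                                dilation=(j, i, i), groups=groups))
+         self.sigma = nn.Parameter(torch.ones(len(self.branches)))
+         self.expand = nn.Conv3d(c_r, channels, kernel_size=1)
+
+     def forward(self, x):                   # x: (N, C, T, H, W) in PyTorch layout
+         x_r = self.reduce(x)
+         x_m = sum(s * conv(x_r) for s, conv in zip(self.sigma, self.branches))  # eq. (5)
+         return torch.sigmoid(self.expand(x_m)) - 0.5                            # eq. (6)
+ ```
+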
129
+ Given the attention maps $M$ that identify informative regions, they are multiplied with the trajectories $T(x)$ aggregated by the correlation module to emphasize body trajectories and suppress others such as background or noise. The refined trajectory information is finally incorporated into the original spatial features $x$ via a residual connection:
130
+
131
+ $$
132
+ x^{\mathrm{out}} = x + \alpha \, T(x) \cdot M. \tag{7}
133
+ $$
134
+
135
+ As stated before, $\alpha$ is initialized as zero to keep the original spatial features.
136
+
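+ Putting the two modules together, the per-stage fusion of eq. 7 can be sketched as follows (an illustrative outline reusing the `IdentificationModule` sketch above; the trajectory tensor $T(x)$ is assumed to be computed frame-wise by the correlation module of eqs. 2-4):
+
+ ```python
+ class CorrNetBlock(nn.Module):
+     """Inserted after a backbone stage: x_out = x + alpha * (T(x) * M), eq. (7)."""
+
+     def __init__(self, channels):
+         super().__init__()
+         self.identification = IdentificationModule(channels)
+         self.alpha = nn.Parameter(torch.zeros(1))    # initialized to zero (sec. 3.1)
+
+     def forward(self, x, trajectories):
+         # x, trajectories: (N, C, T, H, W); `trajectories` is T(x) from eqs. (2)-(4).
+         M = self.identification(x)                   # attention maps, eq. (6)
+         return x + self.alpha * trajectories * M     # eq. (7)
+ ```
+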
137
+ # 4. Experiments
138
+
139
+ # 4.1. Experimental Setup
140
+
141
+ # 4.1.1 Datasets.
142
+
143
+ PHOENIX14 [27] is recorded from a German weather forecast broadcast with nine actors in front of a clean background, at a resolution of $210 \times 260$ . It contains 6841 sentences with a vocabulary of 1295 signs, divided into 5672 training samples, 540 development (Dev) samples and 629 testing (Test) samples.
144
+
145
+ PHOENIX14-T [2] is available for both CSLR and sign language translation tasks. It contains 8247 sentences with a vocabulary of 1085 signs, split into 7096 training instances, 519 development (Dev) instances and 642 testing (Test) instances.
146
+
147
+ CSL-Daily [53] revolves around daily life and is recorded indoors at 30 fps by 10 signers. It contains 20654 sentences, divided into 18401 training samples, 1077 development (Dev) samples and 1176 testing (Test) samples.
148
+
149
+ CSL [24] is collected in a laboratory environment by fifty signers, with a vocabulary of 178 signs over 100 sentences. It contains 25000 videos, divided into training and testing sets at a ratio of 8:2.
150
+
151
+ # 4.1.2 Training details.
152
+
153
+ For fair comparisons, we follow the same setting as state-of-the-art methods [34, 55] to prepare our model. We adopt ResNet18 [18] as the 2D CNN backbone with ImageNet [8] pretrained weights. Following these methods, the 1D CNN is set as a sequence of $\{\mathrm{K}5,\mathrm{P}2,\mathrm{K}5,\mathrm{P}2\}$ layers, where $\mathrm{K}\sigma$ and $\mathrm{P}\sigma$ denote a 1D convolutional layer and a pooling layer with kernel size $\sigma$ , respectively. A two-layer BiLSTM with hidden size 1024 is attached for long-term temporal modeling, followed by a fully connected layer for sentence prediction. We train our models for 40 epochs with
154
+
155
+ <table><tr><td>Configurations</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>-</td><td>20.2</td><td>21.0</td></tr><tr><td>Nt=4, Ns=1</td><td>19.6</td><td>20.1</td></tr><tr><td>Nt=4, Ns=2</td><td>19.2</td><td>19.8</td></tr><tr><td>Nt=4, Ns=3</td><td>18.8</td><td>19.4</td></tr><tr><td>Nt=4, Ns=4</td><td>19.1</td><td>19.7</td></tr><tr><td>Nt=2, Ns=3</td><td>19.4</td><td>19.9</td></tr><tr><td>Nt=3, Ns=3</td><td>19.1</td><td>19.7</td></tr><tr><td>Nt=4, Ns=3</td><td>18.8</td><td>19.4</td></tr><tr><td>Nt=5, Ns=3</td><td>19.3</td><td>19.8</td></tr><tr><td>Kt=9, Ks=7</td><td>19.9</td><td>20.4</td></tr></table>
156
+
157
+ Table 1. Ablations for the multi-scale architecture of identification module on the PHOENIX14 dataset.
158
+
159
+ an initial learning rate of 0.001, which is divided by 5 at epochs 20 and 30. The Adam [25] optimizer is adopted by default with weight decay 0.001 and batch size 2. All input frames are first resized to $256 \times 256$ and then randomly cropped to $224 \times 224$ with $50\%$ horizontal flipping and $20\%$ temporal rescaling during training. During inference, a $224 \times 224$ center crop is adopted. Following VAC [34], we employ the VE loss and VA loss for visual supervision, with weights 1.0 and 25.0, respectively. Our model is trained and evaluated on an NVIDIA 3090 graphics card.
160
+
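+ For reference, the optimization schedule above maps onto standard PyTorch as follows (a hedged sketch only; the placeholder `nn.Linear` stands in for the full model, and the actual data pipeline and CTC/VE/VA losses are omitted):
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ model = nn.Linear(8, 8)   # placeholder for the full CorrNet model
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-3)
+ # Initial LR 0.001, divided by 5 (gamma = 0.2) at epochs 20 and 30, 40 epochs in total.
+ scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 30], gamma=0.2)
+
+ for epoch in range(40):
+     # one epoch over PHOENIX14 with random 224x224 crops, 50% horizontal
+     # flipping and 20% temporal rescaling would go here
+     scheduler.step()
+ ```
+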
161
+ # 4.1.3 Evaluation Metric.
162
+
163
+ We use Word Error Rate (WER) as the evaluation metric. It is defined as the minimal number of substitution, insertion and deletion operations required to convert the predicted sentence into the reference sentence, normalized by the length of the reference:
164
+
165
+ $$
166
+ \mathrm{WER} = \frac{\#\mathrm{sub} + \#\mathrm{ins} + \#\mathrm{del}}{\#\mathrm{reference}}. \tag{8}
167
+ $$
168
+
169
+ Note that a lower WER indicates better accuracy.
170
+
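+ For completeness, the WER of eq. 8 can be computed with a standard edit-distance dynamic program; the generic sketch below (ours, not tied to any particular toolkit) operates on gloss sequences:
+
+ ```python
+ def word_error_rate(reference, hypothesis):
+     """Eq. (8): (#sub + #ins + #del) / #reference via Levenshtein distance."""
+     n, m = len(reference), len(hypothesis)
+     d = [[0] * (m + 1) for _ in range(n + 1)]
+     for i in range(n + 1):
+         d[i][0] = i                      # deletions
+     for j in range(m + 1):
+         d[0][j] = j                      # insertions
+     for i in range(1, n + 1):
+         for j in range(1, m + 1):
+             sub = d[i - 1][j - 1] + (reference[i - 1] != hypothesis[j - 1])
+             d[i][j] = min(sub, d[i - 1][j] + 1, d[i][j - 1] + 1)
+     return d[n][m] / max(n, 1)
+
+ print(word_error_rate("A B C D".split(), "A X C".split()))   # 0.5: one sub + one del
+ ```
+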
171
+ # 4.2. Ablation Study
172
+
173
+ We report ablation results on both the development (Dev) and testing (Test) sets of the PHOENIX14 dataset.
174
+
175
+ Study on the multi-scale architecture of the identification module. In tab. 1, without the identification module, our baseline achieves $20.2\%$ and $21.0\%$ WER on the Dev and Test sets, respectively. The base kernel size $K_{t} \times K_{s} \times K_{s}$ is set as $3 \times 3 \times 3$ . When fixing $N_{t} = 4$ and varying the spatial dilation rates to expand the spatial receptive fields, a larger $N_{s}$ consistently brings better accuracy until $N_{s}$ reaches 3, beyond which no further gain is observed. We set $N_{s}$ to 3 by default and then test the effect of $N_{t}$ . One can see that either increasing $N_{t}$ to 5 or decreasing it to 2 or 3 yields worse accuracy; we thus adopt $N_{t} = 4$ by default. We also compare our proposed multi-scale architecture with a normal implementation with more parameters. The receptive field
176
+
177
+ <table><tr><td>Configurations</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>-</td><td>20.2</td><td>21.0</td></tr><tr><td>K=3</td><td>19.6</td><td>20.4</td></tr><tr><td>K=5</td><td>19.4</td><td>20.2</td></tr><tr><td>K=7</td><td>19.2</td><td>20.0</td></tr><tr><td>K=9</td><td>19.1</td><td>19.8</td></tr><tr><td>K= H or W (Full image)</td><td>18.8</td><td>19.4</td></tr></table>
178
+
179
+ Table 2. Ablations for the articulated area of correlation module on the PHOENIX14 dataset.
180
+
181
+ <table><tr><td>Correlation</td><td>Identification</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>X</td><td>X</td><td>20.2</td><td>21.0</td></tr><tr><td>✓</td><td>X</td><td>19.5</td><td>20.0</td></tr><tr><td>X</td><td>✓</td><td>19.4</td><td>19.9</td></tr><tr><td>✓</td><td>✓</td><td>18.8</td><td>19.4</td></tr></table>
182
+
183
+ of the identification module with $N_{t} = 4$ and $N_{s} = 3$ is identical to that of a normal convolution with $K_{t} = 9$ and $K_{s} = 7$ . As shown at the bottom of tab. 1, although the normal convolution has more parameters and computations than our proposed architecture, it still performs worse, verifying the effectiveness of our design.
184
+
185
+ Study on the neighborhood $K$ of the correlation module. In tab. 2, when $K$ is not set, the correlation module is disabled. A larger $K$ , i.e., incorporating more spatial-temporal neighbors, consistently brings better accuracy. The performance peaks when $K$ equals $H$ or $W$ , i.e., the full image is incorporated; in this case, distant informative regions can interact to provide discriminative information. We set $K = H$ or $W$ by default.
186
+
187
+ Effectiveness of the two proposed modules. In tab. 3, we first notice that using only the correlation module or only the identification module already brings a notable accuracy boost, with $19.5\%$ & $20.0\%$ and $19.4\%$ & $19.9\%$ WER on the Dev and Test sets, respectively. Combining both modules is even more effective, reaching $18.8\%$ & $19.4\%$ WER on the Dev and Test sets, which is adopted as the default setting.
188
+
189
+ Effects of locations for CorrNet. Tab. 4 ablates the locations of our proposed modules, which are placed after Stage 2, 3 or 4. Choosing any one of these locations already brings a notable boost, reaching $19.6\%$ & $20.1\%$ , $19.5\%$ & $20.2\%$ and $19.4\%$ & $20.0\%$ WER, respectively. Combining two or more locations yields a larger gain, and the best result of $18.8\%$ & $19.4\%$ WER is obtained when the proposed modules are placed after Stages 2, 3 and 4, which is adopted by default.
190
+
191
+ Generalizability of CorrNet. We deploy CorrNet upon
192
+
193
+ Table 3. Ablations for the effectiveness of correlation module and identification module on the PHOENIX14 dataset.
194
+
195
+ <table><tr><td>Stage 2</td><td>Stage 3</td><td>Stage 4</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>X</td><td>X</td><td>X</td><td>20.2</td><td>21.0</td></tr><tr><td>✓</td><td>X</td><td>X</td><td>19.6</td><td>20.1</td></tr><tr><td>X</td><td>✓</td><td>X</td><td>19.5</td><td>20.2</td></tr><tr><td>X</td><td>X</td><td>✓</td><td>19.4</td><td>20.0</td></tr><tr><td>✓</td><td>✓</td><td>X</td><td>19.2</td><td>19.9</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>18.8</td><td>19.4</td></tr></table>
196
+
197
+ Table 4. Ablations for the locations of CorrNet on the PHOENIX14 dataset.
198
+
199
+ <table><tr><td>Configurations</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>SqueezeNet [21]</td><td>22.2</td><td>22.6</td></tr><tr><td>w/ CorrNet</td><td>20.2</td><td>20.4</td></tr><tr><td>ShuffleNet V2 [33]</td><td>21.7</td><td>22.2</td></tr><tr><td>w/ CorrNet</td><td>19.7</td><td>20.2</td></tr><tr><td>GoogleNet [42]</td><td>21.4</td><td>21.5</td></tr><tr><td>w/ CorrNet</td><td>19.6</td><td>19.8</td></tr></table>
200
+
201
+ Table 5. Ablations for the generalizability of CorrNet over multiple backbones on the PHOENIX14 dataset.
202
+
203
+ <table><tr><td>Methods</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>-</td><td>20.2</td><td>21.0</td></tr><tr><td>w/ SENet [21]</td><td>19.8</td><td>20.4</td></tr><tr><td>w/ CBAM [49]</td><td>19.7</td><td>20.2</td></tr><tr><td>w/ NLNet [47]</td><td>-</td><td>-</td></tr><tr><td>I3D [4]</td><td>22.6</td><td>22.9</td></tr><tr><td>R(2+1)D [43]</td><td>22.4</td><td>22.3</td></tr><tr><td>TSM [31]</td><td>19.9</td><td>20.5</td></tr><tr><td>CorrNet</td><td>18.8</td><td>19.4</td></tr></table>
204
+
205
+ Table 6. Comparison with other methods of spatial-temporal attention or temporal reasoning on the PHOENIX14 dataset.
206
+
207
+ multiple backbones, including SqueezeNet [21], ShuffleNet V2 [33] and GoogLeNet [42], to validate its generalizability in tab. 5. The proposed modules are placed after the three spatial downsampling layers of SqueezeNet, ShuffleNet V2 and GoogLeNet, respectively. Our proposed method generalizes well across different backbones, bringing $+2.0\%$ & $+2.2\%$ , $+2.0\%$ & $+2.0\%$ and $+1.8\%$ & $+1.7\%$ WER reductions on the Dev and Test sets, respectively.
208
+
209
+ Comparisons with other spatial-temporal reasoning methods. Tab. 6 compares our approach with other methods with spatial-temporal reasoning ability. SENet [21] and CBAM [49] perform channel attention to emphasize key information. NLNet [47] employs non-local means to aggregate spatial-temporal information from other frames. I3D [4] and R(2+1)D [43] deploy 3D or 2D+1D convolutions to capture spatial-temporal features. TSM [31] adopts a temporal shift operation to obtain features from adjacent frames.
210
+
211
+ ![](images/dc251d304930eca45fb08b488ba26fb982ac8fce8e3ff9e91561a0ee736e3de5.jpg)
212
+
213
+ ![](images/04a78753ff12682a48a0db3e0d0bbaf6363176106c7681694b581785828c53dd.jpg)
214
+
215
+ ![](images/d45fb699dffcc2ed46ac1c2a983a8dd26fa7d7359f2f3a418bf9a08522d8c5d3.jpg)
216
+
217
+ ![](images/5da1b21ffbb392ab568ab1af79cf610499198de4a74fc29e3dbce0b4a6a3e37c.jpg)
218
+ Left
219
+
220
+ ![](images/30d0bb73071782d3bf496a9d927acbe3045d10a94a01935355cf6dfca9b25ee4.jpg)
221
+ Figure 5. Visualizations of correlation maps for correlation module. Based on correlation operators, each frame could especially attend to informative regions in adjacent left/right frames like hands and face (dark red areas).
222
+
223
+ ![](images/ca37528798216d480189a605fb5cf371262105af88bc623c5687055369f6eaf9.jpg)
224
+ Right
225
+
226
+ ![](images/efe451a8062ace0ac58e80a5e5ecf1d020caddfc7db326532a140e628134a444.jpg)
227
+
228
+ ![](images/20cf0c20c8ae6e77d0dc647982930529a9371ec1615ab5069ed85f492d7026fe.jpg)
229
+
230
+ ![](images/2f07363275e37c9c5c7b6ff3c46974474904b852fc1d9f7c89561ca1b16b29e4.jpg)
231
+
232
+ ![](images/ecf58f29197304c3ff52c3db1bc3b33aeb1fba16e0f5c2d82bd08c3ad5e605fb.jpg)
233
+ Left
234
+
235
+ ![](images/e8e6ac5c8760f9d6f4c2f6d7404ed71773494c69609a11f8da43d4b7bbb83143.jpg)
236
+
237
+ ![](images/7d3aff697d02cece681cfd0909ffb265223c6da7239244165257f9093cbd334b.jpg)
238
+ Right
239
+
240
+ <table><tr><td>Methods</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>CNN+HMM+LSTM [26]</td><td>26.0</td><td>26.0</td></tr><tr><td>DNF [7]</td><td>23.1</td><td>22.9</td></tr><tr><td>STMC [54]</td><td>21.1</td><td>20.7</td></tr><tr><td>C²SLR [55]</td><td>20.5</td><td>20.4</td></tr><tr><td>CorrNet</td><td>18.8</td><td>19.4</td></tr></table>
241
+
242
+ Table 7. Comparison with other methods that explicitly exploit hand and face features on the PHOENIX14 dataset.
243
+
244
+ In the upper part of tab. 6, one can see that CorrNet largely outperforms other attention-based methods, i.e., SENet, CBAM and NLNet, thanks to its superior ability to identify and aggregate body trajectories. NLNet runs out of memory due to its quadratic computational complexity in the spatial-temporal size. In the bottom part of tab. 6, I3D and $\mathrm{R}(2+1)\mathrm{D}$ even degrade accuracy, which may be attributed to their limited spatial-temporal receptive fields and increased training complexity, while TSM only brings a slight $0.3\%$ & $0.3\%$ boost. Our proposed approach surpasses these methods by a clear margin, verifying its effectiveness in aggregating beneficial spatial-temporal information, even from distant spatial neighbors.
245
+
246
+ Comparisons with previous methods equipped with hand or face features. Many previous CSLR methods explicitly leverage hand and face features for better recognition, e.g., via multiple input streams [26], human body keypoints [54, 55] or pre-extracted hand patches [7]. They require expensive extra pose-estimation networks like HRNet [46] or additional training stages. Our approach does not rely on such extra supervision and can be trained end-to-end to dynamically attend to body trajectories such as the hands and face in a self-motivated way. Tab. 7 shows that our method outperforms these methods by a large margin.
247
+
248
+ ![](images/88b5c2ce168c86a54a8bed78df6db7ea2d244c3cef98d569b5f73a0427c6f040.jpg)
249
+ Raw
250
+
251
+ ![](images/7628ea62456ebb80c2835552151762c0b98fafecf3698a20b6309eec5dd29df4.jpg)
252
+
253
+ ![](images/fb00c1ec51cf04841582a9163b85263c8c7380038a5e8afaa2858c8c761f8653.jpg)
254
+
255
+ ![](images/12c3bc0ab54a97748cb81c206d49ac9e68e75429e88ae6b60bff38c30fcaa9ca.jpg)
256
+
257
+ ![](images/8ac3888f1e75298cf9af946019055584447819fd1bb3868f8a721db05509ac2a.jpg)
258
+ Heatmap
259
+
260
+ ![](images/928a75fb0f294e5a168cc27436e7459c7c926daeafe9893d64510aaddb57a2b4.jpg)
261
+
262
+ ![](images/b71394c5fe1116ecf20d3df69f4cacc1fcf41fd4a1690f8ac89d8c2308b0e26b.jpg)
263
+
264
+ ![](images/8b9b93ce62f81fe08ee6bababc44fd9a464a3536468c823742f8859891ed32c9.jpg)
265
+
266
+ ![](images/19feeeb2ee4473b4c19cfb98c4a0e3765e7424ba0ddabc2b35f17db5935dc1cb.jpg)
267
+ Raw
268
+
269
+ ![](images/3b14dab2714390bfd138e841d39c271d7a8390cc47b116e47ffbb6f1eecbc5cb.jpg)
270
+
271
+ ![](images/c8bef05374e98acbb4382a11705aa25c8ae64279938edab0cb39ab36988d64d5.jpg)
272
+
273
+ ![](images/e83d20fc59dd7758c3a3a95e29c511cd9a42b96604fc1b6c43abc92a1123e944.jpg)
274
+
275
+ ![](images/44adcc91c3fa0ebe30fcdd752315b99d7ed67606b6deb90431f3c910ffd47dfa.jpg)
276
+ Heatmap
277
+
278
+ ![](images/9d4705e64a227d166943293af643573f7cba4df85cf902735f5411cb3b5d0b35.jpg)
279
+ Figure 6. Visualizations of heatmaps by Grad-CAM [40]. Top: raw frames; Bottom: heatmaps of our identification module. Our identification module could generally focus on the human body (light yellow areas) and especially pays attention to informative regions like hands and face (dark red areas) to track body trajectories.
280
+
281
+ ![](images/02198b03120a7fff44b713aba7c7cb59f003c952060fc2a9aea3f0ebb158d2a2.jpg)
282
+
283
+ ![](images/ca793193f752210d9577df0b8130e1f62c92caaee4d2f7736a3c52314e455510.jpg)
284
+
285
+ # 4.3. Visualizations
286
+
287
+ Visualizations for the correlation module. Fig. 5 shows the correlation maps generated by our correlation module for adjacent frames. The reference point attends well to informative regions in the adjacent left/right frames, e.g., the hands or face, to track body trajectories when expressing a sign. In particular, it always focuses on the moving body parts that play a major role in expressing signs. For example, the reference point (left hand) in the upper-left figure especially attends to the quickly moving right hand to capture sign information.
288
+
289
+ <table><tr><td rowspan="3">Methods</td><td rowspan="3">Backbone</td><td colspan="4">PHOENIX14</td><td colspan="2">PHOENIX14-T</td></tr><tr><td colspan="2">Dev(%)</td><td colspan="2">Test(%)</td><td rowspan="2">Dev(%)</td><td rowspan="2">Test(%)</td></tr><tr><td>del/ins</td><td>WER</td><td>del/ins</td><td>WER</td></tr><tr><td>SFL [35]</td><td>ResNet18</td><td>7.9/6.5</td><td>26.2</td><td>7.5/6.3</td><td>26.8</td><td>25.1</td><td>26.1</td></tr><tr><td>FCN [5]</td><td>Custom</td><td>-</td><td>23.7</td><td>-</td><td>23.9</td><td>23.3</td><td>25.1</td></tr><tr><td>CMA [37]</td><td>GoogLeNet</td><td>7.3/2.7</td><td>21.3</td><td>7.3/2.4</td><td>21.9</td><td>-</td><td>-</td></tr><tr><td>VAC [34]</td><td>ResNet18</td><td>7.9/2.5</td><td>21.2</td><td>8.4/2.6</td><td>22.3</td><td>-</td><td>-</td></tr><tr><td>SMKD [17]</td><td>ResNet18</td><td>6.8/2.5</td><td>20.8</td><td>6.3/2.3</td><td>21.0</td><td>20.8</td><td>22.4</td></tr><tr><td>TLP [22]</td><td>ResNet18</td><td>6.3/2.8</td><td>19.7</td><td>6.1/2.9</td><td>20.8</td><td>19.4</td><td>21.2</td></tr><tr><td>SEN [23]</td><td>ResNet18</td><td>5.8/2.6</td><td>19.5</td><td>7.3/4.0</td><td>21.0</td><td>19.3</td><td>20.7</td></tr><tr><td>SLT* [2]</td><td>GoogLeNet</td><td>-</td><td>-</td><td>-</td><td>-</td><td>24.5</td><td>24.6</td></tr><tr><td>CNN+LSTM+HMM* [26]</td><td>GoogLeNet</td><td>-</td><td>26.0</td><td>-</td><td>26.0</td><td>22.1</td><td>24.1</td></tr><tr><td>DNF* [7]</td><td>GoogLeNet</td><td>7.3/3.3</td><td>23.1</td><td>6.7/3.3</td><td>22.9</td><td>-</td><td>-</td></tr><tr><td>STMC* [54]</td><td>VGG11</td><td>7.7/3.4</td><td>21.1</td><td>7.4/2.6</td><td>20.7</td><td>19.6</td><td>21.0</td></tr><tr><td>C²SLR* [55]</td><td>ResNet18</td><td>-</td><td>20.5</td><td>-</td><td>20.4</td><td>20.2</td><td>20.4</td></tr><tr><td>CorrNet</td><td>ResNet18</td><td>5.6/2.8</td><td>18.8</td><td>5.7/2.3</td><td>19.4</td><td>18.9</td><td>20.5</td></tr></table>
290
+
291
+
292
+
293
+ Visualizations for the identification module. Fig. 6 shows the heatmaps generated by our identification module. The module generally focuses on the human body (light yellow areas) and pays particular attention to regions like the hands and face (dark red areas). These results show that our identification module can dynamically emphasize the areas important for expressing a sign, e.g., the hands and face, and suppress other regions.
294
+
295
+ # 4.4. Comparison with State-of-the-Art Methods
296
+
297
+ PHOENIX14 and PHOENIX14-T. Tab. 8 shows a comprehensive comparison between our CorrNet and other state-of-the-art methods. Entries marked with * indicate methods that utilize additional cues like face or hand features for better accuracy. CorrNet outperforms the other state-of-the-art methods by a large margin on both datasets, thanks to its special attention to body trajectories. In particular, CorrNet outperforms previous CSLR methods equipped with hand and face features acquired by heavy pose-estimation networks or pre-extracted heatmaps (marked with *), without requiring such additional expensive supervision.
298
+
299
+ CSL-Daily. CSL-Daily is a recently released large-scale dataset with the largest vocabulary (2k signs) among commonly used CSLR datasets, covering a wide range of content such as family life and social contact. Tab. 9 shows that our CorrNet achieves a new state of the art on this challenging dataset with notable progress, demonstrating that it generalizes well to real-world scenarios.
300
+
301
+ CSL. As shown in tab. 10, our CorrNet achieves a very low 0.8% WER on this well-examined dataset, outperforming existing CSLR methods.
302
+
303
+ Table 8. Comparison with state-of-the-art methods on the PHOENIX14 and PHOENIX14-T datasets. * indicates extra clues such as face or hand features are included by additional networks or pre-extracted heatmaps.
304
+
305
+ <table><tr><td>Methods</td><td>Dev(%)</td><td>Test(%)</td></tr><tr><td>LS-HAN [24]</td><td>39.0</td><td>39.4</td></tr><tr><td>TIN-Iterative [7]</td><td>32.8</td><td>32.4</td></tr><tr><td>Joint-SLRT [3]</td><td>33.1</td><td>32.0</td></tr><tr><td>FCN [5]</td><td>33.2</td><td>32.5</td></tr><tr><td>BN-TIN [53]</td><td>33.6</td><td>33.1</td></tr><tr><td>CorrNet</td><td>30.6</td><td>30.1</td></tr></table>
306
+
307
+ Table 9. Comparison with state-of-the-art methods on the CSL-Daily dataset [53].
308
+
309
+ <table><tr><td>Methods</td><td>WER(%)</td></tr><tr><td>LS-HAN [24]</td><td>17.3</td></tr><tr><td>SubUNet [6]</td><td>11.0</td></tr><tr><td>SF-Net [51]</td><td>3.8</td></tr><tr><td>FCN [5]</td><td>3.0</td></tr><tr><td>STMC [54]</td><td>2.1</td></tr><tr><td>VAC [34]</td><td>1.6</td></tr><tr><td>C²SLR [55]</td><td>0.9</td></tr><tr><td>CorrNet</td><td>0.8</td></tr></table>
310
+
311
+ Table 10. Comparison with state-of-the-art methods on the CSL dataset [24].
312
+
313
+ # 5. Conclusion
314
+
315
+ This paper introduces a correlation module to capture trajectories between adjacent frames and an identification module to locate body regions. Comparisons with previous CSLR methods with spatial-temporal reasoning ability or equipped with hand and face features demonstrate the superiority of CorrNet. Visualizations show that CorrNet could generally attend to hand and face regions to capture body trajectories.
316
+
317
+ # References
318
+
319
+ [1] Nikolas Adaloglou, Theocharis Chatzis, Ilias Papastratis, Andreas Stergioulas, Georgios Th Papadopoulos, Vassia Zacharopoulou, George J Xydopoulos, Klimnis Atzakas, Dimitris Papazachariou, and Petros Daras. A comprehensive study on deep learning-based methods for sign language recognition. IEEE Transactions on Multimedia, 24:1750-1762, 2021. 3
320
+ [2] Necati Cihan Camgoz, Simon Hadfield, Oscar Koller, Hermann Ney, and Richard Bowden. Neural sign language translation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7784-7793, 2018. 2, 5, 8
321
+ [3] Necati Cihan Camgoz, Oscar Koller, Simon Hadfield, and Richard Bowden. Sign language transformers: Joint end-to-end sign language recognition and translation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10023-10033, 2020. 8
322
+ [4] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 1, 6
323
+ [5] Ka Leong Cheng, Zhaoyang Yang, Qifeng Chen, and Yu-Wing Tai. Fully convolutional networks for continuous sign language recognition. In ECCV, 2020. 1, 2, 8
324
+ [6] Necati Cihan Camgoz, Simon Hadfield, Oscar Koller, and Richard Bowden. Subunets: End-to-end hand shape and continuous sign language recognition. In ICCV, 2017. 8
325
+ [7] Runpeng Cui, Hu Liu, and Changshui Zhang. A deep neural framework for continuous sign language recognition by iterative training. TMM, 21(7):1880-1891, 2019. 1, 2, 7, 8
326
+ [8] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In 2009 IEEE conference on computer vision and pattern recognition, pages 248-255. IEEE, 2009. 5
327
+ [9] Ali Diba, Mohsen Fayyaz, Vivek Sharma, M Mahdi Arzani, Rahman Yousefzadeh, Juergen Gall, and Luc Van Gool. Spatio-temporal channel correlation networks for action classification. In Proceedings of the European Conference on Computer Vision (ECCV), pages 284-299, 2018. 2
328
+ [10] Alexey Dosovitskiy, Philipp Fischer, Eddy Ilg, Philip Hausser, Caner Hazirbas, Vladimir Golkov, Patrick Van Der Smagt, Daniel Cremers, and Thomas Brox. Flownet: Learning optical flow with convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 2758-2766, 2015. 2
329
+ [11] Philippe Dreuw, David Rybach, Thomas Deselaers, Morteza Zahedi, and Hermann Ney. Speech recognition techniques for a sign language recognition system. hand, 60:80, 2007. 1, 3
330
+ [12] Christoph Feichtenhofer, Axel Pinz, and Andrew Zisserman. Detect to track and track to detect. In Proceedings of the IEEE international conference on computer vision, pages 3038-3046, 2017. 2
331
+
332
+ [13] William T Freeman and Michal Roth. Orientation histograms for hand gesture recognition. In International workshop on automatic face and gesture recognition, volume 12, pages 296-301. Zurich, Switzerland, 1995. 2
333
+ [14] Wen Gao, Gaolin Fang, Debin Zhao, and Yiqiang Chen. A chinese sign language recognition system based on sofm/srn/hmm. Pattern Recognition, 37(12):2389-2402, 2004. 2
334
+ [15] Alex Graves, Santiago Fernández, Faustino Gomez, and Jürgen Schmidhuber. Connectionist temporal classification: labelling unsegmented sequence data with recurrent neural networks. In Proceedings of the 23rd international conference on Machine learning, pages 369-376, 2006. 2, 3
335
+ [16] Junwei Han, George Awad, and Alistair Sutherland. Modelling and segmenting subunits for sign language recognition based on hand motion analysis. Pattern Recognition Letters, 30(6):623-633, 2009. 2
336
+ [17] Aiming Hao, Yuecong Min, and Xilin Chen. Self-mutual distillation learning for continuous sign language recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11303-11312, 2021. 1, 2, 8
337
+ [18] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 5
338
+ [19] Hezhen Hu, Weichao Zhao, Wengang Zhou, Yuechen Wang, and Houqiang Li. Signbert: Pre-training of hand-model-aware representation for sign language recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11087-11096, 2021. 2
339
+ [20] Hezhen Hu, Wengang Zhou, and Houqiang Li. Hand-model-aware sign language recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 1558-1566, 2021. 2
340
+ [21] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7132-7141, 2018. 2, 6
341
+ [22] Lianyu Hu, Liqing Gao, Zekang Liu, and Wei Feng. Temporal lift pooling for continuous sign language recognition. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXV, pages 511-527. Springer, 2022. 2, 8
342
+ [23] Lianyu Hu, Liqing Gao, Zekang Liu, and Wei Feng. Self-emphasizing network for continuous sign language recognition. In Thirty-seventh AAAI conference on artificial intelligence, 2023. 2, 8
343
+ [24] Jie Huang, Wengang Zhou, Qilin Zhang, Houqiang Li, and Weiping Li. Video-based sign language recognition without temporal segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018. 2, 5, 8
344
+ [25] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 5
345
+ [26] Oscar Koller, Necati Cihan Camgoz, Hermann Ney, and Richard Bowden. Weakly supervised learning with multistream cnn-lstm-hmms to discover sequential parallelism in sign language videos. PAMI, 42(9):2306-2320, 2019. 2, 7, 8
346
+
347
+ [27] Oscar Koller, Jens Forster, and Hermann Ney. Continuous sign language recognition: Towards large vocabulary statistical recognition systems handling multiple signers. Computer Vision and Image Understanding, 141:108-125, 2015. 2, 5
348
+ [28] Oscar Koller, O Zargaran, Hermann Ney, and Richard Bowden. Deep sign: Hybrid cnn-hmm for continuous sign language recognition. In Proceedings of the British Machine Vision Conference 2016, 2016. 2
349
+ [29] Oscar Koller, Sepehr Zargaran, and Hermann Ney. Re-sign: Re-aligned end-to-end sequence modelling with deep recurrent cnn-hmms. In CVPR, 2017. 2
350
+ [30] Myunggi Lee, Seungeui Lee, Sungjoon Son, Gyutae Park, and Nojun Kwak. Motion feature network: Fixed motion filter for action recognition. In Proceedings of the European Conference on Computer Vision (ECCV), pages 387-403, 2018. 2
351
+ [31] Ji Lin, Chuang Gan, and Song Han. Tsm: Temporal shift module for efficient video understanding. In Proceedings of the IEEE International Conference on Computer Vision, pages 7083-7093, 2019. 1, 6
352
+ [32] Zhaoyang Liu, Donghao Luo, Yabiao Wang, Limin Wang, Ying Tai, Chengjie Wang, Jilin Li, Feiyue Huang, and Tong Lu. Teinet: Towards an efficient architecture for video recognition. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 11669-11676, 2020. 1
353
+ [33] Ningning Ma, Xiangyu Zhang, Hai-Tao Zheng, and Jian Sun. Shufflenet v2: Practical guidelines for efficient cnn architecture design. In Proceedings of the European conference on computer vision (ECCV), pages 116-131, 2018. 6
354
+ [34] Yuecong Min, Aiming Hao, Xiujuan Chai, and Xilin Chen. Visual alignment constraint for continuous sign language recognition. In ICCV, 2021. 1, 2, 5, 8
355
+ [35] Zhe Niu and Brian Mak. Stochastic fine-grained labeling of multi-state sign glosses for continuous sign language recognition. In ECCV, 2020. 1, 2, 8
356
+ [36] Sylvie CW Ong and Surendra Ranganath. Automatic sign language analysis: A survey and the future beyond lexical meaning. IEEE Transactions on Pattern Analysis & Machine Intelligence, 27(06):873-891, 2005. 1, 3
357
+ [37] Junfu Pu, Wengang Zhou, Hezhen Hu, and Houqiang Li. Boosting continuous sign language recognition via cross modality augmentation. In ACM MM, 2020. 1, 2, 8
358
+ [38] Junfu Pu, Wengang Zhou, and Houqiang Li. Iterative alignment network for continuous sign language recognition. In CVPR, 2019. 2
359
+ [39] Ignacio Rocco, Relja Arandjelovic, and Josef Sivic. Convolutional neural network architecture for geometric matching. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6148-6157, 2017. 2
360
+ [40] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision, pages 618-626, 2017. 1, 7
361
+ [41] Deqing Sun, Xiaodong Yang, Ming-Yu Liu, and Jan Kautz. Pwc-net: Cnns for optical flow using pyramid, warping, and
362
+
363
+ cost volume. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 8934-8943, 2018. 2
364
+ [42] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1-9, 2015. 6
365
+ [43] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 1, 6
366
+ [44] Anirudh Tunga, Sai Vidyaranya Nuthalapati, and Juan Wachs. Pose-based sign language recognition using gcn and bert. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 31-40, 2021. 2
367
+ [45] Heng Wang, Du Tran, Lorenzo Torresani, and Matt Feiszli. Video modeling with correlation networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 352-361, 2020. 1, 2
368
+ [46] Jingdong Wang, Ke Sun, Tianheng Cheng, Borui Jiang, Chaorui Deng, Yang Zhao, Dong Liu, Yadong Mu, Mingkui Tan, Xinggang Wang, et al. Deep high-resolution representation learning for visual recognition. IEEE transactions on pattern analysis and machine intelligence, 43(10):3349-3364, 2020. 7
369
+ [47] Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. Non-local neural networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 7794-7803, 2018. 6
370
+ [48] Philippe Weinzaepfel, Jerome Revaud, Zaid Harchaoui, and Cordelia Schmid. Deepflow: Large displacement optical flow with deep matching. In Proceedings of the IEEE international conference on computer vision, pages 1385-1392, 2013. 2
371
+ [49] Sanghyun Woo, Jongchan Park, Joon-Young Lee, and In So Kweon. Cbam: Convolutional block attention module. In Proceedings of the European conference on computer vision (ECCV), pages 3–19, 2018. 6
372
+ [50] Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In Proceedings of the European conference on computer vision (ECCV), pages 305–321, 2018. 1
373
+ [51] Zhaoyang Yang, Zhenmei Shi, Xiaoyong Shen, and Yu-Wing Tai. Sf-net: Structured feature network for continuous sign language recognition. arXiv preprint arXiv:1908.01341, 2019.8
374
+ [52] Yue Zhao, Yuanjun Xiong, and Dahua Lin. Recognize actions by disentangling components of dynamics. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6566-6575, 2018. 2
375
+ [53] Hao Zhou, Wengang Zhou, Weizhen Qi, Junfu Pu, and Houqiang Li. Improving sign language translation with monolingual data by sign back-translation. In Proceedings
376
+
377
+ of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1316-1325, 2021. 2, 5, 8
378
+ [54] Hao Zhou, Wengang Zhou, Yun Zhou, and Houqiang Li. Spatial-temporal multi-cue network for continuous sign language recognition. In AAAI, 2020. 2, 7, 8
379
+ [55] Ronglai Zuo and Brian Mak. C2slr: Consistency-enhanced continuous sign language recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5131-5140, 2022. 1, 2, 3, 5, 7, 8
2303.03xxx/2303.03202/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c1c6dd55323028936d3be192bbb1ca10954f743cdc8d223714dc6c5aaac2d8a9
3
+ size 599427
2303.03xxx/2303.03202/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03226/49c0fa96-6cd5-4100-9cb9-f0db954478b0_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03226/49c0fa96-6cd5-4100-9cb9-f0db954478b0_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03226/49c0fa96-6cd5-4100-9cb9-f0db954478b0_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a9dff2f0cd74f50e8fed29520a73f767f6094b5f142450d82741a24e028e131a
3
+ size 1845266
2303.03xxx/2303.03226/full.md ADDED
@@ -0,0 +1,648 @@
1
+ # Safe Reinforcement Learning via Probabilistic Logic Shields
2
+
3
+ Wen-Chi Yang $^{1}$ , Giuseppe Marra $^{1}$ , Gavin Rens and Luc De Raedt $^{1,2}$
4
+
5
+ <sup>1</sup>Leuven AI, KU Leuven, Belgium
6
+
7
+ $^{2}$ Centre for Applied Autonomous Sensor Systems, Örebro University, Sweden.
8
+
9
+ {wenchi.yang, giuseppe.marra, luc.deradet} @kuleuven.be, gavin.rens@gmail.com
10
+
11
+ # Abstract
12
+
13
+ Safe Reinforcement learning (Safe RL) aims at learning optimal policies while staying safe. A popular solution to Safe RL is shielding, which uses a logical safety specification to prevent an RL agent from taking unsafe actions. However, traditional shielding techniques are difficult to integrate with continuous, end-to-end deep RL methods. To this end, we introduce Probabilistic Logic Policy Gradient (PLPG). PLPG is a model-based Safe RL technique that uses probabilistic logic programming to model logical safety constraints as differentiable functions. Therefore, PLPG can be seamlessly applied to any policy gradient algorithm while still providing the same convergence guarantees. In our experiments, we show that PLPG learns safer and more rewarding policies compared to other state-of-the-art shielding techniques.
14
+
15
+ # 1 Introduction
16
+
17
+ Shielding is a popular Safe Reinforcement Learning (Safe RL) technique that aims at finding an optimal policy while staying safe [Jansen et al., 2020]. To do so, it relies on a shield, a logical component that monitors the agent's actions and rejects those that violate the given safety constraint. These rejection-based shields are typically based on formal verification, offering strong safety guarantees compared to other safe exploration techniques [García and Fernández, 2015]. While early shielding techniques operate completely on symbolic state spaces [Jansen et al., 2020; Alshiekh et al., 2018; Bastani et al., 2018], more recent ones have incorporated a neural policy learner to handle continuous state spaces [Hunt et al., 2021; Anderson et al., 2020; Harris and Schaub, 2020]. In this paper, we will also focus on integrating shielding with neural policy learners.
18
+
19
+ In current shielding approaches, the shields are deterministic, that is, an action is either safe or unsafe in a particular state. This is an unrealistic assumption as the world is inherently uncertain and safety is a matter of degree and risk rather than something absolute. For example, Fig. 1 demonstrates a scenario in which a car must detect obstacles from visual input and the sensor readings are noisy. The uncertainty arising from noisy sensors cannot be directly exploited
20
+
21
+ by such rejection-based shields. In fact, it is often assumed that agents have perfect sensors [Giacobbe et al., 2021; Hunt et al., 2021], which is unrealistic. By working with probabilistic rather than deterministic shields, we will be able to cope with such uncertainties and risks.
22
+
23
+ Moreover, even when given perfect safety information in all states, rejection-based shielding may fail to learn an optimal policy [Ray et al., 2019; Hunt et al., 2021; Anderson et al., 2020]. This is due to the way the shield and the agent interact as the learning agent is not aware of the rejected actions and continues to update its policy as if all safe actions were sampled directly from its safety-agnostic policy instead of through the shield. By eliminating the mismatch between the shield and the policy, we will be able to guarantee convergence towards an optimal policy if it exists.
24
+
25
+ We introduce probabilistic shields as an alternative to the deterministic rejection-based shields. Essentially, probabilistic shields take the original policy and noisy sensor readings to produce a safer policy, as demonstrated in Fig. 2 (right). By explicitly connecting action safety to probabilistic semantics, probabilistic shields provide a realistic and principled way to balance return and safety. This also allows for shielding to be applied at the level of the policy instead of at the level of individual actions, which is typically done in the literature [Hunt et al., 2021; Jansen et al., 2020].
26
+
27
+ We propose the concept of Probabilistic Logic Shields (PLS) and its implementation in probabilistic logic programs. Probabilistic logic shields are probabilistic shields that models safety through the use of logic. The safety specification is expressed as background knowledge and its interaction with the learning agent and the noisy sensors is encoded in a probabilistic logic program, which is an elegant and effective way of defining a shield. Furthermore, PLS can be automatically compiled into a differentiable structure, allowing for the optimization of a single loss function through the shield, enforcing safety directly in the policy. Probabilistic logic shields have several benefits:
28
+
29
+ - A Realistic Safety Function. PLS models a more realistic, probabilistic evaluation of safety instead of a deterministic one, it allows to balance safety and reward, and in this way control the risk.
30
+ - A Simpler Model. PLS do not require for knowledge of the underlying MDP, but instead use a simpler safety model that only represents internal safety
31
+
32
+ related properties. This is less demanding than requiring the full MDP be known required by many model-based approaches [Jansen et al., 2020; Hunt et al., 2021; Carr et al., 2022].
33
+
34
+ - End-to-end Deep RL. By being differentiable, PLS allows for seamless application of probabilistic logic shields to any model-free reinforcement learning agent such as PPO [Schulman et al., 2018], TRPO [Schulman et al., 2015], A2C [Mnih et al., 2016], etc.
35
+ - Convergence. Using PLS in deep RL comes with convergence guarantees unlike the use of rejection-based shields.
36
+
37
+ # 2 Preliminaries
38
+
39
+ Probabilistic Logic Programming We will introduce probabilistic logic programming (PLP) using the syntax of the ProbLog system [De Raedt and Kimmig, 2015]. An atom is a predicate symbol followed by a tuple of logical variables and/or constants. A literal is an atom or its negation. A ProbLog theory (or program) $\mathcal{T}$ consists of a finite set of probabilistic facts $\mathcal{F}$ and a finite set of clauses $\mathcal{BK}$ . A probabilistic fact is an expression of the form $\mathfrak{p}_{\mathrm{i}}::\mathfrak{f}_{\mathrm{i}}$ where $\mathfrak{p}_{\mathrm{i}}$ denotes the probability of the fact $\mathfrak{f}_{\mathrm{i}}$ being true. It is assumed that the probabilistic facts are independent of one another (akin to the nodes without parents in Bayesian networks) and the dependencies are specified by clauses (or rules). For instance, 0.8 :: obstc(front). states that the probability of having an obstacle in front is 0.8. A clause is a universally quantified expression of the form $\mathsf{h} :\!- \mathsf{b}_1, \ldots, \mathsf{b}_n$ where $\mathsf{h}$ is a literal and $\mathsf{b}_1, \ldots, \mathsf{b}_n$ is a conjunction of literals, stating that $\mathsf{h}$ is true if all $\mathsf{b}_{\mathrm{i}}$ are true. The clause defining safe in Fig. 1 states that it is safe when there is no crash. Each truth value assignment of all probabilistic facts $\mathcal{F}$ , denoted by $\mathcal{F}_k$ , induces a possible world $w_k$ where all ground facts in $w_k$ are true and all that are not in $w_k$ are false. Formally, the probability of a possible world $w_k$ is defined as follows.
40
+
41
+ $$
42
+ P(w_{k}) = \prod_{\mathrm{f}_{\mathrm{i}} \in w_{k}} \mathrm{p}_{\mathrm{i}} \prod_{\mathrm{f}_{\mathrm{i}} \notin w_{k}} \left( 1 - \mathrm{p}_{\mathrm{i}} \right). \tag{1}
43
+ $$
44
+
45
+ ProbLog allows for annotated disjunctions (ADs). An AD is a clause with multiple heads $\mathbf{h}_{\mathrm{i}}$ that are mutually exclusive, meaning that exactly one head is true when the body is true, and the choice of the head is governed by a probability distribution. An AD has the form $\mathfrak{p}_1:: \mathfrak{h}_1; \dots; \mathfrak{p}_{\mathfrak{m}}:: \mathfrak{h}_{\mathfrak{m}}$ where each $\mathfrak{h}_{\mathrm{i}}$ is the head of a clause and $\sum_{i=1}^{m} \mathfrak{p}_{\mathrm{i}} \leq 1$ [De Raedt and Kimmig, 2015; Fierens et al., 2015]. For instance, $\{0.1:: \text{act(nothing)}; 0.5:: \text{act(accel)}; 0.1:: \text{act(brake)}; 0.1:: \text{act(left)}; 0.2:: \text{act(right)}\}$ is an AD (with no conditions), stating the probability with which each action will be taken. The success probability of an atom $q$ given a theory $\mathcal{T}$ is the sum of the probabilities of the possible worlds that entail $q$ . Formally, it is defined as $P(q) := \sum_{w_k \models q} P(w_k)$ . Given a set of atoms $E$ as evidence, the conditional probability of a query $q$ is $P(q|E) = \frac{P(q,E)}{P(E)}$ . For instance, in Fig. 1, the probability of being safe in the next state given that the agent accelerates is $\mathbf{P}(\text{safe|act(accel)}) = \frac{0.14}{0.5} = 0.28$ .
46
+
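+ To make the distribution semantics concrete, the toy sketch below (ours, not part of the paper) enumerates the possible worlds over two independent probabilistic facts and reproduces the worked example $\mathbf{P}(\text{safe}|\text{act(accel)}) = 0.28$ , assuming, consistently with Fig. 1, that the crash clause fires with probability 0.9 when accelerating into the front obstacle (probability 0.8):
+
+ ```python
+ from itertools import product
+
+ # Independent probabilistic facts; eq. (1) assigns each world the product below.
+ facts = {"obstc_front": 0.8, "crash_given_accel": 0.9}
+
+ def world_prob(world):
+     p = 1.0
+     for fact, prob in facts.items():
+         p *= prob if world[fact] else (1.0 - prob)
+     return p
+
+ # P(safe | act(accel)): with the action fixed, a crash occurs in a world
+ # iff there is an obstacle in front AND the crash clause fires.
+ p_safe = sum(
+     world_prob(w)
+     for values in product([True, False], repeat=len(facts))
+     for w in [dict(zip(facts, values))]
+     if not (w["obstc_front"] and w["crash_given_accel"])
+ )
+ print(round(p_safe, 2))   # 0.28, matching the conditional probability in the text
+ ```
+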
47
+ MDP A Markov decision process (MDP) is a tuple $M = \langle S, A, T, R, \gamma \rangle$ where $S$ and $A$ are state and action spaces, respectively. $T(s, a, s') = P(s'|s, a)$ defines the probability of being in $s'$ after executing $a$ in $s$ . $R(s, a, s')$ defines the immediate reward of executing $a$ in $s$ resulting in $s'$ . $\gamma \in [0,1]$ is the discount factor. A policy $\pi : S \times A \to [0,1]$ defines the probability of taking an action in a state. We use $\pi(a|s)$ to denote the probability of action $a$ in state $s$ under policy $\pi$ and $\pi(s)$ for the probability distribution over all the actions in state $s$ .
48
+
49
+ Shielding A shield is a reactive system that guarantees safety of the learning agent by preventing it from selecting any unsafe action at run time [Jansen et al., 2020]. Usually, a shield is provided as a Markov model and a temporal logical formula, jointly defining a safety specification. The shield constrains the agent's exploration by only allowing it to take actions that satisfy the given safety specification. For example, when there is a car driving in front and the agent proposes to accelerate, a standard shield (Fig. 2, left) will reject the action as the agent may crash.
50
+
51
+ Policy Gradients The objective of an RL agent is to find a policy $\pi_{\theta}$ (parameterized by $\theta$ ) that maximizes the total expected return along the trajectory, formally,
52
+
53
+ $$
54
+ J(\theta) = \mathbb{E}_{\pi_{\theta}} \left[ \sum_{t=0}^{\infty} \gamma^{t} r_{t} \right]
55
+ $$
56
+
57
+ $J(\theta)$ is assumed to be finite for all policies. An important class of RL algorithms, particularly relevant in the setting of shielding, is based on policy gradients, which maximize $J(\theta)$ by repeatedly estimating the gradient $\nabla_{\theta}J(\theta)$ . The general form of policy gradient methods is:
58
+
59
+ $$
60
+ \nabla_{\theta} J(\theta) = \mathbb{E}_{\pi_{\theta}} \left[ \sum_{t=0}^{\infty} \Psi_{t} \nabla_{\theta} \log \pi_{\theta}(s_{t}, a_{t}) \right] \tag{2}
61
+ $$
62
+
63
+ where $\Psi_{t}$ is an empirical expectation of the return [Schulman et al., 2018]. The expected value is usually computed using Monte Carlo methods, requiring efficient sampling from $\pi_{\theta}$ .
64
+
65
+ # 3 Probabilistic Logic Shields
66
+
67
+ # 3.1 Probabilistic Shielding
68
+
69
+ To reason about safety, we will assume that we have a safety-aware probabilistic model $\mathbf{P}(\text{safe}|a,s)$ , indicating the probability that an action $a$ is safe to execute in a state $s$ . We use bold $\mathbf{P}$ to distinguish it from the underlying probability distributions $P(\cdot)$ of the MDP. Our safety-aware probabilistic model does not require the underlying MDP to be known; however, it needs to internally represent the safety-relevant dynamics as the safety specification. Therefore, the safety-aware model $\mathbf{P}$ is not a full representation of the MDP; it is limited to safety-related properties.
70
+
71
+ Notice that for traditional logical shields actions are either safe or unsafe, which implies that $\mathbf{P}(\text{safe}|a,s) = 1$ or $0$ . We are interested in the safety of policies, that is, in
72
+
73
+ $$
74
+ \mathbf{P}_{\pi}(\text{safe}|s) = \sum_{a \in A} \mathbf{P}(\text{safe}|s, a) \, \pi(a|s). \tag{3}
75
+ $$
76
+
77
+ ![](images/e09b3e23c6fe79ea3ce0f7813e81e5dee38cdaf612127a6415553064d0e2481f.jpg)
78
+ Figure 1: A motivating example of Probabilistic Logic Shields. We encode the interaction between the base policy $\pi$ , the state abstraction $H$ and the safety specification using a ProbLog program $\mathcal{T}$ . This provides a uniform language to express many aspects of the shielding process. The shielded policy $\pi^{+}(s)$ decreases the probability of unsafe actions, e.g., acceleration, and increases the likelihood of being safe.
79
+
80
+ By marginalizing out actions according to their probability under $\pi$ , the probability $\mathbf{P}_{\pi}$ measures how likely it is to take a safe action in $s$ according to $\pi$ .
81
+
82
+ Definition 3.1. Probabilistic Shielding Given a base policy $\pi$ and a probabilistic safety model $\mathbf{P}(\text{safe}|s,a)$ , the shielded policy is
83
+
84
+ $$
85
+ \pi^{+}(a|s) = \mathbf{P}_{\pi}(a|s, \text{safe}) = \frac{\mathbf{P}(\text{safe}|s, a)}{\mathbf{P}_{\pi}(\text{safe}|s)} \, \pi(a|s) \tag{4}
86
+ $$
87
+
88
+ Intuitively, a shielded policy is a re-normalization of the base policy that increases (resp. decreases) the probabilities of the actions that are safer (resp. less safe) than average.
89
+
90
+ Proposition 1. A shielded policy is always at least as safe as its base policy in all states, i.e. $\mathbf{P}_{\pi^{+}}(\text{safe}|s) \geq \mathbf{P}_{\pi}(\text{safe}|s)$ for all $s$ and $\pi$ . (A proof is in Appendix B.)
91
+
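+ As a small numerical illustration of Definition 3.1 (our example; the base policy matches the annotated disjunction of Fig. 1, while the per-action safety values other than $\mathbf{P}(\text{safe}|s,\text{accel}) = 0.28$ are made up), eq. 4 is simply a safety-weighted renormalization of the base policy:
+
+ ```python
+ import numpy as np
+
+ def shield(pi, p_safe_given_a):
+     """Eq. (4): pi_plus(a|s) is proportional to P(safe|s,a) * pi(a|s)."""
+     weighted = np.asarray(pi) * np.asarray(p_safe_given_a)
+     return weighted / weighted.sum()      # the denominator is P_pi(safe|s), eq. (3)
+
+ # Base policy over [nothing, accel, brake, left, right] and per-action safety.
+ pi = [0.1, 0.5, 0.1, 0.1, 0.2]
+ p_safe = [1.0, 0.28, 1.0, 0.9, 0.5]       # illustrative values only
+ print(shield(pi, p_safe).round(3))        # unsafe acceleration is down-weighted
+ ```
+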
92
+ # 3.2 Shielding with Probabilistic Logics
93
+
94
+ In this paper, we focus on probabilistic shields implemented through probabilistic logic programs. The ProbLog program defining probabilistic safety consists of three parts.
95
+
96
+ First, an annotated disjunction $\Pi_s$ represents the policy $\pi(a|s)$ in the logic program. For example, in Fig. 1, the policy $\pi(a|s)$ is represented as the annotated disjunction $\Pi_s = \{0.1::\text{act(nothing)};\ 0.5::\text{act(accel)};\ 0.1::\text{act(brake)};\ 0.1::\text{act(left)};\ 0.2::\text{act(right)}\}$ .
97
+
98
+ The second component of the program is a set of probabilistic facts $\mathbf{H}_s$ representing an abstraction of the current state. This abstraction should contain the information needed to reason about the safety of actions in that state; it therefore need not be a full representation of the entire state $s$ . For example, in Fig. 1, the abstraction is represented as the set of probabilistic facts $\mathbf{H}_s = \{0.8::\mathrm{obstc(front)}$ , $0.2::\mathrm{obstc(left)}$ , $0.5::\mathrm{obstc(right)}\}$ .
99
+
100
+ The third component of the program is a safety specification $\mathcal{B}\mathcal{K}$ , which is a set of clauses and ADs representing knowledge about safety. In our example, the predicates crash and safe are defined using clauses. The first clause for crash states that the probability of having a crash is 0.9 if the agent accelerates when there is an obstacle in front of it. The clause for safe states that it is safe if no crash occurs.
101
+
102
+ Therefore, we obtain a ProbLog program $\mathcal{T}(s) = \mathcal{B}\mathcal{K} \cup \mathbf{H}_s \cup \Pi_s$ , inducing a probabilistic measure $\mathbf{P}_{\mathcal{T}}$ . By querying this program, we can reason about safety of policies and actions. More specifically, we can use $\mathcal{T}(s)$ to obtain:
103
+
104
+ - action safety in $s$ : $\mathbf{P}(\text{safe}|s, a) = \mathbf{P}_{\mathcal{T}}(\text{safe}|a)$
105
+ - policy safety in $s$ : $\mathbf{P}_{\pi}(\text{safe}|s) = \mathbf{P}_{\mathcal{T}}(\text{safe})$
106
+ - the shielded policy in $s$ : $\mathbf{P}_{\pi}(a|s, \text{safe}) = \mathbf{P}_{\mathcal{T}}(a|\text{safe})$
107
+
108
+ Notice that all three distributions can be obtained by querying the same ProbLog program $\mathcal{T}$ .
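+
+ As an illustration of how these queries could be posed in practice, the sketch below encodes a Fig. 1-style program with the open-source `problog` Python package. Note the assumptions: only the crash clause described in the text is included, and the API calls reflect our recollection of the package's documented usage; this is not the authors' implementation.
+
+ ```python
+ from problog.program import PrologString
+ from problog import get_evaluatable
+
+ BASE = """
+ % base policy Pi_s (annotated disjunction)
+ 0.1::act(nothing); 0.5::act(accel); 0.1::act(brake);
+ 0.1::act(left); 0.2::act(right).
+
+ % state abstraction H_s (probabilistic facts from noisy sensors)
+ 0.8::obstc(front). 0.2::obstc(left). 0.5::obstc(right).
+
+ % safety specification BK (only the clause described in the text)
+ 0.9::crash :- act(accel), obstc(front).
+ safe :- not(crash).
+ """
+
+ def run(extra):
+     return get_evaluatable().create_from(PrologString(BASE + extra)).evaluate()
+
+ # policy safety P_pi(safe | s)
+ print(run("query(safe)."))
+ # shielded policy P_pi(a | s, safe), obtained by conditioning on safety
+ print(run("evidence(safe, true). query(act(_))."))
+ ```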
109
+
110
+ It is important to realize that although we are using the probabilistic logic programming language ProbLog to reason about safety, we could also have used alternative representations such as Bayesian networks or other StarAI models [De Raedt et al., 2016]. The ProbLog representation is convenient, however, because it makes it easy to model planning domains, it is Turing equivalent and it is differentiable. At the same time, it is important to note that the safety model in ProbLog is an abstraction, possibly using a different representation than that of the underlying MDP.
111
+
112
+ Perception Through Neural Predicates The probabilities of both $\Pi_s$ and $\mathbf{H}_s$ depend on the current state $s$ . We compute these probabilities using two neural networks $\pi$ and $H$ operating on real-valued inputs (i.e. images or sensors), and then feed them to the ProbLog program. As depicted in Fig. 1, both functions take a state representation $s$ as input (e.g. an image) and output the probabilities of the facts representing, respectively, the actions and the safety-relevant abstraction of the state. This is closely related to the notion of a neural predicate, which, for the purposes of this paper, can be regarded as a neural network encapsulated inside the logic. Since ProbLog programs are differentiable with respect to the probabilities in $\Pi_s$ and $\mathbf{H}_s$ , gradients can be seamlessly backpropagated to the network parameters during learning.
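+
+ The following PyTorch-style sketch illustrates this wiring (our illustration, not the authors' code): one network outputs the probabilities of the action atoms in $\Pi_s$ , the other the probabilities of the sensor facts in $\mathbf{H}_s$ ; both sets of probabilities then parameterize the ProbLog program, so gradients can flow back through it into the networks.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class PolicyNet(nn.Module):
+     """Outputs the probabilities a_i of the act/1 atoms in Pi_s."""
+     def __init__(self, state_dim, n_actions):
+         super().__init__()
+         self.net = nn.Sequential(nn.Linear(state_dim, 64), nn.ReLU(),
+                                  nn.Linear(64, n_actions))
+     def forward(self, s):
+         return torch.softmax(self.net(s), dim=-1)   # a proper distribution
+
+ class SensorNet(nn.Module):
+     """Outputs the probabilities f_i of the sensor facts in H_s."""
+     def __init__(self, state_dim, n_sensors):
+         super().__init__()
+         self.net = nn.Sequential(nn.Linear(state_dim, 64), nn.ReLU(),
+                                  nn.Linear(64, n_sensors))
+     def forward(self, s):
+         return torch.sigmoid(self.net(s))           # independent fact probabilities
+ ```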
113
+
114
+ # 4 Probabilistic Logic Policy Gradient
115
+
116
+ In this section, we demonstrate how to use probabilistic logic shields with a deep reinforcement learning method. We will focus on real-vector states (such as images), while the safety constraint is specified symbolically and logically. Under this setting, our goal is to find an optimal policy in the safe policy space $\Pi_{\text{safe}} = \{\pi \in \Pi | \mathbf{P}_{\pi}(\text{safe}|s) = 1, \forall s \in S\}$ . Notice that it is possible that no optimal policy exists in the safe policy space. In this case, our aim will be to find a policy that is as safe as possible.
117
+
118
+ We propose a new safe policy gradient technique, which we call Probabilistic Logic Policy Gradient (PLPG). PLPG
119
+
120
+ ![](images/b69c19276e640b9d5c051fa952fb2818577ed54705ab3cc8db148d449719267d.jpg)
121
+ Figure 2: A comparison between a traditional rejection-based shield (SHLD) and a Probabilistic Logic Shield (PLS). Both shields take a policy $\pi$ and a set of noisy sensor readings $\mathbf{H}_{\mathbf{s}}$ to compute a safe action. Left: The policy must keep sampling actions until a safe action is accepted by the rejection-based shield. This requires an assumption that an action is either completely safe or unsafe. Right: We replace SHLD with PLS that proposes a safer policy $\pi^{+}$ on the policy level without imposing the assumption.
122
+
123
+ applies probabilistic logic shields and is guaranteed to converge to a safe and optimal policy if it exists.
124
+
125
+ # 4.1 PLPG for Probabilistic Shielding
126
+
127
+ To integrate PLS with policy gradient, we simply replace the base policy in Eq. (2) with the shielded policy obtained through Eq. (4). We call this a shielded policy gradient.
128
+
129
+ $$
130
+ \mathbb{E}_{\pi_{\theta}^{+}} \left[ \sum_{t=0}^{\infty} \Psi_{t}\, \nabla_{\theta} \log \pi_{\theta}^{+}(a_{t} | s_{t}) \right] \tag{5}
131
+ $$
132
+
133
+ This requires the shield to be differentiable and cannot be done using rejection-based shields. The gradient encourages policies $\pi^{+}$ that are more rewarding, i.e. that have a large $\Psi$ , in the same way as a standard policy gradient. It does so by assuming that unsafe actions have been filtered out by the shield. However, when the safety definition is uncertain, unsafe actions may still be taken, and such a gradient may still end up encouraging unsafe but rewarding policies.
134
+
135
+ For this reason, we introduce a safety loss to penalize unsafe policies. Intuitively, a safe policy should have a small safety loss, and the loss of a completely safe policy should be zero. The loss can be expressed by interpreting the policy safety as the probability that $\pi^{+}$ satisfies the safety constraint (using the semantic loss of [Xu et al., 2018]); more formally, $L^s \coloneqq -\log \mathbf{P}_{\pi^+}(\mathsf{safe}|s)$ . Then, the corresponding safety gradient is as follows.
136
+
137
+ $$
138
+ - \mathbb{E}_{\pi_{\theta}^{+}} \left[ \nabla_{\theta} \log \mathbf{P}_{\pi^{+}}(\text{safe} | s) \right] \tag{6}
139
+ $$
140
+
141
+ Notice that, owing to its loss-based nature, the safety gradient is a regret-style penalty rather than a shielding mechanism. By combining the shielded policy gradient and the safety gradient, we obtain a new Safe RL technique.
142
+
143
+ Definition 4.1. (PLPG) The probabilistic logic policy gradient $\nabla_{\theta}J(\theta)$ is
144
+
145
+ $$
146
+ \mathbb{E}_{\pi_{\theta}^{+}} \left[ \sum_{t=0}^{\infty} \Psi_{t}\, \nabla_{\theta} \log \pi_{\theta}^{+}(a_{t} | s_{t}) - \alpha\, \nabla_{\theta} \log \mathbf{P}_{\pi_{\theta}^{+}}(\text{safe} | s_{t}) \right] \tag{7}
147
+ $$
148
+
149
+ where $\alpha$ is the safety coefficient that indicates the weight of the safety gradient.
150
+
151
+ We introduce the safety coefficient $\alpha$ , a hyperparameter that controls the combination of the two gradients.
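+
+ As a rough sketch of how Eq. (7) might be turned into a loss for automatic differentiation, consider the single-transition surrogate below (our illustration; function names, shapes and the exact surrogate are assumptions, not the authors' implementation). It combines the shielded policy-gradient term with the safety loss $L^s = -\log \mathbf{P}_{\pi^+}(\text{safe}|s)$ introduced above.
+
+ ```python
+ import torch
+
+ def plpg_loss(logits, p_safe, action, psi, alpha):
+     """logits : base-policy scores for all actions (requires grad)
+     p_safe : differentiable P(safe | s, a) per action, e.g. from the compiled circuit
+     action : index of the executed action (sampled from pi+)
+     psi    : advantage estimate Psi_t (treated as a constant)
+     alpha  : safety coefficient"""
+     pi = torch.softmax(logits, dim=-1)                   # base policy pi(.|s)
+     p_safe_pi = (pi * p_safe).sum()                      # Eq. (3): P_pi(safe | s)
+     pi_plus = pi * p_safe / p_safe_pi                    # Eq. (4): shielded policy
+     pg_loss = -psi * torch.log(pi_plus[action])          # shielded policy-gradient term
+     safety_loss = -torch.log((pi_plus * p_safe).sum())   # L^s = -log P_pi+(safe | s)
+     return pg_loss + alpha * safety_loss
+ ```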
152
+
153
+ Both gradients in PLPG are essential. The shielded policy gradient keeps the agent out of immediate danger, and the safety gradient penalizes unsafe behavior. The interaction between shielded and loss-based gradients is still a very new topic; nonetheless, recent advances in neuro-symbolic learning have shown that both are important [Ahmed et al., 2022]. Our experiments show that using only one of the two is practically insufficient.
154
+
155
+ # 4.2 Probabilistic vs Rejection-based Shielding
156
+
157
+ The problem of finding a policy in the safe policy space cannot be solved by rejection-based shielding (cf. Fig. 2, left) without strong assumptions, such as that a state-action pair is either completely safe or completely unsafe. It is in fact often assumed that a set of safe state-action pairs $SA_{\text{safe}}$ is given. To implement a rejection-based shield, the agent must keep sampling from the base policy until an action $a$ is accepted. This approach implicitly conditions on safety through a very inefficient rejection sampling scheme, shown below, whose link to probabilistic semantics is unclear [Robert et al., 1999].
158
+
159
+ $$
160
+ \pi^{+}(a | s) = \frac{\mathbb{1}_{[(s, a) \in SA_{\text{safe}}]}\, \pi(a | s)}{\sum_{a' \in A} \mathbb{1}_{[(s, a') \in SA_{\text{safe}}]}\, \pi(a' | s)} \tag{8}
161
+ $$
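+
+ For contrast, a rejection-based shield realizing Eq. (8) could look like the sketch below (illustrative only; it assumes a boolean oracle for membership in $SA_{\text{safe}}$ is available).
+
+ ```python
+ import random
+
+ def rejection_shield(pi, is_safe):
+     """pi: dict action -> probability; is_safe: dict action -> bool (membership in SA_safe)."""
+     actions, probs = zip(*pi.items())
+     while True:
+         a = random.choices(actions, weights=probs, k=1)[0]
+         if is_safe[a]:
+             return a   # distributed as Eq. (8), but obtained by wasteful resampling
+ ```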
162
+
163
+ # Convergence Under Perfect Safety Information
164
+
165
+ It is common for existing shielding approaches to integrate policy gradients with a rejection-based shield, e.g. Eq. (8), resulting in the following policy gradient.
166
+
167
+ $$
168
+ \nabla_{\theta} J(\theta) = \mathbb{E}_{\pi_{\theta}^{+}} \left[ \sum_{t=0}^{\infty} \Psi_{t}\, \nabla_{\theta} \log \pi_{\theta}(s_{t}, a_{t}) \right] \tag{9}
169
+ $$
170
+
171
+ It is a known problem that this approach may result in sub-optimal policies, even though the agent has perfect safety information [Kalweit et al., 2020; Ray et al., 2019; Hunt et al., 2021; Anderson et al., 2020]. This is due to a policy mismatch between $\pi^{+}$ and $\pi$ in Eq. (9). More specifically, Eq. (9) is an off-policy algorithm, meaning that the policy used to explore (i.e. $\pi^{+}$ ) is different from the policy that is updated (i.e. $\pi$ ). For any off-policy policy gradient method to converge to an optimal policy (even in the tabular case), the behavior policy (used to explore) and the target policy (to be updated) must appropriately visit the same state-action space, i.e., if $\pi(a|s) > 0$ then $\pi^{+}(a|s) > 0$ [Sutton and Barto, 2018]. This requirement is violated by Eq. (9).
172
+
173
+ PLPG, on the other hand, reduces to Eq. (5) when given perfect sensor information about the state. Since Eq. (5) has the same form as the standard policy gradient (Eq. (2)), PLPG is guaranteed to converge to an optimal policy according to the Policy Gradient Theorem [Sutton et al., 2000].
174
+
175
+ Proposition 2. PLPG, i.e. Eq. (7), converges to an optimal policy given perfect safety information in all states.
176
+
177
+ It should be noted that the base policy learnt by PLPG is unlikely to be equivalent to the policy that would have been learnt without a shield, all other things being equal. That is,
178
+
179
+ in general, $\pi_{\theta}$ in Eq. (9) will not tend to be the same as $\pi_{\theta}$ in Eq. (7) as learning continues. This is because the parameters $\theta$ in Eq. (7) depend on $\pi_{\theta}^{+}$ instead of $\pi_{\theta}$ , and $\pi_{\theta}$ is learnt so as to make $\pi^{+}$ optimal given the safety imposed by the shield.
180
+
181
+ # Learning From Unsafe Actions
182
+
183
+ PLPG has a significant advantage over a rejection-based shield: it learns not only from the actions accepted by the shield but also from the ones that would have been rejected. This is a result of the use of ProbLog programs, which compute the safety of all available actions in the current state, allowing us to update the base policy in terms of safety without having to actually execute the other actions.
184
+
185
+ # Leveraging Probabilistic Logic Programming
186
+
187
+ Probabilistic logic programs can, just like Bayesian networks, be compiled into circuits using knowledge compilation [Darwiche, 2003]. Although probabilistic inference is hard (#P-complete), once the circuit is obtained, inference is linear in the size of the circuit [De Raedt and Kimmig, 2015; Fierens et al., 2015]. Furthermore, the circuits can be used to compute gradients and the safety model only needs to be compiled once. We show a compiled program in Appendix C. Our PLP language is still restricted to discrete actions, although it should be possible to model continuous actions using extensions thereof in future work [Nitti et al., 2016].
188
+
189
+ # 5 Experiments
190
+
191
+ We now present an empirical evaluation of PLPG, comparing it to other baselines in terms of return and safety.
192
+
193
+ Experimental Setup Experiments are run in three environments: (1) Stars, where the agent must collect as many stars as possible without walking into a stationary fire; (2) Pacman, where the agent must collect stars without getting caught by fire ghosts that can move around; and (3) Car Racing, where the agent must go around the track without driving into the grass area. We use two configurations for each domain. A detailed description is in Appendix A.
194
+
195
+ ![](images/b353bdd12ebac21fe1fc70a90d3e10e4be4324d9cc0b2c429c2d4b29d7c95277.jpg)
196
+ Figure 3: The domains of Stars, Pacman and Car Racing. The difference between Stars and Pacman is that fires in Stars are stable and the ones in Pacman move around.
197
+
198
+ We compare PLPG to three RL baselines.
199
+
200
+ - PPO, a standard safety-agnostic agent that starts with a random initial policy [Schulman et al., 2017].
201
+ - VSRL, an agent augmented with a deterministic rejection-based shield [Hunt et al., 2021]. Its structure is shown in Fig. 2 (left).
202
+
203
+ - $\epsilon$ -VSRL, a new risk-taking variant of VSRL that has a small $\epsilon$ probability of accepting any action, akin to $\epsilon$ -greedy exploration [Fernández and Veloso, 2006]. As $\epsilon$ -VSRL simulates artificial noise/distrust in the sensors, we expect it to improve VSRL's ability to cope with noise.
204
+
205
+ In order to have a fair comparison, VSRL, $\epsilon$ -VSRL and PLPG agents have identical safety sensors and safety constraints, and PPO does not have any safety sensors. We do not compare to shielding approaches that require a full model of the environment since they are not applicable here. We train all agents in all domains using 600k learning steps and all experiments are repeated using five different seeds.
206
+
207
+ In our experiments, we answer the following questions:
208
+
209
+ Q1 Does PLPG produce safer and more rewarding policies than its competitors?
210
+ Q2 What is the effect of the hyper-parameters $\alpha$ and $\epsilon$ ?
211
+ Q3 Does the shielded policy gradient or the safety gradient have more impact on safety in PLPG?
212
+ Q4 What are the performance and the computational cost of a multi-step safety look-ahead PLS?
213
+
214
+ Metrics We compare all agents in terms of two metrics: (1) average normalized return, i.e. the standard per-episode normalized return averaged over the last 100 episodes; (2) cumulative normalized violation, i.e. the accumulated number of constraint violations from the start of learning. The absolute numbers are listed in Appendix D.
215
+
216
+ Probabilistic Safety via Noisy Sensor Values We consider a set of noisy sensors around the agent for safety purposes. For instance, the four fire sensor readings in Fig. 3 (left) might be $\{0.6::\text{fire}(0,1)$ . $0.1::\text{fire}(0,-1)$ . $0.1::\text{fire}(-1,0)$ . $0.4::\text{fire}(1,0)\}$ . These sensors provide the shield with only local information about the learning agent's surroundings. PLPG agents are able to use the noisy sensor values directly. However, as VSRL and $\epsilon$ -VSRL agents require $0/1$ sensor values to apply formal verification, noisy sensor readings must be discretized to $\{0,1\}$ . For example, the above fire sensor readings become $\{1::\text{fire}(0,1)$ . $0::\text{fire}(0,-1)$ . $0::\text{fire}(-1,0)$ . $0::\text{fire}(1,0)\}$ . For all domains, the noisy sensors are standard, pre-trained neural approximators with an accuracy higher than $99\%$ . Even with such accurate sensors, we will show that discretizing the sensor values, as in VSRL, is harmful to safety. Details of the pre-training procedure are in Appendix A.
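+
+ Concretely, the discretization step required by VSRL-style shields amounts to thresholding the readings (a sketch; the 0.5 threshold is our assumption, chosen to match the example above).
+
+ ```python
+ def discretize(readings, threshold=0.5):
+     # e.g. {"fire(0,1)": 0.6, "fire(1,0)": 0.4} -> {"fire(0,1)": 1, "fire(1,0)": 0}
+     return {atom: int(p > threshold) for atom, p in readings.items()}
+ ```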
217
+
218
+ # Q1: Lower Violation and Higher Return
219
+
220
+ We evaluate how much safer and more rewarding PLPG is compared to baselines by measuring the cumulative safety violation and the average episodic return during the learning process. Before doing so, we must select appropriate hyperparameters for PLPG and VSRL agents. Since our goal is to find an optimal policy in the safe policy space, we select the $\alpha \in \{0, 0.1, 0.5, 1, 5\}$ and $\epsilon \in \{0, 0.005, 0.01, 0.05, 0.1, 0.2, 0.5, 1.0\}$ values that result in the lowest violation in each domain. The hyperparameter choices are listed in Appendix D. These values are fixed for the rest of the experiments (except for Q2 where we explicitly analyze their effects). We show that PLPG achieves the
221
+
222
+ ![](images/7bb8dba28593c4e3f591a9621da9f73768e78a3e40ef59fd21e40cbdaa0e275e.jpg)
223
+ (Legend: PPO, VSRL, $\epsilon$ -VSRL, PLPG)
224
+
225
+ ![](images/ce8c06f40f1f0a702ea531ca9a6eb8d905a12678dade1e800e00fb90f0e40545.jpg)
226
+ (Panels: CR1, CR2, Pac1, Pac2, Stars1, Stars2)
227
+
228
+ ![](images/877e2ba8d829fab9069b1ff4ece49484a5a5de02ad1d9bff1c2d799a4ae9aa99.jpg)
229
+
230
+ ![](images/054274b7b7ce51c7016df5ded90215c036ab1f8aa769a362b1c36eadf67cc59f.jpg)
231
+ Figure 4: Trade-off between Violation (x-axis) and Return (y-axis). Each small data point is an agent's policy and the large data points are the average of five seeds. An ideal policy should lie in the upper-left corner.
232
+
233
+ ![](images/087cdd3aa55e9bef9ea5e023234c9f76079f2ef4a724f0c37af07e5c7037375c.jpg)
234
+
235
+ ![](images/e708d7c61177358303ea5d580606ee3131478eeda1110f32f418ee55b469c603.jpg)
236
+ Figure 5: The episodic return and cumulative constraint violation of VSRL (left) and PLPG (right) agents in noisy environments. Left: The effect of $\epsilon$ is not significant. Right: Increasing $\alpha$ gives a convex trend in both return and violation. The absolute numbers are listed in Appendix D.
237
+
238
+ lowest violation while having a comparable return to other agents.
239
+
240
+ The results are plotted in Fig. 4 where each data point is a policy trained for 600k steps. Fig. 4 clearly shows that PLPG is the safest in most tested domains. When augmented with perfect sensors, PLPG's violation is lower than PPO by $50.2\%$ and lower than VSRL by $25.7\%$ . When augmented with noisy sensors, PLPG's violation is lower than PPO by $51.3\%$ , lower than VSRL by $24.5\%$ and lower than $\epsilon$ -VSRL by $13.5\%$ . Fig. 4 also illustrates that PLPG achieves a comparable (or slightly higher) return while having the lowest violation. When augmented with perfect sensors, PLPG's return is higher than PPO by $0.5\%$ and higher than VSRL by $4.8\%$ . When augmented with noisy sensors, PLPG's return is higher than PPO by $4.5\%$ , higher than VSRL by $6.5\%$ and higher than $\epsilon$ -VSRL by $6.7\%$ .
241
+
242
+ Car Racing has complex and continuous action effects compared to the other domains. In CR, safety sensor readings alone are not sufficient for the car to avoid driving into the grass, as they do not capture the inertia of the car, which involves the velocity and underlying physical mechanisms such as friction. In domains where safety sensors are not sufficient, the agent must be able to act slightly unsafely in order to learn, as PPO, $\epsilon$ -VSRL and PLPG can, instead of relying completely on the sensors, as VSRL does. Hence, compared to the safety-agnostic baseline PPO, while $\epsilon$ -VSRL and PLPG respectively reduce violations by $37.8\%$ and $50.2\%$ , VSRL causes $18\%$ more violations even when given perfect safety sensors. Note that we measure the actual violations instead of the policy safety $P_{\pi}(\text{safe}|s)$ . The policy safety during the learning process is plotted in Appendix D.
243
+
244
+ # Q2: Selection of Hyperparameters $\alpha$ and $\epsilon$
245
+
246
+ We analyze how the hyper-parameters $\alpha$ and $\epsilon$ respectively affect the performance of PLPG and VSRL in noisy environments.
247
+
248
+ ![](images/0a95ac2ab348c1a04eebb6b2c97a72bbc800534b0f8bfad4c5f396070df8efb4.jpg)
249
+
250
+ ![](images/39c7a24733e7f3ab56384df19868772b900e437bc7c65c1785bd1e1bfb64da0e.jpg)
251
+
252
+ ![](images/f148e73100dd4d9203edbd2da043e502dffe299e220377560ff32f0634f7c54e.jpg)
253
+
254
+ Specifically, we measure the episodic return and cumulative constraint violation. The results are plotted in Fig. 5.
255
+
256
+ The effect of different values of $\alpha$ on PLPG agents is clear. Increasing $\alpha$ gives a convex trend in both the return and the violation counts. The optimal value of $\alpha$ is generally between 0.1 and 1, where the cumulative constraint violation and the return are optimal. This illustrates the benefit of combining the shielded policy gradient and the safety gradient. In contrast, the effect of $\epsilon$ on VSRL agents is not significant. Increasing $\epsilon$ generally improves return but worsens constraint violation. This illustrates that simply randomizing the policy is not effective at improving the ability to handle noisy environments. Notice that there is no one-to-one mapping between the two hyper-parameters, as $\alpha$ controls the combination of the gradients and $\epsilon$ controls the degree of unsafe exploration.
257
+
258
+ # Q3: PLPG Gradient Analysis
259
+
260
+ We analyze how the shielded policy gradient and the safety gradient interact with one another. To do this, we introduce two ablated agents, one using only the safety gradient (Eq. (6)) and the other using only the shielded policy gradient (Eq. (5)). The results are plotted in Fig. 6 where each data point is a policy trained for 600k steps.
261
+
262
+ Fig. 6 illustrates that combining both gradients is safer than using only one gradient. When augmented with perfect sensors, the use of both gradients has a violation that is lower than using only safety or policy gradient by $24.7\%$ and $10.8\%$ , respectively. When augmented with noisy sensors, using both gradients has a violation that is lower than using only safety or policy gradient by $25.5\%$ and $15.5\%$ , respectively. Nonetheless, the importance of policy and safety gradients varies among domains. In Stars and Pacman, using only policy gradients causes fewer violations than using only
263
+
264
+ ![](images/102df9e4e40591e7eae75501e3e425dccf375ac124c8e7e98da3f5f37db5eb24.jpg)
265
+
266
+ ![](images/8fad2da0a4a06e01ffa756a138844bff42554d2aa8d5727419e7c3dea18826b8.jpg)
267
+
268
+ ![](images/39a24007de442d72f35ab59ac085938a1acf6fe0dcabc7eea986766b45a62c4d.jpg)
269
+
270
+ ![](images/a04b49f168a6ebf37e8c47aa5b7e9c81b8f0d1ccb5258a87a988bf96002f5991.jpg)
271
+ Figure 6: Trade-off between Violation (x-axis) and Return (y-axis). Each data point is an agent's policy and the big data points are the average of five seeds.
272
+
273
+ ![](images/70799e85aa6712241e9e798d816b8e7a74837bc8540bd2fdbc4f59849d48e464.jpg)
274
+
275
+ ![](images/101380c093ba06abad798528290458dd308cfaf0b644103460926bca3ac987c1.jpg)
276
+
277
+ <table><tr><td>Safety horizon</td><td>1 step</td><td>2 steps</td><td>3 steps</td><td>4 steps</td></tr><tr><td>#sensors</td><td>4</td><td>12</td><td>24</td><td>40</td></tr><tr><td>Circuit size</td><td>116</td><td>1073</td><td>8246</td><td>373676</td></tr><tr><td>Compilation (s)</td><td>0.29</td><td>0.70</td><td>1.70</td><td>15.94</td></tr><tr><td>Evaluation (s)</td><td>0.01</td><td>0.08</td><td>0.66</td><td>27.39</td></tr><tr><td>Ret./Vio.</td><td>0.81 / 0.82</td><td>0.85 / 0.65</td><td>0.86 / 0.57</td><td>-</td></tr></table>
278
+
279
+ Table 1: Multi-step Safety Look-ahead
280
+
281
+ safety gradients. In CR, however, using only safety gradients causes fewer violations. This is a consequence of the inertia of the car not being fully captured by safety sensors, as discussed in Q1.
282
+
283
+ # Q4: Multi-step Safety Look-ahead
284
+
285
+ We analyze the behaviour of PLPG when we use probabilistic logic shields for multi-step safety look-ahead. In this case, the safety program requires more sensors around the agent to detect potential danger over a larger horizon into the future. In Pacman, for example, new sensors are required to detect whether there is a ghost N units away, where N is the safety horizon. We analyze the behavior/performance in terms of return/violation as well as the computational cost. The results are shown in Table 1. Increasing the horizon causes an exponential growth in the compiled circuit size and the corresponding inference time. However, it considerably improves both safety and return, especially when moving from one to two steps.
286
+
287
+ # 6 Related Work
288
+
289
+ Safe RL. Safe RL aims to avoid unsafe consequences through the use of various safety representations [García and Fernández, 2015]. There are several ways to achieve this goal. One could constrain the expected cost [Achiam
290
+
291
+ et al., 2017; Moldovan and Abbeel, 2012], maximize safety constraint satisfiability through a loss function [Xu et al., 2018], add a penalty to the agent when the constraint is violated [Pham et al., 2018; Tessler et al., 2019; Memarian et al., 2021], or construct a more complex reward structure using temporal logic [De Giacomo et al., 2021; Camacho et al., 2019; Jiang et al., 2021; Hasanbeig et al., 2019; Den Hengst et al., 2022]. These approaches express safety as a loss, while our method directly prevents the agent from taking actions that can potentially lead to safety violation.
292
+
293
+ Probabilistic Shields. Shielding is a safe reinforcement learning approach that aims to completely avoid unsafe actions during the learning process [Alshiekh et al., 2018; Jansen et al., 2020]. Previous shielding approaches have been limited to symbolic state spaces and are not suitable for noisy environments [Jansen et al., 2020; Harris and Schaub, 2020; Hunt et al., 2021; Anderson et al., 2020]. To address uncertainty, some methods incorporate randomization, e.g. simulating future states in an emulator to estimate risk [Li and Bastani, 2020; Giacobbe et al., 2021], using $\epsilon$ -greedy exploration that permits unsafe actions [García and Fernández, 2019], or randomizing the policy based on the current belief state [Karkus et al., 2017]. To integrate shielding with neural policies, one can translate a neural policy into a symbolic one that can be formally verified [Bastani et al., 2018; Verma et al., 2019]. However, these methods rely on sampling and do not have a clear connection to the uncertainty present in the environment, while our method directly exploits such uncertainty through the use of PLP principles. Belief states can capture uncertainty but require an environment model to keep track of the agent's belief state, which is a stronger assumption than our method makes [Junges et al., 2021; Carr et al., 2022].
294
+
295
+ Differentiable layers for neural policies. The use of differentiable shields has gained some attention in the field. One popular approach is to add a differentiable layer to the policy network to prevent constraint violations. Most of these methods focus on smooth physical rules [Dalal et al., 2018; Pham et al., 2018; Cheng et al., 2019] and only a few involve logical constraints. In [Kimura et al., 2021], an optimization layer is used to transform a state-value function into a policy encoded as a logical neural network. [Ahmed et al., 2022] encode differentiable and hard logical constraints for neural networks using PLP. While similar, this last model focuses on prediction tasks and does not consider a trade-off between return and constraint satisfiability.
296
+
297
+ # 7 Conclusion
298
+
299
+ We introduced Probabilistic Logic Shields, a proof of concept of a novel class of end-to-end differentiable shielding techniques. PLPG enables efficient training of safe-by-construction neural policies. This is done by using a probabilistic logic programming layer on top of the standard neural policy. PLS is a generalization of classical shielding [Hunt et al., 2021] that allows for both deterministic and probabilistic safety specifications. Future work will be dedicated to extending PLS to a larger class of RL algorithms, such as those with continuous policies.
300
+
301
+ # References
302
+
303
+ [Achiam et al., 2017] Joshua Achiam, David Held, Aviv Tamar, and Pieter Abbeel. Constrained policy optimization. In Proceedings of the 34th International Conference on Machine Learning - Volume 70, ICML'17, page 22-31. JMLR.org, 2017.
304
+ [Ahmed et al., 2022] Kareem Ahmed, Stefano Teso, Kai-Wei Chang, Guy Van den Broeck, and Antonio Vergari. Semantic probabilistic layers for neuro-symbolic learning. In Advances in Neural Information Processing Systems, 2022.
305
+ [Alshiekh et al., 2018] Mohammed Alshiekh, Roderick Bloem, Rüdiger Ehlers, Bettina Könighofer, Scott Niekum, and Ufuk Topcu. Safe reinforcement learning via shielding. AAAI'18/IAAI'18/EAAI'18. AAAI Press, 2018.
306
+ [Anderson et al., 2020] Greg Anderson, Abhinav Verma, Isil Dillig, and Swarat Chaudhuri. Neurosymbolic reinforcement learning with formally verified exploration. In Proceedings of the 34th International Conference on Neural Information Processing Systems, NIPS'20, 2020.
307
+ [Bastani et al., 2018] Osbert Bastani, Yewen Pu, and Armando Solar-Lezama. Verifiable reinforcement learning via policy extraction. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, NIPS'18, page 2499-2509. Curran Associates Inc., 2018.
308
+ [Brockman et al., 2016] Greg Brockman, Vicki Cheung, Ludwig Pettersson, Jonas Schneider, John Schulman, Jie Tang, and Wojciech Zaremba. OpenAI Gym, 2016.
309
+ [Camacho et al., 2019] Alberto Camacho, Rodrigo Toro Icarte, Toryn Q. Klassen, Richard Valenzano, and Sheila A. McIlraith. Ltl and beyond: Formal languages for reward function specification in reinforcement learning. In Proceedings of the Twenty-Eighth International Joint Conference on Artificial Intelligence, IJCAI-19, 7 2019.
310
+ [Carr et al., 2022] Steven Carr, Nils Jansen, Sebastian Junges, and Ufuk Topcu. Safe reinforcement learning via shielding under partial observability, 2022.
311
+ [Cheng et al., 2019] Richard Cheng, Gábor Orosz, Richard M. Murray, and Joel W. Burdick. End-to-end safe reinforcement learning through barrier functions for safety-critical continuous control tasks. AAAI'19/IAAI'19/EAAI'19. AAAI Press, 2019.
312
+ [Dalal et al., 2018] Gal Dalal, Krishnamurthy Dvijotham, Matej Vecerik, Todd Hester, Cosmin Paduraru, and Yuval Tassa. Safe exploration in continuous action spaces, 2018.
313
+ [Darwiche, 2003] Adnan Darwiche. A differential approach to inference in bayesian networks. J. ACM, 50(3):280-305, may 2003.
314
+ [De Giacomo et al., 2021] Giuseppe De Giacomo, Luca Iocchi, Marco Favorito, and Fabio Patrizi. Foundations for restraining bolts: Reinforcement learning with LTLf/LDLf
315
+
316
+ restraining specifications. Proceedings of the International Conference on Automated Planning and Scheduling, 29(1):128-136, 2021.
317
+ [De Raedt and Kimmig, 2015] Luc De Raedt and Angelika Kimmig. Probabilistic (logic) programming concepts. Mach. Learn., 100(1):5-47, 2015.
318
+ [De Raedt et al., 2016] Luc De Raedt, Kristian Kersting, Sriraam Natarajan, and David Poole. Statistical Relational Artificial Intelligence: Logic, Probability, and Computation, volume 32 of Synthesis Lectures on Artificial Intelligence and Machine Learning. Morgan & Claypool, 2016.
319
+ [Den Hengst et al., 2022] Floris Den Hengst, Vincent François-Lavet, Mark Hoogendoorn, and Frank van Harmelen. Planning for potential: Efficient safe reinforcement learning. Mach. Learn., 111(6):2255-2274, 2022.
320
+ [Fernández and Veloso, 2006] Fernando Fernández and Manuela Veloso. Probabilistic policy reuse in a reinforcement learning agent. AAMAS '06. Association for Computing Machinery, 2006.
321
+ [Fierens et al., 2015] Daan Fierens, Guy Van den Broeck, Joris Renkens, Dimitar Shterionov, Bernd Gutmann, Ingo Thon, Gerda Janssens, and Luc De Raedt. Inference and learning in probabilistic logic programs using weighted Boolean formulas. Theory and Practice of Logic Programming, 15:358-401, 5 2015.
322
+ [García and Fernández, 2015] Javier García and Fernando Fernández. A comprehensive survey on safe reinforcement learning. Journal of Machine Learning Research, 16:1437-1480, 2015.
323
+ [García and Fernández, 2019] Javier García and Fernando Fernández. Probabilistic policy reuse for safe reinforcement learning. ACM Trans. Auton. Adapt. Syst., 13(3), 2019.
324
+ [Giacobbe et al., 2021] Mirco Giacobbe, Mohammadhosein Hasanbeig, Daniel Kroening, and Hjalmar Wijk. Shielding atari games with bounded prescience. In Proceedings of the 20th International Conference on Autonomous Agents and MultiAgent Systems, AAMAS '21, 2021.
325
+ [Harris and Schaub, 2020] Andrew Harris and Hanspeter Schaub. Spacecraft command and control with safety guarantees using shielded deep reinforcement learning. AIAA Scitech 2020 Forum, 1 PartF, 2020.
326
+ [Hasanbeig et al., 2019] M. Hasanbeig, Y. Kantaros, A. Abate, D. Kroening, G. J. Pappas, and I. Lee. Reinforcement learning for temporal logic control synthesis with probabilistic satisfaction guarantees. In 2019 IEEE 58th Conference on Decision and Control (CDC), 2019.
327
+ [Hill et al., 2018] Ashley Hill, Antonin Raffin, Maximilian Ernestus, Adam Gleave, Anssi Kanervisto, Rene Traore, Prafulla Dhariwal, Christopher Hesse, Oleg Klimov, Alex Nichol, Matthias Plappert, Alec Radford, John Schulman, Szymon Sidor, and Yuhuai Wu. Stable baselines. https://github.com/hill-a/stable-baselines, 2018.
328
+
329
+ [Hunt et al., 2021] Nathan Hunt, Nathan Fulton, Sara Magliacane, Trong Nghia Hoang, Subhro Das, and Armando Solar-Lezama. Verifiably safe exploration for end-to-end reinforcement learning. In Proceedings of the 24th International Conference on Hybrid Systems: Computation and Control, HSCC '21, 2021.
330
+ [Jansen et al., 2020] Nils Jansen, Bettina Könighofer, Sebastian Junges, Alex Serban, and Roderick Bloem. Safe reinforcement learning using probabilistic shields. In 31st International Conference on Concurrency Theory, CONCUR 2020, 2020.
331
+ [Jiang et al., 2021] Yuqian Jiang, Suda Bharadwaj, Bo Wu, Rishi Shah, Ufuk Topcu, and Peter Stone. Temporal-logic-based reward shaping for continuing reinforcement learning tasks. Proceedings of the AAAI Conference on Artificial Intelligence, 35(9):7995–8003, 2021.
332
+ [Junges et al., 2021] Sebastian Junges, Nils Jansen, and Sanjit A. Seshia. Enforcing almost-sure reachability in POMDPs. In Computer Aided Verification. Springer International Publishing, 2021.
333
+ [Kalweit et al., 2020] Gabriel Kalweit, Maria Huegle, Moritz Werling, and Joschka Boedecker. Deep Constrained Q-learning. 2020.
334
+ [Karkus et al., 2017] Peter Karkus, David Hsu, and Wee Sun Lee. Qmdp-net: Deep learning for planning under partial observability. In Proceedings of the 31st International Conference on Neural Information Processing Systems, NIPS'17. Curran Associates Inc., 2017.
335
+ [Kimura et al., 2021] Daiki Kimura, Subhajit Chaudhury, Akifumi Wachi, Ryosuke Kohita, Asim Munawar, Michiaki Tatsubori, and Alexander Gray. Reinforcement Learning with External Knowledge by using Logical Neural Networks. 2021.
336
+ [Li and Bastani, 2020] Shuo Li and Osbert Bastani. Robust Model Predictive Shielding for Safe Reinforcement Learning with Stochastic Dynamics. Proceedings - IEEE International Conference on Robotics and Automation, 2020.
337
+ [Memarian et al., 2021] Farzan Memarian, Wonjoon Goo, Rudolf Lioutikov, Ufuk Topcu, and Scott Niekum. Self-Supervised Online Reward Shaping in Sparse-Reward Environments. 2021.
338
+ [Mnih et al., 2016] Volodymyr Mnih, Adrià Puigdomenech Badia, Mehdi Mirza, Alex Graves, Timothy P. Lillicrap, Tim Harley, David Silver, and Koray Kavukcuoglu. Asynchronous methods for deep reinforcement learning. 2016.
339
+ [Moldovan and Abbeel, 2012] Teodor Mihai Moldovan and Pieter Abbeel. Safe exploration in markov decision processes. In Proceedings of the 29th International Coference on International Conference on Machine Learning, ICML'12. Omnipress, 2012.
340
+ [Nitti et al., 2016] Davide Nitti, Tinne De Laet, and Luc De Raedt. Probabilistic logic programming for hybrid relational domains. Machine Learning, 103:407-449, 2016.
341
+ [Pham et al., 2018] Tu-Hoa Pham, Giovanni De Magistris, and Ryuki Tachibana. Optlayer - practical constrained
342
+
343
+ optimization for deep reinforcement learning in the real world. In 2018 IEEE International Conference on Robotics and Automation (ICRA), pages 6236-6243, 2018.
344
+ [Pishro-Nik, 2014] H. Pishro-Nik. Introduction to Probability, Statistics, and Random Processes. Kappa Research, LLC, 2014.
345
+ [Ray et al., 2019] Alex Ray, Joshua Achiam, and Dario Amodei. Benchmarking Safe Exploration in Deep Reinforcement Learning. arXiv preprint, 2019.
346
+ [Robert et al., 1999] Christian P Robert, George Casella, and George Casella. Monte Carlo statistical methods, volume 2. Springer, 1999.
347
+ [Schulman et al., 2015] John Schulman, Sergey Levine, Pieter Abbeel, Michael Jordan, and Philipp Moritz. Trust region policy optimization. In Proceedings of the 32nd International Conference on Machine Learning, Proceedings of Machine Learning Research. PMLR, 2015.
348
+ [Schulman et al., 2017] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms, 2017.
349
+ [Schulman et al., 2018] John Schulman, Philipp Moritz, Sergey Levine, Michael Jordan, and Pieter Abbeel. High-dimensional continuous control using generalized advantage estimation, 2018.
350
+ [Sutton and Barto, 2018] Richard S. Sutton and Andrew G. Barto. Reinforcement Learning: An Introduction. A Bradford Book, 2018.
351
+ [Sutton et al., 2000] Richard S Sutton, David McAllester, Satinder Singh, and Yishay Mansour. Policy gradient methods for reinforcement learning with function approximation. In Advances in Neural Information Processing Systems, volume 12. MIT Press, 2000.
352
+ [Tessler et al., 2019] Chen Tessler, Daniel J. Mankowitz, and Shie Mannor. Reward constrained policy optimization. In 7th International Conference on Learning Representations, ICLR. OpenReview.net, 2019.
353
+ [Verma et al., 2019] Abhinav Verma, Hoang M. Le, Yisong Yue, and Swarat Chaudhuri. Imitation-projected programmatic reinforcement learning. In Proceedings of the 33rd International Conference on Neural Information Processing Systems, 2019.
354
+ [Xu et al., 2018] Jingyi Xu, Zilu Zhang, Tal Friedman, Yitao Liang, and Guy Van den Broeck. A semantic loss function for deep learning with symbolic knowledge. In Proceedings of the 35th International Conference on Machine Learning, ICML'18. PMLR, 2018.
355
+
356
+ # A Experiment settings
357
+
358
+ Experiments are run on machines with Intel(R) Xeon(R) E3-1225 CPUs and 32 GB of memory. All environments in this work are available on GitHub under the MIT license. The measurements are obtained by taking the average over five seeds. All agents are trained using PPO in stable-baselines [Hill et al., 2018] with batch size=512, n_epochs=15, n_steps=2048, clip range=0.1, and learning rate=0.0001. All policy networks and value networks have two hidden layers of size 64. All other hyperparameters are left at their stable-baselines defaults [Hill et al., 2018]. The source code will be made available upon publication of the paper.
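+
+ For reference, the listed hyperparameters correspond to a PPO configuration along the following lines (a sketch: the parameter names match Stable-Baselines3's PPO, which is our assumption about the exact library version, and the CartPole environment is only a stand-in for the custom environments described below).
+
+ ```python
+ import gymnasium as gym
+ from stable_baselines3 import PPO
+
+ env = gym.make("CartPole-v1")          # stand-in; the paper uses its own environments
+ model = PPO(
+     "MlpPolicy", env,
+     n_steps=2048, batch_size=512, n_epochs=15,
+     clip_range=0.1, learning_rate=1e-4,
+     policy_kwargs=dict(net_arch=[64, 64]),  # two hidden layers of size 64
+ )
+ model.learn(total_timesteps=600_000)   # 600k learning steps, as in the experiments
+ ```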
359
+
360
+ # A.1 Stars
361
+
362
+ ![](images/076ce4dc27cf93a9c60e8f13ee929996548458117d930513315aa34e4c3a609a.jpg)
363
+ Figure 7: Left: A Stars image. Middle: A Stars state (downsampled from the left). Right: An example used to pre-train the noisy sensors.
364
+
365
+ ![](images/d3d642d739e55d657003cd8b91f9a01e3ad0f84c7d7e5015fc4e609a2a6c5f7c.jpg)
366
+
367
+ ![](images/5efe4c2734dc29d9f4a6affc0c50e0f7f31ee9f484368e4663819cf0394a184a.jpg)
368
+
369
+ We build the Stars environments using Berkeley's Pac-Man Project $^{2}$ . Our environment is a 15 by 15 grid world containing stars and fires, e.g. Fig. 7, where the agent can move around using five discrete, deterministic actions: stay, up, down, left, right. Each action costs a negative reward of $-0.1$ . The task of the agent is to collect all stars in the environment. Collecting one star yields a reward of 1. When the agent finds all stars, the episode ends with a reward of 10. If the agent crashes into fire or has taken 200 steps (maximum episode length), the episode ends with no rewards. Each state is a downsampled, grayscale image where each pixel is a value between -1 and 1, e.g. Fig. 7 (right).
370
+
371
+ We use the following probabilistic logic shield. The act/1 predicates represent the base policy and the fire/2 predicates represent whether there is fire in an immediately neighboring grid cell. These predicates are neural predicates, meaning that all probabilities $\mathbf{a_i}$ (resp. $\mathbf{f_i}$ ) will be substituted by real values in [0, 1] produced by the underlying base policy (resp. by perfect or noisy sensors). In Fig. 7 (right), the sensors may produce $\{0::\text{fire}(0,1), 0::\text{fire}(0,-1), 1::\text{fire}(-1,0), 0::\text{fire}(1,0)\}$ .
372
+
373
+ % BASE POLICY
374
+
375
+ a0::act(stay); a1::act(up);
376
+
377
+ a2::act(down); a3::act(left);
378
+
379
+ a4::act(right).
380
+
381
+ % SENSORS
382
+
383
+ f0::fire(0,1). f1::fire(0,-1).
384
+
385
+ f2::fire(-1, 0). f3::fire(1, 0).
386
+
387
+ % SAFETY LOOKAHEAD
388
+
389
+ xagent(stay, 0, 0). xagent(left, -1, 0).
390
+
391
+ xagent(right, 1, 0). xagent(up, 0, 1).
392
+
393
+ xagent(down, 0, -1).
394
+
395
+ % SAFETY CONSTRAINT
396
+
397
+ crash:- act(A),
398
+
399
+ xagent(A, X, Y), fire(X, Y).
400
+
401
+ safe :- not(crash).
402
+
403
+ # A.2 Pacman
404
+
405
+ Pacman is more complicated than Stars in that the fire ghosts can move around and their transition models are unknown. All other environmental parameters are the same as in Stars.
406
+
407
+ We use the following probabilistic logic shield to perform a two-step look-ahead. The agent has 12 sensors to detect whether there is a fire ghost one or two units away. The agent is assumed to follow the policy at $T = 0$ and then do nothing at $T = 1$ . The fire ghosts are assumed to be able to move to any immediate neighboring cells at $T = 0$ and $T = 1$ . A crash can occur at $T = 0$ or $T = 1$ .
408
+
409
+ % BASE POLICY
410
+
411
+ a0::act(stay); a1::act(up);
412
+
413
+ a2::act(down); a3::act(left);
414
+
415
+ a4::act(right).
416
+
417
+ % SENSORS
418
+
419
+ f0:: fire(0, 0, 1). f1:: fire(0, 0,-1).
420
+
421
+ f2:: fire(0, -1, 0). f3:: fire(0, 1, 0).
422
+
423
+ f4:: fire(0, 0, 2). f5:: fire(0, 0,-2).
424
+
425
+ f6:: fire(0,-2, 0). f7:: fire(0, 2, 0).
426
+
427
+ f8:: fire(0, 1, 1). f9:: fire(0, 1,-1).
428
+
429
+ f10::fire(0,-1,1). f11::fire(0,-1,-1).
430
+
431
+ % SAFETY LOOKAHEAD
432
+
433
+ fire(T, X, Y):-
+     fire(T-1, PrevX, PrevY),
+     move(PrevX, PrevY, _, X, Y).
437
+
438
+ agent(1, X, Y):- act(A), move(0, 0, A, X, Y).
439
+
440
+ agent(2, X, Y):- agent(1, X, Y).
441
+
442
+ move(X, Y, stay, X, Y).
443
+
444
+ move(X,Y,left,X-1,Y).
445
+
446
+ move(X, Y, right, X+1, Y).
447
+
448
+ move(X, Y, up, X, Y+1).
449
+
450
+ move(X, Y, down, X, Y-1).
451
+
452
+ % SAFETY CONSTRAINT
453
+
454
+ crash:- fire(T, X, Y), agent(T, X, Y).
455
+
456
+ safe :- not(crash).
457
+
458
+ # A.3 Car Racing
459
+
460
+ We simplify Gym's Car Racing environment [Brockman et al., 2016]. Our environment is a randomly generated car track, e.g. Fig. 8 (left). At the beginning, the agent will be
461
+
462
+ ![](images/3e89331ddf479c2606fc056d63edf8387590b5a259938a5ab2295319ab8a1840.jpg)
463
+ Figure 8: Left: A lap in Car Racing (Configuration 1). Middle: A CR image. We mark the sensors around the agent in white, but they are not visible during training. Right: A CR state (downsampled from the middle).
464
+
465
+ put on the track, e.g. Fig. 8 (middle). The task of the agent is to drive around the track (i.e. to finish one lap) within 1000 frames. In each frame, the agent takes a discrete action: do-nothing, accelerate, brake, turn-left or turn-right. Each action costs a negative reward of $-0.1$ . The agent gets a small reward if it continues to follow the track. The agent does not get a penalty for driving on the grass; however, if the car drives outside of the map (i.e. the black part in Fig. 8, left), the episode ends with a negative reward of $-100$ . Each state is a downsampled, grayscale image where each pixel is a value between $-1$ and $1$ , e.g. Fig. 8 (right).
466
+
467
+ The original environment has a continuous action space that consists of three parameters: steering, gas and brake, with minimum values $[-1,0,0]$ and maximum values $[+1, +1, +1]$ . In this paper, all agents use five discrete actions: do-nothing ([0,0,0]), accelerate ([0,1,0]), brake ([0,0,0.8]), turn-left $([-1,0,0])$ , turn-right ([1,0,0]).
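+
+ A wrapper realizing this discretization might look as follows (a sketch under the stated action mapping; the wrapper name and the use of the Gymnasium API are our assumptions).
+
+ ```python
+ import gymnasium as gym
+ import numpy as np
+
+ class DiscreteCarActions(gym.ActionWrapper):
+     """Maps the five discrete actions to Car Racing's continuous [steer, gas, brake]."""
+     _CONTROLS = {
+         0: [ 0.0, 0.0, 0.0],   # do-nothing
+         1: [ 0.0, 1.0, 0.0],   # accelerate
+         2: [ 0.0, 0.0, 0.8],   # brake
+         3: [-1.0, 0.0, 0.0],   # turn-left
+         4: [ 1.0, 0.0, 0.0],   # turn-right
+     }
+     def __init__(self, env):
+         super().__init__(env)
+         self.action_space = gym.spaces.Discrete(5)
+     def action(self, act):
+         return np.array(self._CONTROLS[int(act)], dtype=np.float32)
+ ```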
468
+
469
+ We use the following probabilistic logic shield. The act/1 predicates represent the base policy and the grass/1 predicates represent whether there is grass in front of, to the left of, or to the right of the agent. In Fig. 8 (middle), the sensors may produce $\{0::\text{grass}(0), 0::\text{grass}(1), 0::\text{grass}(2)\}$ .
470
+
471
+ % BASE POLICY
472
+
473
+ a0::act(dn); a1::act(accel);
474
+
475
+ a2::act(brake);
476
+
477
+ a3::act(left); a4::act(right).
478
+
479
+ % SENSORS
480
+
481
+ g0::grass(front). g1::grass(left).
482
+
483
+ g2::grass(right).
484
+
485
+ % SAFETY LOOKAHEAD
486
+
487
+ ingrass:-
488
+
489
+ grass(left), not(grass(right)), (act(left); act(accel)).
490
+
491
+ ingrass:-
492
+
493
+ not(grass(left)), grass(right), (act(right); act(accel)).
494
+
495
+ % SAFETY CONSTRAINT
496
+
497
+ safe:- not(ingrass).
498
+
499
+ # A.4 Approximating Noisy Sensors
500
+
501
+ We approximate noisy sensors using convolutional neural networks. In all the environments, we use 4 convolutional
502
+
503
+ layers with 8, 16, 32 and 64 $5\times 5$ filters, respectively, and ReLU activations. The output is computed by a dense layer with a sigmoid activation function. The number of output neurons depends on the ProbLog program for that experiment (see Appendix A.1). We pre-train the networks and then fix them during the reinforcement learning process. The pre-training strategy is the following. We randomly generated 3k images for Stars and Pacman with 30 fires and 30 stars, e.g. Fig. 7 (right), and 2k images for Car Racing, e.g. Fig. 8 (right). We selected the size of the pre-training datasets so as to achieve an accuracy higher than $99\%$ on a validation set of 100 examples.
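+
+ In PyTorch, the described sensor network corresponds roughly to the sketch below (our reconstruction from the description; strides, padding and pooling are not specified in the text, so defaults are assumed).
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SensorCNN(nn.Module):
+     """Four conv layers with 8/16/32/64 5x5 filters and ReLU, sigmoid outputs."""
+     def __init__(self, n_outputs):
+         super().__init__()
+         self.features = nn.Sequential(
+             nn.Conv2d(1, 8, 5), nn.ReLU(),
+             nn.Conv2d(8, 16, 5), nn.ReLU(),
+             nn.Conv2d(16, 32, 5), nn.ReLU(),
+             nn.Conv2d(32, 64, 5), nn.ReLU(),
+         )
+         self.head = nn.LazyLinear(n_outputs)      # dense layer; input size inferred
+
+     def forward(self, x):                         # x: (batch, 1, H, W) grayscale states
+         return torch.sigmoid(self.head(self.features(x).flatten(1)))
+ ```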
504
+
505
+ # B Safety Guarantees of Probabilistic Logic Shield
506
+
507
+ In Definition 3.1, a shielded policy $\pi^{+}$ is always safer than its base policy $\pi$ for all $s$ and $\pi$ , i.e.
508
+
509
+ $$
510
+ \mathbf{P}_{\pi^{+}}(\text{safe} | s) \geq \mathbf{P}_{\pi}(\text{safe} | s) \quad \forall \pi \in \Pi \text{ and } \forall s \in S
511
+ $$
512
+
513
+ Proof.
514
+
515
+ $$
+ \begin{aligned}
+ \mathbf{P}_{\pi^{+}}(\text{safe} | s) &= \sum_{a \in A} \pi^{+}(a | s)\, \mathbf{P}(\text{safe} | s, a) && \triangleright \text{ Eq. (3)} \\
+ &= \frac{1}{\mathbf{P}_{\pi}(\text{safe} | s)} \sum_{a \in A} \pi(a | s)\, \mathbf{P}(\text{safe} | s, a)^{2} && \triangleright \text{ Eq. (4)} \\
+ &\geq \frac{1}{\mathbf{P}_{\pi}(\text{safe} | s)} \Big[ \sum_{a \in A} \pi(a | s)\, \mathbf{P}(\text{safe} | s, a) \Big]^{2} && \triangleright \text{ Jensen's inequality} \\
+ &= \mathbf{P}_{\pi}(\text{safe} | s) && \triangleright \text{ Eq. (3)}
+ \end{aligned}
+ $$
524
+
525
+ The inequality comes from Jensen's inequality [Pishro-Nik, 2014], which states that $\mathbb{E}[g(X)] \geq g(\mathbb{E}[X])$ for any convex function $g(X)$ . In this proof, $g(X) = X^2$ .
526
+
527
+ # C A Compiled Circuit
528
+
529
+ The following program is compiled to the circuit in Fig. 9.
530
+
531
+ 0.2::act(dn); 0.6::act(left);
532
+
533
+ 0.2::act(right).
534
+
535
+ 0.8::ghost(left). 0.1::ghost(right).
536
+
537
+ crash:- act(left), ghost(left).
538
+
539
+ crash:- act(right), ghost(right).
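+
+ For this small program, the probability of safe can also be checked by hand, since the two crash-causing situations are mutually exclusive through the annotated disjunction over act/1 (a worked check on our part, not part of the paper):
+
+ ```python
+ # act(left) and act(right) are mutually exclusive outcomes of the annotated
+ # disjunction, so the two crash clauses can never fire in the same world.
+ p_crash = 0.6 * 0.8 + 0.2 * 0.1   # act(left),ghost(left) + act(right),ghost(right)
+ p_safe = 1.0 - p_crash            # = 0.50
+ print(p_safe)
+ ```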
540
+
541
+ <table><tr><td></td><td>Description</td><td>Return Range</td><td>Violation Range</td><td>α (perfect sensors)</td><td>ε (noisy sensors)</td><td>α (noisy sensors)</td></tr><tr><td>Stars1</td><td>Deterministic actions</td><td>[0, 45]</td><td>[0, 15k]</td><td>0.5</td><td>0.005</td><td>1</td></tr><tr><td>Stars2</td><td>Stochastic actions</td><td></td><td></td><td>1</td><td>0.01</td><td>1</td></tr><tr><td>Pacman1</td><td>One ghost</td><td>[0, 40]</td><td>[0, 7k]</td><td>0.1</td><td>0.05</td><td>0.5</td></tr><tr><td>Pacman2</td><td>Two ghosts</td><td></td><td>[0, 15k]</td><td>0.5</td><td>0.005</td><td>0.1</td></tr><tr><td>Car Racing1</td><td>Smoother track</td><td>[0, 900]</td><td>[0, 1]</td><td>0.5</td><td>0.5</td><td>1</td></tr><tr><td>Car Racing2</td><td>Curlier track</td><td></td><td></td><td>0.1</td><td>0.5</td><td>0.5</td></tr></table>
542
+
543
+ Table 2: Summary of configurations.
544
+
545
+ ![](images/343365a7ae08322684f80a81e998befe578529ce7e34439beff16e7ed040263b.jpg)
546
+ Figure 9: A circuit compiled from a program.
547
+
548
549
+
550
+ # D Experiment Details
551
+
552
+ We run two configurations for each domain. The second configuration is more challenging than the first one. The configurations may have different normalization ranges and hyperparameters. We select the hyperparameters that result in the lowest violations in each configuration. Table 2 lists a summary of all configurations, and all other tables follow this table unless explicitly specified otherwise.
553
+
554
+ We train all agents in all configurations using 600k learning steps and all experiments are repeated using five different seeds. The following tables list the normalized, average return and violation values of the five runs. Table 3 lists the return/violation results of all agents (cf. the large data points in Fig. 4). Table 4 lists the return/violation results for PLPG gradient analysis (cf. the large data points in Fig. 6). Tables 5 and 6 respectively list the return/violation results for $\alpha \in \{0,0.1,0.5,1,5\}$ and $\epsilon \in \{0,0.005,0.01,0.05,0.1,0.2,0.5,1.0\}$ (cf. Fig. 5).
555
+
556
+ We plot the episodic return, cumulative violation and step-wise policy safety over the learning process for all agents ( $\mathbf{P}_{\pi^{+}}(\text{safe}|s)$ for shielded agents and $\mathbf{P}_{\pi}(\text{safe}|s)$ for PPO). Fig. 10 plots these curves for the agents augmented with perfect sensors and Fig. 11 for the ones with noisy sensors. PPO agents are plotted in both figures as a baseline, but they are not augmented with any sensors. The return and policy safety curves are smoothed by taking an exponential moving average with a coefficient of 0.05.
557
+
558
+ <table><tr><td rowspan="2">Return/Violation</td><td>No Safety</td><td colspan="2">Perfect Sensors</td><td colspan="3">Noisy Sensors</td></tr><tr><td>PPO</td><td>VSRL</td><td>PLPG</td><td>VSRL</td><td>ε-VSRL</td><td>PLPG</td></tr><tr><td>Stars1</td><td>0.68 / 0.90</td><td>0.57 / 0.00</td><td>0.71 / 0.00</td><td>0.61 / 0.00</td><td>0.64 / 0.03</td><td>0.74 / 0.01</td></tr><tr><td>Stars2</td><td>0.50 / 0.89</td><td>0.43 / 0.58</td><td>0.44 / 0.42</td><td>0.43 / 0.59</td><td>0.46 / 0.52</td><td>0.46 / 0.35</td></tr><tr><td>Pacman1</td><td>0.74 / 0.92</td><td>0.72 / 0.74</td><td>0.85 / 0.65</td><td>0.71 / 0.72</td><td>0.75 / 0.65</td><td>0.71 / 0.65</td></tr><tr><td>Pacman2</td><td>0.56 / 0.82</td><td>0.56 / 0.62</td><td>0.59 / 0.58</td><td>0.46 / 0.67</td><td>0.51 / 0.62</td><td>0.62 / 0.59</td></tr><tr><td>Car Racing1</td><td>0.77 / 0.60</td><td>0.74 / 0.84</td><td>0.62 / 0.21</td><td>0.83 / 0.82</td><td>0.88 / 0.47</td><td>0.83 / 0.18</td></tr><tr><td>Car Racing2</td><td>0.58 / 0.55</td><td>0.55 / 0.67</td><td>0.65 / 0.17</td><td>0.67 / 0.51</td><td>0.70 / 0.45</td><td>0.74 / 0.17</td></tr></table>
559
+
560
+ Table 3: Normalized return and violation for all agents (cf. the large data points in Fig. 4).
561
+
562
+ <table><tr><td rowspan="2">Return/Violation</td><td colspan="3">Perfect Sensors</td><td colspan="3">Noisy Sensors</td></tr><tr><td>Only Safety Grad</td><td>Only Policy Grad</td><td>Both</td><td>Only Safety Grad</td><td>Only Policy Grad</td><td>Both</td></tr><tr><td>Stars1</td><td>0.63 / 0.52</td><td>0.66 / 0.00</td><td>0.71 / 0.00</td><td>0.61 / 0.47</td><td>0.95 / 0.29</td><td>0.74 / 0.01</td></tr><tr><td>Stars2</td><td>0.43 / 0.60</td><td>0.51 / 0.53</td><td>0.44 / 0.42</td><td>0.44 / 0.62</td><td>0.46 / 0.62</td><td>0.46 / 0.35</td></tr><tr><td>Pacman1</td><td>0.72 / 0.89</td><td>0.87 / 0.65</td><td>0.85 / 0.65</td><td>0.54 / 0.88</td><td>0.84 / 0.64</td><td>0.71 / 0.65</td></tr><tr><td>Pacman2</td><td>0.51 / 0.79</td><td>0.66 / 0.58</td><td>0.59 / 0.58</td><td>0.39 / 0.78</td><td>0.60 / 0.58</td><td>0.62 / 0.59</td></tr><tr><td>Car Racing1</td><td>1.00 / 0.14</td><td>0.91 / 0.58</td><td>0.62 / 0.21</td><td>0.86 / 0.25</td><td>0.93 / 0.42</td><td>0.83 / 0.18</td></tr><tr><td>Car Racing2</td><td>0.67 / 0.20</td><td>0.76 / 0.32</td><td>0.65 / 0.17</td><td>0.56 / 0.29</td><td>0.70 / 0.39</td><td>0.74 / 0.17</td></tr></table>
563
+
564
+ Table 4: Analysis of PLPG gradients (cf. the large data points in Fig. 6).
565
+
566
+ <table><tr><td>α</td><td>0</td><td>0.1</td><td>0.5</td><td>1</td><td>5</td></tr><tr><td>Stars1</td><td>0.95 / 0.29</td><td>0.68 / 0.01</td><td>0.68 / 0.01</td><td>0.74 / 0.01</td><td>0.17 / 0.50</td></tr><tr><td>Stars2</td><td>0.46 / 0.62</td><td>0.53 / 0.44</td><td>0.51 / 0.45</td><td>0.46 / 0.35</td><td>0.34 / 0.37</td></tr><tr><td>Pacman1</td><td>0.84 / 0.64</td><td>0.79 / 0.66</td><td>0.71 / 0.65</td><td>0.56 / 0.69</td><td>0.31 / 0.77</td></tr><tr><td>Pacman2</td><td>0.60 / 0.58</td><td>0.62 / 0.59</td><td>0.50 / 0.62</td><td>0.43 / 0.62</td><td>0.27 / 0.67</td></tr><tr><td>Car Racing1</td><td>0.93 / 0.42</td><td>0.99 / 0.19</td><td>0.82 / 0.19</td><td>0.83 / 0.18</td><td>0.47 / 0.52</td></tr><tr><td>Car Racing2</td><td>0.70 / 0.39</td><td>0.84 / 0.21</td><td>0.74 / 0.17</td><td>0.60 / 0.34</td><td>0.21 / 0.77</td></tr></table>
567
+
568
+ Table 5: The return and violation of different $\alpha$ values in noisy environments (cf. Fig. 5).
569
+
570
+ <table><tr><td>ε</td><td>0 (VSRL)</td><td>0.005</td><td>0.01</td><td>0.05</td><td>0.1</td><td>0.2</td><td>0.5</td><td>1 (PPO)</td></tr><tr><td>Stars1</td><td>0.61 / 0.00</td><td>0.64 / 0.03</td><td>0.69 / 0.07</td><td>0.76 / 0.30</td><td>0.69 / 0.54</td><td>0.76 / 0.65</td><td>0.68 / 0.80</td><td>0.68 / 0.90</td></tr><tr><td>Stars2</td><td>0.43 / 0.59</td><td>0.46 / 0.59</td><td>0.46 / 0.52</td><td>0.46 / 0.62</td><td>0.48 / 0.67</td><td>0.47 / 0.78</td><td>0.49 / 0.85</td><td>0.50 / 0.89</td></tr><tr><td>Pacman1</td><td>0.71 / 0.72</td><td>0.64 / 0.81</td><td>0.66 / 0.80</td><td>0.75 / 0.65</td><td>0.66 / 0.86</td><td>0.72 / 0.71</td><td>0.77 / 0.84</td><td>0.74 / 0.92</td></tr><tr><td>Pacman2</td><td>0.46 / 0.67</td><td>0.51 / 0.62</td><td>0.48 / 0.67</td><td>0.52 / 0.65</td><td>0.51 / 0.68</td><td>0.53 / 0.70</td><td>0.54 / 0.74</td><td>0.56 / 0.82</td></tr><tr><td>Car Racing1</td><td>0.83 / 0.82</td><td>0.65 / 0.65</td><td>0.61 / 0.84</td><td>0.67 / 0.80</td><td>0.75 / 0.72</td><td>0.66 / 0.73</td><td>0.88 / 0.47</td><td>0.77 / 0.60</td></tr><tr><td>Car Racing2</td><td>0.67 / 0.51</td><td>0.36 / 0.62</td><td>0.67 / 0.75</td><td>0.43 / 0.86</td><td>0.63 / 0.57</td><td>0.52 / 0.95</td><td>0.70 / 0.45</td><td>0.58 / 0.55</td></tr></table>
571
+
572
+ Table 6: The return and violation of different $\epsilon$ values in noisy environments (cf. Fig. 5).
573
+
574
+ ![](images/bf6e2a325a5eef30f98b1935ba77635a05a730871b3f3d573b175688bcc47b6d.jpg)
575
+ (Legend: PPO, VSRL, PLPG)
576
+
577
+ ![](images/a7e08fdab44a8c69f97cbc2ececce0162017c1d6a966e2ccd7b9cca3a91d49f0.jpg)
578
+
579
+ ![](images/b30f80498f377dba626836b9a889675e358c1bdfe12d0e53ad1e08534825c6d1.jpg)
580
+ Figure 10: Performance of agents augmented with perfect sensors. Left: The average episodic return. Middle: The cumulative episodic constraint violation. Right: The step-wise policy safety, i.e. $P_{\pi}(\text{safe}|s)$ .
581
+
582
+ ![](images/a142249d963aa6463b210922d8b9dc6d00ac8c9e37f21ef26bacf2ede9a5a9f3.jpg)
583
+
584
+ ![](images/ff3aa1b7e1aa48ec61ab48f8b6216c22d1e5848729b3805ef081e07d23ff6534.jpg)
585
+
586
+ ![](images/3610717c723fccc1b3696474931502b419f12fe4fa4ea99ed8ea4dc202a60087.jpg)
587
+
588
+ ![](images/34c5427f65d4629320ccec8e7da748f0551df37e44fdb660a1d3e2c4a4a500f4.jpg)
589
+
590
+ ![](images/c7c775d6c8f10bbf982ac2723cc9ed9c5efb3f59805a2c1a23ed2d5efbaff3de.jpg)
591
+
592
+ ![](images/a2b5e49ee6f7bafc5d82b181c58a71e0c7bc7759128137eec64107c4894f1c08.jpg)
593
+
594
+ ![](images/de11cf34660fc5e0a94f59fd050e1ebcc3fb9852ea339201100d8f18a492c8a2.jpg)
595
+
596
+ ![](images/93048458f83c5c7355033795596b8b6fb553da88e69dffb3f67aede2807518ed.jpg)
597
+
598
+ ![](images/2ea5f8210bee224b7a5ea71e77ad375a7a4b7d9d2f080511a152cb5cda229110.jpg)
599
+
600
+ ![](images/d44beb40d7ce0b17111a1ff7508bb2619593629b70d570d5a3df7617c08565cc.jpg)
601
+
602
+ ![](images/edab12f04948d48bb62b94f5d25ee4e7448ab29435cf3d60e3058efa71e0303b.jpg)
603
+
604
+ ![](images/3879fcb8f04fad6e74bc44b8ad64ca39ef2fb0e72955f234d9f58e438e05531a.jpg)
605
+
606
+ ![](images/e0a077e8cce96f7f56f1c2bd26b4dceeceaeb04a0ae19ee40411c376e3ade0c6.jpg)
607
+
608
+ ![](images/1f32f4c3595bf06ccd313c37eb738a7ad742a74a74aef2f196226376fc26c7e0.jpg)
609
+
610
+ ![](images/eb6e66759cc6148e8c1a22ac5edbe0f3febb0333e6307f030e2aa0cbfbfea1d2.jpg)
611
+
612
+ Figure 11: Performance of agents augmented with noisy sensors. Left: The average episodic return. Middle: The cumulative episodic constraint violation. Right: The step-wise policy safety, i.e. $P_{\pi}(\text{safe}|s)$ .
613
+ ![](images/f1a9b70b6eff8deac70c86211f4d5ecd18e57353b12e1793c2b98f1728322f83.jpg)
614
+ (Legend: PPO, VSRL, $\epsilon$ -VSRL, PLPG)
615
+
616
+ ![](images/af77eb71be1ccb733cd1e2fde6f87b318db29b598690dd8b5bfa6efe20f19573.jpg)
617
+
618
+ ![](images/bcc50ef789ca44635623662c9380eae12461713429a025dce3e8caaf847315ba.jpg)
619
+
620
+ ![](images/0833ff6e89ef03fdd28277e4fc1a7cce3952003f73597228144c27b68e9c09c3.jpg)
621
+
622
+ ![](images/d0a295aab1cbec00dcc617ef10ea175ec38a20e24e0ca841fd56bef3f1e5fb0d.jpg)
623
+
624
+ ![](images/5e79017a2a2cc43b8f40890b8c1958aa3bb74b67312e3d599ad1cf821f431e59.jpg)
625
+
626
+ ![](images/6045a025a4092173d1f7da9adbdc462c13f6c230bbcf8946de8d09eba99531ad.jpg)
627
+
628
+ ![](images/c4befb32d69db8cb1d8eca670964574b05e8dab62ac0d144b2e1116b13be2772.jpg)
629
+
630
+ ![](images/e30ab53acfa8892b32f00033e1e8d8a235fcf4d8241659570904519168a8b92b.jpg)
631
+
632
+ ![](images/5fc85c001cb6aaefb8694675136063a61b6b1f9f65b0e8d4442e89d38951793e.jpg)
633
+
634
+ ![](images/f54b1a37d67be80ca854846d62a8e968d8d7a38c4a5f388e3b888dfdcf49e9c0.jpg)
635
+
636
+ ![](images/1ddab13d4680b505a2b7b27a60e8a3d3c9a5cc41d3a89724d38ebaf66b521816.jpg)
637
+
638
+ ![](images/829488dc97758446f4e660d3231f0c9e8fcbdcbc85cc17ba042f1f98c502815a.jpg)
639
+
640
+ ![](images/b0bb3203d29fda918cd527ac65a03298e6df80389f94c7d1284e393d8746e79c.jpg)
641
+
642
+ ![](images/d9c44c4a418062c376bc4120bdd2ccd803d1a0cecc788ce6a8e4d79c5f9965cc.jpg)
643
+
644
+ ![](images/9e23a32f63acbcde7a7ebf030315b17ca2d7d41f98d7e3e820b593fe9024e09e.jpg)
645
+
646
+ ![](images/c8f66585ab185b1bdc054d45e05b01b0928eb18ae809840afd96cb51a0519330.jpg)
647
+
648
+ ![](images/d0cb13f7fec1bb4ec4af691034c58d783d8d3b9a931fbad54d50a6221726c32a.jpg)
2303.03xxx/2303.03226/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af22bafbb85e81c895af20816c7de3d2bded50788ca3c67b07a19ab3fb6e7eb7
3
+ size 902751
2303.03xxx/2303.03226/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03278/fb3d2e2e-3027-4b46-be1a-86d15f2ff2a2_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03278/fb3d2e2e-3027-4b46-be1a-86d15f2ff2a2_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03278/fb3d2e2e-3027-4b46-be1a-86d15f2ff2a2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ed1080273aeb7716d73168a4d1d95050d532c3a84ea3df9afeb1a8e935bd675a
3
+ size 1346388
2303.03xxx/2303.03278/full.md ADDED
@@ -0,0 +1,469 @@
 
1
+ # Faithfulness-Aware Decoding Strategies for Abstractive Summarization
2
+
3
+ David Wan $^{1*}$ Mengwen Liu $^{2}$ Kathleen McKeown $^{3}$ Markus Dreyer $^{2\dagger}$ Mohit Bansal $^{1,2\dagger}$
4
+
5
+ $^{1}$ UNC Chapel Hill $^{2}$ Amazon Alexa AI $^{3}$ AWS AI Labs
6
+
7
+ {davidwan,mbansal}@cs.unc.edu
8
+
9
+ {mengwliu, mckeownk, mddreyer, mobansal}@amazon.com
10
+
11
+ # Abstract
12
+
13
+ Despite significant progress in understanding and improving faithfulness in abstractive summarization, the question of how decoding strategies affect faithfulness is less studied. We present a systematic study of the effect of generation techniques such as beam search and nucleus sampling on faithfulness in abstractive summarization. We find a consistent trend where beam search with large beam sizes produces the most faithful summaries while nucleus sampling generates the least faithful ones. We propose two faithfulness-aware generation methods to further improve faithfulness over current generation techniques: (1) ranking candidates generated by beam search using automatic faithfulness metrics and (2) incorporating lookahead heuristics that produce a faithfulness score on the future summary. We show that both generation methods significantly improve faithfulness across two datasets as evaluated by four automatic faithfulness metrics and human evaluation. To reduce computational cost, we demonstrate a simple distillation approach that allows the model to generate faithful summaries with just greedy decoding. $^{1}$
14
+
15
+ # 1 Introduction
16
+
17
+ Recent developments in large pre-trained language models have achieved remarkable performance on abstractive summarization (Lewis et al., 2020; Zhang et al., 2020a). However, such models often suffer from the problem of hallucinations, where the generated summary contains facts or entities not present in the original document. Prior research has analyzed and defined potential error types and typology (Maynez et al., 2020; Pagnoni
18
+
19
+ et al., 2021; van der Poel et al., 2022), and developed methods to improve faithfulness, including post-processing models (Chen et al., 2021b; Dong et al., 2020; Liu and Liu, 2021; Ladhak et al., 2022) and faithfulness-aware training (Goyal and Durrett, 2021; Nan et al., 2021; Cao and Wang, 2021; Wan and Bansal, 2022; Zhang et al., 2022; Xiao and Carenini, 2022).
20
+
21
+ One aspect that is less understood on faithfulness of abstractive summarization is the effect of decoding strategies, which determine how the model generates the output strings. Our primary objective is to understand whether different types of exploration of the search space, such as traversing and maintaining multiple possible output hypotheses with beam search or encouraging diversity with nucleus sampling (Holtzman et al., 2020), have an impact on faithfulness. To this end, we first conduct a thorough analysis comparing the faithfulness of popular decoding strategies, including greedy decoding, beam search, and nucleus sampling for two popular summarization datasets XSum (Narayan et al., 2018) and CNN/DM (Hermann et al., 2015). Evaluating the generated summaries using four faithfulness metrics, including BertScore (Zhang et al., 2020b), FactCC (Kryscinski et al., 2020), DAE (Goyal and Durrett, 2021), and QuestEval (Scialom et al., 2021), and human evaluation, we find a consistent trend that beam search provides the most faithful summaries with its large exploration of the search space, and the randomness introduced by sampling hurts faithfulness.
22
+
23
+ To further improve faithfulness beyond the common decoding strategies, we propose two faithfulness-aware decoding methods. First, similar to Falke et al. (2019), we make use of the multiple candidates generated by beam search and propose a simple re-ranker, which selects the best summary according to a faithfulness metric. Instead of using a specific metric, we rank and select the summaries with a composite metric, a weighted
24
+
25
+ ![](images/9c8d1af468ddb400e34ca6d5a4d00b4659fffafd0345304ff5943bf71817ec6f.jpg)
26
+ Figure 1: Illustration of our proposed decoding methods. 1a shows our ranker that re-ranks the candidates produced by beam search according to faithfulness metrics. The first summary achieves a high score and would be used as the final summary for beam search, but it is not faithful. Our ranker ensures that the more faithful summary is ranked higher. 1b shows the lookahead heuristics that provide a faithfulness score given the full future summary. The model assigns a higher score to the word "World" than "British". However, by looking ahead we know that the completed summary following the most likely token will result in an unfaithful summary. Hence, the lookahead heuristics will ensure selecting the token "British" so that the resulting summary will be faithful.
27
+
28
+ combination of popular faithfulness metrics. Next, inspired by Lu et al. (2022), we propose a faithfulness heuristic that looks into the future to generate a full summary starting with the current tokens of any partially generated summary so as to provide a faithfulness score of the future summary during generation. The added heuristic ensures that the selected tokens will lead to a more faithful path in the search space. Compared to the baseline decoding strategies we analyzed, the two proposed methods significantly improve faithfulness as evaluated by four automatic faithfulness metrics and further confirmed by human evaluation.
29
+
30
+ Finally, to overcome the computational and runtime overhead of our proposed decoding methods, we explore distillation to transfer the knowledge of generating faithful summaries from a teacher model to a student model. Specifically, we use the faithfulness-aware decoding strategies as the teacher model to generate reference summaries. Then, we train student models, which have not been fine-tuned on the original task, to imitate the more faithful generation techniques using an additional cross-entropy loss between the generated summaries by the student and teacher models. Results indicate that the student model is able to generate summaries of similar faithfulness to that of the full teacher model while reducing the decoding time (seconds per example) up to 1/6 of what the teacher model takes. This process can be performed iteratively by using the student model as the teacher for the next iteration (See Figure 2). With each iteration, the new student model is able to
31
+
32
+ generate more faithful summaries, and outperform the original teacher model with just two iterations. To summarize, our contributions are:
33
+
34
+ 1. An analysis of the effect of popular decoding strategies, including greedy, beam, and nucleus sampling, on the faithfulness of abstractive summarization.
35
+ 2. Two faithfulness-aware generation methods, ranking and lookahead, that improve faithfulness over existing decoding strategies.
36
+ 3. A simple distillation approach that allows a student model to generate faithful summaries with just greedy decoding.
37
+
38
+ # 2 Faithfulness Behavior of Popular Decoding Strategies
39
+
40
+ We first describe our experiment investigating the effect of popular decoding strategies on faithfulness. We wish to primarily investigate whether better exploration of the search space, such as the candidate expansion with beam search, can improve faithfulness, and how randomness introduced through sampling impacts faithfulness. These investigations in turn motivate our more advanced, faithfulness-aware decoding strategies in Section 3.
41
+
42
+ Decoding Strategies (Greedy, Beam, and Nucleus Sampling). For generation, we assume the common left-to-right, auto-regressive setting where the model generates a summary $y$ with $n$ tokens given the input document $x$ :
43
+
44
+ $$
45
+ P(y \mid x) = \prod_{t=1}^{n} p\left(y_{t} \mid y_{1:t-1}, x\right)
46
+ $$
47
+
48
+ The summary tokens are selected according to the decoding strategy. We explore three common decoding strategies: greedy search, beam search, and nucleus sampling (Holtzman et al., 2020). Greedy search selects the most probable next token, $y_{t} = \arg \max_{y} p(y \mid y_{1:t-1}, x)$. Beam search extends greedy search by keeping the top-$k$ hypotheses at each time step, where $k$ is the number of beams. Another approach to decoding is sampling, of which we consider nucleus sampling. Holtzman et al. (2020) surprisingly find that methods that optimize probability, such as beam search, may lead to text degeneration, and thus propose nucleus sampling, a method that randomly samples from the top tokens whose cumulative probability satisfies the threshold $p$. A small $p$ means less randomness and approaches greedy search, while a large $p$ allows for more diverse output.
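To make these strategies concrete, the sketch below (not the paper's code; all names are illustrative) shows greedy and nucleus (top-p) next-token selection over a toy probability vector; beam search would additionally keep the top-$k$ partial hypotheses at each step.

```python
# Minimal sketch of greedy and nucleus (top-p) next-token selection.
# `probs` is a toy next-token distribution over a small vocabulary.
import numpy as np

def greedy_next(probs: np.ndarray) -> int:
    # Greedy search: pick the single most probable token.
    return int(np.argmax(probs))

def nucleus_next(probs: np.ndarray, p: float = 0.9, rng=None) -> int:
    # Nucleus sampling: keep the smallest set of tokens whose cumulative
    # probability reaches p, renormalize, and sample from that set.
    rng = rng or np.random.default_rng()
    order = np.argsort(probs)[::-1]              # tokens sorted by probability
    cutoff = int(np.searchsorted(np.cumsum(probs[order]), p)) + 1
    nucleus = order[:cutoff]
    return int(rng.choice(nucleus, p=probs[nucleus] / probs[nucleus].sum()))

probs = np.array([0.5, 0.3, 0.15, 0.05])
print(greedy_next(probs), nucleus_next(probs, p=0.9))
```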
49
+
50
+ # 3 Faithfulness-Aware Decoding Strategies
51
+
52
+ We hypothesize (and later test and confirm whether it is true in Section 6.1 and Appendix E) that current decoding methods, such as beam search which explores a large space, may not explore the paths that focus on faithfulness directly and effectively. Hence, we propose two faithfulness-aware methods that can be applied on top of the base decoding strategies to modify how the space is explored from two different perspectives: (1) Ranking makes use of the large exploration of beam search and picks the explored path that is most faithful; (2) Lookahead directly guides the search process by adding faithfulness heuristics when selecting the next token starting from the initial decoding process.
53
+
54
+ # 3.1 Ranking with Faithfulness Metrics
55
+
56
+ Since beam search already explores many different suitable candidates during the decoding process, we hypothesize that more faithful summaries exist in the list of possible candidates, even if the model score is not directly optimized towards faithfulness (we show that this is true later in Section 6.1). Thus, we propose to rerank the generated candidates from beam search according to faithfulness metrics.
57
+
58
+ The process is illustrated in Figure 1a. Assuming a beam search with beam size $k$ , we have $k$ summaries generated by the decoding method. We compute a faithfulness metric (details of the metrics are presented in Section 5.2) over all summaries and select the summary that achieves the highest
59
+
60
+ faithfulness score. In the example, the more faithful summary that was originally ranked low according to model score is now ranked as the top summary according to faithfulness.
61
+
62
+ Re-ranking candidates for abstractive summarization has been studied primarily from the informativeness perspective (Ravaut et al., 2022a,b), whereas our focus is on improving faithfulness. Our idea is most similar to Falke et al. (2019), who use NLI models to re-rank. However, their results indicate that NLI performance does not translate into improved faithfulness; their best-ranking model actually increases the proportion of unfaithful top-ranked summaries by $3\%$ after re-ranking. The authors attribute this to domain shift and to NLI models relying on simple heuristics such as lexical matching. We thus explore using faithfulness metrics directly for ranking.
63
+
64
+ Composite Metric. While it is possible to use a single faithfulness metric to rank the candidates, this often over-fits to that particular metric (each metric has its own domain biases and idiosyncrasies) and hurts the faithfulness scores as evaluated by the other metrics. We instead tune a composite metric that aggregates the votes of several popular metrics (see Section 5.4). We use linear regression to assign a weight to each metric and tune the weights on human judgments of faithfulness. We refer the reader to Appendix D and Appendix E for details and ablations of the composite metric.
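As a rough illustration of the ranking step, the following sketch (hypothetical helper names, not the released implementation) selects the beam candidate with the highest composite faithfulness score, where the composite score is a weighted sum of individual metric scores.

```python
# Minimal sketch: re-rank beam-search candidates by a composite faithfulness score.
# `metrics` maps a metric name to a reference-free scorer f(summary, document) -> float;
# `weights` holds the per-metric weights tuned (e.g. via linear regression) on human judgments.
def composite_score(summary, document, metrics, weights):
    return sum(weights[name] * scorer(summary, document) for name, scorer in metrics.items())

def rank_candidates(candidates, document, metrics, weights):
    # Return the candidate summary with the highest composite faithfulness score.
    return max(candidates, key=lambda c: composite_score(c, document, metrics, weights))
```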
65
+
66
+ # 3.2 Lookahead
67
+
68
+ Lu et al. (2022) use lookahead to provide a future constraint satisfaction estimate and show its effectiveness in several constrained generation tasks (commonsense generation, constrained machine translation, table-to-text generation, and constrained question generation). We extend this idea to improve faithfulness of abstractive summarization. Instead of relying on explicit constraints that are available for the constrained generation tasks, we use reference-free faithfulness metrics on the full future summaries as an estimate. Unlike reranking which is constrained by the search space explored by beam search, lookahead allows for exploration of a much larger number of candidates.
69
+
70
+ Figure 1b shows an example of the lookahead. When selecting the next token, the usual decoding scheme would select the word "World" that has the highest probability. However, if we were to follow this path, the resulting summary would introduce
71
+
72
+ ![](images/6206b22ddd12a6728a04a52676e25feb2b85f2e93ef3f8af21174e98a19761e8.jpg)
73
+ Figure 2: Illustration of the iterative distillation process. We train a student model $\theta^{\prime}$ with summaries generated by the teacher model $\theta$ , which uses faithfulness-aware decoding methods. The resultant student model $\theta^{\prime}$ that is trained on more faithful summaries can in turn be used as $\theta$ to generate the training data for the next iteration.
74
+
75
+ hallucinations. Instead, we would like to guide the model to select the less probable token "British," which will yield a faithful summary sentence.
76
+
77
+ Formally, each summary token is selected by:
78
+
79
+ $$
80
+ f(y_{t}) = \log P(y_{1:t} \mid x) + w \cdot \max_{L_{y \leq t}} h(y_{1:t+l}, x)
81
+ $$
82
+
83
+ where $\log P(y_{1:t} \mid x)$ is the model score, $h(\cdot)$ is a reference-free faithfulness evaluation function that assigns a score to the summary, $w$ is the weight, and $l$ is the number of tokens to look into the future.
84
+
85
+ Here, $L_{y \leq t}$ is the set of possible generated summaries that start with the summary tokens $y_{1:t}$. The number of summaries in $L$ depends on the decoding strategy used to generate the future summaries: greedy search and sampling produce a single expansion, while beam search produces $k$ summaries, where $k$ is the beam size. Although the lookahead length $l$ can be specified, we instead generate the full summary, as current faithfulness metrics expect full summaries as input and do not work well on partial summaries (see Appendix E).
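A hedged sketch of the scoring rule, assuming hypothetical helpers for the model log-probability, the lookahead completion, and a reference-free faithfulness scorer such as BS-Fact:

```python
# Minimal sketch of the lookahead score f(y_t) for one candidate next token.
def lookahead_score(prefix_tokens, candidate, document,
                    model_logprob, complete_summary, faithfulness, w=1.0):
    # model_logprob(tokens, document)    -> log P(y_{1:t} | x)
    # complete_summary(tokens, document) -> list of full continuations starting with `tokens`
    #                                       (greedy gives one; beam search gives k)
    # faithfulness(summary, document)    -> reference-free score, e.g. BS-Fact
    extended = prefix_tokens + [candidate]
    continuations = complete_summary(extended, document)
    best_future = max(faithfulness(c, document) for c in continuations)
    return model_logprob(extended, document) + w * best_future
```

At each decoding step, this score would be computed for every candidate next token under consideration, and the token(s) with the highest score are kept.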
86
+
87
+ # 3.3 Combining Ranking and Lookahead
88
+
89
+ We can combine the two methods to further improve faithfulness. We first use the BEAM+LOOKAHEAD to generate faithful beam candidates and then select the best candidates with ranking. We refer to this method as BEAM+LOOKAHEAD+RANKING.
90
+
91
+ # 4 Efficient Decoding via Distillation
92
+
93
+ One drawback of the proposed decoding methods is the heavy computational cost during decoding. We
94
+
95
+ thus explore using distillation to transfer the knowledge of faithfulness-aware decoding to a student model that can generate summaries of similar faithfulness with just greedy decoding. We note here that our distillation aims at improving the decoding time rather than downsizing the model. Similar to Kim and Rush (2016), we assume that we have a teacher model and a student model. In our setting, the teacher model does not necessarily need to be a different model, but it needs to decode with more faithfulness-aware methods. Typical distillation methods use the teacher's probability distribution (Kim and Rush, 2016) as the target for the student model to imitate. In our case, however, that distribution is the same for all methods – the difference lies in how the probability is used to generate the next tokens. Thus, we propose a new decoding distillation loss. We use the teacher model to generate summaries $y_{\mathrm{gen}}$ as additional reference summaries, and interpolate between the cross-entropy loss using the original reference summaries and the cross-entropy loss where we consider $y_{\mathrm{gen}}$ as reference summaries. Formally, the training loss is:
96
+
97
+ $$
98
+ \mathcal{L}_{\text{distill}} = \mathcal{L}_{\mathrm{XE}}(y^{\prime}, y) + \lambda\, \mathcal{L}_{\mathrm{XE}}(y^{\prime}, y_{\text{gen}})
99
+ $$
100
+
101
+ where $\mathcal{L}_{\mathrm{XE}}$ is the cross-entropy loss, $y^{\prime}$ is the summary generated by the student model, and $\lambda$ is a hyperparameter weighting the cross-entropy loss on the generated summaries.
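A minimal PyTorch-style sketch of this loss, under assumed tensor shapes (logits of shape (seq_len, vocab) and token-id targets of shape (seq_len,)); this is not the authors' training code:

```python
# Minimal sketch of the decoding distillation loss: cross-entropy on the gold
# reference y plus a weighted cross-entropy on the teacher-decoded summary y_gen.
import torch.nn.functional as F

def distillation_loss(student_logits_ref, y_ref, student_logits_gen, y_gen, lam=1.0):
    loss_ref = F.cross_entropy(student_logits_ref, y_ref)   # L_XE(y', y)
    loss_gen = F.cross_entropy(student_logits_gen, y_gen)   # L_XE(y', y_gen)
    return loss_ref + lam * loss_gen
```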
102
+
103
+ Iterative Distillation. While we use the student model with just greedy decoding to improve decoding speed, the student model can also benefit from using our proposed faithfulness-aware decoding methods. Thus, the student models can also serve as a new teacher model to distill more faithfulness knowledge to a new student model. The distillation process thus becomes iterative, illustrated in Figure 2. We use the trained student model as a new teacher model, where we decode with our proposed faithfulness methods to create additional reference summaries $y_{\mathrm{gen}}$ for the next iteration.
104
+
105
+ # 5 Experiments
106
+
107
+ # 5.1 Datasets and Models
108
+
109
+ We perform experiments on two popular datasets for abstractive summarization, XSum (Narayan et al., 2018) and CNN/DM (Hermann et al., 2015). More details on the datasets are described in Appendix A.1. We use the released checkpoint of
110
+
111
+ BART-large (406M) for the two datasets.$^{2}$ The same experiment is done with PEGASUS (Zhang et al., 2020a), which is presented in Appendix B.
112
+
113
+ # 5.2 Evaluation Metrics
114
+
115
+ We use the F1 measure of ROUGE-L (Lin, 2004, RL), i.e., the overlap of the longest common subsequence between a generated summary and the reference summary, and the F1 measure of BERTScore (Zhang et al., 2020b, BS) to evaluate summary quality. In addition, we use BS-Fact, i.e., the BERTScore precision of a summary with respect to its source document rather than the reference summary, FactCC (Kryscinski et al., 2020), DAE (Goyal and Durrett, 2021), and QuestEval (Scialom et al., 2021) for faithfulness evaluation. Details of the metrics are presented in Appendix A.
116
+
117
+ # 5.3 Human Evaluation Setup
118
+
119
+ We use Amazon Mechanical Turk (AMT) to ask human annotators to judge the faithfulness and informativeness of the summaries generated with different decoding.
120
+
121
+ Faithfulness. We ask workers to judge the faithfulness of a summary sentence using a 3-star rating (1=major factual error, 2=minor factual error, 3=no factual error). Three judgments per summary are then aggregated using majority voting. We randomly select 200 examples from both datasets and use the summaries generated by greedy search, sampling, and beam search, as well as by the ranking and lookahead strategies applied to beam search. We report the percentage of summaries that are fully factual (i.e., the percentage of summaries rated as 3 stars) as the faithfulness score, and also report the distribution of summaries rated as 1, 2, and 3 stars. Details on qualification, payment, and other aspects of the evaluation can be found in Appendix A.4.
122
+
123
+ Informativeness. We also evaluate the generated summaries in terms of informativeness. We consider a summary to be informative if its content is important and relevant; it does not necessarily need to be long. We use best-worst scaling (BWS) to evaluate the informativeness of the generated summaries, as this method is "a less labor-intensive alternative to paired comparisons that has been shown to produce more reliable results than rating scales" (Kiritchenko and Mohammad, 2017). Accordingly, for each dataset, we select 200 random articles with the corresponding summaries from five systems in random order. We ask three annotators to select the most informative ("best") and the least informative ("worst") among the five. A rating per system is computed as the percentage of times it is chosen as best minus the percentage of times it is selected as worst. A value of 100 means that the system has been unanimously picked as "best", whereas a value of -100 means that the system has been unanimously picked as "worst". Additional details, as well as a screenshot of the annotation interface, are in Appendix A.4.
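For illustration, the BWS rating of a single system can be computed as below (a trivial sketch with made-up counts):

```python
# Best-worst scaling (BWS): percentage of times a system is chosen as best
# minus the percentage of times it is chosen as worst, in [-100, 100].
def bws_rating(times_best, times_worst, n_comparisons):
    return 100.0 * (times_best - times_worst) / n_comparisons

print(bws_rating(times_best=80, times_worst=20, n_comparisons=200))  # 30.0
```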
126
+
127
+ # 5.4 Decoding Setting Details
128
+
129
+ We describe the settings of the basic decoding methods, our faithfulness-aware decoding methods and distillation. More details are in Appendix A.3.
130
+
131
+ Basic Decoding Method. We compare the summaries generated using greedy search, beam search $(k = 10)$ , and nucleus sampling $(p = 0.9)$ . Additional experiments with various beam sizes and top-p values can be found in Appendix B.
132
+
133
+ Ranking and Composite Metric. We use beam search $(k = 10)$ and rank the candidates using the composite metric introduced in Section 3.1. To train the composite metric, we explore combining FactCC, BS-Fact, DAE, and QuestEval. We use FACTCOLLECT (Ribeiro et al., 2022), a large collection aggregating four sets of faithfulness annotations, to train a linear regression on the human-labeled faithfulness judgments. More details of the composite metric and its robustness to another domain can be found in Appendix D.
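A rough sketch of how such a composite metric could be fit (the feature values below are made-up toy numbers, not FACTCOLLECT data):

```python
# Fit the composite metric as a linear regression from individual metric scores
# to human faithfulness labels.
import numpy as np
from sklearn.linear_model import LinearRegression

# Each row holds [FactCC, BS-Fact, DAE, QuestEval] scores for one annotated summary;
# y holds the corresponding human faithfulness judgments (1 = faithful, 0 = unfaithful).
X = np.array([[0.9, 0.95, 0.1, 0.6],
              [0.2, 0.80, 0.7, 0.3]])
y = np.array([1.0, 0.0])

reg = LinearRegression().fit(X, y)
weights, bias = reg.coef_, reg.intercept_
# At ranking time, a candidate's composite score is its metric vector @ weights + bias.
```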
134
+
135
+ Lookahead. We use BS-Fact as the faithfulness metric for the lookahead as it correlates highly with human judgment (Pagnoni et al., 2021) and is quick to compute without the need for additional pre-processing. We use greedy search to generate future summaries and apply it to both greedy and beam searches.
136
+
137
+ Distillation. We use the checkpoint of our two proposed faithfulness-aware decoding methods as the teacher model, and train the student model from BART-LARGE.$^{3}$ We follow the original fine-tuning hyperparameters provided by the authors (Lewis
138
+
139
+ ![](images/2e36e2e2f8e725bfb1ecb28e3ea85998788f87bdaa9c69a5698c4290c5ece571.jpg)
140
+ Figure 3: Maximum possible score (Max) for each faithfulness metric and the faithfulness scores of the top candidate (Top) at various beam sizes. As beam size increases, more faithful summaries exist in the list of candidates, but the faithfulness of the top beam improves only slightly.
141
+
142
+ ![](images/060bd9f712757105b28ffc60717d51213549f7c13a8bdfbf25201ebb01176b4a.jpg)
143
+
144
+ ![](images/93c69dec6c2ed3ba181a90cb590d5fd3f64b99a8b6b3738976ccaf3303f2fd57.jpg)
145
+
146
+ ![](images/8f58c06551858d9041be346d353b90d15f4b098874027cdbb6362eb6e349118b.jpg)
147
+
148
+ <table><tr><td></td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td colspan="7">CNN/DM</td></tr><tr><td>Greedy</td><td>30.93</td><td>88.39</td><td>93.15</td><td>69.61</td><td>8.15</td><td>59.13</td></tr><tr><td>Nucleus</td><td>27.64</td><td>87.90</td><td>91.76</td><td>54.05</td><td>21.61</td><td>56.43</td></tr><tr><td>Beam</td><td>29.99</td><td>88.03</td><td>94.20</td><td>84.23</td><td>3.30</td><td>60.03</td></tr><tr><td colspan="7">XSum</td></tr><tr><td>Greedy</td><td>36.16</td><td>92.03</td><td>89.28</td><td>23.53</td><td>65.35</td><td>36.51</td></tr><tr><td>Nucleus</td><td>31.15</td><td>91.26</td><td>88.62</td><td>21.04</td><td>76.20</td><td>34.98</td></tr><tr><td>Beam</td><td>37.11</td><td>92.12</td><td>89.45</td><td>22.97</td><td>63.49</td><td>37.05</td></tr></table>
149
+
150
+ Table 1: Baseline results of popular decoding methods measured by summarization quality metrics (Rouge-L (RL) and BertScore (BS)) and faithfulness metrics. We observe a general trend where beam search performs the best and nucleus sampling performs the worst in terms of faithfulness. Full result with different beam sizes and top- $p$ probability for nucleus sampling is in Table 7.
151
+
152
+ et al., 2020) and use $\lambda = 1$ for the weight of the additional cross-entropy loss.
153
+
154
+ # 6 Results
155
+
156
+ # 6.1 Baseline Decoding Results
157
+
158
+ We show the analysis of common decoding strategies in Table 1. Both datasets show a similar trend. Beam search performs the best in terms of faithfulness except for FactCC on the XSum dataset. Compared to greedy decoding, which is beam search with $k = 1$ , the candidate expansion with a larger beam size provides better exploration for faithfulness. Nucleus sampling degrades faithfulness compared with greedy search, showing that the introduced randomness is not helpful for faithfulness. This aligns with observations from Narayan et al. (2022) and Chen et al. (2021a), which show that nucleus sampling produces less relevant text for data-to-text generation.
159
+
160
+ The results are surprisingly mixed for both datasets in terms of summary quality, i.e., RL and
161
+
162
+ BS scores. Comparing beam search with greedy decoding, we see improvement of both scores on XSum but not for CNN/DM. Nucleus sampling, on the other hand, is also worse than greedy search on this aspect, suggesting that randomness may not be suited for the task of abstractive summarization.
163
+
164
+ Search Space for Beam Search. Inspired by Xu et al. (2022), who hinted at the potential of better faithfulness with a large exploration of the search space, we use beam search to examine whether larger beam sizes (and hence larger exploration) yield more faithful summaries. To this end, we use all summaries generated by beam search and select the beam that would result in the highest possible score for each metric. We show this maximum score (Max) for the four faithfulness metrics and the faithfulness score of selecting the top beam (Top) at different beam sizes in Figure 3. We see a clear trend that increasing the beam size improves all faithfulness scores. This confirms our hypothesis that larger exploration of the search space can provide additional faithfulness gains, showing the potential of our proposed decoding strategies, especially the re-ranking strategy, to output more faithful summaries. The faithfulness scores of Top increase only marginally compared to the increase for Max, showing the importance of better faithfulness guidance, such as our proposed faithfulness lookahead heuristics.
165
+
166
+ # 6.2 Faithfulness-Aware Decoding Results
167
+
168
+ Table 2 shows the impact of our faithfulness-aware methods compared with the traditional decoding methods. We first observe that applying ranking on top of beam search significantly improves faithfulness over beam search, as measured by all faithfulness metrics. Specifically, QuestEval reaches 62.57 (2.5
169
+
170
+ <table><tr><td></td><td>Rouge-L</td><td>BERTScore</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td colspan="7">CNN/DM</td></tr><tr><td>Greedy</td><td>30.93</td><td>88.39</td><td>93.15</td><td>69.61</td><td>8.15</td><td>59.13</td></tr><tr><td>Beam</td><td>29.99</td><td>88.03</td><td>94.20</td><td>84.23</td><td>3.30</td><td>60.03</td></tr><tr><td>BEAM+RANKING</td><td>30.08</td><td>88.12</td><td>94.31</td><td>90.27</td><td>1.92</td><td>62.57</td></tr><tr><td>GREEDY+LOOKAHEAD</td><td>30.75</td><td>88.35</td><td>93.90</td><td>71.54</td><td>5.70</td><td>60.13</td></tr><tr><td>BEAM+LOOKAHEAD</td><td>28.66</td><td>87.84</td><td>95.32</td><td>86.10</td><td>1.68</td><td>61.80</td></tr><tr><td>BEAM+LOOKAHEAD+RANKING</td><td>28.86</td><td>87.92</td><td>95.26</td><td>91.68</td><td>1.08</td><td>63.69</td></tr><tr><td colspan="7">XSum</td></tr><tr><td>Greedy</td><td>36.16</td><td>92.03</td><td>89.28</td><td>23.53</td><td>65.35</td><td>36.51</td></tr><tr><td>Beam</td><td>37.11</td><td>92.12</td><td>89.45</td><td>22.97</td><td>63.49</td><td>37.05</td></tr><tr><td>BEAM+RANKING</td><td>36.42</td><td>92.10</td><td>89.79</td><td>40.11</td><td>51.48</td><td>40.10</td></tr><tr><td>GREEDY+LOOKAHEAD</td><td>36.25</td><td>92.11</td><td>89.71</td><td>24.21</td><td>60.46</td><td>37.17</td></tr><tr><td>BEAM+LOOKAHEAD</td><td>35.27</td><td>91.94</td><td>90.78</td><td>23.38</td><td>50.04</td><td>39.24</td></tr><tr><td>BEAM+LOOKAHEAD+RANKING</td><td>34.71</td><td>91.90</td><td>90.78</td><td>38.86</td><td>41.04</td><td>41.94</td></tr></table>
171
+
172
+ Table 2: Results for our proposed decoding strategies. Compared to the baseline methods (greedy and beam search), both ranking and lookahead improve faithfulness. The combination of both methods further increases faithfulness.
173
+
174
+ points improvement) and 40.10 (3.1 points improvement) on CNN/DM and XSum, respectively. The DAE error rate reduces from 63.49 to 51.48 and from 3.30 to 1.92, a relative improvement of $18.92\%$ (12.01 points) and $41.8\%$ (1.38 points) on XSum and CNN/DM, respectively.
175
+
176
+ We observe a similar improvement for lookahead: applying it improves faithfulness over the base decoding strategy across all faithfulness metrics. Nevertheless, the base decoding strategy is still the dominating factor, as BEAM+LOOKAHEAD generates more faithful summaries than GREEDY+LOOKAHEAD on all faithfulness metrics. GREEDY+LOOKAHEAD outperforms Beam on the XSum dataset, showing that better guidance with future faithfulness heuristics can improve faithfulness without large exploration. Finally, the combination of lookahead and ranking further improves faithfulness as evaluated by FactCC, DAE, and QuestEval.
177
+
178
+ In terms of ROUGE score, applying faithful decoding methods decreases RL. This tradeoff between faithfulness and ROUGE has been observed in many prior works (Chen et al., 2021b; Kryscinski et al., 2020; Wan and Bansal, 2022). One reason for this phenomenon is that more than $70\%$ of the reference summaries contain hallucinations (Maynez et al., 2020), so the more faithful summaries that do not contain such hallucinations will have lower ROUGE scores. To investigate this problem, we perform a human evaluation study, where we find that the summaries generated by BEAM+LOOKAHEAD are considered to be most
179
+
180
+ informative. More details are in Appendix A.4.
181
+
182
+ # 6.3 Human Evaluation Results
183
+
184
+ Faithfulness. The observations from the automatic faithfulness metrics align with the human evaluation results in Table 3. For XSum, among the baseline decoding methods, sampling performs the worst. Interestingly, greedy is more faithful than beam search, but the difference is only 1.5 points. Our proposed decoding strategies generate summaries that are judged more faithful than those of the baseline decoding strategies. Specifically, BEAM+LOOKAHEAD reaches 56.5, outperforming even BEAM+RANKING (49.5). We also observe that our proposed methods significantly reduce the percentage of summaries judged to contain major factual errors; compared to beam search, ranking reduces the percentage from 44.5 to 36.5, and lookahead reduces it further to 31.5. For CNN/DM, the summaries generated by our proposed methods achieve the highest faithfulness, and between the two, BEAM+LOOKAHEAD produces no summaries with major errors.
185
+
186
+ Informativeness. The result is shown in Table 4. The output of the BEAM+LOOKAHEAD is clearly seen as the most informative among the five methods. This result suggests that Rouge-L and BERTScore may not be good indicators for informativeness, as BEAM+LOOKAHEAD achieves the lowest scores for the two automatic metrics on both datasets.
187
+
188
+ <table><tr><td></td><td colspan="3">XSum</td><td colspan="3">CNN/DM</td></tr><tr><td></td><td>1</td><td>2</td><td>3</td><td>1</td><td>2</td><td>3</td></tr><tr><td>Greedy</td><td>43.0</td><td>12.0</td><td>45.0</td><td>4.0</td><td>3.5</td><td>92.5</td></tr><tr><td>Sampling</td><td>55.0</td><td>13.0</td><td>32.0</td><td>8.0</td><td>10.5</td><td>81.5</td></tr><tr><td>Beam</td><td>44.5</td><td>12.0</td><td>43.5</td><td>0.0</td><td>2.0</td><td>98.0</td></tr><tr><td>BEAM+RANKING</td><td>36.5</td><td>14.0</td><td>49.5</td><td>0.5</td><td>1.0</td><td>98.5</td></tr><tr><td>BEAM+LOOKAHEAD</td><td>31.5</td><td>12.0</td><td>56.5</td><td>0.0</td><td>1.5</td><td>98.5</td></tr></table>
189
+
190
+ Table 3: Human evaluation results on faithfulness with the 3-star rating system (1=major factual error, 2=minor factual error, 3=no factual error). Our proposed faithfulness-aware methods are judged as the most faithful (the percentage of summaries rated as 3), confirming our observation with automatic faithfulness metrics.
191
+
192
+ # 6.4 Abstractiveness
193
+
194
+ Models can "trivially" become more faithful by becoming more extractive (Dreyer et al., 2023), so it is important to understand where the gain in faithfulness stems from. We experiment on XSum, where methods achieve larger improvements in faithfulness and thus potentially more of the gain could come from increased extractiveness. We use the 200 examples from the human evaluation, calculate MINT (Dreyer et al., 2023) for abstractiveness, and plot this score against the human-labeled faithfulness, similar to Ladhak et al. (2022). The result is shown in Figure 4. Similar to the observation of Dreyer et al. (2023), more faithful models tend to be more extractive; however, the gain in faithfulness is considerably larger than the decrease in abstractiveness. For example, comparing BEAM+LOOKAHEAD with beam search, the relative increase in faithfulness $(29.89\%)$ is roughly quadruple the decrease $(7.27\%)$ in abstractiveness. Similar experiments on CNN/DM are in Appendix F.
195
+
196
+ Lookahead with Faithfulness and Abstractiveness. We further show that our lookahead method can easily incorporate additional heuristics, such as balancing faithfulness and abstractiveness. Specifically, we replace $h(\cdot)$ with a combination of BS-Fact and MINT:
197
+
198
+ $$
199
+ h(y, x) = \alpha \, \text{BS-Fact}(y, x) + (1 - \alpha) \, \text{MINT}(y, x)
200
+ $$
201
+
202
+ We use $\alpha = 0.75$ and the same hyper-parameters as BEAM+LOOKAHEAD. We refer to this model as BEAM+LOOKAHEAD+ABSTR and show the point in Figure 4. Compared to BEAM+LOOKAHEAD, this model can increase abstractiveness at a small cost in faithfulness, demonstrating the flexibility of our lookahead method to incorporate various characteristics for summarization.
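As a small illustration, the combined heuristic can be written as a drop-in replacement for $h(\cdot)$ in the lookahead score (hypothetical scorer callables, not the released code):

```python
# Lookahead heuristic trading off faithfulness (BS-Fact) against abstractiveness (MINT).
def combined_heuristic(summary, document, bs_fact, mint, alpha=0.75):
    # bs_fact and mint are reference-free scorers: f(summary, document) -> float
    return alpha * bs_fact(summary, document) + (1 - alpha) * mint(summary, document)
```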
203
+
204
+ <table><tr><td></td><td>XSum</td><td>CNN/DM</td></tr><tr><td>Greedy</td><td>3.0</td><td>-8.2</td></tr><tr><td>Sampling</td><td>-20.5</td><td>-23.8</td></tr><tr><td>Beam</td><td>1.8</td><td>8.5</td></tr><tr><td>BEAM+RANKING</td><td>1.0</td><td>-2.8</td></tr><tr><td>BEAM+LOOKAHEAD</td><td>17.7</td><td>31.0</td></tr></table>
205
+
206
+ Table 4: Human evaluation results on informativeness with best-worst-scaling (100=unanimous best, -100=unanimous worst).
207
+
208
+ ![](images/4a638cc6059ed0ecbe4346537b2904c8860d0e35e285bef31264dcc79c4791d5.jpg)
209
+ Figure 4: Faithfulness and abstractiveness tradeoff results on 200 XSum examples used for human annotation. BEAM+LOOKAHEAD+ABSTR is the model that is trained with additional abstractiveness heuristics (See Section 6.4 for more details).
210
+
211
+ # 6.5 Distillation
212
+
213
+ We present the distillation result in Table 5. While the student models are not able to outperform the teacher models, they approach the performance of the teacher models. The student models are also able to generate more faithful summaries compared to the greedy search baseline, which is only trained using the cross-entropy loss $\mathcal{L}_{\mathrm{XE}}(y', y)$ .
214
+
215
+ The main benefit of the student model is the improved decoding speed. The time for ranking drops from 0.77 seconds per example to 0.47, a roughly $40\%$ reduction. The largest gain is for lookahead, where the decoding time drops from 3 seconds per example to 0.49, about 1/6 of the original.
216
+
217
+ For example, the student model distilled from BEAM+RANKING improves DAE by 6.6 points and QuestEval by a point compared to the greedy search baseline and only differs from the teacher model by 2.5 points for DAE and 0.5 points for QuestEval. When using a more faithful teacher model, i.e. BEAM+LOOKAHEAD, the student model is able to generate more faithful summaries, as evaluated by BS-Fact, DAE, and QuestEval.
218
+
219
+ <table><tr><td></td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FC</td><td>DAE ↓</td><td>QE</td><td>Speed</td></tr><tr><td>Greedy</td><td>36.16</td><td>92.03</td><td>89.28</td><td>23.53</td><td>65.35</td><td>36.51</td><td>0.39</td></tr><tr><td colspan="8">BEAM+RANKING</td></tr><tr><td>Teacher</td><td>36.46</td><td>92.14</td><td>90.15</td><td>22.47</td><td>56.33</td><td>37.98</td><td>0.77</td></tr><tr><td>Student</td><td>36.59</td><td>92.07</td><td>89.80</td><td>23.52</td><td>58.78</td><td>37.46</td><td>0.47</td></tr><tr><td colspan="8">BEAM+LOOKAHEAD</td></tr><tr><td>Teacher</td><td>35.91</td><td>92.06</td><td>90.51</td><td>22.44</td><td>52.50</td><td>38.72</td><td>3.00</td></tr><tr><td>Student</td><td>36.52</td><td>92.07</td><td>89.97</td><td>22.58</td><td>58.02</td><td>37.89</td><td>0.49</td></tr></table>
220
+
221
+ Table 5: Distillation results using our proposed faithfulness-aware decoding methods as the teacher. We abbreviate FactCC as FC and QuestEval as QE. Speed is calculated by seconds per summary.
222
+
223
+ Iterative Distillation. Next, we show the result of distilling BEAM+RANKING iteratively on XSum in Table 6. With each iteration, the model improves faithfulness further. Compared to the original teacher model, BEAM+RANKING, the student model outperforms the teacher on all faithfulness metrics after two iterations. We stress that all student models here use only greedy decoding, showing the potential of combining decoding with training for more faithful models.
224
+
225
+ # 7 Related Work
226
+
227
+ Many of the related works of our proposed decoding methods have been discussed in Section 3; here we cover other related areas.
228
+
229
+ Decoding methods. A decoding method for text generation uses approximate search to select the best tokens to form a hypothesis. Several works have critically analyzed decoding strategies for natural language generation, including beam search (Meister et al., 2020a; Stahlberg and Byrne, 2019; Xu et al., 2022; Holtzman et al., 2020), best-first search (Meister et al., 2020b), and lattice decoding (Xu et al., 2022). While these works investigated the effect of decoding methods on generated outputs from the perspective of diversity and repetitiveness, to the best of our knowledge, none have explicitly analyzed their effect on faithfulness.
230
+
231
+ Distillation. Distillation aims at compressing the knowledge from a larger model into a smaller one. A conventional approach uses soft targets, i.e. learning the logits of a teacher model rather than final predictions (Buciluundefined et al., 2006; Hinton et al., 2015; Kim and Rush, 2016). While this method has shown to be very effective, it is less applicable to our case where the underlying
232
+
233
+ <table><tr><td></td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td>Teacher</td><td>36.46</td><td>92.14</td><td>90.15</td><td>22.47</td><td>56.33</td><td>37.98</td></tr><tr><td>Iter. 1</td><td>36.59</td><td>92.07</td><td>89.80</td><td>23.52</td><td>58.78</td><td>37.46</td></tr><tr><td>Iter. 2</td><td>35.95</td><td>91.95</td><td>90.16</td><td>23.14</td><td>54.01</td><td>38.10</td></tr><tr><td>Iter. 3</td><td>35.09</td><td>91.73</td><td>90.48</td><td>22.77</td><td>50.66</td><td>38.86</td></tr><tr><td>Iter. 4</td><td>34.32</td><td>91.54</td><td>90.81</td><td>24.49</td><td>47.83</td><td>39.64</td></tr><tr><td>Iter. 5</td><td>33.60</td><td>91.34</td><td>91.11</td><td>25.52</td><td>45.85</td><td>40.39</td></tr></table>
234
+
235
+ Table 6: Iterative distillation results using BEAM+RANKING as the teacher decoding method. With two iterations, the student model is able to outperform the original teacher model in terms of faithfulness, and further iterations continuously improve faithfulness.
236
+
237
+ distribution over the next tokens does not necessarily change (for ranking, we do not modify the model scores at all), so learning soft labels is not useful. Rather than compressing model size, our approach focuses on reducing the computational cost during decoding. Our method is most similar to pseudo-labeling (Shleifer and Rush, 2020), in that we use generated summaries as "hard" labels. We do not replace the reference summaries with our generated ones; instead, we use interpolation (Kim and Rush, 2016) to account for both faithfulness and quality.
238
+
239
+ # 8 Conclusion
240
+
241
+ In this paper, we show a thorough analysis of the effect of decoding strategies on faithfulness for abstractive summarization. We present an analysis of popular decoding strategies, as well as our two newly proposed faithfulness-aware decoding strategies, ranking and lookahead, that can further improve faithfulness upon the base decoding methods. Finally, we show a simple (and optionally iterative) distillation trick where the training of a student model incorporates the summaries generated with more faithfulness-aware methods, and the student model generates summaries of similar faithfulness with minimal decoding time.
242
+
243
+ Future experiments could extend similar analysis of faithfulness and factuality beyond summarization and develop a combination of heuristics that also encompasses other aspects and styles.
244
+
245
+ # 9 Limitations
246
+
247
+ While the decoding strategies with lookahead show improvement in faithfulness, they require a heavy computational overhead, especially when they are coupled with beam search for the base decoding strategy and for generating the future summary.
248
+
249
+ We provide one solution with our distillation approach, which improves decoding speed. Many of the computations performed during this online process, including the generated future summaries and their faithfulness scores, are later discarded, similar to how candidates are pruned during beam search. We believe an interesting direction would be to cache the already generated future summaries so that decoding can directly reuse a future summary if it is a good candidate.
250
+
251
+ # 10 Ethical Impact
252
+
253
+ While our work aims to reduce potential malicious or unintended harmful effects, our methods rely on faithfulness metrics, whose inherent problems and biases remain under-studied. Our decoding strategies can also be applied with other metrics, even ones that could be optimized for malicious intents. Another aspect to consider is the environmental impact of our proposed methods, as they require heavy computation. We hope that our distillation mitigates this problem and that future work moves towards more environmentally friendly approaches while improving faithfulness for safer use of large models.
254
+
255
+ # References
256
+
257
+ Cristian Buciluundefined, Rich Caruana, and Alexandru Niculescu-Mizil. 2006. Model compression. In Proceedings of the 12th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, KDD '06, page 535-541, New York, NY, USA. Association for Computing Machinery.
258
+ Shuyang Cao and Lu Wang. 2021. CLIFF: Contrastive learning for improving faithfulness and factuality in abstractive summarization. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6633-6649, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
259
+ Mingda Chen, Sam Wiseman, and Kevin Gimpel. 2021a. WikiTableT: A large-scale data-to-text dataset for generating Wikipedia article sections. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021, pages 193-209, Online. Association for Computational Linguistics.
260
+ Sihao Chen, Fan Zhang, Kazoo Sone, and Dan Roth. 2021b. Improving faithfulness in abstractive summarization with contrast candidate generation and selection. In Proceedings of the 2021 Conference of the North American Chapter of the Association for
261
+
262
+ Computational Linguistics: Human Language Technologies, pages 5935-5941, Online. Association for Computational Linguistics.
263
+ Yue Dong, Shuohang Wang, Zhe Gan, Yu Cheng, Jackie Chi Kit Cheung, and Jingjing Liu. 2020. Multi-fact correction in abstractive text summarization. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9320-9331, Online. Association for Computational Linguistics.
264
+ Markus Dreyer, Mengwen Liu, Feng Nan, Sandeep Atluri, and Sujith Ravi. 2023. Evaluating the tradeoff between abstractiveness and factuality in abstractive summarization. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics: Findings. Association for Computational Linguistics.
265
+ Tobias Falke, Leonardo F. R. Ribeiro, Prasetya Ajie Utama, Ido Dagan, and Iryna Gurevych. 2019. Ranking generated summaries by correctness: An interesting but challenging application for natural language inference. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2214-2220, Florence, Italy. Association for Computational Linguistics.
266
+ Tanya Goyal and Greg Durrett. 2021. Annotating and modeling fine-grained factuality in summarization. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1449-1462, Online. Association for Computational Linguistics.
267
+ Karl Moritz Hermann, Tomás Kocisky, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Suleyman, and Phil Blunsom. 2015. Teaching machines to read and comprehend. In NIPS, pages 1693-1701.
268
+ Geoffrey Hinton, Oriol Vinyals, and Jeff Dean. 2015. Distilling the knowledge in a neural network.
269
+ Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The curious case of neural text degeneration. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.
270
+ Yoon Kim and Alexander M. Rush. 2016. Sequence-level knowledge distillation. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1317-1327, Austin, Texas. Association for Computational Linguistics.
271
+ Svetlana Kiritchenko and Saif Mohammad. 2017. Bestworst scaling more reliable than rating scales: A case study on sentiment intensity annotation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 465-470, Vancouver, Canada. Association for Computational Linguistics.
272
+
273
+ Mahnaz Koupaee and William Yang Wang. 2018. WikiHow: A large scale text summarization dataset.
274
+ K. Krippendorff. 1980. Content Analysis: An Introduction to Its Methodology. Commtext Series. SAGE Publications.
275
+ Wojciech Kryscinski, Bryan McCann, Caiming Xiong, and Richard Socher. 2020. Evaluating the factual consistency of abstractive text summarization. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9332-9346, Online. Association for Computational Linguistics.
276
+ Faisal Ladhak, Esin Durmus, He He, Claire Cardie, and Kathleen McKeown. 2022. Faithful or extractive? on mitigating the faithfulness-abstractiveness tradeoff in abstractive summarization. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1410–1421, Dublin, Ireland. Association for Computational Linguistics.
277
+ Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pretraining for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
278
+ Quentin Lhoest, Albert Villanova del Moral, Yacine Jernite, Abhishek Thakur, Patrick von Platen, Suraj Patil, Julien Chaumont, Mariama Drame, Julien Plu, Lewis Tunstall, Joe Davison, Mario Šaško, Gunjan Chhablani, Bhavitvya Malik, Simon Brandeis, Teven Le Scao, Victor Sanh, Canwen Xu, Nicolas Patry, Angelina McMillan-Major, Philipp Schmid, Sylvain Gugger, Clément Delangue, Theo Matussière, Lysandre Debut, Stas Bekman, Pierric Cistac, Thibault Goehringer, Victor Mustar, François Lagunas, Alexander Rush, and Thomas Wolf. 2021. Datasets: A community library for natural language processing. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 175-184, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
279
+ Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.
280
+ Yixin Liu and Pengfei Liu. 2021. SimCLS: A simple framework for contrastive learning of abstractive summarization. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 1065-1072, Online. Association for Computational Linguistics.
281
+
282
+ Ximing Lu, Sean Welleck, Peter West, Liwei Jiang, Jungo Kasai, Daniel Khashabi, Ronan Le Bras, Lianhui Qin, Youngjae Yu, Rowan Zellers, Noah A. Smith, and Yejin Choi. 2022. NeuroLogic a*esque decoding: Constrained text generation with lookahead heuristics. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 780-799, Seattle, United States. Association for Computational Linguistics.
283
+ Joshua Maynez, Shashi Narayan, Bernd Bohnet, and Ryan McDonald. 2020. On faithfulness and factuality in abstractive summarization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1906-1919, Online. Association for Computational Linguistics.
284
+ Clara Meister, Ryan Cotterell, and Tim Vieira. 2020a. If beam search is the answer, what was the question? In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2173-2185, Online. Association for Computational Linguistics.
285
+ Clara Meister, Tim Vieira, and Ryan Cotterell. 2020b. Best-first beam search. Transactions of the Association for Computational Linguistics, 8:795-809.
286
+ Feng Nan, Ramesh Nallapati, Zhiguo Wang, Cicero Nogueira dos Santos, Henghui Zhu, Dejiao Zhang, Kathleen McKeown, and Bing Xiang. 2021. Entity-level factual consistency of abstractive text summarization. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 2727-2733, Online. Association for Computational Linguistics.
287
+ Shashi Narayan, Shay B. Cohen, and Mirella Lapata. 2018. Don't give me the details, just the summary! topic-aware convolutional neural networks for extreme summarization. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1797-1807, Brussels, Belgium. Association for Computational Linguistics.
288
+ Shashi Narayan, Gonçalo Simões, Yao Zhao, Joshua Maynez, Dipanjan Das, Michael Collins, and Mirella Lapata. 2022. A well-composed text is half done! composition sampling for diverse conditional generation. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1319-1339, Dublin, Ireland. Association for Computational Linguistics.
289
+ Artidoro Pagnoni, Vidhisha Balachandran, and Yulia Tsvetkov. 2021. Understanding factuality in abstractive summarization with FRANK: A benchmark for factuality metrics. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4812-4829, Online. Association for Computational Linguistics.
290
+
291
+ Mathieu Ravaut, Shafiq Joty, and Nancy Chen. 2022a. SummaReranker: A multi-task mixture-of-experts re-ranking framework for abstractive summarization. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 4504-4524, Dublin, Ireland. Association for Computational Linguistics.
292
+ Mathieu Ravaut, Shafiq Joty, and Nancy F Chen. 2022b. Towards summary candidates fusion. In EMNLP 2022.
293
+ Leonardo F. R. Ribeiro, Mengwen Liu, Iryna Gurevych, Markus Dreyer, and Mohit Bansal. 2022. Fact-Graph: Evaluating factuality in summarization with semantic graph representations. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3238-3253, Seattle, United States. Association for Computational Linguistics.
294
+ Thomas Scialom, Paul-Alexis Dray, Sylvain Lamprier, Benjamin Piwowarski, Jacopo Staiano, Alex Wang, and Patrick Gallinari. 2021. QuestEval: Summarization asks for fact-based evaluation. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 6594-6604, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
295
+ Sam Shleifer and Alexander M. Rush. 2020. Pre-trained summarization distillation.
296
+ Felix Stahlberg and Bill Byrne. 2019. On NMT search errors and model errors: Cat got your tongue? In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3356-3362, Hong Kong, China. Association for Computational Linguistics.
297
+ Liam van der Poel, Ryan Cotterell, and Clara Meister. 2022. Mutual information alleviates hallucinations in abstractive summarization.
298
+ David Wan and Mohit Bansal. 2022. FactPEGASUS: Factuality-aware pre-training and fine-tuning for abstractive summarization. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1010-1028, Seattle, United States. Association for Computational Linguistics.
299
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
302
+ Wen Xiao and Giuseppe Carenini. 2022. Entity-based spancopy for abstractive summarization to improve the factual consistency.
303
+ Jiacheng Xu, Siddhartha Jonnalagadda, and Greg Durrett. 2022. Massive-scale decoding for text generation using lattices. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4659-4676, Seattle, United States. Association for Computational Linguistics.
304
+ Haopeng Zhang, Semih Yavuz, Wojciech Kryscinski, Kazuma Hashimoto, and Yingbo Zhou. 2022. Improving the faithfulness of abstractive summarization via entity coverage control. In Findings of the Association for Computational Linguistics: NAACL 2022, pages 528-535, Seattle, United States. Association for Computational Linguistics.
305
+ Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Peter Liu. 2020a. PEGASUS: Pre-training with extracted gap-sentences for abstractive summarization. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 11328-11339. PMLR.
306
+ Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020b. BERTScore: Evaluating text generation with BERT. In International Conference on Learning Representations.
307
+
308
+ # A Experiment Details
309
+
310
+ # A.1 Datasets
311
+
312
+ We evaluate on XSum and CNN/DM. We use the datasets processed and provided by DATASETS (Lhoest et al., 2021). Both datasets contain English news articles and the corresponding summaries. XSum contains 204045, 11332, and 11334 examples in the training, validation, and test sets, respectively, and CNN/DM contains 287113, 13368, and 11490 examples for the same splits.
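+
+ A minimal sketch of loading these splits with the DATASETS library follows; the dataset identifiers and the CNN/DM configuration name are assumptions rather than the exact configuration used.
+
+ ```python
+ from datasets import load_dataset
+
+ # Assumed Hugging Face Hub identifiers for the two corpora.
+ xsum = load_dataset("xsum")
+ cnndm = load_dataset("cnn_dailymail", "3.0.0")
+
+ for name, ds in [("XSum", xsum), ("CNN/DM", cnndm)]:
+     print(name, len(ds["train"]), len(ds["validation"]), len(ds["test"]))
+ ```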
313
+
314
+ # A.2 Metrics
315
+
316
+ We use the official code and follow the instructions to set up and run all the metrics we used. We use the ROUGE package from https://github.com/google-research/google-research/tree/master/rouge. We report all scores of our models from single runs.
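+
+ For illustration, the ROUGE package above is available on PyPI as rouge-score; a minimal scoring sketch (the stemming option is an assumption) is:
+
+ ```python
+ from rouge_score import rouge_scorer
+
+ # ROUGE-L (RL) between a reference summary and a generated summary.
+ scorer = rouge_scorer.RougeScorer(["rougeL"], use_stemmer=True)
+ scores = scorer.score(
+     target="the fire broke out in portland on sunday",
+     prediction="a fire broke out in portland",
+ )
+ print(scores["rougeL"].fmeasure)
+ ```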
317
+
318
+ Please evaluate how consistent the blue sentence from the summary is with respect to the information in the articles.
319
+
320
+ - 1 star: Major error. The blue sentence contains a major factual error or multiple minor errors.
321
+ - 2 stars: Minor error. The blue sentence contains one minor factual error.
322
+ - 3 stars: No errors. The blue sentence contains no factual errors.
323
+
324
+ # Major errors:
325
+
326
+ Definition: Readers knowledgeable in the space would likely recognize the error in the blue sentence. If printed in a newspaper, the newspaper would have to print a correction or retraction to maintain its reputation.
327
+ Example 1: The blue sentence might say "A fire broke out in Seattle", but an article says it broke out in Portland.
328
+ Example 2: The blue sentence might say "the Republicans won the election", but the articles indicate that the Democrats won instead.
329
+ Example 3: The blue sentence might say that "A fire broke out at 2am", but the articles don't mention the time when the fire broke out, or they mention it was during the day.
330
+
331
+ # Minor errors:
332
+
333
+ Definition: Most readers would not notice the error or find it less important. If printed in a newspaper, the newspaper may not need to print a correction.
334
+ Example 1: The blue sentence might say that a celebrity couple shared a video of their daughter, but the articles say that the mom shared the video.
335
+ Example 2: The blue sentence might misspell a name.
336
+ Example 3: The blue sentence might contain a repetition that is not literally correct, e.g., "the soccer team won the game 1-2 and 1-2".
337
+ Example 4: The blue sentence might say "Lady Celia Vestey was one of Prince Harry's six godmothers", but it should be godparents.
338
+ Example 5: The blue sentence might say "The Game Awards will take place in Los Angeles and London", but the articles say they take place "virtually from Los Angeles and London".
339
+
340
+ # Meaning of the colors:
341
+
342
+ - Summary: The gray sentences in the summary are displayed to give context only. Please evaluate the blue sentence only.
343
+ - Articles: The sentences in the articles have green background color to help you find information more quickly. Article sentences with darker green background color are more related to the blue sentence. The least related sentences have been removed, indicated by three dots (...).
344
+
345
+ Figure 5: Annotation instructions to annotate factual consistency on Mechanical Turk.
346
+
347
+ # Welcome!
348
+
349
+ We need your help on evaluating (up to) five automatically generated summaries by comparing them to the original article that the summaries try to summarize.
350
+
351
+ For these (up to) five summaries, you only need to select one as most informative and one as least informative.
352
+
353
+ The most informative summary is best at expressing the main points of the article; its content is the most important and relevant. It's not necessarily the longest summary -- just the one that contains the most important information.
354
+
355
+ The least informative summary is worst at expressing the main points of the article; its content is the least important and relevant. It's not necessarily the shortest summary -- just the one with the least important information.
356
+
357
+ When you select one as most informative, it will turn green.
358
+
359
+ When you select one as least informative, it will turn red.
360
+
361
+ If there are multiple best (or worst) summaries, pick one of them at random.
362
+
363
+ Disclaimer: This task takes at least 20 seconds to complete. You do not qualify for the bonus if you spend less time.
364
+
365
+ Figure 6: Annotation instructions to annotate informativeness on Mechanical Turk.
366
+
367
+ For BS and BS-Fact, we use the default model for English (ROBERTA-LARGE). For DAE, we use the sentence error, which considers the sentence to contain an error if one of its arcs is predicted to be not factual.
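+
+ A minimal sketch of computing BS and BS-Fact with the bert-score package, assuming BS-Fact is the BERTScore precision of the summary against the source document:
+
+ ```python
+ from bert_score import BERTScorer
+
+ source = "A fire broke out in Portland on Sunday, the fire department said."
+ reference = "Firefighters responded to a blaze in Portland on Sunday."
+ summary = "A fire broke out in Portland on Sunday."
+
+ scorer = BERTScorer(lang="en")  # default English model is roberta-large
+
+ # BS: summary scored against the reference summary (F1).
+ _, _, bs_f1 = scorer.score([summary], [reference])
+ # BS-Fact: BERTScore precision of the summary against the source document.
+ bs_fact_p, _, _ = scorer.score([summary], [source])
+ print(bs_f1.item(), bs_fact_p.item())
+ ```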
368
+
369
+ # A.3 Decoding Details
370
+
371
+ Basic Decoding Method Details. We use the official generation code provided by TRANSFORMERS (Wolf et al., 2020). We use a single NVIDIA V100 GPU to generate the summaries. Greedy and sampling experiments take around 2 hours and beam search variants take 4 hours.
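+
+ A minimal sketch of the three basic decoding modes with the TRANSFORMERS generate API; the checkpoint name, the input article, and the generation lengths are placeholders rather than the exact settings used.
+
+ ```python
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ ckpt = "facebook/bart-large-xsum"  # placeholder fine-tuned checkpoint
+ tok = AutoTokenizer.from_pretrained(ckpt)
+ model = AutoModelForSeq2SeqLM.from_pretrained(ckpt)
+
+ article = "A fire broke out in Portland on Sunday, the fire department said."
+ inputs = tok(article, return_tensors="pt", truncation=True)
+
+ greedy = model.generate(**inputs, num_beams=1, do_sample=False, max_length=64)
+ nucleus = model.generate(**inputs, do_sample=True, top_p=0.9, max_length=64)
+ beam = model.generate(**inputs, num_beams=10, num_return_sequences=10, max_length=64)
+
+ print(tok.batch_decode(beam, skip_special_tokens=True))
+ ```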
372
+
373
+ Ranking. We do not need to do additional computation as we already have the outputs of beam search and the metric scores.
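+
+ A minimal sketch of the ranking step, assuming the decoded beam search candidates and their faithfulness scores (e.g., from the composite metric in Appendix D) are already available:
+
+ ```python
+ def rank_candidates(candidates, scores):
+     """Return the beam search candidate with the highest faithfulness score.
+     candidates: list of decoded summaries; scores: one score per candidate (higher is better)."""
+     best = max(range(len(candidates)), key=lambda i: scores[i])
+     return candidates[best]
+ ```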
374
+
375
+ Lookahead. To reduce computational overhead, we only calculate and incorporate the lookahead heuristics for the top 5 tokens according to the model score at each time step. Experiments using beam search or sampling to generate a future summary can be found in Appendix C. We show additional ablations on the length of the future summaries and how the exploration changes with the heuristics in Appendix E. For tuning $w$, the weight for the heuristics, we search over the interval from 5 to 55 with a step of 5, and evaluate the generated summaries on the development set. We use the average of all metric scores, including RL and BS, so that we do not over-optimize for faithfulness. We find 25 to be optimal for CNN/DM and 55 for XSum. The time to run for XSum is 33 hours, and that for CNN/DM is around 70 hours.
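+
+ A minimal sketch of one greedy-lookahead decoding step is given below; faith_fn stands in for whichever faithfulness metric is used as the heuristic, and the actual implementation may batch and cache these computations differently.
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def lookahead_step(model, tok, enc_inputs, prefix_ids, faith_fn, source,
+                    w=25.0, topk=5, max_length=64):
+     # Model log-probabilities for the next token given the current prefix
+     # (prefix_ids starts with the decoder start token).
+     logits = model(**enc_inputs, decoder_input_ids=prefix_ids).logits[0, -1]
+     logprobs = torch.log_softmax(logits, dim=-1)
+     top = torch.topk(logprobs, topk)
+     best_id, best_score = None, float("-inf")
+     for lp, tok_id in zip(top.values, top.indices):
+         cand = torch.cat([prefix_ids, tok_id.view(1, 1)], dim=-1)
+         # "l = full" lookahead: greedily roll out the rest of the summary.
+         rollout = model.generate(**enc_inputs, decoder_input_ids=cand,
+                                  num_beams=1, do_sample=False, max_length=max_length)
+         future = tok.decode(rollout[0], skip_special_tokens=True)
+         score = lp.item() + w * faith_fn(future, source)
+         if score > best_score:
+             best_id, best_score = tok_id, score
+     return best_id  # appended to the prefix by the caller
+ ```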
378
+
379
+ Distillation Details. We use the example code from TRANSFORMERS to train summarization models. We follow the authors' hyper-parameters to train BART-Large models. We use 8 V100 GPUs and the training time is around 5 hours.
380
+
381
+ <table><tr><td></td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td colspan="7">CNN/DM</td></tr><tr><td>Greedy</td><td>30.93</td><td>88.39</td><td>93.15</td><td>69.61</td><td>8.15</td><td>59.13</td></tr><tr><td>Nucleus p = 0.1</td><td>30.93</td><td>88.39</td><td>93.15</td><td>69.58</td><td>8.17</td><td>59.13</td></tr><tr><td>Nucleus p = 0.3</td><td>30.77</td><td>88.37</td><td>93.12</td><td>69.43</td><td>8.40</td><td>59.01</td></tr><tr><td>Nucleus p = 0.5</td><td>30.39</td><td>88.31</td><td>92.95</td><td>66.67</td><td>10.05</td><td>58.81</td></tr><tr><td>Nucleus p = 0.7</td><td>29.43</td><td>88.20</td><td>92.56</td><td>60.88</td><td>12.64</td><td>58.11</td></tr><tr><td>Nucleus p = 0.9</td><td>27.64</td><td>87.90</td><td>91.76</td><td>54.05</td><td>21.61</td><td>56.43</td></tr><tr><td>Beam k = 2</td><td>30.78</td><td>88.29</td><td>93.62</td><td>76.00</td><td>5.80</td><td>59.72</td></tr><tr><td>Beam k = 4</td><td>30.44</td><td>88.17</td><td>93.95</td><td>81.19</td><td>4.11</td><td>59.97</td></tr><tr><td>Beam k = 6</td><td>30.30</td><td>88.12</td><td>94.07</td><td>82.94</td><td>3.76</td><td>60.07</td></tr><tr><td>Beam k = 8</td><td>30.10</td><td>88.07</td><td>94.15</td><td>83.50</td><td>3.39</td><td>60.06</td></tr><tr><td>Beam k = 10</td><td>29.99</td><td>88.03</td><td>94.20</td><td>84.23</td><td>3.30</td><td>60.03</td></tr><tr><td colspan="7">XSum</td></tr><tr><td>Greedy</td><td>36.16</td><td>92.03</td><td>89.28</td><td>23.53</td><td>65.35</td><td>36.51</td></tr><tr><td>Nucleus p = 0.1</td><td>31.08</td><td>91.24</td><td>88.61</td><td>21.63</td><td>76.07</td><td>35.04</td></tr><tr><td>Nucleus p = 0.3</td><td>31.19</td><td>91.25</td><td>88.62</td><td>21.51</td><td>76.35</td><td>34.98</td></tr><tr><td>Nucleus p = 0.5</td><td>31.08</td><td>91.24</td><td>88.63</td><td>21.11</td><td>75.31</td><td>34.99</td></tr><tr><td>Nucleus p = 0.7</td><td>31.24</td><td>91.24</td><td>88.62</td><td>21.26</td><td>76.11</td><td>35.01</td></tr><tr><td>Nucleus p = 0.9</td><td>31.15</td><td>91.26</td><td>88.62</td><td>21.04</td><td>76.20</td><td>34.98</td></tr><tr><td>Beam k = 2</td><td>36.76</td><td>92.13</td><td>89.38</td><td>22.98</td><td>64.62</td><td>36.82</td></tr><tr><td>Beam k = 4</td><td>36.96</td><td>92.14</td><td>89.42</td><td>23.00</td><td>63.81</td><td>36.97</td></tr><tr><td>Beam k = 6</td><td>37.09</td><td>92.14</td><td>89.43</td><td>22.70</td><td>63.71</td><td>37.00</td></tr><tr><td>Beam k = 8</td><td>37.09</td><td>92.13</td><td>89.44</td><td>23.05</td><td>63.52</td><td>37.02</td></tr><tr><td>Beam k = 10</td><td>37.11</td><td>92.12</td><td>89.45</td><td>22.97</td><td>63.49</td><td>37.05</td></tr></table>
382
+
383
+ To generate the summaries for the training data, the BS-Fact Ranker takes around 3 hours when parallelized across the 8 GPUs, and Lookahead takes 10 hours, split across 8 GPUs.
384
+
385
+ # A.4 Human Evaluation Details
386
+
387
+ Human Evaluation on Faithfulness. The screenshot of the annotation can be seen in Figure 5. We required annotators to pass a custom qualification test consisting of three summaries with factual errors. To pass the test, the annotators had to correctly describe the factual errors in words. Workers also needed to have previously completed 100 or more tasks with an acceptance rate of $95\%$ or higher. We recruited workers from countries whose main language is English. To prevent any one worker from dominating the results, we set a maximum of 100 HITs per worker per dataset. The payment for judging each summary was $0.22 plus a bonus of $0.03. Annotators who spent more than 10 seconds per HIT and maintained high accuracy on HITs with known answers obtained the bonus. Annotators spent a median amount of 57.5 seconds per HIT, which amounts to a pay of $15.65 per hour. Krippendorff alpha (Krippendorff, 1980) for the CNN/DM factuality annotation is 0.63, and Krippendorff alpha for the XSum annotation is 0.57.
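+
+ As a quick check, the quoted hourly rate follows directly from the per-HIT payment and the median time per HIT:
+
+ ```python
+ pay_per_hit = 0.22 + 0.03            # base payment plus bonus, in USD
+ median_seconds = 57.5
+ print(round(pay_per_hit * 3600 / median_seconds, 2))  # -> 15.65
+ ```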
388
+
389
+ Table 7: Full results of beam search and nucleus sampling for fine-tuned BART-LARGE models. The trend can still be seen under different beam sizes and top- $p$ values, where increasing $k$ improves faithfulness and increasing $p$ degrades it.
390
+
391
+ <table><tr><td></td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td colspan="7">CNN/DM</td></tr><tr><td>Greedy</td><td>30.20</td><td>87.65</td><td>89.71</td><td>53.00</td><td>15.44</td><td>56.70</td></tr><tr><td>Nucleus p = 0.1</td><td>30.20</td><td>87.65</td><td>89.71</td><td>52.99</td><td>15.44</td><td>56.68</td></tr><tr><td>Nucleus p = 0.3</td><td>30.15</td><td>87.64</td><td>89.71</td><td>52.96</td><td>15.72</td><td>56.68</td></tr><tr><td>Nucleus p = 0.5</td><td>29.88</td><td>87.61</td><td>89.62</td><td>51.48</td><td>17.25</td><td>56.43</td></tr><tr><td>Nucleus p = 0.7</td><td>28.86</td><td>87.47</td><td>89.36</td><td>46.44</td><td>20.97</td><td>55.92</td></tr><tr><td>Nucleus p = 0.9</td><td>30.15</td><td>87.23</td><td>88.88</td><td>38.82</td><td>28.53</td><td>54.78</td></tr><tr><td>Beam k = 2</td><td>30.67</td><td>87.72</td><td>90.28</td><td>57.75</td><td>11.40</td><td>57.24</td></tr><tr><td>Beam k = 4</td><td>30.82</td><td>87.71</td><td>90.61</td><td>62.36</td><td>9.50</td><td>57.42</td></tr><tr><td>Beam k = 6</td><td>30.67</td><td>87.66</td><td>90.75</td><td>64.18</td><td>8.72</td><td>57.43</td></tr><tr><td>Beam k = 8</td><td>30.68</td><td>87.65</td><td>90.82</td><td>64.96</td><td>8.66</td><td>57.43</td></tr><tr><td>Beam k = 10</td><td>30.66</td><td>87.65</td><td>90.87</td><td>65.32</td><td>8.23</td><td>57.46</td></tr><tr><td colspan="7">XSum</td></tr><tr><td>Greedy</td><td>38.53</td><td>92.45</td><td>89.05</td><td>24.53</td><td>68.33</td><td>35.75</td></tr><tr><td>Nucleus p = 0.1</td><td>38.53</td><td>92.44</td><td>89.05</td><td>24.50</td><td>68.33</td><td>35.76</td></tr><tr><td>Nucleus p = 0.3</td><td>38.42</td><td>92.42</td><td>89.02</td><td>24.10</td><td>69.23</td><td>35.68</td></tr><tr><td>Nucleus p = 0.5</td><td>37.85</td><td>92.33</td><td>88.97</td><td>23.14</td><td>70.07</td><td>35.51</td></tr><tr><td>Nucleus p = 0.7</td><td>36.13</td><td>92.09</td><td>88.80</td><td>22.95</td><td>72.72</td><td>35.27</td></tr><tr><td>Nucleus p = 0.9</td><td>33.76</td><td>91.68</td><td>88.51</td><td>22.46</td><td>76.23</td><td>34.73</td></tr><tr><td>Beam k = 2</td><td>39.09</td><td>92.53</td><td>89.13</td><td>23.58</td><td>67.72</td><td>35.90</td></tr><tr><td>Beam k = 4</td><td>39.35</td><td>92.58</td><td>89.19</td><td>22.64</td><td>67.16</td><td>35.97</td></tr><tr><td>Beam k = 6</td><td>39.32</td><td>92.57</td><td>89.21</td><td>22.89</td><td>66.86</td><td>35.98</td></tr><tr><td>Beam k = 8</td><td>39.37</td><td>92.57</td><td>89.21</td><td>22.71</td><td>66.73</td><td>35.97</td></tr><tr><td>Beam k = 10</td><td>39.43</td><td>92.57</td><td>89.23</td><td>22.75</td><td>66.48</td><td>35.96</td></tr></table>
392
+
393
+ Table 8: Full results of beam search and nucleus sampling for fine-tuned PEGASUS-LARGE models. We make a similar observation as in Table 7, showing that the faithfulness trend holds for different models.
394
+
395
+ Human Evaluation on Informativeness. The screenshot of the annotation can be seen in Figure 6. To achieve good quality, we set up a qualification task of three documents with their associated summaries. A selected pool of workers who had passed previous factuality qualification tests was allowed to take this current qualification test. The workers who passed the current qualification test were allowed to participate in this evaluation. In addition, we added the same three documents with known answers to the evaluation and observed that workers had $100\%$ accuracy on them. We set the same maximum of 100 HITs per worker per dataset as in the factuality evaluation. The pay was $0.40 plus a $0.10 bonus per HIT. Annotators spent a median time of 112 seconds per HIT, amounting to a pay of $16.07 per hour. For inter-annotator agreement, Krippendorff alpha (Krippendorff, 1980) for the CNN/DM annotation is 0.22, and Krippendorff alpha for the XSum annotation is 0.32.
396
+
397
+ # B Full Analysis
398
+
399
+ Table 7 shows the full results. We see the general trend that increasing the beam size improves faithfulness, while increasing $p$ for sampling does not help faithfulness.
400
+
401
+ We similarly run the experiment on PEGASUS, a 568M model specifically trained for the task of abstractive summarization, with its respective checkpoints.
402
+
403
+ <table><tr><td></td><td>Rouge-L</td><td>BERTScore</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td colspan="7">CNN/DM</td></tr><tr><td>Greedy</td><td>30.93</td><td>88.39</td><td>93.15</td><td>69.61</td><td>8.15</td><td>53.71</td></tr><tr><td>Beam k = 10</td><td>29.99</td><td>88.03</td><td>94.20</td><td>84.23</td><td>3.30</td><td>60.03</td></tr><tr><td>Greedy + Greedy Lookahead</td><td>30.88</td><td>88.38</td><td>93.57</td><td>71.54</td><td>6.42</td><td>59.70</td></tr><tr><td>Greedy + Sampling Lookahead</td><td>30.67</td><td>88.35</td><td>93.54</td><td>78.28</td><td>7.09</td><td>59.72</td></tr><tr><td>Greedy + Beam Lookahead</td><td>30.63</td><td>88.32</td><td>93.85</td><td>82.07</td><td>5.33</td><td>60.13</td></tr><tr><td>Beam + Greedy Lookahead</td><td>28.66</td><td>87.84</td><td>95.32</td><td>86.10</td><td>1.68</td><td>63.69</td></tr><tr><td colspan="7">XSum</td></tr><tr><td>Greedy</td><td>36.16</td><td>92.03</td><td>89.28</td><td>23.53</td><td>65.35</td><td>36.51</td></tr><tr><td>Beam k = 10</td><td>37.11</td><td>92.12</td><td>89.45</td><td>22.97</td><td>63.49</td><td>37.05</td></tr><tr><td>Greedy + Greedy Lookahead</td><td>36.25</td><td>92.11</td><td>89.71</td><td>24.21</td><td>60.46</td><td>37.17</td></tr><tr><td>Greedy + Sampling Lookahead</td><td>36.24</td><td>92.10</td><td>89.55</td><td>23.97</td><td>62.35</td><td>36.90</td></tr><tr><td>Greedy + Beam Lookahead</td><td>36.17</td><td>92.07</td><td>89.62</td><td>23.58</td><td>61.90</td><td>37.10</td></tr><tr><td>Beam + Greedy Lookahead</td><td>35.27</td><td>91.94</td><td>90.78</td><td>23.38</td><td>50.04</td><td>39.24</td></tr></table>
404
+
405
+ Table 9: Lookahead results with different decoding strategies for base decoding strategies and the lookahead generation strategies.
406
+
407
+ <table><tr><td rowspan="3"></td><td colspan="4">All</td><td colspan="4">CNN/DM</td><td colspan="4">XSum</td></tr><tr><td>Pearson</td><td colspan="3">Spearman</td><td>Pearson</td><td colspan="3">Spearman</td><td>Pearson</td><td colspan="3">Spearman</td></tr><tr><td>ρ</td><td>p</td><td>r</td><td>p</td><td>ρ</td><td>p</td><td>r</td><td>p</td><td>ρ</td><td>p</td><td>r</td><td>p</td></tr><tr><td>FactCC*</td><td>.20</td><td>.00</td><td>.30</td><td>.00</td><td>.36</td><td>.00</td><td>.30</td><td>.00</td><td>.07</td><td>.07</td><td>.19</td><td>.00</td></tr><tr><td>DAE*</td><td>.18</td><td>.00</td><td>.20</td><td>.00</td><td>.27</td><td>.00</td><td>.22</td><td>.00</td><td>.03</td><td>.38</td><td>.33</td><td>.00</td></tr><tr><td>BS-Fact*</td><td>.30</td><td>.00</td><td>.25</td><td>.00</td><td>.38</td><td>.00</td><td>.31</td><td>.00</td><td>.20</td><td>.00</td><td>.09</td><td>.02</td></tr><tr><td>QuestEval</td><td>.19</td><td>.00</td><td>.20</td><td>.00</td><td>.21</td><td>.00</td><td>.19</td><td>.00</td><td>.16</td><td>.00</td><td>.09</td><td>.00</td></tr><tr><td>Comp. Avg</td><td>.34</td><td>.00</td><td>.32</td><td>.00</td><td>.30</td><td>.00</td><td>.33</td><td>.00</td><td>.30</td><td>.00</td><td>.32</td><td>.00</td></tr><tr><td>Comp. Tuned</td><td>.37</td><td>.00</td><td>.34</td><td>.00</td><td>.42</td><td>.00</td><td>.36</td><td>.00</td><td>.31</td><td>.00</td><td>.19</td><td>.00</td></tr></table>
408
+
409
+ Table 10: Partial correlations of metrics on the Frank test dataset. Composite achieves the highest correlations on the combined and XSum dataset. * indicates results copied from the original work.
410
+
411
+ The result is presented in Table 8.
412
+
413
+ # C Lookahead Methods
414
+
415
+ Table 9 shows the result of combining different decoding strategies for the base decoding strategy as well as for the lookahead. We experiment with greedy and beam search as the base decoding strategies. For greedy, we experiment with all three decoding strategies for the lookahead. For beam search as the base strategy, we are unable to run sampling or beam search lookahead due to the large computational cost. Interestingly, using beam search for the lookahead does not provide additional gains. We suspect that this is because exploring the future with more beams cannot guarantee that the base decoding strategy is able to explore them, as it is limited to selecting only the top tokens.
416
+
417
+ <table><tr><td></td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td>Greedy</td><td>26.36</td><td>89.09</td><td>88.93</td><td>89.57</td><td>75.34</td><td>38.39</td></tr><tr><td>Beam</td><td>27.52</td><td>87.56</td><td>89.41</td><td>87.20</td><td>60.21</td><td>39.44</td></tr><tr><td>BEAM+RANKING</td><td>27.60</td><td>87.62</td><td>89.64</td><td>91.11</td><td>47.01</td><td>41.91</td></tr></table>
418
+
419
+ Table 11: Results for ranking on the WikiHow dataset.
420
+
421
+ # D Composite Metric
422
+
423
+ As described in Section 3.1, we train the composite metric on FACTCOLLECT and tune it on FRANK (Pagnoni et al., 2021). We use the test set of Pagnoni et al. (2021) for evaluation and the rest for tuning the composite metric. The resulting weights for the metrics are 0.29, -0.29, 1.97, and 0.94 for FactCC, DAE, BS-Fact, and QuestEval, respectively, and the intercept is -1.91. We additionally compute partial correlations on FRANK, shown in Table 10. We see that the composite is able to further increase the correlations in all settings except for XSum's Spearman correlation. Ablations on the effect of ranking with a single metric are in Appendix E.
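+
+ Spelled out, the tuned composite score is a linear combination of the four metric scores with the weights above; how each metric is scaled before being combined is not shown here and is an assumption of this sketch.
+
+ ```python
+ def composite_score(factcc, dae, bs_fact, questeval):
+     # Weights and intercept as reported above; input scaling is an assumption.
+     return 0.29 * factcc - 0.29 * dae + 1.97 * bs_fact + 0.94 * questeval - 1.91
+ ```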
424
+
425
+ Since FACTCOLLECT only contains annotations on XSum and CNN/DM, we analyze whether the composite metric is robust on another dataset and domain. We use WikiHow (Koupaee and Wang, 2018) and decode using PEGASUS with greedy and beam decoding. The result of applying ranking to the beam output can be seen in Table 11. We see consistent gains in all faithfulness metrics when we apply ranking, showing that ranking robustly improves faithfulness in another domain.
426
+
427
+ ![](images/87ffeb16b0c73aa6733a9aeb37e61fab36de11dc34ff48077010fdb75653b11c.jpg)
428
+ Figure 7: Faithfulness score of the lookahead summaries at each time step. Adding lookahead as the heuristics improves the search space to generate more faithful summaries.
429
+
430
+ ![](images/0e90a60b629ea6d6318eed6b98203f1fb4d8cc16a76aea2989ac5a5e090e94a2.jpg)
431
+
432
+ <table><tr><td></td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FactCC</td><td>DAE ↓</td><td>QuestEval</td></tr><tr><td colspan="7">CNN/DM</td></tr><tr><td>Greedy</td><td>30.93</td><td>88.39</td><td>93.15</td><td>69.61</td><td>8.15</td><td>59.13</td></tr><tr><td>l = 0</td><td>30.71</td><td>88.34</td><td>93.13</td><td>70.41</td><td>8.19</td><td>58.65</td></tr><tr><td>l = full</td><td>30.75</td><td>88.35</td><td>93.90</td><td>71.54</td><td>5.70</td><td>60.13</td></tr><tr><td colspan="7">XSum</td></tr><tr><td>Greedy</td><td>36.16</td><td>92.03</td><td>89.28</td><td>23.53</td><td>65.35</td><td>36.51</td></tr><tr><td>l = 0</td><td>35.73</td><td>92.00</td><td>89.39</td><td>23.43</td><td>64.06</td><td>36.55</td></tr><tr><td>l = full</td><td>36.25</td><td>92.11</td><td>89.71</td><td>24.21</td><td>60.46</td><td>37.17</td></tr></table>
433
+
434
+ # E Ablations
435
+
436
+ We present several ablation studies for our proposed faithfulness-aware decoding methods. More ablation studies exploring how the lookahead changes the search space can be found below in this appendix.
437
+
438
+ Lookahead Length. We first present the result of using the lookahead heuristics but with $l = 0$. This means that at each time step, we do not use future heuristics but directly evaluate the faithfulness of the already generated partial summaries as the additional score. The result using GREEDY+LOOKAHEAD is shown in Table 12. Compared to greedy decoding, adding the faithfulness score of the current partial summary shows mixed results; the heuristic can only slightly improve BS-Fact, DAE, and QuestEval for XSum. However, we only see substantial gains when the future is taken into account (i.e., $l =$ full). This shows the necessity of using the full summary to achieve the full potential of current faithfulness metrics.
439
+
440
+ Table 12: Lookahead ablation with different lengths. $l = 0$ provides the faithfulness heuristic score only on the partially generated summaries while $l =$ full is our lookahead model that evaluates on the full future summary. The faithfulness score calculated on the partial summaries does not provide an effective estimate that improves the faithfulness of the generated summary.
441
+
442
+ <table><tr><td>Ranker</td><td>RL</td><td>BS</td><td>BS-Fact</td><td>FC</td><td>DAE ↓</td><td>QE</td><td>COMP</td></tr><tr><td colspan="8">CNN/DM</td></tr><tr><td>First</td><td>29.99</td><td>88.03</td><td>94.20</td><td>84.23</td><td>3.30</td><td>60.03</td><td>74.68</td></tr><tr><td>BS-Fact</td><td>29.81</td><td>88.04</td><td>94.64</td><td>84.08</td><td>3.04</td><td>60.41</td><td>75.94</td></tr><tr><td>FC</td><td>29.98</td><td>88.04</td><td>94.20</td><td>90.75</td><td>3.00</td><td>60.06</td><td>76.75</td></tr><tr><td>DAE</td><td>30.00</td><td>88.03</td><td>94.20</td><td>84.28</td><td>1.92</td><td>60.04</td><td>75.11</td></tr><tr><td>QE</td><td>30.27</td><td>88.16</td><td>94.14</td><td>82.81</td><td>2.83</td><td>63.26</td><td>77.33</td></tr><tr><td>Comp.</td><td>30.08</td><td>88.12</td><td>94.31</td><td>90.27</td><td>1.92</td><td>62.57</td><td>79.51</td></tr><tr><td colspan="8">XSum</td></tr><tr><td>Top</td><td>37.11</td><td>92.12</td><td>89.45</td><td>22.97</td><td>63.49</td><td>37.05</td><td>8.08</td></tr><tr><td>BS-Fact</td><td>36.46</td><td>92.14</td><td>90.15</td><td>22.10</td><td>56.33</td><td>37.98</td><td>12.15</td></tr><tr><td>FC</td><td>36.98</td><td>92.11</td><td>89.44</td><td>41.93</td><td>63.47</td><td>37.01</td><td>13.67</td></tr><tr><td>DAE</td><td>36.94</td><td>92.11</td><td>89.54</td><td>23.27</td><td>50.82</td><td>37.28</td><td>12.24</td></tr><tr><td>QE</td><td>36.36</td><td>92.06</td><td>89.61</td><td>23.07</td><td>60.35</td><td>41.17</td><td>13.20</td></tr><tr><td>Comp.</td><td>36.42</td><td>92.10</td><td>89.79</td><td>40.11</td><td>51.48</td><td>40.10</td><td>20.20</td></tr></table>
443
+
444
+ Table 13: Ranking results with different faithfulness metrics. Top is the best summary from beam search, and each subsequent rows represent the ranker using the corresponding faithfulness metric. We abbreviate FactCC as FC, QuestEval as QE, and Composite as Comp.
445
+
446
447
+
448
+ Ranking with Faithfulness Metrics. Next, we present the result for ranking with each respective faithfulness metric. The result is shown in Table 13. Generally, optimizing for one metric will lead to improvement in other faithfulness metrics. While optimizing each of the faithfulness metrics will undoubtedly perform the best when we use that metric for evaluation, the composite metric is able to achieve a similarly good score for all faithfulness metric that we are considering.
449
+
450
+ Evaluating the Search Space. We hypothesize that by incorporating the lookahead, we can improve the search space even when only a few tokens have been generated. To better understand this, we greedily decode the full summary at each time step given the prefix, similar to how the lookahead works. We then use BS-Fact and DAE to score all generated summaries and analyze the faithfulness scores at each time step.
451
+
452
+ ![](images/396474eff0dcba043da37eb3905469341b6b37efb90a440c68571e2c7da3552c.jpg)
453
+ Figure 8: Faithfulness and abstractiveness tradeoff results on the 200 CNN/DM examples used for human annotation. While our proposed methods are less abstractive, the gain in faithfulness is much larger than the decrease in abstractiveness.
454
+
455
+ ![](images/3ebe4d124b3162f40a2a98e9ebb24382d561aeff66767b2d4d12a651c51ec08b.jpg)
456
+ Figure 9: Faithfulness and Abstractiveness tradeoff results on the full examples XSum test set. Faithfulness is calculated by taking the average across all automatic faithfulness metrics.
457
+
458
+ Here, we focus on XSum and compare greedy decoding and GREEDY+LOOKAHEAD. The plots of faithfulness scores using the current prefix to generate the full summaries are shown in Figure 7, where we see the benefit of having the lookahead heuristics. For BS-Fact, we see a large gap between the two methods, especially when $t$ is between 5 and 50. Though this may be less surprising, as this is the faithfulness metric that the lookahead heuristic optimizes, the heuristic can nevertheless prevent the score from dipping, which we see for greedy search between $t = 5$ and $t = 40$. This shows that it is able to lead the model onto a more faithful path and prevent it from straying onto a less faithful one. When we evaluate with DAE, we show that optimizing on BS-Fact with the lookahead heuristic can consistently improve the score for all lengths.
459
+
460
+ ![](images/de039d53d09318ecba1af53d0c2be158017638eca3971d3af476ac57ac937d88.jpg)
461
+ Figure 10: Faithfulness and Abstractiveness tradeoff results on the full examples CNN/DM test set. Faithfulness is calculated by taking the average across all automatic faithfulness metrics.
462
+
463
+ # F Abstractiveness
464
+
465
+ We first show the same tradeoff result on CNN/DM in Figure 8. BEAM+LOOKAHEAD+ABSTR does achieve a slightly higher MINT score while also improving faithfulness.
466
+
467
+ We also extend the analysis to the whole test dataset and show the faithfulness score by taking the average of all faithfulness metrics (Avg.). Since DAE is an error rate, we subtract the score from 100 so that a higher score means it is more faithful. We do not use the composite metric as the ranking directly optimizes for it.
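+
+ A minimal sketch of this averaging, assuming the four automatic faithfulness metrics are BS-Fact, FactCC, DAE, and QuestEval on a 0-100 scale:
+
+ ```python
+ def avg_faithfulness(bs_fact, factcc, dae, questeval):
+     # DAE is an error rate, so it is flipped (100 - DAE) before averaging.
+     return (bs_fact + factcc + (100.0 - dae) + questeval) / 4.0
+ ```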
468
+
469
+ We can see a similar trend with the average of faithfulness metrics for both datasets in Figure 9 and Figure 10, where the gain in faithfulness outweighs the decrease in abstractiveness. The difference from the result using human faithfulness scores is that BEAM+RANKING achieves the highest average score, since ranking with the composite metric directly optimizes the faithfulness metrics.
2303.03xxx/2303.03278/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ac8853de8baa2792ce065f319863bc283a3d6a8b507bf3728224c8b9de11ee0
3
+ size 781043
2303.03xxx/2303.03278/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03283/c2e1acdb-42dd-441d-a39c-3ad4cbc8d300_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03283/c2e1acdb-42dd-441d-a39c-3ad4cbc8d300_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03283/c2e1acdb-42dd-441d-a39c-3ad4cbc8d300_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:676a01e83af3c9c525847142494be13672998899933e5516a3a00dac278ae281
3
+ size 2938039
2303.03xxx/2303.03283/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03283/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a133dc274dad0a671287ac8caf5e7c37bb40eccf1cb5d3ecc638f9b72a0b53a1
3
+ size 816024
2303.03xxx/2303.03283/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2303.03xxx/2303.03297/788a8595-a785-4c27-915c-cbf6cafb8054_content_list.json ADDED
@@ -0,0 +1,1351 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Robust Immersive Telepresence and Mobile Telemanipulation: NimbRo wins ANA Avatar XPRIZE Finals",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 140,
8
+ 88,
9
+ 854,
10
+ 136
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Max Schwarz*, Christian Lenz*, Raphael Memmesheimer, Bastian Pätzold, Andre Rochow, Michael Schreiber, and Sven Behnke",
17
+ "bbox": [
18
+ 217,
19
+ 157,
20
+ 779,
21
+ 191
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Abstract—Robotic avatar systems promise to bridge distances and reduce the need for travel. We present the updated NimbRo avatar system, winner of the $5M grand prize at the international ANA Avatar XPRIZE competition, which required participants to build intuitive and immersive robotic telepresence systems that could be operated by briefly trained operators. We describe key improvements for the finals, compared to the system used in the semifinals: To operate without a power- and communications tether, we integrated a battery and a robust redundant wireless communication system. Video and audio data are compressed using low-latency HEVC and Opus codecs. We propose a new locomotion control device with tunable resistance force. To increase flexibility, the robot's upper-body height can be adjusted by the operator. We describe essential monitoring and robustness tools which enabled the success at the competition. Finally, we analyze our performance at the competition finals and discuss lessons learned.",
28
+ "bbox": [
29
+ 81,
30
+ 217,
31
+ 488,
32
+ 433
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "I. INTRODUCTION",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 218,
42
+ 441,
43
+ 352,
44
+ 455
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Traveling large distances costs money and time; and most forms of travel impact the environment. Reducing the need to travel is thus beneficial for many reasons. While voice calls and video conferencing help, they cannot replace in-person meetings entirely due to lack of immersion and social interaction. Furthermore, many remote tasks require mobility, physical touch, grasping and handling of objects, or even more complex manipulation skills. These requirements cannot be addressed by VR-based conferencing systems that focus on meetings in a virtual space. In contrast, avatar systems allow full immersion into a remote space while also embodying the operator in a robotic system, giving them the ability to navigate and physically interact with both the remote environment and persons therein.",
51
+ "bbox": [
52
+ 81,
53
+ 459,
54
+ 488,
55
+ 671
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "The ANA Avatar XPRIZE competition<sup>1</sup> challenged the robotics community to advance the state of the art in avatar systems. Promising a record $10M prize purse, the competition required teams to build intuitive and robust robotic avatar systems that allow a human operator to be present in a remote space. The tasks to be solved included social interaction and communication, but also locomotion and complex manipulation. Critically, the systems were to be used and evaluated by operator and recipient judges. In contrast to previous teleoperation competitions, such as the DARPA Robotics Challenge [1] operators could be trained only for a short time how to use the developed avatar systems.",
62
+ "bbox": [
63
+ 81,
64
+ 670,
65
+ 488,
66
+ 867
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "image",
72
+ "img_path": "images/a6ff304865b8acd42117ca8d4200acba2cfb0b6663036f1cf388222e7039b387.jpg",
73
+ "image_caption": [],
74
+ "image_footnote": [],
75
+ "bbox": [
76
+ 509,
77
+ 210,
78
+ 674,
79
+ 314
80
+ ],
81
+ "page_idx": 0
82
+ },
83
+ {
84
+ "type": "image",
85
+ "img_path": "images/cd39875e7fd5cfc1190f67d70682d385525d05ae643813146e84d029fefdd774.jpg",
86
+ "image_caption": [
87
+ "Fig. 1. NimbRo avatar system at the ANA Avatar XPRIZE competition Finals. Stills from the end of our winning final run with the robot holding the correctly retrieved stone (magenta). Top left: Operator judge controlling the avatar. Bottom left: VR view (cropped). Right: Avatar robot in the arena."
88
+ ],
89
+ "image_footnote": [],
90
+ "bbox": [
91
+ 511,
92
+ 315,
93
+ 674,
94
+ 417
95
+ ],
96
+ "page_idx": 0
97
+ },
98
+ {
99
+ "type": "image",
100
+ "img_path": "images/93ebfdaa1712f1b5f72aba6c1f4f1cb74896a1dbbac01ff27be253d479a59952.jpg",
101
+ "image_caption": [],
102
+ "image_footnote": [],
103
+ "bbox": [
104
+ 676,
105
+ 210,
106
+ 910,
107
+ 417
108
+ ],
109
+ "page_idx": 0
110
+ },
111
+ {
112
+ "type": "text",
113
+ "text": "In this paper, we present and discuss the updates and extensions of the NimbRo avatar system (Fig. 1) that we made for our highly successful participation in the ANA Avatar XPRIZE Finals in November 2022, where our team won the grand prize<sup>2</sup>. The finals posed new requirements and tasks that resulted in various system extensions and improvements, compared to our earlier system used in the semifinals [2]. The contributions of this paper include:",
114
+ "bbox": [
115
+ 504,
116
+ 484,
117
+ 911,
118
+ 606
119
+ ],
120
+ "page_idx": 0
121
+ },
122
+ {
123
+ "type": "list",
124
+ "sub_type": "text",
125
+ "list_items": [
126
+ "1) hardware integration for tetherless and battery-powered operation of the avatar robot and mobility of the operator station,",
127
+ "2) a redundant network stack for robust wireless communication,",
128
+ "3) monitoring tools for efficient support crew operations,",
129
+ "4) auto-recovery mechanisms for failure tolerance on multiple levels, and",
130
+ "5) a thorough analysis of the competition results and lessons learned from our participation at the finals."
131
+ ],
132
+ "bbox": [
133
+ 521,
134
+ 608,
135
+ 913,
136
+ 758
137
+ ],
138
+ "page_idx": 0
139
+ },
140
+ {
141
+ "type": "text",
142
+ "text": "II. RELATED WORK",
143
+ "text_level": 1,
144
+ "bbox": [
145
+ 637,
146
+ 773,
147
+ 782,
148
+ 787
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "Teleoperated robotic systems are widespread. For example, the DARPA Robotics Challenge [1] resulted in an array of legged, wheeled, and tracked teleoperated robots. We focus our discussion on avatar systems, that is, systems which not only allow teleoperation, but also telepresence—immersion in the remote environment and interaction with human recipients, as in the XPRIZE competition.",
155
+ "bbox": [
156
+ 504,
157
+ 792,
158
+ 913,
159
+ 900
160
+ ],
161
+ "page_idx": 0
162
+ },
163
+ {
164
+ "type": "header",
165
+ "text": "Accepted final version. IEEE-RAS International Conference on Humanoid Robots (HUMANOIDS), Austin, USA, December 2023",
166
+ "bbox": [
167
+ 75,
168
+ 13,
169
+ 921,
170
+ 31
171
+ ],
172
+ "page_idx": 0
173
+ },
174
+ {
175
+ "type": "aside_text",
176
+ "text": "arXiv:2303.03297v3 [cs.RO] 6 Dec 2023",
177
+ "bbox": [
178
+ 22,
179
+ 267,
180
+ 57,
181
+ 700
182
+ ],
183
+ "page_idx": 0
184
+ },
185
+ {
186
+ "type": "page_footnote",
187
+ "text": "*Equal contribution.",
188
+ "bbox": [
189
+ 98,
190
+ 877,
191
+ 210,
192
+ 888
193
+ ],
194
+ "page_idx": 0
195
+ },
196
+ {
197
+ "type": "page_footnote",
198
+ "text": "All authors are with the Autonomous Intelligent Systems group of University of Bonn, Germany; schwarz@ais.uni-bonn.de",
199
+ "bbox": [
200
+ 84,
201
+ 888,
202
+ 488,
203
+ 912
204
+ ],
205
+ "page_idx": 0
206
+ },
207
+ {
208
+ "type": "page_footnote",
209
+ "text": "1https://www.xprize.org/prizes/ avatar",
210
+ "bbox": [
211
+ 96,
212
+ 912,
213
+ 387,
214
+ 925
215
+ ],
216
+ "page_idx": 0
217
+ },
218
+ {
219
+ "type": "page_footnote",
220
+ "text": "2https://www.youtube.com/watch?v=EmESa2O1q4c",
221
+ "bbox": [
222
+ 519,
223
+ 912,
224
+ 867,
225
+ 926
226
+ ],
227
+ "page_idx": 0
228
+ },
229
+ {
230
+ "type": "image",
231
+ "img_path": "images/38ca6832cafe7a02d14e6c10a3c4d59b8354c5ac6b53d6de1eb807f9f307e30b.jpg",
232
+ "image_caption": [
233
+ "Fig. 2. NimbRo avatar system consisting of the operator station (left) and the avatar robot (right)."
234
+ ],
235
+ "image_footnote": [],
236
+ "bbox": [
237
+ 106,
238
+ 58,
239
+ 537,
240
+ 316
241
+ ],
242
+ "page_idx": 1
243
+ },
244
+ {
245
+ "type": "image",
246
+ "img_path": "images/8f427aba403670e5e789b80208c2c73e5c0fa98502f71f2e253659b2abb99476.jpg",
247
+ "image_caption": [],
248
+ "image_footnote": [],
249
+ "bbox": [
250
+ 545,
251
+ 56,
252
+ 893,
253
+ 316
254
+ ],
255
+ "page_idx": 1
256
+ },
257
+ {
258
+ "type": "text",
259
+ "text": "Some participants are commercial entities and have no scientific publications, such as Pollen Robotics, who came in second in the finals. Their avatar robot is a heavily modified version of their product Reachy with stronger arms (7 DoF, $3.5\\mathrm{kg}$ payload) and a communication head displaying the operator's face. In contrast to our operator station, Pollen uses VR controllers with vibration actuators and a 1 DoF elbow exoskeleton for manipulation. Torque and haptic feedback is thus limited to elbow torque and controller vibration.",
260
+ "bbox": [
261
+ 81,
262
+ 348,
263
+ 488,
264
+ 484
265
+ ],
266
+ "page_idx": 1
267
+ },
268
+ {
269
+ "type": "text",
270
+ "text": "Luo et al. [3] describe the approach by team Northeastern, who achieved third place at the finals. Their avatar system features an interesting glove and gripper system with hydrostatic actuation, which gives fine-grained force feedback. A wave variable approach handles varying communication latency. The decision to forgo a VR head-mounted display in favor of 2D screens is a simple way to avoid motion sickness caused by network latency, but limits immersiveness in comparison to our system.",
271
+ "bbox": [
272
+ 81,
273
+ 484,
274
+ 488,
275
+ 621
276
+ ],
277
+ "page_idx": 1
278
+ },
279
+ {
280
+ "type": "text",
281
+ "text": "Marques et al. [4] came in fourth with the AVATRINA system. Mechanically, it is similar to our system with two Franka Emika Panda arms, dexterous hands and VR teleoperation. In contrast to ours, the neck can only rotate and not translate, which makes looking around occlusions impossible and reduces depth perception and immersion.",
282
+ "bbox": [
283
+ 81,
284
+ 622,
285
+ 488,
286
+ 712
287
+ ],
288
+ "page_idx": 1
289
+ },
290
+ {
291
+ "type": "text",
292
+ "text": "Van Erp et al. [5] showcase the system of team i-Botics, who came in fifth. Based on Halodi Eve, their robot is considerably more humanoid in shape than the other top five teams, although it operated with a gantry for safety reasons up until the last competition run. During the competition, the system suffered from network connectivity problems. The robot's neck is only 1 DoF, seriously limiting camera motion.",
293
+ "bbox": [
294
+ 81,
295
+ 713,
296
+ 488,
297
+ 819
298
+ ],
299
+ "page_idx": 1
300
+ },
301
+ {
302
+ "type": "text",
303
+ "text": "Park et al. [6] describe the system of team SNU ( $8^{\\text{th}}$ place). Their avatar robot is fully humanoid in shape, although for finals it was sitting on a holonomic base for fast and safe locomotion. Interestingly, the team decided not to attempt realistic animation of the operator's face, but displayed operator emotions through basic line drawings of the mouth area, which limits identification of the robot with the human",
304
+ "bbox": [
305
+ 81,
306
+ 820,
307
+ 490,
308
+ 926
309
+ ],
310
+ "page_idx": 1
311
+ },
312
+ {
313
+ "type": "text",
314
+ "text": "controlling it. The SNU operator station features a unique linear actuator system which seems to enlarge the workspace.",
315
+ "bbox": [
316
+ 504,
317
+ 348,
318
+ 911,
319
+ 378
320
+ ],
321
+ "page_idx": 1
322
+ },
323
+ {
324
+ "type": "text",
325
+ "text": "Aside from the XPRIZE competition, there are other notable avatar systems targeting different applications like enabling people with disabilities to work remotely [7], underwater telepresence [8], and space [9]. Takeuchi et al. [7] presented an avatar robot system with focus on enabling people with disabilities to execute physical work remotely. The robot is controlled by mouse and gaze input. In contrast to our proposed system, which aims at transmitting the movements of an operator naturally, their system follows a less immersive approach by controlling motion through a GUI. Lii et al. [9] describe a teleoperated robot controlled by astronauts through a tablet GUI and a 1-DoF haptic joystick. The system can execute assembly and maintenance tasks but has, due to the domain, no human interaction capabilities. TELESAR VI [10] has a long history in the field of teexistence, starting in 1980. The system is designed for remote manipulation and gesturing. Unlike ours, the robot is operated in a stationary seated posture and has two controllable legs. All ten fingers are equipped with multiple sensors for enhanced tactile feedback. However, the force feedback is limited to the fingers.",
326
+ "bbox": [
327
+ 504,
328
+ 378,
329
+ 911,
330
+ 696
331
+ ],
332
+ "page_idx": 1
333
+ },
334
+ {
335
+ "type": "text",
336
+ "text": "III. NIMBRO AVATAR SYSTEM",
337
+ "text_level": 1,
338
+ "bbox": [
339
+ 599,
340
+ 709,
341
+ 818,
342
+ 722
343
+ ],
344
+ "page_idx": 1
345
+ },
346
+ {
347
+ "type": "text",
348
+ "text": "The NimbRo avatar system consists of a robotic operator station and an anthropomorphic avatar robot (see Fig. 2). The human operator sits on a chair and is strapped into two 7 DoF compliant arm exoskeletons (Franka Emika Panda arms) at their palms. 6 DoF force/torque sensors (Nordbo NRS-6050-D80) at the arm wrists are used to provide a weightless feeling for the operator and force/torque feedback to their hands. Finger movements are captured using SenseGlove DK1 hand exoskeletons, which provide force and haptic feedback to the finger tips. The operator's feet are resting on a custom-built 3D pedal device (see Section III-F), which allows omnidirectional control of the robot's base. For visual and auditive immersion, the operator wears a VR head",
349
+ "bbox": [
350
+ 504,
351
+ 729,
352
+ 913,
353
+ 926
354
+ ],
355
+ "page_idx": 1
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "mounted display (Valve Index), which is equipped with additional cameras to capture gaze direction, eye opening, and mouth expressions.",
360
+ "bbox": [
361
+ 83,
362
+ 65,
363
+ 486,
364
+ 109
365
+ ],
366
+ "page_idx": 2
367
+ },
368
+ {
369
+ "type": "text",
370
+ "text": "The avatar robot is equipped with a holonomic mechan-wheeled base for indoor locomotion. Its spine is a linear actuator that can be used to adapt to different manipulation and communication heights. For bimanual manipulation, the robot features two arms (Franka Emika Panda) in approximately humanoid configuration. Force/torque sensors (On-Robot HEX-E) are mounted on the wrists for force feedback. We chose two different robotic hands (Schunk SVH and SIH), each featuring different capabilities. Please refer to Lenz and Behnke [11] for a detailed description of our arm force feedback telemanipulation controller, including active joint limit avoidance using model-based predictions and an oscillation detection and suppression module.",
371
+ "bbox": [
372
+ 81,
373
+ 111,
374
+ 486,
375
+ 306
376
+ ],
377
+ "page_idx": 2
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "The robot head is mounted on a robotic arm (UFactory xArm-6), which mirrors the operator's 6D head movement. Together with the head-mounted wide-angle stereo camera pair (2×Basler a2A3840-45ucBAS) this enables a highly immersive visual 3D experience for the operator. Latencies are mitigated by spherical rendering [12]. The head further carries a screen showing a live animation of the operator's face—a direct video display is not possible, since the operator is wearing the HMD. See [13], [14] for details on the facial animation.",
382
+ "bbox": [
383
+ 81,
384
+ 306,
385
+ 486,
386
+ 455
387
+ ],
388
+ "page_idx": 2
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "The first integrated NimbRo avatar system (as of February 2021, prior to the ANA Avatar XPRIZE Semifinals) is described in detail in Schwarz et al. [2].",
393
+ "bbox": [
394
+ 83,
395
+ 458,
396
+ 488,
397
+ 503
398
+ ],
399
+ "page_idx": 2
400
+ },
401
+ {
402
+ "type": "text",
403
+ "text": "The ANA Avatar XPRIZE finals [15] brought new tasks and requirements which necessitated changes to our design. On the one hand, it was clear that the robot had to operate without a tether for communications and power, which necessitated integration of wireless communication and battery power. The robot had to navigate through narrow passages, necessitating to reduce its width. Much emphasis was placed by XPRIZE on haptic perception and we extended our system accordingly. On the other hand, the competition format changed: Whereas in the semifinals, individual scenarios could be attempted multiple times, with manual intervention allowed in-between, the finals called for a continuous mission through ten tasks, with no possibility to skip tasks or to restart the system in case of failure. Additionally, the participants were down-selected over the successive competition days (see Section IV). This meant that considerable focus had to be placed on making the system robust.",
404
+ "bbox": [
405
+ 81,
406
+ 505,
407
+ 486,
408
+ 760
409
+ ],
410
+ "page_idx": 2
411
+ },
412
+ {
413
+ "type": "text",
414
+ "text": "We will now detail the changes and improvements to the system since early 2021 up to the finals in November 2022.",
415
+ "bbox": [
416
+ 83,
417
+ 761,
418
+ 488,
419
+ 791
420
+ ],
421
+ "page_idx": 2
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "A. Mobile Operator Station",
426
+ "text_level": 1,
427
+ "bbox": [
428
+ 83,
429
+ 801,
430
+ 274,
431
+ 816
432
+ ],
433
+ "page_idx": 2
434
+ },
435
+ {
436
+ "type": "text",
437
+ "text": "The finals required a mobile operator station that could be moved into an operator control room and set-up quickly. To this end, we added extendable wheels to it (see Fig. 2). The control computer and a dedicated computer for face animation were integrated into the structure. Additionally, we added a large battery which can supply the operator station for multiple hours. Using this setup, we could prepare",
438
+ "bbox": [
439
+ 81,
440
+ 820,
441
+ 486,
442
+ 926
443
+ ],
444
+ "page_idx": 2
445
+ },
446
+ {
447
+ "type": "table",
448
+ "img_path": "images/96d25ca21820e5ba12b476bc33c208a8d09a03eafdf6a33e91202a5ed34f484d.jpg",
449
+ "table_caption": [
450
+ "TABLEI AVATAR ROBOT POWER DISTRIBUTION & AVERAGE CONSUMPTION"
451
+ ],
452
+ "table_footnote": [],
453
+ "table_body": "<table><tr><td>Voltage</td><td>Device</td><td>Power</td><td>Voltage</td><td>Device</td><td>Power</td></tr><tr><td rowspan=\"2\">Battery</td><td>Wheels (idle)</td><td>16 W</td><td></td><td>xArm motors</td><td>50 W</td></tr><tr><td>PC</td><td>350 W</td><td>24V</td><td>Hands</td><td>8 W</td></tr><tr><td colspan=\"2\">5V - Base controller</td><td>5 W</td><td></td><td>F/T sensors</td><td>11 W</td></tr><tr><td rowspan=\"2\">12V</td><td>xArm PC</td><td>15 W</td><td colspan=\"2\">48V - Panda motors</td><td>60 W</td></tr><tr><td>Panda PCs</td><td>145 W</td><td></td><td></td><td></td></tr><tr><td colspan=\"6\">Total power consumption (avg): 660 W</td></tr></table>",
454
+ "bbox": [
455
+ 511,
456
+ 88,
457
+ 903,
458
+ 198
459
+ ],
460
+ "page_idx": 2
461
+ },
462
+ {
463
+ "type": "text",
464
+ "text": "the operator station for use long before our run and leave everything initialized and switched on during transport to the operator control room.",
465
+ "bbox": [
466
+ 504,
467
+ 208,
468
+ 911,
469
+ 253
470
+ ],
471
+ "page_idx": 2
472
+ },
473
+ {
474
+ "type": "text",
475
+ "text": "B. Telemanipulation",
476
+ "text_level": 1,
477
+ "bbox": [
478
+ 506,
479
+ 268,
480
+ 648,
481
+ 284
482
+ ],
483
+ "page_idx": 2
484
+ },
485
+ {
486
+ "type": "text",
487
+ "text": "Our tele manipulation components, including the arm and hand exoskeletons on the operator side, arms and five-finger hands on the avatar side, and our force feedback controller have been proven during semifinals [11]. To adapt to the finals requirements, we made three changes. First, we mounted the avatar's arm bases closer together, which reduced the shoulder width and made it easier to maneuver through narrow passages. Second, we equipped the fingertips of the SVH and SIH hands with microswitches and magnet hall sensors, respectively. This allows contact to be measured and then displayed haptically to the operator using the SenseGlove DK1. Finally, we replaced the OnRobot HEX-E force/torque sensors on the operator side with more rigid Nordbo NRS-6050-D80 which offer higher update rate of $1\\mathrm{kHz}$ , resulting in faster response to operator movement.",
488
+ "bbox": [
489
+ 504,
490
+ 291,
491
+ 913,
492
+ 517
493
+ ],
494
+ "page_idx": 2
495
+ },
496
+ {
497
+ "type": "text",
498
+ "text": "C. Roughness Sensing",
499
+ "text_level": 1,
500
+ "bbox": [
501
+ 506,
502
+ 534,
503
+ 661,
504
+ 549
505
+ ],
506
+ "page_idx": 2
507
+ },
508
+ {
509
+ "type": "text",
510
+ "text": "Task 10 of the finals (see Fig. 9) required the ability to remotely feel texture, especially surface roughness. To enable this, we developed an audio-based sensing, detection, and vibrational display system: The left index fingertip is equipped with two microphones: one measuring air vibrations, the other measuring vibrations coupled through the finger. The audio data is transmitted over WiFi (see Section III-D.2) and then analyzed by a CNN, which classifies very short audio segments as rough or smooth. Finally, a vibration actuator on the operator's fingertip displays the classification result, giving the illusion of feeling rough bumps on the object surface with low latency. This low-cost approach is very small and non-invasive on the sensing side. Details of this subsystem are described by Pätzold et al. [16].",
511
+ "bbox": [
512
+ 504,
513
+ 555,
514
+ 913,
515
+ 767
516
+ ],
517
+ "page_idx": 2
518
+ },
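The classifier described above operates on very short two-channel audio windows. As a rough illustration of that idea (not the network from Pätzold et al. [16]), the following PyTorch sketch runs a tiny 1D CNN over a 1024-sample window from the two fingertip microphones; the architecture, window length, and channel sizes are assumptions made for the example.

```python
# Illustrative rough/smooth classifier over short two-channel audio windows.
# Architecture, window length, and training are assumptions for this sketch,
# not the network from Pätzold et al. [16].
import torch
import torch.nn as nn

class RoughnessCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv1d(2, 16, kernel_size=9, stride=2),   # 2 channels: air + structure-borne mic
            nn.ReLU(),
            nn.Conv1d(16, 32, kernel_size=9, stride=2),
            nn.ReLU(),
            nn.AdaptiveAvgPool1d(1),
        )
        self.classifier = nn.Linear(32, 2)                # logits: [smooth, rough]

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, 2, window_samples) raw audio from the fingertip microphones
        return self.classifier(self.features(x).squeeze(-1))

model = RoughnessCNN()
window = torch.randn(1, 2, 1024)                          # one short audio segment
is_rough = model(window).argmax(dim=-1)                   # would drive the vibration actuator
```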
519
+ {
520
+ "type": "text",
521
+ "text": "D. Tetherless Operation",
522
+ "text_level": 1,
523
+ "bbox": [
524
+ 506,
525
+ 782,
526
+ 671,
527
+ 797
528
+ ],
529
+ "page_idx": 2
530
+ },
531
+ {
532
+ "type": "text",
533
+ "text": "To make the system operable without a tether, wideranging changes to the avatar robot had to be implemented.",
534
+ "bbox": [
535
+ 504,
536
+ 804,
537
+ 911,
538
+ 834
539
+ ],
540
+ "page_idx": 2
541
+ },
542
+ {
543
+ "type": "text",
544
+ "text": "1) Power Supply: The robot now carries a RELiON In-Sight 48 V 30 Ah battery, which powers all onboard systems. All power consumers had to be converted to DC power. We identified four voltage rails (see Table I) and installed individual DC-to-DC converters. This required tight integration to fit everything into the robot base (see Fig. 3).",
545
+ "bbox": [
546
+ 504,
547
+ 835,
548
+ 911,
549
+ 926
550
+ ],
551
+ "page_idx": 2
552
+ },
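For a rough sanity check of the figures above (48 V, 30 Ah battery and roughly 660 W average draw from Table I), the following back-of-the-envelope snippet estimates the untethered runtime; converter losses and usable depth of discharge are ignored, so the real runtime would be somewhat shorter.

```python
# Back-of-the-envelope runtime estimate (upper bound: converter losses and
# usable depth of discharge are ignored).
battery_voltage_v = 48.0
battery_capacity_ah = 30.0
avg_power_w = 660.0          # average consumption from Table I

energy_wh = battery_voltage_v * battery_capacity_ah   # 1440 Wh stored
runtime_h = energy_wh / avg_power_w                    # ~2.2 h at average load

print(f"{energy_wh:.0f} Wh stored -> roughly {runtime_h:.1f} h of untethered operation")
```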
553
+ {
554
+ "type": "image",
555
+ "img_path": "images/8942fcb9eadc49595e6e5ba56858b9b56735ae5ba19dd78ad37d97c8f768dc4c.jpg",
556
+ "image_caption": [
557
+ "Fig. 3. Robot base before (left) and after update (right)."
558
+ ],
559
+ "image_footnote": [],
560
+ "bbox": [
561
+ 89,
562
+ 58,
563
+ 488,
564
+ 236
565
+ ],
566
+ "page_idx": 3
567
+ },
568
+ {
569
+ "type": "image",
570
+ "img_path": "images/ce079463a3c324ea00545c95b0ad48c481c67007e58a1c35a84fb1199d7a875e.jpg",
571
+ "image_caption": [
572
+ "Fig. 4. Network Architecture. The operator station contains a 1GBit/s ethernet adapter, which is connected to the XPRIZE network (or our own access point during testing). Two separate access points broadcast a WiFi network at $2.4\\mathrm{GHz}$ and $5\\mathrm{GHz}$ , respectively. The avatar control PC is equipped with two PCI-E WiFi adapters, one for each of the networks."
573
+ ],
574
+ "image_footnote": [],
575
+ "bbox": [
576
+ 86,
577
+ 268,
578
+ 486,
579
+ 343
580
+ ],
581
+ "page_idx": 3
582
+ },
583
+ {
584
+ "type": "text",
585
+ "text": "2) Wireless Communication: For the finals, XPRIZE supplied a $2.4\\mathrm{GHz}$ and a $5\\mathrm{GHz}$ WiFi network on the competition arena. To utilize this fully, we incorporated two WiFi adapters on our robot (see Fig. 4). All network adapters (wired on the operator side, wireless on the robot side) are connected via PCI-E to their respective control computers—reducing latency to a minimum and increasing robustness, compared to USB devices. On the robot, we use two Intel AX210 cards, which have excellent driver support. Each WiFi card is connected to two antennas: a main antenna extended up from the spine of the robot (see Fig. 2) and a backup antenna in the base.",
586
+ "bbox": [
587
+ 81,
588
+ 419,
589
+ 488,
590
+ 599
591
+ ],
592
+ "page_idx": 3
593
+ },
594
+ {
595
+ "type": "text",
596
+ "text": "To facilitate fine-grained control of routing, i.e. which WiFi band is used for which data stream, each WiFi adapter has its own IP address. Conversely, the operator station uses two IP addresses on its Ethernet interface. Static routes ensure that the correct source address and interface are used for each destination address.",
597
+ "bbox": [
598
+ 81,
599
+ 599,
600
+ 488,
601
+ 689
602
+ ],
603
+ "page_idx": 3
604
+ },
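The per-destination source/interface binding described above can be expressed as a handful of static routes. The snippet below only illustrates that pattern with placeholder addresses and interface names; it is not the team's actual network configuration.

```python
# Illustration of per-destination static routes binding each avatar-side
# address to a specific source address on the operator station's Ethernet
# interface. Interface name and addresses are placeholders.
import subprocess

ROUTES = [
    # destination (avatar WiFi IP), interface,  source address
    ("192.168.50.10/32",            "enp0s31f6", "192.168.50.2"),   # 5 GHz stream
    ("192.168.24.10/32",            "enp0s31f6", "192.168.24.2"),   # 2.4 GHz stream
]

for dst, dev, src in ROUTES:
    # `ip route replace` installs (or updates) the static route
    subprocess.run(["ip", "route", "replace", dst, "dev", dev, "src", src], check=True)
```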
605
+ {
606
+ "type": "text",
607
+ "text": "Our network transport is based on nimbro_network $^3$ , which transmits data between the otherwise isolated ROS [17] instances. The type and amount of data is statically configured and transmitted over UDP, which avoids any kind of connection handshake. This allows our system to seamlessly begin operation as soon as network connectivity is established and to recover immediately after a network interruption. This capability strongly contributed to our success at the finals (see Section IV).",
608
+ "bbox": [
609
+ 81,
610
+ 690,
611
+ 488,
612
+ 825
613
+ ],
614
+ "page_idx": 3
615
+ },
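A minimal sketch of the connectionless pattern described above: the sender keeps emitting datagrams regardless of link state, and the receiver simply consumes whatever arrives, so no reconnect logic is needed after a dropout. The message format and addresses are illustrative stand-ins, not nimbro_network's actual wire format.

```python
# Connectionless, handshake-free UDP streaming: operation resumes by itself
# as soon as packets get through again. Addresses and message layout are
# placeholders for illustration only.
import socket
import struct
import time

AVATAR_ADDR = ("10.0.0.2", 17000)   # assumed address/port

def sender():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    seq = 0
    while True:
        payload = struct.pack("<Id", seq, time.time())  # sequence number + timestamp
        sock.sendto(payload, AVATAR_ADDR)               # fire-and-forget, no connection state
        seq += 1
        time.sleep(0.02)                                # ~50 Hz command stream

def receiver():
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("", AVATAR_ADDR[1]))
    while True:
        data, _ = sock.recvfrom(64)
        seq, stamp = struct.unpack("<Id", data)
        age = time.time() - stamp                       # gaps during a dropout are simply skipped
```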
616
+ {
617
+ "type": "text",
618
+ "text": "At semifinals in 2021, our system required around $300\\mathrm{MBit / s}$ downlink bandwidth. At the time, this was fine given a wired 1GBit/s connection, but now bandwidth had to be reduced for robust WiFi operation. While current WiFi systems can sustain such bandwidths in ideal situations,",
619
+ "bbox": [
620
+ 81,
621
+ 825,
622
+ 488,
623
+ 902
624
+ ],
625
+ "page_idx": 3
626
+ },
627
+ {
628
+ "type": "table",
629
+ "img_path": "images/95f999d62d566f935c7eed0cbf5a89150fa0992781f495b10b073eb800b14d17.jpg",
630
+ "table_caption": [
631
+ "TABLE II WiFi BANDWIDTH REQUIREMENTS"
632
+ ],
633
+ "table_footnote": [],
634
+ "table_body": "<table><tr><td colspan=\"4\">Downlink from avatar</td><td colspan=\"4\">Uplink to avatar</td></tr><tr><td>Channel</td><td>MBit/s</td><td colspan=\"2\">5 GHz 2.4 GHz</td><td>Channel</td><td>MBit/s</td><td colspan=\"2\">5 GHz 2.4 GHz</td></tr><tr><td>Arm feedback</td><td>8.5</td><td>✓</td><td>×</td><td>Arm control</td><td>4.9</td><td>✓</td><td>✓</td></tr><tr><td>Transformations</td><td>4.1</td><td>✓</td><td>×</td><td>Transformations</td><td>1.4</td><td>✓</td><td>×</td></tr><tr><td>Main cameras</td><td>14.7</td><td>✓</td><td>×</td><td>Operator face</td><td>5.7</td><td>×</td><td>✓</td></tr><tr><td>Hand camera</td><td>5.5</td><td>×</td><td>✓</td><td>Audio</td><td>0.4</td><td>✓</td><td>✓</td></tr><tr><td>Diagnostics</td><td>0.4</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td></tr><tr><td>Audio</td><td>0.4</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"2\">Total [MBit/s]</td><td>28.1</td><td>6.3</td><td colspan=\"2\">Total [MBit/s]</td><td>6.7</td><td>11.0</td></tr></table>",
635
+ "bbox": [
636
+ 511,
637
+ 90,
638
+ 911,
639
+ 212
640
+ ],
641
+ "page_idx": 3
642
+ },
643
+ {
644
+ "type": "text",
645
+ "text": "they cannot guarantee this in non-line-of-sight or otherwise difficult circumstances. We reduced the required bandwidth significantly by compressing the main video stream $(2\\times 2472\\times 2178\\ @ 46\\mathrm{Hz})$ with the HEVC video codec instead of transmitting individual JPEG images. We took care not to increase video latency, which is a source of operator disorientation and fatigue. To this end, we perform debayering, white balancing, and color correction in a fused CUDA kernel on the onboard RTX 3070 GPU. Video encoding is then performed using the NVIDIA NVENC library on the GPU as well. On the operator side, compressed video packets are uploaded to the GPU (RTX A6000 48GB), extracted using NVDEC, and sent to the HMD after spherical rendering [12], which corrects for head movement latencies. The total latency from start of camera exposure to HMD display is under $50\\mathrm{ms}$ . Overall, we achieve considerably lower bandwidth than our old system (see Table II).",
646
+ "bbox": [
647
+ 504,
648
+ 226,
649
+ 913,
650
+ 483
651
+ ],
652
+ "page_idx": 3
653
+ },
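Some rough arithmetic illustrates why HEVC compression of the main camera stream was necessary for WiFi operation. Assuming 8-bit raw Bayer data (an assumption about the sensor format, not a figure from the paper), the uncompressed stream would be several GBit/s, versus the 14.7 MBit/s listed in Table II.

```python
# Rough bandwidth arithmetic for the main camera stream
# (2 cameras x 2472 x 2178 px @ 46 Hz), assuming 8-bit raw Bayer data.
cams, width, height, fps, bits_per_px = 2, 2472, 2178, 46, 8

raw_mbit_s = cams * width * height * fps * bits_per_px / 1e6   # ~3963 MBit/s uncompressed
hevc_mbit_s = 14.7                                              # "Main cameras" row of Table II

print(f"raw ~{raw_mbit_s:.0f} MBit/s vs. {hevc_mbit_s} MBit/s compressed "
      f"(~{raw_mbit_s / hevc_mbit_s:.0f}:1 reduction)")
```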
654
+ {
655
+ "type": "text",
656
+ "text": "Table II also shows that manipulation control and audio are routed over both WiFi networks redundantly. Both data types are very sensitive to interruptions and packet drops, which lead to uncontrolled movement and loss of information, respectively. The redundant transmission minimizes this risk. We also experimented with redundant configuration for our camera stream, but refrained from activating this in the finals due to concerns about 2.4 GHz bandwidth in an arena with many devices of spectators operating in this band.",
657
+ "bbox": [
658
+ 504,
659
+ 484,
660
+ 913,
661
+ 619
662
+ ],
663
+ "page_idx": 3
664
+ },
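Redundant transmission over both bands only works if the receiver suppresses the duplicate copies. A minimal sketch of that pattern, with sequence-number-based deduplication and placeholder addresses, could look as follows; it is not the actual implementation.

```python
# Sketch of redundant transmission with duplicate suppression, in the spirit
# of the arm-control and audio channels above. Addresses are placeholders.
import socket
import struct

LINKS = [("192.168.50.10", 17001),   # via the 5 GHz adapter
         ("192.168.24.10", 17001)]   # via the 2.4 GHz adapter

def send_redundant(sock: socket.socket, seq: int, payload: bytes) -> None:
    packet = struct.pack("<I", seq) + payload
    for addr in LINKS:                      # same packet on both WiFi bands
        sock.sendto(packet, addr)

class Deduplicator:
    """Accept the first copy of each command, drop late duplicates and stale packets."""
    def __init__(self) -> None:
        self.last_seq = -1

    def accept(self, packet: bytes):
        (seq,) = struct.unpack_from("<I", packet)
        if seq <= self.last_seq:            # already received via the other band
            return None
        self.last_seq = seq
        return packet[4:]
```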
665
+ {
666
+ "type": "text",
667
+ "text": "Audio transmission over WiFi is in itself problematic. Our semifinal solution used the Opus codec with a very low buffer size of 64 samples (at $48\\mathrm{kHz}$ audio), which gives a high packet rate—challenging for WiFi networks. The packet rate can be reduced by increasing the buffer size, but this increases latency and easily leads to echo effects, where the operator can hear their own voice as transmitted by the avatar and then captured by the avatar's microphones. To mitigate this, we integrate an echo cancellation system based on NVIDIA Maxine<sup>4</sup>, which allows us to increase the buffer size to 512, reducing the audio packet rate to roughly $90\\mathrm{Hz}$ .",
668
+ "bbox": [
669
+ 504,
670
+ 621,
671
+ 913,
672
+ 787
673
+ ],
674
+ "page_idx": 3
675
+ },
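The buffer-size change can be checked with simple arithmetic: at 48 kHz, a 64-sample buffer yields 750 packets/s, while a 512-sample buffer yields about 94 packets/s (the "roughly 90 Hz" quoted above), at the cost of roughly 10 ms of additional buffering latency.

```python
# Packet-rate arithmetic behind the audio buffer-size change described above.
sample_rate_hz = 48_000
for buffer_samples in (64, 512):
    packet_rate_hz = sample_rate_hz / buffer_samples            # 750 Hz vs. ~94 Hz
    buffer_latency_ms = 1000 * buffer_samples / sample_rate_hz  # 1.3 ms vs. ~10.7 ms
    print(f"buffer {buffer_samples:>3} samples: {packet_rate_hz:6.1f} packets/s, "
          f"{buffer_latency_ms:4.1f} ms per buffer")
```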
676
+ {
677
+ "type": "text",
678
+ "text": "3) Wireless E-Stop: We integrated an HRI Wireless Emergency Stop to provide a reliable tetherless safety system. Activating the E-Stop depowers the wheels. The avatar stops quickly due to friction, and the base can be pushed around manually. In addition, both Panda arms, the xArm, and both hands hold their current joint positions. The Panda arms can be moved freely using the teach button on their wrist.",
679
+ "bbox": [
680
+ 504,
681
+ 789,
682
+ 913,
683
+ 893
684
+ ],
685
+ "page_idx": 3
686
+ },
687
+ {
688
+ "type": "footer",
689
+ "text": "<sup>3</sup>https://github.com/AIS-Bonn/nimbro_network",
690
+ "bbox": [
691
+ 94,
692
+ 912,
693
+ 434,
694
+ 926
695
+ ],
696
+ "page_idx": 3
697
+ },
698
+ {
699
+ "type": "footer",
700
+ "text": "4https://developer.nvidia.com/maxine",
701
+ "bbox": [
702
+ 519,
703
+ 912,
704
+ 803,
705
+ 926
706
+ ],
707
+ "page_idx": 3
708
+ },
709
+ {
710
+ "type": "image",
711
+ "img_path": "images/d688aedbf3025efac24b438c37802c6eea6f9833dd2577bb080616b421b0483d.jpg",
712
+ "image_caption": [
713
+ "Fig. 5. Self-centering rudder design with individually tunable springs for intuitive omnidirectional locomotion control. Fig. 2 shows the device during operation."
714
+ ],
715
+ "image_footnote": [],
716
+ "bbox": [
717
+ 106,
718
+ 59,
719
+ 472,
720
+ 170
721
+ ],
722
+ "page_idx": 4
723
+ },
724
+ {
725
+ "type": "text",
726
+ "text": "E. Height Adaption",
727
+ "text_level": 1,
728
+ "bbox": [
729
+ 83,
730
+ 219,
731
+ 220,
732
+ 234
733
+ ],
734
+ "page_idx": 4
735
+ },
736
+ {
737
+ "type": "text",
738
+ "text": "In contrast to semifinals, where manipulation on table height was required, finals specified a wider height range. To increase flexibility, we integrated a linear joint in the spine of the robot (see Fig. 2). The operator can control the height with a bidirectional Danfoss KEP foot pedal. The pedal contains springs to provide resistance and uses hall effect sensors for angle sensing. The current height and a side-view rendering of the robot is shown to the operator during movement of the actuator to assist the height adjustment.",
739
+ "bbox": [
740
+ 81,
741
+ 238,
742
+ 488,
743
+ 375
744
+ ],
745
+ "page_idx": 4
746
+ },
747
+ {
748
+ "type": "text",
749
+ "text": "F. Locomotion Control",
750
+ "text_level": 1,
751
+ "bbox": [
752
+ 83,
753
+ 382,
754
+ 243,
755
+ 396
756
+ ],
757
+ "page_idx": 4
758
+ },
759
+ {
760
+ "type": "text",
761
+ "text": "For locomotion of the holonomic robot platform, we propose a new feet-controlled 3D-printed rudder design (see Fig. 5). We identified two major problems with the old device: First, the rudder lacked resistance and self-centering, which made it difficult to control. Second, the rudder yielded only position estimates when both feet were placed on the rudder surface. This became especially problematic when the feet were lifted off the rudder and tilted or rotated slightly, necessitating an often unintuitive, lengthy re-initialization step for operators.",
762
+ "bbox": [
763
+ 81,
764
+ 401,
765
+ 488,
766
+ 551
767
+ ],
768
+ "page_idx": 4
769
+ },
770
+ {
771
+ "type": "text",
772
+ "text": "To address these issues, we introduced resistance and self-centering by employing a spring mechanism. The mechanical base of the rudder is built around a ball-bearing joint and a rotational thrust-bearing joint. By employing two joints, we have control over the pitch, roll and yaw axes by using different springs. Springs with different tension allow individual control of the resistance per axis. For absolute position estimates, we attach an HTC Vive tracker to the rudder which receives signals from the VR tracking system that are then translated to movement commands. We place a separator on the rudder's surface to ease blind foot placement.",
773
+ "bbox": [
774
+ 81,
775
+ 551,
776
+ 488,
777
+ 717
778
+ ],
779
+ "page_idx": 4
780
+ },
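To make the tracker-to-command translation concrete, the sketch below maps a rudder pose (pitch, roll, yaw) to a holonomic base velocity command with per-axis deadbands; the gains and deadband widths are illustrative assumptions, not the tuned values used on the real system.

```python
# Illustrative mapping from rudder pose to a holonomic base velocity command.
# Gains and deadband widths are assumptions for the example, not tuned values.
from dataclasses import dataclass

@dataclass
class RudderPose:
    pitch: float  # rad, toes up/down  -> forward/backward
    roll: float   # rad, sideways tilt -> lateral motion
    yaw: float    # rad, rotation      -> turning

def deadband(value: float, width: float) -> float:
    if abs(value) < width:
        return 0.0
    return value - width if value > 0 else value + width

def rudder_to_cmd_vel(pose: RudderPose,
                      lin_gain: float = 2.0,    # m/s per rad (assumed)
                      ang_gain: float = 3.0):   # rad/s per rad (assumed)
    vx = lin_gain * deadband(pose.pitch, 0.03)
    vy = lin_gain * deadband(pose.roll, 0.03)
    wz = ang_gain * deadband(pose.yaw, 0.05)
    return vx, vy, wz                            # forward, lateral, turn rate

print(rudder_to_cmd_vel(RudderPose(pitch=0.10, roll=0.0, yaw=-0.08)))
```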
781
+ {
782
+ "type": "text",
783
+ "text": "We equipped the avatar robot with addressable LED strips (90 RGB LEDs) under the base, which indicate the driving direction by illuminating the corresponding side of the robot. The LEDs also show the battery level during charging.",
784
+ "bbox": [
785
+ 81,
786
+ 718,
787
+ 488,
788
+ 780
789
+ ],
790
+ "page_idx": 4
791
+ },
792
+ {
793
+ "type": "text",
794
+ "text": "G. Monitoring",
795
+ "text_level": 1,
796
+ "bbox": [
797
+ 83,
798
+ 787,
799
+ 187,
800
+ 801
801
+ ],
802
+ "page_idx": 4
803
+ },
804
+ {
805
+ "type": "text",
806
+ "text": "Monitoring is an essential part of robust robotics. It allows engineers to analyze problems and find their causes quickly. In our scenario, it was especially important to make sure the system is healthy before starting the run, since from then on, manual intervention was not permitted. During the run, the role of monitoring switches to a safety perspective, allowing the support crew to abort the run in case of danger to the human operator, the robot, or the environment. To be",
807
+ "bbox": [
808
+ 81,
809
+ 805,
810
+ 490,
811
+ 926
812
+ ],
813
+ "page_idx": 4
814
+ },
815
+ {
816
+ "type": "image",
817
+ "img_path": "images/52ae7219343d185b59e6816a4f725287947b965c7d4cfd8643532c474095251a.jpg",
818
+ "image_caption": [
819
+ "Fig. 6. System Monitoring GUI. Left: Operator Station status. Each line corresponds to a system check. The red check indicates an issue with the VR trackers mounted on the exoskeleton—caused by a support crew member occluding the line-of-sight. Center: Avatar robot status. Right: Control buttons that enable/disable individual system components."
820
+ ],
821
+ "image_footnote": [],
822
+ "bbox": [
823
+ 509,
824
+ 61,
825
+ 911,
826
+ 261
827
+ ],
828
+ "page_idx": 4
829
+ },
830
+ {
831
+ "type": "image",
832
+ "img_path": "images/fa1185d8c028ff60b2cc7462fcf5e8b8db3e0eaf18c3b325b119922a1aadc837.jpg",
833
+ "image_caption": [
834
+ "Fig. 7. Network status. Left: Overview with individual network flows. Green boxes indicate hosts in the network. Right: Individual data groups can be configured to use $5\\mathrm{GHz}$ and/or $2.4\\mathrm{GHz}$ streams. The packet/s rates on the right indicate packet drops due to WiFi congestion."
835
+ ],
836
+ "image_footnote": [],
837
+ "bbox": [
838
+ 508,
839
+ 332,
840
+ 738,
841
+ 430
842
+ ],
843
+ "page_idx": 4
844
+ },
845
+ {
846
+ "type": "image",
847
+ "img_path": "images/58b34d01535b3453a0fa18b1167591d796280403511f10690153bba3091b5673.jpg",
848
+ "image_caption": [],
849
+ "image_footnote": [],
850
+ "bbox": [
851
+ 743,
852
+ 330,
853
+ 911,
854
+ 430
855
+ ],
856
+ "page_idx": 4
857
+ },
858
+ {
859
+ "type": "text",
860
+ "text": "able to monitor the highly complex avatar system with one glance, we developed an integrated GUI. Because it contains a multitude of video streams and complex plots, the standard ROS GUI, rqt, was not suitable, as it is not optimized for high-bandwidth display. Instead, we developed a GUI based on imgui<sup>5</sup>, an immediate-mode GUI toolkit with OpenGL bindings. This allows us to decode and display the video streams directly on the GPU. The GUI follows the rqt paradigm with windows that are arranged via drag & drop.",
861
+ "bbox": [
862
+ 504,
863
+ 491,
864
+ 911,
865
+ 627
866
+ ],
867
+ "page_idx": 4
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "The most important monitoring display is shown in Fig. 6. Both operator station and avatar robot run a sysmon node, which performs several checks with $1\\mathrm{Hz}$ . These checks range from \"Is hardware device X connected?\" over \"Does component Y produce data?\" to \"Is the operator station properly calibrated?\". The intention is simple: If all checks are successful, the support crew can start the run with confidence. Indeed, our policy was that every time an undetected error or misconfiguration led to a sub-optimal test run, a specific check for this condition was added. Overall, checks are similar to unit tests in software engineering, but monitor the live system in hardware and software.",
872
+ "bbox": [
873
+ 504,
874
+ 627,
875
+ 913,
876
+ 808
877
+ ],
878
+ "page_idx": 4
879
+ },
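The check mechanism can be pictured as a small registry of boolean health checks evaluated once per second. The sketch below illustrates that structure; the individual checks and thresholds are placeholders, not the actual sysmon checks.

```python
# Sketch of a 1 Hz system-check loop in the spirit of the sysmon node above.
# The individual checks and thresholds are illustrative placeholders.
import time
from typing import Callable, Dict

CHECKS: Dict[str, Callable[[], bool]] = {}

def check(name: str):
    """Register a boolean health check under a human-readable name."""
    def register(fn: Callable[[], bool]):
        CHECKS[name] = fn
        return fn
    return register

def read_battery_voltage() -> float:   # stand-in for real sensor access
    return 48.3

def last_camera_stamp() -> float:      # stand-in for the camera driver's last frame time
    return time.time()

@check("Battery voltage above threshold")
def battery_ok() -> bool:
    return read_battery_voltage() > 44.0

@check("Main cameras producing frames")
def cameras_ok() -> bool:
    return time.time() - last_camera_stamp() < 0.5

while True:
    results = {name: fn() for name, fn in CHECKS.items()}
    print(("READY" if all(results.values()) else "NOT READY"), results)
    time.sleep(1.0)                     # 1 Hz, as in the paper
```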
880
+ {
881
+ "type": "text",
882
+ "text": "The network subsystem is monitored and configured through two GUI components (see Fig. 7). An overview visualization shows bandwidths and parameters of each individual network connection. For the two WiFi connections, signal strengths are also visualized as green bars. A small control box allows switching data groups between the two",
883
+ "bbox": [
884
+ 504,
885
+ 809,
886
+ 913,
887
+ 898
888
+ ],
889
+ "page_idx": 4
890
+ },
891
+ {
892
+ "type": "page_footnote",
893
+ "text": "5https://github.com/ocornut/imgui",
894
+ "bbox": [
895
+ 519,
896
+ 912,
897
+ 779,
898
+ 926
899
+ ],
900
+ "page_idx": 4
901
+ },
902
+ {
903
+ "type": "image",
904
+ "img_path": "images/7ceab86b83452a94f9980a4f591747d60abb2ccb3d8db5d79b47cd593d63fc49.jpg",
905
+ "image_caption": [
906
+ "Fig. 8. Camera streams. Left: Raw wide-angle camera stream (left eye) from the robot. Right: Eye cameras, mouth camera, and reconstructed animated face of the operator."
907
+ ],
908
+ "image_footnote": [],
909
+ "bbox": [
910
+ 86,
911
+ 61,
912
+ 486,
913
+ 200
914
+ ],
915
+ "page_idx": 5
916
+ },
917
+ {
918
+ "type": "text",
919
+ "text": "WiFi connections. This ability allows quick trouble-shooting by giving instant feedback on bandwidths and packet drops.",
920
+ "bbox": [
921
+ 81,
922
+ 255,
923
+ 486,
924
+ 286
925
+ ],
926
+ "page_idx": 5
927
+ },
928
+ {
929
+ "type": "text",
930
+ "text": "Finally, a section of the GUI with camera streams (see Fig. 8) together with headsets providing audio feedback give situational awareness to the support crew.",
931
+ "bbox": [
932
+ 81,
933
+ 286,
934
+ 486,
935
+ 330
936
+ ],
937
+ "page_idx": 5
938
+ },
939
+ {
940
+ "type": "text",
941
+ "text": "H. System Robustness",
942
+ "text_level": 1,
943
+ "bbox": [
944
+ 83,
945
+ 345,
946
+ 236,
947
+ 359
948
+ ],
949
+ "page_idx": 5
950
+ },
951
+ {
952
+ "type": "text",
953
+ "text": "Ensuring support crew situational awareness and the connectionless network system are features that make the system more robust. However, there are many problems that can occur during a run, where manual intervention is not possible without aborting the trial. For this reason, we added autorecovery mechanisms on multiple layers.",
954
+ "bbox": [
955
+ 81,
956
+ 366,
957
+ 488,
958
+ 455
959
+ ],
960
+ "page_idx": 5
961
+ },
962
+ {
963
+ "type": "text",
964
+ "text": "First, the Franka Emika Panda arms have independent safety systems which detect unsafe situations and either perform a soft-stop (braking with motor power) or hard-stop (engaging hardware brakes and switching off motor power). Since the operator can trigger both, e.g. by hitting an object with high speed, it is desirable to recover from these conditions. To this end, we modified the Panda firmware to be able to trigger recovery from an autonomous observer, which restarts the arms as long as the manual E-Stop is not triggered. During restart of the arm, the operator is shown a 3D model of the arm to indicate that the arm is restarting and they should wait until the process is finished. The arm pose is then softly faded to the current operator pose and operation can continue (see [11] for more details).",
965
+ "bbox": [
966
+ 81,
967
+ 455,
968
+ 488,
969
+ 667
970
+ ],
971
+ "page_idx": 5
972
+ },
973
+ {
974
+ "type": "text",
975
+ "text": "Secondly, many hard- and software problems can be solved by simply restarting the affected processes [18]. As a simple example, restarting a device driver ROS node will recover after a transient disconnection of the device, without the need to make the driver node itself robust against such events. We stringently use the respawn feature of the ROS launch system to ensure that all nodes are automatically restarted whenever they exit. Watchdogs are integrated that force nodes which do not produce output to exit.",
976
+ "bbox": [
977
+ 81,
978
+ 667,
979
+ 488,
980
+ 804
981
+ ],
982
+ "page_idx": 5
983
+ },
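The respawn-plus-watchdog pattern generalizes beyond ROS. The sketch below restarts a placeholder process whenever it exits and kills it when a heartbeat file goes stale (standing in for the "no output" watchdogs mentioned above); roslaunch's respawn flag provides the restart-on-exit part for actual ROS nodes.

```python
# Sketch of the restart-on-exit / watchdog pattern, with a plain subprocess
# standing in for a ROS node. Command and heartbeat path are placeholders.
import os
import subprocess
import time

NODE_CMD = ["python3", "camera_driver.py"]      # placeholder node command
HEARTBEAT = "/tmp/camera_driver.heartbeat"      # file the node is assumed to touch
TIMEOUT_S = 2.0

def heartbeat_age() -> float:
    try:
        return time.time() - os.path.getmtime(HEARTBEAT)
    except OSError:
        return float("inf")

while True:
    proc = subprocess.Popen(NODE_CMD)
    while proc.poll() is None:                  # node still running
        if heartbeat_age() > TIMEOUT_S:         # stuck: watchdog forces an exit...
            proc.kill()
        time.sleep(0.2)
    time.sleep(0.5)                             # ...and the outer loop respawns it
```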
984
+ {
985
+ "type": "text",
986
+ "text": "Finally, as a last line of defense, the main control PC is equipped with an external watchdog device. Our software running on the control PC regularly resets this watchdog. Should the system hang completely (which happened once during testing), the watchdog device will force a reset of the computer. Consequently, the software is configured to autostart again, automatically resuming operations. The complete boot-and-recovery process takes less than one minute.",
987
+ "bbox": [
988
+ 81,
989
+ 805,
990
+ 488,
991
+ 926
992
+ ],
993
+ "page_idx": 5
994
+ },
995
+ {
996
+ "type": "table",
997
+ "img_path": "images/463fb6eb208005645960a080dfb153be1dd1b583955ef9da168da94e0c1b87b9.jpg",
998
+ "table_caption": [
999
+ "TABLE III RESULTS OF THE ANA AVATAR XPRIZE FINALs"
1000
+ ],
1001
+ "table_footnote": [],
1002
+ "table_body": "<table><tr><td rowspan=\"2\">Rank</td><td rowspan=\"2\">Team</td><td colspan=\"3\">Points</td><td>Time</td></tr><tr><td>Total</td><td>Task</td><td>Judged</td><td>[mm:ss]</td></tr><tr><td>1</td><td>NimbRo</td><td>15.0</td><td>10</td><td>5.0</td><td>05:50</td></tr><tr><td>2</td><td>Pollen Robotics</td><td>15.0</td><td>10</td><td>5.0</td><td>10:50</td></tr><tr><td>3</td><td>Team Northeastern [19]</td><td>14.5</td><td>10</td><td>4.5</td><td>21:09</td></tr><tr><td>4</td><td>AVATRINA [4]</td><td>14.5</td><td>10</td><td>4.5</td><td>24:47</td></tr><tr><td>5</td><td>i-Botics [5]</td><td>14.0</td><td>9</td><td>5.0</td><td>25:00</td></tr><tr><td>6</td><td>Team UNIST</td><td>13.5</td><td>9</td><td>4.5</td><td>25:00</td></tr><tr><td>7</td><td>Inbiodroid</td><td>13.0</td><td>8</td><td>5.0</td><td>25:00</td></tr><tr><td>8</td><td>Team SNU [6]</td><td>12.5</td><td>8</td><td>4.5</td><td>25:00</td></tr><tr><td>9</td><td>AlterEgo [20]</td><td>12.5</td><td>8</td><td>4.5</td><td>25:00</td></tr><tr><td>10</td><td>Dragon Tree Labs</td><td>11.0</td><td>7</td><td>4.0</td><td>25:00</td></tr><tr><td>11</td><td>Avatar-Hubo [21]</td><td>9.5</td><td>6</td><td>3.5</td><td>25:00</td></tr><tr><td>12</td><td>Last Mile</td><td>9.0</td><td>5</td><td>4.0</td><td>25:00</td></tr></table>",
1003
+ "bbox": [
1004
+ 506,
1005
+ 89,
1006
+ 915,
1007
+ 272
1008
+ ],
1009
+ "page_idx": 5
1010
+ },
1011
+ {
1012
+ "type": "text",
1013
+ "text": "IV. EVALUATION",
1014
+ "text_level": 1,
1015
+ "bbox": [
1016
+ 645,
1017
+ 282,
1018
+ 772,
1019
+ 296
1020
+ ],
1021
+ "page_idx": 5
1022
+ },
1023
+ {
1024
+ "type": "text",
1025
+ "text": "The ANA Avatar XPRIZE Finals took place in November 2022 in Long Beach, CA, USA. After several downselections over three years, 17 teams from 10 countries competed in the finals for a prize purse of $8 million. The developed avatar systems were evaluated by untrained operator and recipient judges in a series of ten tasks over one qualification and two competition days. Only the top 16 teams and ties (qualification day) and top 12 teams (first competition day) advanced to the next day. Both judges were selected from the international expert panel and unknown to the teams until 60 min before the competition run. Teams had 45 min to train and familiarize the operator judge with their system. The operator judge controlled the avatar robot located in the arena from the operator control room, out of range of direct visual or auditory feedback. Information could only be exchanged between both locations via the avatar system. The operator had up to 25 min to complete all ten tasks (see Fig. 9). During the competition run, teams were not allowed to interact with the system or the judges.",
1026
+ "bbox": [
1027
+ 504,
1028
+ 308,
1029
+ 913,
1030
+ 595
1031
+ ],
1032
+ "page_idx": 5
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "text": "A. Analysis of Competition Scores",
1037
+ "text_level": 1,
1038
+ "bbox": [
1039
+ 504,
1040
+ 614,
1041
+ 743,
1042
+ 630
1043
+ ],
1044
+ "page_idx": 5
1045
+ },
1046
+ {
1047
+ "type": "text",
1048
+ "text": "Each competition run was scored based on task performance and judge experience. Table III shows the final scores of the top 12 teams. One point was awarded for each task completed. In addition, up to five points were given based on the experience of both judges. Each judge scored up to one point per criterion if they felt the operator was present at the remote site and if they could clearly see and hear each other through the system. The final point was given by the operator judge if the avatar system was easy and comfortable to use. The maximum score of both competition days counted as the final result. Ties were broken by completion time.",
1049
+ "bbox": [
1050
+ 504,
1051
+ 638,
1052
+ 911,
1053
+ 804
1054
+ ],
1055
+ "page_idx": 5
1056
+ },
1057
+ {
1058
+ "type": "text",
1059
+ "text": "Four systems completed all ten tasks. Pollen Robotics' and our system were the only ones to solve all tasks on both days. Most of the systems received 4.5 or the maximum 5 points for the judge experience. Our team NimbRo won the competition with a perfect score of 15 points and the fastest completion time of $5:50\\mathrm{min}$ —almost twice as fast as the runner-up Pollen Robotics, who also received a perfect score with a time of $10:50\\mathrm{min}$ .",
1060
+ "bbox": [
1061
+ 504,
1062
+ 805,
1063
+ 913,
1064
+ 926
1065
+ ],
1066
+ "page_idx": 5
1067
+ },
1068
+ {
1069
+ "type": "image",
1070
+ "img_path": "images/5e5a6b66c3132a592b373ac32566822cfd569614bdfb4c458fd17308a3ac6c04.jpg",
1071
+ "image_caption": [
1072
+ "Fig. 9. Tasks of the ANA Avatar XPRIZE finals. T1: Short locomotion (approx. $10\\mathrm{m}$ ) to the mission control desk. T2: The operator introduces themselves to the mission commander. T3: The operator receives mission details and confirms the tasks. T4: Activate the power switch. T5: Approx. $40\\mathrm{m}$ of locomotion. T6: Select a canister by weight (approx. $1.2\\mathrm{kg}$ ). T7: Place the canister in the designated slot. T8: Navigate around obstacles. T9: Grasp and use the power drill to unscrew the hex bolt. T10: Select a rough-textured stone based on touch and retrieve it."
1073
+ ],
1074
+ "image_footnote": [],
1075
+ "bbox": [
1076
+ 106,
1077
+ 59,
1078
+ 890,
1079
+ 270
1080
+ ],
1081
+ "page_idx": 6
1082
+ },
1083
+ {
1084
+ "type": "image",
1085
+ "img_path": "images/9138c0b78d92648e2d88160171310800ffd36002916ecb745ce7c9fb235c0531.jpg",
1086
+ "image_caption": [
1087
+ "Fig. 10. Per-task execution time for the top six competition runs solving all ten tasks in the ANA Avatar XPRIZE finals."
1088
+ ],
1089
+ "image_footnote": [],
1090
+ "bbox": [
1091
+ 119,
1092
+ 328,
1093
+ 890,
1094
+ 436
1095
+ ],
1096
+ "page_idx": 6
1097
+ },
1098
+ {
1099
+ "type": "text",
1100
+ "text": "B. Task Completion Times",
1101
+ "text_level": 1,
1102
+ "bbox": [
1103
+ 83,
1104
+ 465,
1105
+ 266,
1106
+ 479
1107
+ ],
1108
+ "page_idx": 6
1109
+ },
1110
+ {
1111
+ "type": "text",
1112
+ "text": "We extracted the per task completion times for both competition days from the official video feed<sup>6</sup> for the six runs completing all ten tasks (see Figure 10 and Table IV). Both of our competition runs were faster than any other successful run. As our operator judge on Day 1 solved all tasks in $8:15\\mathrm{min}$ , giving us a comfortable lead, our operator judge on Day 2 was instructed to take more risks. In addition, we greatly increased our avatar's maximum base velocity for Day 2, resulting in much faster execution times for all tasks involving larger locomotion (Tasks 1, 4, 5, and 8). We encountered a minor network issue during Task 9 on Day 1 (see Section IV-C) which explains our longer execution time of $1:56\\mathrm{min}$ , compared to $1:04\\mathrm{min}$ on Day 2. All remaining tasks (2, 3, 6, 7, and 10) were solved within the same time ( $\\pm 4$ sec.) on both days, showing the robustness of our system.",
1113
+ "bbox": [
1114
+ 81,
1115
+ 488,
1116
+ 488,
1117
+ 714
1118
+ ],
1119
+ "page_idx": 6
1120
+ },
1121
+ {
1122
+ "type": "text",
1123
+ "text": "The shorter tasks 1-3 (locomotion and communication with the recipient judge) and Task 7 (placing the canister into the designated slot) were consistently solved with similar execution times across the top six competition runs. AVATRINA's system had a much slower drive compared to the top three teams, as evidenced by longer execution times for the locomotion tasks. They also had problems during the manipulation in Task 6, which resulted in a software restart, costing $2:10\\mathrm{min}$ . Pollen Robotics' longer locomotion time (Task 5) on Day 2 was similarly due to a reset of the operator control. Larger differences in individual task execution times are due to subsystem failures or sub-optimal",
1124
+ "bbox": [
1125
+ 81,
1126
+ 715,
1127
+ 488,
1128
+ 895
1129
+ ],
1130
+ "page_idx": 6
1131
+ },
1132
+ {
1133
+ "type": "table",
1134
+ "img_path": "images/b3441163ad1d5f92cea21a4a1ba07e6e777a2fb88d19f0d99ee0b93af092df7e.jpg",
1135
+ "table_caption": [
1136
+ "TABLE IV TASK COMPLETION TIMES IN ANA AVATAR XPRIZE FINALs"
1137
+ ],
1138
+ "table_footnote": [
1139
+ "We show times of the top six competition run in minutes. D1/D2: Day 1 / Day 2."
1140
+ ],
1141
+ "table_body": "<table><tr><td>Team / Run</td><td>T1</td><td>T2</td><td>T3</td><td>T4</td><td>T5</td><td>T6</td><td>T7</td><td>T8</td><td>T9</td><td>T10</td><td>Total</td></tr><tr><td>Avatrina D1</td><td>0:28</td><td>0:23</td><td>2:03</td><td>1:45</td><td>3:10</td><td>6:17</td><td>0:19</td><td>2:24</td><td>3:10</td><td>4:48</td><td>24:47</td></tr><tr><td>Northeastern D2</td><td>0:16</td><td>0:19</td><td>1:47</td><td>0:52</td><td>1:14</td><td>1:05</td><td>0:15</td><td>1:00</td><td>4:54</td><td>9:27</td><td>21:09</td></tr><tr><td>Pollen Rob. D1</td><td>0:10</td><td>0:09</td><td>1:39</td><td>0:40</td><td>1:15</td><td>0:53</td><td>0:14</td><td>0:50</td><td>5:06</td><td>2:24</td><td>13:20</td></tr><tr><td>Pollen Rob. D2</td><td>0:15</td><td>0:09</td><td>1:43</td><td>0:49</td><td>2:02</td><td>1:15</td><td>0:18</td><td>0:51</td><td>1:28</td><td>1:59</td><td>10:50</td></tr><tr><td>NimbRo D1</td><td>0:18</td><td>0:10</td><td>1:35</td><td>0:52</td><td>1:00</td><td>0:22</td><td>0:06</td><td>0:50</td><td>1:56</td><td>1:06</td><td>8:15</td></tr><tr><td>NimbRo D2</td><td>0:08</td><td>0:09</td><td>1:31</td><td>0:23</td><td>0:32</td><td>0:26</td><td>0:09</td><td>0:26</td><td>1:04</td><td>1:02</td><td>5:50</td></tr><tr><td>NimbRo D2-D1</td><td>-0:10</td><td>-0:01</td><td>-0:04</td><td>-0:29</td><td>-0:28</td><td>+0:04</td><td>+0:03</td><td>-0:24</td><td>-0:52</td><td>-0:04</td><td>-2:25</td></tr></table>",
1142
+ "bbox": [
1143
+ 511,
1144
+ 487,
1145
+ 908,
1146
+ 590
1147
+ ],
1148
+ "page_idx": 6
1149
+ },
1150
+ {
1151
+ "type": "text",
1152
+ "text": "grasp poses in case of the drill (Task 9): Both Pollen Robotics on Day 1 and Team Northeastern lost the first drill, requiring grasping the second drill. Team Northeastern then struggled to reach the stones in the box for Task 10, maybe due to their kinematics with the wrist above the hand. The left arm shut down completely due to collision with the top bar and could not recover. However, the operator managed to retrieve the correct stone with the right arm after several attempts.",
1153
+ "bbox": [
1154
+ 504,
1155
+ 613,
1156
+ 911,
1157
+ 733
1158
+ ],
1159
+ "page_idx": 6
1160
+ },
1161
+ {
1162
+ "type": "text",
1163
+ "text": "Despite the small sample size, this time analysis shows the robustness and reliability of our system. Comparing our completion times with the rest of the competition suggests that our system is easier and faster to use and provides sufficient intuitive situational awareness to the operator.",
1164
+ "bbox": [
1165
+ 504,
1166
+ 734,
1167
+ 913,
1168
+ 809
1169
+ ],
1170
+ "page_idx": 6
1171
+ },
1172
+ {
1173
+ "type": "text",
1174
+ "text": "C. System Failures & Recovery",
1175
+ "text_level": 1,
1176
+ "bbox": [
1177
+ 506,
1178
+ 816,
1179
+ 725,
1180
+ 830
1181
+ ],
1182
+ "page_idx": 6
1183
+ },
1184
+ {
1185
+ "type": "text",
1186
+ "text": "During all three competition runs, the arm recovery system was put to the test. On qualification day, we had switched off most traffic on the $2.4\\mathrm{GHz}$ band since it had proven unstable in the team garages due to channel congestion. During the run, there were certain intervals with higher packet jitter on the $5\\mathrm{GHz}$ band. Since redundancy on the arm commands",
1187
+ "bbox": [
1188
+ 504,
1189
+ 835,
1190
+ 913,
1191
+ 926
1192
+ ],
1193
+ "page_idx": 6
1194
+ },
1195
+ {
1196
+ "type": "footer",
1197
+ "text": "$^{6}$ https://www.youtube.com/watch?v=1OnV1Go6Op0",
1198
+ "bbox": [
1199
+ 94,
1200
+ 912,
1201
+ 442,
1202
+ 926
1203
+ ],
1204
+ "page_idx": 6
1205
+ },
1206
+ {
1207
+ "type": "image",
1208
+ "img_path": "images/5d00f0be12667b075b5bbd5db167694e08585ba9ea261eb007e2e66236187261.jpg",
1209
+ "image_caption": [
1210
+ "Fig. 11. Network latency events during finals. Delays over $100\\mathrm{ms}$ in the arm command channel (red), temporarily put the arms in pause mode. From Day 1 on, the arm commands were transmitted redundantly. Additional logs from Day 1 show video decoding errors resulting from packet loss (green)."
1211
+ ],
1212
+ "image_footnote": [],
1213
+ "bbox": [
1214
+ 89,
1215
+ 59,
1216
+ 486,
1217
+ 165
1218
+ ],
1219
+ "page_idx": 7
1220
+ },
1221
+ {
1222
+ "type": "text",
1223
+ "text": "was unavailable, the arm controllers on the robot disabled themselves when not receiving commands (see Fig. 11). Thankfully, the packet jitter was only transient, so that auto-restarting the controllers was possible. Additionally, during the final task, the left wrist hit the boundary of the stone box with enough force to disable the arm. Pressure on the arm prevented immediate auto-recovery but the operator resolved the situation by lowering the torso, reducing the pressure and allowing the arm to restart.",
1224
+ "bbox": [
1225
+ 81,
1226
+ 234,
1227
+ 488,
1228
+ 369
1229
+ ],
1230
+ "page_idx": 7
1231
+ },
1232
+ {
1233
+ "type": "text",
1234
+ "text": "After experiencing the packet jitter on qualification day, we enabled redundancy for the arm commands, which greatly reduced this error class from seven instances in qualification to two on Day 1 and only one on Day 2. This demonstrates the utility of the redundant communication system.",
1235
+ "bbox": [
1236
+ 81,
1237
+ 371,
1238
+ 488,
1239
+ 446
1240
+ ],
1241
+ "page_idx": 7
1242
+ },
1243
+ {
1244
+ "type": "text",
1245
+ "text": "Arm shutdowns due to excessive force happened once on Day 1 again in the stone box, and on Day 2 while putting a canister back on the table. In both cases, auto-recovery immediately succeeded and the run could continue without problems.",
1246
+ "bbox": [
1247
+ 81,
1248
+ 446,
1249
+ 488,
1250
+ 522
1251
+ ],
1252
+ "page_idx": 7
1253
+ },
1254
+ {
1255
+ "type": "text",
1256
+ "text": "D. Lessons Learned",
1257
+ "text_level": 1,
1258
+ "bbox": [
1259
+ 83,
1260
+ 531,
1261
+ 225,
1262
+ 544
1263
+ ],
1264
+ "page_idx": 7
1265
+ },
1266
+ {
1267
+ "type": "list",
1268
+ "sub_type": "ref_text",
1269
+ "list_items": [
1270
+ "1) Robustness: Despite the extensive experience available in our group, this is the most complex system we ever built. Good monitoring, failure tolerance, and auto-recovery were extremely important for success. In contrast to other teams, technical problems did not cause major delays or failures.",
1271
+ "2) Frequent testing under competition conditions: Having a high test frequency allowed us to identify and address several issues that were annoying or uncomfortable for operators, e.g. resulting in the improved rudder device. Frequent tests also helped the support crew to establish routine in efficiently training the operators.",
1272
+ "3) 1:1 correspondence is best: The connection between operator and avatar needs to be as close to identity as possible. Avoiding any scaling, offsetting or 3D processing helps operators to quickly immerse into the system. In particular, correct hand-eye transformations let operators identify the avatar's hands as their own."
1273
+ ],
1274
+ "bbox": [
1275
+ 81,
1276
+ 550,
1277
+ 488,
1278
+ 805
1279
+ ],
1280
+ "page_idx": 7
1281
+ },
1282
+ {
1283
+ "type": "text",
1284
+ "text": "V. CONCLUSION",
1285
+ "text_level": 1,
1286
+ "bbox": [
1287
+ 225,
1288
+ 815,
1289
+ 346,
1290
+ 829
1291
+ ],
1292
+ "page_idx": 7
1293
+ },
1294
+ {
1295
+ "type": "text",
1296
+ "text": "We presented the extended and updated NimbRo avatar system, which won the ANA Avatar XPRIZE finals. Key improvements, compared to the semifinals system [2], such as a new base design, a linear actuator to adjust the torso height, haptic perception, monitoring tools, failure tolerance, and robust wireless communication enabled this success.",
1297
+ "bbox": [
1298
+ 81,
1299
+ 834,
1300
+ 488,
1301
+ 924
1302
+ ],
1303
+ "page_idx": 7
1304
+ },
1305
+ {
1306
+ "type": "text",
1307
+ "text": "REFERENCES",
1308
+ "text_level": 1,
1309
+ "bbox": [
1310
+ 661,
1311
+ 66,
1312
+ 756,
1313
+ 78
1314
+ ],
1315
+ "page_idx": 7
1316
+ },
1317
+ {
1318
+ "type": "list",
1319
+ "sub_type": "ref_text",
1320
+ "list_items": [
1321
+ "[1] E. Krotkov, D. Hackett, L. Jackel, M. Perschbacher, J. Pippine, J. Strauss, G. Pratt, and C. Orlowski, “The DARPA robotics challenge finals: Results and perspectives,” The DARPA Robotics Challenge Finals: Humanoid Robots To The Rescue, pp. 1–26, 2018.",
1322
+ "[2] M. Schwarz, C. Lenz, A. Rochow, M. Schreiber, and S. Behnke, \"NimbRo Avatar: Interactive immersive telepresence with forcefeedback telemanipulation,\" in International Conference on Intelligent Robots and Systems (IROS), 2021, pp. 5312-5319.",
1323
+ "[3] R. Luo, C. Wang, C. Keil, D. Nguyen, H. Mayne, S. Alt, E. Schwarm, M. Evelyn, T. Padir, and J. P. Whitney, \"Team Northeastern's approach to ANXPRIZEAvatar final testing: A holistic approach to telepresence and lessons learned,\" arXiv:2303.04932, 2023.",
1324
+ "[4] J. M. Marques, N Patrick, Y. Zhu, N. Malhotra, and K. Hauser, \"Commodity telepresence with the AvaTRINA nursebot in the ANA Avatar XPRIZE semifinals,\" in RSS Workshop: Perspectives on the ANA Avatar XPRIZE Competition, 2022.",
1325
+ "[5] J. B. Van Erp, C. Sallaberry, C. Brekelmans, D. Dresscher, F. Ter Haar, G. Englebienne, J. Van Bruggen, J. De Greeff, L. F. S. Pereira, A. Toet, et al., \"What comes after telepresence? Embodiment, social presence and transporting one's functional and social self,\" in Int. Conference on Systems, Man, and Cybernetics (SMC), 2022.",
1326
+ "[6] B. Park, J. Jung, J. Sim, S Kim, J Ahn, D. Lim, D. Kim, M. Kim, S. Park, E Sung, et al., \"Team SNU's avatar system for teleoperation using humanoid robot: ANA Avatar XPRIZE competition,\" in RSS Workshop: Perspectives on the ANA Avatar XPRIZE Comp., 2022.",
1327
+ "[7] K. Takeuchi, Y. Yamazaki, and K. Yoshifuji, “Avatar work: Telework for disabled people unable to go outside by using avatar robots,” in Companion of Int. Conference on Human-Robot Interaction, 2020.",
1328
+ "[8] O. Khatib et al., “Ocean one: A robotic avatar for oceanic discovery,” IEEE Robotics & Automation Magazine, 2016.",
1329
+ "[9] N. Y. Lii, D. Leidner, P. Birkenkampf, B. Pleintinger, R. Bayer, and T. Krueger, \"Toward scalable intuitive telecommand of robots for space deployment with METERON SUPVIS Justin,\" in Symp. on Adv. Space Tech. for Robotics and Automation (ASTRA), 2017.",
1330
+ "[10] S. Tachi, Y. Inoue, and F. Kato, \"TELESAR VI: Teexistence surrogate anthropomorphic robot VI,\" International Journal of Humanoid Robotics, vol. 17, no. 05, p. 2050019, 2020.",
1331
+ "[11] C. Lenz and S. Behnke, \"Bimanual tele manipulation with force and haptic feedback through an anthropomorphic avatar system,\" Robotics and Autonomous Systems, vol. 161, p. 104338, 2023.",
1332
+ "[12] M. Schwarz and S. Behnke, “Low-latency immersive 6D tevisualization with spherical rendering,” in International Conference on Humanoid Robots (Humanoids), 2021.",
1333
+ "[13] A. Rochow, M. Schwarz, M. Schreiber, and S. Behnke, \"VR facial animation for immersive telepresence avatars,\" in International Conference on Intelligent Robots and Systems (IROS), 2022.",
1334
+ "[14] A. Rochow, M. Schwarz, and S. Behnke, \"Attention-based VR facial animation with visual mouth camera guidance for immersive telepresence avatars,\" in International Conference on Intelligent Robots and Systems (IROS), 2023.",
1335
+ "[15] XPRIZE Inc., “ANA Avatar XPRIZE rules and regulations,” 2022.",
1336
+ "[16] B. Pätzold, A. Rochow, M. Schreiber, R. Memmesheimer, C. Lenz, M. Schwarz, and S. Behnke, \"Audio-based roughness sensing and tactile feedback for haptic perception in telepresence,\" in Int. Conference on Systems, Man, and Cybernetics (SMC), 2023.",
1337
+ "[17] M. Quigley, B. Gerkey, K. Conley, J. Faust, T. Foote, J. Leibs, E. Berger, R. Wheeler, and A. Ng, “ROS: An open-source robot operating system,” in International Conference on Robotics and Automation (ICRA) Workshop on Open Source Robotics, 2009.",
1338
+ "[18] G. Candea, S. Kawamoto, Y. Fujiki, G. Friedman, and A. Fox, \"Microreboot - a technique for cheap recovery,\" in Symposium on Operating Systems Design and Implementation (OSDI), 2004.",
1339
+ "[19] R. Luo, C. Wang, E. Schwarm, C. Keil, E. Mendoza, P. Kaveti, S. Alt, H. Singh, T. Padir, and J. P. Whitney, \"Towards robot avatars: Systems and methods for teleinteraction at Avatar XPRIZE semifinals,\" in Int. Conf. on Intelligent Robots and Systems (IROS), 2022.",
1340
+ "[20] G. Lentini, A. Settimi, D. Caporale, M. Garabini, G. Grioli, L. Pallottino, M. G. Catalano, and A. Bicchi, \"Alter-Ego: A mobile robot with a functionally anthropomorphic upper body designed for physical interaction,\" IEEE Robotics & Automation Magazine, 2019.",
1341
+ "[21] J. C. Vaz, A. Dave, N. Kassai, N. Kosanovic, and P. Y. Oh, \"Immersive auditory-visual real-time avatar system of ANA Avatar XPRIZE finalist Avatar-Hubo,\" in IEEE International Conference on Advanced Robotics and Its Social Impacts (ARSO), 2022."
1342
+ ],
1343
+ "bbox": [
1344
+ 509,
1345
+ 83,
1346
+ 911,
1347
+ 922
1348
+ ],
1349
+ "page_idx": 7
1350
+ }
1351
+ ]