SlowGuess committed on
Commit
1fe1ac6
·
verified ·
1 Parent(s): af2014f

Add Batch 82c7a46e-8cae-4de1-b77c-af4fb42cdeff

This view is limited to 50 files because it contains too many changes. See raw diff

Files changed (50)
  1. .gitattributes +64 -0
  2. 2202.08xxx/2202.08791/3d13601b-66e4-4165-a30b-60fdfaeef09b_content_list.json +0 -0
  3. 2202.08xxx/2202.08791/3d13601b-66e4-4165-a30b-60fdfaeef09b_model.json +0 -0
  4. 2202.08xxx/2202.08791/3d13601b-66e4-4165-a30b-60fdfaeef09b_origin.pdf +3 -0
  5. 2202.08xxx/2202.08791/full.md +413 -0
  6. 2202.08xxx/2202.08791/images.zip +3 -0
  7. 2202.08xxx/2202.08791/layout.json +0 -0
  8. 2202.08xxx/2202.08792/13a6d837-3222-427d-a1a3-77b4e886a508_content_list.json +0 -0
  9. 2202.08xxx/2202.08792/13a6d837-3222-427d-a1a3-77b4e886a508_model.json +0 -0
  10. 2202.08xxx/2202.08792/13a6d837-3222-427d-a1a3-77b4e886a508_origin.pdf +3 -0
  11. 2202.08xxx/2202.08792/full.md +0 -0
  12. 2202.08xxx/2202.08792/images.zip +3 -0
  13. 2202.08xxx/2202.08792/layout.json +0 -0
  14. 2202.08xxx/2202.08814/0e4977c0-9108-464d-ba63-1e8d8819dfe2_content_list.json +1236 -0
  15. 2202.08xxx/2202.08814/0e4977c0-9108-464d-ba63-1e8d8819dfe2_model.json +1653 -0
  16. 2202.08xxx/2202.08814/0e4977c0-9108-464d-ba63-1e8d8819dfe2_origin.pdf +3 -0
  17. 2202.08xxx/2202.08814/full.md +242 -0
  18. 2202.08xxx/2202.08814/images.zip +3 -0
  19. 2202.08xxx/2202.08814/layout.json +0 -0
  20. 2202.08xxx/2202.08816/882d8f61-0f84-4e48-96e3-ebc90db06a9d_content_list.json +0 -0
  21. 2202.08xxx/2202.08816/882d8f61-0f84-4e48-96e3-ebc90db06a9d_model.json +0 -0
  22. 2202.08xxx/2202.08816/882d8f61-0f84-4e48-96e3-ebc90db06a9d_origin.pdf +3 -0
  23. 2202.08xxx/2202.08816/full.md +498 -0
  24. 2202.08xxx/2202.08816/images.zip +3 -0
  25. 2202.08xxx/2202.08816/layout.json +0 -0
  26. 2202.08xxx/2202.08818/4cb6ce6c-2c4f-416d-8656-8e7ab063d642_content_list.json +0 -0
  27. 2202.08xxx/2202.08818/4cb6ce6c-2c4f-416d-8656-8e7ab063d642_model.json +0 -0
  28. 2202.08xxx/2202.08818/4cb6ce6c-2c4f-416d-8656-8e7ab063d642_origin.pdf +3 -0
  29. 2202.08xxx/2202.08818/full.md +0 -0
  30. 2202.08xxx/2202.08818/images.zip +3 -0
  31. 2202.08xxx/2202.08818/layout.json +0 -0
  32. 2202.08xxx/2202.08821/a330e14c-eb6d-4e85-bc1d-a5b4b5c46483_content_list.json +0 -0
  33. 2202.08xxx/2202.08821/a330e14c-eb6d-4e85-bc1d-a5b4b5c46483_model.json +0 -0
  34. 2202.08xxx/2202.08821/a330e14c-eb6d-4e85-bc1d-a5b4b5c46483_origin.pdf +3 -0
  35. 2202.08xxx/2202.08821/full.md +911 -0
  36. 2202.08xxx/2202.08821/images.zip +3 -0
  37. 2202.08xxx/2202.08821/layout.json +0 -0
  38. 2202.08xxx/2202.08827/5a452966-07a3-4cce-816d-8e579346dd6c_content_list.json +0 -0
  39. 2202.08xxx/2202.08827/5a452966-07a3-4cce-816d-8e579346dd6c_model.json +0 -0
  40. 2202.08xxx/2202.08827/5a452966-07a3-4cce-816d-8e579346dd6c_origin.pdf +3 -0
  41. 2202.08xxx/2202.08827/full.md +411 -0
  42. 2202.08xxx/2202.08827/images.zip +3 -0
  43. 2202.08xxx/2202.08827/layout.json +0 -0
  44. 2202.08xxx/2202.08832/ac21a927-1452-4896-b2be-adb7e1ec7eaa_content_list.json +0 -0
  45. 2202.08xxx/2202.08832/ac21a927-1452-4896-b2be-adb7e1ec7eaa_model.json +0 -0
  46. 2202.08xxx/2202.08832/ac21a927-1452-4896-b2be-adb7e1ec7eaa_origin.pdf +3 -0
  47. 2202.08xxx/2202.08832/full.md +0 -0
  48. 2202.08xxx/2202.08832/images.zip +3 -0
  49. 2202.08xxx/2202.08832/layout.json +0 -0
  50. 2202.08xxx/2202.08862/e4a7970f-80af-4e53-a91b-15ca008317bb_content_list.json +0 -0
.gitattributes CHANGED
@@ -7800,3 +7800,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
2202.12xxx/2202.12705/7854cee8-3128-4680-ae73-8c8227a8e539_origin.pdf filter=lfs diff=lfs merge=lfs -text
2202.12xxx/2202.12937/3900635e-ae81-4532-8d60-ed1956a2a251_origin.pdf filter=lfs diff=lfs merge=lfs -text
2202.12xxx/2202.12938/aabe42ae-be62-404d-aec2-32ac9d00f5d5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08791/3d13601b-66e4-4165-a30b-60fdfaeef09b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08792/13a6d837-3222-427d-a1a3-77b4e886a508_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08814/0e4977c0-9108-464d-ba63-1e8d8819dfe2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08816/882d8f61-0f84-4e48-96e3-ebc90db06a9d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08818/4cb6ce6c-2c4f-416d-8656-8e7ab063d642_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08821/a330e14c-eb6d-4e85-bc1d-a5b4b5c46483_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08827/5a452966-07a3-4cce-816d-8e579346dd6c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08832/ac21a927-1452-4896-b2be-adb7e1ec7eaa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08862/e4a7970f-80af-4e53-a91b-15ca008317bb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08871/1b9ed6bf-e494-44af-8162-4bc271aa2fd6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08894/1dcc24a9-d1af-4eef-b046-c2acf86c4099_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08904/552141d3-58cf-4e5d-a171-0f9f3f9bd461_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08906/dec02fb6-effe-4e10-9237-e3d9250fe00a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08938/de3071a9-5286-4b58-a3bd-467997a329da_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08946/e283da65-96de-4d27-9dbd-f6d44c715b72_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.08xxx/2202.08982/a7a10c4c-4aaa-425b-854a-6b47e27a0b70_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09025/fa3db4f2-a32b-4060-831f-1f67bbf194ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09027/805741f0-4dc2-43f1-9d92-01c90d3fd733_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09061/979302e9-87e7-4cbb-a3e3-ebc81c9ad16f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09155/705ba4f6-c4f0-4639-9c0b-3692b03e762e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09195/6610e9db-c144-4b21-96ab-e7a63efaaf8d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09263/5abea229-582f-4105-aeb6-cf1b3576508f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09346/1744567c-6592-4249-8fd1-44e68e8b9ac4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09357/1d784bfb-8cb2-4de0-a918-0c96c2539ed7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09367/fdad2245-7187-4de4-9efe-a90a8062c21d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09368/9b973b3d-4743-4952-a38c-ccbc4c022f59_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09381/a5bb52f6-b4ba-472c-b174-475cca0a2320_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09400/ba74f5af-5d0a-4fc7-b1d4-82a15f0435ce_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09467/816ed019-2993-43cc-b16b-3893d1176a0b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09468/f804b9ab-a7a4-47bd-bce7-d738ceeba3cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09481/6ad4e03e-c276-4dac-bfe9-e7211427abc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09507/bec7bcd4-220d-4d80-a08d-a069ef41beef_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09517/197d168a-1b48-4eb7-887d-e665affe55ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09533/cd95c572-04b9-4586-88f9-7b270c32a7c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09554/274e754c-ff5d-4e21-b11c-c38982b4c680_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09572/f6d09b9c-6bbe-425a-af32-273101af744b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09583/89947fbc-65b6-46cd-910b-dad1d83f327b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09597/507a07bd-dc9f-4f70-ae1a-f456998076b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09652/8f58bdc9-76a7-415d-83c0-9c376e9a5fa1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09657/ac5c055b-db84-43fd-82e2-fab1ccffea0c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09671/a1c0841f-d0e2-454d-8935-b033d9c5ba98_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09729/da8e73b0-a1ff-484a-8d29-d497beb96388_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09741/422550b1-992b-4457-b3e1-ef942b08f8c4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09745/cd844dc4-45d3-4a34-af12-2d2e66e52b7b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09769/a978e6ba-8cdb-4727-afa2-d51fd615f2d3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09778/96b7847d-8c2c-4209-9da1-659dd77f2146_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09791/8910b91e-b40a-4f5a-a425-7b7b248e745a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09844/574b89e2-1d73-4c0e-a7f2-c8eefaaacd06_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09852/998644e3-0d85-4752-90c5-f9d0ad72445a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09895/a321f149-6aa4-434b-8f2e-4a072c1dcf7f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.09xxx/2202.09947/4362768b-bf15-487c-b78c-017bfede0871_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10015/395ec392-2ff3-4878-9d54-68ccea2617ee_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10020/33f9475d-e70e-4056-b70a-23a1a88131b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10047/d0e00493-d634-47b5-a99d-b57007ee7da7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10054/12e292d5-ef3c-46b6-b9f5-cce2133f367d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10098/79e216eb-f69d-469a-b635-10885d5ffc7f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10103/cbc564a6-5841-438f-8e2a-faf577ce7a1e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10108/5090a145-6b9b-4128-89af-5cab354edf22_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10166/9c3c445e-6eed-4cd7-8786-f26548595bf1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10203/ac00bf3c-9696-4e84-833f-ebaddf4ad9c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10936/e1b89235-b187-4f29-b8e5-5d0187b72aaa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.10xxx/2202.10938/a73abd6a-3740-443b-810e-42c6235d35e8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2202.11xxx/2202.11200/1112c9c1-eeab-456d-ae34-c7bf3834671d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+ 2203.03xxx/2203.03717/f760662b-4260-4020-9463-670751460143_origin.pdf filter=lfs diff=lfs merge=lfs -text
2202.08xxx/2202.08791/3d13601b-66e4-4165-a30b-60fdfaeef09b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08791/3d13601b-66e4-4165-a30b-60fdfaeef09b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08791/3d13601b-66e4-4165-a30b-60fdfaeef09b_origin.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f382c102e83284b567374e6dd930656343f54ab1a708d051b0c0607a5a16ad12
+ size 709460
2202.08xxx/2202.08791/full.md ADDED
@@ -0,0 +1,413 @@
# COSFORMER: RETHINKING SOFTMAX IN ATTENTION

Zhen Qin†$^{1}$ Weixuan Sun†$^{1,3}$ Hui Deng†$^{1,4}$ Dongxu Li$^{3}$ Yunshen Wei$^{1}$ Baohong Lv$^{1}$ Junjie Yan$^{1}$ Lingpeng Kong$^{2,5}$ Yiran Zhong$^{1,2}$*

$^{1}$ SenseTime Research $^{2}$ Shanghai AI Laboratory $^{3}$ Australian National University $^{4}$ Northwestern Polytechnical University $^{5}$ The University of Hong Kong

{lastnamefirstname}@sensetime.com, lpk@cs.hku.hk
# ABSTRACT

Transformer has shown great success in natural language processing, computer vision, and audio processing. As one of its core components, the softmax attention helps to capture long-range dependencies yet prohibits its scale-up due to the quadratic space and time complexity with respect to the sequence length. Kernel methods are often adopted to reduce the complexity by approximating the softmax operator. Nevertheless, due to the approximation errors, their performances vary across different tasks and corpora, and they suffer crucial performance drops when compared with the vanilla softmax attention. In this paper, we propose a linear transformer called COSFORMER that can achieve comparable or better accuracy than the vanilla transformer in both causal and cross attentions. COSFORMER is based on two key properties of softmax attention: (i) non-negativity of the attention matrix; (ii) a non-linear re-weighting scheme that can concentrate the distribution of the attention matrix. As its linear substitute, COSFORMER fulfills these properties with a linear operator and a cosine-based distance re-weighting mechanism. Extensive experiments on language modeling and text understanding tasks demonstrate the effectiveness of our method. We further examine our method on long sequences and achieve state-of-the-art performance on the Long-Range Arena benchmark. The source code is available at COSFORMER.
# 1 INTRODUCTION

![](images/c0fd66974759065fc5e7dc64d423f00dff9ec7f456274a0e81555ca425432954.jpg)
Figure 1: Performance ($y$-axis), speed ($x$-axis), and memory footprint (circle sizes) of efficient transformers on the Long-Range Arena benchmark. The proposed COSFORMER achieves an all-around supremacy over competing methods in the top left quadrant.
With years of development, the transformer model (Vaswani et al., 2017) and its variants (Zaheer et al., 2020; Wang et al., 2020; Tay et al., 2020a) have been successfully adapted to the three most popular artificial intelligence (AI) fields: natural language processing (Devlin et al., 2019; Liu et al., 2019), computer vision (Dosovitskiy et al., 2020; Carion et al., 2020; Liu et al., 2021), and audio processing (Schneider et al., 2019; Baevski et al., 2020). Compared with conventional recurrent (Hochreiter & Schmidhuber, 1997) and convolutional architectures (He et al., 2016), transformer-based architectures are generally more scalable to data volumes (Brown et al., 2020) and stronger at capturing global information with less inductive bias, thus excelling on many tasks.
Dot-product attention with softmax normalization is the cornerstone of the transformer for capturing long-range dependencies. However, its quadratic space and time complexity with regard to the sequence length makes its computational overhead prohibitive, especially for long inputs. To address this issue, numerous methods have been proposed recently, such as sparse attention matrices (Zaheer et al., 2020; Beltagy et al., 2020; Tay et al., 2020a; Kitaev et al., 2019; Child et al., 2019), low-rank representations (Wang et al., 2020), or kernel-based methods (Peng et al., 2020; Choromanski et al., 2020; Katharopoulos et al., 2020), among many others. These methods achieve reduced computational complexity with comparable performance to the vanilla attention architecture on several selected tasks or corpora.
However, the improved efficiency is usually achieved by introducing additional yet often impractical assumptions on the attention matrix (Wang et al., 2020), or with valid approximation of the softmax operation only within constrained theoretical bounds (Choromanski et al., 2020; Peng et al., 2020). Therefore, when their assumptions are not satisfied or when approximation errors accumulate, these methods may not always be advantageous over the vanilla architecture (Narang et al., 2021). Consequently, performance deficiencies across a broad application spectrum are often observed in these transformer variants, especially those with linear complexity. For example, the Performer (Choromanski et al., 2020), RFA (Peng et al., 2020) and Reformer (Kitaev et al., 2019) show less satisfactory performance on the GLUE benchmark (Wang et al., 2018) when compared with the vanilla architecture, as suggested in our preliminary experiments (Tab. 2). Furthermore, many of these aforementioned methods are not applicable to causal attention, which is critical for auto-regressive training. For example, techniques proposed in Linformer (Wang et al., 2020) and BigBird (Zaheer et al., 2020) are specific to cross attention.
Since the softmax operator appears to be the main hurdle, while efficient yet accurate approximation of softmax is difficult to achieve, one question naturally arises: "Can we replace the softmax operator with a linear function instead, while maintaining its key properties?" By digging into the softmax attention, we find two key properties that affect its empirical performance: (i) elements in the attention matrix are non-negative (Tsai et al., 2019; Katharopoulos et al., 2020); (ii) the non-linear re-weighting scheme acts as a stabilizer for the attention weights (Titsias, 2016; Gao & Pavel, 2017; Jang et al., 2016). These findings reveal some new insights into the current approaches. For example, the linear transformer (Katharopoulos et al., 2020) achieves property (i) using an exponential linear unit (Clevert et al., 2016) activation function. However, due to the lack of a re-weighting scheme, it underperforms other efficient transformer variants on the Long-Range Arena benchmark, as shown in Figure 1, as well as on the language modeling task (Table 2) in our controlled experiments.
In this paper, we propose a new variant of linear transformer called COSFORMER that satisfies both of the above properties. Specifically, we enforce the non-negativity property by passing the features through a ReLU (Agarap, 2018) activation function before computing the similarity scores. In this way, we encourage the model to avoid aggregating negatively-correlated contextual information. Further, we adopt a $\cos$ re-weighting scheme to stabilize the attention weights. This helps the model to amplify local correlations, which usually contain more relevant information for natural language tasks. Thanks to Ptolemy's theorem, our attention can be exactly decomposed into a linear form. We perform extensive experiments on both autoregressive and bidirectional language models on five public benchmarks, including WikiText-103 (Merity et al., 2017), GLUE (Wang et al., 2018), IMDB (Maas et al., 2011), AMAZON (Ni et al., 2019) and the Long-Range Arena benchmark (Tay et al., 2020b). Our model shows much better inference speed and a smaller memory footprint, while achieving on-par performance with the vanilla transformer. It is noteworthy that our method ranks $1^{\text{st}}$ on the Long-Range Arena benchmark, showing more favorable performance than other competitors, which well demonstrates its strong capacity for modeling long sequence inputs.
# 2 OUR METHOD

In this section, we provide technical details of our linear transformer called COSFORMER. The key insight of COSFORMER is to replace the non-decomposable non-linear softmax operation with a linear operation equipped with a decomposable non-linear re-weighting mechanism. Our model is applicable to both causal and cross attentions with linear time and space complexity with regard to the input sequence length, thus exhibiting strong capacity for modeling long-range dependencies.
# 2.1 THE GENERAL FORM OF TRANSFORMER

Given an input sequence $x$ of length $N$, we first represent it in the embedding space, $x \in \mathbb{R}^{N \times d}$, with feature dimension $d$. A transformer block $\mathcal{T}: \mathbb{R}^{N \times d} \to \mathbb{R}^{N \times d}$ with input $x$ is defined as:
$$
\mathcal{T}(x) = \mathcal{F}(\mathcal{A}(x) + x), \tag{1}
$$

where $\mathcal{F}$ is a feedforward network that contains a residual connection; $\mathcal{A}$ is the self-attention function that computes the attention matrix $A$, which has quadratic space and time complexity with respect to $N$, thus becoming the computational bottleneck of $\mathcal{T}$ on long inputs.
There are three key components in $\mathcal{A}$, namely, the query $(Q)$, key $(K)$, and value $(V)$, computed through three learnable linear matrices $W_{Q}, W_{K}, W_{V}$: $Q = xW_{Q}, K = xW_{K}, V = xW_{V}$. We use $M_{i}$ to denote the $i$-th row of a matrix $M$; then the output $\mathcal{O} \in \mathbb{R}^{N \times d}$ of $\mathcal{A}(x)$ can be computed as:

$$
\mathcal{O} = \mathcal{A}(x) = \left[\mathcal{O}_{1}, \dots, \mathcal{O}_{N}\right]^{T}, \quad \mathcal{O}_{i} = \sum_{j} \frac{\mathcal{S}\left(Q_{i}, K_{j}\right)}{\sum_{j} \mathcal{S}\left(Q_{i}, K_{j}\right)} V_{j}, \tag{2}
$$

where $\mathcal{S}(\cdot)$ measures the similarity between queries and keys. If $\mathcal{S}(Q, K) = \exp(QK^T)$, Eq. 2 becomes the dot-product attention with softmax normalization. In this case, the space and time complexity to compute one row $\mathcal{O}_i$ of the output is $O(N)$. Therefore, the total space and time complexity for computing $\mathcal{O}$ grows quadratically with respect to the input length.
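To make the quadratic cost concrete, the following is a minimal NumPy sketch of Eq. 2 with $\mathcal{S}(Q, K) = \exp(QK^T)$ (an illustrative sketch, not code from the paper; the max-subtraction is a standard numerical-stability trick that leaves the normalized weights unchanged):

```python
import numpy as np

def softmax_attention(Q, K, V):
    """Q, K, V: (N, d) arrays. Returns the (N, d) output of Eq. 2."""
    S = Q @ K.T                                   # (N, N) similarity scores: the bottleneck
    S = np.exp(S - S.max(axis=1, keepdims=True))  # exp with a stability shift
    A = S / S.sum(axis=1, keepdims=True)          # row-wise softmax normalization
    return A @ V                                  # (N, d) weighted aggregation

N, d = 512, 64
rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((N, d)) for _ in range(3))
print(softmax_attention(Q, K, V).shape)  # (512, 64)
```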
# 2.2 LINEARIZATION OF SELF-ATTENTION

According to Eq. 2, we can select any similarity function to compute the attention matrix. In order to maintain a linear computation budget, one solution is to adopt a decomposable similarity function such that:
$$
\mathcal{S}\left(Q_{i}, K_{j}\right) = \phi\left(Q_{i}\right) \phi\left(K_{j}\right)^{T}, \tag{3}
$$

where $\phi$ is a kernel function that maps the queries and keys to their hidden representations. Then one can rewrite Eq. 2 in the form of kernel functions as:
$$
\mathcal{O}_{i} = \frac{\sum_{j=1}^{N} \left(\phi\left(Q_{i}\right) \phi\left(K_{j}\right)^{T}\right) V_{j}}{\sum_{j=1}^{N} \left(\phi\left(Q_{i}\right) \phi\left(K_{j}\right)^{T}\right)}. \tag{4}
$$

The attention operation can then be carried out in linear complexity via the associativity of matrix products:
$$
\left(\phi(Q) \phi(K)^{T}\right) V = \phi(Q) \left(\phi(K)^{T} V\right). \tag{5}
$$

In this form (Eq. 5), instead of explicitly computing the attention matrix $A = QK^T \in \mathbb{R}^{N \times N}$, we first calculate $\phi(K)^T V \in \mathbb{R}^{d \times d}$ and then multiply it by $\phi(Q) \in \mathbb{R}^{N \times d}$. Using this trick, we incur only a computational complexity of $O(Nd^2)$. Note that in typical natural language tasks, the feature dimension $d$ of one head is always much smaller than the input sequence length $N$ ($d \ll N$), so we can safely omit $d$ and achieve a computational complexity of $O(N)$, as illustrated in Figure 2.
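The reordering in Eq. 5 can be checked numerically. The sketch below (an illustration assuming an elementwise ReLU feature map; any decomposable $\phi$ works) shows that both orders give the same output while only the right-hand side avoids the $N \times N$ intermediate:

```python
import numpy as np

def phi(x):
    # Any decomposable feature map works here; ReLU is used as an example.
    return np.maximum(x, 0)

N, d = 1024, 64
rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((N, d)) for _ in range(3))

quadratic = (phi(Q) @ phi(K).T) @ V    # materializes an (N, N) intermediate: O(N^2 d)
linear    = phi(Q) @ (phi(K).T @ V)    # only a (d, d) intermediate: O(N d^2)
print(np.allclose(quadratic, linear))  # True: identical result, different cost
```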
Previous Solutions As aforementioned, the key to linear attention is to find a decomposable similarity function $\mathcal{S}(\cdot)$ that generalizes well to different tasks. Most existing linear transformers try to find an unbiased estimate of the softmax attention. For example, RFA (Peng et al., 2020) approximates the softmax operation with random feature maps using the random Fourier features theorem (Rahimi & Recht, 2008), and the Performer (Choromanski et al., 2020) utilizes positive random features to approximate it. However, we empirically find that these methods are sensitive to the choice of sampling rate and become unstable if the sampling rate gets too high. Also, to accommodate recency bias, gating mechanisms are employed to better exploit more recent context.

Another group of works attempts to directly replace the softmax with a linear operation. For example, the linear transformer (Katharopoulos et al., 2020) replaces the softmax similarity function with a pure dot product $\mathcal{S} = QK^T$, and uses a non-linear activation function $\phi(\cdot) = \mathrm{elu}(\cdot) + 1$ to model the pairwise relations between features. However, our controlled experiments show that this solution does not necessarily generalize well to many downstream tasks (Tab. 2) or the Long-Range Arena benchmark (Tab. 4). In this paper, we propose a new replacement for softmax that not only achieves comparable or better performance than the softmax attention on a wide range of tasks, but also enjoys linear space and time complexity.
![](images/763396ee0ef2571c47fcfe356a6d8cdff49cda8498e0f8859c89199f16979ae5.jpg)
Vanilla self attention

![](images/9fd3c7b648aacb6ae1952c6ca8ac36419f03c3ad7d7565c22a2ba06d46c787ef.jpg)
Linearized self attention

Figure 2: Illustration of the computations for vanilla self attention (left) and linearized attention (right). The input length is $N$ and the feature dimension is $d$, with $d \ll N$. Tensors in the same box are associated for computation. The linearized formulation allows $O(N)$ time and space complexity.
# 2.3 ANALYSIS OF SOFTMAX ATTENTION

In the vanilla transformer architecture, when $\mathcal{S}(Q,K) = \exp(QK^T)$, the softmax operation is applied to obtain a row-wise normalization of the attention matrix $A \in \mathbb{R}^{N \times N}$, as shown in Eq. 2. In other words, we normalize the relations of each element in the input sequence to all other elements in order to obtain a weighted aggregation of contextual information. However, apart from its good empirical performance, the crucial and necessary characteristics of softmax attention remain only loosely determined in the original transformer paper and follow-up works.
In this work, we empirically identify two key properties of the softmax operation that may play important roles in its performance: 1) it ensures all values in the attention matrix $A$ are non-negative; 2) it provides a non-linear re-weighting mechanism that concentrates the distribution of attention connections and stabilizes the training (Titsias, 2016; Gao & Pavel, 2017; Jang et al., 2016).
Table 1: Analysis of the softmax properties. All attention variants are implemented in the RoBERTa (Liu et al., 2019) architecture and are pre-trained on the WikiText-103 (Merity et al., 2017) dataset. Loss denotes the validation loss. We then fine-tune these variants on each downstream dataset and report the accuracy (higher is better).

| | Loss | QQP | SST-2 | MNLI |
| --- | --- | --- | --- | --- |
| $\phi_{\mathbf{I}}$ | 2.343 | 84.23 | 76.26 | 58.27 |
| $\phi_{\mathrm{LeakyReLU}}$ | 2.246 | 84.46 | 78.21 | 74.26 |
| $\phi_{\mathrm{ReLU}}$ | 1.993 | 88.86 | 89.90 | 77.86 |
| softmax | 1.915 | 88.41 | 92.31 | 79.15 |

To validate these assumptions, we design the preliminary studies shown in Table 1. First, to validate the importance of non-negativity, we compare three instantiations of the function $\phi$ in Eq. 3: an identity mapping $\phi_{\mathbf{I}} = \mathbf{I}$ that does not preserve non-negativity, and the variant $\phi_{\mathrm{ReLU}(\cdot)} = \mathrm{ReLU}(\cdot)$ that retains only positive input values while replacing negative values with zeros. We also add the $\phi_{\mathrm{LeakyReLU}(\cdot)} = \mathrm{LeakyReLU}(\cdot)$ variant, which likewise does not preserve non-negativity but has the same non-linearity as the ReLU variant. Second, to demonstrate the effect of non-linear re-weighting, we compare models using only $\phi_{\mathrm{ReLU}(\cdot)}$ without any re-weighting against those with softmax operations. From Table 1, the superior results of $\phi_{\mathrm{ReLU}}$ over $\phi_{\mathbf{I}}$ and $\phi_{\mathrm{LeakyReLU}}$ demonstrate the benefit of retaining non-negative values. Our conjecture is that by retaining only positive values in the similarity matrices, the model ignores features with negative correlations, thus effectively avoiding the aggregation of irrelevant contextual information. By comparing the results of $\phi_{\mathrm{ReLU}}$ with softmax, we observe that models with softmax re-weighting converge faster and generalize better to downstream tasks. This might be explained by the fact that softmax normalization amplifies correlated pairs, which may be useful for identifying relevant patterns.
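For reference, a hedged sketch of the three kernel instantiations compared in Table 1, plugged into the normalized linear attention of Eq. 4 (the LeakyReLU slope of 0.01 and the toy shapes are assumptions made for illustration, not settings from the paper):

```python
import numpy as np

kernels = {
    "phi_I":         lambda x: x,                             # identity: no non-negativity
    "phi_LeakyReLU": lambda x: np.where(x > 0, x, 0.01 * x),  # non-linear, still signed
    "phi_ReLU":      lambda x: np.maximum(x, 0),              # non-negative and non-linear
}

def linear_attention(Q, K, V, phi):
    # Eq. 4 with the Eq. 5 reordering; note phi_I gives no guarantee that the
    # denominator stays positive, one symptom of dropping non-negativity.
    num = phi(Q) @ (phi(K).T @ V)                # (N, d)
    den = phi(Q) @ phi(K).sum(axis=0)[:, None]   # (N, 1) row-wise normalizer
    return num / den

rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((128, 16)) for _ in range(3))
for name, phi in kernels.items():
    O = linear_attention(Q, K, V, phi)
    print(name, np.isfinite(O).all())
```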
# 2.4 COSFORMER

Based on the observations above, we propose our model COSFORMER, which entirely discards the softmax normalization while still featuring the non-negativity and re-weighting mechanisms. COSFORMER consists of two main components: a linear projection kernel $\phi_{\mathrm{linear}}$ and a cos-based re-weighting mechanism. Below we describe each component in detail.

Linear projection kernel $\phi_{\mathrm{linear}}$ Recalling the general form of attention in Eq. 2, we define a linear similarity as:
$$
\mathcal{S}(Q, K) = \mathrm{s}\left(\phi_{\text{linear}}(Q), \phi_{\text{linear}}(K)\right) = \mathrm{s}\left(Q^{\prime}, K^{\prime}\right) \tag{6}
$$
where $\phi_{\mathrm{linear}}$ is the transformation function that maps the queries $Q$ and keys $K$ to our desired representations $Q^{\prime}$ and $K^{\prime}$, and $\mathrm{s}$ is a function that can be linearly decomposed to measure the similarity between $Q^{\prime}$ and $K^{\prime}$. Specifically, in order to ensure a fully positive attention matrix $A$ and avoid aggregating negatively-correlated information, we adopt $\mathrm{ReLU}(\cdot)$ as the transformation function and thereby effectively eliminate negative values:

$$
\phi_{\text{linear}}(x) = \mathrm{ReLU}(x) \tag{7}
$$

![](images/f8f6152fad3b36da086a630f7c44fb2da32be062db277375bd2db9b189b43f78.jpg)
(1) Vanilla Transformer

![](images/a7c9479e7a83f2c3b432fc8e63c9b2cdca00a25fad3bd54ad9f7dc0302926662.jpg)
(2) cosFormer

![](images/35c6a422f6cdad64d7cff919cfcb256a1cd495480c43e465f4b1ad67d4b79ea9.jpg)
(3) cosFormer (w/o re-weighting)

![](images/89da93d9b7ff4f62be4d9f6547e6daa9278d36b9330ee248391e0213bd8e7b49.jpg)
(4) cos re-weighting matrix

Figure 3: (1) Attention matrix of the vanilla transformer. (2) Attention matrix of COSFORMER. (3) Attention matrix of COSFORMER without re-weighting. (4) Visualization of the $\cos$-based distance matrix. After re-weighting, we can see a smoother attention distribution along the diagonal region of the attention matrix, exhibiting a similar pattern to the vanilla transformer, which assists in stabilizing the training.
As $Q^{\prime}$ and $K^{\prime}$ contain only non-negative values, we directly take their dot-product $\mathrm{s}(x,y) = xy^T$, $x, y \in \mathbb{R}^{1 \times d}$, followed by a row-wise normalization to compute the attention matrices:

$$
\mathcal{O}_{i} = \frac{\sum_{j=1}^{N} f\left(\phi_{\text{linear}}\left(Q_{i}\right), \phi_{\text{linear}}\left(K_{j}\right)\right) V_{j}}{\sum_{j=1}^{N} f\left(\phi_{\text{linear}}\left(Q_{i}\right), \phi_{\text{linear}}\left(K_{j}\right)\right)} = \frac{\sum_{j=1}^{N} \left(\mathrm{ReLU}\left(Q_{i}\right) \mathrm{ReLU}\left(K_{j}\right)^{T}\right) V_{j}}{\sum_{j=1}^{N} \left(\mathrm{ReLU}\left(Q_{i}\right) \mathrm{ReLU}\left(K_{j}\right)^{T}\right)} \tag{8}
$$
Based on Eq. 4, we rearrange the order of the dot products and obtain the proposed attention in linear complexity as:

$$
\mathcal{O}_{i} = \frac{\mathrm{ReLU}\left(Q_{i}\right) \sum_{j=1}^{N} \mathrm{ReLU}\left(K_{j}\right)^{T} V_{j}}{\mathrm{ReLU}\left(Q_{i}\right) \sum_{j=1}^{N} \mathrm{ReLU}\left(K_{j}\right)^{T}} \tag{9}
$$
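A minimal sketch of Eq. 9 (illustrative only, non-causal): the $d \times d$ and $d$-dimensional key statistics are computed once and shared by the numerator and denominator, so the cost stays linear in $N$:

```python
import numpy as np

def relu_linear_attention(Q, K, V):
    """Q, K, V: (N, d). Returns O per Eq. 9, shape (N, d)."""
    Qp, Kp = np.maximum(Q, 0), np.maximum(K, 0)  # ReLU(Q), ReLU(K)
    kv = Kp.T @ V                 # sum_j ReLU(K_j)^T V_j, a (d, d) matrix
    k_sum = Kp.sum(axis=0)        # sum_j ReLU(K_j), a (d,) vector
    num = Qp @ kv                 # (N, d) numerator
    den = Qp @ k_sum              # (N,) row-wise normalizer
    return num / den[:, None]

rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((256, 32)) for _ in range(3))
print(relu_linear_attention(Q, K, V).shape)  # (256, 32)
```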
cos-Based Re-weighting Mechanism The non-linear re-weighting mechanism introduced by the softmax attention can concentrate the distribution of the attention weights and therefore stabilize the training process (Titsias, 2016; Gao & Pavel, 2017; Jang et al., 2016). We also empirically find that it can punish far-away connections and enforce locality in some cases. In fact, such a locality bias, i.e., that a large portion of contextual dependencies come from neighboring tokens, is commonly observed in downstream NLP tasks (Clark et al., 2019; Kovaleva et al., 2019), as shown in Figure 3 (1).

Based on the assumption above, what we need to fulfill the second property of softmax may be a decomposable re-weighting mechanism that introduces a recency bias to the attention matrix. Here, we propose a cos-based re-weighting mechanism, as it perfectly fits our purpose: (1) Ptolemy's theorem ensures that the cos weights can be decomposed into two summations; (2) as shown in Figure 3 (4), the cos weighting puts more weight on neighbouring tokens and therefore enforces locality. Comparing the attention matrices in Figure 3 (2) and (3), COSFORMER enforces more locality than the variant without the re-weighting mechanism.
Specifically, combining with Eq. 6, the model with cosine re-weighting is defined as:

$$
s\left(Q_{i}^{\prime}, K_{j}^{\prime}\right) = Q_{i}^{\prime} K_{j}^{\prime T} \cos\left(\frac{\pi}{2} \times \frac{i-j}{M}\right) \tag{10}
$$
By leveraging Ptolemy's theorem, we decompose this formulation as:

$$
\begin{aligned}
Q_{i}^{\prime} K_{j}^{\prime T} \cos\left(\frac{\pi}{2} \times \frac{i-j}{M}\right) &= Q_{i}^{\prime} K_{j}^{\prime T} \left(\cos\left(\frac{\pi i}{2M}\right) \cos\left(\frac{\pi j}{2M}\right) + \sin\left(\frac{\pi i}{2M}\right) \sin\left(\frac{\pi j}{2M}\right)\right) \\
&= \left(Q_{i}^{\prime} \cos\left(\frac{\pi i}{2M}\right)\right) \left(K_{j}^{\prime} \cos\left(\frac{\pi j}{2M}\right)\right)^{T} + \left(Q_{i}^{\prime} \sin\left(\frac{\pi i}{2M}\right)\right) \left(K_{j}^{\prime} \sin\left(\frac{\pi j}{2M}\right)\right)^{T},
\end{aligned}
$$
where $i, j = 1, \dots, N$, $M \geq N$, and $Q^{\prime} = \mathrm{ReLU}(Q)$, $K^{\prime} = \mathrm{ReLU}(K)$. Let $Q_{i}^{\cos} = Q_{i}^{\prime} \cos\left(\frac{\pi i}{2M}\right)$, $Q_{i}^{\sin} = Q_{i}^{\prime} \sin\left(\frac{\pi i}{2M}\right)$, $K_{j}^{\cos} = K_{j}^{\prime} \cos\left(\frac{\pi j}{2M}\right)$, $K_{j}^{\sin} = K_{j}^{\prime} \sin\left(\frac{\pi j}{2M}\right)$; then the output of the proposed attention module can be expressed as:

$$
\mathcal{O}_{i} = \frac{\sum_{j=1}^{N} f\left(Q_{i}^{\prime}, K_{j}^{\prime}\right) V_{j}}{\sum_{j=1}^{N} f\left(Q_{i}^{\prime}, K_{j}^{\prime}\right)} = \frac{\sum_{j=1}^{N} Q_{i}^{\cos}\left(\left(K_{j}^{\cos}\right)^{T} V_{j}\right) + \sum_{j=1}^{N} Q_{i}^{\sin}\left(\left(K_{j}^{\sin}\right)^{T} V_{j}\right)}{\sum_{j=1}^{N} Q_{i}^{\cos}\left(K_{j}^{\cos}\right)^{T} + \sum_{j=1}^{N} Q_{i}^{\sin}\left(K_{j}^{\sin}\right)^{T}}, \tag{11}
$$
where $\mathcal{O}_{i}$ is the output of the attention module at the $i$-th position of the sequence. Detailed derivations are included in the appendix. Without loss of generality, our method achieves linear complexity as:

$$
\mathcal{O} = \mathcal{S}(Q, K) V = \left(Q^{\cos} K^{\cos} + Q^{\sin} K^{\sin}\right) V = Q^{\cos}\left(K^{\cos} V\right) + Q^{\sin}\left(K^{\sin} V\right) \tag{12}
$$

![](images/6ffd99b67f79e334d1c783bf4f67496cf985b266977374cf7f2b1c9c1ce6d27e.jpg)

![](images/28c74906a16e2c938162243108d3de0a3b13f78a7403d2dbfbe7909fdeda081f.jpg)
Figure 4: Training loss (left) and validation loss (right) of bidirectional language modeling pre-training. In both training and validation, the proposed COSFORMER converges faster than the vanilla transformer.
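Putting Eq. 10-12 together, a hedged NumPy sketch of the full non-causal COSFORMER attention; positions indexed $1..N$ and the choice $M = N$ are illustrative assumptions, not prescriptions from the paper:

```python
import numpy as np

def cosformer_attention(Q, K, V, M=None):
    """Q, K, V: (N, d). Returns O per Eq. 11, shape (N, d)."""
    N, d = Q.shape
    if M is None:
        M = N                                          # any M >= N is valid
    Qp, Kp = np.maximum(Q, 0), np.maximum(K, 0)        # phi_linear = ReLU (Eq. 7)
    idx = np.arange(1, N + 1)[:, None]                 # positions 1..N, shape (N, 1)
    cos_w = np.cos(np.pi * idx / (2 * M))              # per-position cos factors
    sin_w = np.sin(np.pi * idx / (2 * M))              # per-position sin factors
    Qc, Qs = Qp * cos_w, Qp * sin_w                    # Q^cos, Q^sin
    Kc, Ks = Kp * cos_w, Kp * sin_w                    # K^cos, K^sin
    num = Qc @ (Kc.T @ V) + Qs @ (Ks.T @ V)            # Eq. 12 reordering: O(N d^2)
    den = Qc @ Kc.sum(axis=0) + Qs @ Ks.sum(axis=0)    # row-wise normalizer (Eq. 11)
    return num / den[:, None]

rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((256, 32)) for _ in range(3))
print(cosformer_attention(Q, K, V).shape)  # (256, 32)
```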
Relation to positional encoding. COSFORMER can be seen as a new way of introducing a relative positional bias into efficient transformers. Compared with ours, Rotary Position Embedding (Su et al., 2021) uses a more complex position embedding strategy and does not enforce non-negativity on the similarity scores. Also, since it only changes the position embedding in the numerator while keeping the denominator unchanged, its attention scores do not sum to 1. Stochastic Positional Encoding (Liutkus et al., 2021) uses a sampling strategy to approximate the softmax and introduces relative positional encoding to linear transformers.
# 3 EXPERIMENTS

In this section, we experimentally validate the effectiveness of the proposed method in multiple settings. The purposes of the experiments are three-fold. First, we validate the capacity of COSFORMER in language modeling through autoregressive (Sec. 3.1) and bidirectional (Sec. 3.2) setups on WikiText-103 (Merity et al., 2017). In this way, we validate the effectiveness of the proposed linear attention module in both causal and non-causal cases. Second, we investigate the generalization ability of COSFORMER on downstream tasks through comparative fine-tuning experiments against other existing transformer variants on five datasets, including GLUE (QQP, SST-2, MNLI) (Wang et al., 2018), IMDB (Maas et al., 2011) and AMAZON (Ni et al., 2019) (Sec. 3.3). We further compare COSFORMER with other transformer variants on the Long-Range Arena benchmark (Tay et al., 2020b) to understand its ability in modeling long-range dependencies (Sec. 3.4) and provide a comparative analysis of model efficiency (Sec. 3.5). Third, we conduct ablation studies to understand each component of COSFORMER (Sec. 3.6).
# 3.1 AUTOREGRESSIVE LANGUAGE MODELING

In autoregressive, or left-to-right, language modeling, we estimate the probability distribution of a token given its previous tokens. We use (Baevski & Auli, 2018) as our baseline model. Specifically, we adopt their large model, which has 16 cascaded layers with a projected dimension of 1024, and replace the self-attention module with our proposed linear attention module. We train our model on 8 Nvidia Tesla A100 GPUs with a sequence length of 512 for 150K updates on WikiText-103 (Merity et al., 2017) and report perplexity on the validation and test splits in Table 2.
We observe that although the baseline model is a powerful standard transformer with quadratic computational complexity, COSFORMER outperforms it by a clear margin at linear computational complexity. Besides, we achieve comparable perplexity to other methods on the validation set, and significantly outperform all competing methods on the test set, which further demonstrates the effectiveness of COSFORMER.
Table 2: Perplexity (lower is better) of the language modeling pre-training task on the validation and test sets of the WikiText-103 (Merity et al., 2017) dataset.

| | ppl (val) ↓ | ppl (test) ↓ |
| --- | --- | --- |
| Vanilla Transformer | 24.5 | 26.2 |
| Linear Transformer | 28.7 | 30.2 |
| RFA-Gaussian | 25.8 | 27.5 |
| RFA-arccos | 26.4 | 28.1 |
| RFA-Gate-arccos | 24.8 | 26.3 |
| RFA-Gate-Gaussian | 23.2 | 25.0 |
| COSFORMER | 23.5 | 23.1 |
Table 3: Results on fine-tuned downstream tasks based on the pre-trained bidirectional model. Best results are in boldface. The proposed COSFORMER achieves superior performance over competing efficient transformers and approaches the vanilla transformer.

| | QQP ↑ | SST-2 ↑ | MNLI ↑ | IMDB ↑ | AMAZON ↑ | Avg ↑ |
| --- | --- | --- | --- | --- | --- | --- |
| Vanilla Transformer (Liu et al., 2019) | 88.41 | **92.31** | **79.15** | 92.86 | 75.79 | **85.70** |
| Performer (Choromanski et al., 2020) | 69.92 | 50.91 | 35.37 | 60.36 | 64.84 | 56.28 |
| Reformer (Kitaev et al., 2019) | 63.18 | 50.92 | 35.47 | 50.01 | 64.28 | 52.77 |
| Linear Trans. (Katharopoulos et al., 2020) | 74.85 | 84.63 | 66.56 | 91.48 | 72.50 | 78.00 |
| Longformer (Beltagy et al., 2020) | 85.51 | 88.65 | 77.22 | 91.14 | 73.34 | 83.17 |
| RFA (Peng et al., 2020) | 75.28 | 76.49 | 57.6 | 78.98 | 68.15 | 71.30 |
| COSFORMER | **89.26** | 91.05 | 76.70 | **92.95** | **76.30** | 85.25 |
# 3.2 BIDIRECTIONAL LANGUAGE MODELING

For bidirectional language modeling, we adopt RoBERTa (Liu et al., 2019) as the baseline model. Similarly, we replace the self-attention module in RoBERTa with the proposed linear attention module and keep the other structures unchanged. We train this bidirectional task on 2 Nvidia Tesla A100 GPUs for 50K iterations with an input sequence length of 512. As shown in Figure 4, COSFORMER converges faster than the vanilla transformer on both the training and validation sets with comparable or smaller loss values, despite consuming only linear space and time complexity. In addition, the COSFORMER variant with the re-weighting mechanism shows both notably better convergence speed and final results than the counterpart without re-weighting, which further validates the effectiveness of our $\cos$-based distance matrix and demonstrates the effectiveness of recency bias on natural language data.
# 3.3 DOWNSTREAM FINE-TUNING TASKS

In this section, we fine-tune the pre-trained model on downstream tasks to demonstrate the generalization ability of COSFORMER. We use the pre-trained bidirectional model and fine-tune it on three downstream text classification tasks: GLUE (QQP, SST-2, MNLI) (Wang et al., 2018), IMDB (Maas et al., 2011) and AMAZON (Ni et al., 2019). For fair comparison, we first pre-train all the competing efficient transformer variants for the same 50K iterations on WikiText-103 (Merity et al., 2017) under the same setting, then follow the same fine-tuning protocol as RoBERTa (Liu et al., 2019) to fine-tune these methods on the downstream tasks. From Table 3, we can see that COSFORMER outperforms the baseline (Liu et al., 2019) on three out of five datasets, and achieves either the best or second-best result on all five downstream datasets compared to competing efficient transformers. It is worth noting that although Longformer (Beltagy et al., 2020) achieves better results on MNLI than COSFORMER, it requires a computational complexity of $O(Nw)$, where $w$ is the window size. As shown in Figure 1, Longformer is slower and requires more memory overhead than COSFORMER. The other competing methods (Peng et al., 2020; Choromanski et al., 2020; Kitaev et al., 2019) are all based on kernel functions and have substantial performance gaps compared with our model. This validates the effectiveness of the proposed COSFORMER model compared with other efficient transformer variants.
# 3.4 RESULTS ON LONG-RANGE-ARENA BENCHMARK

To further evaluate the generalization ability of the proposed method, we train our model from scratch on the Long-Range Arena benchmark (Tay et al., 2020b), a benchmark specifically designed for efficient transformers with long input sequences, which serves as a suitable testbed to comparatively assess the quality of efficient transformer variants. To ensure fair comparison, we first implement our method in Jax (Bradbury et al., 2018), then carefully follow their preprocessing, data split, model structure and training protocol. We evaluate our method on a variety of tasks including ListOps on long sequences (Nangia & Bowman, 2018), byte-level text classification (Maas et al., 2011), document retrieval using the ACL Anthology Network (Radev et al., 2013), image classification on sequences of pixels from CIFAR-10 (Krizhevsky & Hinton, 2009), and Pathfinder (Linsley et al., 2018). As shown in Table 4, COSFORMER achieves competitive results across all the tasks while achieving the best performance on ListOps and document retrieval. For the Pathfinder task, since the distance between the two points can be very large, our introduced locality bias has a negative impact and lags a bit behind other state-of-the-art methods, although the performance gap between our method and the vanilla transformer is small. It is worth mentioning that COSFORMER achieves the best overall score on the Long-Range Arena benchmark, being one of only two models that surpass the vanilla transformer architecture.
Table 4: Results on the Long-Range Arena benchmark. Best results are in boldface. COSFORMER achieves the best average score across the 5 tasks.

| Model | ListOps ↑ | Text ↑ | Retrieval ↑ | Image ↑ | Pathfinder ↑ | Avg ↑ |
| --- | --- | --- | --- | --- | --- | --- |
| Local Attention (Tay et al., 2020b) | 15.82 | 52.98 | 53.39 | 41.46 | 66.63 | 46.06 |
| Linear Trans. (Katharopoulos et al., 2020) | 16.13 | **65.9** | 53.09 | 42.34 | 75.3 | 50.55 |
| Reformer (Kitaev et al., 2019) | 37.27 | 56.1 | 53.4 | 38.07 | 68.5 | 50.67 |
| Sparse Trans. (Child et al., 2019) | 17.07 | 63.58 | 59.59 | **44.24** | 71.71 | 51.24 |
| Sinkhorn Trans. (Tay et al., 2020a) | 33.67 | 61.2 | 53.83 | 41.23 | 67.45 | 51.29 |
| Linformer (Wang et al., 2020) | 35.7 | 53.94 | 52.27 | 38.56 | 76.34 | 51.36 |
| Performer (Choromanski et al., 2020) | 18.01 | 65.4 | 53.82 | 42.77 | **77.05** | 51.41 |
| Synthesizer (Tay et al., 2021) | 36.99 | 61.68 | 54.67 | 41.61 | 69.45 | 52.88 |
| Longformer (Beltagy et al., 2020) | 35.63 | 62.85 | 56.89 | 42.22 | 69.71 | 53.46 |
| Transformer (Vaswani et al., 2017) | 36.37 | 64.27 | 57.46 | 42.44 | 71.4 | 54.39 |
| BigBird (Zaheer et al., 2020) | 36.05 | 64.02 | 59.29 | 40.83 | 74.87 | 55.01 |
| COSFORMER | **37.9** | 63.41 | **61.36** | 43.17 | 70.33 | **55.23** |
Table 5: Speed comparison on the Long-Range Arena benchmark during both training and inference under varying sequence lengths (1K-4K). Speeds are in steps per second; higher is better. A ✘ marks runs that go out of memory.

| Model | Inference 1K | Inference 2K | Inference 3K | Inference 4K | Train 1K | Train 2K | Train 3K | Train 4K |
| --- | --- | --- | --- | --- | --- | --- | --- | --- |
| Transformer (Vaswani et al., 2017) | 25.37 | 7.83 | ✘ | ✘ | 6.95 | 2.23 | ✘ | ✘ |
| Local Attention (Tay et al., 2020b) | 57.73 | 33.19 | 23.36 | 17.79 | 13.45 | 6.71 | 4.32 | 3.09 |
| Linformer (Wang et al., 2020) | 70.09 | 39.1 | 27.05 | 20.62 | 14.75 | 7.09 | 4.52 | 3.21 |
| Reformer (Kitaev et al., 2019) | 44.21 | 21.58 | 12.74 | 8.37 | 11.58 | 4.98 | 2.94 | 1.95 |
| Sinkhorn Trans. (Tay et al., 2020a) | 43.29 | 23.58 | 16.53 | 12.7 | 11.09 | 5.57 | 3.68 | 2.68 |
| Synthesizer (Tay et al., 2021) | 20.89 | 6.24 | ✘ | ✘ | 6.36 | 2.01 | ✘ | ✘ |
| BigBird (Zaheer et al., 2020) | 20.96 | 11.5 | 8.12 | 6.15 | 6.46 | 3.2 | 2.13 | 1.53 |
| Linear Trans. (Katharopoulos et al., 2020) | 67.85 | 38.24 | 26.28 | 19.98 | 11.86 | 5.54 | 3.53 | 2.56 |
| Performer (Choromanski et al., 2020) | 74.15 | 42.31 | 29.5 | 22.44 | 14 | 6.49 | 4.1 | 2.94 |
| Longformer (Beltagy et al., 2020) | 22.99 | 6.72 | ✘ | ✘ | 4.4 | 1.3 | ✘ | ✘ |
| Sparse Trans. (Child et al., 2019) | 24.87 | 7.5 | ✘ | ✘ | 6.77 | 2.2 | ✘ | ✘ |
| COSFORMER | 58.82 | 33.45 | 22.77 | 17.42 | 12.27 | 5.72 | 3.62 | 2.64 |
# 3.5 EFFICIENCY COMPARISON

In this section, we compare the efficiency of COSFORMER with other models, with a focus on long sequences as inputs. With the proposed linear attention module, we expect COSFORMER to scale comparably with other linear variants while significantly surpassing the vanilla transformer architecture. For a fair and comprehensive comparison, we implement our method and the competing methods in Jax (Bradbury et al., 2018). We use the byte-level text classification benchmark and report runtime speed during both training and inference under different sequence lengths (1K-4K). We conduct experiments on one Nvidia A6000 GPU and also report the corresponding inference-time memory footprints in Figure 1. As shown in Table 5 and Figure 1, most pattern-based methods (Beltagy et al., 2020; Zaheer et al., 2020; Tay et al., 2020a; 2021) and the vanilla transformer (Vaswani et al., 2017) are much slower and require more memory than COSFORMER, which prevents them from extending to longer sequences. Further, the kernel-based methods (Narang et al., 2021; Choromanski et al., 2020; Tay et al., 2020a) have comparable speed and memory overheads, but their performances are less satisfactory than COSFORMER across the above metrics. In summary, COSFORMER achieves overall better efficiency than other linear variants while maintaining superior modeling and generalization ability.
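As a rough illustration of the scaling gap, a CPU NumPy micro-benchmark (an illustrative sketch, not the paper's Jax setup; absolute numbers depend on hardware, and only the trend with $N$ matters):

```python
import time
import numpy as np

def softmax_attn(Q, K, V):                  # O(N^2 d) time, O(N^2) memory
    S = Q @ K.T
    A = np.exp(S - S.max(axis=1, keepdims=True))
    return (A / A.sum(axis=1, keepdims=True)) @ V

def cosformer_attn(Q, K, V):                # O(N d^2) time, O(N d) memory
    N = Q.shape[0]
    Qp, Kp = np.maximum(Q, 0), np.maximum(K, 0)
    w = np.pi * np.arange(1, N + 1)[:, None] / (2 * N)
    Qc, Qs, Kc, Ks = Qp * np.cos(w), Qp * np.sin(w), Kp * np.cos(w), Kp * np.sin(w)
    num = Qc @ (Kc.T @ V) + Qs @ (Ks.T @ V)
    den = Qc @ Kc.sum(axis=0) + Qs @ Ks.sum(axis=0)
    return num / den[:, None]

def bench(fn, *args, reps=3):
    t0 = time.perf_counter()
    for _ in range(reps):
        fn(*args)
    return (time.perf_counter() - t0) / reps

N, d = 2048, 64
rng = np.random.default_rng(0)
Q, K, V = (rng.standard_normal((N, d)) for _ in range(3))
print("softmax  :", bench(softmax_attn, Q, K, V))
print("cosformer:", bench(cosformer_attn, Q, K, V))
```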
# 3.6 ABLATION: cos-BASED RE-WEIGHTING

Table 6: Performance comparison of COSFORMER with and without $\cos$-based re-weighting $(\phi_{\mathrm{ReLU}})$. We evaluate on two composite metrics. Bidirectional finetune avg: average score across the 5 datasets reported in Table 3. LRA avg: average score across the 5 tasks reported in Table 4.

| Model | Bidirectional finetune avg ↑ | LRA avg ↑ |
| --- | --- | --- |
| $\phi_{\mathrm{ReLU}}$ | 85.12 | 54.20 |
| COSFORMER | 85.25 | 55.23 |

By introducing the $\cos$-based re-weighting, we provide a non-linear mechanism that concentrates the distribution of attention connections and stabilizes the training. In this way, we encourage the model to better exploit the locality inductive biases commonly observed in many natural language tasks. In particular, we investigate the effect of the $\cos$-based re-weighting in two aspects. First, as shown in Figure 4, adding the $\cos$-based re-weighting yields both notably better convergence speed and final results in autoregressive language modeling. Further, in Table 6, we present a comparison between COSFORMER models with and without the re-weighting mechanism, using two composite metrics that together cover 10 different datasets from the bidirectional downstream fine-tuning tasks and Long-Range Arena (Tay et al., 2020b). COSFORMER achieves overall better results than the counterpart without re-weighting, improving the average scores on both bidirectional fine-tuning and Long-Range Arena by a clear margin. This verifies that the proposed re-weighting effectively incorporates the locality inductive biases of natural language tasks.
# 4 RELATED WORK

This section reviews existing work on improving the efficiency of transformers, which can be broadly divided into two categories: pattern based methods and kernel based methods.
Pattern based methods Pattern based methods sparsify the attention matrix with handcrafted or learnable patterns. As an early approach, Lee et al. (2019) leverage inducing points from the sparse Gaussian process to reduce the quadratic complexity of a transformer. Child et al. (2019) reduce the complexity by applying a combination of strided and local patterns to the vanilla attention matrix. Longformer (Beltagy et al., 2020) designs fixed diagonal sliding windows combined with a global window, where the sliding window pattern can also be extended with dilation to enlarge the receptive field. Zaheer et al. (2020) present a more powerful and expressive sparse attention mechanism, which combines multiple types of attention patterns and gives a thorough study of the sparse attention mechanism. Instead of fixed patterns, Kitaev et al. (2019) and Daras et al. (2020) group the attention computation into buckets via locality-sensitive hashing, while Roy et al. (2020) use mini-batch spherical $k$-means. Nevertheless, pattern based methods can only cope with sequences up to a certain length, and the computational complexity still grows rapidly when the input sequence becomes longer.
Kernel based methods When faced with longer input sequences, it is more efficient to directly reduce the theoretical complexity of the computation. Kernel based methods speed up self-attention by reducing its computational complexity from quadratic to linear. Vyas et al. (2020) approximate the full attention with a fixed number of cluster attention groups, assuming that queries neighbouring in Euclidean space should have similar attention distributions. Peng et al. (2020) use products of Gaussian kernel functions to approximate softmax, changing the order of the scaled dot-product calculation and thus reducing the theoretical time to linear complexity, while Choromanski et al. (2020) use a Haar-measure-based kernel instead. Wang et al. (2020) introduce a low-rank prior for the attention matrix and approximate softmax in an SVD decomposition manner. Xiong et al. (2021) utilize the Nyström method with segment-means to generate a low-rank approximation of the softmax matrix. Katharopoulos et al. (2020) formalize the transformer layer as a recurrent neural network. In this paper, we demonstrate that approximating softmax is unnecessary for the linearization of the self-attention module. We instead propose a new method that replaces softmax with a linear operation equipped with a re-weighting mechanism, which reduces both time and space complexity to $O(N)$ while maintaining the accuracy.
240
+
241
+ # 5 CONCLUSION
242
+
243
+ We presented COSFORMER, a new efficient transformer with linear time and space complexity. COSFORMER is based on two key properties of the original softmax attention: (i) every element in the attention matrix is non-negative, so that negatively-correlated information is not included in contextual information aggregation; (ii) the non-linear re-weighting scheme concentrates the distribution of the attention matrix, which better exploits the locality inductive biases in sequence modeling. To fulfill these properties in COSFORMER, we utilized the ReLU function as our linear operation to ensure non-negativity, and proposed a new cos-based re-weighting mechanism to enforce the locality bias of the original softmax attention. Since COSFORMER is naturally decomposable, it does not suffer from the accumulated approximation error that commonly occurs in previous linear transformers. On causal pre-training, bidirectional pre-training, and multiple downstream text understanding tasks, COSFORMER achieves comparable or even better performance than the vanilla transformer. On the long sequence benchmark, COSFORMER achieves state-of-the-art performance across five different tasks. Further, COSFORMER obtains a significant overall advantage in time and memory efficiency over all existing efficient transformers, making it easy to scale transformers to longer input sequences.
244
+
245
+ # REFERENCES
246
+
247
+ Abien Fred Agarap. Deep learning using rectified linear units (relu). arXiv preprint arXiv:1803.08375, 2018.
248
+ Alexei Baevski and Michael Auli. Adaptive input representations for neural language modeling. In International Conference on Learning Representations, 2018.
249
+ Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33, 2020.
250
+ Iz Beltagy, Matthew E Peters, and Arman Cohan. Longformer: The long-document transformer. arXiv preprint arXiv:2004.05150, 2020.
251
+ James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, and Skye Wanderman-Milne. JAX: composable transformations of Python+NumPy programs. Version 0.1, 55, 2018.
252
+ Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. arXiv, 2020.
253
+ Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In European Conference on Computer Vision, pp. 213-229. Springer, 2020.
254
+ Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. Generating long sequences with sparse transformers. arXiv preprint arXiv:1904.10509, 2019.
255
+ Krzysztof Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Davis, Afroz Mohiuddin, Lukasz Kaiser, et al. Rethinking attention with performers. arXiv preprint arXiv:2009.14794, 2020.
256
+ Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D Manning. What does bert look at? an analysis of bert's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pp. 276-286, 2019.
257
+ Djork-Arné Clevert, Thomas Unterthiner, and Sepp Hochreiter. Fast and accurate deep network learning by exponential linear units (elus). In Yoshua Bengio and Yann LeCun (eds.), 4th International Conference on Learning Representations, ICLR, San Juan, Puerto Rico, 2016.
258
+ Giannis Daras, Nikita Kitaev, Augustus Odena, and Alexandros G Dimakis. SMYRF: Efficient attention using asymmetric clustering. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin (eds.), NeurIPS, volume 33, pp. 6476-6489. Curran Associates, Inc., 2020.
259
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pp. 4171-4186, 2019.
260
+ Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2020.
261
+ Bolin Gao and Lacra Pavel. On the properties of the softmax function with application in game theory and reinforcement learning. arXiv preprint arXiv:1704.00805, 2017.
262
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 770-778, 2016.
263
+
264
+ Sepp Hochreiter and Jürgen Schmidhuber. Long short-term memory. Neural computation, 9(8): 1735-1780, 1997.
265
+ Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016.
266
+ Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention. In International Conference on Machine Learning, pp. 5156-5165. PMLR, 2020.
267
+ Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. In International Conference on Learning Representations, 2019.
268
+ Olga Kovaleva, Alexey Romanov, Anna Rogers, and Anna Rumshisky. Revealing the dark secrets of bert. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 4365-4374, 2019.
269
+ A. Krizhevsky and G. Hinton. Learning multiple layers of features from tiny images. Master's thesis, Department of Computer Science, University of Toronto, 2009.
270
+ Juho Lee, Yoonho Lee, Jungtaek Kim, Adam Kosiorek, Seungjin Choi, and Yee Whye Teh. Set transformer: A framework for attention-based permutation-invariant neural networks. In ICML, pp. 3744-3753, 2019.
271
+ Drew Linsley, Junkyung Kim, Vijay Veerabadran, Charlie Windolf, and Thomas Serre. Learning long-range spatial dependencies with horizontal gated recurrent units. In Proceedings of the 32nd International Conference on Neural Information Processing Systems, pp. 152-164, 2018.
272
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019.
273
+ Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030, 2021.
274
+ Antoine Liutkus, Ondrej Cífka, Shih-Lun Wu, Umut Şimşekli, Yi-Hsuan Yang, and Gael Richard. Relative positional encoding for Transformers with linear complexity. In Marina Meila and Tong Zhang (eds.), Proceedings of the 38th International Conference on Machine Learning, volume 139 of Proceedings of Machine Learning Research, pp. 7067-7079. PMLR, 18-24 Jul 2021.
275
+ Andrew Maas, Raymond E Daly, Peter T Pham, Dan Huang, Andrew Y Ng, and Christopher Potts. Learning word vectors for sentiment analysis. In Proceedings of the 49th annual meeting of the association for computational linguistics: Human language technologies, pp. 142-150, 2011.
276
+ Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. Pointer sentinel mixture models. 5th International Conference on Learning Representations, ICLR, Toulon, France, 2017.
277
+ Nikita Nangia and Samuel Bowman. Listops: A diagnostic dataset for latent tree learning. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 92-99, 2018.
278
+ Sharan Narang, Hyung Won Chung, Yi Tay, William Fedus, Thibault Fevry, Michael Matena, Krishna Malkan, Noah Fiedel, Noam Shazeer, Zhenzhong Lan, et al. Do transformer modifications transfer across implementations and applications? arXiv preprint arXiv:2102.11972, 2021.
279
+ Jianmo Ni, Jiacheng Li, and Julian McAuley. Justifying recommendations using distantly-labeled reviews and fine-grained aspects. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 188-197, 2019.
280
+ Hao Peng, Nikolaos Pappas, Dani Yogatama, Roy Schwartz, Noah Smith, and Lingpeng Kong. Random feature attention. In International Conference on Learning Representations, 2020.
281
+
282
+ Dragomir R Radev, Pradeep Muthukrishnan, Vahed Qazvinian, and Amjad Abu-Jbara. The acl anthology network corpus. Language Resources and Evaluation, 47(4):919-944, 2013.
283
+ Ali Rahimi and Benjamin Recht. Random features for large-scale kernel machines. In J. Platt, D. Koller, Y. Singer, and S. Roweis (eds.), Advances in Neural Information Processing Systems, volume 20. Curran Associates, Inc., 2008.
284
+ Aurko Roy, Mohammad Taghi Saffar, David Grangier, and Ashish Vaswani. Efficient content-based sparse attention with routing transformers. In TACL, 2020.
285
+ Steffen Schneider, Alexei Baevski, Ronan Collobert, and Michael Auli. wav2vec: Unsupervised pre-training for speech recognition. In INTERSPEECH, 2019.
286
+ Jianlin Su, Yu Lu, Shengfeng Pan, Bo Wen, and Yunfeng Liu. Roformer: Enhanced transformer with rotary position embedding. arXiv preprint arXiv:2104.09864, 2021.
287
+ Yi Tay, Dara Bahri, Liu Yang, Donald Metzler, and Da-Cheng Juan. Sparse sinkhorn attention. In International Conference on Machine Learning, pp. 9438-9447. PMLR, 2020a.
288
+ Yi Tay, Mostafa Dehghani, Samira Abnar, Yikang Shen, Dara Bahri, Philip Pham, Jinfeng Rao, Liu Yang, Sebastian Ruder, and Donald Metzler. Long range arena: A benchmark for efficient transformers. In International Conference on Learning Representations, 2020b.
289
+ Yi Tay, Dara Bahri, Donald Metzler, Da-Cheng Juan, Zhe Zhao, and Che Zheng. Synthesizer: Rethinking self-attention for transformer models. In International Conference on Machine Learning, pp. 10183–10192. PMLR, 2021.
290
+ Michalis K Titsias. One-vs-each approximation to softmax for scalable estimation of probabilities. arXiv preprint arXiv:1609.07410, 2016.
291
+ Yao-Hung Hubert Tsai, Shaojie Bai, Makoto Yamada, Louis-Philippe Morency, and Ruslan Salakhutdinov. Transformer dissection: An unified understanding for transformer's attention via the lens of kernel. In EMNLP, 2019.
292
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pp. 5998-6008, 2017.
293
+ A. Vyas, A. Katharopoulos, and F. Fleuret. Fast transformers with clustered attention. In NeurIPS, 2020.
294
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pp. 353-355, 2018.
295
+ Sinong Wang, Belinda Z Li, Madian Khabsa, Han Fang, and Hao Ma. Linformer: Self-attention with linear complexity. arXiv preprint arXiv:2006.04768, 2020.
296
+ Yunyang Xiong, Zhanpeng Zeng, Rudrasis Chakraborty, Mingxing Tan, Glenn Fung, Yin Li, and Vikas Singh. Nyströmformer: A nyström-based algorithm for approximating self-attention. In AAAI, 2021.
297
+ Manzil Zaheer, Guru Guruganesh, Kumar Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontañón, Philip Pham, Anirudh Ravula, Qifan Wang, Li Yang, et al. Big bird: Transformers for longer sequences. In NeurIPS, 2020.
298
+
299
+ # A APPENDIX
300
+
301
+ # A.1 MATHEMATICAL DERIVATION OF $\cos$-BASED RE-WEIGHTING
302
+
303
+ Following Equation 11, we give a detailed derivation of how to obtain the output at the $i$-th position:
304
+
305
+ $$
+ \begin{aligned}
+ O_{i} &= \frac{\sum_{j=1}^{N} f\left(Q_{i}^{\prime}, K_{j}^{\prime}\right) V_{j}}{\sum_{j=1}^{N} f\left(Q_{i}^{\prime}, K_{j}^{\prime}\right)} \\
+ &= \frac{\sum_{j=1}^{N}\left(Q_{i}^{\cos}\left(K_{j}^{\cos}\right)^{T} + Q_{i}^{\sin}\left(K_{j}^{\sin}\right)^{T}\right) V_{j}}{\sum_{j=1}^{N}\left(Q_{i}^{\cos}\left(K_{j}^{\cos}\right)^{T} + Q_{i}^{\sin}\left(K_{j}^{\sin}\right)^{T}\right)} \\
+ &= \frac{\sum_{j=1}^{N} Q_{i}^{\cos}\left(K_{j}^{\cos}\right)^{T} V_{j} + \sum_{j=1}^{N} Q_{i}^{\sin}\left(K_{j}^{\sin}\right)^{T} V_{j}}{\sum_{j=1}^{N} Q_{i}^{\cos}\left(K_{j}^{\cos}\right)^{T} + \sum_{j=1}^{N} Q_{i}^{\sin}\left(K_{j}^{\sin}\right)^{T}} \tag{13} \\
+ &= \frac{Q_{i}^{\cos} \sum_{j=1}^{N}\left(\left(K_{j}^{\cos}\right)^{T} V_{j}\right) + Q_{i}^{\sin} \sum_{j=1}^{N}\left(\left(K_{j}^{\sin}\right)^{T} V_{j}\right)}{Q_{i}^{\cos} \sum_{j=1}^{N}\left(K_{j}^{\cos}\right)^{T} + Q_{i}^{\sin} \sum_{j=1}^{N}\left(K_{j}^{\sin}\right)^{T}}
+ \end{aligned}
+ $$
308
+
309
+ where $i = 1, \dots, N$, $j = 1, \dots, N$, $M \geq N$, and $Q' = \mathrm{ReLU}(Q)$, $K' = \mathrm{ReLU}(K)$. Let $Q_i^{\cos} = Q_i' \cos \left(\frac{\pi i}{2M}\right)$, $Q_i^{\sin} = Q_i' \sin \left(\frac{\pi i}{2M}\right)$, $K_j^{\cos} = K_j' \cos \left(\frac{\pi j}{2M}\right)$, and $K_j^{\sin} = K_j' \sin \left(\frac{\pi j}{2M}\right)$. This shows that the output of the proposed COSFORMER attention can be obtained in a linear manner.
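+
+ For completeness, the split of $f$ into cos and sin terms in the second line above follows from the angle-difference identity $\cos(x - y) = \cos x \cos y + \sin x \sin y$, assuming, as in Equation 11, that $f\left(Q_i', K_j'\right) = Q_i' \left(K_j'\right)^T \cos \left(\frac{\pi (i - j)}{2M}\right)$:
+
+ $$
+ Q_i' \left(K_j'\right)^T \cos \left(\frac{\pi (i - j)}{2M}\right) = Q_i' \left(K_j'\right)^T \left(\cos \frac{\pi i}{2M} \cos \frac{\pi j}{2M} + \sin \frac{\pi i}{2M} \sin \frac{\pi j}{2M}\right) = Q_i^{\cos}\left(K_j^{\cos}\right)^T + Q_i^{\sin}\left(K_j^{\sin}\right)^T.
+ $$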
310
+
311
+ # A.2 PSEUDO CODE OF COSFORMER
312
+
313
+ Algorithm 1 describes how to compute the COSFORMER attention.
314
+
315
+ # Algorithm 1 COSFORMER attention
316
+
317
+ Input: $Q\in \mathbb{R}^{N\times d_1}$, $K\in \mathbb{R}^{M\times d_1}$, $V\in \mathbb{R}^{M\times d_2}$
318
+
319
+ Output: $O\in \mathbb{R}^{N\times d_2}$
320
+
321
+ Use $M_{i}$ to represent the $i$ -th row of matrix $M$ ;
322
+
323
+ Initialize $A[i] = \frac{\pi i}{2N}, O[i][j] = 0, i = 1, \dots, N, j = 1, \dots, d_2$ ;
324
+
325
+ Initialize $S^{\mathrm{cos}}[i][j] = 0, S^{\mathrm{sin}}[i][j] = 0, T^{\mathrm{cos}}[i] = 0, T^{\mathrm{sin}}[i] = 0, i = 1,\ldots,d_1, j = 1,\ldots,d_2$ ;
326
+
327
+ for $i$ in $1,\ldots ,M$ do:
328
+
329
+ $$
330
+ K _ {i} ^ {\cos} = K _ {i} \cos \left(\frac {\pi i}{2 M}\right), K _ {i} ^ {\sin} = K _ {i} \sin \left(\frac {\pi i}{2 M}\right);
331
+ $$
332
+
333
+ $$
334
+ S ^ {\cos} + = \left(K _ {i} ^ {\cos}\right) ^ {T} V _ {i};
335
+ $$
336
+
337
+ $$
338
+ S ^ {\sin} + = \left(K _ {i} ^ {\sin}\right) ^ {T} V _ {i};
339
+ $$
340
+
341
+ $$
342
+ T ^ {\cos} + = K _ {i} ^ {\cos};
343
+ $$
344
+
345
+ $$
346
+ T ^ {\sin} + = K _ {i} ^ {\sin};
347
+ $$
348
+
349
+ end for
350
+
351
+ for $i$ in $1,\ldots ,N$ do:
352
+
353
+ $$
354
+ Q _ {i} ^ {\mathrm {c o s}} = Q _ {i} \cos \left(\frac {\pi i}{2 M}\right), Q _ {i} ^ {\mathrm {s i n}} = Q _ {i} \sin \left(\frac {\pi i}{2 M}\right);
355
+ $$
356
+
357
+ $$
358
+ O _ {i} = \frac {Q _ {i} ^ {\cos} S ^ {\cos} + Q _ {i} ^ {\sin} S ^ {\sin}}{Q _ {i} ^ {\cos} T ^ {\cos} + Q _ {i} ^ {\sin} T ^ {\sin}};
359
+ $$
360
+
361
+ end for
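+
+ As a concrete companion to Algorithm 1, below is a minimal NumPy sketch of non-causal COSFORMER attention (an illustrative implementation, not the official release; applying the ReLU of Section A.1 inside the function and adding a small epsilon to the denominator are our choices):
+
+ ```python
+ import numpy as np
+
+ def cosformer_attention(Q, K, V, eps=1e-6):
+     """Linear-complexity cosFormer attention following Algorithm 1.
+
+     Q: (N, d1), K: (L, d1), V: (L, d2); returns O: (N, d2).
+     """
+     N, L = Q.shape[0], K.shape[0]
+     M = max(N, L)  # M >= N, as required by the derivation in A.1
+
+     # Non-negativity via ReLU (Q' and K' in the notation of A.1)
+     Qp, Kp = np.maximum(Q, 0.0), np.maximum(K, 0.0)
+
+     # cos/sin re-weighting by absolute position (1-indexed as in the paper)
+     i = np.arange(1, N + 1)[:, None]
+     j = np.arange(1, L + 1)[:, None]
+     Qc, Qs = Qp * np.cos(np.pi * i / (2 * M)), Qp * np.sin(np.pi * i / (2 * M))
+     Kc, Ks = Kp * np.cos(np.pi * j / (2 * M)), Kp * np.sin(np.pi * j / (2 * M))
+
+     # Aggregate keys and values first: S terms are (d1, d2), T terms are (d1,)
+     Sc, Ss = Kc.T @ V, Ks.T @ V
+     Tc, Ts = Kc.sum(axis=0), Ks.sum(axis=0)
+
+     # Apply queries last: O(N * d1 * d2) overall, linear in sequence length
+     num = Qc @ Sc + Qs @ Ss
+     den = Qc @ Tc + Qs @ Ts
+     return num / (den[:, None] + eps)
+ ```
+
+ For small $N$, the result can be checked against the quadratic form in Equation 13, which materializes the full similarity matrix explicitly.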
362
+
363
+ # A.3 ALGORITHM TO VISUALIZE ATTENTION MATRIX
364
+
365
+ Algorithm 2 describes the procedure used to visualize the attention matrices as in Figure 3.
366
+
367
+ Algorithm 2 Algorithm to visualize attention matrix
368
+ ```txt
+ Input: M_k ∈ R^(d×d), k = 1,...,n; threshold ∈ [0,1]
+ Output: M ∈ R^(d×d)
+ Initialize M[i][j] = 0, i = 1,...,d, j = 1,...,d
+ for k in 1,...,n do:
+     for i in 1,...,d do:
+         index = argsort(M_k[i])   (in descending order)
+         p = 0
+         for j in 1,...,d do:
+             l = index[j]
+             p += M_k[i][l]
+             M[i][l] += 1
+             if p > threshold then:
+                 break
+             end if
+         end for
+     end for
+ end for
+ M /= n
+ Use heatmap to visualize M
+ ```
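+
+ For reference, a direct transcription of Algorithm 2 into Python (illustrative only; the function and variable names are ours):
+
+ ```python
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ def visualize_attention(mats, threshold=0.9):
+     """mats: n attention matrices of shape (d, d), each row a distribution."""
+     n, d = len(mats), mats[0].shape[0]
+     M = np.zeros((d, d))
+     for Mk in mats:                          # loop over attention matrices
+         for i in range(d):
+             order = np.argsort(Mk[i])[::-1]  # column indices, descending
+             p = 0.0
+             for l in order:                  # accumulate top attention mass
+                 p += Mk[i][l]
+                 M[i][l] += 1
+                 if p > threshold:
+                     break
+     M /= n
+     plt.imshow(M)                            # heatmap as in Figure 3
+     plt.colorbar()
+     plt.show()
+     return M
+ ```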
374
+
375
+ # A.4 INTRODUCTION OF DATASET
376
+
377
+ We train both models on autoregressive language modeling and bidirectional modeling using the WikiText-103 dataset, which is split by tokens; its statistics are shown in Table 7. We then fine-tune the pre-trained bidirectional model on several text classification tasks.
378
+
379
+ The QQP dataset contains thousands of sentence pairs from the community question-answering website Quora. The network needs to determine whether a pair of questions is semantically equivalent. SST-2 and IMDB are collections of movie reviews; the task is to determine whether a review is positive or not. The AMAZON dataset contains millions of product reviews from Amazon; the task is to infer the rating of a product from its review text. MNLI is a crowd-sourced collection of sentence pairs; the network must distinguish which of the three categories (entailment, contradiction, or neutral) a given sentence pair belongs to.
380
+
381
+ The long-range-arena benchmark contains 5 different datasets. ListOps contains cleverly designed mathematical problems that probe the parsing ability of neural models. IMDB is also used in this benchmark to examine the text classification ability of neural models. CIFAR-10 is an image collection of various objects; this task requires models to capture 2D spatial relations between flattened pixels. In the Pathfinder task, models need to determine whether two points in a picture are connected, so as to examine the model's ability to acquire 2D spatial relationships. The AAN dataset is used to evaluate the ability of models to encode and store compressed representations for retrieval.
382
+
383
+ <table><tr><td>Data</td><td>Train</td><td>Valid</td><td>Test</td></tr><tr><td>WikiText-103</td><td>103M</td><td>218K</td><td>246K</td></tr><tr><td>QQP</td><td>364K</td><td>-</td><td>391K</td></tr><tr><td>SST-2</td><td>67K</td><td>-</td><td>1.8K</td></tr><tr><td>MNLI</td><td>393K</td><td>-</td><td>20K</td></tr><tr><td>IMDB</td><td>25K</td><td>-</td><td>25K</td></tr><tr><td>AMAZON</td><td>3M</td><td>168K</td><td>168K</td></tr><tr><td>ListOps</td><td>90K</td><td>-</td><td>10K</td></tr><tr><td>AAN</td><td>147K</td><td>18K</td><td>17K</td></tr><tr><td>CIFAR-10</td><td>50K</td><td>-</td><td>10K</td></tr><tr><td>Pathfinder</td><td>160K</td><td>-</td><td>20K</td></tr></table>
384
+
385
+ Table 7: Statistics for the datasets. A subset of the "Small" Amazon dataset on the electronics category is used for the experiments.
386
+
387
+ # A.5 QUALITATIVE RESULTS OF LRA
388
+
389
+ We provide qualitative results on the ListOps and Document Retrieval tasks of the Long-Range-Arena benchmark (Tay et al., 2020b), with a comparison to the vanilla transformer.
390
+
391
+ ListOps is a ten-way classification task that aims to predict the result of a sequence with a hierarchical structure and the operators MAX, MEAN, MEDIAN, and SUM MOD, which are enclosed by delimiters (brackets). The network needs to access all tokens and model the logical structure of the inputs in order to make a prediction.
392
+
393
+ The Document Retrieval task is to decide whether two long input documents are similar or not, with a binary label. This task evaluates a model's ability to encode and store compressed representations that are useful for matching and retrieval. Since the samples in LRA are very long, we substantially shorten some selected samples and display them below:
394
+
395
+ ListOps:
396
+ Listing 1: Examples of ListOps
397
+ ```txt
398
+ 1 Input: ( ( ( ( ( [MED 7 ) 9 ) 3 ) 1 ......... 5 ) 6 ) 8 ) ) ) 2 )
399
+ 8 ) 9 ) 5 ) 0 ) ] ) 8 ) 5 ) 1 ) 2 ) ] Our Output: 0,
400
+ Transformer output: 9, Ground-truth: 0
401
+ 2
402
+ 3 Input: ( ( ( ( ( ( ( [SM 5 ) 6 ) 0 ) 7 ) 1 ) ( ( ( ( ( ( ......... ( ( Input: ( ( [MIN 5 ) 8 ) 1 ) 0 ) (( [MED ( ( ( ( 8 ) 7 ) 2 ) 8 ) 1 ) 8 ) ] ) 7 ) ]) ) Our output: 9, Transformer output: 3, Ground-truth: 9
403
+ 4
404
+ 5 Input: ( ( ( ( ( ( ( [MAX 7 ) 4 ) 8 ) ( ( ( ( ( ( ( [MAX 5 ) 2 ) ( ( [SM 3 ) 6 ) 9 ) ( ( ......... ) ) 1 ) 6 ) 4 ) 2 )) ) ) Our output: 9, Transformer output: 5, Ground-truth: 9
405
+ ```
406
+
407
+ Byte-level document retrieval:
408
+ Listing 2: Examples of Document Retrieval
409
+ ```txt
410
+ 1 Text1: b'1 Introduction Recent advances in Statistical Machine Translation (SMT) are widely centred around two concepts: (a) hierarchical translation processes, frequently employing Synchronous Context Free Grammars (SCFGs) and (b) transduction or synchronous rewrite processes over a linguistic .... 2
411
+ 3 Text2: b'1 Introduction Automatic Grammatical Error Correction (GEC) for non-native English language learners has attracted more and more attention with the development of natural language processing, machine learning and big-data techniques. ?The CoNLL2013 shared task focuses on the problem of GEC in five different error types including determiner, preposition, noun number.... 4
412
+ 5 Our output: False, Transformer output: True, Ground-truth: False
413
+ ```
2202.08xxx/2202.08791/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de3e74ed96876cf054fbaaeb69faf356a31cf7820880dbebc793e1323f8796c0
3
+ size 603891
2202.08xxx/2202.08791/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08792/13a6d837-3222-427d-a1a3-77b4e886a508_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08792/13a6d837-3222-427d-a1a3-77b4e886a508_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08792/13a6d837-3222-427d-a1a3-77b4e886a508_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f004ec2a7822d0c55bfe2410d864cf839759ae262ab0172765cc33790cd83811
3
+ size 384945
2202.08xxx/2202.08792/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08792/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e11ce7766db8a9505f97063b5fd6ecd76450d288d31511e81f4b361bb11e9b62
3
+ size 435301
2202.08xxx/2202.08792/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08814/0e4977c0-9108-464d-ba63-1e8d8819dfe2_content_list.json ADDED
@@ -0,0 +1,1236 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 119,
8
+ 99,
9
+ 879,
10
+ 151
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Lei Jiang*",
17
+ "bbox": [
18
+ 189,
19
+ 162,
20
+ 269,
21
+ 179
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "jiang60@iu.edu",
28
+ "bbox": [
29
+ 176,
30
+ 180,
31
+ 284,
32
+ 193
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Indiana University",
39
+ "bbox": [
40
+ 166,
41
+ 194,
42
+ 295,
43
+ 209
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Qian Lou",
50
+ "bbox": [
51
+ 460,
52
+ 162,
53
+ 537,
54
+ 178
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "qlou@iu.edu",
61
+ "bbox": [
62
+ 455,
63
+ 180,
64
+ 542,
65
+ 193
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Indiana University",
72
+ "bbox": [
73
+ 436,
74
+ 194,
75
+ 562,
76
+ 209
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "Nrushad Joshi",
83
+ "bbox": [
84
+ 709,
85
+ 162,
86
+ 826,
87
+ 178
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "nrujoshi@iu.edu",
94
+ "bbox": [
95
+ 710,
96
+ 180,
97
+ 823,
98
+ 193
99
+ ],
100
+ "page_idx": 0
101
+ },
102
+ {
103
+ "type": "text",
104
+ "text": "Indiana University",
105
+ "bbox": [
106
+ 704,
107
+ 194,
108
+ 831,
109
+ 209
110
+ ],
111
+ "page_idx": 0
112
+ },
113
+ {
114
+ "type": "text",
115
+ "text": "Abstract",
116
+ "text_level": 1,
117
+ "bbox": [
118
+ 83,
119
+ 218,
120
+ 156,
121
+ 232
122
+ ],
123
+ "page_idx": 0
124
+ },
125
+ {
126
+ "type": "text",
127
+ "text": "Fully Homomorphic Encryption over the Torus (TFHE) allows arbitrary computations to happen directly on ciphertexts using homomorphic logic gates. However, each TFHE gate on state-of-the-art hardware platforms such as GPUs and FPGAs is extremely slow ( $>0.2ms$ ). Moreover, even the latest FPGA-based TFHE accelerator cannot achieve high energy efficiency, since it frequently invokes expensive double-precision floating point FFT and IFFT kernels. In this paper, we propose a fast and energy-efficient accelerator, MATCHA, to process TFHE gates. MATCHA supports aggressive bootstrapping key unrolling to accelerate TFHE gates without decryption errors by approximate multiplication-less integer FFTs and IFFTs, and a pipelined datapath. Compared to prior accelerators, MATCHA improves the TFHE gate processing throughput by $2.3\\times$ and the throughput per Watt by $6.3\\times$ .",
128
+ "bbox": [
129
+ 81,
130
+ 234,
131
+ 482,
132
+ 429
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "text",
138
+ "text": "CCS Concepts",
139
+ "text_level": 1,
140
+ "bbox": [
141
+ 83,
142
+ 433,
143
+ 200,
144
+ 449
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "text",
150
+ "text": "- Hardware $\\rightarrow$ Application-specific VLSI designs; $\\cdot$ Security and privacy $\\rightarrow$ Cryptography.",
151
+ "bbox": [
152
+ 83,
153
+ 450,
154
+ 480,
155
+ 478
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "text",
161
+ "text": "Keywords",
162
+ "text_level": 1,
163
+ "bbox": [
164
+ 83,
165
+ 481,
166
+ 168,
167
+ 497
168
+ ],
169
+ "page_idx": 0
170
+ },
171
+ {
172
+ "type": "text",
173
+ "text": "accelerator, fully homomorphic encryption, TFHE, bootstrapping",
174
+ "bbox": [
175
+ 83,
176
+ 500,
177
+ 477,
178
+ 513
179
+ ],
180
+ "page_idx": 0
181
+ },
182
+ {
183
+ "type": "text",
184
+ "text": "ACM Reference Format:",
185
+ "text_level": 1,
186
+ "bbox": [
187
+ 83,
188
+ 518,
189
+ 230,
190
+ 529
191
+ ],
192
+ "page_idx": 0
193
+ },
194
+ {
195
+ "type": "text",
196
+ "text": "Lei Jiang, Qian Lou, and Nrushad Joshi. 2022. MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus. In The 59th Annual Design Automation Conference 2022 (DAC '22), July 10-14, 2022, San Francisco, CA, USA. ACM, New York, NY, USA, 6 pages.",
197
+ "bbox": [
198
+ 83,
199
+ 530,
200
+ 482,
201
+ 580
202
+ ],
203
+ "page_idx": 0
204
+ },
205
+ {
206
+ "type": "text",
207
+ "text": "1 Introduction",
208
+ "text_level": 1,
209
+ "bbox": [
210
+ 84,
211
+ 594,
212
+ 218,
213
+ 607
214
+ ],
215
+ "page_idx": 0
216
+ },
217
+ {
218
+ "type": "text",
219
+ "text": "In cloud computing, it is dangerous for clients upload their raw data to untrusted cloud servers, due to potential data breaches. Moreover, recent legislation [12] requires cloud computing enterprises to provide sufficient security for clients' personal data.",
220
+ "bbox": [
221
+ 81,
222
+ 609,
223
+ 482,
224
+ 666
225
+ ],
226
+ "page_idx": 0
227
+ },
228
+ {
229
+ "type": "text",
230
+ "text": "Recently, Fully Homomorphic Encryption (FHE) [3, 5, 6] emerges as one of the most promising cryptographic solutions to allowing arbitrary computations on encrypted data in untrusted cloud servers. Compared to Secure Multi-Party Computation, FHE requires neither frequent communications between clients and cloud servers, nor significant circuit garbling overhead on the client side. FHE",
231
+ "bbox": [
232
+ 81,
233
+ 666,
234
+ 482,
235
+ 750
236
+ ],
237
+ "page_idx": 0
238
+ },
239
+ {
240
+ "type": "text",
241
+ "text": "*This work was partially supported by NSF through awards CCF-1908992, CCF-1909509, and CCF-210597. Work done while Nrushad Joshi was at UROC@Luddy IU.",
242
+ "bbox": [
243
+ 81,
244
+ 758,
245
+ 482,
246
+ 781
247
+ ],
248
+ "page_idx": 0
249
+ },
250
+ {
251
+ "type": "text",
252
+ "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.",
253
+ "bbox": [
254
+ 81,
255
+ 790,
256
+ 482,
257
+ 862
258
+ ],
259
+ "page_idx": 0
260
+ },
261
+ {
262
+ "type": "text",
263
+ "text": "DAC '22, July 10-14, 2022, San Francisco, CA, USA",
264
+ "bbox": [
265
+ 84,
266
+ 862,
267
+ 316,
268
+ 873
269
+ ],
270
+ "page_idx": 0
271
+ },
272
+ {
273
+ "type": "text",
274
+ "text": "© 2022 Association for Computing Machinery.",
275
+ "bbox": [
276
+ 84,
277
+ 875,
278
+ 303,
279
+ 885
280
+ ],
281
+ "page_idx": 0
282
+ },
283
+ {
284
+ "type": "table",
285
+ "img_path": "images/029f334f11df254c7d65ebf4b707d9ab27c7c681c1a8cc6dde512a97ce78e7f0.jpg",
286
+ "table_caption": [
287
+ "Table 1: The comparison between various HE schemes."
288
+ ],
289
+ "table_footnote": [],
290
+ "table_body": "<table><tr><td>Scheme</td><td>FHE Op.</td><td>Data Type</td><td>Bootstrapping</td></tr><tr><td>BGV [3]</td><td>mult, add</td><td>integer</td><td>~ 800s</td></tr><tr><td>BFV [9]</td><td>mult, add</td><td>integer</td><td>&gt; 1000s</td></tr><tr><td>CKKS [5]</td><td>mult, add</td><td>fixed point</td><td>~ 500s</td></tr><tr><td>FHEW [8]</td><td>Boolean</td><td>binary</td><td>&lt; 1s</td></tr><tr><td>TFHE [6]</td><td>Boolean</td><td>binary</td><td>13ms</td></tr></table>",
291
+ "bbox": [
292
+ 586,
293
+ 233,
294
+ 841,
295
+ 305
296
+ ],
297
+ "page_idx": 0
298
+ },
299
+ {
300
+ "type": "text",
301
+ "text": "enables a client to encrypt her data and to send only ciphertexts to a cloud server that can directly evaluate homomorphic functions, e.g., encrypted neural inferences [4] or encrypted general-purpose computing [14], on the ciphertexts. When all computations are completed, the server returns the encrypted results to the client without learning any intermediate or final output, due to the end-to-end encrypted data flow. Only the client can decrypt the results by her secret key.",
302
+ "bbox": [
303
+ 511,
304
+ 311,
305
+ 913,
306
+ 421
307
+ ],
308
+ "page_idx": 0
309
+ },
310
+ {
311
+ "type": "text",
312
+ "text": "Among all FHE cryptosystems, FHE over the Torus (TFHE) [6] is the most efficient scheme supporting arbitrary operations with an unlimited computation depth, as shown in Table 1. First, TFHE supports arbitrary operations by various homomorphic Boolean logic gates. Traditional FHE schemes such as BGV [3], BFV [9], and CKKS [5] can perform only homomorphic additions and multiplications, while both FHEW [8] and TFHE [6] can enable homomorphic Boolean algebra, e.g., NAND, XOR, and XNOR gates. Second, TFHE obtains the fastest bootstrapping. Each FHE operation inevitably introduces a certain amount of noise into the ciphertext. If there are too many FHE operations on the computational critical path, the accumulated noise in the ciphertext may exceed a threshold, and thus the ciphertext cannot be decrypted successfully. To support an unlimited computation depth, a FHE scheme has to periodically invoke a bootstrapping operation to decrease the amount of noise in the ciphertext. The bootstrapping operation is extremely expensive for BGV, BFV, and CKKS. For example, a BGV bootstrapping typically costs several hundred seconds [11]. Therefore, these FHE schemes can support only a limited computation depth by designing a large enough noise budget. Although a bootstrapping of FHEW takes only 1s, TFHE can obtain a even faster bootstrapping, i.e., a TFHE bootstrapping requires only $13ms$ on a CPU. By fast bootstrapping, TFHE allows an unlimited computation depth.",
313
+ "bbox": [
314
+ 511,
315
+ 422,
316
+ 913,
317
+ 739
318
+ ],
319
+ "page_idx": 0
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "Unfortunately, a TFHE-based complex circuit consisting of multiple TFHE gates is still extremely slow. For instance, a TFHE-based simple RISC-V CPU [14] comprising thousands of TFHE gates can run at only $1.25Hz$ . In order to realize practical TFHE-based computing, it is critical to accelerate TFHE gates by specialized hardware. However, TFHE is only well-implemented on CPUs [16] and GPUs [7]. Although a recent work [10] accelerates TFHE gates on a FPGA, the TFHE gate latency on the FPGA is much longer than that on a GPU. To the best of our knowledge, there is no ASIC-based hardware accelerator for TFHE.",
324
+ "bbox": [
325
+ 511,
326
+ 739,
327
+ 913,
328
+ 878
329
+ ],
330
+ "page_idx": 0
331
+ },
332
+ {
333
+ "type": "aside_text",
334
+ "text": "arXiv:2202.08814v1 [cs.CR] 17 Feb 2022",
335
+ "bbox": [
336
+ 22,
337
+ 263,
338
+ 57,
339
+ 707
340
+ ],
341
+ "page_idx": 0
342
+ },
343
+ {
344
+ "type": "text",
345
+ "text": "In this paper, we propose a fast and energy-efficient accelerator, MATCHA, to process TFHE gates. We find that the bootstrapping dominates the latency of all TFHE logic operations. The kernels of fast Fourier transform (FFT) and inverse FFT (IFFT) are the bottlenecks in a bootstrapping operation. MATCHA is designed to accelerate the TFHE bootstrapping using approximate multiplication-less integer FFTs and IFFTs. We also propose a pipelined datapath for MATCHA to support aggressive bootstrapping key unrolling [2, 22] that invokes FFTs and IFFTs less frequently. Our contributions can be summarized as follows.",
346
+ "bbox": [
347
+ 81,
348
+ 106,
349
+ 482,
350
+ 243
351
+ ],
352
+ "page_idx": 1
353
+ },
354
+ {
355
+ "type": "list",
356
+ "sub_type": "text",
357
+ "list_items": [
358
+ "- In order to fully take advantage of the error tolerance capability of TFHE, MATCHA accelerates polynomial multiplications by approximate multiplication-less integer FFTs and IFFTs requiring only additions and binary shifts. Although approximate FFTs and IFFTs introduce errors in each ciphertext, the ciphertext can still be correctly decrypted, since the errors can be rounded off along with the noise during decryption.",
359
+ "- We build a pipelined datapath consisting of TGSW clusters and external product cores to enable aggressive bootstrapping key unrolling that invokes FFTs and IFFTs less frequently during a bootstrapping operation. The datapath uses different register banks to serve sequential memory accesses during TGSW operations, and irregular memory accesses during FFTs and IFFTs.",
360
+ "- We implemented, evaluated, and compared MATCHA against prior TFHE hardware accelerators. Compared to prior accelerators, MATCHA improves the TFHE gate processing throughput by $2.3 \\times$ , and the throughput per Watt by $6.3 \\times$ ."
361
+ ],
362
+ "bbox": [
363
+ 83,
364
+ 244,
365
+ 482,
366
+ 481
367
+ ],
368
+ "page_idx": 1
369
+ },
370
+ {
371
+ "type": "text",
372
+ "text": "2 Background",
373
+ "text_level": 1,
374
+ "bbox": [
375
+ 83,
376
+ 497,
377
+ 212,
378
+ 512
379
+ ],
380
+ "page_idx": 1
381
+ },
382
+ {
383
+ "type": "text",
384
+ "text": "FHE. Fully Homomorphic Encryption (FHE) enables arbitrary operations on ciphertexts. A FHE operation $\\diamond$ is defined if there is another operation $\\star$ such that $Dec[Enc(x_1)\\diamond Enc(x_2)] = Dec[Enc(x_1\\star x_2)]$ , where $x_{1}$ and $x_{2}$ are input plaintexts, $Enc$ indicates encryption, and $Dec$ is decryption.",
385
+ "bbox": [
386
+ 81,
387
+ 520,
388
+ 482,
389
+ 589
390
+ ],
391
+ "page_idx": 1
392
+ },
393
+ {
394
+ "type": "text",
395
+ "text": "Notation. $\\mathbb{T}$ denotes the torus of real numbers modulo $1, \\mathbb{R} / \\mathbb{Z}$ . For any ring $\\mathcal{R}$ , polynomials of the variable $X$ with coefficients in $\\mathcal{R}$ are represented by $\\mathcal{R}[X]$ . We define $\\mathbb{R}_N[X] := \\mathbb{R}[X] / (X^N + 1)$ , $\\mathbb{Z}_N[X] := \\mathbb{Z}[X] / (X^N + 1)$ , and $\\mathbb{T}_N[X] := \\mathbb{R}_N[X] / \\mathbb{Z}_N[X]$ , which are the ring of polynomials of variable $X$ with quotient $X^N + 1$ and real coefficients modulo $1$ . $\\mathbb{B} := \\{0, 1\\}$ is a set, and we write vectors in bold. Given a set $S$ , we write $\\mathbf{s} \\stackrel{\\$}{\\leftarrow} S$ to indicate that $\\mathbf{s}$ is sampled uniformly at random from $S$ . We write $e \\gets X$ to denote that $e$ is sampled according to $X$ .",
396
+ "bbox": [
397
+ 81,
398
+ 590,
399
+ 482,
400
+ 718
401
+ ],
402
+ "page_idx": 1
403
+ },
404
+ {
405
+ "type": "text",
406
+ "text": "TFHE. In TFHE [6], we assume $m \\in \\mathbb{B}$ is a plaintext. The encryption scheme works as follows:",
407
+ "bbox": [
408
+ 83,
409
+ 719,
410
+ 482,
411
+ 746
412
+ ],
413
+ "page_idx": 1
414
+ },
415
+ {
416
+ "type": "list",
417
+ "sub_type": "text",
418
+ "list_items": [
419
+ "- Setup(λ) first selects public parameters $n = n(\\lambda)$ , and $\\sigma = \\sigma(\\lambda)$ , where $\\lambda$ is the security parameter. It samples and produces a secret key $s \\stackrel{\\$}{\\leftarrow} \\mathbb{B}^n$ .",
420
+ "- Enc[s, m] samples a uniformly random vector $\\mathbf{a} \\stackrel{\\$}{\\leftarrow} \\mathbb{T}^n$ and a noise $e \\gets \\mathcal{D}_{\\mathbb{T}_N[X], \\sigma}$ , where $\\mathcal{D}_{\\mathbb{T}_N[X], \\sigma}$ is the Gaussian distribution over $\\mathbb{T}_N[X]$ with a standard deviation $\\sigma$ . It outputs a ciphertext $(\\mathbf{a}, b)$ , where $b = \\mathbf{a} \\cdot \\mathbf{s} + e + m/2$ .",
421
+ "- $\\text{Dec}[\\mathbf{s}, (\\mathbf{a}, b)]$ returns $\\lceil 2(b - \\mathbf{a} \\cdot \\mathbf{s}) \\rceil$ . It outputs plaintext correctly if the size of noise $e$ is bounded as $|e| < 1/4$ , since $2(b - \\mathbf{a} \\cdot \\mathbf{s}) = 2e + m$ , $|2e| < 1/2$ , and thus $\\lceil 2(b - \\mathbf{a} \\cdot \\mathbf{s}) \\rceil = m$ ."
422
+ ],
423
+ "bbox": [
424
+ 83,
425
+ 747,
426
+ 482,
427
+ 896
428
+ ],
429
+ "page_idx": 1
430
+ },
431
+ {
432
+ "type": "code",
433
+ "sub_type": "algorithm",
434
+ "code_caption": [
435
+ "Algorithm 1: The bootstrapping operation of TFHE."
436
+ ],
437
+ "code_body": "Input: A TLWE sample $(\\mathbf{a},b)$ whose plaintext is $m_{in}$ ; a constant $m_{set}$ ; a bootstrapping key $\\mathbf{BK}_{\\mathbf{s} \\rightarrow \\mathbf{s}^{\\prime \\prime},\\alpha}$ ; and a key-switching key $\\mathbf{KS}_{\\mathbf{s}^{\\prime} \\rightarrow \\mathbf{s},\\mathbf{y}^{\\prime}}$ ( $\\mathbf{s}^{\\prime} = \\mathbf{KeyExtract}(\\mathbf{s}^{\\prime \\prime})$ ). Output: A TLWE sample encrypting $m_{out} = m_{in} \\cdot m_{set}$ . \n1 $\\mu = m_{set} / 2, \\mu^{\\prime} = \\mu / 2$ /* Initialization */ \n2 $\\bar{b} = \\lceil 2Nb \\rceil, \\bar{a}_i = \\lceil 2Na_i \\rceil$ for each $i \\in [1,n] \\wedge$ Rounding */ \n3 $testv = (1 + X + \\ldots + X^{N+1}) \\cdot X^{N/2} \\cdot \\mu'$ \n4 ACC $\\leftarrow X^{\\bar{b}} \\cdot (0, testv) / *$ ACC = TLWE $(X^{(\\bar{b}-\\bar{a}s)} \\cdot testv)$ */ \n5 for $i = 1$ to $n$ do \n6 $\\mathbf{BK}_i = \\mathbf{h} + (X^{-\\bar{a}_i} - 1) \\cdot \\mathbf{BK}_i$ \n7 ACC $\\leftarrow \\mathbf{BK}_i \\square ACC / *$ BlindRotate */ \n8 $\\mathbf{u} = (0,\\mu') + SampleExtract(ACC) / *$ Extract */ \n9 return KeySwitchKS(u) /* KeySwitch */",
438
+ "bbox": [
439
+ 519,
440
+ 125,
441
+ 913,
442
+ 324
443
+ ],
444
+ "page_idx": 1
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "- Logic $[c_0, c_1]$ returns the ciphertext of the result of the logic operation between two ciphertexts $c_0$ and $c_1$ , and the logic operation can be XOR, NAND, AND, and OR. A TFHE logic operation involves an addition between $c_0$ and $c_1$ , and a bootstrapping.",
449
+ "bbox": [
450
+ 514,
451
+ 334,
452
+ 913,
453
+ 388
454
+ ],
455
+ "page_idx": 1
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "TLWE. TLWE is a torus analogue of the learning with error (LWE) problem [3]. $k$ is a positive integer. $N$ is a power of 2, and $\\mathcal{X}$ is a probability distribution over $\\mathbb{R}_N[X]$ . A TLWE secret key $\\bar{\\mathbf{s}}$ is a vector of $k$ polynomials over $\\mathbb{Z}_N[X]$ with binary coefficients, denoted as $\\bar{\\mathbf{s}} \\in \\mathbb{R}_N[X]^k$ . Given a polynomial message $\\mu \\in \\mathbb{T}_N[X]$ , a TLWE ciphertext of $\\mu$ under the key $\\bar{\\mathbf{s}}$ is a TLWE sample $(\\bar{\\mathbf{a}},\\bar{b}) \\in \\mathbb{T}_N[X]^k \\times \\mathbb{T}_N[X]$ , where $\\bar{\\mathbf{a}} \\gets \\mathbb{T}_N[X]^k$ and $\\bar{b} = \\bar{\\mathbf{s}}\\cdot \\bar{\\mathbf{a}} +\\mu +e$ , where $e\\gets \\mathcal{X}$ .",
460
+ "bbox": [
461
+ 513,
462
+ 388,
463
+ 913,
464
+ 500
465
+ ],
466
+ "page_idx": 1
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "TGSW. TGSW is the matrix extension of TLWE. Each row of a TGSW sample is a TLWE sample. An external product $\\boxdot$ that maps $\\boxdot$ : $TGSW \\times TWLE \\rightarrow TLWE$ can be defined by TFHE [6]. The product of the TGSW ciphertext of a polynomial message $\\mu_{TGSW} \\in \\mathbb{T}_N[X]$ and the TLWE ciphertext of a polynomial message $\\mu_{TLWE} \\in \\mathbb{T}_N[X]$ becomes a TLWE ciphertext of a polynomial message $\\mu_{TGSW} \\cdot \\mu_{TLWE} \\in \\mathbb{T}_N[X]$",
471
+ "bbox": [
472
+ 513,
473
+ 501,
474
+ 913,
475
+ 598
476
+ ],
477
+ "page_idx": 1
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "Bootstrapping. Each TFHE logic operation inevitably introduces a certain amount of noise into the resulting ciphertext. A bootstrapping has to be performed to remove the noise at the end of each TFHE logic operation. In various TFHE logic operations, the bootstrapping step is the largest performance bottleneck. The details of a TFHE bootstrapping can be viewed in [6]. The bootstrapping procedure is shown in Algorithm 1. The dimension of the TLWE sample is set as $k = 1$ [6], which means that the TLWE sample is simply the Ring-LWE sample $(\\bar{a},\\bar{b})\\in \\mathbb{T}_N[X]\\times \\mathbb{T}_N[X]$ . The most computationally intensive step of a bootstrapping is the homomorphic decryption in line 7, where the message of ACC becomes a polynomial $X^{\\bar{b} -\\bar{\\mathrm{as}}}\\cdot testv$ . Particularly, homomorphically computing $X^{-\\bar{\\mathrm{as}}} = X^{\\sum_{i = 1}^{n} - \\bar{\\mathrm{a}}_i\\bar{s}_i} = \\prod_{i = 1}^{n}X^{-\\bar{\\mathrm{a}}_i\\bar{s}_i}$ involves a great number of polynomial multiplications. Naively multiplying two degree $N$ polynomials has the complexity of $O(N^2)$ . FFT and IFFT are used to reduce the complexity of a polynomial multiplication to $O(N\\log (N))$ [7], where $N$ is the degree of polynomials.",
482
+ "bbox": [
483
+ 511,
484
+ 598,
485
+ 915,
486
+ 835
487
+ ],
488
+ "page_idx": 1
489
+ },
490
+ {
491
+ "type": "text",
492
+ "text": "Torus Implementation. Theoretically, the scale invariant scheme of TFHE is defined over the real torus $\\mathbb{T}$ , where all operations are modulo 1. But TFHE rescales the elements over $\\mathbb{T}$ by a factor $2^{32}$ ,",
493
+ "bbox": [
494
+ 513,
495
+ 835,
496
+ 924,
497
+ 878
498
+ ],
499
+ "page_idx": 1
500
+ },
501
+ {
502
+ "type": "header",
503
+ "text": "DAC '22, July 10-14, 2022, San Francisco, CA, USA",
504
+ "bbox": [
505
+ 84,
506
+ 75,
507
+ 323,
508
+ 85
509
+ ],
510
+ "page_idx": 1
511
+ },
512
+ {
513
+ "type": "image",
514
+ "img_path": "images/cba23848c9e6933ce89a781ba5a247746686c22d786c675176a8fce9e6477a3d.jpg",
515
+ "image_caption": [
516
+ "Figure 1: Latency breakdown."
517
+ ],
518
+ "image_footnote": [],
519
+ "bbox": [
520
+ 86,
521
+ 103,
522
+ 326,
523
+ 181
524
+ ],
525
+ "page_idx": 2
526
+ },
527
+ {
528
+ "type": "image",
529
+ "img_path": "images/29d298698e5ec909b0fd13d6247c5f858c5de6ca778236397cfcf6c8a5ee42ef.jpg",
530
+ "image_caption": [
531
+ "Figure 2: The depth-first FFT."
532
+ ],
533
+ "image_footnote": [],
534
+ "bbox": [
535
+ 336,
536
+ 104,
537
+ 529,
538
+ 181
539
+ ],
540
+ "page_idx": 2
541
+ },
542
+ {
543
+ "type": "image",
544
+ "img_path": "images/75047fa77d34549cb9268610a2627d84c733d9a7574c7eef196486c5682cf78c.jpg",
545
+ "image_caption": [
546
+ "Figure 3: The lifting butterfly w/o multiplication."
547
+ ],
548
+ "image_footnote": [],
549
+ "bbox": [
550
+ 540,
551
+ 103,
552
+ 897,
553
+ 181
554
+ ],
555
+ "page_idx": 2
556
+ },
557
+ {
558
+ "type": "text",
559
+ "text": "and maps them to 32-bit integers [6], since it can work with approximations. Therefore, TFHE does not have to actively perform modular reduction, since all operations on 32-bit integers implicitly call a native and automatic mod $2^{32}$ operation. To maintain high conversion accuracy, TFHE uses 64-bit double-precision floating point FFT and IFFT kernels [6].",
560
+ "bbox": [
561
+ 81,
562
+ 203,
563
+ 482,
564
+ 286
565
+ ],
566
+ "page_idx": 2
567
+ },
568
+ {
569
+ "type": "text",
570
+ "text": "3 Related Work and Motivation",
571
+ "text_level": 1,
572
+ "bbox": [
573
+ 83,
574
+ 292,
575
+ 354,
576
+ 306
577
+ ],
578
+ "page_idx": 2
579
+ },
580
+ {
581
+ "type": "text",
582
+ "text": "Related Work. Except some TFHE implementations on CPUs [6] GPUs [7], and FPGAs [10], there is no specialized hardware accelerator that can process TFHE. A TFHE accelerator is different from the accelerators designed for other FHE schemes such as BGV, BFV, and CKKS in two points. First, although few prior accelerators [19] support BGV and CKKS bootstrapping along a tiny multiplicative depth datapath, most prior works [15, 18, 20] design hardware accelerators to process leveled BFV or CKKS homomorphic operations without bootstrapping. However, a TFHE accelerator must perform bootstrapping at the end of each TFHE gate. Second, BGV, BFV, and CKKS require NTT and INTT kernels, while TFHE needs only FFT and IFFT kernels without modular reduction.",
583
+ "bbox": [
584
+ 81,
585
+ 310,
586
+ 482,
587
+ 474
588
+ ],
589
+ "page_idx": 2
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "Motivation. A TFHE gate performs not only polynomial additions but also a bootstrapping (FFT+IFFT+other) that costs $99\\%$ of the gate latency on a CPU, as shown in Figure 1. Therefore, in order to shorten the latency of TFHE gates, we need to accelerate the bootstrapping step in TFHE gates. Moreover, FFTs and IFFTs consume $80\\%$ of the bootstrapping latency in various TFHE gates. In order to accelerate TFHE gates, MATCHA adopts approximate multiplication-less integer FFTs and IFFTs, and uses a pipelined datapath to support aggressive bootstrapping key unrolling [2, 22].",
594
+ "bbox": [
595
+ 81,
596
+ 477,
597
+ 482,
598
+ 602
599
+ ],
600
+ "page_idx": 2
601
+ },
602
+ {
603
+ "type": "text",
604
+ "text": "4 MATCHA",
605
+ "text_level": 1,
606
+ "bbox": [
607
+ 83,
608
+ 608,
609
+ 194,
610
+ 622
611
+ ],
612
+ "page_idx": 2
613
+ },
614
+ {
615
+ "type": "text",
616
+ "text": "4.1 Approximate Fast Integer FFT and IFFT",
617
+ "text_level": 1,
618
+ "bbox": [
619
+ 83,
620
+ 628,
621
+ 447,
622
+ 643
623
+ ],
624
+ "page_idx": 2
625
+ },
626
+ {
627
+ "type": "text",
628
+ "text": "Despite the fact that elements over $\\mathbb{T}$ are mapped to 32-bit integers, TFHE still uses 64-bit double-precision floating point FFT and IFFT kernels, since 32-bit integer or single-precision floating point FFT and IFFT kernels are not accurate enough to guarantee the correct decryption of a ciphertext [6]. However, processing 64-bit double-precision floating point FFT and IFFT kernels incurs significant hardware overhead and power consumption.",
629
+ "bbox": [
630
+ 81,
631
+ 646,
632
+ 482,
633
+ 743
634
+ ],
635
+ "page_idx": 2
636
+ },
637
+ {
638
+ "type": "text",
639
+ "text": "Novelty. We first identify the opportunity to use approximate integer FFTs and IFFTs to accelerate TFHE without decryption errors for MATCHA. It is difficult to apply approximate NTTs and INTTs in accelerating other FHE schemes, e.g., BGV, BFV, and CKKS, which do not include a bootstrapping step after each homomorphic multiplication or addition. The errors introduced by approximate NTTs and INTTs will be quickly accumulated in the ciphertext and result in a decryption error, if a bootstrapping step cannot be performed in time. On the contrary, TFHE keeps the approximation errors of integer FFTs and IFFTs in check by performing a bootstrapping step at the end of each TFHE gate.",
640
+ "bbox": [
641
+ 81,
642
+ 743,
643
+ 482,
644
+ 896
645
+ ],
646
+ "page_idx": 2
647
+ },
648
+ {
649
+ "type": "text",
650
+ "text": "Depth-first FFT. Most prior FHE accelerators [18-20] perform NTTs and INTTs by the Cooley-Tukey data flow that introduces irregular memory accesses particularly in its bit-reversal stage. In order to remove the bit-reversal overhead, a prior ideal-lattice-based cryptographic accelerator [13] uses the Cooley-Tukey flow for NTTs and the Gentlemen-Sande flow for INTTs. These cryptographic accelerators store a polynomial mod $X^N + 1$ as a list of $N$ coefficients. For each multiplication between two polynomials, they execute two NTT kernels on two polynomials respectively, perform element-wise multiplications, and then run an NTT kernel on the result. The invoking frequency ratio between NTTs and INTTs is $2:1$ . These FHE accelerators have are many opportunities (i.e., switchings from NTT to INTT) to reduce the bit-reversal overhead. In contrast, TFHE saves a polynomial mod $X^N + 1$ as either a list of $N$ coefficients or the Lagrange half-complex representation consisting in the complex evaluations of the polynomial over the roots of unity $\\exp(i(2j + 1)\\pi / N)$ for $j \\in \\mathbb{I}[0, \\frac{N}{2}][\\cdot]$ . FFT and IFFT kernels are required only during the conversion between these two representations. The invoking frequency ratio between FFTs and IFFTs in a TFHE gate is $1:4$ . As Figure 1 shows, the latency of IFFT kernels is much longer than FFT kernels. TFHE does not have many opportunities to reduce the bit-reversal overhead. Instead, for MATCHA, we focus on decreasing the computing overhead of a single FFT or IFFT kernel. We adopt the depth-first iterative conjugate-pair FFT (CPFFT) algorithm [1]. Unlike the Cooley-Tukey or Gentlemen-Sande flow, the CPFFT requires only a single complex root of unity read per radix-4 butterfly. Two butterflies in the same block can share the same twiddle factor, further halving the number of reads to the twiddle-factor buffer [1]. Moreover, the Cooley-Tukey and Gentlemen-Sande flows process FFTs/IFFTs stage by stage in a breadth-first manner, as shown in Figure 2(a). To capture the spatial locality, as Figure 2(b) shows, CPFFT traverses the FFT flow in a depth-first fashion by completing a sub-transform before moving to the next.",
651
+ "bbox": [
652
+ 511,
653
+ 203,
654
+ 915,
655
+ 674
656
+ ],
657
+ "page_idx": 2
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "A Multiplication-less Butterfly. The lifting structure [17], a special type of lattice substrate implemented by cascading identity matrices with a single nonzero off-diagonal element, is proposed to approximate multiplications in FFT and IFFT kernels by additions and binary shifts. The basic lifting step shown in Figure 3(a) can be expressed by $y_{j}(n) = x_{j}(n)$ , $y_{i}(n) = x_{i}(n) + \\lceil T_{x_{j}}(n)\\rceil$ , $z_{j}(n) = y_{j}(n)$ , and $z_{i}(n) = y_{i}(n) - \\lceil T_{y_{j}}(n)\\rceil$ , where $T$ is a lifting coefficient. And thus, the lifting structure with the rounding operation can achieve integer-to-integer transform. Also, the lifting and its inverse matrices in this case are represented as $\\begin{bmatrix} 1 & T \\\\ 0 & 1 \\end{bmatrix}$ and $\\begin{bmatrix} 1 & T \\\\ 0 & 1 \\end{bmatrix}^{-1} = \\begin{bmatrix} 1 & -T \\\\ 0 & 1 \\end{bmatrix}$ , respectively. A floating-point lifting coefficient can be quantized as an approximate dyadic-valued coefficient $\\alpha /2^{\\beta}$ , and hence computed with only adders and shifters, where we allocate $\\beta$",
662
+ "bbox": [
663
+ 513,
664
+ 674,
665
+ 915,
666
+ 888
667
+ ],
668
+ "page_idx": 2
669
+ },
670
+ {
671
+ "type": "header",
672
+ "text": "MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus",
673
+ "bbox": [
674
+ 83,
675
+ 75,
676
+ 563,
677
+ 87
678
+ ],
679
+ "page_idx": 2
680
+ },
681
+ {
682
+ "type": "header",
683
+ "text": "DAC '22, July 10-14, 2022, San Francisco, CA, USA",
684
+ "bbox": [
685
+ 673,
686
+ 75,
687
+ 913,
688
+ 85
689
+ ],
690
+ "page_idx": 2
691
+ },
692
+ {
693
+ "type": "image",
694
+ "img_path": "images/abb1f77f6c955b95a39afc2d6469b285dcdc8f5095e16b4a0d611c1912b6d0b5.jpg",
695
+ "image_caption": [
696
+ "Figure 4: The truth table of $X^{-\\overline{a_{2i - 1}}}\\cdot s_{2i - 1} - \\overline{a_{2i}}\\cdot s_{2i}$ ."
697
+ ],
698
+ "image_footnote": [],
699
+ "bbox": [
700
+ 122,
701
+ 104,
702
+ 251,
703
+ 207
704
+ ],
705
+ "page_idx": 3
706
+ },
707
+ {
708
+ "type": "image",
709
+ "img_path": "images/e4775e4ce02e2d47bd9dab157995f074d2b175576c87e4e8e048b1b400c837fd.jpg",
710
+ "image_caption": [
711
+ "Figure 5: Bootstrapping key unrolling."
712
+ ],
713
+ "image_footnote": [],
714
+ "bbox": [
715
+ 295,
716
+ 104,
717
+ 454,
718
+ 205
719
+ ],
720
+ "page_idx": 3
721
+ },
722
+ {
723
+ "type": "image",
724
+ "img_path": "images/04e2cbef94695dcbb38033df54771e883fd899326bd04272917b4938bd0cc2b1.jpg",
725
+ "image_caption": [
726
+ "(a) the computing flow",
727
+ "Figure 6: The pipelined MATCHA for aggressive BKU."
728
+ ],
729
+ "image_footnote": [],
730
+ "bbox": [
731
+ 88,
732
+ 241,
733
+ 316,
734
+ 333
735
+ ],
736
+ "page_idx": 3
737
+ },
738
+ {
739
+ "type": "image",
740
+ "img_path": "images/cab03815d3c8af0d7b2329d154ef2214fcce0187d176e31556e9fa226b0fcd7f.jpg",
741
+ "image_caption": [
742
+ "(b) the pipeline"
743
+ ],
744
+ "image_footnote": [],
745
+ "bbox": [
746
+ 326,
747
+ 243,
748
+ 475,
749
+ 333
750
+ ],
751
+ "page_idx": 3
752
+ },
753
+ {
754
+ "type": "text",
755
+ "text": "bits to the lifting coefficient, and $\\alpha, \\beta \\in \\mathbb{N}$ . For example, a coefficient $9/128$ can be operated as $\\frac{9}{128} = \\frac{2^3 + 2^0}{2^7} = \\frac{1}{2^4} + \\frac{1}{2^7}$ . Hence, the lifting with its coefficient $9/128$ and a rounding operation is replaced to the summation of 4 and 7 bit-shifters illustrated in Figure 3(b). The perfect reconstruction in lifting is always kept if floating-point coefficients are approximated to dyadic-valued coefficients.",
756
+ "bbox": [
757
+ 81,
758
+ 375,
759
+ 480,
760
+ 460
761
+ ],
762
+ "page_idx": 3
763
+ },
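The perfect-reconstruction claim is easy to check numerically. The sketch below (our own illustration, not library code) implements the T = 9/128 lifting step with only shift-adds and verifies that the inverse step cancels the rounded term exactly:

```python
def round_shift(x, s):
    """Round-to-nearest arithmetic right shift: approximates x / 2**s."""
    return (x + (1 << (s - 1))) >> s

def dyadic_mul(x, shifts=(4, 7)):
    """x * (2**-4 + 2**-7) = x * 9/128 using only adders and shifters."""
    return sum(round_shift(x, s) for s in shifts)

def lift(xi, xj):    # forward:  y_j = x_j,  y_i = x_i + round(T * x_j)
    return xi + dyadic_mul(xj), xj

def unlift(yi, yj):  # inverse:  x_i = y_i - round(T * y_j)
    return yi - dyadic_mul(yj), yj

assert unlift(*lift(12345, -6789)) == (12345, -6789)  # exact despite rounding
```

Reconstruction is exact because the inverse subtracts the very same rounded quantity that the forward step added, which is why quantizing T places no accuracy requirement on the lifting step itself.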
764
+ {
765
+ "type": "text",
766
+ "text": "4.2 Aggressive Bootstrapping Key Unrolling",
767
+ "text_level": 1,
768
+ "bbox": [
769
+ 83,
770
+ 460,
771
+ 455,
772
+ 477
773
+ ],
774
+ "page_idx": 3
775
+ },
776
+ {
777
+ "type": "text",
778
+ "text": "Bootstrapping Key Unrolling. A TFHE bootstrapping needs to compute external produces, i.e., $X^{-\\widetilde{\\mathbf{a}} s} = X^{\\sum_{i=1}^{n} -\\widetilde{\\mathbf{a}}_i s_i}$ sequentially, thereby becoming the performance bottleneck of a TFHE gate. Instead, bootstrapping key unrolling (BKU) [2, 22] is proposed to compute $X^{\\sum_{i=1}^{n/2} -\\overline{a}_{2i-1} s_{2i-1} -\\overline{a}_{2i} s_{2i}}$ in each external product, so that the number of homomorphic additions can be reduced from $n$ to $n/2$ . The secret key $s$ is sampled from $\\mathbb{B}^n$ , so $s_i \\in \\{0,1\\}$ , where $0 \\leq i \\leq n$ . Based on the values of $s_{2i}$ and $s_{2i+1}$ , the truth table of $X^{\\sum_{i=1}^{n/2} -\\overline{a}_{2i-1} s_{2i-1} -\\overline{a}_{2i} s_{2i}}$ can be shown in Figure 4. So BKU rewrites $X^{-\\overline{a}_{2i-1} \\cdot s_{2i-1} -\\overline{a}_{2i} \\cdot s_{2i}}$ as $X^{-\\overline{a}_{2i-1} -\\overline{a}_{2i}} \\cdot s_{2i-1} s_{2i} - X^{-\\overline{a}_{2i-1}} \\cdot s_{2i-1}(1 - s_{2i}) - X^{-\\overline{a}_{2i}} \\cdot (1 - s_{2i-1}) s_{2i} - (1 - s_{2i-1})(1 - s_{2i})$ . Due to the fact that $s_{2i-1} s_{2i} + (1 - s_{2i}) s_{2i-1} + s_{2i}(1 - s_{2i-1}) + (1 - s_{2i-1})(1 - s_{2i})$ is always equal to 1 [2], $X^{-\\overline{a}_{2i-1} \\cdot s_{2i-1} -\\overline{a}_{2i} \\cdot s_{2i}}$ can be further simplified to $(X^{-\\overline{a}_{2i-1} -\\overline{a}_{2i}} - 1) \\cdot s_{2i-1} s_{2i} + (X^{-\\overline{a}_{2i-1}} - 1) \\cdot s_{2i-1}(1 - s_{2i}) - (X^{-\\overline{a}_{2i}} - 1) \\cdot (1 - s_{2i-1}) s_{2i} + 1$ . As Figure 5 shows, BKU encrypts $s_{2i-1} s_{2i}$ , $s_{2i-1}(1 - s_{2i})$ , and $(1 - s_{2i-1}) s_{2i}$ as TGSW ciphertexts, and builds a bootstrapping key bundle to unroll the original bootstrapping key for two times.",
779
+ "bbox": [
780
+ 81,
781
+ 478,
782
+ 482,
783
+ 734
784
+ ],
785
+ "page_idx": 3
786
+ },
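Since exactly one of the four selector products equals 1 for any pair of secret bits, the rewriting above is a case selection. The sketch below (ours, with arbitrary integer exponents standing in for the rounded coefficients) verifies the simplified identity over all four cases:

```python
from fractions import Fraction
from itertools import product

X = Fraction(3)   # any nonzero value works: the identity holds term by term
a1, a2 = 4, 7     # hypothetical rounded coefficients a_{2i-1}, a_{2i}
for s1, s2 in product((0, 1), repeat=2):
    lhs = X ** (-a1 * s1 - a2 * s2)        # the monomial selected by the bits
    rhs = ((X ** (-a1 - a2) - 1) * s1 * s2
           + (X ** (-a1) - 1) * s1 * (1 - s2)
           + (X ** (-a2) - 1) * (1 - s1) * s2
           + 1)
    assert lhs == rhs, (s1, s2)
```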
787
+ {
788
+ "type": "text",
789
+ "text": "Aggressive BKU Performing Badly on CPUs. BKU can be further generalized as",
790
+ "bbox": [
791
+ 83,
792
+ 737,
793
+ 482,
794
+ 763
795
+ ],
796
+ "page_idx": 3
797
+ },
798
+ {
799
+ "type": "equation",
800
+ "text": "\n$$\nX ^ {\\sum_ {i = 1} ^ {\\frac {n}{m}} - \\overline {{\\mathsf {a} _ {m \\cdot i}}} \\mathsf {s} _ {m \\cdot i} - \\overline {{\\mathsf {a} _ {m \\cdot i + 1}}} \\mathsf {s} _ {m \\cdot i + 1} - \\dots - \\overline {{\\mathsf {a} _ {m \\cdot i + m - 1}}} \\mathsf {s} _ {m \\cdot i + m - 1}}, \\tag {1}\n$$\n",
801
+ "text_format": "latex",
802
+ "bbox": [
803
+ 143,
804
+ 770,
805
+ 480,
806
+ 792
807
+ ],
808
+ "page_idx": 3
809
+ },
810
+ {
811
+ "type": "text",
812
+ "text": "where $m \\in [2, n]$ . So it is possible to more aggressively unroll the bootstrapping key by increasing $m$ . Although unrolling the bootstrapping key for two times ( $m = 2$ ) reduces the bootstrapping latency by $49\\%$ , we find that further enlarging $m$ beyond 2 even prolongs the bootstrapping latency on a CPU, as explained in Section 6. Our experimental methodology is described in Section 5. The reason can be summarized as follows.",
813
+ "bbox": [
814
+ 81,
815
+ 797,
816
+ 482,
817
+ 895
818
+ ],
819
+ "page_idx": 3
820
+ },
821
+ {
822
+ "type": "image",
823
+ "img_path": "images/8ddcaafd391279cdcfdfa803562bb5cab997d92b01dfa8dc11761b39bc0297e8.jpg",
824
+ "image_caption": [
825
+ "Figure 7: The architecture of MATCHA (mem. ctrl: memory controller; addr gen.: address generation; twid: twiddle factor; butt.: butterfly; and shift.: shifter)."
826
+ ],
827
+ "image_footnote": [],
828
+ "bbox": [
829
+ 519,
830
+ 104,
831
+ 908,
832
+ 215
833
+ ],
834
+ "page_idx": 3
835
+ },
836
+ {
837
+ "type": "list",
838
+ "sub_type": "text",
839
+ "list_items": [
840
+ "- The limited number of cores on a CPU. With an enlarged $m$ , there are more terms in the exponent part of Equation 1. For instance, when $m = 4$ , there are 15 terms, each of which requires a TGSW scale-and-add operation. Unfortunately, our CPU baseline has only 8 physical cores. Mapping each terms to a core, and summing the results from all cores introduce significant communication overhead.",
841
+ "- More cache conflicts. The size of bootstrapping key increases exponentially with an enlarged $m$ . For example, as Figure 5 shows, instead of a single bootstrapping key, BKU with $m = 2$ requires three bootstrapping keys. Each TGSW scale-and-add operation happening on a term fetches its corresponding bootstrapping key to the shared last level cache, generating more cache conflicts.",
842
+ "- The lack of a pipelined design. As Figure 5 highlights, in each iteration, the construction of the bootstrapping key bundle BKB and the external product operation are executed sequentially. Although it is possible to start the computation of BKB for the next iteration and perform the external product operation of this iteration at the same time, the current BKU implementation [22] cannot do this, due to the lack of a pipelined design."
843
+ ],
844
+ "bbox": [
845
+ 514,
846
+ 266,
847
+ 913,
848
+ 542
849
+ ],
850
+ "page_idx": 3
851
+ },
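The term counts quoted in the first bullet follow from enumerating the nonzero assignments of m secret bits, one TGSW term per assignment; a short sketch (ours):

```python
from itertools import product

def bundle_terms(m):
    """One TGSW term per nonzero assignment of m secret bits: 2**m - 1 terms."""
    return [bits for bits in product((0, 1), repeat=m) if any(bits)]

assert len(bundle_terms(2)) == 3    # the three bootstrapping keys of Figure 5
assert len(bundle_terms(4)) == 15   # the 15 terms mentioned for m = 4
```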
852
+ {
853
+ "type": "text",
854
+ "text": "MATCHA for Aggressive BKU. In this paper, we propose a pipeline flow for MATCHA to support aggressive BKU with a larger $m$ . Compared to our CPU baseline, our pipeline flow can be easily accelerated by a large number of specialized hardware components including TGSW clusters and External Product (EP) cores. As Figure 6(a) shows, we divide the bottleneck of a TFHE bootstrapping into two steps, i.e., the construction of the bootstrapping key bundle, and the EP operation. A TGSW cluster is used to construct the bootstrapping key bundle, while an EP core processes EP operations between the bootstrapping key bundle and ACC. A TGSW cluster consists of a TGSW adder tree and multiple TGSW scale units, each of which computes one term in the bootstrapping key bundle, e.g., when $m = 2$ , $(X^{-\\overline{a}_{2i-1} - \\overline{a}_{2i}} - 1) \\cdot \\mathbf{BK}_{i,0}$ , where $\\mathbf{BK}_{i,0}$ is the TGSW ciphertext of $s_{2i-1}s_{2i}$ . And then, the TGSW adder sums all terms and generates the bootstrapping key bundle. With the bootstrapping key bundle $(\\mathbf{BKB_i})$ , an EP core computes $ACC \\gets \\mathbf{BKB_i} \\square ACC$ . The TGSW cluster and the EP core have their separated register file banks to reduce on-chip memory conflicts. Moreover, these two steps of a TFHE bootstrapping can be deployed on a TGSW cluster and an EP core in a pipelined manner, as shown in Figure 6(b). In each time step, the EP core computes the EP operation with the bootstrapping key bundle generated by the TGSW cluster in the previous time step. When $m$ is increased, the workload of the bootstrapping key bundle construction becomes larger. The workloads",
855
+ "bbox": [
856
+ 511,
857
+ 544,
858
+ 913,
859
+ 876
860
+ ],
861
+ "page_idx": 3
862
+ },
863
+ {
864
+ "type": "header",
865
+ "text": "DAC '22, July 10-14, 2022, San Francisco, CA, USA",
866
+ "bbox": [
867
+ 84,
868
+ 75,
869
+ 323,
870
+ 85
871
+ ],
872
+ "page_idx": 3
873
+ },
874
+ {
875
+ "type": "table",
876
+ "img_path": "images/8b2b232fb7efb8eb0de3da7db741d983ea5ae505ac61546b2ffd31bf3325ff68.jpg",
877
+ "table_caption": [
878
+ "Table 2: The power and area of MATCHA operating at ${2GHz}$ ."
879
+ ],
880
+ "table_footnote": [],
881
+ "table_body": "<table><tr><td>Name</td><td>Spec</td><td>Power (W)</td><td>Area (mm2)</td></tr><tr><td>TGSW cluster</td><td>×16 multipliers &amp; adders, and a 16KB, 2-bank reg. file</td><td>0.98</td><td>0.368</td></tr><tr><td>EP core</td><td>4 IFFT, 1 FFT, ×4 multipliers &amp; adders, and a 256KB, 8-bank reg. file</td><td>2.87</td><td>1.89</td></tr><tr><td>Sub-total</td><td>×8 EP cores and TGSW clusters</td><td>30.8</td><td>18.06</td></tr><tr><td>polynomial unit</td><td>×32 adders &amp; cmps &amp; logic units, and a 8KB, 2-bank reg. file</td><td>2.33</td><td>0.32</td></tr><tr><td>crossbar</td><td>1/2 8 × 32/8 NoCs (256b bit-sliced)</td><td>2.11</td><td>0.44</td></tr><tr><td>SPM</td><td>a 4MB, 32-bank SPM</td><td>3.52</td><td>3.25</td></tr><tr><td>mem ctrl</td><td>memory controller and HBM2 PHY</td><td>1.225</td><td>14.9</td></tr><tr><td>Total</td><td></td><td>39.98</td><td>36.96</td></tr></table>",
882
+ "bbox": [
883
+ 86,
884
+ 119,
885
+ 475,
886
+ 257
887
+ ],
888
+ "page_idx": 4
889
+ },
890
+ {
891
+ "type": "text",
892
+ "text": "of the two steps in the pipeline can be approximately balanced by adjusting $m$ .",
893
+ "bbox": [
894
+ 81,
895
+ 263,
896
+ 480,
897
+ 292
898
+ ],
899
+ "page_idx": 4
900
+ },
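The two-stage overlap of Figure 6(b) can be modeled functionally in a few lines. In the sketch below (ours), build_bkb and external_product are hypothetical callables standing in for the TGSW cluster and the EP core; in hardware the two calls inside the loop run concurrently on separate units, while this software model simply preserves their ordering:

```python
def pipelined_bootstrap(num_iters, build_bkb, external_product, acc):
    """Two-stage pipeline: build BKB_{i+1} while folding BKB_i into ACC."""
    bkb = build_bkb(0)                                         # fill the pipeline
    for i in range(num_iters):
        nxt = build_bkb(i + 1) if i + 1 < num_iters else None  # TGSW cluster stage
        acc = external_product(bkb, acc)                       # EP core stage
        bkb = nxt
    return acc
```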
901
+ {
902
+ "type": "text",
903
+ "text": "4.3 The Architecture of MATCHA",
904
+ "text_level": 1,
905
+ "bbox": [
906
+ 83,
907
+ 297,
908
+ 372,
909
+ 311
910
+ ],
911
+ "page_idx": 4
912
+ },
913
+ {
914
+ "type": "text",
915
+ "text": "Architecture. The overall architecture of MATCHA is shown in Figure 7(a). MATCHA has multiple computing components including a polynomial unit, eight TGSW clusters, and eight External Product (EP) cores. All computing components of MATCHA are connected to 32 scratchpad memory (SPM) banks by crossbars. MATCHA also employs a memory controller to manage the off-chip memory requests issued to HBM2 DRAMs. The polynomial unit is in charge of performing polynomial additions/subtractions for each TFHE logic operation, initializing bootstrapping operations, extracting samples, and conducting key-switching operations that consist of additions, logic comparisons, and Boolean logic operations. One TGSW cluster and an EP core can support one bootstrapping pipeline. As Figure 7(b) shows, a TGSW cluster 16 32-bit integer multipliers and 16 32-bit integer adders to support TGSW scale operations. Each TGSW cluster has only two register banks, since the memory accesses during a TGSW scale operation have strong spatial locality. The TGSW cluster can read a register bank while write the other bank concurrently. An EP core consists of an FFT core and four IFFT cores to accelerate the FFT and IFFT kernels during an EP operation, as shown in Figure 7(c). It has 8 register banks to serve the irregular memory accesses in FFT and IFFT kernels. An EP core also has four 32-bit integer multipliers and four 32-bit integer adders to manipulate TGSW ciphertexts during an EP operation. An FFT core is similar to an IFFT core, except its data flow. As Figure 7(d) highlights, an FFT core comprises an address generation unit, a twiddle factor buffer, two input/output FIFOs, and 128 butterfly cores, each of which consists of two 64-bit integer adders and two 64-bit binary shifters. The address generation unit guides butterfly cores to access the twiddle factor buffer.",
916
+ "bbox": [
917
+ 81,
918
+ 314,
919
+ 482,
920
+ 715
921
+ ],
922
+ "page_idx": 4
923
+ },
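For quick reference, the per-component organization described above condenses into a plain record; every number is transcribed from the text rather than from the RTL:

```python
MATCHA_ORG = {
    "tgsw_cluster": {"int32_multipliers": 16, "int32_adders": 16, "reg_banks": 2},
    "ep_core": {"fft_cores": 1, "ifft_cores": 4, "int32_multipliers": 4,
                "int32_adders": 4, "reg_banks": 8},
    "fft_core": {"butterfly_cores": 128,
                 "per_butterfly": {"int64_adders": 2, "int64_shifters": 2}},
    "chip": {"polynomial_units": 1, "tgsw_clusters": 8, "ep_cores": 8,
             "spm_banks": 32},
}
```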
924
+ {
925
+ "type": "text",
926
+ "text": "Design Overhead. We implemented MATCHA in RTL, and synthesized it in $16nm$ PTM process technology using state-of-the-art tools. We used CACTI to model all SPM components and register file banks. Due to its simple structure, the entire design of MATCHA can run at $2GHz$ . Among various on-chip network architectures, e.g., meshes, rings, and crossbars, we selected two $8 \\times 32$ , and one $8 \\times 8$ bit-sliced crossbars, i.e., SPM $\\rightarrow$ cores/clusters, cores/clusters $\\rightarrow$ SPM, and cores/clusters $\\rightarrow$ cores/clusters. The hardware overhead and power consumption of MATCHA are shown in Table 2. Totally, MATCHA occupies $36.96mm^2$ and consumes 39.98 Watt. The HBM2 bandwidth is $640GB/s$ .",
927
+ "bbox": [
928
+ 81,
929
+ 715,
930
+ 482,
931
+ 867
932
+ ],
933
+ "page_idx": 4
934
+ },
935
+ {
936
+ "type": "text",
937
+ "text": "Error and Noise. The error of the polynomial multiplication result caused by approximate multiplication-less integer FFT and",
938
+ "bbox": [
939
+ 81,
940
+ 867,
941
+ 482,
942
+ 896
943
+ ],
944
+ "page_idx": 4
945
+ },
946
+ {
947
+ "type": "image",
948
+ "img_path": "images/8e6806f9e37cb34b4abea783f0da3727f56d2e82dc2a684786e30624bbaeecf9.jpg",
949
+ "image_caption": [
950
+ "Figure 8: The error of approx. FFT & IFFT.",
951
+ "Table 3: The noise comparison (δ: the noise of EPs; RO: the noise of roundings; BK: the noise of bootstrapping keys)."
952
+ ],
953
+ "image_footnote": [],
954
+ "bbox": [
955
+ 517,
956
+ 103,
957
+ 651,
958
+ 195
959
+ ],
960
+ "page_idx": 4
961
+ },
962
+ {
963
+ "type": "table",
964
+ "img_path": "images/67aa0afd04be92ef09093dd96ae4c119ddcdf5669eecd65b8c710d8617a9dd8a.jpg",
965
+ "table_caption": [],
966
+ "table_footnote": [],
967
+ "table_body": "<table><tr><td>metric</td><td>BKU [2, 22]</td><td>MATCHA</td></tr><tr><td>EP</td><td>δ/2</td><td>δ/m</td></tr><tr><td>rounding</td><td>RO/2</td><td>RO/m</td></tr><tr><td>BK</td><td>3βK</td><td>(2m-1)βK</td></tr><tr><td>I/FFT</td><td>-150dB</td><td>-141dB</td></tr></table>",
968
+ "bbox": [
969
+ 707,
970
+ 103,
971
+ 887,
972
+ 162
973
+ ],
974
+ "page_idx": 4
975
+ },
976
+ {
977
+ "type": "text",
978
+ "text": "IFFT kernels is shown in Figure 8. All polynomial coefficients are 32-bit integers, while we quantize the twiddle factors of FFT and IFFT with various bitwidths. With an increasing bitwidth of twiddle factors, the error caused by approximate FFT and IFFT decreases, and is similar to that generated by original double-precision floating point FFT and IFFT. With 64-bit dyadic-value-quantized twiddle factors (DVQTFs), the error caused by approximate FFT and IFFT is $\\sim 141dB$ , which is still larger than that produced by 64-bit double-precision floating point FFT and IFFT, since the approximate FFT and IFFT perform only additions and binary shifts. At the TFHE gate level, the noise comparison between BKU and MATCHA is exhibited in Table 3, where BKU unrolls the bootstrapping key for two times while MATCHA unrolls that for $m$ times ( $m \\geq 2$ ). With an enlarging $m$ , the noise from EP and rounding operations decreases linearly, but the noise caused by bootstrapping keys increases exponentially. As a result, TFHE with a smaller $m$ can tolerate more errors caused by approximate FFT and IFFT. Based on our experiments, 38-bit DVQTFs produce no decryption failure in the test of $10^{8}$ TFHE gates. However, for a large $m$ , e.g., $m = 5$ , we have to use 64-bit DVQTFs to guarantee there is no decryption failure in the same test, since the noise caused by more bootstrapping keys dominates the total noise in ciphertexts. Therefore, MATCHA adopts 64-bit DVQTFs for all approximate multiplication-less integer FFT and IFFT kernels.",
979
+ "bbox": [
980
+ 511,
981
+ 228,
982
+ 913,
983
+ 559
984
+ ],
985
+ "page_idx": 4
986
+ },
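The bitwidth sweep behind Figure 8 is easy to reproduce in miniature: quantize each twiddle factor to β fractional bits and measure the relative error against the unquantized transform. The toy experiment below is ours (a naive O(N^2) DFT stands in for the FFT, and the numbers it prints are illustrative, not the paper's measurements):

```python
import cmath, math, random

def quantize(w, beta):
    """Dyadic-value quantization: keep beta fractional bits of re/im."""
    q = 1 << beta
    return complex(round(w.real * q) / q, round(w.imag * q) / q)

def dft(x, beta=None):
    N, out = len(x), []
    for k in range(N):
        acc = 0j
        for n, xn in enumerate(x):
            w = cmath.exp(-2j * cmath.pi * k * n / N)
            acc += xn * (w if beta is None else quantize(w, beta))
        out.append(acc)
    return out

random.seed(0)
x = [complex(random.getrandbits(16)) for _ in range(128)]
ref = dft(x)
for beta in (16, 38, 52):
    app = dft(x, beta)
    err = (sum(abs(r - a) ** 2 for r, a in zip(ref, app))
           / sum(abs(r) ** 2 for r in ref))
    print(f"beta={beta:2d}: {10 * math.log10(max(err, 1e-300)):8.1f} dB")
```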
987
+ {
988
+ "type": "text",
989
+ "text": "5 Experimental Methodology",
990
+ "text_level": 1,
991
+ "bbox": [
992
+ 514,
993
+ 561,
994
+ 769,
995
+ 575
996
+ ],
997
+ "page_idx": 4
998
+ },
999
+ {
1000
+ "type": "text",
1001
+ "text": "Simulation and Compilation: To simulate the performance of MATCHA at cycle level, we used a CGRA modeling framework, OpenCGRA [21], which has been validated against multiple ASIC accelerators. OpenCGRA first compiles a TFHE logic operation into a data flow graph (DFG) of the operations supported by MATCHA, solves its dependencies, and removes structural hazards. The architecture of MATCHA is abstracted to an architecture description (AD) in OpenCGRA, which computes the latency and the energy consumption of each TFHE logic operation by scheduling and mapping the DFG onto the AD.",
1002
+ "bbox": [
1003
+ 511,
1004
+ 577,
1005
+ 913,
1006
+ 715
1007
+ ],
1008
+ "page_idx": 4
1009
+ },
1010
+ {
1011
+ "type": "text",
1012
+ "text": "Our Baselines. We compared MATCHA against state-of-the-art CPU-, GPU-, FPGA-, and ASIC-based TFHE hardware platforms. Our CPU baseline is a 8-core $3.7GHz$ Xeon E-2288G processor executing the TFHE library [6], while our GPU baseline is a 5120-core Tesla-V100 GPU equipped with a 16GB HBM2 DRAM running the cuFHE library [7]. TFHE Vector Engine (TVE) [10] was implemented on a low-end ZedBoard Zynq-7000 FPGA. We implemented 8 copies of TVE on a Stratix-10 GX2800 FPGA, and used it as our FPGA baseline, since the Stratix-10 board has more resources. Because there is no existing ASIC-based design, we synthesized our FPGA baseline with the $16nm$ PTM process as our ASIC baseline. We enable BKU on CPU, GPU, and MATCHA but fix $m = 1$ on FPGA and ASIC, since they do not support BKU.",
1013
+ "bbox": [
1014
+ 511,
1015
+ 715,
1016
+ 913,
1017
+ 896
1018
+ ],
1019
+ "page_idx": 4
1020
+ },
1021
+ {
1022
+ "type": "header",
1023
+ "text": "MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus",
1024
+ "bbox": [
1025
+ 83,
1026
+ 75,
1027
+ 560,
1028
+ 87
1029
+ ],
1030
+ "page_idx": 4
1031
+ },
1032
+ {
1033
+ "type": "header",
1034
+ "text": "DAC '22, July 10-14, 2022, San Francisco, CA, USA",
1035
+ "bbox": [
1036
+ 673,
1037
+ 75,
1038
+ 913,
1039
+ 87
1040
+ ],
1041
+ "page_idx": 4
1042
+ },
1043
+ {
1044
+ "type": "image",
1045
+ "img_path": "images/47f902bb44e6721b2523b8f5aec04bff481f9b6a8cc68429b8df5dfe6e0ecae5.jpg",
1046
+ "image_caption": [
1047
+ "Figure 9: Latency comparison."
1048
+ ],
1049
+ "image_footnote": [],
1050
+ "bbox": [
1051
+ 94,
1052
+ 103,
1053
+ 352,
1054
+ 190
1055
+ ],
1056
+ "page_idx": 5
1057
+ },
1058
+ {
1059
+ "type": "image",
1060
+ "img_path": "images/d4e690c27e64d7fb861f66ffeac4e8e29e82007373f8579d79d85a2a17bc4f73.jpg",
1061
+ "image_caption": [
1062
+ "Figure 10: Throughput comparison."
1063
+ ],
1064
+ "image_footnote": [],
1065
+ "bbox": [
1066
+ 369,
1067
+ 103,
1068
+ 629,
1069
+ 190
1070
+ ],
1071
+ "page_idx": 5
1072
+ },
1073
+ {
1074
+ "type": "image",
1075
+ "img_path": "images/1f2f6281bb82b4a864ae85692ca1ae5e9fe13dece293e7507ab72faaa6b18828.jpg",
1076
+ "image_caption": [
1077
+ "Figure 11: Thrght/Watt comparison."
1078
+ ],
1079
+ "image_footnote": [],
1080
+ "bbox": [
1081
+ 645,
1082
+ 103,
1083
+ 901,
1084
+ 191
1085
+ ],
1086
+ "page_idx": 5
1087
+ },
1088
+ {
1089
+ "type": "text",
1090
+ "text": "TFHE Operations and Parameters. We studied all TFHE logic operations including NOT, AND, OR, NAND, XOR, and XNOR, but we only report the results on NAND in Section 6. This is because AND, OR, NAND, XOR, and XNOR have almost the same latency which is dominated by the bootstrapping step, while NOT has no bootstrapping at all. To maintain the standard 110-bit security, we adopt the TFHE parameters from [6], i.e., the polynomial degree in the ring $N = 1024$ , the TLWE dimension $k = 1$ , the basis and length for the TGSW ciphertext decomposition $Bg = 1024$ and $\\ell = 3$ .",
1091
+ "bbox": [
1092
+ 81,
1093
+ 212,
1094
+ 480,
1095
+ 335
1096
+ ],
1097
+ "page_idx": 5
1098
+ },
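To give intuition for the decomposition parameters: with Bg = 2^10 and ℓ = 3, only the top 30 bits of a rescaled 32-bit torus element survive the gadget decomposition. The sketch below is a simplified unsigned digit extraction (ours, for intuition only; the TFHE library [6] uses a signed, rounded decomposition):

```python
def decompose_unsigned(a, log_bg=10, ell=3, q_bits=32):
    """Top-ell base-(2**log_bg) digits of a q_bits-wide torus element."""
    mask = (1 << log_bg) - 1
    return [(a >> (q_bits - log_bg * j)) & mask for j in range(1, ell + 1)]

a = 0xDEADBEEF                            # a torus element rescaled by 2**32
digits = decompose_unsigned(a)
approx = sum(d << (32 - 10 * j) for j, d in enumerate(digits, start=1))
assert 0 <= a - approx < (1 << 2)         # only the 2 bits below Bg**-3 are lost
```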
1099
+ {
1100
+ "type": "text",
1101
+ "text": "6 Results and Analysis",
1102
+ "text_level": 1,
1103
+ "bbox": [
1104
+ 83,
1105
+ 335,
1106
+ 282,
1107
+ 349
1108
+ ],
1109
+ "page_idx": 5
1110
+ },
1111
+ {
1112
+ "type": "text",
1113
+ "text": "Latency. The latency comparison of a TFHE NAND gate between our various baselines and MATCHA is shown in Figure 9. The NAND gate on CPU costs $13.1ms$ , while $m = 2$ reduces its latency to $6.67ms$ . Aggressive BKU with an increasing $m$ cannot further reduce the NAND gate latency anymore on CPU, due to the limited number of cores, more cache conflicts, and the non-pipelined processing style. It takes only $0.37ms$ for GPU to process a NAND gate. With an enlarging $m$ , GPU gradually reduces the NAND gate latency. When $m = 4$ , the NAND gate latency on GPU is $0.18ms$ . MATCHA reduces the NAND gate latency by $13\\%$ over GPU only when $m = 3$ , since GPU can fully use its all resources to process one TFHE gate when $m = 1$ or 2. MATCHA cannot support aggressive BKU with $m = 4$ efficiently either, since it has only 8 TGSW clusters. FPGA and ASIC do not have any pipelined design or memory optimization to support BKU, and they need $>6.8ms$ to complete a NAND gate when $m = 1$ .",
1114
+ "bbox": [
1115
+ 81,
1116
+ 351,
1117
+ 482,
1118
+ 571
1119
+ ],
1120
+ "page_idx": 5
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "text": "Throughput. The NAND gate throughput comparison between various baselines and MATCHA is shown in Figure 10. FPGA and ASIC duplicate 8 copies of the TVE [10], so they support only $m = 1$ . By enabling aggressive BKU, even CPU ( $m = 2$ ) can achieve higher gate processing throughput than ASIC and FPGA with $m = 1$ . GPU and MATCHA obtain much higher throughput than ASIC, FPGA and CPU. Compared to GPU, MATCH improves the NAND gate throughput by $2.3 \\times (m = 3)$ , due to its pipelined architecture for aggressive BKU.",
1125
+ "bbox": [
1126
+ 81,
1127
+ 571,
1128
+ 482,
1129
+ 696
1130
+ ],
1131
+ "page_idx": 5
1132
+ },
1133
+ {
1134
+ "type": "text",
1135
+ "text": "Throughput per Watt. The comparison of the NAND gate throughput per Watt between various baselines and MATCHA is shown in Figure 11. FPGA and ASIC consume only $\\sim 40W$ and $\\sim 26W$ , and improve the NAND gate throughput per Watt by $2.4\\times$ and $8.3\\times$ over CPU respectively, when $m = 1$ . Due to the large power consumption ( $>200W$ ) of GPU, the best throughput per Watt of GPU ( $m = 4$ ) is only about $58\\%$ of that of ASIC. Compared to ASIC, MATCHA improves the NAND gate throughput per Watt by $6.3\\times$ , since it consumes only $39.98W$ .",
1136
+ "bbox": [
1137
+ 81,
1138
+ 696,
1139
+ 482,
1140
+ 821
1141
+ ],
1142
+ "page_idx": 5
1143
+ },
1144
+ {
1145
+ "type": "text",
1146
+ "text": "7 Conclusion",
1147
+ "text_level": 1,
1148
+ "bbox": [
1149
+ 83,
1150
+ 825,
1151
+ 207,
1152
+ 838
1153
+ ],
1154
+ "page_idx": 5
1155
+ },
1156
+ {
1157
+ "type": "text",
1158
+ "text": "TFHE enables arbitrary computations with an unlimited multiplicative depth to directly occur on ciphertexts. However, TFHE gates are time-consuming and power-hungry on state-of-the-art hardware platforms. In this paper, we build MATCHA to accelerate",
1159
+ "bbox": [
1160
+ 81,
1161
+ 840,
1162
+ 482,
1163
+ 896
1164
+ ],
1165
+ "page_idx": 5
1166
+ },
1167
+ {
1168
+ "type": "text",
1169
+ "text": "TFHE gates. MATCHA allows aggressive bootstrapping key unrolling to process TFHE gates without decryption errors by approximate multiplication-less integer FFTs and IFFTs, and a pipelined datapath. Compared to prior CPU-, GPU-, FPGA- and ASIC-based solutions, MATCHA improves the TFHE gate processing throughput by $2.3\\times$ , and the throughput per Watt by $6.3\\times$ .",
1170
+ "bbox": [
1171
+ 511,
1172
+ 212,
1173
+ 915,
1174
+ 295
1175
+ ],
1176
+ "page_idx": 5
1177
+ },
1178
+ {
1179
+ "type": "text",
1180
+ "text": "References",
1181
+ "text_level": 1,
1182
+ "bbox": [
1183
+ 514,
1184
+ 299,
1185
+ 607,
1186
+ 311
1187
+ ],
1188
+ "page_idx": 5
1189
+ },
1190
+ {
1191
+ "type": "list",
1192
+ "sub_type": "ref_text",
1193
+ "list_items": [
1194
+ "[1] A. Becoulet and A. Verguet, \"A Depth-First Iterative Algorithm for the Conjugate Pair Fast Fourier Transform,\" IEEE Transactions on Signal Processing, 2021.",
1195
+ "[2] F. Bourse, et al., \"Fast Homomorphic Evaluation of Deep Discretized Neural Networks,\" in Annual International Cryptology Conference, 2018.",
1196
+ "[3] Z. Brakerski, et al., (Leveled) Fully Homomorphic Encryption without Bootstrapping,\" ACM Transaction Computing Theory, 6(3), July 2014.",
1197
+ "[4] A. Brutzkus, et al., \"Low Latency Privacy Preserving Inference,\" in International Conference on Machine Learning, pages 812-821, 2019.",
1198
+ "[5] J. H. Cheon, et al., \"Remark on the Security of CKKS Scheme in Practice,\" Cryptology ePrint Archive, Report 2020/1581, 2020, https://eprint.iacr.org/2020/1581.",
1199
+ "[6] I. Chillotti, et al., \"TFHE: Fast Fully Homomorphic Encryption Over The Torus,\" Journal of Cryptology, 33(1):34-91, 2020.",
1200
+ "[7] W. Dai, \"CUDA-accelerated Fully Homomorphic Encryption Library\", https://github.com/vernamlab/cuFHE, 2018, worcester Polytechnic Institute.",
1201
+ "[8] L. Ducas and D. Miccianio, “FHEW: Bootstrapping Homomorphic Encryption in Less than A Second,” in International Conference on the Theory and Applications of Cryptographic Techniques, pages 617–640, Springer, 2015.",
1202
+ "[9] J. Fan and F. Vercauteren, \"Somewhat Practical Fully Homomorphic Encryption,\" Cryptology ePrint Archive, Report 2012/144, 2012.",
1203
+ "[10] S. Gener, et al., \"An FPGA-based Programmable Vector Engine for Fast Fully Homomorphic Encryption over the Torus,\" SPSL: Secure and Private Systems for Machine Learning, 2021.",
1204
+ "[11] S. Halevi and V. Shoup, \"Bootstrapping for HElib,\" in International conference on the theory and applications of cryptographic techniques, 2015.",
1205
+ "[12] C. J. Hoofnagle, et al., \"The European Union General Data Protection Regulation: What It Is & What It Means,\" Information & Communications Technology Law, 2019.",
1206
+ "[13] Z. Liu, et al., \"High-Performance Ideal Lattice-Based Cryptography on 8-Bit AVR Microcontrollers,\" ACM Transactions on Embedded Computing Systems, 16(4), July 2017, https://doi.org/10.1145/3092951.",
1207
+ "[14] K. Matsuoka, et al., \"Virtual Secure Platform: A Five-Stage Pipeline Processor over TFHE,\" in USENIX Security Symposium, pages 4007-4024, 2021.",
1208
+ "[15] A. C. Mert, et al., \"A Flexible and Scalable NTT Hardware: Applications from Homomorphically Encrypted Deep Learning to Post-Quantum Cryptography,\" in Design, Automation & Test in Europe Conference & Exhibition, 2020.",
1209
+ "[16] T. Morshed, et al., \"CPU and GPU Accelerated Fully Homomorphic Encryption,\" in IEEE International Symposium on Hardware Oriented Security and Trust, pages 142-153, 2020.",
1210
+ "[17] S. Oraintara, et al., \"Integer fast Fourier transform,\" IEEE Transactions on Signal Processing, 50(3):607-618, 2002.",
1211
+ "[18] M. S. Riazi, et al., \"HEAX: An Architecture for Computing on Encrypted Data,\" in ACM International Conference on Architectural Support for Programming Languages and Operating Systems, 2020.",
1212
+ "[19] N. Samardzic, et al., \"F1: A Fast and Programmable Accelerator for Fully Homomorphic Encryption,\" in IEEE/ACM International Symposium on Microarchitecture, 2021.",
1213
+ "[20] S. Sinha Roy, et al., \"FPGA-Based High-Performance Parallel Architecture for Homomorphic Computing on Encrypted Data,\" in IEEE International Symposium on High Performance Computer Architecture, pages 387-398, 2019.",
1214
+ "[21] C. Tan, et al., \"OpenCGRA: An Open-Source Unified Framework for Modeling, Testing, and Evaluating CGRAs,\" in 2020 IEEE 38th International Conference on Computer Design, pages 381-388, 2020.",
1215
+ "[22] T. Zhou, et al., \"Faster Bootstrapping With Multiple Addends,\" IEEE Access, 6:49868-49876, 2018."
1216
+ ],
1217
+ "bbox": [
1218
+ 517,
1219
+ 316,
1220
+ 913,
1221
+ 859
1222
+ ],
1223
+ "page_idx": 5
1224
+ },
1225
+ {
1226
+ "type": "header",
1227
+ "text": "DAC '22, July 10-14, 2022, San Francisco, CA, USA",
1228
+ "bbox": [
1229
+ 84,
1230
+ 75,
1231
+ 323,
1232
+ 85
1233
+ ],
1234
+ "page_idx": 5
1235
+ }
1236
+ ]
2202.08xxx/2202.08814/0e4977c0-9108-464d-ba63-1e8d8819dfe2_model.json ADDED
@@ -0,0 +1,1653 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.265,
8
+ 0.058,
9
+ 0.708
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2202.08814v1 [cs.CR] 17 Feb 2022"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.12,
18
+ 0.101,
19
+ 0.88,
20
+ 0.152
21
+ ],
22
+ "angle": 0,
23
+ "content": "MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.191,
29
+ 0.163,
30
+ 0.27,
31
+ 0.18
32
+ ],
33
+ "angle": 0,
34
+ "content": "Lei Jiang*"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.178,
40
+ 0.181,
41
+ 0.285,
42
+ 0.194
43
+ ],
44
+ "angle": 0,
45
+ "content": "jiang60@iu.edu"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.168,
51
+ 0.195,
52
+ 0.296,
53
+ 0.21
54
+ ],
55
+ "angle": 0,
56
+ "content": "Indiana University"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.461,
62
+ 0.164,
63
+ 0.539,
64
+ 0.179
65
+ ],
66
+ "angle": 0,
67
+ "content": "Qian Lou"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.456,
73
+ 0.181,
74
+ 0.544,
75
+ 0.194
76
+ ],
77
+ "angle": 0,
78
+ "content": "qlou@iu.edu"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.437,
84
+ 0.195,
85
+ 0.563,
86
+ 0.21
87
+ ],
88
+ "angle": 0,
89
+ "content": "Indiana University"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.71,
95
+ 0.163,
96
+ 0.827,
97
+ 0.179
98
+ ],
99
+ "angle": 0,
100
+ "content": "Nrushad Joshi"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.711,
106
+ 0.181,
107
+ 0.825,
108
+ 0.194
109
+ ],
110
+ "angle": 0,
111
+ "content": "nrujoshi@iu.edu"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.705,
117
+ 0.195,
118
+ 0.832,
119
+ 0.21
120
+ ],
121
+ "angle": 0,
122
+ "content": "Indiana University"
123
+ },
124
+ {
125
+ "type": "title",
126
+ "bbox": [
127
+ 0.084,
128
+ 0.219,
129
+ 0.158,
130
+ 0.233
131
+ ],
132
+ "angle": 0,
133
+ "content": "Abstract"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.082,
139
+ 0.236,
140
+ 0.483,
141
+ 0.43
142
+ ],
143
+ "angle": 0,
144
+ "content": "Fully Homomorphic Encryption over the Torus (TFHE) allows arbitrary computations to happen directly on ciphertexts using homomorphic logic gates. However, each TFHE gate on state-of-the-art hardware platforms such as GPUs and FPGAs is extremely slow (\\(>0.2ms\\)). Moreover, even the latest FPGA-based TFHE accelerator cannot achieve high energy efficiency, since it frequently invokes expensive double-precision floating point FFT and IFFT kernels. In this paper, we propose a fast and energy-efficient accelerator, MATCHA, to process TFHE gates. MATCHA supports aggressive bootstrapping key unrolling to accelerate TFHE gates without decryption errors by approximate multiplication-less integer FFTs and IFFTs, and a pipelined datapath. Compared to prior accelerators, MATCHA improves the TFHE gate processing throughput by \\(2.3\\times\\) and the throughput per Watt by \\(6.3\\times\\)."
145
+ },
146
+ {
147
+ "type": "title",
148
+ "bbox": [
149
+ 0.084,
150
+ 0.434,
151
+ 0.202,
152
+ 0.45
153
+ ],
154
+ "angle": 0,
155
+ "content": "CCS Concepts"
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.084,
161
+ 0.451,
162
+ 0.482,
163
+ 0.479
164
+ ],
165
+ "angle": 0,
166
+ "content": "- Hardware \\(\\rightarrow\\) Application-specific VLSI designs; \\(\\cdot\\) Security and privacy \\(\\rightarrow\\) Cryptography."
167
+ },
168
+ {
169
+ "type": "title",
170
+ "bbox": [
171
+ 0.084,
172
+ 0.482,
173
+ 0.169,
174
+ 0.498
175
+ ],
176
+ "angle": 0,
177
+ "content": "Keywords"
178
+ },
179
+ {
180
+ "type": "text",
181
+ "bbox": [
182
+ 0.084,
183
+ 0.5,
184
+ 0.478,
185
+ 0.515
186
+ ],
187
+ "angle": 0,
188
+ "content": "accelerator, fully homomorphic encryption, TFHE, bootstrapping"
189
+ },
190
+ {
191
+ "type": "title",
192
+ "bbox": [
193
+ 0.084,
194
+ 0.519,
195
+ 0.231,
196
+ 0.53
197
+ ],
198
+ "angle": 0,
199
+ "content": "ACM Reference Format:"
200
+ },
201
+ {
202
+ "type": "text",
203
+ "bbox": [
204
+ 0.084,
205
+ 0.531,
206
+ 0.483,
207
+ 0.582
208
+ ],
209
+ "angle": 0,
210
+ "content": "Lei Jiang, Qian Lou, and Nrushad Joshi. 2022. MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus. In The 59th Annual Design Automation Conference 2022 (DAC '22), July 10-14, 2022, San Francisco, CA, USA. ACM, New York, NY, USA, 6 pages."
211
+ },
212
+ {
213
+ "type": "title",
214
+ "bbox": [
215
+ 0.085,
216
+ 0.595,
217
+ 0.219,
218
+ 0.608
219
+ ],
220
+ "angle": 0,
221
+ "content": "1 Introduction"
222
+ },
223
+ {
224
+ "type": "text",
225
+ "bbox": [
226
+ 0.082,
227
+ 0.611,
228
+ 0.483,
229
+ 0.667
230
+ ],
231
+ "angle": 0,
232
+ "content": "In cloud computing, it is dangerous for clients upload their raw data to untrusted cloud servers, due to potential data breaches. Moreover, recent legislation [12] requires cloud computing enterprises to provide sufficient security for clients' personal data."
233
+ },
234
+ {
235
+ "type": "text",
236
+ "bbox": [
237
+ 0.082,
238
+ 0.667,
239
+ 0.483,
240
+ 0.751
241
+ ],
242
+ "angle": 0,
243
+ "content": "Recently, Fully Homomorphic Encryption (FHE) [3, 5, 6] emerges as one of the most promising cryptographic solutions to allowing arbitrary computations on encrypted data in untrusted cloud servers. Compared to Secure Multi-Party Computation, FHE requires neither frequent communications between clients and cloud servers, nor significant circuit garbling overhead on the client side. FHE"
244
+ },
245
+ {
246
+ "type": "text",
247
+ "bbox": [
248
+ 0.083,
249
+ 0.759,
250
+ 0.483,
251
+ 0.782
252
+ ],
253
+ "angle": 0,
254
+ "content": "*This work was partially supported by NSF through awards CCF-1908992, CCF-1909509, and CCF-210597. Work done while Nrushad Joshi was at UROC@Luddy IU."
255
+ },
256
+ {
257
+ "type": "text",
258
+ "bbox": [
259
+ 0.082,
260
+ 0.791,
261
+ 0.483,
262
+ 0.863
263
+ ],
264
+ "angle": 0,
265
+ "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org."
266
+ },
267
+ {
268
+ "type": "text",
269
+ "bbox": [
270
+ 0.085,
271
+ 0.863,
272
+ 0.318,
273
+ 0.874
274
+ ],
275
+ "angle": 0,
276
+ "content": "DAC '22, July 10-14, 2022, San Francisco, CA, USA"
277
+ },
278
+ {
279
+ "type": "text",
280
+ "bbox": [
281
+ 0.085,
282
+ 0.875,
283
+ 0.304,
284
+ 0.886
285
+ ],
286
+ "angle": 0,
287
+ "content": "© 2022 Association for Computing Machinery."
288
+ },
289
+ {
290
+ "type": "table_caption",
291
+ "bbox": [
292
+ 0.531,
293
+ 0.218,
294
+ 0.897,
295
+ 0.23
296
+ ],
297
+ "angle": 0,
298
+ "content": "Table 1: The comparison between various HE schemes."
299
+ },
300
+ {
301
+ "type": "table",
302
+ "bbox": [
303
+ 0.588,
304
+ 0.234,
305
+ 0.842,
306
+ 0.306
307
+ ],
308
+ "angle": 0,
309
+ "content": "<table><tr><td>Scheme</td><td>FHE Op.</td><td>Data Type</td><td>Bootstrapping</td></tr><tr><td>BGV [3]</td><td>mult, add</td><td>integer</td><td>~ 800s</td></tr><tr><td>BFV [9]</td><td>mult, add</td><td>integer</td><td>&gt; 1000s</td></tr><tr><td>CKKS [5]</td><td>mult, add</td><td>fixed point</td><td>~ 500s</td></tr><tr><td>FHEW [8]</td><td>Boolean</td><td>binary</td><td>&lt; 1s</td></tr><tr><td>TFHE [6]</td><td>Boolean</td><td>binary</td><td>13ms</td></tr></table>"
310
+ },
311
+ {
312
+ "type": "text",
313
+ "bbox": [
314
+ 0.513,
315
+ 0.312,
316
+ 0.915,
317
+ 0.422
318
+ ],
319
+ "angle": 0,
320
+ "content": "enables a client to encrypt her data and to send only ciphertexts to a cloud server that can directly evaluate homomorphic functions, e.g., encrypted neural inferences [4] or encrypted general-purpose computing [14], on the ciphertexts. When all computations are completed, the server returns the encrypted results to the client without learning any intermediate or final output, due to the end-to-end encrypted data flow. Only the client can decrypt the results by her secret key."
321
+ },
322
+ {
323
+ "type": "text",
324
+ "bbox": [
325
+ 0.513,
326
+ 0.423,
327
+ 0.915,
328
+ 0.741
329
+ ],
330
+ "angle": 0,
331
+ "content": "Among all FHE cryptosystems, FHE over the Torus (TFHE) [6] is the most efficient scheme supporting arbitrary operations with an unlimited computation depth, as shown in Table 1. First, TFHE supports arbitrary operations by various homomorphic Boolean logic gates. Traditional FHE schemes such as BGV [3], BFV [9], and CKKS [5] can perform only homomorphic additions and multiplications, while both FHEW [8] and TFHE [6] can enable homomorphic Boolean algebra, e.g., NAND, XOR, and XNOR gates. Second, TFHE obtains the fastest bootstrapping. Each FHE operation inevitably introduces a certain amount of noise into the ciphertext. If there are too many FHE operations on the computational critical path, the accumulated noise in the ciphertext may exceed a threshold, and thus the ciphertext cannot be decrypted successfully. To support an unlimited computation depth, a FHE scheme has to periodically invoke a bootstrapping operation to decrease the amount of noise in the ciphertext. The bootstrapping operation is extremely expensive for BGV, BFV, and CKKS. For example, a BGV bootstrapping typically costs several hundred seconds [11]. Therefore, these FHE schemes can support only a limited computation depth by designing a large enough noise budget. Although a bootstrapping of FHEW takes only 1s, TFHE can obtain a even faster bootstrapping, i.e., a TFHE bootstrapping requires only \\(13ms\\) on a CPU. By fast bootstrapping, TFHE allows an unlimited computation depth."
332
+ },
333
+ {
334
+ "type": "text",
335
+ "bbox": [
336
+ 0.513,
337
+ 0.741,
338
+ 0.915,
339
+ 0.879
340
+ ],
341
+ "angle": 0,
342
+ "content": "Unfortunately, a TFHE-based complex circuit consisting of multiple TFHE gates is still extremely slow. For instance, a TFHE-based simple RISC-V CPU [14] comprising thousands of TFHE gates can run at only \\(1.25Hz\\). In order to realize practical TFHE-based computing, it is critical to accelerate TFHE gates by specialized hardware. However, TFHE is only well-implemented on CPUs [16] and GPUs [7]. Although a recent work [10] accelerates TFHE gates on a FPGA, the TFHE gate latency on the FPGA is much longer than that on a GPU. To the best of our knowledge, there is no ASIC-based hardware accelerator for TFHE."
343
+ }
344
+ ],
345
+ [
346
+ {
347
+ "type": "header",
348
+ "bbox": [
349
+ 0.085,
350
+ 0.076,
351
+ 0.325,
352
+ 0.087
353
+ ],
354
+ "angle": 0,
355
+ "content": "DAC '22, July 10-14, 2022, San Francisco, CA, USA"
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.082,
361
+ 0.107,
362
+ 0.483,
363
+ 0.244
364
+ ],
365
+ "angle": 0,
366
+ "content": "In this paper, we propose a fast and energy-efficient accelerator, MATCHA, to process TFHE gates. We find that the bootstrapping dominates the latency of all TFHE logic operations. The kernels of fast Fourier transform (FFT) and inverse FFT (IFFT) are the bottlenecks in a bootstrapping operation. MATCHA is designed to accelerate the TFHE bootstrapping using approximate multiplication-less integer FFTs and IFFTs. We also propose a pipelined datapath for MATCHA to support aggressive bootstrapping key unrolling [2, 22] that invokes FFTs and IFFTs less frequently. Our contributions can be summarized as follows."
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.084,
372
+ 0.246,
373
+ 0.483,
374
+ 0.342
375
+ ],
376
+ "angle": 0,
377
+ "content": "- In order to fully take advantage of the error tolerance capability of TFHE, MATCHA accelerates polynomial multiplications by approximate multiplication-less integer FFTs and IFFTs requiring only additions and binary shifts. Although approximate FFTs and IFFTs introduce errors in each ciphertext, the ciphertext can still be correctly decrypted, since the errors can be rounded off along with the noise during decryption."
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.084,
383
+ 0.343,
384
+ 0.483,
385
+ 0.426
386
+ ],
387
+ "angle": 0,
388
+ "content": "- We build a pipelined datapath consisting of TGSW clusters and external product cores to enable aggressive bootstrapping key unrolling that invokes FFTs and IFFTs less frequently during a bootstrapping operation. The datapath uses different register banks to serve sequential memory accesses during TGSW operations, and irregular memory accesses during FFTs and IFFTs."
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.084,
394
+ 0.426,
395
+ 0.483,
396
+ 0.482
397
+ ],
398
+ "angle": 0,
399
+ "content": "- We implemented, evaluated, and compared MATCHA against prior TFHE hardware accelerators. Compared to prior accelerators, MATCHA improves the TFHE gate processing throughput by \\(2.3 \\times\\), and the throughput per Watt by \\(6.3 \\times\\)."
400
+ },
401
+ {
402
+ "type": "list",
403
+ "bbox": [
404
+ 0.084,
405
+ 0.246,
406
+ 0.483,
407
+ 0.482
408
+ ],
409
+ "angle": 0,
410
+ "content": null
411
+ },
412
+ {
413
+ "type": "title",
414
+ "bbox": [
415
+ 0.084,
416
+ 0.498,
417
+ 0.214,
418
+ 0.513
419
+ ],
420
+ "angle": 0,
421
+ "content": "2 Background"
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.082,
427
+ 0.521,
428
+ 0.483,
429
+ 0.59
430
+ ],
431
+ "angle": 0,
432
+ "content": "FHE. Fully Homomorphic Encryption (FHE) enables arbitrary operations on ciphertexts. A FHE operation \\(\\diamond\\) is defined if there is another operation \\(\\star\\) such that \\(Dec[Enc(x_1)\\diamond Enc(x_2)] = Dec[Enc(x_1\\star x_2)]\\), where \\(x_{1}\\) and \\(x_{2}\\) are input plaintexts, \\(Enc\\) indicates encryption, and \\(Dec\\) is decryption."
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.082,
438
+ 0.591,
439
+ 0.483,
440
+ 0.719
441
+ ],
442
+ "angle": 0,
443
+ "content": "Notation. \\(\\mathbb{T}\\) denotes the torus of real numbers modulo \\(1, \\mathbb{R} / \\mathbb{Z}\\). For any ring \\(\\mathcal{R}\\), polynomials of the variable \\(X\\) with coefficients in \\(\\mathcal{R}\\) are represented by \\(\\mathcal{R}[X]\\). We define \\(\\mathbb{R}_N[X] := \\mathbb{R}[X] / (X^N + 1)\\), \\(\\mathbb{Z}_N[X] := \\mathbb{Z}[X] / (X^N + 1)\\), and \\(\\mathbb{T}_N[X] := \\mathbb{R}_N[X] / \\mathbb{Z}_N[X]\\), which are the ring of polynomials of variable \\(X\\) with quotient \\(X^N + 1\\) and real coefficients modulo \\(1\\). \\(\\mathbb{B} := \\{0, 1\\}\\) is a set, and we write vectors in bold. Given a set \\(S\\), we write \\(\\mathbf{s} \\stackrel{\\$}{\\leftarrow} S\\) to indicate that \\(\\mathbf{s}\\) is sampled uniformly at random from \\(S\\). We write \\(e \\gets X\\) to denote that \\(e\\) is sampled according to \\(X\\)."
444
+ },
445
+ {
446
+ "type": "text",
447
+ "bbox": [
448
+ 0.084,
449
+ 0.72,
450
+ 0.483,
451
+ 0.747
452
+ ],
453
+ "angle": 0,
454
+ "content": "TFHE. In TFHE [6], we assume \\( m \\in \\mathbb{B} \\) is a plaintext. The encryption scheme works as follows:"
455
+ },
456
+ {
457
+ "type": "text",
458
+ "bbox": [
459
+ 0.084,
460
+ 0.748,
461
+ 0.483,
462
+ 0.794
463
+ ],
464
+ "angle": 0,
465
+ "content": "- Setup(λ) first selects public parameters \\( n = n(\\lambda) \\), and \\( \\sigma = \\sigma(\\lambda) \\), where \\( \\lambda \\) is the security parameter. It samples and produces a secret key \\( s \\stackrel{\\$}{\\leftarrow} \\mathbb{B}^n \\)."
466
+ },
467
+ {
468
+ "type": "text",
469
+ "bbox": [
470
+ 0.084,
471
+ 0.796,
472
+ 0.483,
473
+ 0.854
474
+ ],
475
+ "angle": 0,
476
+ "content": "- Enc[s, m] samples a uniformly random vector \\(\\mathbf{a} \\stackrel{\\$}{\\leftarrow} \\mathbb{T}^n\\) and a noise \\(e \\gets \\mathcal{D}_{\\mathbb{T}_N[X], \\sigma}\\), where \\(\\mathcal{D}_{\\mathbb{T}_N[X], \\sigma}\\) is the Gaussian distribution over \\(\\mathbb{T}_N[X]\\) with a standard deviation \\(\\sigma\\). It outputs a ciphertext \\((\\mathbf{a}, b)\\), where \\(b = \\mathbf{a} \\cdot \\mathbf{s} + e + m/2\\)."
477
+ },
478
+ {
479
+ "type": "text",
480
+ "bbox": [
481
+ 0.084,
482
+ 0.855,
483
+ 0.483,
484
+ 0.897
485
+ ],
486
+ "angle": 0,
487
+ "content": "- \\( \\text{Dec}[\\mathbf{s}, (\\mathbf{a}, b)] \\) returns \\( \\lceil 2(b - \\mathbf{a} \\cdot \\mathbf{s}) \\rceil \\). It outputs plaintext correctly if the size of noise \\( e \\) is bounded as \\( |e| < 1/4 \\), since \\( 2(b - \\mathbf{a} \\cdot \\mathbf{s}) = 2e + m \\), \\( |2e| < 1/2 \\), and thus \\( \\lceil 2(b - \\mathbf{a} \\cdot \\mathbf{s}) \\rceil = m \\)."
488
+ },
489
+ {
490
+ "type": "list",
491
+ "bbox": [
492
+ 0.084,
493
+ 0.748,
494
+ 0.483,
495
+ 0.897
496
+ ],
497
+ "angle": 0,
498
+ "content": null
499
+ },
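The Setup/Enc/Dec triple above can be exercised end to end with a toy floating-point model. The sketch below is ours; the parameters are illustrative and it offers no real security; it only demonstrates why |e| < 1/4 guarantees correct decryption:

```python
import random

def tlwe_roundtrip(n=630, sigma=2 ** -15, trials=1000):
    """Toy LWE-over-the-torus: b = <a, s> + e + m/2 (mod 1); Dec rounds 2*phase."""
    random.seed(1)
    s = [random.getrandbits(1) for _ in range(n)]            # Setup: s <- B^n
    for _ in range(trials):
        m = random.getrandbits(1)
        a = [random.random() for _ in range(n)]              # Enc: a <- T^n
        e = random.gauss(0, sigma)
        b = (sum(ai * si for ai, si in zip(a, s)) + e + m / 2) % 1.0
        phase = (b - sum(ai * si for ai, si in zip(a, s))) % 1.0   # Dec: b - <a,s>
        assert round(2 * phase) % 2 == m                     # = round(2e + m) mod 2

tlwe_roundtrip()
```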
500
+ {
501
+ "type": "code_caption",
502
+ "bbox": [
503
+ 0.523,
504
+ 0.109,
505
+ 0.845,
506
+ 0.123
507
+ ],
508
+ "angle": 0,
509
+ "content": "Algorithm 1: The bootstrapping operation of TFHE."
510
+ },
511
+ {
512
+ "type": "algorithm",
513
+ "bbox": [
514
+ 0.52,
515
+ 0.125,
516
+ 0.915,
517
+ 0.325
518
+ ],
519
+ "angle": 0,
520
+ "content": "Input: A TLWE sample \\((\\mathbf{a},b)\\) whose plaintext is \\(m_{in}\\); a constant \\(m_{set}\\); a bootstrapping key \\(\\mathbf{BK}_{\\mathbf{s} \\rightarrow \\mathbf{s}^{\\prime \\prime},\\alpha}\\); and a key-switching key \\(\\mathbf{KS}_{\\mathbf{s}^{\\prime} \\rightarrow \\mathbf{s},\\mathbf{y}^{\\prime}}\\) (\\(\\mathbf{s}^{\\prime} = \\mathbf{KeyExtract}(\\mathbf{s}^{\\prime \\prime})\\)). Output: A TLWE sample encrypting \\(m_{out} = m_{in} \\cdot m_{set}\\). \n1 \\(\\mu = m_{set} / 2, \\mu^{\\prime} = \\mu / 2\\) /* Initialization */ \n2 \\(\\bar{b} = \\lceil 2Nb \\rceil, \\bar{a}_i = \\lceil 2Na_i \\rceil\\) for each \\(i \\in [1,n] \\wedge\\) Rounding */ \n3 \\(testv = (1 + X + \\ldots + X^{N+1}) \\cdot X^{N/2} \\cdot \\mu'\\) \n4 ACC \\(\\leftarrow X^{\\bar{b}} \\cdot (0, testv) / *\\) ACC = TLWE\\((X^{(\\bar{b}-\\bar{a}s)} \\cdot testv)\\) */ \n5 for \\(i = 1\\) to \\(n\\) do \n6 \\(\\mathbf{BK}_i = \\mathbf{h} + (X^{-\\bar{a}_i} - 1) \\cdot \\mathbf{BK}_i\\) \n7 ACC \\(\\leftarrow \\mathbf{BK}_i \\square ACC / *\\) BlindRotate */ \n8 \\(\\mathbf{u} = (0,\\mu') + SampleExtract(ACC) / *\\) Extract */ \n9 return KeySwitchKS(u) /* KeySwitch */"
521
+ },
522
+ {
523
+ "type": "text",
524
+ "bbox": [
525
+ 0.515,
526
+ 0.335,
527
+ 0.915,
528
+ 0.39
529
+ ],
530
+ "angle": 0,
531
+ "content": "- Logic \\([c_0, c_1]\\) returns the ciphertext of the result of the logic operation between two ciphertexts \\(c_0\\) and \\(c_1\\), and the logic operation can be XOR, NAND, AND, and OR. A TFHE logic operation involves an addition between \\(c_0\\) and \\(c_1\\), and a bootstrapping."
532
+ },
533
+ {
534
+ "type": "text",
535
+ "bbox": [
536
+ 0.514,
537
+ 0.39,
538
+ 0.915,
539
+ 0.501
540
+ ],
541
+ "angle": 0,
542
+ "content": "TLWE. TLWE is a torus analogue of the learning with error (LWE) problem [3]. \\( k \\) is a positive integer. \\( N \\) is a power of 2, and \\( \\mathcal{X} \\) is a probability distribution over \\( \\mathbb{R}_N[X] \\). A TLWE secret key \\( \\bar{\\mathbf{s}} \\) is a vector of \\( k \\) polynomials over \\( \\mathbb{Z}_N[X] \\) with binary coefficients, denoted as \\( \\bar{\\mathbf{s}} \\in \\mathbb{R}_N[X]^k \\). Given a polynomial message \\( \\mu \\in \\mathbb{T}_N[X] \\), a TLWE ciphertext of \\( \\mu \\) under the key \\( \\bar{\\mathbf{s}} \\) is a TLWE sample \\( (\\bar{\\mathbf{a}},\\bar{b}) \\in \\mathbb{T}_N[X]^k \\times \\mathbb{T}_N[X] \\), where \\( \\bar{\\mathbf{a}} \\gets \\mathbb{T}_N[X]^k \\) and \\( \\bar{b} = \\bar{\\mathbf{s}}\\cdot \\bar{\\mathbf{a}} +\\mu +e \\), where \\( e\\gets \\mathcal{X} \\)."
543
+ },
544
+ {
545
+ "type": "text",
546
+ "bbox": [
547
+ 0.514,
548
+ 0.502,
549
+ 0.915,
550
+ 0.599
551
+ ],
552
+ "angle": 0,
553
+ "content": "TGSW. TGSW is the matrix extension of TLWE. Each row of a TGSW sample is a TLWE sample. An external product \\(\\boxdot\\) that maps \\(\\boxdot\\): \\(TGSW \\times TWLE \\rightarrow TLWE\\) can be defined by TFHE [6]. The product of the TGSW ciphertext of a polynomial message \\(\\mu_{TGSW} \\in \\mathbb{T}_N[X]\\) and the TLWE ciphertext of a polynomial message \\(\\mu_{TLWE} \\in \\mathbb{T}_N[X]\\) becomes a TLWE ciphertext of a polynomial message \\(\\mu_{TGSW} \\cdot \\mu_{TLWE} \\in \\mathbb{T}_N[X]\\)"
554
+ },
555
+ {
556
+ "type": "text",
557
+ "bbox": [
558
+ 0.513,
559
+ 0.599,
560
+ 0.916,
561
+ 0.837
562
+ ],
563
+ "angle": 0,
564
+ "content": "Bootstrapping. Each TFHE logic operation inevitably introduces a certain amount of noise into the resulting ciphertext. A bootstrapping has to be performed to remove the noise at the end of each TFHE logic operation. In various TFHE logic operations, the bootstrapping step is the largest performance bottleneck. The details of a TFHE bootstrapping can be viewed in [6]. The bootstrapping procedure is shown in Algorithm 1. The dimension of the TLWE sample is set as \\( k = 1 \\) [6], which means that the TLWE sample is simply the Ring-LWE sample \\( (\\bar{a},\\bar{b})\\in \\mathbb{T}_N[X]\\times \\mathbb{T}_N[X] \\). The most computationally intensive step of a bootstrapping is the homomorphic decryption in line 7, where the message of ACC becomes a polynomial \\( X^{\\bar{b} -\\bar{\\mathrm{as}}}\\cdot testv \\). Particularly, homomorphically computing \\( X^{-\\bar{\\mathrm{as}}} = X^{\\sum_{i = 1}^{n} - \\bar{\\mathrm{a}}_i\\bar{s}_i} = \\prod_{i = 1}^{n}X^{-\\bar{\\mathrm{a}}_i\\bar{s}_i} \\) involves a great number of polynomial multiplications. Naively multiplying two degree \\( N \\) polynomials has the complexity of \\( O(N^2) \\). FFT and IFFT are used to reduce the complexity of a polynomial multiplication to \\( O(N\\log (N)) \\) [7], where \\( N \\) is the degree of polynomials."
565
+ },
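Concretely, the O(N log(N)) route is a negacyclic convolution: twist the coefficients by a 2N-th root of unity, transform, multiply pointwise, and untwist. The self-contained sketch below is our own illustration, with a naive O(N^2) DFT standing in for the FFT to keep it short:

```python
import cmath

def dft(x, inverse=False):
    N, sign = len(x), (1 if inverse else -1)
    out = [sum(xn * cmath.exp(sign * 2j * cmath.pi * k * n / N)
               for n, xn in enumerate(x)) for k in range(N)]
    return [v / N for v in out] if inverse else out

def negacyclic_mul(p, q):
    """Product of two integer coefficient lists mod X^N + 1."""
    N = len(p)
    psi = [cmath.exp(1j * cmath.pi * k / N) for k in range(N)]  # 2N-th roots
    P = dft([a * w for a, w in zip(p, psi)])                    # twist + forward
    Q = dft([b * w for b, w in zip(q, psi)])
    R = dft([a * b for a, b in zip(P, Q)], inverse=True)        # pointwise + back
    return [round((r / w).real) for r, w in zip(R, psi)]        # untwist

assert negacyclic_mul([1, 2], [3, 4]) == [-5, 10]  # (1+2X)(3+4X) mod X^2+1
```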
566
+ {
567
+ "type": "text",
568
+ "bbox": [
569
+ 0.514,
570
+ 0.837,
571
+ 0.925,
572
+ 0.879
573
+ ],
574
+ "angle": 0,
575
+ "content": "Torus Implementation. Theoretically, the scale invariant scheme of TFHE is defined over the real torus \\(\\mathbb{T}\\), where all operations are modulo 1. But TFHE rescales the elements over \\(\\mathbb{T}\\) by a factor \\(2^{32}\\),"
576
+ }
577
+ ],
578
+ [
579
+ {
580
+ "type": "header",
581
+ "bbox": [
582
+ 0.084,
583
+ 0.076,
584
+ 0.564,
585
+ 0.088
586
+ ],
587
+ "angle": 0,
588
+ "content": "MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus"
589
+ },
590
+ {
591
+ "type": "header",
592
+ "bbox": [
593
+ 0.674,
594
+ 0.076,
595
+ 0.914,
596
+ 0.087
597
+ ],
598
+ "angle": 0,
599
+ "content": "DAC '22, July 10-14, 2022, San Francisco, CA, USA"
600
+ },
601
+ {
602
+ "type": "image",
603
+ "bbox": [
604
+ 0.088,
605
+ 0.104,
606
+ 0.327,
607
+ 0.183
608
+ ],
609
+ "angle": 0,
610
+ "content": null
611
+ },
612
+ {
613
+ "type": "image_caption",
614
+ "bbox": [
615
+ 0.113,
616
+ 0.187,
617
+ 0.312,
618
+ 0.2
619
+ ],
620
+ "angle": 0,
621
+ "content": "Figure 1: Latency breakdown."
622
+ },
623
+ {
624
+ "type": "image",
625
+ "bbox": [
626
+ 0.338,
627
+ 0.106,
628
+ 0.531,
629
+ 0.183
630
+ ],
631
+ "angle": 0,
632
+ "content": null
633
+ },
634
+ {
635
+ "type": "image_caption",
636
+ "bbox": [
637
+ 0.337,
638
+ 0.184,
639
+ 0.534,
640
+ 0.198
641
+ ],
642
+ "angle": 0,
643
+ "content": "Figure 2: The depth-first FFT."
644
+ },
645
+ {
646
+ "type": "image",
647
+ "bbox": [
648
+ 0.541,
649
+ 0.104,
650
+ 0.898,
651
+ 0.183
652
+ ],
653
+ "angle": 0,
654
+ "content": null
655
+ },
656
+ {
657
+ "type": "image_caption",
658
+ "bbox": [
659
+ 0.553,
660
+ 0.186,
661
+ 0.884,
662
+ 0.2
663
+ ],
664
+ "angle": 0,
665
+ "content": "Figure 3: The lifting butterfly w/o multiplication."
666
+ },
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.082,
671
+ 0.204,
672
+ 0.483,
673
+ 0.287
674
+ ],
675
+ "angle": 0,
676
+ "content": "and maps them to 32-bit integers [6], since it can work with approximations. Therefore, TFHE does not have to actively perform modular reduction, since all operations on 32-bit integers implicitly call a native and automatic mod \\(2^{32}\\) operation. To maintain high conversion accuracy, TFHE uses 64-bit double-precision floating point FFT and IFFT kernels [6]."
677
+ },
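In software the same wrap-around can be made explicit with a 32-bit mask; a small illustration (ours):

```python
MASK32 = (1 << 32) - 1

def t32_add(a, b):
    """On T rescaled by 2**32, uint32 wrap-around is exactly the mod-1 reduction."""
    return (a + b) & MASK32

x = int(0.75 * 2 ** 32)                  # 0.75 on the torus
y = int(0.50 * 2 ** 32)                  # 0.50 on the torus
assert t32_add(x, y) / 2 ** 32 == 0.25   # (0.75 + 0.50) mod 1
```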
678
+ {
679
+ "type": "title",
680
+ "bbox": [
681
+ 0.084,
682
+ 0.293,
683
+ 0.356,
684
+ 0.307
685
+ ],
686
+ "angle": 0,
687
+ "content": "3 Related Work and Motivation"
688
+ },
689
+ {
690
+ "type": "text",
691
+ "bbox": [
692
+ 0.082,
693
+ 0.311,
694
+ 0.483,
695
+ 0.476
696
+ ],
697
+ "angle": 0,
698
+ "content": "Related Work. Except for some TFHE implementations on CPUs [6], GPUs [7], and FPGAs [10], there is no specialized hardware accelerator that can process TFHE. A TFHE accelerator differs from the accelerators designed for other FHE schemes such as BGV, BFV, and CKKS in two respects. First, although a few prior accelerators [19] support BGV and CKKS bootstrapping along a tiny-multiplicative-depth datapath, most prior works [15, 18, 20] design hardware accelerators to process leveled BFV or CKKS homomorphic operations without bootstrapping. However, a TFHE accelerator must perform bootstrapping at the end of each TFHE gate. Second, BGV, BFV, and CKKS require NTT and INTT kernels, while TFHE needs only FFT and IFFT kernels without modular reduction."
699
+ },
700
+ {
701
+ "type": "text",
702
+ "bbox": [
703
+ 0.082,
704
+ 0.478,
705
+ 0.483,
706
+ 0.603
707
+ ],
708
+ "angle": 0,
709
+ "content": "Motivation. A TFHE gate performs not only polynomial additions but also a bootstrapping (FFT+IFFT+other) that costs \\(99\\%\\) of the gate latency on a CPU, as shown in Figure 1. Therefore, in order to shorten the latency of TFHE gates, we need to accelerate the bootstrapping step in TFHE gates. Moreover, FFTs and IFFTs consume \\(80\\%\\) of the bootstrapping latency in various TFHE gates. In order to accelerate TFHE gates, MATCHA adopts approximate multiplication-less integer FFTs and IFFTs, and uses a pipelined datapath to support aggressive bootstrapping key unrolling [2, 22]."
710
+ },
711
+ {
712
+ "type": "title",
713
+ "bbox": [
714
+ 0.084,
715
+ 0.609,
716
+ 0.196,
717
+ 0.623
718
+ ],
719
+ "angle": 0,
720
+ "content": "4 MATCHA"
721
+ },
722
+ {
723
+ "type": "title",
724
+ "bbox": [
725
+ 0.084,
726
+ 0.629,
727
+ 0.449,
728
+ 0.645
729
+ ],
730
+ "angle": 0,
731
+ "content": "4.1 Approximate Fast Integer FFT and IFFT"
732
+ },
733
+ {
734
+ "type": "text",
735
+ "bbox": [
736
+ 0.082,
737
+ 0.647,
738
+ 0.483,
739
+ 0.744
740
+ ],
741
+ "angle": 0,
742
+ "content": "Despite the fact that elements over \\(\\mathbb{T}\\) are mapped to 32-bit integers, TFHE still uses 64-bit double-precision floating point FFT and IFFT kernels, since 32-bit integer or single-precision floating point FFT and IFFT kernels are not accurate enough to guarantee the correct decryption of a ciphertext [6]. However, processing 64-bit double-precision floating point FFT and IFFT kernels incurs significant hardware overhead and power consumption."
743
+ },
744
+ {
745
+ "type": "text",
746
+ "bbox": [
747
+ 0.082,
748
+ 0.744,
749
+ 0.483,
750
+ 0.897
751
+ ],
752
+ "angle": 0,
753
+ "content": "Novelty. For MATCHA, we are the first to identify the opportunity to use approximate integer FFTs and IFFTs to accelerate TFHE without introducing decryption errors. It is difficult to apply approximate NTTs and INTTs in accelerating other FHE schemes, e.g., BGV, BFV, and CKKS, which do not include a bootstrapping step after each homomorphic multiplication or addition. The errors introduced by approximate NTTs and INTTs quickly accumulate in the ciphertext and result in a decryption error if a bootstrapping step cannot be performed in time. In contrast, TFHE keeps the approximation errors of integer FFTs and IFFTs in check by performing a bootstrapping step at the end of each TFHE gate."
754
+ },
755
+ {
756
+ "type": "text",
757
+ "bbox": [
758
+ 0.513,
759
+ 0.204,
760
+ 0.916,
761
+ 0.675
762
+ ],
763
+ "angle": 0,
764
+ "content": "Depth-first FFT. Most prior FHE accelerators [18-20] perform NTTs and INTTs by the Cooley-Tukey data flow that introduces irregular memory accesses, particularly in its bit-reversal stage. In order to remove the bit-reversal overhead, a prior ideal-lattice-based cryptographic accelerator [13] uses the Cooley-Tukey flow for NTTs and the Gentleman-Sande flow for INTTs. These cryptographic accelerators store a polynomial mod \\( X^N + 1 \\) as a list of \\( N \\) coefficients. For each multiplication between two polynomials, they execute two NTT kernels on the two polynomials respectively, perform element-wise multiplications, and then run an INTT kernel on the result. The invocation ratio between NTTs and INTTs is \\( 2:1 \\). These FHE accelerators thus have many opportunities (i.e., switchings from NTT to INTT) to reduce the bit-reversal overhead. In contrast, TFHE saves a polynomial mod \\( X^N + 1 \\) as either a list of \\( N \\) coefficients or the Lagrange half-complex representation consisting of the complex evaluations of the polynomial over the roots of unity \\( \\exp(i(2j + 1)\\pi / N) \\) for \\( j \\in [0, \\frac{N}{2}) \\). FFT and IFFT kernels are required only during the conversion between these two representations. The invocation ratio between FFTs and IFFTs in a TFHE gate is \\( 1:4 \\). As Figure 1 shows, the latency of IFFT kernels is much longer than that of FFT kernels. TFHE does not have many opportunities to reduce the bit-reversal overhead. Instead, for MATCHA, we focus on decreasing the computing overhead of a single FFT or IFFT kernel. We adopt the depth-first iterative conjugate-pair FFT (CPFFT) algorithm [1]. Unlike the Cooley-Tukey or Gentleman-Sande flow, the CPFFT requires only a single complex root of unity read per radix-4 butterfly. Two butterflies in the same block can share the same twiddle factor, further halving the number of reads to the twiddle-factor buffer [1]. Moreover, the Cooley-Tukey and Gentleman-Sande flows process FFTs/IFFTs stage by stage in a breadth-first manner, as shown in Figure 2(a). To capture the spatial locality, as Figure 2(b) shows, CPFFT traverses the FFT flow in a depth-first fashion by completing a sub-transform before moving to the next."
765
+ },
766
+ {
767
+ "type": "text",
768
+ "bbox": [
769
+ 0.514,
770
+ 0.675,
771
+ 0.916,
772
+ 0.89
773
+ ],
774
+ "angle": 0,
775
+ "content": "A Multiplication-less Butterfly. The lifting structure [17], a special type of lattice structure implemented by cascading identity matrices with a single nonzero off-diagonal element, is proposed to approximate the multiplications in FFT and IFFT kernels by additions and binary shifts. The basic lifting step shown in Figure 3(a) can be expressed by \\( y_{j}(n) = x_{j}(n) \\), \\( y_{i}(n) = x_{i}(n) + \\lfloor T x_{j}(n)\\rceil \\), \\( z_{j}(n) = y_{j}(n) \\), and \\( z_{i}(n) = y_{i}(n) - \\lfloor T y_{j}(n)\\rceil \\), where \\( T \\) is a lifting coefficient and \\( \\lfloor \\cdot \\rceil \\) denotes rounding. Thus, the lifting structure with the rounding operation achieves an integer-to-integer transform. The lifting matrix and its inverse in this case are \\( \\begin{bmatrix} 1 & T \\\\ 0 & 1 \\end{bmatrix} \\) and \\( \\begin{bmatrix} 1 & T \\\\ 0 & 1 \\end{bmatrix}^{-1} = \\begin{bmatrix} 1 & -T \\\\ 0 & 1 \\end{bmatrix} \\), respectively. A floating-point lifting coefficient can be quantized as an approximate dyadic-valued coefficient \\( \\alpha /2^{\\beta} \\), and hence computed with only adders and shifters, where we allocate \\( \\beta \\)"
776
+ }
777
+ ],
778
+ [
779
+ {
780
+ "type": "header",
781
+ "bbox": [
782
+ 0.085,
783
+ 0.076,
784
+ 0.325,
785
+ 0.087
786
+ ],
787
+ "angle": 0,
788
+ "content": "DAC '22, July 10-14, 2022, San Francisco, CA, USA"
789
+ },
790
+ {
791
+ "type": "image",
792
+ "bbox": [
793
+ 0.123,
794
+ 0.105,
795
+ 0.253,
796
+ 0.208
797
+ ],
798
+ "angle": 0,
799
+ "content": null
800
+ },
801
+ {
802
+ "type": "image_caption",
803
+ "bbox": [
804
+ 0.106,
805
+ 0.211,
806
+ 0.268,
807
+ 0.238
808
+ ],
809
+ "content": "Figure 4: The truth table of \\( X^{-\\overline{a}_{2i-1}\\cdot s_{2i-1} - \\overline{a}_{2i}\\cdot s_{2i}} \\)."
810
+ "content": "Figure 4: The truth table of \\( X^{-\\overline{a_{2i - 1}}}\\cdot s_{2i - 1} - \\overline{a_{2i}}\\cdot s_{2i} \\)."
811
+ },
812
+ {
813
+ "type": "image",
814
+ "bbox": [
815
+ 0.297,
816
+ 0.105,
817
+ 0.455,
818
+ 0.207
819
+ ],
820
+ "angle": 0,
821
+ "content": null
822
+ },
823
+ {
824
+ "type": "image_caption",
825
+ "bbox": [
826
+ 0.296,
827
+ 0.21,
828
+ 0.456,
829
+ 0.238
830
+ ],
831
+ "angle": 0,
832
+ "content": "Figure 5: Bootstrapping key unrolling."
833
+ },
834
+ {
835
+ "type": "image",
836
+ "bbox": [
837
+ 0.089,
838
+ 0.242,
839
+ 0.318,
840
+ 0.334
841
+ ],
842
+ "angle": 0,
843
+ "content": null
844
+ },
845
+ {
846
+ "type": "image_caption",
847
+ "bbox": [
848
+ 0.131,
849
+ 0.336,
850
+ 0.293,
851
+ 0.351
852
+ ],
853
+ "angle": 0,
854
+ "content": "(a) the computing flow"
855
+ },
856
+ {
857
+ "type": "image",
858
+ "bbox": [
859
+ 0.327,
860
+ 0.244,
861
+ 0.477,
862
+ 0.334
863
+ ],
864
+ "angle": 0,
865
+ "content": null
866
+ },
867
+ {
868
+ "type": "image_caption",
869
+ "bbox": [
870
+ 0.348,
871
+ 0.336,
872
+ 0.457,
873
+ 0.351
874
+ ],
875
+ "angle": 0,
876
+ "content": "(b) the pipeline"
877
+ },
878
+ {
879
+ "type": "image_caption",
880
+ "bbox": [
881
+ 0.102,
882
+ 0.354,
883
+ 0.463,
884
+ 0.368
885
+ ],
886
+ "angle": 0,
887
+ "content": "Figure 6: The pipelined MATCHA for aggressive BKU."
888
+ },
889
+ {
890
+ "type": "text",
891
+ "bbox": [
892
+ 0.082,
893
+ 0.375,
894
+ 0.482,
895
+ 0.461
896
+ ],
897
+ "angle": 0,
898
+ "content": "bits to the lifting coefficient, and \\(\\alpha, \\beta \\in \\mathbb{N}\\). For example, a coefficient \\(9/128\\) can be decomposed as \\(\\frac{9}{128} = \\frac{2^3 + 2^0}{2^7} = \\frac{1}{2^4} + \\frac{1}{2^7}\\). Hence, the lifting with coefficient \\(9/128\\) and a rounding operation is replaced by the sum of a 4-bit and a 7-bit right shift of the input, as illustrated in Figure 3(b). Perfect reconstruction in lifting is always preserved, even when floating-point coefficients are approximated by dyadic-valued coefficients."
899
+ },
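+ A sketch of the dyadic shift-and-add replacement described above (our own illustration for nonnegative integer inputs; the function name is hypothetical):
+
+ ```python
+ def dyadic_mul(x, alpha, beta):
+     """Approximate round(T * x) for a lifting coefficient T ~ alpha / 2^beta
+     using only adds and shifts: one shifted copy of x per set bit of alpha."""
+     acc = 0
+     bit = 0
+     while alpha >> bit:
+         if (alpha >> bit) & 1:
+             acc += x << bit
+         bit += 1
+     return (acc + (1 << (beta - 1))) >> beta   # add-then-shift rounds to nearest
+
+ # T = 9/128 = 1/2^4 + 1/2^7, i.e., alpha = 9, beta = 7:
+ assert dyadic_mul(1000, 9, 7) == round(1000 * 9 / 128)
+ ```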
900
+ {
901
+ "type": "title",
902
+ "bbox": [
903
+ 0.084,
904
+ 0.462,
905
+ 0.456,
906
+ 0.478
907
+ ],
908
+ "angle": 0,
909
+ "content": "4.2 Aggressive Bootstrapping Key Unrolling"
910
+ },
911
+ {
912
+ "type": "text",
913
+ "bbox": [
914
+ 0.082,
915
+ 0.479,
916
+ 0.483,
917
+ 0.736
918
+ ],
919
+ "angle": 0,
920
+ "content": "Bootstrapping Key Unrolling. A TFHE bootstrapping needs to compute external products, i.e., \\( X^{-\\overline{\\mathbf{a}}\\cdot\\mathbf{s}} = X^{\\sum_{i=1}^{n} -\\overline{a}_i s_i} \\), sequentially, thereby becoming the performance bottleneck of a TFHE gate. Instead, bootstrapping key unrolling (BKU) [2, 22] is proposed to handle two terms, \\( X^{-\\overline{a}_{2i-1} s_{2i-1} -\\overline{a}_{2i} s_{2i}} \\), in each external product, so that the number of external products can be halved from \\( n \\) to \\( n/2 \\). The secret key \\( \\mathbf{s} \\) is sampled from \\( \\mathbb{B}^n \\), so \\( s_i \\in \\{0,1\\} \\), where \\( 1 \\leq i \\leq n \\). Based on the values of \\( s_{2i-1} \\) and \\( s_{2i} \\), the truth table of \\( X^{-\\overline{a}_{2i-1} s_{2i-1} -\\overline{a}_{2i} s_{2i}} \\) is shown in Figure 4. So BKU rewrites \\( X^{-\\overline{a}_{2i-1} \\cdot s_{2i-1} -\\overline{a}_{2i} \\cdot s_{2i}} \\) as \\( X^{-\\overline{a}_{2i-1} -\\overline{a}_{2i}} \\cdot s_{2i-1} s_{2i} + X^{-\\overline{a}_{2i-1}} \\cdot s_{2i-1}(1 - s_{2i}) + X^{-\\overline{a}_{2i}} \\cdot (1 - s_{2i-1}) s_{2i} + (1 - s_{2i-1})(1 - s_{2i}) \\). Due to the fact that \\( s_{2i-1} s_{2i} + (1 - s_{2i}) s_{2i-1} + s_{2i}(1 - s_{2i-1}) + (1 - s_{2i-1})(1 - s_{2i}) \\) is always equal to 1 [2], \\( X^{-\\overline{a}_{2i-1} \\cdot s_{2i-1} -\\overline{a}_{2i} \\cdot s_{2i}} \\) can be further simplified to \\( (X^{-\\overline{a}_{2i-1} -\\overline{a}_{2i}} - 1) \\cdot s_{2i-1} s_{2i} + (X^{-\\overline{a}_{2i-1}} - 1) \\cdot s_{2i-1}(1 - s_{2i}) + (X^{-\\overline{a}_{2i}} - 1) \\cdot (1 - s_{2i-1}) s_{2i} + 1 \\). As Figure 5 shows, BKU encrypts \\( s_{2i-1} s_{2i} \\), \\( s_{2i-1}(1 - s_{2i}) \\), and \\( (1 - s_{2i-1}) s_{2i} \\) as TGSW ciphertexts, and builds a bootstrapping key bundle that unrolls the original bootstrapping key by a factor of two; a numeric sanity check of this rewrite follows below."
921
+ },
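+ The one-hot rewrite above can be sanity-checked on plain, unencrypted exponents; a small sketch with an arbitrary base and exponents of our choosing:
+
+ ```python
+ def bku_rewrite_ok(a1, a2, s1, s2, X=1.01):
+     """Check X^(-a1*s1 - a2*s2) against the m = 2 key-bundle combination;
+     exactly one indicator product fires for each (s1, s2)."""
+     lhs = X ** (-a1 * s1 - a2 * s2)
+     rhs = ((X ** (-a1 - a2) - 1) * s1 * s2
+            + (X ** (-a1) - 1) * s1 * (1 - s2)
+            + (X ** (-a2) - 1) * (1 - s1) * s2
+            + 1)
+     return abs(lhs - rhs) < 1e-12
+
+ assert all(bku_rewrite_ok(3, 5, s1, s2) for s1 in (0, 1) for s2 in (0, 1))
+ ```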
922
+ {
923
+ "type": "text",
924
+ "bbox": [
925
+ 0.084,
926
+ 0.738,
927
+ 0.483,
928
+ 0.765
929
+ ],
930
+ "content": "Aggressive BKU Performs Badly on CPUs. BKU can be further generalized as"
931
+ "content": "Aggressive BKU Performing Badly on CPUs. BKU can be further generalized as"
932
+ },
933
+ {
934
+ "type": "equation",
935
+ "bbox": [
936
+ 0.145,
937
+ 0.771,
938
+ 0.482,
939
+ 0.793
940
+ ],
941
+ "angle": 0,
942
+ "content": "\\[\nX^{\\sum_{i = 1}^{\\frac{n}{m}} -\\overline{a}_{m(i-1)+1}\\, s_{m(i-1)+1} - \\overline{a}_{m(i-1)+2}\\, s_{m(i-1)+2} - \\dots - \\overline{a}_{m \\cdot i}\\, s_{m \\cdot i}}, \\tag{1}\n\\]"
943
+ },
944
+ {
945
+ "type": "text",
946
+ "bbox": [
947
+ 0.082,
948
+ 0.799,
949
+ 0.483,
950
+ 0.896
951
+ ],
952
+ "angle": 0,
953
+ "content": "where \\( m \\in [2, n] \\). So it is possible to more aggressively unroll the bootstrapping key by increasing \\( m \\). Although unrolling the bootstrapping key for two times (\\( m = 2 \\)) reduces the bootstrapping latency by \\( 49\\% \\), we find that further enlarging \\( m \\) beyond 2 even prolongs the bootstrapping latency on a CPU, as explained in Section 6. Our experimental methodology is described in Section 5. The reason can be summarized as follows."
954
+ },
955
+ {
956
+ "type": "image",
957
+ "bbox": [
958
+ 0.521,
959
+ 0.105,
960
+ 0.91,
961
+ 0.216
962
+ ],
963
+ "angle": 0,
964
+ "content": null
965
+ },
966
+ {
967
+ "type": "image_caption",
968
+ "bbox": [
969
+ 0.514,
970
+ 0.218,
971
+ 0.916,
972
+ 0.259
973
+ ],
974
+ "angle": 0,
975
+ "content": "Figure 7: The architecture of MATCHA (mem. ctrl: memory controller; addr gen.: address generation; twid: twiddle factor; butt.: butterfly; and shift.: shifter)."
976
+ },
977
+ {
978
+ "type": "text",
979
+ "bbox": [
980
+ 0.516,
981
+ 0.267,
982
+ 0.915,
983
+ 0.362
984
+ ],
985
+ "angle": 0,
986
+ "content": "- The limited number of cores on a CPU. With an enlarged \\( m \\), there are more terms in the exponent part of Equation 1. For instance, when \\( m = 4 \\), there are 15 terms, each of which requires a TGSW scale-and-add operation. Unfortunately, our CPU baseline has only 8 physical cores. Mapping each term to a core and summing the results from all cores introduces significant communication overhead."
987
+ },
988
+ {
989
+ "type": "text",
990
+ "bbox": [
991
+ 0.516,
992
+ 0.364,
993
+ 0.913,
994
+ 0.446
995
+ ],
996
+ "angle": 0,
997
+ "content": "- More cache conflicts. The size of the bootstrapping key increases exponentially with an enlarged \\( m \\). For example, as Figure 5 shows, instead of a single bootstrapping key, BKU with \\( m = 2 \\) requires three bootstrapping keys. Each TGSW scale-and-add operation on a term fetches its corresponding bootstrapping key into the shared last-level cache, generating more cache conflicts."
998
+ },
999
+ {
1000
+ "type": "text",
1001
+ "bbox": [
1002
+ 0.516,
1003
+ 0.447,
1004
+ 0.915,
1005
+ 0.544
1006
+ ],
1007
+ "angle": 0,
1008
+ "content": "- The lack of a pipelined design. As Figure 5 highlights, in each iteration, the construction of the bootstrapping key bundle BKB and the external product operation are executed sequentially. Although it is possible to start the computation of BKB for the next iteration and perform the external product operation of this iteration at the same time, the current BKU implementation [22] cannot do this, due to the lack of a pipelined design."
1009
+ },
1010
+ {
1011
+ "type": "list",
1012
+ "bbox": [
1013
+ 0.516,
1014
+ 0.267,
1015
+ 0.915,
1016
+ 0.544
1017
+ ],
1018
+ "angle": 0,
1019
+ "content": null
1020
+ },
1021
+ {
1022
+ "type": "text",
1023
+ "bbox": [
1024
+ 0.513,
1025
+ 0.545,
1026
+ 0.915,
1027
+ 0.877
1028
+ ],
1029
+ "angle": 0,
1030
+ "content": "MATCHA for Aggressive BKU. In this paper, we propose a pipeline flow for MATCHA to support aggressive BKU with a larger \\( m \\). Unlike our CPU baseline, our pipeline flow can be easily accelerated by a large number of specialized hardware components including TGSW clusters and External Product (EP) cores. As Figure 6(a) shows, we divide the bottleneck of a TFHE bootstrapping into two steps, i.e., the construction of the bootstrapping key bundle, and the EP operation. A TGSW cluster is used to construct the bootstrapping key bundle, while an EP core processes EP operations between the bootstrapping key bundle and ACC. A TGSW cluster consists of a TGSW adder tree and multiple TGSW scale units, each of which computes one term in the bootstrapping key bundle, e.g., when \\( m = 2 \\), \\( (X^{-\\overline{a}_{2i-1} - \\overline{a}_{2i}} - 1) \\cdot \\mathbf{BK}_{i,0} \\), where \\( \\mathbf{BK}_{i,0} \\) is the TGSW ciphertext of \\( s_{2i-1}s_{2i} \\). The TGSW adder tree then sums all terms and generates the bootstrapping key bundle. With the bootstrapping key bundle \\( (\\mathbf{BKB_i}) \\), an EP core computes \\( ACC \\gets \\mathbf{BKB_i} \\square ACC \\). The TGSW cluster and the EP core have separate register file banks to reduce on-chip memory conflicts. Moreover, these two steps of a TFHE bootstrapping can be deployed on a TGSW cluster and an EP core in a pipelined manner, as shown in Figure 6(b) and sketched below. In each time step, the EP core computes the EP operation with the bootstrapping key bundle generated by the TGSW cluster in the previous time step. When \\( m \\) is increased, the workload of the bootstrapping key bundle construction becomes larger. The workloads"
1031
+ }
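+ A toy schedule of the two-stage pipeline of Figure 6(b) (our own illustration, not generated by the design):
+
+ ```python
+ def two_stage_schedule(iters):
+     """TGSW cluster builds bundle BKB[t] while the EP core consumes BKB[t-1],
+     so both units stay busy on every step after a one-step fill."""
+     rows = []
+     for t in range(iters + 1):
+         tgsw = f"build BKB[{t}]" if t < iters else "idle"
+         ep = f"ACC <- BKB[{t - 1}] (x) ACC" if t > 0 else "idle"
+         rows.append((t, tgsw, ep))
+     return rows
+
+ for row in two_stage_schedule(3):
+     print(row)
+ ```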
1032
+ ],
1033
+ [
1034
+ {
1035
+ "type": "header",
1036
+ "bbox": [
1037
+ 0.084,
1038
+ 0.076,
1039
+ 0.562,
1040
+ 0.088
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus"
1044
+ },
1045
+ {
1046
+ "type": "header",
1047
+ "bbox": [
1048
+ 0.674,
1049
+ 0.076,
1050
+ 0.914,
1051
+ 0.088
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "DAC '22, July 10-14, 2022, San Francisco, CA, USA"
1055
+ },
1056
+ {
1057
+ "type": "table_caption",
1058
+ "bbox": [
1059
+ 0.084,
1060
+ 0.105,
1061
+ 0.48,
1062
+ 0.119
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "Table 2: The power and area of MATCHA operating at \\( 2GHz \\)."
1066
+ },
1067
+ {
1068
+ "type": "table",
1069
+ "bbox": [
1070
+ 0.088,
1071
+ 0.121,
1072
+ 0.476,
1073
+ 0.258
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "<table><tr><td>Name</td><td>Spec</td><td>Power (W)</td><td>Area (mm2)</td></tr><tr><td>TGSW cluster</td><td>×16 multipliers &amp; adders, and a 16KB, 2-bank reg. file</td><td>0.98</td><td>0.368</td></tr><tr><td>EP core</td><td>4 IFFT, 1 FFT, ×4 multipliers &amp; adders, and a 256KB, 8-bank reg. file</td><td>2.87</td><td>1.89</td></tr><tr><td>Sub-total</td><td>×8 EP cores and TGSW clusters</td><td>30.8</td><td>18.06</td></tr><tr><td>polynomial unit</td><td>×32 adders &amp; cmps &amp; logic units, and an 8KB, 2-bank reg. file</td><td>2.33</td><td>0.32</td></tr><tr><td>crossbar</td><td>2× 8×32 &amp; 1× 8×8 NoCs (256b bit-sliced)</td><td>2.11</td><td>0.44</td></tr><tr><td>SPM</td><td>a 4MB, 32-bank SPM</td><td>3.52</td><td>3.25</td></tr><tr><td>mem ctrl</td><td>memory controller and HBM2 PHY</td><td>1.225</td><td>14.9</td></tr><tr><td>Total</td><td></td><td>39.98</td><td>36.96</td></tr></table>"
1077
+ },
1078
+ {
1079
+ "type": "text",
1080
+ "bbox": [
1081
+ 0.082,
1082
+ 0.264,
1083
+ 0.482,
1084
+ 0.293
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "of the two steps in the pipeline can be approximately balanced by adjusting \\( m \\)."
1088
+ },
1089
+ {
1090
+ "type": "title",
1091
+ "bbox": [
1092
+ 0.084,
1093
+ 0.298,
1094
+ 0.373,
1095
+ 0.312
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "4.3 The Architecture of MATCHA"
1099
+ },
1100
+ {
1101
+ "type": "text",
1102
+ "bbox": [
1103
+ 0.082,
1104
+ 0.315,
1105
+ 0.483,
1106
+ 0.716
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "Architecture. The overall architecture of MATCHA is shown in Figure 7(a). MATCHA has multiple computing components including a polynomial unit, eight TGSW clusters, and eight External Product (EP) cores. All computing components of MATCHA are connected to 32 scratchpad memory (SPM) banks by crossbars. MATCHA also employs a memory controller to manage the off-chip memory requests issued to HBM2 DRAMs. The polynomial unit is in charge of performing polynomial additions/subtractions for each TFHE logic operation, initializing bootstrapping operations, extracting samples, and conducting key-switching operations that consist of additions, logic comparisons, and Boolean logic operations. One TGSW cluster and an EP core can support one bootstrapping pipeline. As Figure 7(b) shows, a TGSW cluster contains 16 32-bit integer multipliers and 16 32-bit integer adders to support TGSW scale operations. Each TGSW cluster has only two register banks, since the memory accesses during a TGSW scale operation have strong spatial locality. The TGSW cluster can read one register bank while concurrently writing the other. An EP core consists of an FFT core and four IFFT cores to accelerate the FFT and IFFT kernels during an EP operation, as shown in Figure 7(c). It has 8 register banks to serve the irregular memory accesses in FFT and IFFT kernels. An EP core also has four 32-bit integer multipliers and four 32-bit integer adders to manipulate TGSW ciphertexts during an EP operation. An FFT core is similar to an IFFT core, except for its data flow. As Figure 7(d) highlights, an FFT core comprises an address generation unit, a twiddle factor buffer, two input/output FIFOs, and 128 butterfly cores, each of which consists of two 64-bit integer adders and two 64-bit binary shifters. The address generation unit guides butterfly cores to access the twiddle factor buffer."
1110
+ },
1111
+ {
1112
+ "type": "text",
1113
+ "bbox": [
1114
+ 0.082,
1115
+ 0.717,
1116
+ 0.483,
1117
+ 0.868
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": "Design Overhead. We implemented MATCHA in RTL, and synthesized it in \\(16nm\\) PTM process technology using state-of-the-art tools. We used CACTI to model all SPM components and register file banks. Due to its simple structure, the entire design of MATCHA can run at \\(2GHz\\). Among various on-chip network architectures, e.g., meshes, rings, and crossbars, we selected two \\(8 \\times 32\\) and one \\(8 \\times 8\\) bit-sliced crossbars, i.e., SPM \\(\\rightarrow\\) cores/clusters, cores/clusters \\(\\rightarrow\\) SPM, and cores/clusters \\(\\rightarrow\\) cores/clusters. The hardware overhead and power consumption of MATCHA are shown in Table 2. In total, MATCHA occupies \\(36.96mm^2\\) and consumes \\(39.98W\\). The HBM2 bandwidth is \\(640GB/s\\)."
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "bbox": [
1125
+ 0.082,
1126
+ 0.868,
1127
+ 0.483,
1128
+ 0.897
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": "Error and Noise. The error of the polynomial multiplication result caused by approximate multiplication-less integer FFT and"
1132
+ },
1133
+ {
1134
+ "type": "image",
1135
+ "bbox": [
1136
+ 0.518,
1137
+ 0.104,
1138
+ 0.652,
1139
+ 0.196
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": null
1143
+ },
1144
+ {
1145
+ "type": "image_caption",
1146
+ "bbox": [
1147
+ 0.515,
1148
+ 0.199,
1149
+ 0.671,
1150
+ 0.226
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "Figure 8: The error of approx. FFT & IFFT."
1154
+ },
1155
+ {
1156
+ "type": "table",
1157
+ "bbox": [
1158
+ 0.709,
1159
+ 0.104,
1160
+ 0.888,
1161
+ 0.163
1162
+ ],
1163
+ "angle": 0,
1164
+ "content": "<table><tr><td>metric</td><td>BKU [2, 22]</td><td>MATCHA</td></tr><tr><td>EP</td><td>δ/2</td><td>δ/m</td></tr><tr><td>rounding</td><td>RO/2</td><td>RO/m</td></tr><tr><td>BK</td><td>3βK</td><td>(2^m-1)βK</td></tr><tr><td>I/FFT</td><td>-150dB</td><td>-141dB</td></tr></table>"
1165
+ },
1166
+ {
1167
+ "type": "image_caption",
1168
+ "bbox": [
1169
+ 0.688,
1170
+ 0.167,
1171
+ 0.912,
1172
+ 0.223
1173
+ ],
1174
+ "angle": 0,
1175
+ "content": "Table 3: The noise comparison (δ: the noise of EPs; RO: the noise of roundings; BK: the noise of bootstrapping keys)."
1176
+ },
1177
+ {
1178
+ "type": "text",
1179
+ "bbox": [
1180
+ 0.513,
1181
+ 0.229,
1182
+ 0.915,
1183
+ 0.56
1184
+ ],
1185
+ "angle": 0,
1186
+ "content": "IFFT kernels is shown in Figure 8. All polynomial coefficients are 32-bit integers, while we quantize the twiddle factors of FFT and IFFT with various bitwidths. With an increasing bitwidth of twiddle factors, the error caused by approximate FFT and IFFT decreases, approaching that generated by the original double-precision floating point FFT and IFFT. With 64-bit dyadic-value-quantized twiddle factors (DVQTFs), the error caused by approximate FFT and IFFT is \\(\\sim -141dB\\), which is still larger than that produced by 64-bit double-precision floating point FFT and IFFT, since the approximate FFT and IFFT perform only additions and binary shifts. At the TFHE gate level, the noise comparison between BKU and MATCHA is exhibited in Table 3, where BKU unrolls the bootstrapping key twice while MATCHA unrolls it \\(m\\) times (\\(m \\geq 2\\)). With an increasing \\(m\\), the noise from EP and rounding operations decreases linearly, but the noise caused by bootstrapping keys increases exponentially. As a result, TFHE with a smaller \\(m\\) can tolerate more errors caused by approximate FFT and IFFT. Based on our experiments, 38-bit DVQTFs produce no decryption failure in a test of \\(10^{8}\\) TFHE gates. However, for a large \\(m\\), e.g., \\(m = 5\\), we have to use 64-bit DVQTFs to guarantee there is no decryption failure in the same test, since the noise caused by more bootstrapping keys dominates the total noise in ciphertexts. Therefore, MATCHA adopts 64-bit DVQTFs for all approximate multiplication-less integer FFT and IFFT kernels."
1187
+ },
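+ A sketch of how the DVQTF quantization error can be measured (illustrative Python; float64 limits the demo to bitwidths well below the 64-bit DVQTFs the accelerator adopts):
+
+ ```python
+ import numpy as np
+
+ def dvqtf_max_error(N, bits):
+     """Max |error| after quantizing the N/2 twiddle factors to alpha / 2^bits."""
+     tw = np.exp(-2j * np.pi * np.arange(N // 2) / N)
+     s = 2.0 ** bits
+     q = np.round(tw.real * s) / s + 1j * np.round(tw.imag * s) / s
+     return np.max(np.abs(q - tw))
+
+ for bits in (16, 24, 38):
+     print(bits, dvqtf_max_error(1024, bits))   # error shrinks ~2x per extra bit
+ ```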
1188
+ {
1189
+ "type": "title",
1190
+ "bbox": [
1191
+ 0.515,
1192
+ 0.562,
1193
+ 0.771,
1194
+ 0.577
1195
+ ],
1196
+ "angle": 0,
1197
+ "content": "5 Experimental Methodology"
1198
+ },
1199
+ {
1200
+ "type": "text",
1201
+ "bbox": [
1202
+ 0.513,
1203
+ 0.578,
1204
+ 0.915,
1205
+ 0.716
1206
+ ],
1207
+ "angle": 0,
1208
+ "content": "Simulation and Compilation: To simulate the performance of MATCHA at cycle level, we used a CGRA modeling framework, OpenCGRA [21], which has been validated against multiple ASIC accelerators. OpenCGRA first compiles a TFHE logic operation into a data flow graph (DFG) of the operations supported by MATCHA, solves its dependencies, and removes structural hazards. The architecture of MATCHA is abstracted to an architecture description (AD) in OpenCGRA, which computes the latency and the energy consumption of each TFHE logic operation by scheduling and mapping the DFG onto the AD."
1209
+ },
1210
+ {
1211
+ "type": "text",
1212
+ "bbox": [
1213
+ 0.513,
1214
+ 0.717,
1215
+ 0.915,
1216
+ 0.897
1217
+ ],
1218
+ "content": "Our Baselines. We compared MATCHA against state-of-the-art CPU-, GPU-, FPGA-, and ASIC-based TFHE hardware platforms. Our CPU baseline is an 8-core \\(3.7GHz\\) Xeon E-2288G processor executing the TFHE library [6], while our GPU baseline is a 5120-core Tesla-V100 GPU equipped with a 16GB HBM2 DRAM running the cuFHE library [7]. TFHE Vector Engine (TVE) [10] was implemented on a low-end ZedBoard Zynq-7000 FPGA. We implemented 8 copies of TVE on a Stratix-10 GX2800 FPGA, and used it as our FPGA baseline, since the Stratix-10 board has more resources. Because there is no existing ASIC-based design, we synthesized our FPGA baseline with the \\(16nm\\) PTM process as our ASIC baseline. We enable BKU on CPU, GPU, and MATCHA but fix \\(m = 1\\) on FPGA and ASIC, since they do not support BKU."
1219
+ "content": "Our Baselines. We compared MATCHA against state-of-the-art CPU-, GPU-, FPGA-, and ASIC-based TFHE hardware platforms. Our CPU baseline is a 8-core \\(3.7GHz\\) Xeon E-2288G processor executing the TFHE library [6], while our GPU baseline is a 5120-core Tesla-V100 GPU equipped with a 16GB HBM2 DRAM running the cuFHE library [7]. TFHE Vector Engine (TVE) [10] was implemented on a low-end ZedBoard Zynq-7000 FPGA. We implemented 8 copies of TVE on a Stratix-10 GX2800 FPGA, and used it as our FPGA baseline, since the Stratix-10 board has more resources. Because there is no existing ASIC-based design, we synthesized our FPGA baseline with the \\(16nm\\) PTM process as our ASIC baseline. We enable BKU on CPU, GPU, and MATCHA but fix \\(m = 1\\) on FPGA and ASIC, since they do not support BKU."
1220
+ }
1221
+ ],
1222
+ [
1223
+ {
1224
+ "type": "header",
1225
+ "bbox": [
1226
+ 0.085,
1227
+ 0.076,
1228
+ 0.325,
1229
+ 0.087
1230
+ ],
1231
+ "angle": 0,
1232
+ "content": "DAC '22, July 10-14, 2022, San Francisco, CA, USA"
1233
+ },
1234
+ {
1235
+ "type": "image",
1236
+ "bbox": [
1237
+ 0.095,
1238
+ 0.104,
1239
+ 0.353,
1240
+ 0.191
1241
+ ],
1242
+ "angle": 0,
1243
+ "content": null
1244
+ },
1245
+ {
1246
+ "type": "image_caption",
1247
+ "bbox": [
1248
+ 0.121,
1249
+ 0.195,
1250
+ 0.324,
1251
+ 0.208
1252
+ ],
1253
+ "angle": 0,
1254
+ "content": "Figure 9: Latency comparison."
1255
+ },
1256
+ {
1257
+ "type": "image",
1258
+ "bbox": [
1259
+ 0.37,
1260
+ 0.104,
1261
+ 0.63,
1262
+ 0.191
1263
+ ],
1264
+ "angle": 0,
1265
+ "content": null
1266
+ },
1267
+ {
1268
+ "type": "image_caption",
1269
+ "bbox": [
1270
+ 0.379,
1271
+ 0.195,
1272
+ 0.618,
1273
+ 0.208
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": "Figure 10: Throughput comparison."
1277
+ },
1278
+ {
1279
+ "type": "image",
1280
+ "bbox": [
1281
+ 0.647,
1282
+ 0.104,
1283
+ 0.903,
1284
+ 0.192
1285
+ ],
1286
+ "angle": 0,
1287
+ "content": null
1288
+ },
1289
+ {
1290
+ "type": "image_caption",
1291
+ "bbox": [
1292
+ 0.649,
1293
+ 0.195,
1294
+ 0.899,
1295
+ 0.209
1296
+ ],
1297
+ "angle": 0,
1298
+ "content": "Figure 11: Thrght/Watt comparison."
1299
+ },
1300
+ {
1301
+ "type": "text",
1302
+ "bbox": [
1303
+ 0.082,
1304
+ 0.213,
1305
+ 0.482,
1306
+ 0.337
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": "TFHE Operations and Parameters. We studied all TFHE logic operations including NOT, AND, OR, NAND, XOR, and XNOR, but we only report the results on NAND in Section 6. This is because AND, OR, NAND, XOR, and XNOR have almost the same latency, which is dominated by the bootstrapping step, while NOT has no bootstrapping at all. To maintain the standard 110-bit security, we adopt the TFHE parameters from [6], i.e., the polynomial degree in the ring \\( N = 1024 \\), the TLWE dimension \\( k = 1 \\), and the basis and length for the TGSW ciphertext decomposition \\( Bg = 1024 \\) and \\( \\ell = 3 \\)."
1310
+ },
1311
+ {
1312
+ "type": "title",
1313
+ "bbox": [
1314
+ 0.084,
1315
+ 0.337,
1316
+ 0.284,
1317
+ 0.35
1318
+ ],
1319
+ "angle": 0,
1320
+ "content": "6 Results and Analysis"
1321
+ },
1322
+ {
1323
+ "type": "text",
1324
+ "bbox": [
1325
+ 0.082,
1326
+ 0.352,
1327
+ 0.483,
1328
+ 0.572
1329
+ ],
1330
+ "angle": 0,
1331
+ "content": "Latency. The latency comparison of a TFHE NAND gate between our various baselines and MATCHA is shown in Figure 9. The NAND gate on CPU costs \\(13.1ms\\), while \\(m = 2\\) reduces its latency to \\(6.67ms\\). Aggressive BKU with an increasing \\(m\\) cannot further reduce the NAND gate latency on the CPU, due to the limited number of cores, more cache conflicts, and the non-pipelined processing style. It takes only \\(0.37ms\\) for GPU to process a NAND gate. As \\(m\\) grows, GPU gradually reduces the NAND gate latency. When \\(m = 4\\), the NAND gate latency on GPU is \\(0.18ms\\). MATCHA reduces the NAND gate latency by \\(13\\%\\) over GPU only when \\(m = 3\\), since GPU can fully use all of its resources to process one TFHE gate when \\(m = 1\\) or 2. MATCHA cannot support aggressive BKU with \\(m = 4\\) efficiently either, since it has only 8 TGSW clusters. FPGA and ASIC do not have any pipelined design or memory optimization to support BKU, and they need \\(>6.8ms\\) to complete a NAND gate when \\(m = 1\\)."
1332
+ },
1333
+ {
1334
+ "type": "text",
1335
+ "bbox": [
1336
+ 0.082,
1337
+ 0.573,
1338
+ 0.483,
1339
+ 0.697
1340
+ ],
1341
+ "angle": 0,
1342
+ "content": "Throughput. The NAND gate throughput comparison between various baselines and MATCHA is shown in Figure 10. FPGA and ASIC duplicate 8 copies of the TVE [10], so they support only \\( m = 1 \\). By enabling aggressive BKU, even CPU (\\( m = 2 \\)) can achieve higher gate processing throughput than ASIC and FPGA with \\( m = 1 \\). GPU and MATCHA obtain much higher throughput than ASIC, FPGA, and CPU. Compared to GPU, MATCHA improves the NAND gate throughput by \\( 2.3 \\times (m = 3) \\), due to its pipelined architecture for aggressive BKU."
1343
+ },
1344
+ {
1345
+ "type": "text",
1346
+ "bbox": [
1347
+ 0.082,
1348
+ 0.697,
1349
+ 0.483,
1350
+ 0.822
1351
+ ],
1352
+ "angle": 0,
1353
+ "content": "Throughput per Watt. The comparison of the NAND gate throughput per Watt between various baselines and MATCHA is shown in Figure 11. FPGA and ASIC consume only \\(\\sim 40W\\) and \\(\\sim 26W\\), and improve the NAND gate throughput per Watt by \\(2.4\\times\\) and \\(8.3\\times\\) over CPU respectively, when \\(m = 1\\). Due to the large power consumption (\\(>200W\\)) of GPU, the best throughput per Watt of GPU (\\(m = 4\\)) is only about \\(58\\%\\) of that of ASIC. Compared to ASIC, MATCHA improves the NAND gate throughput per Watt by \\(6.3\\times\\), since it consumes only \\(39.98W\\)."
1354
+ },
1355
+ {
1356
+ "type": "title",
1357
+ "bbox": [
1358
+ 0.084,
1359
+ 0.826,
1360
+ 0.208,
1361
+ 0.839
1362
+ ],
1363
+ "angle": 0,
1364
+ "content": "7 Conclusion"
1365
+ },
1366
+ {
1367
+ "type": "text",
1368
+ "bbox": [
1369
+ 0.082,
1370
+ 0.841,
1371
+ 0.483,
1372
+ 0.897
1373
+ ],
1374
+ "angle": 0,
1375
+ "content": "TFHE enables arbitrary computations with an unlimited multiplicative depth to directly occur on ciphertexts. However, TFHE gates are time-consuming and power-hungry on state-of-the-art hardware platforms. In this paper, we build MATCHA to accelerate"
1376
+ },
1377
+ {
1378
+ "type": "text",
1379
+ "bbox": [
1380
+ 0.513,
1381
+ 0.213,
1382
+ 0.916,
1383
+ 0.296
1384
+ ],
1385
+ "angle": 0,
1386
+ "content": "TFHE gates. MATCHA allows aggressive bootstrapping key unrolling to process TFHE gates without decryption errors by approximate multiplication-less integer FFTs and IFFTs, and a pipelined datapath. Compared to prior CPU-, GPU-, FPGA- and ASIC-based solutions, MATCHA improves the TFHE gate processing throughput by \\(2.3\\times\\), and the throughput per Watt by \\(6.3\\times\\)."
1387
+ },
1388
+ {
1389
+ "type": "title",
1390
+ "bbox": [
1391
+ 0.516,
1392
+ 0.3,
1393
+ 0.609,
1394
+ 0.313
1395
+ ],
1396
+ "angle": 0,
1397
+ "content": "References"
1398
+ },
1399
+ {
1400
+ "type": "ref_text",
1401
+ "bbox": [
1402
+ 0.522,
1403
+ 0.317,
1404
+ 0.914,
1405
+ 0.338
1406
+ ],
1407
+ "angle": 0,
1408
+ "content": "[1] A. Becoulet and A. Verguet, \"A Depth-First Iterative Algorithm for the Conjugate Pair Fast Fourier Transform,\" IEEE Transactions on Signal Processing, 2021."
1409
+ },
1410
+ {
1411
+ "type": "ref_text",
1412
+ "bbox": [
1413
+ 0.523,
1414
+ 0.338,
1415
+ 0.914,
1416
+ 0.358
1417
+ ],
1418
+ "angle": 0,
1419
+ "content": "[2] F. Bourse, et al., \"Fast Homomorphic Evaluation of Deep Discretized Neural Networks,\" in Annual International Cryptology Conference, 2018."
1420
+ },
1421
+ {
1422
+ "type": "ref_text",
1423
+ "bbox": [
1424
+ 0.523,
1425
+ 0.359,
1426
+ 0.915,
1427
+ 0.378
1428
+ ],
1429
+ "angle": 0,
1430
+ "content": "[3] Z. Brakerski, et al., \"(Leveled) Fully Homomorphic Encryption without Bootstrapping,\" ACM Transactions on Computation Theory, 6(3), July 2014."
1431
+ },
1432
+ {
1433
+ "type": "ref_text",
1434
+ "bbox": [
1435
+ 0.523,
1436
+ 0.378,
1437
+ 0.914,
1438
+ 0.398
1439
+ ],
1440
+ "angle": 0,
1441
+ "content": "[4] A. Brutzkus, et al., \"Low Latency Privacy Preserving Inference,\" in International Conference on Machine Learning, pages 812-821, 2019."
1442
+ },
1443
+ {
1444
+ "type": "ref_text",
1445
+ "bbox": [
1446
+ 0.523,
1447
+ 0.399,
1448
+ 0.914,
1449
+ 0.418
1450
+ ],
1451
+ "angle": 0,
1452
+ "content": "[5] J. H. Cheon, et al., \"Remark on the Security of CKKS Scheme in Practice,\" Cryptology ePrint Archive, Report 2020/1581, 2020, https://eprint.iacr.org/2020/1581."
1453
+ },
1454
+ {
1455
+ "type": "ref_text",
1456
+ "bbox": [
1457
+ 0.523,
1458
+ 0.418,
1459
+ 0.914,
1460
+ 0.438
1461
+ ],
1462
+ "angle": 0,
1463
+ "content": "[6] I. Chillotti, et al., \"TFHE: Fast Fully Homomorphic Encryption Over The Torus,\" Journal of Cryptology, 33(1):34-91, 2020."
1464
+ },
1465
+ {
1466
+ "type": "ref_text",
1467
+ "bbox": [
1468
+ 0.523,
1469
+ 0.439,
1470
+ 0.914,
1471
+ 0.458
1472
+ ],
1473
+ "angle": 0,
1474
+ "content": "[7] W. Dai, \"CUDA-accelerated Fully Homomorphic Encryption Library\", https://github.com/vernamlab/cuFHE, 2018, worcester Polytechnic Institute."
1475
+ },
1476
+ {
1477
+ "type": "ref_text",
1478
+ "bbox": [
1479
+ 0.523,
1480
+ 0.459,
1481
+ 0.914,
1482
+ 0.489
1483
+ ],
1484
+ "angle": 0,
1485
+ "content": "[8] L. Ducas and D. Miccianio, “FHEW: Bootstrapping Homomorphic Encryption in Less than A Second,” in International Conference on the Theory and Applications of Cryptographic Techniques, pages 617–640, Springer, 2015."
1486
+ },
1487
+ {
1488
+ "type": "ref_text",
1489
+ "bbox": [
1490
+ 0.523,
1491
+ 0.489,
1492
+ 0.914,
1493
+ 0.509
1494
+ ],
1495
+ "angle": 0,
1496
+ "content": "[9] J. Fan and F. Vercauteren, \"Somewhat Practical Fully Homomorphic Encryption,\" Cryptology ePrint Archive, Report 2012/144, 2012."
1497
+ },
1498
+ {
1499
+ "type": "ref_text",
1500
+ "bbox": [
1501
+ 0.518,
1502
+ 0.509,
1503
+ 0.914,
1504
+ 0.539
1505
+ ],
1506
+ "angle": 0,
1507
+ "content": "[10] S. Gener, et al., \"An FPGA-based Programmable Vector Engine for Fast Fully Homomorphic Encryption over the Torus,\" SPSL: Secure and Private Systems for Machine Learning, 2021."
1508
+ },
1509
+ {
1510
+ "type": "ref_text",
1511
+ "bbox": [
1512
+ 0.518,
1513
+ 0.539,
1514
+ 0.914,
1515
+ 0.559
1516
+ ],
1517
+ "content": "[11] S. Halevi and V. Shoup, \"Bootstrapping for HElib,\" in International Conference on the Theory and Applications of Cryptographic Techniques, 2015."
1518
+ "content": "[11] S. Halevi and V. Shoup, \"Bootstrapping for HElib,\" in International conference on the theory and applications of cryptographic techniques, 2015."
1519
+ },
1520
+ {
1521
+ "type": "ref_text",
1522
+ "bbox": [
1523
+ 0.519,
1524
+ 0.559,
1525
+ 0.914,
1526
+ 0.588
1527
+ ],
1528
+ "angle": 0,
1529
+ "content": "[12] C. J. Hoofnagle, et al., \"The European Union General Data Protection Regulation: What It Is & What It Means,\" Information & Communications Technology Law, 2019."
1530
+ },
1531
+ {
1532
+ "type": "ref_text",
1533
+ "bbox": [
1534
+ 0.519,
1535
+ 0.589,
1536
+ 0.914,
1537
+ 0.62
1538
+ ],
1539
+ "angle": 0,
1540
+ "content": "[13] Z. Liu, et al., \"High-Performance Ideal Lattice-Based Cryptography on 8-Bit AVR Microcontrollers,\" ACM Transactions on Embedded Computing Systems, 16(4), July 2017, https://doi.org/10.1145/3092951."
1541
+ },
1542
+ {
1543
+ "type": "ref_text",
1544
+ "bbox": [
1545
+ 0.519,
1546
+ 0.62,
1547
+ 0.914,
1548
+ 0.64
1549
+ ],
1550
+ "angle": 0,
1551
+ "content": "[14] K. Matsuoka, et al., \"Virtual Secure Platform: A Five-Stage Pipeline Processor over TFHE,\" in USENIX Security Symposium, pages 4007-4024, 2021."
1552
+ },
1553
+ {
1554
+ "type": "ref_text",
1555
+ "bbox": [
1556
+ 0.519,
1557
+ 0.64,
1558
+ 0.914,
1559
+ 0.669
1560
+ ],
1561
+ "angle": 0,
1562
+ "content": "[15] A. C. Mert, et al., \"A Flexible and Scalable NTT Hardware: Applications from Homomorphically Encrypted Deep Learning to Post-Quantum Cryptography,\" in Design, Automation & Test in Europe Conference & Exhibition, 2020."
1563
+ },
1564
+ {
1565
+ "type": "ref_text",
1566
+ "bbox": [
1567
+ 0.519,
1568
+ 0.67,
1569
+ 0.914,
1570
+ 0.699
1571
+ ],
1572
+ "angle": 0,
1573
+ "content": "[16] T. Morshed, et al., \"CPU and GPU Accelerated Fully Homomorphic Encryption,\" in IEEE International Symposium on Hardware Oriented Security and Trust, pages 142-153, 2020."
1574
+ },
1575
+ {
1576
+ "type": "ref_text",
1577
+ "bbox": [
1578
+ 0.519,
1579
+ 0.7,
1580
+ 0.914,
1581
+ 0.72
1582
+ ],
1583
+ "angle": 0,
1584
+ "content": "[17] S. Oraintara, et al., \"Integer fast Fourier transform,\" IEEE Transactions on Signal Processing, 50(3):607-618, 2002."
1585
+ },
1586
+ {
1587
+ "type": "ref_text",
1588
+ "bbox": [
1589
+ 0.519,
1590
+ 0.72,
1591
+ 0.914,
1592
+ 0.751
1593
+ ],
1594
+ "angle": 0,
1595
+ "content": "[18] M. S. Riazi, et al., \"HEAX: An Architecture for Computing on Encrypted Data,\" in ACM International Conference on Architectural Support for Programming Languages and Operating Systems, 2020."
1596
+ },
1597
+ {
1598
+ "type": "ref_text",
1599
+ "bbox": [
1600
+ 0.519,
1601
+ 0.751,
1602
+ 0.914,
1603
+ 0.779
1604
+ ],
1605
+ "angle": 0,
1606
+ "content": "[19] N. Samardzic, et al., \"F1: A Fast and Programmable Accelerator for Fully Homomorphic Encryption,\" in IEEE/ACM International Symposium on Microarchitecture, 2021."
1607
+ },
1608
+ {
1609
+ "type": "ref_text",
1610
+ "bbox": [
1611
+ 0.519,
1612
+ 0.78,
1613
+ 0.914,
1614
+ 0.811
1615
+ ],
1616
+ "angle": 0,
1617
+ "content": "[20] S. Sinha Roy, et al., \"FPGA-Based High-Performance Parallel Architecture for Homomorphic Computing on Encrypted Data,\" in IEEE International Symposium on High Performance Computer Architecture, pages 387-398, 2019."
1618
+ },
1619
+ {
1620
+ "type": "ref_text",
1621
+ "bbox": [
1622
+ 0.519,
1623
+ 0.811,
1624
+ 0.914,
1625
+ 0.841
1626
+ ],
1627
+ "angle": 0,
1628
+ "content": "[21] C. Tan, et al., \"OpenCGRA: An Open-Source Unified Framework for Modeling, Testing, and Evaluating CGRAs,\" in 2020 IEEE 38th International Conference on Computer Design, pages 381-388, 2020."
1629
+ },
1630
+ {
1631
+ "type": "ref_text",
1632
+ "bbox": [
1633
+ 0.519,
1634
+ 0.841,
1635
+ 0.914,
1636
+ 0.861
1637
+ ],
1638
+ "angle": 0,
1639
+ "content": "[22] T. Zhou, et al., \"Faster Bootstrapping With Multiple Addends,\" IEEE Access, 6:49868-49876, 2018."
1640
+ },
1641
+ {
1642
+ "type": "list",
1643
+ "bbox": [
1644
+ 0.518,
1645
+ 0.317,
1646
+ 0.915,
1647
+ 0.861
1648
+ ],
1649
+ "angle": 0,
1650
+ "content": null
1651
+ }
1652
+ ]
1653
+ ]
2202.08xxx/2202.08814/0e4977c0-9108-464d-ba63-1e8d8819dfe2_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b77defb43e9624df47859c40e226e6e0871eabd31a6a8bbc490ebdff2dab72a4
3
+ size 1261254
2202.08xxx/2202.08814/full.md ADDED
@@ -0,0 +1,242 @@
1
+ # MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus
2
+
3
+ Lei Jiang*
4
+
5
+ jiang60@iu.edu
6
+
7
+ Indiana University
8
+
9
+ Qian Lou
10
+
11
+ qlou@iu.edu
12
+
13
+ Indiana University
14
+
15
+ Nrushad Joshi
16
+
17
+ nrujoshi@iu.edu
18
+
19
+ Indiana University
20
+
21
+ # Abstract
22
+
23
+ Fully Homomorphic Encryption over the Torus (TFHE) allows arbitrary computations to happen directly on ciphertexts using homomorphic logic gates. However, each TFHE gate on state-of-the-art hardware platforms such as GPUs and FPGAs is extremely slow ( $>0.2ms$ ). Moreover, even the latest FPGA-based TFHE accelerator cannot achieve high energy efficiency, since it frequently invokes expensive double-precision floating point FFT and IFFT kernels. In this paper, we propose a fast and energy-efficient accelerator, MATCHA, to process TFHE gates. MATCHA supports aggressive bootstrapping key unrolling to accelerate TFHE gates without decryption errors by approximate multiplication-less integer FFTs and IFFTs, and a pipelined datapath. Compared to prior accelerators, MATCHA improves the TFHE gate processing throughput by $2.3\times$ and the throughput per Watt by $6.3\times$ .
24
+
25
+ # CCS Concepts
26
+
27
+ - Hardware $\rightarrow$ Application-specific VLSI designs; Security and privacy $\rightarrow$ Cryptography.
28
+
29
+ # Keywords
30
+
31
+ accelerator, fully homomorphic encryption, TFHE, bootstrapping
32
+
33
+ # ACM Reference Format:
34
+
35
+ Lei Jiang, Qian Lou, and Nrushad Joshi. 2022. MATCHA: A Fast and Energy-Efficient Accelerator for Fully Homomorphic Encryption over the Torus. In The 59th Annual Design Automation Conference 2022 (DAC '22), July 10-14, 2022, San Francisco, CA, USA. ACM, New York, NY, USA, 6 pages.
36
+
37
+ # 1 Introduction
38
+
39
+ In cloud computing, it is dangerous for clients upload their raw data to untrusted cloud servers, due to potential data breaches. Moreover, recent legislation [12] requires cloud computing enterprises to provide sufficient security for clients' personal data.
40
+
41
+ Recently, Fully Homomorphic Encryption (FHE) [3, 5, 6] has emerged as one of the most promising cryptographic solutions for allowing arbitrary computations on encrypted data in untrusted cloud servers. Compared to Secure Multi-Party Computation, FHE requires neither frequent communications between clients and cloud servers, nor significant circuit garbling overhead on the client side. FHE
42
+
43
+ *This work was partially supported by NSF through awards CCF-1908992, CCF-1909509, and CCF-210597. Work done while Nrushad Joshi was at UROC@Luddy IU.
44
+
45
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
46
+
47
+ DAC '22, July 10-14, 2022, San Francisco, CA, USA
48
+
49
+ © 2022 Association for Computing Machinery.
50
+
51
+ Table 1: The comparison between various HE schemes.
52
+
53
+ <table><tr><td>Scheme</td><td>FHE Op.</td><td>Data Type</td><td>Bootstrapping</td></tr><tr><td>BGV [3]</td><td>mult, add</td><td>integer</td><td>~ 800s</td></tr><tr><td>BFV [9]</td><td>mult, add</td><td>integer</td><td>&gt; 1000s</td></tr><tr><td>CKKS [5]</td><td>mult, add</td><td>fixed point</td><td>~ 500s</td></tr><tr><td>FHEW [8]</td><td>Boolean</td><td>binary</td><td>&lt; 1s</td></tr><tr><td>TFHE [6]</td><td>Boolean</td><td>binary</td><td>13ms</td></tr></table>
54
+
55
+ enables a client to encrypt her data and to send only ciphertexts to a cloud server that can directly evaluate homomorphic functions, e.g., encrypted neural inferences [4] or encrypted general-purpose computing [14], on the ciphertexts. When all computations are completed, the server returns the encrypted results to the client without learning any intermediate or final output, due to the end-to-end encrypted data flow. Only the client can decrypt the results by her secret key.
56
+
57
+ Among all FHE cryptosystems, FHE over the Torus (TFHE) [6] is the most efficient scheme supporting arbitrary operations with an unlimited computation depth, as shown in Table 1. First, TFHE supports arbitrary operations by various homomorphic Boolean logic gates. Traditional FHE schemes such as BGV [3], BFV [9], and CKKS [5] can perform only homomorphic additions and multiplications, while both FHEW [8] and TFHE [6] can enable homomorphic Boolean algebra, e.g., NAND, XOR, and XNOR gates. Second, TFHE obtains the fastest bootstrapping. Each FHE operation inevitably introduces a certain amount of noise into the ciphertext. If there are too many FHE operations on the computational critical path, the accumulated noise in the ciphertext may exceed a threshold, and thus the ciphertext cannot be decrypted successfully. To support an unlimited computation depth, an FHE scheme has to periodically invoke a bootstrapping operation to decrease the amount of noise in the ciphertext. The bootstrapping operation is extremely expensive for BGV, BFV, and CKKS. For example, a BGV bootstrapping typically costs several hundred seconds [11]. Therefore, these FHE schemes can support only a limited computation depth by designing a large enough noise budget. Although a bootstrapping of FHEW takes only 1s, TFHE obtains an even faster bootstrapping, i.e., a TFHE bootstrapping requires only $13ms$ on a CPU. By fast bootstrapping, TFHE allows an unlimited computation depth.
58
+
59
+ Unfortunately, a TFHE-based complex circuit consisting of multiple TFHE gates is still extremely slow. For instance, a TFHE-based simple RISC-V CPU [14] comprising thousands of TFHE gates can run at only $1.25Hz$ . In order to realize practical TFHE-based computing, it is critical to accelerate TFHE gates by specialized hardware. However, TFHE is only well-implemented on CPUs [16] and GPUs [7]. Although a recent work [10] accelerates TFHE gates on an FPGA, the TFHE gate latency on the FPGA is much longer than that on a GPU. To the best of our knowledge, there is no ASIC-based hardware accelerator for TFHE.
60
+
61
+ In this paper, we propose a fast and energy-efficient accelerator, MATCHA, to process TFHE gates. We find that the bootstrapping dominates the latency of all TFHE logic operations. The kernels of fast Fourier transform (FFT) and inverse FFT (IFFT) are the bottlenecks in a bootstrapping operation. MATCHA is designed to accelerate the TFHE bootstrapping using approximate multiplication-less integer FFTs and IFFTs. We also propose a pipelined datapath for MATCHA to support aggressive bootstrapping key unrolling [2, 22] that invokes FFTs and IFFTs less frequently. Our contributions can be summarized as follows.
62
+
63
+ - In order to fully take advantage of the error tolerance capability of TFHE, MATCHA accelerates polynomial multiplications by approximate multiplication-less integer FFTs and IFFTs requiring only additions and binary shifts. Although approximate FFTs and IFFTs introduce errors in each ciphertext, the ciphertext can still be correctly decrypted, since the errors can be rounded off along with the noise during decryption.
64
+ - We build a pipelined datapath consisting of TGSW clusters and external product cores to enable aggressive bootstrapping key unrolling that invokes FFTs and IFFTs less frequently during a bootstrapping operation. The datapath uses different register banks to serve sequential memory accesses during TGSW operations, and irregular memory accesses during FFTs and IFFTs.
65
+ - We implemented, evaluated, and compared MATCHA against prior TFHE hardware accelerators. Compared to prior accelerators, MATCHA improves the TFHE gate processing throughput by $2.3 \times$ , and the throughput per Watt by $6.3 \times$ .
66
+
67
+ # 2 Background
68
+
69
+ FHE. Fully Homomorphic Encryption (FHE) enables arbitrary operations on ciphertexts. An FHE operation $\diamond$ is defined if there is another operation $\star$ such that $Dec[Enc(x_1)\diamond Enc(x_2)] = Dec[Enc(x_1\star x_2)]$ , where $x_{1}$ and $x_{2}$ are input plaintexts, $Enc$ indicates encryption, and $Dec$ is decryption.
70
+
71
+ Notation. $\mathbb{T}$ denotes the torus of real numbers modulo $1$, i.e., $\mathbb{R} / \mathbb{Z}$. For any ring $\mathcal{R}$, polynomials of the variable $X$ with coefficients in $\mathcal{R}$ are represented by $\mathcal{R}[X]$. We define $\mathbb{R}_N[X] := \mathbb{R}[X] / (X^N + 1)$, $\mathbb{Z}_N[X] := \mathbb{Z}[X] / (X^N + 1)$, and $\mathbb{T}_N[X] := \mathbb{R}_N[X] / \mathbb{Z}_N[X]$, which are the rings of polynomials in the variable $X$ with quotient $X^N + 1$ and real, integer, and real-modulo-$1$ coefficients, respectively. $\mathbb{B} := \{0, 1\}$ is the set of bits, and we write vectors in bold. Given a set $S$, we write $\mathbf{s} \stackrel{\$}{\leftarrow} S$ to indicate that $\mathbf{s}$ is sampled uniformly at random from $S$. We write $e \gets X$ to denote that $e$ is sampled according to the distribution $X$.
72
+
73
+ TFHE. In TFHE [6], we assume $m \in \mathbb{B}$ is a plaintext. The encryption scheme works as follows:
74
+
75
+ - Setup(λ) first selects public parameters $n = n(\lambda)$ and $\sigma = \sigma(\lambda)$, where $\lambda$ is the security parameter. It samples a secret key $s \stackrel{\$}{\leftarrow} \mathbb{B}^n$.
76
+ - Enc[s, m] samples a uniformly random vector $\mathbf{a} \stackrel{\$}{\leftarrow} \mathbb{T}^n$ and a noise $e \gets \mathcal{D}_{\mathbb{T}_N[X], \sigma}$ , where $\mathcal{D}_{\mathbb{T}_N[X], \sigma}$ is the Gaussian distribution over $\mathbb{T}_N[X]$ with a standard deviation $\sigma$ . It outputs a ciphertext $(\mathbf{a}, b)$ , where $b = \mathbf{a} \cdot \mathbf{s} + e + m/2$ .
77
+ - $\text{Dec}[\mathbf{s}, (\mathbf{a}, b)]$ returns $\lceil 2(b - \mathbf{a} \cdot \mathbf{s}) \rceil$ . It outputs plaintext correctly if the size of noise $e$ is bounded as $|e| < 1/4$ , since $2(b - \mathbf{a} \cdot \mathbf{s}) = 2e + m$ , $|2e| < 1/2$ , and thus $\lceil 2(b - \mathbf{a} \cdot \mathbf{s}) \rceil = m$ .
78
+
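+ To make the scheme concrete, the following is a minimal Python sketch of the scalar case above, with the torus rescaled to 32-bit integers as TFHE does; the dimension and noise level are illustrative placeholders, not the paper's exact parameters:
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(0)
+ N_LWE = 630          # LWE dimension n (illustrative)
+ SIGMA = 2.0 ** -15   # noise stddev on the torus (illustrative)
+ Q = 1 << 32          # torus elements rescaled to 32-bit integers, mod 2^32
+
+ def keygen():
+     return rng.integers(0, 2, size=N_LWE, dtype=np.uint64)   # s <- B^n
+
+ def encrypt(s, m):
+     """Return (a, b) with b = <a, s> + e + m/2, all scaled by 2^32 mod 2^32."""
+     a = rng.integers(0, Q, size=N_LWE, dtype=np.uint64)
+     e = int(round(rng.normal(0.0, SIGMA) * Q)) % Q
+     b = (int(np.sum(a * s)) + e + m * (Q // 2)) % Q
+     return a, b
+
+ def decrypt(s, ct):
+     a, b = ct
+     d = (b - int(np.sum(a * s))) % Q   # = e + m/2 on the rescaled torus
+     return round(d / (Q // 2)) % 2     # rounding strips e while |e| < 1/4
+
+ s = keygen()
+ assert all(decrypt(s, encrypt(s, m)) == m for m in (0, 1))
+ ```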
79
+ Algorithm 1: The bootstrapping operation of TFHE.
80
+ Input: A TLWE sample $(\mathbf{a},b)$ whose plaintext is $m_{in}$ ; a constant $m_{set}$ ; a bootstrapping key $\mathbf{BK}_{\mathbf{s} \rightarrow \mathbf{s}^{\prime \prime},\alpha}$ ; and a key-switching key $\mathbf{KS}_{\mathbf{s}^{\prime} \rightarrow \mathbf{s},\mathbf{y}^{\prime}}$ ( $\mathbf{s}^{\prime} = \mathbf{KeyExtract}(\mathbf{s}^{\prime \prime})$ ). Output: A TLWE sample encrypting $m_{out} = m_{in} \cdot m_{set}$ .
81
+ 1 $\mu = m_{set} / 2, \mu^{\prime} = \mu / 2$ /* Initialization */
82
+ 2 $\bar{b} = \lceil 2Nb \rceil, \bar{a}_i = \lceil 2Na_i \rceil$ for each $i \in [1,n]$ /* Rounding */
83
+ 3 $testv = (1 + X + \ldots + X^{N-1}) \cdot X^{N/2} \cdot \mu'$
84
+ 4 ACC $\leftarrow X^{\bar{b}} \cdot (0, testv)$ /* ACC = TLWE$(X^{(\bar{b}-\bar{\mathbf{a}}\cdot\mathbf{s})} \cdot testv)$ */
85
+ 5 for $i = 1$ to $n$ do
86
+ 6 $\mathbf{BK}_i = \mathbf{h} + (X^{-\bar{a}_i} - 1) \cdot \mathbf{BK}_i$
87
+ 7 ACC $\leftarrow \mathbf{BK}_i \square ACC$ /* BlindRotate */
88
+ 8 $\mathbf{u} = (0,\mu') + SampleExtract(ACC)$ /* Extract */
89
+ 9 return KeySwitchKS(u) /* KeySwitch */
90
+
91
+ - Logic $[c_0, c_1]$ returns the ciphertext of the result of the logic operation between two ciphertexts $c_0$ and $c_1$ , and the logic operation can be XOR, NAND, AND, and OR. A TFHE logic operation involves an addition between $c_0$ and $c_1$ , and a bootstrapping.
92
+
93
+ TLWE. TLWE is a torus analogue of the learning with errors (LWE) problem [3]. $k$ is a positive integer. $N$ is a power of 2, and $\mathcal{X}$ is a probability distribution over $\mathbb{R}_N[X]$ . A TLWE secret key $\bar{\mathbf{s}}$ is a vector of $k$ polynomials over $\mathbb{Z}_N[X]$ with binary coefficients, denoted as $\bar{\mathbf{s}} \in \mathbb{B}_N[X]^k$ . Given a polynomial message $\mu \in \mathbb{T}_N[X]$ , a TLWE ciphertext of $\mu$ under the key $\bar{\mathbf{s}}$ is a TLWE sample $(\bar{\mathbf{a}},\bar{b}) \in \mathbb{T}_N[X]^k \times \mathbb{T}_N[X]$ , where $\bar{\mathbf{a}} \gets \mathbb{T}_N[X]^k$ , $\bar{b} = \bar{\mathbf{s}}\cdot \bar{\mathbf{a}} + \mu + e$ , and $e \gets \mathcal{X}$ .
94
+
95
+ TGSW. TGSW is the matrix extension of TLWE. Each row of a TGSW sample is a TLWE sample. An external product $\boxdot$ that maps $\boxdot$ : $TGSW \times TLWE \rightarrow TLWE$ is defined by TFHE [6]. The product of the TGSW ciphertext of a polynomial message $\mu_{TGSW} \in \mathbb{T}_N[X]$ and the TLWE ciphertext of a polynomial message $\mu_{TLWE} \in \mathbb{T}_N[X]$ becomes a TLWE ciphertext of the polynomial message $\mu_{TGSW} \cdot \mu_{TLWE} \in \mathbb{T}_N[X]$ .
96
+
97
+ Bootstrapping. Each TFHE logic operation inevitably introduces a certain amount of noise into the resulting ciphertext. A bootstrapping has to be performed to remove the noise at the end of each TFHE logic operation. In various TFHE logic operations, the bootstrapping step is the largest performance bottleneck. The details of a TFHE bootstrapping can be found in [6]. The bootstrapping procedure is shown in Algorithm 1. The dimension of the TLWE sample is set as $k = 1$ [6], which means that the TLWE sample is simply the Ring-LWE sample $(\bar{a},\bar{b})\in \mathbb{T}_N[X]\times \mathbb{T}_N[X]$ . The most computationally intensive step of a bootstrapping is the homomorphic decryption in line 7, where the message of ACC becomes the polynomial $X^{\bar{b} - \bar{\mathbf{a}}\cdot\bar{\mathbf{s}}}\cdot testv$ . In particular, homomorphically computing $X^{-\bar{\mathbf{a}}\cdot\bar{\mathbf{s}}} = X^{\sum_{i = 1}^{n} - \bar{a}_i\bar{s}_i} = \prod_{i = 1}^{n}X^{-\bar{a}_i\bar{s}_i}$ involves a great number of polynomial multiplications. Naively multiplying two degree- $N$ polynomials has complexity $O(N^2)$ . FFT and IFFT are used to reduce the complexity of a polynomial multiplication to $O(N\log (N))$ [7], where $N$ is the degree of the polynomials.
98
+
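+ To see why line 7 of Algorithm 1 amounts to a rotation, note that multiplying a polynomial by $X^k$ in the ring mod $X^N + 1$ only permutes (and sign-flips) its coefficients. A plaintext-side sketch of this negacyclic rotation, with an illustrative $N$:
+
+ ```python
+ import numpy as np
+
+ def mul_by_x_power(poly, k):
+     """poly * X^k in Z[X]/(X^N + 1): the negacyclic rotation that blind
+     rotation applies homomorphically to the test vector."""
+     N = len(poly)
+     k %= 2 * N                   # X^(2N) = 1 in this ring
+     if k >= N:                   # X^N = -1
+         poly, k = -poly, k - N
+     out = np.roll(poly, k)
+     out[:k] = -out[:k]           # wrapped coefficients pick up a sign flip
+     return out
+
+ testv = np.arange(8)             # stand-in test polynomial, N = 8
+ print(mul_by_x_power(testv, 3))  # [-5 -6 -7  0  1  2  3  4]
+ ```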
99
+ Torus Implementation. Theoretically, the scale invariant scheme of TFHE is defined over the real torus $\mathbb{T}$ , where all operations are modulo 1. But TFHE rescales the elements over $\mathbb{T}$ by a factor $2^{32}$ ,
100
+
101
+ ![](images/cba23848c9e6933ce89a781ba5a247746686c22d786c675176a8fce9e6477a3d.jpg)
102
+ Figure 1: Latency breakdown.
103
+
104
+ ![](images/29d298698e5ec909b0fd13d6247c5f858c5de6ca778236397cfcf6c8a5ee42ef.jpg)
105
+ Figure 2: The depth-first FFT.
106
+
107
+ ![](images/75047fa77d34549cb9268610a2627d84c733d9a7574c7eef196486c5682cf78c.jpg)
108
+ Figure 3: The lifting butterfly w/o multiplication.
109
+
110
+ and maps them to 32-bit integers [6], since the scheme can work with approximations. TFHE therefore never has to perform modular reduction explicitly: all operations on 32-bit integers implicitly perform a native, automatic mod $2^{32}$ reduction. To maintain high conversion accuracy, TFHE uses 64-bit double-precision floating point FFT and IFFT kernels [6].
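+
+ A small sketch of this implicit reduction: torus elements are stored as 32-bit integers, and unsigned wraparound realizes the mod-1 addition for free. The helper below is hypothetical, not part of the TFHE library.
+
+ ```python
+ import numpy as np
+
+ def to_torus32(t):
+     # Represent t in [0, 1) as round(t * 2^32), stored in a uint32 array.
+     x = np.round(np.asarray(t, dtype=np.float64) * 2**32).astype(np.int64)
+     return (x % 2**32).astype(np.uint32)
+
+ a, b = to_torus32([0.75]), to_torus32([0.50])
+ c = a + b                             # uint32 addition wraps: implicit mod 2^32
+ print(c.astype(np.float64) / 2**32)   # [0.25] == (0.75 + 0.5) mod 1
+ ```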
111
+
112
+ # 3 Related Work and Motivation
113
+
114
+ Related Work. Apart from some TFHE implementations on CPUs [6], GPUs [7], and FPGAs [10], there is no specialized hardware accelerator that can process TFHE. A TFHE accelerator differs from the accelerators designed for other FHE schemes such as BGV, BFV, and CKKS in two ways. First, although a few prior accelerators [19] support BGV and CKKS bootstrapping along a tiny multiplicative depth datapath, most prior works [15, 18, 20] design hardware accelerators to process leveled BFV or CKKS homomorphic operations without bootstrapping; a TFHE accelerator, in contrast, must perform bootstrapping at the end of each TFHE gate. Second, BGV, BFV, and CKKS require NTT and INTT kernels, while TFHE needs only FFT and IFFT kernels without modular reduction.
115
+
116
+ Motivation. A TFHE gate performs not only polynomial additions but also a bootstrapping (FFT+IFFT+other) that costs $99\%$ of the gate latency on a CPU, as shown in Figure 1. Therefore, in order to shorten the latency of TFHE gates, we need to accelerate the bootstrapping step in TFHE gates. Moreover, FFTs and IFFTs consume $80\%$ of the bootstrapping latency in various TFHE gates. In order to accelerate TFHE gates, MATCHA adopts approximate multiplication-less integer FFTs and IFFTs, and uses a pipelined datapath to support aggressive bootstrapping key unrolling [2, 22].
117
+
118
+ # 4 MATCHA
119
+
120
+ # 4.1 Approximate Fast Integer FFT and IFFT
121
+
122
+ Despite the fact that elements over $\mathbb{T}$ are mapped to 32-bit integers, TFHE still uses 64-bit double-precision floating point FFT and IFFT kernels, since 32-bit integer or single-precision floating point FFT and IFFT kernels are not accurate enough to guarantee the correct decryption of a ciphertext [6]. However, processing 64-bit double-precision floating point FFT and IFFT kernels incurs significant hardware overhead and power consumption.
123
+
124
+ Novelty. We first identify the opportunity to use approximate integer FFTs and IFFTs to accelerate TFHE without decryption errors for MATCHA. It is difficult to apply approximate NTTs and INTTs to accelerate other FHE schemes, e.g., BGV, BFV, and CKKS, which do not include a bootstrapping step after each homomorphic multiplication or addition. The errors introduced by approximate NTTs and INTTs will quickly accumulate in the ciphertext and cause a decryption error if a bootstrapping step is not performed in time. In contrast, TFHE keeps the approximation errors of integer FFTs and IFFTs in check by performing a bootstrapping step at the end of each TFHE gate.
125
+
126
+ Depth-first FFT. Most prior FHE accelerators [18-20] perform NTTs and INTTs with the Cooley-Tukey data flow, which introduces irregular memory accesses, particularly in its bit-reversal stage. To remove the bit-reversal overhead, a prior ideal-lattice-based cryptographic accelerator [13] uses the Cooley-Tukey flow for NTTs and the Gentleman-Sande flow for INTTs. These cryptographic accelerators store a polynomial mod $X^N + 1$ as a list of $N$ coefficients. For each multiplication between two polynomials, they execute two NTT kernels on the two polynomials respectively, perform element-wise multiplications, and then run an INTT kernel on the result. The invocation ratio between NTTs and INTTs is therefore $2:1$, so these FHE accelerators have many opportunities (i.e., switches from NTT to INTT) to reduce the bit-reversal overhead. In contrast, TFHE stores a polynomial mod $X^N + 1$ either as a list of $N$ coefficients or in the Lagrange half-complex representation, consisting of the complex evaluations of the polynomial over the roots of unity $\exp(i(2j + 1)\pi / N)$ for $j \in [0, \frac{N}{2})$. FFT and IFFT kernels are required only for the conversion between these two representations, and the invocation ratio between FFTs and IFFTs in a TFHE gate is $1:4$. As Figure 1 shows, the aggregate latency of IFFT kernels is much longer than that of FFT kernels, and TFHE does not have many opportunities to reduce the bit-reversal overhead. Instead, for MATCHA, we focus on decreasing the computing overhead of a single FFT or IFFT kernel. We adopt the depth-first iterative conjugate-pair FFT (CPFFT) algorithm [1]. Unlike the Cooley-Tukey or Gentleman-Sande flow, the CPFFT requires only a single complex root-of-unity read per radix-4 butterfly, and two butterflies in the same block can share the same twiddle factor, further halving the number of reads to the twiddle-factor buffer [1]. Moreover, the Cooley-Tukey and Gentleman-Sande flows process FFTs/IFFTs stage by stage in a breadth-first manner, as shown in Figure 2(a). To capture spatial locality, as Figure 2(b) shows, CPFFT traverses the FFT flow in a depth-first fashion by completing a sub-transform before moving to the next.
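+
+ The depth-first idea is visible even in a plain recursive radix-2 FFT, sketched below: each sub-transform is finished before its sibling starts. The paper's kernel is a radix-4 conjugate-pair variant [1], so this is a simplified illustration rather than MATCHA's datapath.
+
+ ```python
+ import numpy as np
+
+ def fft_depth_first(x):
+     """Recursive radix-2 FFT: the whole even-index sub-transform completes
+     before the odd one starts, unlike the breadth-first, stage-by-stage
+     Cooley-Tukey loop."""
+     N = len(x)
+     if N == 1:
+         return x.astype(complex)
+     even = fft_depth_first(x[0::2])
+     odd = fft_depth_first(x[1::2])
+     tw = np.exp(-2j * np.pi * np.arange(N // 2) / N) * odd
+     return np.concatenate([even + tw, even - tw])
+
+ x = np.arange(8, dtype=float)
+ print(np.allclose(fft_depth_first(x), np.fft.fft(x)))   # True
+ ```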
127
+
128
+ A Multiplication-less Butterfly. The lifting structure [17], a special type of lattice structure implemented by cascading identity matrices with a single nonzero off-diagonal element, is proposed to approximate the multiplications in FFT and IFFT kernels by additions and binary shifts. The basic lifting step shown in Figure 3(a) can be expressed as $y_{j}(n) = x_{j}(n)$, $y_{i}(n) = x_{i}(n) + \lfloor T \cdot x_{j}(n)\rceil$, $z_{j}(n) = y_{j}(n)$, and $z_{i}(n) = y_{i}(n) - \lfloor T \cdot y_{j}(n)\rceil$, where $T$ is a lifting coefficient and $\lfloor\cdot\rceil$ denotes rounding. Thanks to the rounding operation, the lifting structure achieves an integer-to-integer transform. The lifting matrix and its inverse are $\begin{bmatrix} 1 & T \\ 0 & 1 \end{bmatrix}$ and $\begin{bmatrix} 1 & T \\ 0 & 1 \end{bmatrix}^{-1} = \begin{bmatrix} 1 & -T \\ 0 & 1 \end{bmatrix}$, respectively. A floating-point lifting coefficient can be quantized as an approximate dyadic-valued coefficient $\alpha /2^{\beta}$, and hence computed with only adders and shifters, where we allocate $\beta$
129
+
130
+ ![](images/abb1f77f6c955b95a39afc2d6469b285dcdc8f5095e16b4a0d611c1912b6d0b5.jpg)
131
+ Figure 4: The truth table of $X^{-\overline{a}_{2i-1} s_{2i-1} - \overline{a}_{2i} s_{2i}}$.
132
+
133
+ ![](images/e4775e4ce02e2d47bd9dab157995f074d2b175576c87e4e8e048b1b400c837fd.jpg)
134
+ Figure 5: Bootstrapping key unrolling.
135
+
136
+ ![](images/04e2cbef94695dcbb38033df54771e883fd899326bd04272917b4938bd0cc2b1.jpg)
137
+ (a) the computing flow
138
+ Figure 6: The pipelined MATCHA for aggressive BKU.
139
+
140
+ ![](images/cab03815d3c8af0d7b2329d154ef2214fcce0187d176e31556e9fa226b0fcd7f.jpg)
141
+ (b) the pipeline
142
+
143
+ bits to the lifting coefficient, with $\alpha, \beta \in \mathbb{N}$. For example, the coefficient $9/128$ can be computed as $\frac{9}{128} = \frac{2^3 + 2^0}{2^7} = \frac{1}{2^4} + \frac{1}{2^7}$. Hence, the lifting with coefficient $9/128$ and a rounding operation is replaced by the sum of a 4-bit and a 7-bit right shift, as illustrated in Figure 3(b). The perfect reconstruction property of lifting is always preserved when floating-point coefficients are approximated by dyadic-valued coefficients.
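+
+ A minimal sketch of the $9/128$ example: the multiplication collapses into two right shifts and an add. The round-to-nearest strategy below (add half, then shift) is one possible choice; each term is rounded separately, so the result can differ from $\mathrm{round}(9x/128)$ by a small bounded amount.
+
+ ```python
+ def mul_9_over_128(x):
+     # 9/128 = 2**-4 + 2**-7, so the multiplier disappears entirely:
+     # two shift-and-round terms replace one floating-point multiplication.
+     return ((x + 8) >> 4) + ((x + 64) >> 7)
+
+ for x in (100, 1000, 12345):
+     print(x, mul_9_over_128(x), round(x * 9 / 128))   # off by at most ~1
+ ```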
144
+
145
+ # 4.2 Aggressive Bootstrapping Key Unrolling
146
+
147
+ Bootstrapping Key Unrolling. A TFHE bootstrapping needs to compute external products, i.e., $X^{-\bar{\mathbf{a}}\cdot\mathbf{s}} = X^{\sum_{i=1}^{n} -\bar{a}_i s_i}$, sequentially, thereby becoming the performance bottleneck of a TFHE gate. Bootstrapping key unrolling (BKU) [2, 22] is proposed to compute $X^{\sum_{i=1}^{n/2} -\overline{a}_{2i-1} s_{2i-1} -\overline{a}_{2i} s_{2i}}$ with one external product per pair of key bits, so that the number of external products is reduced from $n$ to $n/2$. The secret key $\mathbf{s}$ is sampled from $\mathbb{B}^n$, so $s_i \in \{0,1\}$ for $1 \leq i \leq n$. Based on the values of $s_{2i-1}$ and $s_{2i}$, the truth table of $X^{-\overline{a}_{2i-1} s_{2i-1} -\overline{a}_{2i} s_{2i}}$ is shown in Figure 4. BKU thus rewrites $X^{-\overline{a}_{2i-1} s_{2i-1} -\overline{a}_{2i} s_{2i}}$ as $X^{-\overline{a}_{2i-1} -\overline{a}_{2i}} \cdot s_{2i-1} s_{2i} + X^{-\overline{a}_{2i-1}} \cdot s_{2i-1}(1 - s_{2i}) + X^{-\overline{a}_{2i}} \cdot (1 - s_{2i-1}) s_{2i} + (1 - s_{2i-1})(1 - s_{2i})$. Because $s_{2i-1} s_{2i} + s_{2i-1}(1 - s_{2i}) + (1 - s_{2i-1})s_{2i} + (1 - s_{2i-1})(1 - s_{2i})$ always equals 1 [2], this can be further simplified to $(X^{-\overline{a}_{2i-1} -\overline{a}_{2i}} - 1) \cdot s_{2i-1} s_{2i} + (X^{-\overline{a}_{2i-1}} - 1) \cdot s_{2i-1}(1 - s_{2i}) + (X^{-\overline{a}_{2i}} - 1) \cdot (1 - s_{2i-1}) s_{2i} + 1$. As Figure 5 shows, BKU encrypts $s_{2i-1} s_{2i}$, $s_{2i-1}(1 - s_{2i})$, and $(1 - s_{2i-1}) s_{2i}$ as TGSW ciphertexts, and builds a bootstrapping key bundle that unrolls the original bootstrapping key by a factor of two.
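+
+ The simplification rests on the fact that exactly one of the four selector products is 1 for any key-bit pair; the snippet below sanity-checks this with toy values for $\overline{a}_{2i-1}$ and $\overline{a}_{2i}$.
+
+ ```python
+ from itertools import product
+
+ a1, a2 = 3, 5                           # toy exponents standing in for a_{2i-1}, a_{2i}
+ for s1, s2 in product((0, 1), repeat=2):
+     sel = [s1 * s2, s1 * (1 - s2), (1 - s1) * s2, (1 - s1) * (1 - s2)]
+     assert sum(sel) == 1                # exactly one selector fires
+     exps = [-a1 - a2, -a1, -a2, 0]      # exponent picked by each selector
+     picked = sum(s * e for s, e in zip(sel, exps))
+     assert picked == -a1 * s1 - a2 * s2
+ print("BKU identity holds for all four key-bit cases")
+ ```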
148
+
149
+ Aggressive BKU Performing Badly on CPUs. BKU can be further generalized as
150
+
151
+ $$
152
+ X^{\sum_{i=1}^{n/m} -\overline{a}_{m\cdot i}\, s_{m\cdot i} - \overline{a}_{m\cdot i+1}\, s_{m\cdot i+1} - \cdots - \overline{a}_{m\cdot i+m-1}\, s_{m\cdot i+m-1}}, \tag{1}
153
+ $$
154
+
155
+ where $m \in [2, n]$. It is therefore possible to unroll the bootstrapping key more aggressively by increasing $m$. Although unrolling the bootstrapping key twice ($m = 2$) reduces the bootstrapping latency by $49\%$, we find that enlarging $m$ beyond 2 actually prolongs the bootstrapping latency on a CPU, as explained in Section 6. Our experimental methodology is described in Section 5. The reasons can be summarized as follows.
156
+
157
+ ![](images/8ddcaafd391279cdcfdfa803562bb5cab997d92b01dfa8dc11761b39bc0297e8.jpg)
158
+ Figure 7: The architecture of MATCHA (mem. ctrl: memory controller; addr gen.: address generation; twid: twiddle factor; butt.: butterfly; and shift.: shifter).
159
+
160
+ - The limited number of cores on a CPU. With an enlarged $m$, there are more terms in the exponent part of Equation 1. For instance, when $m = 4$, there are 15 terms, each of which requires a TGSW scale-and-add operation. Unfortunately, our CPU baseline has only 8 physical cores. Mapping each term to a core and summing the results from all cores introduces significant communication overhead.
161
+ - More cache conflicts. The size of the bootstrapping key increases exponentially with an enlarged $m$. For example, as Figure 5 shows, instead of a single bootstrapping key, BKU with $m = 2$ requires three bootstrapping keys. Each TGSW scale-and-add operation performed on a term fetches its corresponding bootstrapping key into the shared last-level cache, generating more cache conflicts.
162
+ - The lack of a pipelined design. As Figure 5 highlights, in each iteration, the construction of the bootstrapping key bundle BKB and the external product operation are executed sequentially. Although it is possible to compute the BKB for the next iteration while performing the external product operation of the current iteration, the existing BKU implementation [22] cannot do this, due to the lack of a pipelined design.
163
+
164
+ MATCHA for Aggressive BKU. In this paper, we propose a pipeline flow for MATCHA to support aggressive BKU with a larger $m$. Unlike our CPU baseline, our pipeline flow can be easily accelerated by a large number of specialized hardware components, including TGSW clusters and External Product (EP) cores. As Figure 6(a) shows, we divide the bottleneck of a TFHE bootstrapping into two steps, i.e., the construction of the bootstrapping key bundle and the EP operation. A TGSW cluster constructs the bootstrapping key bundle, while an EP core processes EP operations between the bootstrapping key bundle and ACC. A TGSW cluster consists of a TGSW adder tree and multiple TGSW scale units, each of which computes one term in the bootstrapping key bundle, e.g., $(X^{-\overline{a}_{2i-1} - \overline{a}_{2i}} - 1) \cdot \mathbf{BK}_{i,0}$ when $m = 2$, where $\mathbf{BK}_{i,0}$ is the TGSW ciphertext of $s_{2i-1}s_{2i}$. The TGSW adder tree then sums all terms and generates the bootstrapping key bundle. With the bootstrapping key bundle $\mathbf{BKB}_i$, an EP core computes $ACC \gets \mathbf{BKB}_i \boxdot ACC$. The TGSW cluster and the EP core have separate register file banks to reduce on-chip memory conflicts. Moreover, these two steps of a TFHE bootstrapping can be deployed on a TGSW cluster and an EP core in a pipelined manner, as shown in Figure 6(b): in each time step, the EP core computes the EP operation with the bootstrapping key bundle generated by the TGSW cluster in the previous time step. When $m$ is increased, the workload of the bootstrapping key bundle construction becomes larger. The workloads
165
+
166
+ Table 2: The power and area of MATCHA operating at $2GHz$.
167
+
168
+ <table><tr><td>Name</td><td>Spec</td><td>Power (W)</td><td>Area (mm²)</td></tr><tr><td>TGSW cluster</td><td>×16 multipliers &amp; adders, and a 16KB, 2-bank reg. file</td><td>0.98</td><td>0.368</td></tr><tr><td>EP core</td><td>4 IFFT, 1 FFT, ×4 multipliers &amp; adders, and a 256KB, 8-bank reg. file</td><td>2.87</td><td>1.89</td></tr><tr><td>Sub-total</td><td>×8 EP cores and TGSW clusters</td><td>30.8</td><td>18.06</td></tr><tr><td>polynomial unit</td><td>×32 adders &amp; cmps &amp; logic units, and an 8KB, 2-bank reg. file</td><td>2.33</td><td>0.32</td></tr><tr><td>crossbar</td><td>two 8×32 and one 8×8 NoCs (256b bit-sliced)</td><td>2.11</td><td>0.44</td></tr><tr><td>SPM</td><td>a 4MB, 32-bank SPM</td><td>3.52</td><td>3.25</td></tr><tr><td>mem ctrl</td><td>memory controller and HBM2 PHY</td><td>1.225</td><td>14.9</td></tr><tr><td>Total</td><td></td><td>39.98</td><td>36.96</td></tr></table>
169
+
170
+ of the two steps in the pipeline can be approximately balanced by adjusting $m$.
171
+
172
+ # 4.3 The Architecture of MATCHA
173
+
174
+ Architecture. The overall architecture of MATCHA is shown in Figure 7(a). MATCHA has multiple computing components, including a polynomial unit, eight TGSW clusters, and eight External Product (EP) cores. All computing components of MATCHA are connected to 32 scratchpad memory (SPM) banks by crossbars. MATCHA also employs a memory controller to manage the off-chip memory requests issued to HBM2 DRAMs. The polynomial unit is in charge of performing polynomial additions/subtractions for each TFHE logic operation, initializing bootstrapping operations, extracting samples, and conducting key-switching operations that consist of additions, logic comparisons, and Boolean logic operations. One TGSW cluster and one EP core support one bootstrapping pipeline. As Figure 7(b) shows, a TGSW cluster contains 16 32-bit integer multipliers and 16 32-bit integer adders to support TGSW scale operations. Each TGSW cluster has only two register banks, since the memory accesses during a TGSW scale operation have strong spatial locality; the TGSW cluster can read one register bank while concurrently writing the other. An EP core consists of one FFT core and four IFFT cores to accelerate the FFT and IFFT kernels during an EP operation, as shown in Figure 7(c). It has 8 register banks to serve the irregular memory accesses in FFT and IFFT kernels. An EP core also has four 32-bit integer multipliers and four 32-bit integer adders to manipulate TGSW ciphertexts during an EP operation. An FFT core is similar to an IFFT core, except for its data flow. As Figure 7(d) highlights, an FFT core comprises an address generation unit, a twiddle factor buffer, two input/output FIFOs, and 128 butterfly cores, each of which consists of two 64-bit integer adders and two 64-bit binary shifters. The address generation unit guides the butterfly cores to access the twiddle factor buffer.
175
+
176
+ Design Overhead. We implemented MATCHA in RTL and synthesized it with the $16nm$ PTM process technology using state-of-the-art tools. We used CACTI to model all SPM components and register file banks. Due to its simple structure, the entire design of MATCHA can run at $2GHz$. Among various on-chip network architectures, e.g., meshes, rings, and crossbars, we selected two $8 \times 32$ and one $8 \times 8$ bit-sliced crossbars, i.e., SPM $\rightarrow$ cores/clusters, cores/clusters $\rightarrow$ SPM, and cores/clusters $\rightarrow$ cores/clusters. The hardware overhead and power consumption of MATCHA are shown in Table 2. In total, MATCHA occupies $36.96mm^2$ and consumes $39.98W$. The HBM2 bandwidth is $640GB/s$.
177
+
178
+ Error and Noise. The error of the polynomial multiplication result caused by approximate multiplication-less integer FFT and
179
+
180
+ ![](images/8e6806f9e37cb34b4abea783f0da3727f56d2e82dc2a684786e30624bbaeecf9.jpg)
181
+ Figure 8: The error of approx. FFT & IFFT.
182
+ Table 3: The noise comparison (δ: the noise of EPs; RO: the noise of roundings; BK: the noise of bootstrapping keys).
183
+
184
+ <table><tr><td>metric</td><td>BKU [2, 22]</td><td>MATCHA</td></tr><tr><td>EP</td><td>δ/2</td><td>δ/m</td></tr><tr><td>rounding</td><td>RO/2</td><td>RO/m</td></tr><tr><td>BK</td><td>3·BK</td><td>(2^m−1)·BK</td></tr><tr><td>I/FFT</td><td>-150dB</td><td>-141dB</td></tr></table>
185
+
186
+ IFFT kernels is shown in Figure 8. All polynomial coefficients are 32-bit integers, while we quantize the twiddle factors of FFT and IFFT with various bitwidths. With an increasing twiddle-factor bitwidth, the error caused by approximate FFT and IFFT decreases and approaches that of the original double-precision floating point FFT and IFFT. With 64-bit dyadic-value-quantized twiddle factors (DVQTFs), the error caused by approximate FFT and IFFT is $\sim -141dB$, which is still larger than that produced by 64-bit double-precision floating point FFT and IFFT, since the approximate FFT and IFFT perform only additions and binary shifts. At the TFHE gate level, the noise comparison between BKU and MATCHA is shown in Table 3, where BKU unrolls the bootstrapping key twice while MATCHA unrolls it $m$ times ($m \geq 2$). As $m$ grows, the noise from EP and rounding operations decreases linearly, but the noise caused by bootstrapping keys increases exponentially. As a result, TFHE with a smaller $m$ can tolerate more error from approximate FFT and IFFT. Based on our experiments, 38-bit DVQTFs produce no decryption failure in a test of $10^{8}$ TFHE gates. However, for a large $m$, e.g., $m = 5$, we have to use 64-bit DVQTFs to guarantee no decryption failure in the same test, since the noise caused by the additional bootstrapping keys dominates the total noise in ciphertexts. Therefore, MATCHA adopts 64-bit DVQTFs for all approximate multiplication-less integer FFT and IFFT kernels.
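+
+ The spirit of the Figure 8 experiment can be reproduced in a few lines: quantize the DFT twiddle factors to $\beta$-bit dyadic values and measure the relative error in dB. The sketch below uses a slow $O(N^2)$ DFT matrix purely for measurement and will not match the paper's exact numbers, since the real kernels also replace multiplications by lifting steps.
+
+ ```python
+ import numpy as np
+
+ def dyadic_quantize(w, beta):
+     # Quantize real and imaginary parts to multiples of 2**-beta.
+     s = 2.0 ** beta
+     return np.round(w.real * s) / s + 1j * np.round(w.imag * s) / s
+
+ N = 1024
+ x = np.random.default_rng(1).integers(-2**31, 2**31, N).astype(np.float64)
+ ref = np.fft.fft(x)
+
+ k = np.arange(N)
+ W = np.exp(-2j * np.pi * np.outer(k, k) / N)   # full DFT matrix (measurement only)
+ for beta in (16, 32, 38):
+     err = np.linalg.norm(dyadic_quantize(W, beta) @ x - ref)
+     print(beta, 20 * np.log10(err / np.linalg.norm(ref)))  # error in dB
+ ```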
187
+
188
+ # 5 Experimental Methodology
189
+
190
+ Simulation and Compilation. To simulate the performance of MATCHA at cycle level, we used a CGRA modeling framework, OpenCGRA [21], which has been validated against multiple ASIC accelerators. OpenCGRA first compiles a TFHE logic operation into a data flow graph (DFG) of the operations supported by MATCHA, solves its dependencies, and removes structural hazards. The architecture of MATCHA is abstracted into an architecture description (AD) in OpenCGRA, which computes the latency and energy consumption of each TFHE logic operation by scheduling and mapping the DFG onto the AD.
191
+
192
+ Our Baselines. We compared MATCHA against state-of-the-art CPU-, GPU-, FPGA-, and ASIC-based TFHE hardware platforms. Our CPU baseline is an 8-core $3.7GHz$ Xeon E-2288G processor executing the TFHE library [6], while our GPU baseline is a 5120-core Tesla V100 GPU equipped with a 16GB HBM2 DRAM running the cuFHE library [7]. The TFHE Vector Engine (TVE) [10] was implemented on a low-end ZedBoard Zynq-7000 FPGA. We implemented 8 copies of TVE on a Stratix-10 GX2800 FPGA and used it as our FPGA baseline, since the Stratix-10 board has more resources. Because there is no existing ASIC-based design, we synthesized our FPGA baseline with the $16nm$ PTM process as our ASIC baseline. We enable BKU on CPU, GPU, and MATCHA but fix $m = 1$ on FPGA and ASIC, since they do not support BKU.
193
+
194
+ ![](images/47f902bb44e6721b2523b8f5aec04bff481f9b6a8cc68429b8df5dfe6e0ecae5.jpg)
195
+ Figure 9: Latency comparison.
196
+
197
+ ![](images/d4e690c27e64d7fb861f66ffeac4e8e29e82007373f8579d79d85a2a17bc4f73.jpg)
198
+ Figure 10: Throughput comparison.
199
+
200
+ ![](images/1f2f6281bb82b4a864ae85692ca1ae5e9fe13dece293e7507ab72faaa6b18828.jpg)
201
+ Figure 11: Thrght/Watt comparison.
202
+
203
+ TFHE Operations and Parameters. We studied all TFHE logic operations including NOT, AND, OR, NAND, XOR, and XNOR, but we only report the results on NAND in Section 6. This is because AND, OR, NAND, XOR, and XNOR have almost the same latency, which is dominated by the bootstrapping step, while NOT has no bootstrapping at all. To maintain the standard 110-bit security, we adopt the TFHE parameters from [6], i.e., the polynomial degree in the ring $N = 1024$, the TLWE dimension $k = 1$, and the basis and length for the TGSW ciphertext decomposition $Bg = 1024$ and $\ell = 3$.
204
+
205
+ # 6 Results and Analysis
206
+
207
+ Latency. The latency comparison of a TFHE NAND gate between our various baselines and MATCHA is shown in Figure 9. The NAND gate on CPU costs $13.1ms$, while $m = 2$ reduces its latency to $6.67ms$. Aggressive BKU with a larger $m$ cannot further reduce the NAND gate latency on CPU, due to the limited number of cores, more cache conflicts, and the non-pipelined processing style. It takes only $0.37ms$ for GPU to process a NAND gate, and with an enlarging $m$, GPU gradually reduces the NAND gate latency; when $m = 4$, the NAND gate latency on GPU is $0.18ms$. MATCHA reduces the NAND gate latency by $13\%$ over GPU only when $m = 3$, since GPU can fully use all its resources to process one TFHE gate when $m = 1$ or 2. MATCHA cannot support aggressive BKU with $m = 4$ efficiently either, since it has only 8 TGSW clusters. FPGA and ASIC have no pipelined design or memory optimization to support BKU, and they need $>6.8ms$ to complete a NAND gate when $m = 1$.
208
+
209
+ Throughput. The NAND gate throughput comparison between various baselines and MATCHA is shown in Figure 10. FPGA and ASIC duplicate 8 copies of the TVE [10], so they support only $m = 1$. By enabling aggressive BKU, even CPU ($m = 2$) can achieve higher gate processing throughput than ASIC and FPGA with $m = 1$. GPU and MATCHA obtain much higher throughput than ASIC, FPGA, and CPU. Compared to GPU, MATCHA improves the NAND gate throughput by $2.3\times$ ($m = 3$), due to its pipelined architecture for aggressive BKU.
210
+
211
+ Throughput per Watt. The comparison of the NAND gate throughput per Watt between various baselines and MATCHA is shown in Figure 11. FPGA and ASIC consume only $\sim 40W$ and $\sim 26W$ , and improve the NAND gate throughput per Watt by $2.4\times$ and $8.3\times$ over CPU respectively, when $m = 1$ . Due to the large power consumption ( $>200W$ ) of GPU, the best throughput per Watt of GPU ( $m = 4$ ) is only about $58\%$ of that of ASIC. Compared to ASIC, MATCHA improves the NAND gate throughput per Watt by $6.3\times$ , since it consumes only $39.98W$ .
212
+
213
+ # 7 Conclusion
214
+
215
+ TFHE enables arbitrary computations with an unlimited multiplicative depth to directly occur on ciphertexts. However, TFHE gates are time-consuming and power-hungry on state-of-the-art hardware platforms. In this paper, we build MATCHA to accelerate
216
+
217
+ TFHE gates. MATCHA supports aggressive bootstrapping key unrolling, and processes TFHE gates without decryption errors through approximate multiplication-less integer FFTs and IFFTs and a pipelined datapath. Compared to prior CPU-, GPU-, FPGA- and ASIC-based solutions, MATCHA improves the TFHE gate processing throughput by $2.3\times$, and the throughput per Watt by $6.3\times$.
218
+
219
+ # References
220
+
221
+ [1] A. Becoulet and A. Verguet, "A Depth-First Iterative Algorithm for the Conjugate Pair Fast Fourier Transform," IEEE Transactions on Signal Processing, 2021.
222
+ [2] F. Bourse, et al., "Fast Homomorphic Evaluation of Deep Discretized Neural Networks," in Annual International Cryptology Conference, 2018.
223
+ [3] Z. Brakerski, et al., "(Leveled) Fully Homomorphic Encryption without Bootstrapping," ACM Transactions on Computation Theory, 6(3), July 2014.
224
+ [4] A. Brutzkus, et al., "Low Latency Privacy Preserving Inference," in International Conference on Machine Learning, pages 812-821, 2019.
225
+ [5] J. H. Cheon, et al., "Remark on the Security of CKKS Scheme in Practice," Cryptology ePrint Archive, Report 2020/1581, 2020, https://eprint.iacr.org/2020/1581.
226
+ [6] I. Chillotti, et al., "TFHE: Fast Fully Homomorphic Encryption Over The Torus," Journal of Cryptology, 33(1):34-91, 2020.
227
+ [7] W. Dai, "CUDA-accelerated Fully Homomorphic Encryption Library," https://github.com/vernamlab/cuFHE, 2018, Worcester Polytechnic Institute.
228
+ [8] L. Ducas and D. Micciancio, "FHEW: Bootstrapping Homomorphic Encryption in Less than A Second," in International Conference on the Theory and Applications of Cryptographic Techniques, pages 617-640, Springer, 2015.
229
+ [9] J. Fan and F. Vercauteren, "Somewhat Practical Fully Homomorphic Encryption," Cryptology ePrint Archive, Report 2012/144, 2012.
230
+ [10] S. Gener, et al., "An FPGA-based Programmable Vector Engine for Fast Fully Homomorphic Encryption over the Torus," SPSL: Secure and Private Systems for Machine Learning, 2021.
231
+ [11] S. Halevi and V. Shoup, "Bootstrapping for HElib," in International conference on the theory and applications of cryptographic techniques, 2015.
232
+ [12] C. J. Hoofnagle, et al., "The European Union General Data Protection Regulation: What It Is & What It Means," Information & Communications Technology Law, 2019.
233
+ [13] Z. Liu, et al., "High-Performance Ideal Lattice-Based Cryptography on 8-Bit AVR Microcontrollers," ACM Transactions on Embedded Computing Systems, 16(4), July 2017, https://doi.org/10.1145/3092951.
234
+ [14] K. Matsuoka, et al., "Virtual Secure Platform: A Five-Stage Pipeline Processor over TFHE," in USENIX Security Symposium, pages 4007-4024, 2021.
235
+ [15] A. C. Mert, et al., "A Flexible and Scalable NTT Hardware: Applications from Homomorphically Encrypted Deep Learning to Post-Quantum Cryptography," in Design, Automation & Test in Europe Conference & Exhibition, 2020.
236
+ [16] T. Morshed, et al., "CPU and GPU Accelerated Fully Homomorphic Encryption," in IEEE International Symposium on Hardware Oriented Security and Trust, pages 142-153, 2020.
237
+ [17] S. Oraintara, et al., "Integer fast Fourier transform," IEEE Transactions on Signal Processing, 50(3):607-618, 2002.
238
+ [18] M. S. Riazi, et al., "HEAX: An Architecture for Computing on Encrypted Data," in ACM International Conference on Architectural Support for Programming Languages and Operating Systems, 2020.
239
+ [19] N. Samardzic, et al., "F1: A Fast and Programmable Accelerator for Fully Homomorphic Encryption," in IEEE/ACM International Symposium on Microarchitecture, 2021.
240
+ [20] S. Sinha Roy, et al., "FPGA-Based High-Performance Parallel Architecture for Homomorphic Computing on Encrypted Data," in IEEE International Symposium on High Performance Computer Architecture, pages 387-398, 2019.
241
+ [21] C. Tan, et al., "OpenCGRA: An Open-Source Unified Framework for Modeling, Testing, and Evaluating CGRAs," in 2020 IEEE 38th International Conference on Computer Design, pages 381-388, 2020.
242
+ [22] T. Zhou, et al., "Faster Bootstrapping With Multiple Addends," IEEE Access, 6:49868-49876, 2018.
2202.08xxx/2202.08814/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:368ceb70093f580c9bf9beddfa568eaac6e73005011931f6b1f58fa30ad8e05d
3
+ size 286502
2202.08xxx/2202.08814/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08816/882d8f61-0f84-4e48-96e3-ebc90db06a9d_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08816/882d8f61-0f84-4e48-96e3-ebc90db06a9d_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08816/882d8f61-0f84-4e48-96e3-ebc90db06a9d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c79a9cbf402e629391f96637173d71c1701158aa8617fdea9d50eeb80ef8f3b9
3
+ size 1085328
2202.08xxx/2202.08816/full.md ADDED
@@ -0,0 +1,498 @@
 
 
 
 
1
+ # Learning and Evaluating Graph Neural Network Explanations based on Counterfactual and Factual Reasoning
2
+
3
+ Juntao Tan, Shijie Geng, Zuohui Fu, Yingqiang Ge, Shuyuan Xu, Yunqi Li, Yongfeng Zhang
4
+
5
+ Department of Computer Science, Rutgers University, New Brunswick, NJ 08854, US
6
+
7
+ {juntao.tan, shijie.geng, zuohui.fu, yingqiang.ge, shuyuan.xu, yunqi.li, yongfeng.zhang}@rutgers.edu
8
+
9
+ # ABSTRACT
10
+
11
+ Structural data well exists in Web applications, such as social networks in social media, citation networks in academic websites, and threads data in online forums. Due to the complex topology, it is difficult to process and make use of the rich information within such data. Graph Neural Networks (GNNs) have shown great advantages on learning representations for structural data. However, the non-transparency of the deep learning models makes it non-trivial to explain and interpret the predictions made by GNNs. Meanwhile, it is also a big challenge to evaluate the GNN explanations, since in many cases, the ground-truth explanations are unavailable.
12
+
13
+ In this paper, we take insights of Counterfactual and Factual $(\mathrm{CF}^2)$ reasoning from causal inference theory, to solve both the learning and evaluation problems in explainable GNNs. For generating explanations, we propose a model-agnostic framework by formulating an optimization problem based on both of the two casual perspectives. This distinguishes $\mathrm{CF}^2$ from previous explainable GNNs that only consider one of them. Another contribution of the work is the evaluation of GNN explanations. For quantitatively evaluating the generated explanations without the requirement of ground-truth, we design metrics based on Counterfactual and Factual reasoning to evaluate the necessity and sufficiency of the explanations. Experiments show that no matter ground-truth explanations are available or not, $\mathrm{CF}^2$ generates better explanations than previous state-of-the-art methods on real-world datasets. Moreover, the statistic analysis justifies the correlation between the performance on ground-truth evaluation and our proposed metrics. Source code is available at https://github.com/chrisjtan/gnn_cff.
14
+
15
+ # KEYWORDS
16
+
17
+ Explainable AI; Graph Neural Networks; Counterfactual Explanation; Machine Learning; Machine Reasoning; Causal Inference
18
+
19
+ # ACM Reference Format:
20
+
21
+ Juntao Tan, Shijie Geng, Zuohui Fu, Yingqiang Ge, Shuyuan Xu, Yunqi Li, Yongfeng Zhang. 2022. Learning and Evaluating Graph Neural Network Explanations based on Counterfactual and Factual Reasoning. In Proceedings of the ACM Web Conference 2022 (WWW '22), April 25-29, 2022, Virtual Event, Lyon, France. ACM, New York, NY, USA, 10 pages. https://doi.org/10.1145/3485447.3511948
22
+
23
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
24
+
25
+ WWW'22, April 25-29, 2022, Virtual Event, Lyon, France
26
+
27
+ © 2022 Copyright held by the owner/author(s). Publication rights licensed to ACM.
28
+
29
+ ACM ISBN 978-1-4503-9096-5/22/04...$15.00
30
+
31
+ https://doi.org/10.1145/3485447.3511948
32
+
33
+ ![](images/2046389141bfc317f3bc33d7b2d92fe4f062f33765194c768363265e1184c063.jpg)
34
+
35
+ ![](images/3564e6d5f79616f1e6e5a5b13fa1b0da1f2aecd8792f851551b2af98c39cb865.jpg)
36
+ Figure 1: An example for extracting explanations for mutagenic prediction. The sub-graph induced by the bold edges is the explanation extracted by (a) factual reasoning, (b) counterfactual reasoning and (c) counterfactual and factual reasoning. The sub-graph in (c) is also the ground-truth explanation, i.e., Nitrobenzene structure is the cause of mutagen.
37
+
38
+ ![](images/9c8a5eb96df8380504b5d559d16ca3513765aee940e6693b55c75a4681878672.jpg)
39
+
40
+ # 1 INTRODUCTION
41
+
42
+ Structured data widely exists in various domains such as social networks [42], citation networks [14, 33] in Web applications, and chemical molecules [9, 40] in biomedical research. Such data, commonly represented as graphs, contains rich information. However, studying graph data is exhausting for humans because both the topology information and the node features need to be considered.
43
+
44
+ Fortunately, GNNs have shown great advantages in learning graph representations because they aggregate both the feature and structure information by passing messages in the graph. Thus, GNN-based models have achieved promising results in graph prediction tasks such as graph classification, node classification, and link prediction. However, most GNN models are non-transparent, which leads to a lack of explainability in model predictions. Exploring the explainability of GNNs is crucial because good explanations not only help to understand the model predictions but also help to identify potential flaws and further refine the GNN model.
45
+
46
+ From a high-level view, recent state-of-the-art GNN explanation methods are based on either factual reasoning [24, 43] or counterfactual reasoning [22, 23]. Methods based on factual reasoning seek a sub-graph whose information is sufficient to produce the same prediction as using the whole original graph, while methods based on counterfactual reasoning seek a sub-graph whose information is necessary, i.e., whose removal results in a different prediction.
47
+
48
+ Both factual reasoning and counterfactual reasoning are important approaches to explanation extraction, but each of them alone
49
+
50
+ has its disadvantages. Factual reasoning favors sub-graph explanations that contain enough information to make the same prediction, but the extracted sub-graph may include redundant nodes/edges and thus may not be compact enough. For example, an extreme case is to take the whole graph as the "sub-graph," which will definitely give the same prediction, but such a "sub-graph" does not convey any meaningful information as an explanation.
51
+
52
+ This disadvantage is also illustrated in Figure 1(a). We use a real-world biochemical example since it has a known ground-truth explanation, which is hardly accessible for most Web-based graphs. In this example, a molecule is predicted to be mutagenic and we want to extract explanations for the prediction. The explanation sub-graph generated by factual reasoning may indeed cover the essential reason: the Nitrobenzene structure (benzene-NO$_2$) [9]. However, it also contains some extra edges from other carbon rings, because when these edges are included, the sub-graph still leads to the same mutagenic prediction. In a nutshell, the extracted explanation tends to be sufficient but not necessary.
53
+
54
+ On the other hand, counterfactual reasoning favors the explanations that only contain the most crucial information, i.e., if the explanation sub-graph is removed, then the graph will result in different predictions. However, because of this, counterfactual reasoning may only extract a small subset of the real explanation.
55
+
56
+ Take Figure 1(b) as an example: counterfactual reasoning generates a sub-graph with only three edges. These edges, if removed, will indeed break the Nitrobenzene structure and thus lead to a different prediction (i.e., non-mutagenic); however, such an explanation does not cover the complete information about what makes the target molecule mutagenic. In a nutshell, the extracted explanation tends to be necessary but not sufficient.
57
+
58
+ To overcome the problems and to seek a balance between necessity and sufficiency, we propose a Counterfactual and Factual $(\mathrm{CF}^2)$ reasoning framework to extract GNN explanations which brings the best of the two worlds. $\mathrm{CF}^2$ formulates an optimization problem to integrate counterfactual and factual reasoning objectives so as to extract explanations that are both necessary and sufficient. As shown in Figure 1(c), the counterfactual objective encourages the necessary edges while the factual objective ensures that the extracted explanation contains sufficient information, and thus an ideal sub-graph explanation can be induced.
59
+
60
+ Another challenge in explainable GNN research is that most real-world graph datasets lack ground-truth explanations, which makes it difficult to evaluate the extracted explanations for these datasets. Fortunately, the fundamental idea of $\mathrm{CF}^2$ can also be adapted into the evaluations. In this paper, we borrow insights from causal inference theory and adopt the Probability of Necessity (PN) and Probability of Sufficiency (PS) to evaluate the necessity and sufficiency of the extracted explanations, which makes it possible to conduct quantitative evaluation of GNN explanations. PN and PS are aligned with counterfactual and factual reasoning respectively. Details are formulated in Section 6.
61
+
62
+ In summary, this work has the following contributions:
63
+
64
+ - We show the relationship between factual (or counterfactual) reasoning and the sufficiency (or necessity) of GNN explanations.
65
+ - We propose a $\mathrm{CF^2}$ framework to consider both factual and counterfactual reasoning for GNN explanations.
66
+
67
+ - We propose a set of quantitative evaluation metrics to evaluate the GNN explanations.
68
+ - We conduct extensive experiments on 2 synthetic datasets and 3 real-world datasets from different domains to justify the proposed model and evaluation metric.
69
+
70
+ # 2 RELATED WORKS
71
+
72
+ # 2.1 Explainability in Deep Learning and AI
73
+
74
+ Explainable AI has been an important topic in recommender systems [5, 6, 13, 36, 41, 46, 47], natural language processing [8, 16, 20] and computer vision [7, 10, 15, 25, 38]. To improve the transparency of deep neural networks, many explanation techniques have been proposed in recent years. Based on how the importance scores are obtained, these approaches can be categorized into gradient/feature-based methods, perturbation/causal-based methods, and surrogate methods [26, 45]. Gradient/feature-based methods [19, 32, 35] are the most straightforward way to obtain saliency maps as explanations. They usually map the final prediction to the input space by gradient back-propagation or by linking hidden features to inputs via interpolation. Perturbation/causal-based methods [11, 12, 15, 27, 36, 37, 39] learn the feature importance by observing the change of predictions with respect to input perturbations. The idea behind these methods is intuitive: determine which parts of the input are important by either removing the least important information (i.e., pixels in images, words in text, nodes in graphs) while keeping the model prediction the same (factual reasoning), or removing the most important information to change the model prediction (counterfactual reasoning). The representative of surrogate methods is LIME [31], which employs a simple linear model to approximate the predictions on a bunch of nearby inputs and provides explanations from the surrogate model.
75
+
76
+ # 2.2 Explainability in Graph Neural Networks
77
+
78
+ The aforementioned methods are developed mainly for images and texts. Besides individual features, graphs also contain important topological structure. Such graph structures are highly related to the functionalities in specific domains and should not be ignored by GNN-based explanation approaches. In explainable GNN research, early attempts directly extend gradient/feature-based methods [1, 29] to identify important input features. While simple and efficient, these approaches either suffer from gradient saturation [34] or lack the ability to explain node classification predictions [45]. Another line of work [17] follows LIME and adopts a surrogate model for explaining deep graph models, but it ignores the graph structure and cannot explain graph classification models. Hence, these approaches are not suitable for explaining the graph-level predictions of GNNs. To solve the problem, Ying et al. [43] proposed GNNExplainer, which treats explanation generation as a mask optimization problem. It follows the idea of perturbation/causal-based methods and learns soft masks that cover the key nodes and edges while maintaining the original prediction score. GISST [21] further extended GNNExplainer by identifying important sub-graphs and generating importance scores for all nodes and edges through a self-attention layer. The above two methods learn soft masks that contain continuous values, which suffer from the "introduced evidence" problem [45]. To solve the problem, PGExplainer [24] adopts
79
+
80
+ the reparameterization trick and learns approximate discrete masks that maximize the mutual information between key structures and predictions, and XGNN [44] generates a graph based on reinforcement learning to approximate the prediction of the original graph. As generative models, they also facilitate holistic explanations for multiple instances. Apart from these factual reasoning approaches, recent works have also explored counterfactual reasoning. CF-GNNExplainer [23] introduces counterfactual reasoning to renovate GNNExplainer and is able to generate minimal yet crucial explanations for GNNs. Gem [22] distills ground-truth explanations based on Granger causality (a type of counterfactual reasoning) and then trains an auto-encoder architecture to generate adjacency matrices as explanations based on supervised learning. However, these GNN-based explanation approaches consider factual or counterfactual reasoning alone, and thus bias towards either sufficiency or necessity rather than achieving a balance when extracting explanations. In this paper, we seek to integrate counterfactual and factual reasoning to extract GNN explanations that are both sufficient and necessary.
81
+
82
+ # 3 PRELIMINARIES AND NOTATIONS
83
+
84
+ In this section, we briefly introduce how GNNs learn node and graph representations, as well as their application to the node classification and graph classification tasks. We also introduce the basic notations to be used throughout the paper.
85
+
86
+ # 3.1 Learning Representations
87
+
88
+ Given a graph $G = \{\mathcal{V},\mathcal{E}\}$, each node $v_{i}\in \mathcal{V}$ has a $d$-dimensional node feature $x_{i}\in \mathbb{R}^{d}$. A GNN learns the representation of $v_{i}$ by iteratively aggregating the information of its neighbors $N(i)$. At the $l$-th layer of a GNN model, $v_{i}$'s representation is $h_i^l = \mathrm{update}(h_i^{l - 1},h_{N(i)}^l)$, where $h_i^{l - 1}$ is the representation of $v_{i}$ in the previous layer, and $h_{N(i)}^l$ is aggregated from the neighbors of $v_{i}$ via an aggregation function: $h_{N(i)}^l = \mathrm{aggregate}(h_j^{l - 1},\forall v_j\in N(i))$. The implementation of the update(·) and aggregate(·) functions differs across GNN models. For a GNN model with $L$ layers in total, $h_i^L$ is the final representation of the node $v_{i}$.
89
+
90
+ After aggregating the node representations, the graph representation can be computed by taking the average of all the node representations in the graph.
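+
+ The following is a minimal numpy sketch of one update/aggregate layer plus the mean readout; the mean aggregator, ReLU update, and toy shapes are illustrative choices, since concrete GNNs implement these functions differently.
+
+ ```python
+ import numpy as np
+
+ def gnn_layer(A, H, W):
+     # aggregate: mean of neighbor states, h_N(i) = mean(h_j, v_j in N(i))
+     deg = A.sum(axis=1, keepdims=True).clip(min=1)
+     H_neigh = (A @ H) / deg
+     # update: combine h_i with h_N(i) through a linear map and ReLU
+     return np.maximum(0, (H + H_neigh) @ W)
+
+ rng = np.random.default_rng(0)
+ A = np.array([[0, 1, 1], [1, 0, 0], [1, 0, 0]], dtype=float)  # toy graph
+ H = rng.normal(size=(3, 4))          # node features, d = 4
+ W = rng.normal(size=(4, 8))
+ g = gnn_layer(A, H, W).mean(axis=0)  # graph representation: average of nodes
+ print(g.shape)                       # (8,)
+ ```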
91
+
92
+ # 3.2 Graph Classification
93
+
94
+ Given a set of $n$ graphs $\mathcal{G} = \{G_1, G_2, \dots, G_n\}$ , and each graph $G_k \in \mathcal{G}$ is associated with a ground-truth class label $y_k \in C$ , where $C = \{1, 2, \dots, r\}$ is the set of graph classes. The graph classification task aims to learn a graph classifier $\Phi$ that predicts the estimated label $\hat{y}_k$ for an input graph $G_k$ .
95
+
96
+ Each input graph $G_{k} = \{\mathcal{V}_{k},\mathcal{E}_{k}\}$ is associated with an adjacency matrix $A_{k}\in \{0,1\}^{|\mathcal{V}_{k}|\times |\mathcal{V}_{k}|}$ and a node feature matrix $X_{k}\in \mathbb{R}^{|V_{k}|\times d}$ . After the training process, the GNN model will predict the estimated label $\hat{y}_k$ for $G_{k}$ by:
97
+
98
+ $$
99
+ \hat {y} _ {k} = \underset {c \in \mathcal {C}} {\arg \max } P _ {\Phi} (c \mid A _ {k}, X _ {k}) \tag {1}
100
+ $$
101
+
102
+ where $\Phi$ is the trained GNN model.
103
+
104
+ # 3.3 Node Classification
105
+
106
+ For the node classification task, the goal is to predict the class label for each node in a given graph $G = \{\mathcal{V},\mathcal{E}\}$. Each node $v_{i}\in \mathcal{V}$ is associated with a ground-truth node label $y_{i}\in C$, where $C = \{1,2,\dots ,r\}$ is the set of node classes. In the node classification task, since only the $L$-hop neighbors of the node $v_{i}$ influence $h_i^L$, we define the $L$-hop sub-graph of the node $v_{i}$ as $G_{s(i)}$, which is the computational graph that will be the input of the GNN model. $A_{s(i)}$ and $X_{s(i)}$ are the corresponding adjacency matrix and feature matrix of the computational sub-graph. The trained GNN model thus predicts the estimated label $\hat{y}_i$ for the node $v_{i}$ as:
107
+
108
+ $$
109
+ \hat {y} _ {i} = \underset {c \in C} {\arg \max } P _ {\Phi} (c \mid A _ {s (i)}, X _ {s (i)}) \tag {2}
110
+ $$
111
+
112
+ # 4 PROBLEM FORMULATION
113
+
114
+ In this section, we first introduce the explainable GNN problem for the classification task. Then, we mathematically define two objectives for extracting explanations and build them into the $\mathrm{CF^2}$ framework. The two objectives are: 1) an effective explanation should be both sufficient and necessary, which is reflected by the factual and counterfactual conditions, respectively; and 2) a good explanation should not only be effective but also simple, which is driven by the Occam's Razor principle [2]. We formulate the Explanation Strength to reflect the effectiveness and the Explanation Complexity to reflect the simplicity. These two objectives are the foundation of the $\mathrm{CF^2}$ framework for extracting explanations.
115
+
116
+ We note that in the rest of the paper, all the concepts, examples and mathematical definitions are introduced under the graph classification problem setting and they can be easily generalized to the node classification task. We provide another version for node classification in Appendix A.
117
+
118
+ # 4.1 Explainable Graph Neural Networks
119
+
120
+ Suppose a graph $G_{k} = \{\mathcal{V}_{k},\mathcal{E}_{k}\}$ has the predicted label $\hat{y}_k$ , following the setup of Ying et al. [43], we generate the explanation for this prediction as a sub-graph, which consists of a subset of the edges and a subset of the feature space of the original graph. The sub-graph can be either connected or unconnected. Thus, the goal of $\mathrm{CF^2}$ is to learn an edge mask $M_{k}\in \{0,1\}^{|\mathcal{V}_{k}|\times |\mathcal{V}_{k}|}$ and a feature mask $F_{k}\in \{0,1\}^{|\mathcal{V}_{k}|\times d}$ , which will be applied on the adjacency matrix $A_{k}\in \{0,1\}^{|\mathcal{V}_{k}|\times |\mathcal{V}_{k}|}$ and the node feature matrix $X_{k}\in \mathbb{R}^{|\mathcal{V}_{k}|\times d}$ of the original graph $G_{k}$ . After optimization, the sub-graph will be $A_{k}\odot M_{k}$ with the sub-features $X_{k}\odot F_{k}$ , which is the generated explanation for the prediction of graph $G_{k}$ .
121
+
122
+ # 4.2 Counterfactual and Factual Conditions
123
+
124
+ As discussed above, an ideal explanation should be both necessary and sufficient. $\mathrm{CF}^2$ achieves this goal by considering both factual and counterfactual reasoning.
125
+
126
+ Factual and counterfactual reasoning are two opposite but very symmetric ways of reasoning. Factual reasoning asks the question "Given A already happened, will B happen?" Counterfactual reasoning, on the contrary, asks "If A did not happen, will B still happen?" [30]. Under the context of GNN explanations, factual reasoning generates sub-edges/sub-features that satisfy the condition "With
127
+
128
+ these sub-edges/sub-features, which is consistent with the fact, the GNN prediction will be the same." Counterfactual reasoning generates sub-edges/sub-features that satisfy the condition "Without these sub-edges/sub-features, which is inconsistent with the fact, the GNN prediction will be different." Intuitively, factual reasoning seeks a sufficient set of edges/features that produce the same prediction as using the whole graph, while counterfactual reasoning seeks a necessary set of edges/features that if removed will lead to different predictions.
129
+
130
+ In $\mathrm{CF}^2$, both factual and counterfactual reasoning are formulated into the model. The condition for factual reasoning is mathematically formulated as follows:
131
+
132
+ Condition for Factual Reasoning:
133
+
134
+ $$
135
+ \underset {c \in \mathcal {C}} {\arg \max } P _ {\Phi} \left(c \mid A _ {k} \odot M _ {k}, X _ {k} \odot F _ {k}\right) = \hat {y} _ {k} \tag {3}
136
+ $$
137
+
138
+ Similarly, the condition for counterfactual reasoning is formulated as:
139
+
140
+ Condition for Counterfactual Reasoning:
141
+
142
+ $$
143
+ \underset {c \in \mathcal {C}} {\arg \max } P _ {\Phi} \left(c \mid A _ {k} - A _ {k} \odot M _ {k}, X _ {k} - X _ {k} \odot F _ {k}\right) \neq \hat {y} _ {k} \tag {4}
144
+ $$
145
+
146
+ These two conditions will be reflected as objectives for explanation extraction in the loss function, which will be introduced in Section 5.
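+
+ Operationally, checking the two conditions amounts to two forward passes of the trained GNN: one on the masked sub-graph and one on its complement. A hypothetical checker might look like the sketch below, where `predict_proba` stands in for $\Phi$.
+
+ ```python
+ import numpy as np
+
+ def check_conditions(predict_proba, A, X, M, F, y_hat):
+     # Factual condition (Eq. 3): the sub-graph alone keeps the prediction.
+     factual = np.argmax(predict_proba(A * M, X * F)) == y_hat
+     # Counterfactual condition (Eq. 4): removing it flips the prediction.
+     counterfactual = np.argmax(predict_proba(A - A * M, X - X * F)) != y_hat
+     return factual, counterfactual
+ ```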
147
+
148
+ # 4.3 Simple and Effective Explanations
149
+
150
+ According to the Occam's Razor principle [2], if two explanations are equally effective, we tend to prefer the simpler one. To achieve this goal, we introduce Explanation Complexity and Explanation Strength for GNN explanations. These two concepts help $\mathrm{CF^2}$ to seek simple and effective explanations for GNN predictions.
151
+
152
+ Explanation complexity $C(M, F)$ measures how complicated the explanation is, which is defined as the number of edges/features used to construct the explanation. Note that $M$ and $F$ are binary matrices indicating which edges and features are included in the sub-graph explanation. As a result, $C(M, F)$ can be defined as the number of 1's in $M$ and $F$ matrices, i.e.,
153
+
154
+ $$
155
+ C (M, F) = \| M \| _ {0} + \| F \| _ {0} \tag {5}
156
+ $$
157
+
158
+ However, to make $C(M,F)$ optimizable, we relax it from the 0-norm to the 1-norm, as explained in Section 5.
159
+
160
+ Explanation strength $S(M, F)$ measures how effective the explanation is. As mentioned above, an effective explanation should be both sufficient and necessary, which is pursued by the factual and counterfactual conditions (Eq.(3) and (4)). As a result, the explanation strength can be defined as two parts: factual explanation strength $S_{f}(M, F)$ and counterfactual explanation strength $S_{c}(M, F)$ , both are the larger the better.
161
+
162
+ The mathematical definition of $S_{f}(M,F)$ is consistent with the condition for factual reasoning, which is:
163
+
164
+ $$
165
+ S _ {f} (M, F) = P _ {\Phi} \left(\hat {y} _ {k} \mid A _ {k} \odot M _ {k}, X _ {k} \odot F _ {k}\right) \tag {6}
166
+ $$
167
+
168
+ On the contrary, $S_{c}(M,F)$ is consistent with the condition for counterfactual reasoning, which is:
169
+
170
+ $$
171
+ S _ {c} (M, F) = - P _ {\Phi} \left(\hat {y} _ {k} \mid A _ {k} - A _ {k} \odot M _ {k}, X _ {k} - X _ {k} \odot F _ {k}\right) \tag {7}
172
+ $$
173
+
174
+ Table 1: $\mathrm{CF^2}$ generates explanations with two goals: 1) the explanation should be simple, i.e., low in explanation complexity, which means that the generated explanation subgraph should have a small number of edges and features, which can be achieved by 0-norm or 1-norm regularization. 2) the explanation should be effective, i.e., high in explanation strength. An effective explanation should be both sufficient and necessary. Sufficiency can be achieved via factual reasoning and necessity via counterfactual reasoning.
175
+
176
+ <table><tr><td>Objs</td><td>Simple
177
+ (↓ Complexity)</td><td colspan="2">Effective
178
+ (↑ Strength)</td></tr><tr><td>Measure</td><td># edges, # features</td><td>Sufficiency</td><td>Necessity</td></tr><tr><td>Method</td><td>Regularization</td><td>Factual</td><td>Counterfactual</td></tr></table>
179
+
180
+ Explanation complexity and strength will serve as the learning objective and learning constraint in the explanation extraction algorithm, which will also be introduced in Section 5.
181
+
182
+ In Table 1, we provide an overview of the relationships among the aforementioned concepts.
183
+
184
+ # 5 THE $\mathbf{CF^2}$ FRAMEWORK
185
+
186
+ In this section, we first introduce the $\mathrm{CF^2}$ constrained optimization framework. Then we provide a relaxed version to make the framework optimizable.
187
+
188
+ # 5.1 $\mathrm{CF^2}$ Optimization Problem
189
+
190
+ $\mathrm{CF^2}$ is able to generate an explanation for any prediction made by a GNN model. As mentioned before, $\mathrm{CF^2}$ aims to find simple (i.e., low complexity) and effective (i.e., high strength) explanations, which can be formulated as the following constrained optimization framework:
191
+
192
+ $$
193
+ \text{minimize Explanation Complexity}
194
+ $$
195
+
196
+ $$
197
+ \text{s.t., Explanation is Strong Enough} \tag{8}
198
+ $$
199
+
200
+ According to the mathematical definition of explanation complexity and strength in Section 4.3, for a given graph $G_{k}$ with predicted label $\hat{y}_k$ , Eq.(8) can be rewritten as:
201
+
202
+ $$
203
+ \text{minimize}\quad C\left(M_k, F_k\right)
204
+ $$
205
+
206
+ $$
207
+ \text{s.t.,}\quad S_f\left(M_k, F_k\right) > P_{\Phi}\left(\hat{y}_{k,s} \mid A_k \odot M_k, X_k \odot F_k\right), \tag{9}
208
+ $$
209
+
210
+ $$
211
+ S_c\left(M_k, F_k\right) > -P_{\Phi}\left(\hat{y}_{k,s} \mid A_k - A_k \odot M_k, X_k - X_k \odot F_k\right)
212
+ $$
213
+
214
+ where $\hat{y}_{k,s}$ is the label other than $\hat{y}_k$ that has the largest probability score predicted by the GNN model. Intuitively, the constraint aims to ensure that when only using the information in the explanation sub-graph, the predicted label $\hat{y}_k$ 's probability is higher than any other label and thus the prediction does not change, while if information in the explanation sub-graph is removed, $\hat{y}_k$ 's probability will be smaller than at least one other label and thus the prediction will change.
215
+
216
+ # 5.2 Relaxed Optimization
217
+
218
+ Directly optimizing Eq.(9) is challenging because neither the objective nor the constraints are differentiable. As a result, we relax both parts to make the problem optimizable.
219
+
220
+ For the objective part, we relax the masks $M_{k}$ and $F_{k}$ to real values, which are $M_{k}^{*}\in \mathbb{R}^{\lvert\mathcal{V}_{k}\rvert\times\lvert\mathcal{V}_{k}\rvert}$ and $F_{k}^{*}\in \mathbb{R}^{\lvert\mathcal{V}_{k}\rvert\times d}$ . Meanwhile, since the 0-norm in the original equation is also not differentiable,
221
+
222
+ we use 1-norm to ensure the sparsity of $M_k^*$ and $F_k^*$ , which has been proven to be effective in [3, 4].
223
+
224
+ For the constraint part, we relax it as pairwise contrastive loss $L_{f}$ and $L_{c}$ , where
225
+
226
+ $$
+ L_f = \operatorname{ReLU}\left(\gamma + P_\Phi\left(\hat{y}_{k,s} \mid A_k \odot M_k^*, X_k \odot F_k^*\right) - S_f\left(M_k^*, F_k^*\right)\right) \tag{10}
+ $$
229
+
230
+ Similarly,
231
+
232
+ $$
+ L_c = \operatorname{ReLU}\left(\gamma - S_c\left(M_k^*, F_k^*\right) - P_\Phi\left(\hat{y}_{k,s} \mid A_k - A_k \odot M_k^*, X_k - X_k \odot F_k^*\right)\right) \tag{11}
+ $$
235
+
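+ The following is a minimal PyTorch-style sketch of Eq.(10) and Eq.(11) under the same assumptions as above: `model_prob` is a hypothetical differentiable wrapper around the trained GNN $\Phi$, and `y_second` stands for the runner-up label $\hat{y}_{k,s}$.
+
+ ```python
+ import torch
+
+ def loss_factual(model_prob, y_hat, y_second, A, X, M, F, gamma=0.5):
+     # Eq.(10): zero once the original label beats the runner-up
+     # by margin gamma when only the explanation is kept.
+     s_f = model_prob(y_hat, A * M, X * F)
+     p_second = model_prob(y_second, A * M, X * F)
+     return torch.relu(gamma + p_second - s_f)
+
+ def loss_counterfactual(model_prob, y_hat, y_second, A, X, M, F, gamma=0.5):
+     # Eq.(11): zero once the runner-up beats the original label
+     # by margin gamma when the explanation is removed.
+     s_c = -model_prob(y_hat, A - A * M, X - X * F)
+     p_second = model_prob(y_second, A - A * M, X - X * F)
+     return torch.relu(gamma - s_c - p_second)
+ ```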
236
+ After relaxation, Eq.(9) becomes optimizable as:
237
+
238
+ $$
+ \text{minimize} \quad \left\| M_k^* \right\|_1 + \left\| F_k^* \right\|_1 + \lambda \left(\alpha L_f + (1 - \alpha) L_c\right) \tag{12}
+ $$
241
+
242
+ When solving the relaxed optimization equation, the margin value $\gamma$ in Eq.(10) and Eq.(11) is set to 0.5. After the optimization, 0.5 is also used as the threshold to be applied on the optimized masks to generate explanations (i.e., when the value in the masks $M^{*} / F^{*}$ is larger than 0.5, we keep the related edge/feature in the generated explanation).
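+ Putting the pieces together, the following sketch shows the full explanation extraction loop for one graph, reusing `loss_factual` and `loss_counterfactual` from the sketch above and assuming `model_prob` is differentiable with respect to the masked inputs. The sigmoid parameterization of the relaxed masks is our own assumption (the text only requires real-valued masks); it conveniently keeps entries in (0, 1) so the 0.5 threshold applies directly.
+
+ ```python
+ import torch
+
+ def explain(model_prob, y_hat, y_second, A, X, d, lam, alpha,
+             gamma=0.5, steps=500, lr=0.01):
+     n = A.shape[0]
+     m_logits = torch.zeros(n, n, requires_grad=True)  # relaxed edge mask M*
+     f_logits = torch.zeros(n, d, requires_grad=True)  # relaxed feature mask F*
+     opt = torch.optim.Adam([m_logits, f_logits], lr=lr)
+     for _ in range(steps):
+         M, F = torch.sigmoid(m_logits), torch.sigmoid(f_logits)
+         l_f = loss_factual(model_prob, y_hat, y_second, A, X, M, F, gamma)
+         l_c = loss_counterfactual(model_prob, y_hat, y_second, A, X, M, F, gamma)
+         # Eq.(12): 1-norm sparsity plus weighted contrastive losses
+         loss = M.abs().sum() + F.abs().sum() + lam * (alpha * l_f + (1 - alpha) * l_c)
+         opt.zero_grad()
+         loss.backward()
+         opt.step()
+     # Threshold the optimized masks at 0.5 to obtain the explanation
+     M = (torch.sigmoid(m_logits) > 0.5).float()
+     F = (torch.sigmoid(f_logits) > 0.5).float()
+     return M, F
+ ```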
243
+
244
+ In Eq.(12), the hyper-parameter $\lambda$ controls the trade-off between the explanation complexity and the explanation strength. By increasing $\lambda$, the model will focus more on the effectiveness of the generated explanations and less on their complexity, which may result in a larger sub-graph and feature space. Another hyper-parameter, $\alpha$, controls the trade-off between the sufficiency and the necessity of the generated explanation. By increasing (or decreasing) $\alpha$, the generated explanation will focus more on sufficiency (or necessity).
245
+
246
+ # 6 EVALUATING GNN EXPLANATIONS
247
+
248
+ Most real-world datasets for graph/node classification do not have ground-truth explanations, which makes the evaluation of GNN explanations a major challenge for the community. As mentioned in Section 4, a good explanation should be both sufficient and necessary, which aligns with the factual and counterfactual conditions, respectively.
249
+
250
+ In logic and mathematics, necessity and sufficiency are terms used to describe a conditional or implicational relationship between two statements. Suppose we have $S \Rightarrow N$, i.e., if $S$ happens then $N$ will happen; then we say $S$ is a sufficient condition for $N$. Meanwhile, we have the logically equivalent contrapositive $\neg N \Rightarrow \neg S$, i.e., if $N$ does not happen, then $S$ will not happen; as a result, we say $N$ is a necessary condition for $S$. In light of this idea, we adopt the concepts of Probability of Sufficiency (PS) and Probability of Necessity (PN) from causal inference theory [28, p.112], which enable us to conduct quantitative evaluation of GNN explanations.
251
+
252
+ # 6.1 Probability of Sufficiency
253
+
254
+ For an explanation A that is generated to explain event B: if whenever A happens B also happens, then A satisfies the factual condition and A is a sufficient explanation. We define PS as the percentage of generated explanations that are sufficient for the instance to achieve the same prediction as using the whole graph. In the explainable GNN problem, the Probability of Sufficiency is defined as:
257
+
258
+ $$
+ \mathrm{PS} = \frac{\sum_{G_k \in \mathcal{G}} \mathrm{ps}_k}{|\mathcal{G}|}, \quad \text{where } \mathrm{ps}_k = \begin{cases} 1, & \text{if } \hat{y}_k^{\prime} = \hat{y}_k \\ 0, & \text{else} \end{cases} \tag{13}
+ $$
+
+ $$
+ \text{where } \hat{y}_k^{\prime} = \operatorname*{arg\,max}_{c \in \mathcal{C}} P_\Phi\left(c \mid A_k \odot M_k, X_k \odot F_k\right)
+ $$
265
+
266
+ Intuitively, PS measures the percentage of graphs whose explanation sub-graph alone can keep the GNN prediction unchanged, and thus it is sufficient.
267
+
268
+ # 6.2 Probability of Necessity
269
+
270
+ Similarly, if whenever A does not happen B also does not happen, we say A satisfies the counterfactual condition and A is a necessary explanation. We define PN as the percentage of generated explanations that are necessary for the instance to achieve the same prediction as using the whole graph. In the explainable GNN problem, the Probability of Necessity is defined as:
271
+
272
+ $$
+ \mathrm{PN} = \frac{\sum_{G_k \in \mathcal{G}} \mathrm{pn}_k}{|\mathcal{G}|}, \quad \text{where } \mathrm{pn}_k = \begin{cases} 1, & \text{if } \hat{y}_k^{\prime} \neq \hat{y}_k \\ 0, & \text{else} \end{cases} \tag{14}
+ $$
+
+ $$
+ \text{where } \hat{y}_k^{\prime} = \operatorname*{arg\,max}_{c \in \mathcal{C}} P_\Phi\left(c \mid A_k - A_k \odot M_k, X_k - X_k \odot F_k\right)
+ $$
279
+
280
+ Intuitively, PN measures the percentage of graphs whose explanation sub-graph, if removed, will change the GNN prediction, and thus it is necessary.
281
+
282
+ For both PS and PN, higher values are better. Similar to the definition of the $F_{1}$ score, we use $F_{NS} = \frac{2\cdot\mathrm{PN}\cdot\mathrm{PS}}{\mathrm{PN} + \mathrm{PS}}$ to measure the overall performance of a GNN explanation method.
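+ A minimal sketch of how PS, PN and $F_{NS}$ can be computed together. `model_predict` is a hypothetical function returning the GNN's argmax class, and `explanations` is an assumed list of `(y_hat, A, X, M, F)` tuples, one per test graph.
+
+ ```python
+ def evaluate_explanations(model_predict, explanations):
+     ps = pn = 0
+     for y_hat, A, X, M, F in explanations:
+         ps += int(model_predict(A * M, X * F) == y_hat)           # Eq.(13)
+         pn += int(model_predict(A - A * M, X - X * F) != y_hat)   # Eq.(14)
+     PS, PN = ps / len(explanations), pn / len(explanations)
+     f_ns = 2 * PN * PS / (PN + PS) if PN + PS > 0 else 0.0
+     return PS, PN, f_ns
+ ```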
283
+
284
+ # 7 EXPERIMENTS
285
+
286
+ In this section, we first introduce the datasets and the comparison baselines. Then, we report the main experimental results and the analyses. Finally, we conduct experiments to show the influence of factual and counterfactual reasoning, which helps gain a deeper understanding of the key concepts of the paper. We also conduct studies to justify the effectiveness of the PN/PS-based evaluation.
287
+
288
+ # 7.1 Datasets
289
+
290
+ We test our algorithm on two synthetic and three real-world datasets. The two synthetic datasets are BA-Shapes and Tree-Cycles, which were introduced in Ying et al. [43]; we follow exactly the same setup when generating them. The three real-world datasets are Mutag [9], NCI1 [40] and CiteSeer [14, 33]. The Mutag dataset contains 4,337 molecules classified into two categories: mutagenic or non-mutagenic. The NCI1 dataset contains 4,110 chemical compounds, each categorized as either positive or negative with respect to cell lung cancer. The CiteSeer dataset contains 3,312 scientific publications classified into six classes, in which the nodes are papers and the links represent citations between papers.
291
+
292
+ BA-Shapes, Tree-Cycles and CiteSeer are for node classification, while Mutag and NCI1 are for graph classification. BA-Shapes and Tree-Cycles have ground-truth motifs (i.e., "house" and "cycle" structures) for explaining the classification since they are human-designed. However, NCI1 and CiteSeer do not have such ground-truth motifs. We would like to especially mention the motifs in the Mutag dataset. Luo et al. [24] assumed that the nitro group $\mathrm{(NO_2)}$ and amino group $(\mathrm{NH}_2)$ are the true reasons for mutagenicity and filtered out the mutagens that do not contain them. However, according to Debnath et al. [9], the work that published the Mutag dataset, $\mathrm{NH}_2$ requires microsomal activation to achieve full mutagenic potency, and the dataset is limited to studies without such activation. Thus, $\mathrm{NH}_2$ has very small influence in the Mutag dataset. This is also mentioned in Lin et al. [22], which shows that the presence of $\mathrm{NH}_2$ has very low correlation with the classification result on this dataset. In fact, benzene-$\mathrm{NO}_2$ is the only discriminative motif in this dataset. As a result, we extract a sub-dataset, $\mathrm{Mutag}_0$, which only includes those chemical compounds that contain benzene-$\mathrm{NO}_2$ and are mutagenic, or that do not contain benzene-$\mathrm{NO}_2$ and are not mutagenic. The statistics of the Mutag dataset are shown in Table 2, and Table 3 provides the statistics of all the datasets used.
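+ A sketch of the $\mathrm{Mutag}_0$ filtering rule described above. `contains_benzene_no2` is a hypothetical motif-matching predicate, and the label encoding (1 = mutagenic) is an assumption rather than part of the dataset specification.
+
+ ```python
+ def build_mutag0(dataset):
+     # Keep mutagens that contain benzene-NO2 and
+     # non-mutagens that do not contain benzene-NO2.
+     return [g for g in dataset
+             if contains_benzene_no2(g) == (g.label == 1)]
+ ```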
295
+
296
+ # 7.2 Baselines
297
+
298
+ The comparable baselines in this paper should satisfy two conditions: 1) they generate sub-graphs for explanation; 2) they can generate explanations for any graph dataset, with or without prior knowledge. For example, Luo et al. [24] requires explicit motifs to generate explanations and thus cannot be applied to NCI1 and CiteSeer, which is why it is not included. The baselines are as follows:
299
+
300
+ GNNExplainer [43]: An explanation model based on perturbation. It selects a compact sub-graph while maximizing the mutual information with the whole graph.
301
+
302
+ CF-GNNExplainer [23]: An extension of GNNExplainer that generates explanations based on counterfactual reasoning.
303
+
304
+ Gem [22]: A generative explanation model based on Granger causality; it trains an auto-encoder to generate explanation sub-graphs.
305
+
306
+ # 7.3 Experimental Setup
307
+
308
+ There are two phases in the experiments: 1) Training the base GNN model for classification; and 2) Generating the explanations.
309
+
310
+ For the base model, a GCN with three layers is used for all the datasets. The hidden dimensions are 16 for BA-Shapes, Tree-Cycles, Mutag and NCI1, and 32 for CiteSeer. The model for the Mutag and NCI1 datasets requires extra pooling and fully-connected layers for computing the graph embeddings. We apply the ReLU activation function after all layers except the last one, which is followed by a Softmax function for classification. The learning rate is 0.001 during training for all datasets and the ratio between training and test set is $8:2$. In Table 6, we report the number of training epochs and the accuracy of the base model used in this paper. We use the same base model for all the baselines to fairly compare explanation ability. Since the explanation method is model-agnostic, the base model can be any classification model for graphs.
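+ A minimal PyTorch Geometric sketch of such a base model, following the setup above (three GCN layers, ReLU between layers, Softmax output, pooling for the graph-level tasks). The exact pooling operator and readout head are our assumptions; the text only specifies extra pooling and fully-connected layers.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+ from torch_geometric.nn import GCNConv, global_mean_pool
+
+ class BaseGCN(torch.nn.Module):
+     def __init__(self, in_dim, hidden=16, n_classes=2, graph_level=False):
+         super().__init__()
+         self.convs = torch.nn.ModuleList(
+             [GCNConv(in_dim, hidden), GCNConv(hidden, hidden), GCNConv(hidden, hidden)])
+         self.graph_level = graph_level  # True for Mutag0 / NCI1
+         self.out = torch.nn.Linear(hidden, n_classes)
+
+     def forward(self, x, edge_index, batch=None):
+         for conv in self.convs:
+             x = F.relu(conv(x, edge_index))       # ReLU after each GCN layer
+         if self.graph_level:
+             x = global_mean_pool(x, batch)        # graph embedding via pooling
+         return F.softmax(self.out(x), dim=-1)     # Softmax for classification
+ ```
+
+ Training then uses, e.g., `torch.optim.Adam(model.parameters(), lr=0.001)` with an 8:2 train/test split, matching the settings above.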
311
+
312
+ In the explanation phase, GNNExplainer and Gem require a human-selected $K$ value to decide the size of the explanations in their settings. When implementing these two methods, we follow the same setup as in Gem: for the synthetic datasets, we set $K$ equal to the size (#edges) of the ground-truth motifs, and we set $K = 15$ (#edges) for Mutag and NCI1. We run two experiments on the CiteSeer dataset: edge-based explanation ($K = 5$) and feature-based explanation ($K = 60$). CF-GNNExplainer and $\mathrm{CF}^2$ do not require
313
+
314
+ Table 2: Statistics of the Mutag dataset; the molecules marked with "*" are the graphs we used to build the $\mathrm{Mutag}_0$ dataset.
315
+
316
+ <table><tr><td></td><td>w/ benzene-NO2</td><td>w/o benzene-NO2</td></tr><tr><td>mutagen</td><td>448*</td><td>1,953</td></tr><tr><td>non-mutagen</td><td>83</td><td>1,853*</td></tr></table>
317
+
318
+ Table 3: Statistics of all datasets. "#ave n" and "#ave e" are the average number of nodes/edges per graph. "#feat" is the number of features. In the "task" column, "node" and "graph" indicate whether the dataset is used for the node classification task or the graph classification task, respectively. A check mark in the "gt" column indicates the existence of ground-truth motifs.
319
+
320
+ <table><tr><td>Dataset</td><td>#graph</td><td>#ave n</td><td>#ave e</td><td>#class</td><td>#feat</td><td>task</td><td>gt</td></tr><tr><td>BA-Shapes</td><td>1</td><td>700</td><td>4100</td><td>4</td><td>-</td><td>node</td><td>✓</td></tr><tr><td>Tree-Cycles</td><td>1</td><td>871</td><td>1950</td><td>2</td><td>-</td><td>node</td><td>✓</td></tr><tr><td>Mutag</td><td>4337</td><td>30.32</td><td>30.77</td><td>2</td><td>14</td><td>graph</td><td></td></tr><tr><td>$\mathrm{Mutag}_0$</td><td>2301</td><td>31.74</td><td>32.54</td><td>2</td><td>14</td><td>graph</td><td>✓</td></tr><tr><td>NCI1</td><td>4110</td><td>29.87</td><td>32.30</td><td>2</td><td>37</td><td>graph</td><td></td></tr><tr><td>CiteSeer</td><td>1</td><td>3312</td><td>4732</td><td>6</td><td>3703</td><td>node</td><td></td></tr></table>
321
+
322
+ prior knowledge about the $K$ value. The size of the explanations is automatically decided by the models themselves via optimization.
323
+
324
+ For the hyper-parameters in $\mathrm{CF^2}$, $\lambda$ is decided by normalizing the 1-norm loss and the pairwise contrastive loss to the same scale, giving values of [500, 500, 1000, 20, 100] for BA-Shapes, Tree-Cycles, $\mathrm{Mutag}_0$, NCI1, and CiteSeer, respectively. For the $\alpha$ value, we set it to 0.6 so that factual reasoning slightly leads the optimization. We conduct an ablation study on $\alpha$ in Section 7.6 to show its influence.
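+ For reference, these settings can be collected in a small configuration; a plain-dict sketch of the values stated above:
+
+ ```python
+ LAMBDA = {"BA-Shapes": 500, "Tree-Cycles": 500,
+           "Mutag0": 1000, "NCI1": 20, "CiteSeer": 100}
+ ALPHA = 0.6   # factual reasoning slightly leads the optimization
+ GAMMA = 0.5   # margin in Eq.(10)/(11), also the mask threshold
+ ```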
325
+
326
+ We evaluate the explanation methods based on the graphs in the test dataset. Since the BA-Shapes, Tree-Cycles and $\mathrm{Mutag}_0$ datasets have ground-truth explanations, we report the Accuracy, Precision, Recall and $F_{1}$ scores of the generated explanations of each method. Besides, for all datasets, we evaluate the explanation model with the PS, PN and $F_{NS}$ metrics introduced in Section 6. Note that we not only generate explanations based on edges, but also generate explanations on node features and test them on the CiteSeer dataset, which was not examined in previous works.
327
+
328
+ # 7.4 Quantitative Analysis
329
+
330
+ In Table 4, we report the evaluation of the generated explanations with respect to the ground-truth motifs. $\mathrm{CF}^2$ has overall better performance than all the baselines according to Accuracy and $\mathbf{F}_1$ scores. The only exception is when comparing with Gem on the BA-Shapes dataset with respect to Accuracy, which is lower by $0.62\%$. However, since Gem requires the size of the ground-truth motif to select an explanation of exactly the same size, which is strong prior knowledge, this minor difference is considered acceptable. Another observation is that CF-GNNExplainer is higher in Precision and GNNExplainer is higher in Recall when compared with each other. This justifies our initial motivation about factual and counterfactual reasoning: factual reasoning focuses on the sufficiency of the explanation, which results in higher coverage of the ground-truth motifs, while counterfactual reasoning focuses on the necessity, which provides more precise explanations but
331
+
332
+ Table 4: Explanation evaluation w.r.t. ground-truth. Acc, Pr and Re represent Accuracy, Precision and Recall, respectively. Models with $\dagger$ are the models that fix the size of explanations with pre-defined $K$ values. For the metrics that measure the overall explanation performance (e.g., $F_{1}$ score), we use bold font to mark the highest scores. For the metrics that only measure partial performance (e.g., precision, recall), we mark the highest scores with underlines.
333
+
334
+ <table><tr><td rowspan="2">Models</td><td colspan="4">BA-Shapes</td><td colspan="4">Tree-Cycles</td><td colspan="4">Mutag0</td></tr><tr><td>Acc%</td><td>Pr%</td><td>Re%</td><td>F1%</td><td>Acc%</td><td>Pr%</td><td>Re%</td><td>F1%</td><td>Acc%</td><td>Pr%</td><td>Re%</td><td>F1%</td></tr><tr><td>GNNExplainer†</td><td>95.25</td><td>60.08</td><td>60.08</td><td>60.08</td><td>92.78</td><td>68.06</td><td>68.06</td><td>68.06</td><td>96.96</td><td>59.71</td><td>85.17</td><td>68.85</td></tr><tr><td>CF-GNNExplainer</td><td>94.39</td><td>67.19</td><td>54.11</td><td>56.79</td><td>90.27</td><td>87.40</td><td>47.45</td><td>59.10</td><td>96.91</td><td>66.09</td><td>39.46</td><td>47.39</td></tr><tr><td>Gem†</td><td>96.97</td><td>64.16</td><td>64.16</td><td>64.16</td><td>89.88</td><td>57.23</td><td>57.23</td><td>57.23</td><td>96.43</td><td>63.12</td><td>47.11</td><td>54.68</td></tr><tr><td>CF2</td><td>96.37</td><td>73.15</td><td>68.18</td><td>66.61</td><td>93.26</td><td>84.92</td><td>73.84</td><td>75.69</td><td>97.34</td><td>65.28</td><td>88.59</td><td>72.56</td></tr></table>
335
+
336
+ Table 5: Explanation evaluation on PN/PS-based metrics. #exp is the size of the generated explanations. Models with $\dagger$ are the models that fix the size of explanations with pre-defined $K$ values. For the metrics that measure the overall explanation performance (e.g., $F_{NS}$ score), we use bold font to mark the highest scores. For the metrics that only measure partial performance (e.g., PN, PS), we mark the highest scores with underlines.
337
+
338
+ <table><tr><td rowspan="2">Models</td><td colspan="4">BA-Shapes</td><td colspan="4">Tree-Cycles</td><td colspan="4">Mutag0</td></tr><tr><td>PN%</td><td>PS%</td><td>FNs%</td><td>#exp</td><td>PN%</td><td>PS%</td><td>FNs%</td><td>#exp</td><td>PN%</td><td>PS%</td><td>FNs%</td><td>#exp</td></tr><tr><td>GNNExplainer†</td><td>72.19</td><td>45.62</td><td>55.91</td><td>6.00</td><td>100.00</td><td>59.72</td><td>74.78</td><td>6.00</td><td>71.79</td><td>97.44</td><td>82.67</td><td>15.00</td></tr><tr><td>CF-GNNExplainer</td><td>75.34</td><td>41.10</td><td>53.18</td><td>5.79</td><td>100.00</td><td>31.94</td><td>48.42</td><td>3.44</td><td>96.26</td><td>7.48</td><td>13.88</td><td>7.72</td></tr><tr><td>Gem†</td><td>61.36</td><td>52.27</td><td>56.45</td><td>6.00</td><td>100.00</td><td>29.89</td><td>46.02</td><td>6.00</td><td>83.01</td><td>76.42</td><td>79.58</td><td>15.00</td></tr><tr><td>CF2</td><td>76.73</td><td>68.22</td><td>72.07</td><td>6.21</td><td>100.00</td><td>81.94</td><td>90.08</td><td>5.81</td><td>97.44</td><td>100.00</td><td>98.70</td><td>14.95</td></tr><tr><td rowspan="2">Models</td><td colspan="4">NCI1</td><td colspan="4">CiteSeer (edge)</td><td colspan="4">CiteSeer (feature)</td></tr><tr><td>PN%</td><td>PS%</td><td>FNs%</td><td>#exp</td><td>PN%</td><td>PS%</td><td>FNs%</td><td>#exp</td><td>PN%</td><td>PS%</td><td>FNs%</td><td>#exp</td></tr><tr><td>GNNExplainer†</td><td>92.13</td><td>62.16</td><td>74.24</td><td>15.00</td><td>66.67</td><td>90.05</td><td>76.61</td><td>5.00</td><td>71.64</td><td>99.50</td><td>72.79</td><td>60.00</td></tr><tr><td>CF-GNNExplainer</td><td>97.14</td><td>31.43</td><td>47.49</td><td>7.75</td><td>69.50</td><td>82.00</td><td>75.23</td><td>2.58</td><td>72.14</td><td>92.54</td><td>81.07</td><td>72.91</td></tr><tr><td>Gem†</td><td>99.03</td><td>52.15</td><td>68.32</td><td>15.00</td><td>61.05</td><td>72.67</td><td>66.36</td><td>5.00</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CF2</td><td>100.00</td><td>63.81</td><td>77.91</td><td>17.70</td><td>71.00</td><td>94.50</td><td>81.08</td><td>3.18</td><td>74.63</td><td>95.02</td><td>83.60</td><td>62.73</td></tr></table>
339
+
340
+ worse in coverage. As a result, $\mathrm{CF^2}$ is balancing between them and has an overall higher performance in $\mathbf{F}_1$ .
341
+
342
+ Then, for all the datasets, we test the generated explanations with the PN, PS, and $\mathrm{F}_{NS}$ scores, as shown in Table 5. $\mathrm{CF}^2$ performs best among all methods on PN in $100\%$ of cases, on PS in $83\%$ of cases, and on $\mathrm{F}_{NS}$ in $100\%$ of cases. Moreover, $\mathrm{CF}^2$ achieves a $13.57\%$ average improvement over the best performing baseline on $\mathrm{F}_{NS}$, which is significant. Similar to the observations in the ground-truth evaluation, we note that the counterfactual-based methods perform better on PN and factual-based methods perform better on PS. This is in line with our previous analysis of the advantages and disadvantages of factual and counterfactual reasoning. Besides, this result also gives us insights about the relationship between Precision/Recall and PN/PS.
343
+
344
+ # 7.5 Qualitative Analysis
345
+
346
+ In Figure 2, we illustrate explanations based on topology structures to qualitatively compare $\mathrm{CF}^2$ with the methods based on only factual (GNNExplainer) or counterfactual (CF-GNNExplainer) reasoning. Results show that $\mathrm{CF}^2$ better discovers graph motifs than the other two methods. Moreover, counterfactual-based optimization has
347
+
348
+ Table 6: The classification accuracy of the trained base model on each dataset.
349
+
350
+ <table><tr><td>Datasets</td><td>BA-Shapes</td><td>Tree-Cycles</td><td>Mutag0</td><td>NCI1</td><td>CiteSeer</td></tr><tr><td>Epochs</td><td>3000</td><td>3000</td><td>1000</td><td>200</td><td>200</td></tr><tr><td>Accuracy</td><td>97.86</td><td>98.29</td><td>98.05</td><td>69.03</td><td>71.04</td></tr></table>
351
+
352
+ more precise predictions but tends to be conservative and low in coverage. Factual-based optimization discovers a larger portion of the motifs but also covers redundant edges. In general, $\mathrm{CF}^2$ outperforms the other two methods by considering both necessity and sufficiency in the optimization.
353
+
354
+ # 7.6 Influence of $\alpha$
355
+
356
+ The $\alpha$ in Eq.(12) controls the balance between factual reasoning and counterfactual reasoning. When $\alpha$ is greater than 0.5, $\mathrm{CF}^2$ weights factual reasoning more than counterfactual reasoning, and when it is less than 0.5, counterfactual reasoning is weighted more than factual reasoning. Figure 3 shows the influence of $\alpha$ on $\mathrm{CF}^2$ when generating explanations for the BA-Shapes and $\mathrm{Mutag}_0$ datasets. The results show that performance is not sensitive to the value of $\alpha$: no matter which
357
+
358
+ ![](images/9f3cd264215ad2dc3c826006d145713dbc3388509c3c4ffaa26b5c7b0841aa2a.jpg)
359
+ Figure 2: Qualitative Analysis. Illustration of the generated explanations on instances from two synthetic datasets, BA-Shapes and Tree-Cycles, and one real-world dataset, Mutag. From left to right, we show the explanations generated by the methods based on counterfactual reasoning (i.e., CF-GNNExplainer), factual reasoning (i.e., GNNExplainer), $\mathrm{CF}^2$, and the ground-truth explanation.
360
+
361
+ $\alpha$ value we choose in (0, 1), the generated explanations are better than when considering only one type of reasoning (i.e., $\alpha = 0$ or $\alpha = 1$).
362
+
363
+ # 7.7 Justification of the Evaluation Metric
364
+
365
+ To justify the effectiveness of our PN/PS-based evaluation, we test it on the three datasets with ground-truth explanations, i.e., BA-Shapes, Tree-Cycles, and $\mathrm{Mutag}_0$. We use two non-parametric methods to test the correlation between the performance on ground-truth evaluation and PN/PS-based evaluation: Kendall's $\tau$ [18] and Spearman's $\rho$ [48]. These two scores lie in the range $[-1, 1]$, and two rankings are considered positively correlated if $\tau$ and $\rho$ are positive. The higher the scores, the closer our proposed evaluation metric is to ground-truth evaluation, i.e., the more it can be trusted to evaluate a given explainable GNN model when ground-truth is not accessible. We test the correlation between $\mathrm{F}_{NS}$ and $\mathrm{F}_1$/Accuracy. The results are reported in Table 7; the $\tau$ and $\rho$ values show that they are highly positively correlated. This is important since, for a dataset without ground-truth motifs, if one explanation method performs better than another according to the PN/PS-based evaluation, then we can be reasonably confident of reaching the same conclusion if traditional evaluation metrics were used, assuming ground-truth were available.
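+ As an illustration, the BA-Shapes correlation in Table 7 can be reproduced from the per-method scores in Tables 4 and 5 using SciPy:
+
+ ```python
+ from scipy.stats import kendalltau, spearmanr
+
+ # Per-method scores on BA-Shapes, in the order GNNExplainer,
+ # CF-GNNExplainer, Gem, CF2 (from Tables 4 and 5).
+ f_ns = [55.91, 53.18, 56.45, 72.07]
+ f1 = [60.08, 56.79, 64.16, 66.61]
+ tau, _ = kendalltau(f_ns, f1)    # 1.0: identical method rankings
+ rho, _ = spearmanr(f_ns, f1)     # 1.0
+ ```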
366
+
367
+ # 8 CONCLUSIONS AND FUTURE WORK
368
+
369
+ In this work, we propose a Counterfactual and Factual reasoning $(\mathrm{CF}^2)$ framework, which generates GNN explanations by simultaneously considering the necessity and sufficiency of the explanations. Moreover, we leverage the insights from causal inference theory by taking the Probability of Necessity (PN) and Probability of Sufficiency (PS) to evaluate the necessity and sufficiency of the extracted explanations, making it possible to conduct quantitative evaluation
370
+
371
+ Table 7: Correlation between PN/PS-based evaluation and ground-truth evaluation.
372
+
373
+ <table><tr><td rowspan="2">Models</td><td colspan="2">BA-Shapes</td><td colspan="2">Tree-Cycles</td><td colspan="2">Mutag0</td></tr><tr><td>τ↑</td><td>ρ↑</td><td>τ↑</td><td>ρ↑</td><td>τ↑</td><td>ρ↑</td></tr><tr><td>FNS &amp; F1</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td><td>1.00</td></tr><tr><td>FNS &amp; Acc</td><td>0.66</td><td>0.79</td><td>1.00</td><td>1.00</td><td>0.66</td><td>0.79</td></tr></table>
374
+
375
+ ![](images/5b6639df2a10e3ed95626717e15ec5f19b2f795108a5dcf54f67a4715e58d951.jpg)
376
+ (a) Influence of $\alpha$ on BA-Shapes
377
+
378
+ ![](images/978531fdf15260885795f54784d95fbcb985dbc3ea3dbf1450ff76eb48d5815e.jpg)
379
+ (b) Influence of $\alpha$ on Mutag0
380
+ Figure 3: Influence of $\alpha$ on (a) BA-Shapes and (b) Mutag0.
381
+
382
+ of GNN explanations. Experiments on both synthetic and real-world datasets verify the superiority of the proposed method as well as the usefulness of the evaluation metrics. In the future, we will generalize our framework beyond graph-based explanations, including but not limited to vision- and language-based explanations.
383
+
384
+ # ACKNOWLEDGEMENT
385
+
386
+ This work was supported in part by NSF IIS 1910154, 2007907, and 2046457. Any opinions, findings, conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect those of the sponsors.
387
+
388
+ # REFERENCES
389
+
390
+ [1] Federico Baldassarre and Hossein Azizpour. 2019. Explainability Techniques for Graph Convolutional Networks. In International Conference on Machine Learning (ICML) Workshops, 2019 Workshop on Learning and Reasoning with Graph-Structured Representations.
391
+ [2] Anselm Blumer, Andrzej Ehrenfeucht, David Haussler, and Manfred K Warmuth. 1987. Occam's razor. Information processing letters 24, 6 (1987), 377-380.
392
+ [3] Emmanuel J Candes, Justin K Romberg, and Terence Tao. 2006. Stable signal recovery from incomplete and inaccurate measurements. Communications on Pure and Applied Mathematics: A Journal Issued by the Courant Institute of Mathematical Sciences 59, 8 (2006), 1207-1223.
393
+ [4] Emmanuel J Candes and Terence Tao. 2005. Decoding by linear programming. IEEE transactions on information theory 51, 12 (2005), 4203-4215.
394
+ [5] Hanxiong Chen, Yunqi Li, Shaoyun Shi, Shuchang Liu, He Zhu, and Yongfeng Zhang. 2021. Graph Collaborative Reasoning. WSDM (2021).
395
+ [6] Hanxiong Chen, Shaoyun Shi, Yunqi Li, and Yongfeng Zhang. 2021. Neural collaborative reasoning. In Proceedings of the Web Conference 2021. 1516-1527.
396
+ [7] Eric Chu, Deb Roy, and Jacob Andreas. 2020. Are visual explanations useful? a case study in model-in-the-loop prediction. arXiv preprint arXiv:2007.12248 (2020).
397
+ [8] Marina Danilevsky, Kun Qian, Ranit Aharonov, Yannis Katsis, Ban Kawas, and Prithviraj Sen. 2020. A Survey of the State of Explainable AI for Natural Language Processing. In AACL. 447-459.
398
+ [9] Asim Kumar Debnath, Rosa L. Lopez de Compadre, Gargi Debnath, Alan J. Shusterman, and Corwin Hansch. 1991. Structure-activity relationship of mutagenic aromatic and heteroaromatic nitro compounds. Correlation with molecular orbital energies and hydrophobicity. Journal of Medicinal Chemistry 34, 2 (1991), 786-797.
399
+ [10] Hugo Jair Escalante, Sergio Escalera, Isabelle Guyon, Xavier Baró, Yagmur Güçlütürk, Umut Güçlü, Marcel van Gerven, and Rob van Lier. 2018. Explainable and interpretable models in computer vision and machine learning. Springer.
400
+ [11] Shi Feng, Eric Wallace, Alvin Grissom II, Mohit Iyyer, Pedro Rodriguez, and Jordan Boyd-Graber. 2018. Pathologies of neural models make interpretations difficult. EMNLP (2018).
401
+ [12] Matthew Finlayson, Aaron Mueller, Sebastian Gehrmann, Stuart Shieber, Tal Linzen, and Yonatan Belinkov. 2021. Causal Analysis of Syntactic Agreement Mechanisms in Neural Language Models. In ACL-IJCNLP.
402
+ [13] Shijie Geng, Zuohui Fu, Juntao Tan, Yingqiang Ge, Gerard De Melo, and Yongfeng Zhang. 2022. Path Language Modeling over Knowledge Graphs for Explainable Recommendation. WWW (2022).
403
+ [14] Lise Getoor. 2005. Link-based classification. In Advanced methods for knowledge discovery from complex data. Springer, 189-207.
404
+ [15] Yash Goyal, Ziyan Wu, Jan Ernst, Dhruv Batra, Devi Parikh, and Stefan Lee. 2019. Counterfactual visual explanations. In International Conference on Machine Learning. PMLR, 2376-2384.
405
+ [16] Braden Hancock, Martin Bringmann, Paroma Varma, Percy Liang, Stephanie Wang, and Christopher Re. 2018. Training classifiers with natural language explanations. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (ACL). 1884.
406
+ [17] Qiang Huang, Makoto Yamada, Yuan Tian, Dinesh Singh, Dawei Yin, and Yi Chang. 2020. GraphLIME: Local interpretable model explanations for graph neural networks. arXiv preprint arXiv:2001.06216 (2020).
407
+ [18] M. G. Kendall. 1945. The Treatment of ties in ranking problems. Biometrika 33, 3 (1945), 239-251. https://doi.org/10.1093/biomet/33.3.239
408
+ [19] Jiwei Li, Will Monroe, and Dan Jurafsky. 2016. Understanding neural networks through representation erasure. arXiv preprint arXiv:1612.08220 (2016).
409
+ [20] Lei Li, Yongfeng Zhang, and Li Chen. 2021. Personalized Transformer for Explainable Recommendation. In ACL. 4947-4957.
410
+ [21] Chris Lin, Gerald J Sun, Krishna C Bulusu, Jonathan R Dry, and Marylens Hernandez. 2020. Graph neural networks including sparse interpretability. arXiv preprint arXiv:2007.00119 (2020).
411
+ [22] Wanyu Lin, Hao Lan, and Baochun Li. 2021. Generative Causal Explanations for Graph Neural Networks. In Proceedings of the 38th International Conference on Machine Learning. 6666-6679.
412
+ [23] Ana Lucic, Maartje ter Hoeve, Gabriele Tolomei, Maarten de Rijke, and Fabrizio Silvestri. 2021. CF-GNNExplainer: counterfactual explanations for graph neural networks. arXiv:2102.03322 (2021).
413
+ [24] Dongsheng Luo, Wei Cheng, Dongkuan Xu, Wenchao Yu, Bo Zong, Haifeng Chen, and Xiang Zhang. 2020. Parameterized Explainer for Graph Neural Network. In Advances in Neural Information Processing Systems.
414
+ [25] Oisin Mac Aodha, Shihan Su, Yuxin Chen, Pietro Perona, and Yisong Yue. 2018. Teaching categories to human learners with visual explanations. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. 3820-3828.
415
+ [26] Andreas Madsen, Siva Reddy, and Sarath Chandar. 2021. Post-hoc Interpretability for Neural NLP: A Survey. arXiv preprint arXiv:2108.04840 (2021).
416
+
417
+ [27] Yulei Niu, Kaihua Tang, Hanwang Zhang, Zhiwu Lu, Xian-Sheng Hua, and Ji-Rong Wen. 2021. Counterfactual vqa: A cause-effect look at language bias. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 12700-12710.
418
+ [28] Judea Pearl, Madelyn Glymour, and Nicholas P Jewell. 2016. Causal inference in statistics: A primer. John Wiley & Sons.
419
+ [29] Phillip E Pope, Soheil Kolouri, Mohammad Rostami, Charles E Martin, and Heiko Hoffmann. 2019. Explainability methods for graph convolutional neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 10772-10781.
420
+ [30] Ana Cristina Quelhas, Célia Rasga, and P. N. Johnson-Laird. 2018. The Relation Between Factual and Counterfactual Conditionals. Cognitive Science 42, 7 (2018), 2205-2228.
421
+ [31] Marco Tulio Ribeiro, Sameer Singh, and Carlos Guestrin. 2016. "Why should i trust you?" Explaining the predictions of any classifier. In Proceedings of the 22nd ACM SIGKDD international conference on knowledge discovery and data mining. 1135-1144.
422
+ [32] Ramprasaath R Selvaraju, Michael Cogswell, Abhishek Das, Ramakrishna Vedantam, Devi Parikh, and Dhruv Batra. 2017. Grad-cam: Visual explanations from deep networks via gradient-based localization. In Proceedings of the IEEE international conference on computer vision. 618-626.
423
+ [33] Prithviraj Sen, Galileo Namata, Mustafa Bilgic, Lise Getoor, Brian Galligher, and Tina Eliassi-Rad. 2008. Collective classification in network data. AI magazine 29, 3 (2008), 93-93.
424
+ [34] Avanti Shrikumar, Peyton Greenside, and Anshul Kundaje. 2017. Learning important features through propagating activation differences. In International Conference on Machine Learning. PMLR, 3145-3153.
425
+ [35] Karen Simonyan, Andrea Vedaldi, and Andrew Zisserman. 2014. Deep Inside Convolutional Networks: Visualising Image Classification Models and Saliency Maps. In Workshop at International Conference on Learning Representations.
426
+ [36] Juntao Tan, Shuyuan Xu, Yingqiang Ge, Yunqi Li, Xu Chen, and Yongfeng Zhang. 2021. Counterfactual Explainable Recommendation. CIKM (2021).
427
+ [37] Kaihua Tang, Yulei Niu, Jianqiang Huang, Jiaxin Shi, and Hanwang Zhang. 2020. Unbiased scene graph generation from biased training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 3716-3725.
428
+ [38] Tom Vermeire, Dieter Brughmans, Sofie Goethals, Raphael Mazzine Barbossa de Oliveira, and David Martens. 2022. Explainable image classification with evidence counterfactual. Pattern Analysis and Applications (2022), 1-21.
429
+ [39] Jesse Vig, Sebastian Gehrmann, Yonatan Belinkov, Sharon Qian, Daniel Nevo, Yaron Singer, and Stuart M Shieber. 2020. Investigating Gender Bias in Language Models Using Causal Mediation Analysis. In Advances in Neural Information Processing Systems.
430
+ [40] Nikil Wale and George Karypis. 2006. Comparison of Descriptor Spaces for Chemical Compound Retrieval and Classification. In International Conference on Data Mining. 678-689.
431
+ [41] Yikun Xian, Zuohui Fu, Shan Muthukrishnan, Gerard De Melo, and Yongfeng Zhang. 2019. Reinforcement knowledge graph reasoning for explainable recommendation. In Proceedings of the 42nd international ACM SIGIR conference on research and development in information retrieval. 285-294.
432
+ [42] Pinar Yanardag and SVN Vishwanathan. 2015. Deep graph kernels. In Proceedings of the 21st ACM SIGKDD international conference on knowledge discovery and data mining. 1365-1374.
433
+ [43] Zhitao Ying, Dylan Bourgeois, Jiaxuan You, Marinka Zitnik, and Jure Leskovec. 2019. GNNExplainer: Generating Explanations for Graph Neural Networks. In Advances in Neural Information Processing Systems.
434
+ [44] Hao Yuan, Jiliang Tang, Xia Hu, and Shuiwang Ji. 2020. Xgnn: Towards model-level explanations of graph neural networks. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 430-438.
435
+ [45] Hao Yuan, Haiyang Yu, Shurui Gui, and Shuiwang Ji. 2020. Explainability in graph neural networks: A taxonomic survey. arXiv preprint arXiv:2012.15445 (2020).
436
+ [46] Yongfeng Zhang and Xu Chen. 2020. Explainable recommendation: A survey and new perspectives. Foundations and Trends in Information Retrieval (2020).
437
+ [47] Yongfeng Zhang, Guokun Lai, Min Zhang, Yi Zhang, Yiqun Liu, and Shaoping Ma. 2014. Explicit factor models for explainable recommendation based on phrase-level sentiment analysis. In Proceedings of the 37th international ACM SIGIR conference on Research & development in information retrieval. 83-92.
438
+ [48] Daniel Zwillinger and Stephen Kokoska. 1999. CRC standard probability and statistics tables and formulae. Crc Press.
439
+
440
+ # A MATHEMATICAL DEFINITIONS FOR NODE CLASSIFICATION
441
+
442
+ In Section 4 and Section 5, we formulate the explainable GNN problem as well as the $\mathrm{CF^2}$ framework under the graph classification setting. In this section, we provide the same mathematical definitions under the node classification task.
443
+
444
+ # A.1 Problem Formulation (Node Classification)
445
+
446
+ Explainable Graph Neural Networks In a given graph $G = \{\mathcal{V},\mathcal{E}\}$, suppose a node $v_{i}\in G$ has the predicted label $\hat{y}_i$. The computational graph for node $v_{i}$ is defined as $G_{s(i)} = \{\mathcal{V}_{s(i)},\mathcal{E}_{s(i)}\}$, which is a sub-graph of $G$ that consists of the $L$-hop neighbors of node $v_{i}$. $A_{s(i)}\in \{0,1\}^{|\mathcal{V}_{s(i)}|\times |\mathcal{V}_{s(i)}|}$ and $X_{s(i)}\in \mathbb{R}^{|\mathcal{V}_{s(i)}|\times d}$ are the related adjacency matrix and feature matrix of the computational graph. Since only $G_{s(i)}$ influences the prediction made by the GNN model, the generated explanation should be a sub-graph of $G_{s(i)}$. Thus, for the node classification task, the goal of the explainable GNN problem is to learn an edge mask $M_{s(i)}\in \{0,1\}^{|\mathcal{V}_{s(i)}|\times |\mathcal{V}_{s(i)}|}$ and a feature mask $F_{s(i)}\in \{0,1\}^{|\mathcal{V}_{s(i)}|\times d}$, which are applied on $A_{s(i)}$ and $X_{s(i)}$, respectively. After optimization, the sub-graph will be $A_{s(i)}\odot M_{s(i)}$ with the sub-features $X_{s(i)}\odot F_{s(i)}$, which is the generated explanation for the prediction of node $v_{i}$.
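+ A sketch of extracting the computational graph with PyTorch Geometric, assuming a 3-layer GNN ($L = 3$), an assumed node index `i`, and tensors `edge_index` (edge list) and `x` (node features); `k_hop_subgraph` is an existing utility in `torch_geometric.utils`.
+
+ ```python
+ from torch_geometric.utils import k_hop_subgraph
+
+ # L-hop computational graph G_{s(i)} of node i
+ subset, sub_edge_index, mapping, edge_mask = k_hop_subgraph(
+     node_idx=i, num_hops=3, edge_index=edge_index, relabel_nodes=True)
+ sub_x = x[subset]   # feature matrix X_{s(i)} restricted to the sub-graph
+ ```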
447
+
448
+ Counterfactual and Factual Conditions For the node classification task, the definitions of the conditions for factual and counterfactual reasoning are similar to those for graph classification, and are given as follows:
449
+
450
+ Condition for Factual Reasoning:
451
+
452
+ $$
+ \operatorname*{arg\,max}_{c \in \mathcal{C}} P_\Phi\left(c \mid A_{s(i)} \odot M_{s(i)}, X_{s(i)} \odot F_{s(i)}\right) = \hat{y}_i \tag{15}
+ $$
455
+
456
+ Condition for Counterfactual Reasoning:
457
+
458
+ $$
+ \operatorname*{arg\,max}_{c \in \mathcal{C}} P_\Phi\left(c \mid A_{s(i)} - A_{s(i)} \odot M_{s(i)}, X_{s(i)} - X_{s(i)} \odot F_{s(i)}\right) \neq \hat{y}_i \tag{16}
+ $$
461
+
462
+ Simple and Effective Explanations For the node classification task, the explanation complexity is defined exactly the same as for graph classification, which is:
463
+
464
+ $$
465
+ C (M, F) = \| M \| _ {0} + \| F \| _ {0} \tag {17}
466
+ $$
467
+
468
+ The factual explanation strength and counterfactual explanation strength are defined under the node classification setting as:
469
+
470
+ $$
+ S_f(M, F) = P_\Phi\left(\hat{y}_i \mid A_{s(i)} \odot M_{s(i)}, X_{s(i)} \odot F_{s(i)}\right) \tag{18}
+ $$
473
+
474
+ and
475
+
476
+ $$
+ S_c(M, F) = -P_\Phi\left(\hat{y}_i \mid A_{s(i)} - A_{s(i)} \odot M_{s(i)}, X_{s(i)} - X_{s(i)} \odot F_{s(i)}\right) \tag{19}
+ $$
479
+
480
+ # A.2 The $\mathbf{CF^2}$ Framework (Node Classification)
481
+
482
+ The basic idea of $\mathrm{CF^2}$ for node and graph classification is the same: minimize the explanation complexity while keeping the generated explanation strong enough. Therefore, we directly provide the final relaxed optimization and omit the derivation process. $\mathrm{CF^2}$ generates explanations by solving the relaxed optimization problem:
483
+
484
+ $$
+ \text{minimize} \quad \left\| M_{s(i)}^* \right\|_1 + \left\| F_{s(i)}^* \right\|_1 + \lambda \left(\alpha L_f + (1 - \alpha) L_c\right) \tag{20}
+ $$
487
+
488
+ where
489
+
490
+ $$
+ L_f = \operatorname{ReLU}\left(\gamma + P_\Phi\left(\hat{y}_{i,s} \mid A_{s(i)} \odot M_{s(i)}^*, X_{s(i)} \odot F_{s(i)}^*\right) - S_f\left(M_{s(i)}^*, F_{s(i)}^*\right)\right) \tag{21}
+ $$
493
+
494
+ Similarly,
495
+
496
+ $$
+ L_c = \operatorname{ReLU}\left(\gamma - S_c\left(M_{s(i)}^*, F_{s(i)}^*\right) - P_\Phi\left(\hat{y}_{i,s} \mid A_{s(i)} - A_{s(i)} \odot M_{s(i)}^*, X_{s(i)} - X_{s(i)} \odot F_{s(i)}^*\right)\right) \tag{22}
+ $$
2202.08xxx/2202.08816/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:262da1d780c1f3c3b05c231337c7b7762f0f8a107ce3ab7fd9cf410148124b62
3
+ size 508273
2202.08xxx/2202.08816/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08818/4cb6ce6c-2c4f-416d-8656-8e7ab063d642_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08818/4cb6ce6c-2c4f-416d-8656-8e7ab063d642_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08818/4cb6ce6c-2c4f-416d-8656-8e7ab063d642_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b4ea917bbcfa4a030a3266301b70db47ff9cf4f4296685e2aa43f1c6f7bf2df7
3
+ size 2996567
2202.08xxx/2202.08818/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08818/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0c16f9ddad20b5aabed84ae1448485977502fc37a7b29729045eee6e137c93a8
3
+ size 406768
2202.08xxx/2202.08818/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08821/a330e14c-eb6d-4e85-bc1d-a5b4b5c46483_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08821/a330e14c-eb6d-4e85-bc1d-a5b4b5c46483_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08821/a330e14c-eb6d-4e85-bc1d-a5b4b5c46483_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d7d6fe5106f0e56fe8201766f5ffb8da2618e8b9954415d74aee0ab88cee1787
3
+ size 794546
2202.08xxx/2202.08821/full.md ADDED
@@ -0,0 +1,911 @@
 
 
 
 
1
+ # Human-Algorithm Collaboration: Achieving Complementarity and Avoiding Unfairness
2
+
3
+ KATE DONAHUE*, Cornell University, USA
4
+
5
+ ALEXANDRA CHOULDECHOVA, Carnegie Mellon University and Amazon Web Services, USA
6
+
7
+ KRISHNARAM KENTHAPADI†, Fiddler AI, USA
8
+
9
+ Much of machine learning research focuses on predictive accuracy: given a task, create a machine learning model (or algorithm) that maximizes accuracy. In many settings, however, the final prediction or decision of a system is under the control of a human, who uses an algorithm's output along with their own personal expertise in order to produce a combined prediction. One ultimate goal of such collaborative systems is complementarity: that is, to produce lower loss (equivalently, greater payoff or utility) than either the human or algorithm alone. However, experimental results have shown that even in carefully-designed systems, complementary performance can be elusive. Our work provides three key contributions. First, we provide a theoretical framework for modeling simple human-algorithm systems and demonstrate that multiple prior analyses can be expressed within it. Next, we use this model to prove conditions where complementarity is impossible, and give constructive examples of where complementarity is achievable. Finally, we discuss the implications of our findings, especially with respect to the fairness of a classifier. In sum, these results deepen our understanding of key factors influencing the combined performance of human-algorithm systems, giving insight into how algorithmic tools can best be designed for collaborative environments.
10
+
11
+ # ACM Reference Format:
12
+
13
+ Kate Donahue, Alexandra Chouldechova, and Krishnaram Kenthapadi. 2022. Human-Algorithm Collaboration: Achieving Complementarity and Avoiding Unfairness. In *FAccT'22: ACM Conference on Fairness, Accountability and Transparency*, June 21–24, 2022, Seoul, South Korea. ACM, New York, NY, USA, 26 pages. https://doi.org/10.1145/3531146.3533221
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Consider a prediction task where the goal is to take a set of features about the world as input and predict an outcome of interest. A typical machine learning approach to such a task is to attempt to select a model with low (generalization) loss for the problem at hand. If such a model is applied directly to the prediction task, it will minimize expected loss.
18
+
19
+ However, this standard approach does not necessarily reflect the way that machine learning tools are actually implemented. Often, algorithmic predictions are presented to humans, who then make a final decision by additionally relying on their own expertise [1, 18, 36, 39]. For example, consider a doctor looking at a medical record and trying to make a determination of whether disease is present. An algorithmic prediction based on the record may be useful, but it almost certainly will not be the sole factor influencing the doctor's diagnosis. For example, the doctor may have access to different data, such as conversations with the patient. The doctor may also have access to different knowledge, such as distilled expertise from years of practice. The doctor's decision will be a function of the algorithm's prediction, as
20
+
21
+ *Work conducted while on an internship at Amazon.
22
+
23
+ $\dagger$ Work done while at Amazon.
24
+
25
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
26
+
27
+ © 2022 Association for Computing Machinery.
28
+
29
+ Manuscript submitted to ACM
30
+
31
34
+
35
+ ![](images/16491961a410e387769bee0182739310880becf15c6d35f566c252376d012dc0.jpg)
36
+ (a) Scenario 1: Combined system has higher loss than either unaided human or algorithm.
37
+
38
+ ![](images/c13a5e51b0b53de31cffc78f63b84acbd0bf0224982b15c0b02e8ce21566a5c2.jpg)
39
+ (b) Scenario 2: Combined system has lower loss than unaided human, but higher loss than algorithm.
40
+
41
+ ![](images/b81ec20eedfab18a59cde3cd4b970dbc3dfefafac828cb7b39b782b35a763f41.jpg)
42
+ (c) Scenario 3: Complementary performance: lower error than either unaided human or algorithm.
43
+ Fig. 1. Three possible scenarios for human-algorithm collaboration, each with the same algorithmic and unaided human loss. However, the loss of the combined system (human using the algorithm) might vary substantially. Section 4 gives a more detailed analysis.
44
+
45
+ well as their own inherent belief. Note that the doctor's decision-making may be imperfect, for example relying on their own judgement even when the algorithm has better performance. A successful outcome occurs when the combined system (the doctor using algorithmic output) has low loss, not when the algorithm alone has low loss. Figure 1 illustrates three scenarios where a combined human-algorithm system could have differing levels of loss.
46
+
47
+ In particular, one especially valuable goal is complementarity (or complementary performance). Complementarity (originally defined in Bansal et al. [2]) is achieved whenever the combined human-algorithm system has strictly lower expected loss than either the human or the algorithm alone (Figure 1c). Complementarity is not necessary for a combined system to be deemed successful: for example, a combined system that does better than the human alone, but not necessarily better than the algorithm alone, would still reflect an improvement from a human-alone status-quo. However, complementarity creates the strongest incentive for adoption of a combined human-algorithm system, which is why it is the focus of our analysis.
48
+
49
+ Contributions: At a high level, we address the following problems: (i) How do we formally and tractably model human-algorithm collaborative systems? (ii) When can human-algorithm collaborative systems produce higher accuracy than either the human or algorithm alone? (iii) What are the fairness implications of such collaborative systems?
50
+
51
+ The contributions of this work are three-fold. First, in Section 3, we introduce a simple theoretical framework for analyzing human-algorithm collaboration, and demonstrate the richness of this framework by showing that it can encapsulate models from previous works analyzing human decision-making. In Section 4, we provide a simple, concrete motivating example using this framework that illustrates the core results of this paper.
52
+
53
+ Next, in Section 5, we use this approach to analyze complementarity. First, we present several impossibility results that characterize regimes in which human-algorithm collaboration can never achieve complementarity. We then give concrete conditions for when complementarity can be achieved. In particular, our results suggest that complementarity is easier to achieve when loss rates are highly variable: when the unaided human (or algorithm) has very low loss on some inputs and very high loss on others. Such disparate levels of loss raise issues of fairness, which we turn to next.
54
+
55
+ In Section 6 we conclude our analysis by examining the fairness impacts of complementarity. The variability in loss rates implied by our results has implications for fairness, since types of inputs with very high error rates may correspond to protected attributes, such as race, gender, or ethnicity. To investigate this concern, we propose and analyze three types of fairness relating to human-algorithm systems, giving conditions for when they can and cannot be achieved. One of our main results shows that when complementarity is achieved, at least one group does worse in the combined system than under the human-only status quo. Additionally, we give a simple condition under which the combined human-algorithm system will guarantee that the loss disparity between different protected groups does not increase.
56
+
57
+ # 2 RELATED WORK
58
+
59
+ # 2.1 Human-Algorithm Collaboration
60
+
61
+ A series of papers have explored issues related to human-algorithm collaboration. For example, Poursabzi-Sangdeh et al. [29] and Yin et al. [39] analyze how explainability and accuracy, respectively, influence how humans use algorithmic predictions. Similarly, Dietvorst and Bharti [9] hypothesize that humans may prefer algorithmic predictions that are more variable in their loss rates and Dietvorst et al. [10] suggests that allowing algorithmic predictions to be modified may make humans more likely to use them.
62
+
63
+ Other papers center more on in-depth qualitative assessments of how professionals incorporate tailor-made algorithmic tools into their workflow. For example, Lebovitz et al. [22, 23] studies how doctors in major US hospitals use AI predictions in their daily work. Similarly, Okolo et al. [28] studies how community healthcare workers in India believe AI tools could influence their work. Finally, Yang et al. [38] studies how UX designers work with machine learning tools and the data scientists who create them.
64
+
65
+ Some research teams that develop tools for human-in-the-loop settings have run experiments analyzing how their tools perform with human collaboration. For example, Beede et al. [3] and Raghu et al. [30] both study how an AI tool for predicting diabetic retinopathy fits in with a broader ecosystem (human doctors and the overall healthcare system). Similarly, De-Arteaga et al. [7] studies how child welfare call screeners incorporate algorithmic predictions in their risk assessments. Tan et al. [34] studies how human and algorithmic distributions of loss rates differ for recidivism predictions on the COMPAS dataset (but not in ways that allowed for complementary performance by combined systems). Similarly, Geirhos et al. [14] compares the similarity (consistency) of loss in predictions made by humans and a deep learning algorithm.
66
+
67
+ Some computer science papers specifically analyze models of human-algorithm interaction, such as [1, 5, 18, 31, 32, 36]. Of these, Bansal et al. [2] is especially relevant because it is framed through the goal of complementarity. Bansal et al. [1] also highlights the fact that optimizing for the algorithm's error may not minimize the loss of the combined system. In Section 3, we show how human decision-making rules inspired by the analyses of Bansal et al. [1] and Vodrahalli et al. [36] can be represented in our model. Some papers show how to build models optimized for a human-algorithm deferral system, where the final decision is made by either the human or the algorithm [6, 26, 27]. Straitouri et al. [33] studies a variant of this problem for classification where the algorithm presents a subset of possible labels to the human, who selects the final decision from among them. Finally, Cabitza et al. [4] studies multiple methods of aggregating human and algorithmic predictions.
68
+
69
+ # 2.2 Fairness in Human-Algorithm Collaboration
70
+
71
+ Some papers specifically consider the fairness implications of combined human-algorithm systems. For example, Madras et al. [24] studies fairness and accuracy in deferring to a human expert, while Keswani et al. [19, 20] extends this analysis to deferring to multiple different human experts. Gillis et al. [15] takes a theoretical approach towards modeling the human-algorithm system and gives conditions where adding a biased (unfair) human can change the fairness properties of the overall system. Valera et al. [35] studies a system with multiple biased "experts", where assigning the correct expert to each task can improve accuracy while still satisfying fairness requirements.
72
+
73
+ # 2.3 Related Papers From Other Areas
74
+
75
+ Finally, some papers in seemingly unrelated areas end up being relevant to our analysis. For example, ensemble learning studies how to incorporate predictions from multiple algorithms into a unified (more accurate) system [21]. Ensemble learning differs from our analysis in that each expert (predictor) is assumed to be an algorithm, and predictions are assumed to be combined by some additional algorithm under our control (rather than by a human decision-maker we cannot control). However, certain factors identified in the ensemble learning literature as affecting overall performance, such as diversity, are relevant for our analysis [8]. Additionally, multiple works study how fairness properties of predictors change when they are composed [11, 12, 37]. These works are relevant to our analysis in Section 6 of the fairness of a combined human-algorithm system, but differ somewhat from ours: in general, these other papers tend to study fairness of allocating or achieving some desired prediction, while our analysis describes fairness as equal loss across groups. Finally, Meehl [25] compares statistical and clinical methods of reasoning, a framing that parallels our analysis of algorithmic versus human prediction methods.
76
+
77
+ # 3 MODEL AND ASSUMPTIONS
78
+
79
+ # 3.1 Model
80
+
81
+ Our model considers a prediction task: given some element $x \in \mathcal{X}$ , make a prediction $y \in \mathcal{Y}$ that minimizes some loss function $\mathcal{L}$ , with loss bounded below by 0. This loss could reflect error rates for any type of learning problem—for example, regression and classification tasks could both be represented by this loss function. We model the input space $\mathcal{X}$ as being made up of $N$ discrete regimes: all inputs within the same regime are identical from the perspective of algorithmic and human loss. This is without loss of generality, given that $N$ could be arbitrarily large. We are not assuming that either the human or algorithm has knowledge of these regimes, simply that they exist. We will denote the probability of seeing regime $i$ by $p_i$ , with $\sum_{i=1}^{N} p_i = 1$ .
82
+
83
+ The human-algorithm system consists of three components:
84
+
85
+ (1) An algorithm, which for each regime in the input space $x_{i} \in \mathcal{X}$ makes a prediction $\hat{y}_i^a$ with some loss rate $a_i$ . The average loss is given by $\sum_{i=1}^{N} p_i \cdot a_i = A$ . We can write $a_i = A + \delta_{ai}$ , with $\sum_{i=1}^{N} p_i \cdot \delta_{ai} = 0$ . The term $\delta_{ai}$ represents how much $a_i$ varies (differs from the average loss $A$ ).
86
+ (2) An unaided human, which similarly for each regime in the input space $x_{i} \in \mathcal{X}$ makes some prediction $\hat{y}_i^h$ . The average loss of the human is given by $\sum_{i=1}^{N} p_i \cdot h_i = H$ . Similarly, we write $h_i = H + \delta_{hi}$ , with $\sum_{i=1}^{N} p_i \cdot \delta_{hi} = 0$ .
87
+ (3) Finally, some combiner (a human using algorithmic input) $g(\hat{y}_i^a,\hat{y}_i^h)$ , which takes predictions given by the algorithm and unaided human and returns a combined prediction, $\hat{y}_i^c$ . The combining function reflects human decision-making: it could select the algorithm's prediction, the unaided human's prediction, or interpolate between the two of them. We could also view this as a (loss) combining function $c(a_i,h_i)$ that takes the algorithmic loss and human loss on a particular instance and returns some combined loss.
88
+
89
+ In general, we may not have control over all (or even any) of these components. For example, the combining function reflects human judgement, which typically can't be directly manipulated. A primary goal of our analyses is to determine when a human-algorithm system displays complementarity, defined in Definition 1 below.
90
+
91
+ Definition 1 (From Bansal et al. [2]). A human-algorithm system displays complementary performance when the combined system has (strictly) lower loss than either the human or algorithm:
92
+
93
+ $$
94
+ \sum_{i=1}^{N} p_i \cdot c(a_i, h_i) < \min\left(\sum_{i=1}^{N} p_i \cdot a_i, \; \sum_{i=1}^{N} p_i \cdot h_i\right) = \min(A, H)
95
+ $$
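+
+ To make Definition 1 concrete, the snippet below is a minimal Python sketch (our illustration, not code from the paper) that checks complementarity given per-regime probabilities and loss rates; the function names are our own.
+
+ ```python
+ def average_loss(p, losses):
+     """Average loss over regimes: sum_i p_i * loss_i."""
+     return sum(pi * li for pi, li in zip(p, losses))
+
+ def is_complementary(p, a, h, c):
+     """Definition 1: combined average loss strictly below min(A, H)."""
+     return average_loss(p, c) < min(average_loss(p, a), average_loss(p, h))
+
+ # Scenario 3 from Section 4 (Table 3): two equally likely regimes.
+ p = [0.5, 0.5]
+ a = [0.2, 0.8]     # algorithm loss per regime (A = 0.5)
+ h = [1.15, 0.35]   # unaided human loss per regime (H = 0.75)
+ c = [0.44, 0.46]   # combined loss per regime (average 0.45)
+ print(is_complementary(p, a, h, c))  # True: 0.45 < min(0.5, 0.75)
+ ```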
96
+
97
+ # 3.2 Assumptions
98
+
99
+ The combining function models the key question in human-algorithm collaboration: how do humans incorporate algorithmic predictions with their own expertise? In this work, we will make two main assumptions about how such combination occurs. First, throughout this paper, we will find it useful to work in the space of combining losses, rather than combining predictions. Specifically, we will use the $c(a_i, h_i)$ loss combining function. Assumption 1 describes the assumption this implies.
100
+
101
+ Assumption 1. The loss of a combined human-algorithm system can be modeled by a combining rule relying only on the loss rates of the unaided human and algorithm in a particular regime: $c(a_i, h_i)$ . That is, regimes with identical (unaided human, algorithm) pairs of loss rates are treated identically.
102
+
103
+ This assumption reflects the case where the level of accuracy of the algorithm and (unaided) human is the only feature influencing the accuracy of the combined system. An example of a situation that might violate this assumption is if regimes 1 and 2 both have loss of $3\%$ for the unaided human and $5\%$ for the algorithm, but the human using the algorithm (combined system) has loss of $3\%$ for regime 1 and $4\%$ for regime 2. Considering the case where this assumption is relaxed could be an interesting avenue for future work; however, it would likely result in much more complicated analysis.
104
+
105
+ Next, Assumption 2 below describes the assumption that the combining rule's outputs are bounded.
106
+
107
+ Assumption 2. For each regime, the loss of the combined system is bounded between the loss of the human and algorithm:
108
+
109
+ $$
110
+ \min(a_i, h_i) \leq c(a_i, h_i) \leq \max(a_i, h_i)
111
+ $$
112
+
113
+ This assumption reflects a case where the combiner operates by interpolating between the predictions made by the human or algorithm. An example of a situation that might violate this assumption is if the combined system has loss $3\%$ in a certain regime, where the human has loss $4\%$ and the algorithm has loss $6\%$ in that regime. A bounded combining rule makes modeling human-algorithm collaboration more realistic: complementarity is trivial to achieve if the combining rule's loss can be arbitrarily disconnected from the loss of the human and algorithm.
114
+
115
+ It's worth considering why a system may satisfy the boundedness condition (Assumption 2) and still exhibit complementarity. Assumption 2 refers to bounds at the level of each regime, while complementarity refers to overall average loss. For example, Scenario 3 (Table 3) in Section 4 obeys the bound in Assumption 2, and yet also achieves complementarity.
116
+
117
+ # 3.3 Weighting function
118
+
119
+ In this work, we will find it helpful to think about the combined human-algorithm system as involving a weighting function $0 \leq w_{h}(a_{i}, h_{i}) \leq 1$ controlling how much the human influences the final prediction:
120
+
121
+ $$
122
+ c(a_i, h_i) = (1 - w_h(a_i, h_i)) \cdot a_i + w_h(a_i, h_i) \cdot h_i \tag{1}
123
+ $$
124
+
125
+ Lemma 1, below, shows that using a weighting function requires no new assumptions.
126
+
127
+ Lemma 1. Any combining rule relying only on loss rates (Assumption 1) with bounded output (Assumption 2) can be written as a combining rule with a weighting function $0 \leq w_{h}(a_{i}, h_{i}) \leq 1$ .
128
+
129
+ One simple (ideal) combining function is given by Example 1: it simply selects whichever of the unaided human or algorithm has lower loss. While this is the best possible combining function (given our assumptions), it is likely not a realistic model of how human decision-makers incorporate algorithmic advice.
130
+
131
+ Example 1 (Min). The combining function $c(a, h) = \min(a, h)$ is represented by the weighting function:
132
+
133
+ $$
134
+ w_h(a, h) = \begin{cases} 1 & h \leq a \\ 0 & \text{otherwise} \end{cases}
135
+ $$
136
+
137
+ While our framework is simple, it is also sufficiently flexible to capture models of human-algorithm collaboration studied in multiple previous papers (described in greater detail in Appendix A). Examples 2 and 3 demonstrate this in reference to two particular models suggested by prior literature. First, Example 2 selects whichever of the human or algorithm has the lower loss rate with probability $p_s$ . For high $p_s$ , this reflects a decision-maker who accurately trusts whichever has lower loss.
138
+
139
+ Example 2 (Bansal et al. [1]). The analysis in Bansal et al. [1] suggests the weighting function:
140
+
141
+ $$
142
+ w_h(a, h) = \begin{cases} p_s & h \leq a \\ 1 - p_s & \text{otherwise} \end{cases}
143
+ $$
144
+
145
+ Next, in Example 3, the decision-maker first decides whether to consider algorithmic advice at all: it does so only if the algorithm's loss rate is at least $\epsilon$ lower than the human's loss rate. Then, the decision-maker incorporates algorithmic advice with some probability $p_{s}(\cdot)$ that depends on the gap between human and algorithmic loss rates.
146
+
147
+ Example 3 (Vodrahalli et al. [36]). The two-stage model in Vodrahalli et al. [36] could be written as:
148
+
149
+ $$
150
+ w_h(a, h) = \begin{cases} 1 & a \geq h - \epsilon \\ p_s(h - a) & \text{otherwise} \end{cases}
151
+ $$
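+
+ As an illustration, here is a hypothetical Python sketch of these three weighting functions together with the combining rule of Equation (1). The specific $p_s$ value, $\epsilon$ , and the gap function in the Vodrahalli-style rule are illustrative assumptions of ours, not parameters taken from [1] or [36].
+
+ ```python
+ def combine(w_fn, a, h):
+     """Equation (1): weighted average of the two losses, with weight w_fn(a, h) on the human."""
+     w = w_fn(a, h)
+     return (1 - w) * a + w * h
+
+ def w_min(a, h):
+     """Example 1: all weight on whichever input has (weakly) lower loss."""
+     return 1.0 if h <= a else 0.0
+
+ def w_bansal(a, h, p_s=0.9):
+     """Example 2: weight p_s on the human when the human is (weakly) better."""
+     return p_s if h <= a else 1.0 - p_s
+
+ def w_vodrahalli(a, h, eps=0.05):
+     """Example 3: ignore the algorithm unless it beats the human by at least eps;
+     otherwise weight the human by a decreasing function of the gap (our choice)."""
+     return 1.0 if a >= h - eps else max(0.0, 1.0 - (h - a))
+
+ # Regime 1 of Scenario 3 (Section 4): algorithm loss 0.2, human loss 1.15.
+ for w_fn in (w_min, w_bansal, w_vodrahalli):
+     print(w_fn.__name__, round(combine(w_fn, a=0.2, h=1.15), 4))
+ ```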
152
+
153
+ # 3.4 Research Ethics and Social Impact
154
+
155
+ While our paper is primarily theoretical, its application area prompts a number of ethical considerations. For example, in this work, we are primarily concerned with building better prediction functions. In general, such functions could be used for positive ends (helping a doctor correctly diagnose a disease) or negative ones (enabling the identification and repression of minority groups). Additionally, even if a function is being used for positive goals, it could be the case that factors besides the loss rate are ultimately more important. For example, it could be that the process of coming to a prediction, rather than the prediction itself, is more important. This is especially salient for our discussion of fairness, which assumes that the fairness of outcomes (of loss rate disparities) is the relevant factor to consider, rather than fairness of the prediction process. The issue of explanation of algorithmic predictions, which is orthogonal to our main analysis, could be relevant for this consideration.
156
+
157
+ # 4 MOTIVATING EXAMPLE
158
+
159
+ To further motivate the analysis, let us revisit the medical application from the introduction (Figure 1) and introduce further specifics. Consider the medical prediction task of using information from a patient's medical record to predict disease severity (on a scale from 0 to 5, as in [30]).
160
+
161
166
+
167
+ <table><tr><td></td><td>(Unaided) human</td><td>Algorithm</td><td>Combined (human using algorithm)</td><td>Weight (on unaided human)</td></tr><tr><td>Regime 1</td><td>1</td><td>0.35</td><td>0.94</td><td>0.9</td></tr><tr><td>Regime 2</td><td>0.5</td><td>0.65</td><td>0.64</td><td>0.1</td></tr><tr><td>Average</td><td>0.75</td><td>0.5</td><td>0.79</td><td>0.5</td></tr></table>
168
+
169
+ Table 1. Scenario 1 Loss rates: An example of a combined human-algorithm system. There are two regimes, each making up equal proportions of the input space $(p_1 = p_2 = 0.5)$ . Note here that complementarity is not satisfied: in fact, the combined system has higher loss than either the human or algorithm alone!
170
+
171
+ <table><tr><td></td><td>(Unaided) human</td><td>Algorithm</td><td>Combined (human using algorithm)</td><td>Weight (on unaided human)</td></tr><tr><td>Regime 1</td><td>1</td><td>0.35</td><td>0.51</td><td>0.25</td></tr><tr><td>Regime 2</td><td>0.5</td><td>0.65</td><td>0.54</td><td>0.75</td></tr><tr><td>Average</td><td>0.75</td><td>0.5</td><td>0.53</td><td>0.5</td></tr></table>
172
+
173
+ Table 2. Scenario 2 Loss rates: A second example of a combined human-algorithm system, but with different loss distributions. Here, the combined system (human using algorithm) has average loss which is lower than the loss of the unaided human, but higher than the loss of the algorithm alone.
174
+
175
+ <table><tr><td></td><td>(Unaided) human</td><td>Algorithm</td><td>Combined (human using algorithm)</td><td>Weight (on unaided human)</td></tr><tr><td>Regime 1</td><td>1.15</td><td>0.2</td><td>0.44</td><td>0.25</td></tr><tr><td>Regime 2</td><td>0.35</td><td>0.8</td><td>0.46</td><td>0.75</td></tr><tr><td>Average</td><td>0.75</td><td>0.5</td><td>0.45</td><td>0.5</td></tr></table>
176
+
177
+ Table 3. Scenario 3 Loss rates: A third example of a combined human-algorithm system. Here, the combined system (human using algorithm) displays complementary performance: its average loss of 0.45 is lower than the loss of either the unaided human (0.75) or algorithm alone (0.5).
178
+
179
+ Table 4. Three possible scenarios for human-algorithm collaboration. In each, the algorithm and unaided human have the same average loss. However, the loss of the human using the algorithm varies. Figure 1 gives a visual description of these scenarios.
+
+ As illustrated in Figure 1, we will assume that doctors relying on their medical training (unaided humans) have an average loss rate of 0.75: they are off by 0.75 grades, on average. A data science team has created a machine learning algorithm with an average loss of 0.5.
180
+
181
+ Even though the algorithm has lower loss, doctors won't simply rubber-stamp algorithmic suggestions. Because they have specialized training and access to additional information (such as conversations with patients), a doctor might reasonably incorporate algorithm advice only partially, or only sometimes. However, this leaves open a crucial question: what is the combined human-algorithm loss? That is, what is the average loss once doctors start incorporating the new machine-learning algorithm into their decision-making process?
182
+
183
+ # 4.1 Three scenarios
184
+
185
+ To help build intuition before delving into our formal theoretical results, we will consider three different example scenarios for what a human-algorithm collaborative system might look like, given in Tables 1, 2, 3. Each scenario has the same average loss for the unaided human and for the algorithm (0.75 and 0.5, respectively). However, each scenario differs in 1) the way (unaided) human and algorithm loss is distributed and 2) the way the human combines algorithmic advice. Specifically, these three scenarios illustrate the simplified case where patient records ("regimes") come in one of two types: regime 1 and regime 2, each of which makes up $50\%$ of the total input space. These regimes might differ in multiple ways: say, disease progression, data quality, or patient characteristics. We will treat observations from within the same regime as identical: the doctor, algorithm, and combined system each has uniform loss for every record within
186
+
187
+ the same regime. (In later sections, we will relax this and consider arbitrary numbers of regimes, reflecting arbitrarily complex distributions of loss.)
188
+
189
+ Scenario 1 (combined loss higher than unaided human or algorithm): For example, in Table 1, the unaided human has loss 1 on instances of type 1, but loss 0.5 for instances of type 2. The algorithm's loss rate distribution differs: 0.35 for type 1, and 0.65 for type 2. Finally, the combined loss (loss of the human using algorithmic input) for a particular regime is a function of the loss of the unaided human and the algorithm: for this example, it's 0.94 in regime 1, and 0.64 in regime 2. Note that this results in an average loss of 0.79—the human using the algorithm has a strictly greater loss rate than either the unaided human or the algorithm! This unfortunate case could result from inappropriately relying on the algorithm: for example, the doctor might mistakenly incorporate algorithmic advice more frequently in regimes where it happens to have higher loss. The fourth column concretely reflects this cause: it calculates the weighting function for each regime (reliance on the unaided human, as opposed to the algorithm). In this case, the regime 1 weighting function is 0.9, meaning the combined system is relying heavily on the unaided human for this instance, even though it has higher loss than the algorithm. Similarly, in regime 2, the weighting function is 0.1, indicating that the combined system is relying more on the algorithm, even though (for this regime), the algorithm has higher loss than the human. This inappropriate reliance explains why the combined system has higher loss than either the unaided human or algorithm alone.
190
+
191
+ Scenario 2 (combined system lower error than human, higher error than algorithm): Table 2 presents a slightly more optimistic Scenario 2. Here, the distributions of loss rates for the unaided human and the algorithm are the same as in Scenario 1, but the combined loss differs: the average loss of the human using the algorithm is 0.53—lower than the loss of the unaided human (0.75), but higher than the loss of the algorithm alone (0.5). This reflects a common scenario in human-algorithm collaboration: the combined system ends up improving over the human alone, but still falls short of the loss rate achievable by the algorithm alone [2]. In this case, the reason is that the combined system is doing a better job than in Scenario 1 of appropriately relying on the unaided human or algorithm. Note that the weighting function is lower in regime 1 (where the unaided human has higher loss) and higher in regime 2 (where the unaided human has lower loss). This more appropriate reliance explains why the combined system has better performance than Scenario 1.
192
+
193
+ Scenario 3 (complementarity: lower than unaided human or algorithm): Finally, Table 3 presents Scenario 3. In this case, we keep the weighting function the same as in Scenario 2, as well as the average loss of the unaided human and the algorithm. However, we change the distribution of loss rates across the regimes: we make them more variable. For example, the unaided human now has loss 1.15 in regime 1 and 0.35 in regime 2, while the algorithm has loss 0.2 in regime 1 and 0.8 in regime 2. While average (unaided) human and algorithmic loss are the same as in Scenarios 1 and 2, the combined human-algorithm system ends up having average loss of 0.45, which is strictly lower than that of either the unaided human or algorithm alone. Complementarity arises here because of the diversity of errors and appropriate reliance on the algorithm when its error is lower than the human's. As we show in Section 5, these conditions are necessary for complementarity to arise.
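+
+ The combined-loss columns of Tables 1, 2, and 3 can be reproduced directly from Equation (1). The short Python sketch below is our illustration; the tables report values rounded to two decimal places.
+
+ ```python
+ def combined_losses(w, a, h):
+     """Per-regime combined loss from per-regime weights on the human (Equation 1)."""
+     return [(1 - wi) * ai + wi * hi for wi, ai, hi in zip(w, a, h)]
+
+ # (algorithm losses, human losses, weights on the human) for each scenario.
+ scenarios = {
+     "Scenario 1": ([0.35, 0.65], [1.0, 0.5], [0.9, 0.1]),    # inappropriate reliance
+     "Scenario 2": ([0.35, 0.65], [1.0, 0.5], [0.25, 0.75]),  # appropriate reliance
+     "Scenario 3": ([0.2, 0.8], [1.15, 0.35], [0.25, 0.75]),  # plus more variable losses
+ }
+ for name, (a, h, w) in scenarios.items():
+     c = combined_losses(w, a, h)
+     print(name, [round(ci, 4) for ci in c], "average:", round(sum(c) / 2, 4))
+ # Averages: 0.785, 0.525, and 0.45, matching the tables up to two-decimal rounding.
+ ```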
194
+
195
+ Let's return to our motivating example: a doctor using algorithmic input to make predictions about patients. These three examples illustrate different possible overall loss rates from a doctor and algorithm with the same average loss rates alone. It is clearly important for overall performance (including patient well-being) to determine whether the human-algorithm system will result in something like Scenario 1 (with higher loss than either the doctor or algorithm alone) or Scenario 3 (with lower loss than either alone). In the coming sections we lay out a theoretical framework for analyzing combined human-algorithm systems more generally to understand their implications for fairness and
196
+
197
+ complementary performance. Our analysis formalizes the observations made in the scenarios above by precisely characterizing the role that "appropriate" reliance on the algorithm and variability in performance across regimes play in complementarity and fairness.
198
+
199
+ # 4.2 Complementarity and fairness
200
+
201
+ Complementarity is the best-case scenario for human-algorithm collaboration, and much of our paper will revolve around proving when it can and cannot exist. For instance, in Section 5 we will give theoretical results describing the kinds of factors necessary for complementarity to be achievable. In particular, we will give conditions on the distributions of loss rates as well as the way predictions are combined. We will show that, all else being equal, complementarity is easier to achieve when distributions of loss for the algorithm and unaided human are highly variable. For example, the algorithm's loss rates are less variable in Tables 1 and 2 (ranging from 0.35 to 0.65) than they are in Table 3 (where they go from 0.2 to 0.8). However, variable loss rates naturally have fairness implications.
202
+
203
+ Fairness concerns are especially salient if the regimes are correlated with sensitive attributes, such as race, ethnicity, sex, gender, or socioeconomic status. One fairness question could revolve around the "loss disparity" - the difference in loss rates between multiple regimes. Many papers in algorithmic fairness focus on loss disparity (also called accuracy parity or disparate mistreatment [13, 40]), making it a natural focus in our work. In Table 3, there's a loss disparity (difference in loss rates between regimes) of 0.8 for the unaided human and 0.6 for the algorithm. However, the combined system has a loss disparity of 0.02—much lower than either the human or algorithm! Can we guarantee that combined systems will always have lower loss disparities? In Section 6 we will show that, under some conditions, complementarity implies a bounded loss disparity. Another fairness concern could revolve around whether the benefits of incorporating an algorithm are shared among all groups. For example, in Table 3, regime 1 sees a reduction in loss when the algorithm is incorporated, going from 1.15 (unaided human) to 0.44 (combined human with algorithm). However, regime 2 sees an increase in loss, from 0.35 to 0.46. Ideally, a combined system should benefit all regimes—but when is this possible? In Section 6, we will show that, unfortunately, any system exhibiting complementarity can't be one where all regimes see their loss decrease from what it was with the unaided human.
204
+
205
+ # 5 COMPLEMENTARITY
206
+
207
+ In this section, we analyze complementarity: when will a combined human-algorithm system have lower average loss than either the unaided human or algorithm? First, we give general results for when complementarity is impossible to achieve. Secondly, we build on these previous results in order to give constructive examples where complementarity is possible. Finally, we discuss some implications of our findings. All proofs are given in Appendix B.
208
+
209
+ # 5.1 Cases Where Complementarity is Impossible
210
+
211
+ This section gives cases where complementarity is impossible to achieve. These results help to narrow the scope of cases that we must consider for future analysis. They could also be helpful for practitioners: if their setting is addressed by one of these lemmas, then they immediately know that their system can never achieve complementarity.
212
+
213
+ We begin by presenting two lemmas that concern the distribution of loss rates for the unaided human or the algorithm. Lemma 2 considers a case where the loss of the algorithm and unaided human are constant across regimes. Constant loss is of course unlikely to arise in practice, but it is a setting often considered in the computer science literature.
214
+
215
+ Lemma 2. A human-algorithm system where unaided human and algorithm loss rates are constant over regimes can never achieve complementary performance.
216
+
217
+ The next result, Lemma 3, considers the setting where one of the components outperforms the other in every regime. Specifically, it says that if one of the unaided human or algorithm always has lower loss than the other, then complementarity is impossible.
218
+
219
+ Lemma 3. Complementarity is impossible if one of the human or algorithm always weakly dominates the loss of the other: that is, if $a_i \leq h_i$ for all $i$ , or $a_i \geq h_i$ for all $i$ .
220
+
221
+ This result may have implications for tasks where the algorithm has extremely high performance, achieving lower loss than the human for all types of inputs. While this could mean that the combined system will have lower loss than the human alone, Lemma 3 tells us that it can't achieve lower loss than both the human and algorithm.
222
+
223
+ Next, we will consider two results that concern properties of the combining rule (the way the combiner incorporates predictions from the unaided human and the algorithm). First, Lemma 4 analyzes a case where the combining function is convex in its arguments.
224
+
225
+ Lemma 4. A combining function $c(a_i, h_i)$ that is convex in $a_i, h_i$ can never achieve complementary performance.
226
+
227
+ Recalling that the maximum function $\max(a, h)$ , which returns whichever of the unaided human or algorithm loss is higher, is convex, this result is very intuitive.
228
+
229
+ A simple but important corollary (Corollary 1) is that complementarity is impossible whenever the weighting function $w_{h}(a_{i}, h_{i})$ is constant (independent of the algorithm's or human's loss rate). This might reflect a situation where the decision-maker either is ignorant of the loss rates of the human and algorithm, or else decides to ignore them.
230
+
231
+ Corollary 1. A combining function with a constant weighting function $w_{h}(a_{i}, h_{i}) = w_{h}$ can never achieve complementary performance.
232
+
233
+ PROOF. Note that $c(a, h) = w_h \cdot h + (1 - w_h) \cdot a$ is convex in both $a$ and $h$ .
234
+
235
+ These results, taken together, show that any system that could potentially achieve complementarity must have a weighting function that varies with the inputs, must have human or algorithmic loss that varies across regimes, must have a combining function that is not convex, and cannot have either the human or algorithm dominate the loss of the other.
236
+
237
+ # 5.2 Cases Where Complementarity is Possible: $N = 2$ regimes
238
+
239
+ Having shown cases where complementarity is impossible, in this section we give conditions where complementarity is possible. As we described in Section 3, our notation describes the loss of the unassisted human in regime $i$ by $h_i = H + \delta_{hi}$ and the loss of the algorithm by $a_i = A + \delta_{ai}$ , where $H$ and $A$ are the average losses of the unassisted human and algorithm, respectively. Additionally, $w_h(a_i, h_i)$ (as defined in Equation 1) is the weighting function—the weight that the human places on themselves in making a final prediction.
240
+
241
+ We will first build intuition with the $N = 2$ case: there are exactly two regimes, with regime 1 having probability $p$ of occurring. By assumption, $\sum_{i=1}^{N} p_i \cdot \delta_{ai} = 0$ (and similarly for unaided human loss). This allows us to simplify the $N = 2$ loss distributions into:
242
+
243
+ $$
244
+ \text{unaided human loss:} \begin{cases} h_1 = H + \delta_h \\ h_2 = H - \frac{p}{1-p}\delta_h \end{cases} \qquad \text{algorithmic loss:} \begin{cases} a_1 = A + \delta_a \\ a_2 = A - \frac{p}{1-p}\delta_a \end{cases}
245
+ $$
246
+
247
+ ![](images/8b01d29a78732d841c6c41948a20208d84b8342cece0ad7fc4179b1df5cfffa0.jpg)
248
+ Fig. 2. Low combined loss occurs for high variability (large $|\delta_a - \delta_h|$ ). The plot displays combined loss for an $N = 2$ system, for four combining functions. The black dashed line gives the loss of the algorithm alone: since for this setting the algorithm has lower error than the unaided human, complementarity occurs below this line. Note that for all four combining functions, complementarity occurs where $|\delta_a - \delta_h|$ is large. For details on the combining functions, including the exemplar function (original to this work), see Appendix A.
249
+
250
+ This formulation is without loss of generality because we allow $\delta_{a},\delta_{h}$ to be positive, negative, or zero. In the case that $\delta_{a},\delta_{h}$ have the same sign, loss rates for the human and algorithm are correlated. If they have different signs, then loss rates are anti-correlated. $\delta_{a},\delta_{h}$ values of larger magnitude correspond to a more variable distribution of losses.
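+
+ This parameterization is easy to work with computationally. As a quick illustrative sketch (ours, not part of the formal development), the helper below constructs the per-regime losses from $(H, A, \delta_h, \delta_a, p)$ and recovers Scenario 3 from Section 4.
+
+ ```python
+ def n2_losses(H, A, d_h, d_a, p):
+     """N = 2 parameterization: the probability-weighted deltas sum to zero by construction."""
+     h = [H + d_h, H - p / (1 - p) * d_h]
+     a = [A + d_a, A - p / (1 - p) * d_a]
+     return a, h
+
+ # Scenario 3: H = 0.75, A = 0.5, delta_h = 0.4, delta_a = -0.3, p = 0.5.
+ print(n2_losses(0.75, 0.5, 0.4, -0.3, 0.5))
+ # ([0.2, 0.8], [1.15, 0.35]) up to floating point: the Table 3 losses.
+ ```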
251
+
252
+ Lemma 5 gives a condition for when an $N = 2$ example can achieve complementarity. Note that the condition gives a lower bound on $|\delta_{a} - \delta_{h}|$ , a term which reflects the variability in human and algorithmic loss. This bound depends on $A$ and $H$ , the average loss rates of the algorithm and human. It also depends on $w_{h}(a_{1},h_{1})$ and $w_{h}(a_{2},h_{2})$ , the weighting functions for the human in the first and second regimes, respectively. Recall that Corollary 1 tells us that any system exhibiting complementarity cannot have $w_{h}(a_{1},h_{1}) = w_{h}(a_{2},h_{2})$ , because that would imply a constant weighting function.
253
+
254
+ Lemma 5. Consider the case where $N = 2$ , and WLOG assume that $A \leq H$ : the algorithm has lower average loss than the human. Then, the combined system exhibits complementarity whenever:
255
+
256
+ $$
257
+ (H - A) \cdot \frac{w_h(a_1, h_1) + \frac{1-p}{p} \cdot w_h(a_2, h_2)}{\left| w_h(a_2, h_2) - w_h(a_1, h_1) \right|} < \left| \delta_a - \delta_h \right|
258
+ $$
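+
+ For intuition, the check below plugs the Section 4 scenarios into the Lemma 5 condition. This is an illustrative Python sketch of ours; the $\delta$ values follow from the tables and the $N = 2$ parameterization above.
+
+ ```python
+ def lemma5_lhs(H, A, p, w1, w2):
+     """Left-hand side of the Lemma 5 condition (assumes A <= H and w1 != w2)."""
+     return (H - A) * (w1 + (1 - p) / p * w2) / abs(w2 - w1)
+
+ # Scenarios 2 and 3 share H = 0.75, A = 0.5, p = 0.5, and weights 0.25 and 0.75.
+ print(lemma5_lhs(H=0.75, A=0.5, p=0.5, w1=0.25, w2=0.75))  # 0.5
+ # Scenario 2: |delta_a - delta_h| = |-0.15 - 0.25| = 0.4 < 0.5 -> condition fails.
+ # Scenario 3: |delta_a - delta_h| = |-0.3 - 0.4| = 0.7 > 0.5 -> complementarity.
+ ```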
259
+
260
+ Figure 2 displays the loss of the combined system for four different possible combining functions (models of how humans incorporate algorithmic advice). For each of them, complementarity occurs when $|\delta_{a} - \delta_{h}|$ is high, as Lemma 5 suggests. However, as Lemma 6 states, this does not mean that unaided human and algorithm loss need to be anti-correlated.
261
+
262
+ Lemma 6. A system can exhibit complementarity even if the unaided human and the algorithm have correlated loss (both have higher loss in the same regime).
263
+
264
+ This is a reassuring result because, otherwise, complementarity would not be possible in settings where a given regime is fundamentally harder than another. For example, one regime might be low-resolution images while another might be high-resolution images, so both humans and algorithms are expected to perform worse in the low-resolution regime.
265
+
266
+ <table><tr><td></td><td>(Unaided) human</td><td>Algorithm</td><td>Combined (human using algorithm)</td><td>Weight (on unaided human)</td></tr><tr><td>Regime 1</td><td>1.15</td><td>0.2</td><td>0.44</td><td>0.25</td></tr><tr><td>Regime 2</td><td>0.35</td><td>0.8</td><td>0.46</td><td>0.75</td></tr><tr><td>Average</td><td>0.75</td><td>0.5</td><td>0.45</td><td>0.5</td></tr></table>
267
+
268
+ Table 5. (Reproduced version of Table 3). The unaided human has loss 0.75, the algorithm has loss 0.5, and the combined system has loss 0.45 (complementary performance). In this table, we have $\delta_{a} = -0.3$ , $\delta_{h} = 0.4$ , for $|\delta_{a} - \delta_{h}| = 0.7$ . Note that here, losses are anti-correlated: the unaided human has higher loss in regime 1, while the algorithm has higher loss in regime 2.
269
+
270
+ <table><tr><td></td><td>(Unaided) human</td><td>Algorithm</td><td>Combined (human using algorithm)</td><td>Weight (on unaided human)</td></tr><tr><td>Regime 1</td><td>1.48</td><td>0.53</td><td>0.77</td><td>0.25</td></tr><tr><td>Regime 2</td><td>0.02</td><td>0.47</td><td>0.13</td><td>0.75</td></tr><tr><td>Average</td><td>0.75</td><td>0.5</td><td>0.45</td><td>0.5</td></tr></table>
271
+
272
+ Table 6. Correlated loss: The combined system (human using algorithm) displays complementary performance even though the unaided human and the algorithm both have higher loss for regime 1. In this table, we have $\delta_{a} = 0.03$ , $\delta_{h} = 0.73$ , for $|\delta_{a} - \delta_{h}| = 0.7$ .
273
+
274
+ In our notation, if $\delta_{a} > 0$ and $\delta_{h} > 0$ , then both the unaided human and algorithm have lower loss in regime 2. If $|\delta_{a} - \delta_{h}|$ is large, complementarity may still be possible. However, because $\delta_{a}$ and $\delta_{h}$ have the same sign, the variability must be larger in order for $|\delta_{a} - \delta_{h}|$ to satisfy the lower bound.
275
+
276
+ Tables 5 and 6 prove Lemma 6 through an example. Table 5 is a copy of Scenario 3 (Table 3) from the motivating example in Section 4. The values in the table correspond to $\delta_{a} = -0.3$ , $\delta_{h} = 0.4$ (giving $|\delta_{a} - \delta_{h}| = 0.7$ ). Here, the losses are anti-correlated: the unaided human has higher loss in regime 1, while the algorithm has higher loss in regime 2.
277
+
278
+ Table 6 gives an example where the losses are correlated: both the unaided human and algorithm have higher loss for regime 1. The values in the table are given by $\delta_{a} = 0.03$ , $\delta_{h} = 0.73$ , which again gives $|\delta_{a} - \delta_{h}| = 0.7$ . Even though losses are correlated, the overall system still displays complementarity because $|\delta_{a} - \delta_{h}|$ has remained the same, as Lemma 5 suggests.
279
+
280
+ # 5.3 Cases Where Complementarity is Possible: $N > 2$ regimes
281
+
282
+ Finally, we will consider the general $N > 2$ case.
283
+
284
+ Lemma 7. WLOG, assume that $A \leq H$ : the algorithm has lower loss, on average. Then, the condition below is necessary and sufficient for complementarity of the human-algorithm system:
285
+
286
+ $$
287
+ (H - A) \cdot \sum_{i=1}^{N} p_i \cdot w_h(a_i, h_i) < \sum_{i=1}^{N} p_i \cdot w_h(a_i, h_i) \cdot (\delta_{ai} - \delta_{hi})
288
+ $$
289
+
290
+ If we view $w_h(a_i, h_i)$ and $\delta_{ai}, \delta_{hi}$ as random variables over the instance space, with probability mass governed by the distribution of instances given by $\{p_i\}$ , then we can interpret the condition as:
291
+
292
+ $$
293
+ (H - A) \cdot \mathbb{E}\left[w_h(a_i, h_i)\right] < \mathrm{Cov}\left(w_h(a_i, h_i), \; \delta_{ai} - \delta_{hi}\right)
294
+ $$
295
+
296
+ where $\text{Cov}(\cdot)$ gives the covariance.
297
+
298
+ Lemma 7 gives conditions on complementarity, requiring that the weighting function $w_{h}(a_{i}, h_{i})$ have high covariance with the difference between $\delta_{ai}$ and $\delta_{hi}$ . Intuitively, this means that when the algorithm is further above its typical loss than the unaided human (when $\delta_{ai} > \delta_{hi}$ ), then the combined loss should rely more heavily on the unaided human
299
+
300
+ ( $w_{h}(a_{i}, h_{i})$ should be large). Conversely, if the algorithm is further below its typical loss than the unaided human (when $\delta_{ai} < \delta_{hi}$ ), then the combined loss should rely more heavily on the algorithm ( $w_{h}(a_{i}, h_{i})$ should be small). The lefthand side of the equation lower bounds how large this covariance must be, as a function of the gap between the average loss of the unaided human and the algorithm, and the expected value of the weighting function.
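+
+ The covariance form is straightforward to check numerically. The sketch below (our illustration) evaluates both sides of the Lemma 7 condition for Scenario 3; note that $\mathbb{E}[\delta_{ai} - \delta_{hi}] = 0$ by construction, so the covariance reduces to $\mathbb{E}[w_h \cdot (\delta_{ai} - \delta_{hi})]$ .
+
+ ```python
+ def lemma7_sides(p, a, h, w):
+     """Both sides of the Lemma 7 condition (assumes A <= H)."""
+     A = sum(pi * ai for pi, ai in zip(p, a))
+     H = sum(pi * hi for pi, hi in zip(p, h))
+     d = [(ai - A) - (hi - H) for ai, hi in zip(a, h)]  # delta_ai - delta_hi
+     e_w = sum(pi * wi for pi, wi in zip(p, w))
+     cov = sum(pi * wi * di for pi, wi, di in zip(p, w, d))  # E[d] = 0, so Cov = E[w * d]
+     return (H - A) * e_w, cov
+
+ # Scenario 3 (Table 3): complementarity holds since 0.125 < 0.175.
+ lhs, rhs = lemma7_sides([0.5, 0.5], a=[0.2, 0.8], h=[1.15, 0.35], w=[0.25, 0.75])
+ print(lhs < rhs, round(lhs, 3), round(rhs, 3))  # True 0.125 0.175
+ ```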
301
+
302
+ The results in this section imply that, to achieve complementarity, unaided human and algorithmic error rates must be highly variable. One question this raises is about achievability: is it even possible to arrange human and algorithmic loss rates like this? For example, achieving a highly variable loss rate for an algorithm may require retraining, and for certain cases may not be possible. Manipulating the loss rate for a human may be possible through re-assigning human effort: for example, assigning multiple humans to certain portions of the input space in order to reduce loss.
303
+
304
+ Even if highly variable loss rates are possible, they may not be desirable for other reasons. In particular, the next section discusses the fairness implications of complementarity.
305
+
306
+ # 6 FAIRNESS
307
+
308
+ There are numerous possible notions of fairness that could be relevant for a human-algorithm classifier. In this section, we will analyze three of them and describe how they relate to complementarity. (All proofs are given in Appendix B.) In general, the notions of fairness concern disparities in loss across different regimes. Disparities in loss rates could be alarming (if they align or correlate with sensitive attributes, such as race, gender, or socioeconomic status) or innocuous (if they are unrelated to sensitive attributes). In this paper, we will consider two general classes of fairness concerns: fairness of benefit and loss disparity rates. Fairness of benefit relates to which regimes see their loss rate decrease when the algorithm is incorporated into the decision-making process. Ideally, all regimes would benefit from human-algorithm collaboration. We will show that this is unfortunately not possible. Loss disparity rates relate to the gap in loss rates between different regimes: ideally, this gap would be small, which would reflect equal loss rates for all regimes. In this work, we will show that complementarity can sometimes help bound loss disparity.
309
+
310
+ # 6.1 Fairness of benefit
311
+
312
+ First, Definition 2 says that a system has "fairness of benefit" if all regimes experience a lower loss from the combined human-algorithm system than they would experience with the unassisted human.
313
+
314
+ Definition 2 (Fairness of benefit). A human/algorithm system exhibits fairness of benefit if all regimes benefit from the combined system (experience a lower loss than with the unaided human).
315
+
316
+ This notion captures the desideratum that the benefits resulting from switching to a combined human-algorithm system are shared by all. However, Lemma 8 shows that this notion of fairness is incompatible with complementarity.
317
+
318
+ Lemma 8. Any system exhibiting fairness of benefits cannot have complementary performance.
319
+
320
+ Proof. This result is largely due to Lemma 3. Note that we have:
321
+
322
+ $$
323
+ c(a_i, h_i) = (1 - w_h(a_i, h_i)) \cdot a_i + w_h(a_i, h_i) \cdot h_i
324
+ $$
325
+
326
+ with $w_{h}(a_{i}, h_{i}) \in [0,1]$ . Then, if we have fairness of benefits, we know:
327
+
328
+ $$
329
+ (1 - w_h(a_i, h_i)) \cdot a_i + w_h(a_i, h_i) \cdot h_i < h_i \quad \forall i \in [N]
330
+ $$
331
+
332
+ In order to achieve this, we need $w_{h}(a_{i}, h_{i}) < 1$ and $a_{i} < h_{i}$ , for all $i \in [N]$ . However, this is exactly the condition addressed in Lemma 3: the algorithm always has lower loss than the unassisted human, which means that complementarity is impossible.
333
+
334
+ Note that Definition 2 could be defined symmetrically, defining "benefit" as a reduction in loss as compared to the algorithmic prediction. In this case, an analogous version of Lemma 8 would hold, similarly showing that this notion of fairness is incompatible with complementarity.
335
+
336
+ Lemma 8 tells us that there is an inherent tension between achieving complementarity and ensuring all people who use the system see their loss rates decrease. For certain application areas, fairness of benefit might be more important than complementarity, so practitioners might consciously choose to prioritize it. On the other hand, a practitioner who opts to achieve complementarity instead of fairness of benefit might wish to consider alternate ways to support any groups that see their loss increase in the combined system.
337
+
338
+ # 6.2 Loss disparity
339
+
340
+ Next, Definition 3 describes fairness as the disparity in loss rates between different regimes. Intuitively, this notion relates to group-based notions of fairness: loss rates should be relatively similar between members of different groups. However, Lemma 9 again describes how this notion of fairness is in tension with complementarity, which puts a lower bound on the level of this kind of unfairness.
341
+
342
+ Definition 3 (Loss disparity). A prediction system exhibits $\epsilon$ -loss disparity if the losses in different regimes differ by no more than $\epsilon$ . We will use $\epsilon_h, \epsilon_a, \epsilon_c$ to refer to the loss disparity of the unaided human, algorithm alone, and combined human-algorithm system, respectively.
343
+
344
+ Lemma 9. WLOG assume that $A \leq H$ : the algorithm has lower average loss than the human. Then, any system exhibiting complementarity has a lower bound on $\epsilon_{a} + \epsilon_{h}$ , the sum of the loss disparities of the unaided human and algorithm (where $C$ denotes the average loss of the combined system):
345
+
346
+ $$
347
+ A - C + (H - A) \cdot \sum_{i=1}^{N} p_i \cdot w_h(a_i, h_i) < \epsilon_a + \epsilon_h
348
+ $$
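+
+ As a quick numeric illustration (ours), Scenario 3 satisfies this bound comfortably: with $A = 0.5$ , $C = 0.45$ , $H = 0.75$ , and $\mathbb{E}[w_h] = 0.5$ , the bound is 0.175, while $\epsilon_a + \epsilon_h = 0.6 + 0.8 = 1.4$ .
+
+ ```python
+ def loss_disparity(losses):
+     """epsilon: the largest gap in loss between any two regimes."""
+     return max(losses) - min(losses)
+
+ # Scenario 3 (Table 3).
+ A, C, H, e_w = 0.5, 0.45, 0.75, 0.5
+ bound = A - C + (H - A) * e_w                                       # about 0.175
+ total = loss_disparity([0.2, 0.8]) + loss_disparity([1.15, 0.35])   # 0.6 + 0.8 = 1.4
+ print(bound < total)                                                # True: Lemma 9 holds
+ ```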
349
+
350
+ Lemma 9 should match our intuition from Section 5. There, results indicated that complementarity is easiest to achieve when loss rates are highly variable. However, this directly conflicts with the goal of minimizing loss disparity.
351
+
352
+ Next, we will consider the loss disparity of the combined human-algorithm system. Unfortunately, it is possible for a system exhibiting complementarity to exacerbate unfairness. Table 7 gives an example where the combined system has a higher loss disparity than either the unaided human or the algorithm, even though it exhibits complementarity. This result (which parallels results from [11, 12, 37]) means that practitioners must be careful when discussing fairness of combined systems: fairness guarantees from the individual components don't necessarily transfer to the combined human-algorithm system. However, Lemma 10 gives a condition where the loss disparity of the combined human-algorithm system is upper bounded by the loss disparity of the unaided human or the algorithm.
353
+
354
+ Lemma 10. Define $i+$ as the regime where the combined human-algorithm system has highest loss and $i-$ as the regime where it has lowest loss. Then, the loss disparity of the combined system is upper bounded by the loss disparity of the unaided human or algorithm, so long as neither the unaided human nor the algorithm dominates the other in both $i+$ and $i-$ . That is,
355
+
356
+ $$
357
+ \text{If either case is satisfied:} \begin{cases} h_{i+} \leq a_{i+} \text{ and } h_{i-} \geq a_{i-} \\ h_{i+} \geq a_{i+} \text{ and } h_{i-} \leq a_{i-} \end{cases} \quad \Rightarrow \quad \epsilon_c \leq \max(\epsilon_a, \epsilon_h)
358
+ $$
359
+
360
365
+
366
+ <table><tr><td></td><td>(Unaided) Human</td><td>Algorithm</td><td>Combined (human using algorithm)</td></tr><tr><td>Regime 1</td><td>0.95</td><td>0.85</td><td>0.895</td></tr><tr><td>Regime 2</td><td>0.95</td><td>0.02</td><td>0.05</td></tr><tr><td>Regime 3</td><td>0.15</td><td>0.45</td><td>0.255</td></tr><tr><td>Average loss</td><td>0.68</td><td>0.44</td><td>0.40</td></tr></table>
367
+
368
+ Table 7. This system exhibits complementarity, since average loss is lowest in the combined human-algorithm system. However, note that loss disparity is increased in the combined system: $\epsilon_h = 0.8$ , $\epsilon_a = 0.83$ , but $\epsilon_c = 0.84$ .
369
+
370
+ This last lemma gives our first positive result for fairness: it gives conditions where a human-algorithm system exhibiting complementarity at least doesn't exacerbate unfairness. Specifically, what it requires is that neither the unaided human nor the algorithm dominates the other in both of the most extreme regimes (where the combined system has highest and lowest loss). As we would expect, the scenario in Table 7 violates this: the algorithm has weakly lower loss than the unaided human in both regime 1 (where the combined system has its highest loss) and regime 2 (where it has its lowest loss), which allows the combined loss disparity $\epsilon_{c}$ to be greater than $\max (\epsilon_{a},\epsilon_{h})$ . Interestingly, Lemma 10 is quite powerful: it only relies on the losses within two specific regimes and holds regardless of whether the overall system satisfies complementarity. Practitioners could use Lemma 10 to guide their algorithm development: so long as the preconditions are satisfied, they can guarantee that the combined system will never exacerbate unfairness.
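+
+ The Lemma 10 precondition is cheap to check in practice. Below is a small Python sketch (our illustration) that locates the extreme regimes of the combined system and tests the precondition; on the Table 7 losses it returns False, consistent with the disparity increase observed there.
+
+ ```python
+ def lemma10_precondition(a, h, c):
+     """True if neither input dominates the other on the regimes where the
+     combined system has its highest (i+) and lowest (i-) loss."""
+     i_plus = max(range(len(c)), key=lambda i: c[i])
+     i_minus = min(range(len(c)), key=lambda i: c[i])
+     case1 = h[i_plus] <= a[i_plus] and h[i_minus] >= a[i_minus]
+     case2 = h[i_plus] >= a[i_plus] and h[i_minus] <= a[i_minus]
+     return case1 or case2
+
+ # Table 7: the human has weakly higher loss in both extreme regimes.
+ print(lemma10_precondition(a=[0.85, 0.02, 0.45], h=[0.95, 0.95, 0.15],
+                            c=[0.895, 0.05, 0.255]))  # False
+ ```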
371
+
372
+ As we mentioned previously, these definitions are only a few of the possible fairness concerns we could analyze. However, this analysis highlights the importance of considering fairness, especially ways that it might be in tension with achieving complementarity.
373
+
374
+ # 7 CONCLUSION AND FUTURE DIRECTIONS
375
+
376
+ In this work, we introduce a simple theoretical model of human-algorithm collaboration, which we show is flexible enough to encompass models analyzed in prior work. Using this model, we obtain theoretical impossibility results that characterize settings where complementarity is not achievable. We also use this framework to construct cases where complementarity is possible, given certain conditions on the loss distributions. Finally, we consider the implications, especially for fairness, of the requirements for achieving complementarity.
377
+
378
+ Our approach admits multiple possible avenues for future work. Our work highlights the importance of variable loss rates: algorithmic loss that is not constant over the input space. However, as mentioned previously, it may not be possible to achieve extremely variable loss rates. Future work could model algorithmic loss rates more explicitly, describing loss distributions that are both achievable and lead to complementary performance. For example, some prior work has demonstrated that, for many algorithms, reducing loss becomes harder as the level of loss decreases, which could make it more difficult to achieve highly variable loss rates [16, 17].
379
+
380
+ Similarly, future work could relax assumptions made in our work. Relaxing Assumption 2, for example, could involve analyzing combining rules that, on any individual regime, could do better or worse than the human or algorithmic input loss rates. Relaxing Assumption 1 could involve modeling cases where regimes with identical human and algorithmic loss rates might be treated differently by the combiner. Any of these future analyses could allow us to have greater insight into a variety of ways human-algorithm systems perform.
381
+
382
+ # ACKNOWLEDGMENTS
383
+
384
+ This work was supported in part by NSF grant DGE-1650441. We are grateful to Vijay Keswani, Jon Kleinberg, Pang Wei Koh, Michela Meister, Emma Pierson, Charvi Rastogi, Aaron Roth, Kiran Tomlinson, Aaron Tucker, Qian Yang, Joyce Zhou, James Zou, the AWS Machine Learning team, the AI, Policy, and Practice working group at Cornell, and the
385
+
386
+ attendees at the NeurIPS 2021 Workshops on Human-Centered AI and Human and Machine Decisions for invaluable discussions.
387
+
388
+ # REFERENCES
389
+
390
+ [1] Gagan Bansal, Besmira Nushi, Ece Kamar, Eric Horvitz, and Daniel S Weld. 2021. Is the Most Accurate AI the Best Teammate? Optimizing AI for Teamwork. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 35. 11405-11414.
391
+ [2] Gagan Bansal, Tongshuang Wu, Joyce Zhou, Raymond Fok, Besmira Nushi, Ece Kamar, Marco Tulio Ribeiro, and Daniel Weld. 2021. Does the whole exceed its parts? The effect of AI explanations on complementary team performance. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1-16.
392
+ [3] Emma Beede, Elizabeth Baylor, Fred Hersch, Anna Iurchenko, Lauren Wilcox, Paisan Ruamviboonsuk, and Laura M Vardoulakis. 2020. A human-centered evaluation of a deep learning system deployed in clinics for the detection of diabetic retinopathy. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems. 1-12.
393
+ [4] Federico Cabitza, Andrea Campagner, and Luca Maria Sconfienza. 2021. Studying human-AI collaboration protocols: the case of the Kasparov's law in radiological double reading. Health Information Science and Systems 9, 1 (2021), 1-20.
394
+ [5] Bo Cowgill and Megan T Stevenson. 2020. Algorithmic social engineering. In AEA Papers and Proceedings, Vol. 110. 96-100.
395
+ [6] Abir De, Nastaran Okati, Ali Zarezade, and Manuel Gomez-Rodriguez. 2021. Classification under human assistance. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 35. 5905-5913.
396
+ [7] Maria De-Arteaga, Riccardo Fogliato, and Alexandra Chouldechova. 2020. A case for humans-in-the-loop: Decisions in the presence of erroneous algorithmic scores. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems. 1-12.
397
+ [8] Luca Didaci, Giorgio Fumera, and Fabio Roli. 2013. Diversity in classifier ensembles: Fertile concept or dead end?. In International workshop on multiple classifier systems. Springer, 37-48.
398
+ [9] Berkeley J Dietvorst and Soaham Bharti. 2020. People reject algorithms in uncertain decision domains because they have diminishing sensitivity to forecasting error. Psychological science 31, 10 (2020), 1302-1314.
399
+ [10] Berkeley J Dietvorst, Joseph P Simmons, and Cade Massey. 2018. Overcoming algorithm aversion: People will use imperfect algorithms if they can (even slightly) modify them. Management Science 64, 3 (2018), 1155-1170.
400
+ [11] Cynthia Dwork and Christina Ilvento. 2018. Fairness Under Composition. In 10th Innovations in Theoretical Computer Science Conference (ITCS 2019). Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik.
401
+ [12] Cynthia Dwork, Christina Ilvento, and Meena Jagadeesan. 2020. Individual Fairness in Pipelines. In 1st Symposium on Foundations of Responsible Computing (FORC 2020). Schloss Dagstuhl-Leibniz-Zentrum für Informatik.
402
+ [13] Aaron Fraenkel. 2020. Fairness and Algorithmic Decision Making. https://afraenkel.github.io/fairness-algo-decision
403
+ [14] Robert Geirhos, Kristof Meding, and Felix A Wichmann. 2020. Beyond accuracy: quantifying trial-by-trial behaviour of CNNs and humans by measuring error consistency. Advances in Neural Information Processing Systems 33 (2020), 13890-13902.
404
+ [15] Talia Gillis, Bryce McLaughlin, and Jann Spiess. 2021. On the Fairness of Machine-Assisted Human Decisions. arXiv preprint arXiv:2110.15310 (2021).
405
+ [16] Joel Hestness, Sharan Narang, Newsha Ardalani, Gregory F. Diamos, Heewoo Jun, Hassan Kianinejad, Md. Mostofa Ali Patwary, Yang Yang, and Yanqi Zhou. 2017. Deep Learning Scaling is Predictable, Empirically. CoRR abs/1712.00409 (2017). arXiv:1712.00409 http://arxiv.org/abs/1712.00409
406
+ [17] Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361 (2020).
407
+ [18] Gavin Kerrigan, Padhraic Smyth, and Mark Steyvers. 2021. Combining Human Predictions with Model Probabilities via Confusion Matrices and Calibration. (2021).
408
+ [19] Vijay Keswani, Matthew Lease, and Krishnaram Kenthapadi. 2021. Towards unbiased and accurate deferral to multiple experts. In Proceedings of the 2021 AAAI/ACM Conference on AI, Ethics, and Society. 154-165.
409
+ [20] Vijay Keswani, Matthew Lease, and Krishnaram Kenthapadi. 2022. Designing Closed Human-in-the-loop Deferral Pipelines. arXiv preprint arXiv:2202.04718 (2022).
410
+ [21] Ludmila I Kuncheva. 2014. Combining pattern classifiers: methods and algorithms. John Wiley & Sons.
411
+ [22] Sarah Lebovitz, Natalia Levina, and Hila Lifshitz-Assaf. 2021. Is AI ground truth really “true”? The dangers of training and evaluating AI tools based on experts' know-what. Management Information Systems Quarterly (2021).
412
+ [23] Sarah Lebovitz, Hila Lifshitz-Assaf, and Natalia Levina. 2020. To incorporate or not to incorporate AI for critical judgments: The importance of ambiguity in professionals' judgment process. Collective Intelligence, The Association for Computing Machinery (2020).
413
+ [24] David Madras, Toni Pitassi, and Richard Zemel. 2018. Predict Responsibly: Improving Fairness and Accuracy by Learning to Defer. In Advances in Neural Information Processing Systems, S. Bengio, H. Wallach, H. Larochelle, K. Grauman, N. Cesa-Bianchi, and R. Garnett (Eds.), Vol. 31. Curran Associates, Inc. https://proceedings.neurips.cc/paper/2018/file/09d37c08f7b129e96277388757530c72-Paper.pdf
414
+ [25] Paul E Meehl. 1954. Clinical versus statistical prediction: A theoretical analysis and a review of the evidence. (1954).
415
+ [26] Vahid Balazadeh Meresht, Abir De, Adish Singla, and Manuel Gomez-Rodriguez. 2020. Learning to switch between machines and humans. ICML Workshop on Human-AI Collaboration in Sequential Decision-Making. (2020).
416
419
+ [27] Nastaran Okati, Abir De, and Manuel Gomez-Rodriguez. 2021. Differentiable learning under triage. Advances in Neural Information Processing Systems 34 (2021).
420
+ [28] Chinasa T Okolo, Srujana Kamath, Nicola Dell, and Aditya Vashistha. 2021. "It cannot do all of my work": Community Health Worker Perceptions of AI-Enabled Mobile Health Applications in Rural India. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1–20.
421
+ [29] Forough Poursabzi-Sangdeh, Daniel G Goldstein, Jake M Hofman, Jennifer Wortman Vaughan, and Hanna Wallach. 2021. Manipulating and measuring model interpretability. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. 1-52.
422
+ [30] Maithra Raghu, Katy Blumer, Greg Corrado, Jon Kleinberg, Ziad Obermeyer, and Sendhil Mullainathan. 2018. The algorithmic automation problem: Prediction, triage, and human effort. NeurIPS Workshop on Machine Learning for Health (ML4H) (2018).
423
+ [31] Rory Sayres, Ankur Taly, Ehsan Rahimy, Katy Blumer, David Coz, Naama Hammel, Jonathan Krause, Arunachalam Narayanaswamy, Zahra Rastegar, Derek Wu, Shawn Xu, Scott Barb, Anthony Joseph, Michael Shumski, Jesse Smith, Arjun B. Sood, Greg S. Corrado, Lily Peng, and Dale R. Webster. 2019. Using a deep learning algorithm and integrated gradients explanation to assist grading for diabetic retinopathy. Ophthalmology 126, 4 (2019), 552-564.
424
+ [32] Megha Srivastava, Besmira Nushi, Ece Kamar, Shital Shah, and Eric Horvitz. 2020. An empirical analysis of backward compatibility in machine learning systems. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 3272-3280.
425
+ [33] Eleni Straitouri, Lequn Wang, Nastaran Okati, and Manuel Gomez Rodriguez. 2022. Provably Improving Expert Predictions with Conformal Prediction. arXiv:2201.12006 [cs.LG]
426
+ [34] Sarah Tan, Julius Adebayo, Kori Inkpen, and Ece Kamar. 2018. Investigating human+machine complementarity for recidivism predictions. NeurIPS 2018 Workshop on Ethical, Social and Governance Issues in AI (2018).
427
+ [35] I. Valera, A. Singla, and M. Gomez Rodriguez. 2018. Enhancing the Accuracy and Fairness of Human Decision Making. In Advances in Neural Information Processing Systems 31 (NeurIPS 2018). Curran Associates, Inc., 1774-1783. http://papers.nips.cc/paper/7448-enhancing-the-accuracy-and-fairness-of-human-decision-making.pdf
428
+ [36] Kailas Vodrahalli, Tobias Gerstenberg, and James Zou. 2021. Do Humans Trust Advice More if it Comes from AI? An Analysis of Human-AI Interactions. arXiv preprint arXiv:2107.07015 (2021).
429
+ [37] Xuezhi Wang, Nithum Thain, Anu Sinha, Flavien Prost, Ed H Chi, Jilin Chen, and Alex Beutel. 2021. Practical Compositional Fairness: Understanding Fairness in Multi-Component Recommender Systems. In Proceedings of the 14th ACM International Conference on Web Search and Data Mining. 436-444.
430
+ [38] Qian Yang, Alex Scuito, John Zimmerman, Jodi Forlizzi, and Aaron Steinfeld. 2018. Investigating how experienced UX designers effectively work with machine learning. In Proceedings of the 2018 Designing Interactive Systems Conference. 585-596.
431
+ [39] Ming Yin, Jennifer Wortman Vaughan, and Hanna Wallach. 2019. Understanding the effect of accuracy on trust in machine learning models. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems. 1-12.
432
+ [40] Muhammad Bilal Zafar, Isabel Valera, Manuel Gomez Rodriguez, and Krishna P. Gummadi. 2017. Fairness Beyond Disparate Treatment & Disparate Impact: Learning Classification without Disparate Mistreatment. In Proceedings of the 26th International Conference on World Wide Web (Perth, Australia) (WWW '17). International World Wide Web Conferences Steering Committee, Republic and Canton of Geneva, CHE, 1171-1180. https://doi.org/10.1145/3038912.3052660
433
+
434
+ # A MODELING COMBINING FUNCTIONS
435
+
436
+ The examples presented in Section 3 reflect multiple ways humans could incorporate algorithmic inputs. However, their functional forms are in some cases less clean to analyze. Definition 4 gives an exemplar weighting function (original to this work) that we created in order to illustrate common patterns in weighting functions, while also allowing for tractable theoretical analysis. In this function, for $m > 0$ , the combining rule is more likely to select the algorithm when $a < h$ (when the algorithm has the lower loss rate). For $m < 0$ , the reverse is true: the combining rule "mistakenly" goes with the input with higher loss.
437
+
438
+ Definition 4 (Exemplar weighting function). The exemplar weighting function is given by:
439
+
440
+ $$
441
+ w _ {h} (a, h) = \left\{ \begin{array}{l l} b - m \cdot (h - a) & 0 \leq b - m \cdot (h - a) \leq 1 \\ 0 & b - m \cdot (h - a) < 0 \\ 1 & b - m \cdot (h - a) > 1 \end{array} \right.
442
+ $$
443
+
444
+ Figure 3 plots the weighting function for each of the selection rules presented. Note that, in general, as the human gets higher loss, the weight on the human decreases.
445
+
446
+ ![](images/49c98b23aebf77387d430689b9fcb76c3bbe0db97b14bc98ee5eece81f79615b.jpg)
447
+ Fig. 3. The weight on the human, given the difference $h - a$ between human and algorithmic loss, for multiple weighting functions. Note that, in general, as the human's loss grows relative to the algorithm's, the weight on the human decreases.
448
+
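+ To make Definition 4 concrete, the following is a minimal numerical sketch (ours, purely illustrative: the regime losses and the parameters $b$ , $m$ are hypothetical) of the clipped exemplar weighting function and the resulting combined loss:
+
+ ```python
+ import numpy as np
+
+ def w_h(a, h, b=0.5, m=2.0):
+     """Exemplar weight on the human, clipped to [0, 1]."""
+     return float(np.clip(b - m * (h - a), 0.0, 1.0))
+
+ # Two regimes where neither input dominates: the algorithm is better in
+ # regime 1, the human in regime 2; both average to a loss of 0.25.
+ p = [0.5, 0.5]                 # regime probabilities
+ a = [0.1, 0.4]                 # algorithmic loss per regime
+ h = [0.3, 0.2]                 # human loss per regime
+ C = sum(pi * ((1 - w_h(ai, hi)) * ai + w_h(ai, hi) * hi)
+         for pi, ai, hi in zip(p, a, h))
+ A, H = np.dot(p, a), np.dot(p, h)
+ print(C, C < min(A, H))        # 0.17 True: complementarity
+ ```
+
+ With $m > 0$ the rule shifts weight toward whichever input has lower loss in each regime, which is exactly what allows the combined loss to drop below both average losses.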
449
+ Lemma 11, below, gives an analogous version of Lemma 5 for the exemplar function. Note that it similarly shows that complementarity occurs when $|\delta_{a} - \delta_{h}|$ is large (when losses are highly variable).
450
+
451
+ Lemma 11. Consider the exemplar weighting function with $N = 2$ and $w_{h}(a_{1}, h_{1}), w_{h}(a_{2}, h_{2}) < 1$ , and where $A \leq H$ . Then, the system exhibits complementarity whenever:
452
+
453
+ $$
454
+ \sqrt {H - A} \cdot \sqrt {\frac {1 - p}{p} \cdot \left(\frac {1 - b}{m} - (H - A)\right)} < | \delta_ {a} - \delta_ {h} |
455
+ $$
456
+
457
+ # B PROOFS
458
+
459
+ Lemma 1. Any combining rule relying only on loss rates (Assumption 1) with bounded output (Assumption 2) can be written as a combining rule with a weighting function $0 \leq w_{h}(a_{i}, h_{i}) \leq 1$ .
460
+
461
+ Proof. We will define a weighting function as follows:
462
+
463
+ $$
464
+ w _ {h} (a, h) = \left\{ \begin{array}{l l} \frac {c (a , h) - a}{h - a} & a \neq h \\ \frac {1}{2} & a = h \end{array} \right.
465
+ $$
466
+
467
+ Note that if $c(a, h) = h$ , then $w_h(a, h) = 1$ (the weighting function puts all weight on the unaided human), and if $c(a, h) = a$ , then $w_h(a, h) = 0$ (the weighting function puts no weight on the unaided human).
468
+
469
+ First, we'll show that $w_{h}(a,h) \in [0,1]$ . If $a = h$ , this is true by construction (because $w_{h}(a,h) = \frac{1}{2}$ ). What we want to show is:
470
+
471
+ $$
472
+ 0 \leq \frac {c (a , h) - a}{h - a} \leq 1
473
+ $$
474
+
475
+ If $h > a$ , then this is equivalent to requiring:
476
+
477
+ $$
478
+ 0 \leq c (a, h) - a \leq h - a
479
+ $$
480
+
481
+ The lefthand inequality is satisfied because $c(a, h) \geq \min(a, h) = a$ (in this case). The righthand inequality is satisfied because $c(a, h) \leq \max(a, h) = h$ (in this case).
482
+
483
+ On the other hand, if $h < a$ , then the inequality we want to show is:
484
+
485
+ $$
486
+ 0 \geq c (a, h) - a \geq h - a
487
+ $$
488
+
489
+ Again, the lefthand inequality is satisfied because $c(a,h) \leq \max(a,h) = a$ (in this case). The righthand inequality is satisfied because $c(a,h) \geq \min(a,h) = h$ (in this case).
490
+
491
+ Next, we'll show that this weighting function is correct: that is, that:
492
+
493
+ $$
494
+ c (a, h) = (1 - w _ {h} (a, h)) \cdot a + w _ {h} (a, h) \cdot h
495
+ $$
496
+
497
+ Note that Assumption 2 means that if $a = h$ , then $c(a, h) = a = h$ , so any $w_h(a, h) \in [0,1]$ would result in a correct weighting function. For $a \neq h$ , we can write:
498
+
499
+ $$
500
+ \begin{array}{l} \left(1 - w _ {h} (a, h)\right) \cdot a + w _ {h} (a, h) \cdot h \\ = a \cdot \left(1 - \frac {c (a , h) - a}{h - a}\right) + h \cdot \frac {c (a , h) - a}{h - a} \\ = a \cdot \frac {h - a - c (a , h) + a}{h - a} + h \cdot \frac {c (a , h) - a}{h - a} \\ = a \cdot \frac {h - c (a , h)}{h - a} + h \cdot \frac {c (a , h) - a}{h - a} \\ = \frac {a \cdot h - a \cdot c (a , h) + h \cdot c (a , h) - a \cdot h}{h - a} \\ = \frac {c (a , h) \cdot (h - a)}{h - a} \\ = c (a, h) \\ \end{array}
501
+ $$
502
+
503
+ as desired.
504
+
505
+ Lemma 2. A human-algorithm system where unaided human and algorithm loss rates are constant over regimes can never achieve complementary performance.
506
+
507
+ Proof. Constant loss rates mean that $a_{i} = A$ and $h_i = H$ for all $i \in [N]$ . The combined system has loss:
508
+
509
+ $$
510
+ \sum_ {i \in [ N ]} p _ {i} \cdot c (a _ {i}, h _ {i}) = \sum_ {i \in [ N ]} p _ {i} \cdot c (A, H) = c (A, H) \geq \min (A, H)
511
+ $$
512
+
513
+ Lemma 3. Complementarity is impossible if one of the human or algorithm always weakly dominates the loss of the other: that is, if $a_i \leq h_i$ for all $i$ , or $a_i \geq h_i$ for all $i$ .
514
+
515
+ Proof. WLOG, we will assume that $A \leq H$ . Then, complementarity occurs when:
516
+
517
+ $$
518
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot c \left(a _ {i}, h _ {i}\right) < A
519
+ $$
520
+
521
+ $$
522
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(\left(1 - w _ {h} \left(a _ {i}, h _ {i}\right)\right) \cdot a _ {i} + w _ {h} \left(a _ {i}, h _ {i}\right) \cdot h _ {i}\right) < \sum_ {i = 1} ^ {N} p _ {i} \cdot a _ {i}
523
+ $$
524
+
525
+ $$
526
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(- w _ {h} \left(a _ {i}, h _ {i}\right) a _ {i} + w _ {h} \left(a _ {i}, h _ {i}\right) \cdot h _ {i}\right) < 0
527
+ $$
528
+
529
+ $$
530
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) \cdot \left(h _ {i} - a _ {i}\right) < 0
531
+ $$
532
+
533
+ If the algorithm weakly dominates the unaided human ( $h_i \geq a_i$ for all $i$ ), then this inequality can never be satisfied because the entire lefthand side is nonnegative. If the unaided human weakly dominates the algorithm ( $a_i \geq h_i$ for all $i$ ), then $A \geq H$ . Combined with our previous assumption that $A \leq H$ , this means we must have $A = H$ : both have identical average losses.
534
+
535
+ We will now show that, in the case where $A = H$ , complementarity is still impossible. If $A = H$ , we could have equivalently written the complementarity condition as:
536
+
537
+ $$
538
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot c \left(a _ {i}, h _ {i}\right) < H = A
539
+ $$
540
+
541
+ $$
542
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(\left(1 - w _ {h} \left(a _ {i}, h _ {i}\right)\right) \cdot a _ {i} + w _ {h} \left(a _ {i}, h _ {i}\right) \cdot h _ {i}\right) < \sum_ {i = 1} ^ {N} p _ {i} \cdot h _ {i}
543
+ $$
544
+
545
+ $$
546
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(\left(1 - w _ {h} \left(a _ {i}, h _ {i}\right)\right) \cdot a _ {i} - \left(1 - w _ {h} \left(a _ {i}, h _ {i}\right)\right) \cdot h _ {i}\right) < 0
547
+ $$
548
+
549
+ $$
550
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(1 - w _ {h} \left(a _ {i}, h _ {i}\right)\right) \cdot \left(a _ {i} - h _ {i}\right) < 0
551
+ $$
552
+
553
+ In this case, we've assumed that $a_i \geq h_i$ . However, this means that the lefthand side of the above inequality is nonnegative, which means complementarity isn't satisfied.
554
+
555
+ Lemma 4. A combining function $c(a_i, h_i)$ that is convex in $a_i, h_i$ can never achieve complementary performance.
556
+
557
+ Proof. The first inequality in this proof is by Jensen's inequality and the last inequality is due to our construction of the combining function:
558
+
559
+ $$
560
+ \sum_ {i \in [ N ]} p _ {i} \cdot c \left(a _ {i}, h _ {i}\right) \geq c \left(\sum_ {i \in [ N ]} p _ {i} \cdot a _ {i}, \sum_ {i \in [ N ]} p _ {i} \cdot h _ {i}\right) = c (A, H) \geq \min (A, H)
561
+ $$
562
+
563
+ Lemma 5. Consider the case where $N = 2$ , and WLOG assume that $A \leq H$ : the algorithm has lower average loss than the human. Then, the combined system exhibits complementarity whenever:
564
+
565
+ $$
566
+ (H - A) \cdot \frac {w _ {h} \left(a _ {1} , h _ {1}\right) + \frac {1 - p}{p} \cdot w _ {h} \left(a _ {2} , h _ {2}\right)}{\left| w _ {h} \left(a _ {2} , h _ {2}\right) - w _ {h} \left(a _ {1} , h _ {1}\right) \right|} < \left| \delta_ {a} - \delta_ {h} \right|
567
+ $$
568
+
569
+ Proof. If we assume that $A \leq H$ , then complementarity occurs whenever $C < A$ , or:
570
+
571
+ $$
572
+ p \cdot c (a _ {1}, h _ {1}) + (1 - p) \cdot c (a _ {2}, h _ {2}) < A
573
+ $$
574
+
575
+ $$
576
+ p \cdot \left(\left(1 - w _ {h} \left(a _ {1}, h _ {1}\right)\right) \cdot a _ {1} + w _ {h} \left(a _ {1}, h _ {1}\right) \cdot h _ {1}\right) + (1 - p) \cdot \left(\left(1 - w _ {h} \left(a _ {2}, h _ {2}\right)\right) \cdot a _ {2} + w _ {h} \left(a _ {2}, h _ {2}\right) \cdot h _ {2}\right) < p \cdot a _ {1} + (1 - p) \cdot a _ {2}
577
+ $$
578
+
579
+ $$
580
+ p \cdot w _ {h} \left(a _ {1}, h _ {1}\right) \cdot \left(h _ {1} - a _ {1}\right) + (1 - p) \cdot w _ {h} \left(a _ {2}, h _ {2}\right) \cdot \left(h _ {2} - a _ {2}\right) < 0
581
+ $$
582
+
583
+ We insert the values of $a_{i},h_{i}$ to get:
584
+
585
+ $$
586
+ p \cdot w _ {h} \left(a _ {1}, h _ {1}\right) \cdot \left(H + \delta_ {h} - A - \delta_ {a}\right) + (1 - p) \cdot w _ {h} \left(a _ {2}, h _ {2}\right) \cdot \left(H - \frac {p}{1 - p} \cdot \delta_ {h} - A + \frac {p}{1 - p} \cdot \delta_ {a}\right) < 0
587
+ $$
588
+
589
+ $$
590
+ (H - A) \cdot (p \cdot w _ {h} (a _ {1}, h _ {1}) + (1 - p) \cdot w _ {h} (a _ {2}, h _ {2})) + p \cdot w _ {h} (a _ {1}, h _ {1}) \cdot (\delta_ {h} - \delta_ {a}) - p \cdot w _ {h} (a _ {2}, h _ {2}) \cdot (\delta_ {h} - \delta_ {a}) < 0
591
+ $$
592
+
593
+ $$
594
+ (H - A) \cdot (p \cdot w _ {h} (a _ {1}, h _ {1}) + (1 - p) \cdot w _ {h} (a _ {2}, h _ {2})) - p \cdot (\delta_ {a} - \delta_ {h}) \cdot (w _ {h} (a _ {1}, h _ {1}) - w _ {h} (a _ {2}, h _ {2})) < 0
595
+ $$
596
+
597
+ Note that we have assumed $H \geq A$ , so in order for this inequality to hold, the other term (involving $\delta_{a}, \delta_{h}$ ) must be positive. Note also that, by Lemma 2, we cannot have $w_{h}(a_{1}, h_{1}) = w_{h}(a_{2}, h_{2})$ if the system exhibits complementarity. In order for the inequality to be satisfied, one of two conditions must hold:
598
+
599
+ - Case 1: $\delta_{a} > \delta_{h}$ and $w_{h}(a_{1}, h_{1}) > w_{h}(a_{2}, h_{2})$ (the unaided human is weighted more heavily in regime 1). Note that in this case, we must have $h_{2} > a_{2}$ , because:
600
+
601
+ $$
602
+ h _ {2} > a _ {2}
603
+ $$
604
+
605
+ $$
606
+ H - \frac {p}{1 - p} \cdot \delta_ {h} > A - \frac {p}{1 - p} \cdot \delta_ {a}
607
+ $$
608
+
609
+ $$
610
+ H - A - \frac {p}{1 - p} \cdot \left(\delta_ {h} - \delta_ {a}\right) > 0
611
+ $$
612
+
613
+ $$
614
+ H - A + \frac {p}{1 - p} \cdot \left(\delta_ {a} - \delta_ {h}\right) > 0
615
+ $$
616
+
617
+ which is satisfied because $H - A \geq 0$ and $\delta_{a} > \delta_{h}$ . By Lemma 3, we know that $h_2 > a_2$ implies that $h_1 \leq a_1$ . Taken together with $w_{h}(a_{1},h_{1}) > w_{h}(a_{2},h_{2})$ , this means that the combined system must weight the human more heavily in regime 1, where the human has lower loss than the algorithm.
618
+
619
+ - Case 2: $\delta_{a} < \delta_{h}$ and $w_{h}(a_{1}, h_{1}) < w_{h}(a_{2}, h_{2})$ (the unaided human is weighted more heavily in regime 2). In this case, we must have $h_{1} > a_{1}$ , because:
620
+
621
+ $$
622
+ H + \delta_ {h} > A + \delta_ {a}
623
+ $$
624
+
625
+ $$
626
+ H - A + \delta_ {h} - \delta_ {a} > 0
627
+ $$
628
+
629
+ This must be satisfied because $H - A \geq 0$ and $\delta_h > \delta_a$ . By similar reasoning to above, this means that $h_2 \leq a_2$ . Taken with $w_h(a_1, h_1) < w_h(a_2, h_2)$ , this again means that the system must weight the unaided human more heavily in the regime where it has lower loss.
630
+
631
+ Finally, we can simplify the inequality:
632
+
633
+ $$
634
+ (H - A) \cdot (p \cdot w _ {h} (a _ {1}, h _ {1}) + (1 - p) \cdot w _ {h} (a _ {2}, h _ {2})) < p \cdot (\delta_ {a} - \delta_ {h}) \cdot (w _ {h} (a _ {1}, h _ {1}) - w _ {h} (a _ {2}, h _ {2}))
635
+ $$
636
+
637
+ If $\delta_a > \delta_h$ , then we know that $w_h(a_1, h_1) > w_h(a_2, h_2)$ , so we can rewrite this as:
638
+
639
+ $$
640
+ (H - A) \cdot \frac {w _ {h} \left(a _ {1} , h _ {1}\right) + \frac {1 - p}{p} \cdot w _ {h} \left(a _ {2} , h _ {2}\right)}{w _ {h} \left(a _ {1} , h _ {1}\right) - w _ {h} \left(a _ {2} , h _ {2}\right)} < \delta_ {a} - \delta_ {h}
641
+ $$
642
+
643
+ On the other hand, if $\delta_{a} < \delta_{h}$ , we know that $w_{h}(a_{1}, h_{1}) < w_{h}(a_{2}, h_{2})$ , so we can rewrite this as:
644
+
645
+ $$
646
+ (H - A) \cdot (p \cdot w _ {h} (a _ {1}, h _ {1}) + (1 - p) \cdot w _ {h} (a _ {2}, h _ {2})) < p \cdot (\delta_ {h} - \delta_ {a}) \cdot (w _ {h} (a _ {2}, h _ {2}) - w _ {h} (a _ {1}, h _ {1}))
647
+ $$
648
+
649
+ $$
650
+ (H - A) \cdot \frac {w _ {h} \left(a _ {1} , h _ {1}\right) + \frac {1 - p}{p} \cdot w _ {h} \left(a _ {2} , h _ {2}\right)}{w _ {h} \left(a _ {2} , h _ {2}\right) - w _ {h} \left(a _ {1} , h _ {1}\right)} < \delta_ {h} - \delta_ {a}
651
+ $$
652
+
653
+ Either way, this simplifies to:
654
+
655
+ $$
656
+ (H - A) \cdot \frac {w _ {h} \left(a _ {1} , h _ {1}\right) + \frac {1 - p}{p} \cdot w _ {h} \left(a _ {2} , h _ {2}\right)}{\left| w _ {h} \left(a _ {2} , h _ {2}\right) - w _ {h} \left(a _ {1} , h _ {1}\right) \right|} < | \delta_ {a} - \delta_ {h} |
657
+ $$
658
+
659
+ ![](images/52cbfb129d791ad3dad2ccc5f4de7cb14cb11eab868bc2bea5d071fc19dac8ce.jpg)
660
+
661
+ Lemma 7. WLOG, assume that $A \leq H$ : the algorithm has lower loss, on average. Then, the condition below gives necessary and sufficient conditions for complementarity of the human-algorithm system:
662
+
663
+ $$
664
+ (H - A) \cdot \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) < \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (\delta_ {a i} - \delta_ {h i})
665
+ $$
666
+
667
+ If we view $w_{h}(a_{i}, h_{i})$ and $\delta_{ai}, \delta_{hi}$ as random variables over the instance space, with probability mass governed by the distribution of instances given by $\{p_{i}\}$ , then we can interpret the condition as:
668
+
669
+ $$
670
+ (H - A) \cdot \mathbb {E} \left[ w _ {h} \left(a _ {i}, h _ {i}\right) \right] < C o v \left(w _ {h} \left(a _ {i}, h _ {i}\right), \delta_ {a i} - \delta_ {h i}\right)
671
+ $$
672
+
673
+ where $\text{Cov}(\cdot)$ gives the covariance.
674
+
675
+ Proof. If we assume that $A \leq H$ , then complementarity occurs whenever $C < A$ , or:
676
+
677
+ $$
678
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot c \left(a _ {i}, h _ {i}\right) < \sum_ {i = 1} ^ {N} p _ {i} \cdot a _ {i}
679
+ $$
680
+
681
+ $$
682
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(\left(1 - w _ {h} \left(a _ {i}, h _ {i}\right)\right) \cdot a _ {i} + w _ {h} \left(a _ {i}, h _ {i}\right) \cdot h _ {i}\right) < \sum_ {i = 1} ^ {N} p _ {i} \cdot a _ {i}
683
+ $$
684
+
685
+ $$
686
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(- w _ {h} \left(a _ {i}, h _ {i}\right) \cdot a _ {i} + w _ {h} \left(a _ {i}, h _ {i}\right) \cdot h _ {i}\right) < 0
687
+ $$
688
+
689
+ $$
690
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (h _ {i} - a _ {i}) < 0
691
+ $$
692
+
693
+ Plugging in for the values gives:
694
+
695
+ $$
696
+ \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (H + \delta_ {h i} - A - \delta_ {a i}) < 0
697
+ $$
698
+
699
+ $$
700
+ (H - A) \cdot \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) < \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) \cdot \left(\delta_ {a i} - \delta_ {h i}\right)
701
+ $$
702
+
703
+ as desired.
704
+
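+ As a quick sanity check on Lemma 7, the following sketch (with hypothetical regime data chosen by us) verifies numerically that the summation form and the covariance form of the condition coincide:
+
+ ```python
+ import numpy as np
+
+ # Hypothetical two-regime instance: p are regime probabilities, w the
+ # weights on the human, and delta_a, delta_h the per-regime deviations
+ # (each set of deviations must average to zero under p).
+ p = np.array([0.5, 0.5])
+ w = np.array([0.1, 0.9])
+ delta_a = np.array([-0.15, 0.15])
+ delta_h = np.array([0.05, -0.05])
+ A, H = 0.25, 0.30
+
+ lhs = (H - A) * np.dot(p, w)                 # (H - A) * E[w]
+ rhs = np.dot(p, w * (delta_a - delta_h))     # E[w * (delta_a - delta_h)]
+ d = delta_a - delta_h
+ cov = np.dot(p, (w - np.dot(p, w)) * (d - np.dot(p, d)))
+ assert np.isclose(rhs, cov)                  # E[d] = 0, so the sum is Cov
+ print(lhs < rhs)                             # True: complementarity holds
+ ```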
705
+ Lemma 9. WLOG assume that $A \leq H$ : the algorithm has lower average loss than the human. Then, any system exhibiting complementarity satisfies the following lower bound on $\epsilon_{a} + \epsilon_{h}$ , the combined loss disparity of the unaided human and algorithm:
706
+
707
+ $$
708
+ A - C + (H - A) \cdot \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) < \epsilon_ {a} + \epsilon_ {h}
709
+ $$
710
+
711
+ Proof. First, we will use the complementarity result from partway through the proof of Lemma 7. We calculate the difference between the average algorithmic loss and the average combined human-algorithmic loss (which gives us the average benefit of collaboration):
712
+
713
+ $$
714
+ \begin{array}{l} A - C = \sum_ {i = 1} ^ {N} p _ {i} \cdot a _ {i} - \sum_ {i = 1} ^ {N} p _ {i} \cdot c \left(a _ {i}, h _ {i}\right) \\ = \sum_ {i = 1} ^ {N} p _ {i} \cdot a _ {i} - \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(\left(1 - w _ {h} \left(a _ {i}, h _ {i}\right)\right) \cdot a _ {i} + w _ {h} \left(a _ {i}, h _ {i}\right) \cdot h _ {i}\right) \\ = \sum_ {i = 1} ^ {N} p _ {i} \cdot \left(a _ {i} - a _ {i} + w _ {h} \left(a _ {i}, h _ {i}\right) \cdot a _ {i} - w _ {h} \left(a _ {i}, h _ {i}\right) \cdot h _ {i}\right) \\ = \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) \cdot \left(a _ {i} - h _ {i}\right) \\ = \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) \cdot \left(A + \delta_ {a i} - H - \delta_ {h i}\right) \\ = (A - H) \cdot \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) + \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) \cdot \left(\delta_ {a i} - \delta_ {h i}\right) \\ \end{array}
715
+ $$
716
+
717
+ Rearranging gives:
718
+
719
+ $$
720
+ A - C + (H - A) \cdot \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) = \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} \left(a _ {i}, h _ {i}\right) \cdot \left(\delta_ {a i} - \delta_ {h i}\right)
721
+ $$
722
+
723
+ Next, we will define:
724
+
725
+ $$
726
+ \epsilon_ {a} = a _ {a +} - a _ {a -} = A + \delta_ {a +} - A - \delta_ {a -} = \delta_ {a +} - \delta_ {a -}
727
+ $$
728
+
729
+ $$
730
+ \epsilon_ {h} = h _ {h +} - h _ {h -} = H + \delta_ {h +} - H - \delta_ {h -} = \delta_ {h +} - \delta_ {h -}
731
+ $$
732
+
733
+ where $a+, a-, h+, h-$ are the indices of the maximum and minimum loss for the algorithm and unaided human, respectively. Note that we require $\sum_{i=1}^{N} p_i \cdot \delta_{ai} = \sum_{i=1}^{N} p_i \cdot \delta_{hi} = 0$ , which implies that $\delta_{a+}, \delta_{h+} \geq 0$ and $\delta_{a-}, \delta_{h-} \leq 0$ . Then, we know that:
734
+
735
+ $$
736
+ \epsilon_ {a} + \epsilon_ {h} = \delta_ {a +} - \delta_ {a -} + \delta_ {h +} - \delta_ {h -} > \delta_ {a +} - \delta_ {h -}
737
+ $$
738
+
739
+ Define $\mathcal{P} = \{i \mid \delta_{ai} \geq \delta_{hi}\}$ and $\mathcal{N} = \{i \mid \delta_{ai} < \delta_{hi}\}$ . By definition, $\delta_{a + }\geq \delta_{ai}$ and $\delta_{h - }\leq \delta_{h i}$ for all $i\in [N]$ . Then, we know that:
740
+
741
+ $$
742
+ \delta_ {a +} - \delta_ {h -} \geq \sum_ {i \in \mathcal {P}} p _ {i} \cdot (\delta_ {a i} - \delta_ {h i}) \geq \sum_ {i \in \mathcal {P}} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (\delta_ {a i} - \delta_ {h i})
743
+ $$
744
+
745
+ where we have used the fact that $w_{h}(a_{i},h_{i})\leq 1$ . Finally, we know that:
746
+
747
+ $$
748
+ \sum_ {i \in \mathcal {P}} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (\delta_ {a i} - \delta_ {h i}) \geq \sum_ {i \in \mathcal {P}} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (\delta_ {a i} - \delta_ {h i}) + \sum_ {i \in \mathcal {N}} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (\delta_ {a i} - \delta_ {h i}) = \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (\delta_ {a i} - \delta_ {h i})
749
+ $$
750
+
751
+ where the first inequality comes from the fact that $\delta_{ai} - \delta_{hi} < 0$ for $i\in \mathcal{N}$ (and $w_{h}(a_{i}, h_{i}) \geq 0$ ). Finally, we can combine this analysis with our previous analysis on the gap in loss rate between the algorithm and the combined system:
752
+
753
+ $$
754
+ A - C + (H - A) \cdot \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) = \sum_ {i = 1} ^ {N} p _ {i} \cdot w _ {h} (a _ {i}, h _ {i}) \cdot (\delta_ {a i} - \delta_ {h i}) \leq \delta_ {a +} - \delta_ {h -} < \epsilon_ {a} + \epsilon_ {h}
755
+ $$
756
+
757
+ as desired.
758
+
759
+ ![](images/3d38d02db5f9e8da27bafea31e8d6c29683c6b2283dcde6146158d8818e63f99.jpg)
760
+
761
+ Lemma 10. Define $i+$ as the regime where the combined human-algorithm system has highest loss and $i-$ as the regime where it has lowest loss. Then, the loss disparity of the combined system is upper bounded by the loss disparity of the unaided human or algorithm, so long as neither the unaided human nor the algorithm dominates the other in both $i+$ and $i-$ . That is,
762
+
763
+ $$
764
+ \text {If either case is satisfied:} \left\{ \begin{array}{l} h _ {i +} \leq a _ {i +} \text { and } h _ {i -} \geq a _ {i -} \\ h _ {i +} \geq a _ {i +} \text { and } h _ {i -} \leq a _ {i -} \end{array} \right. \quad \Rightarrow \quad \epsilon_ {c} \leq \max (\epsilon_ {a}, \epsilon_ {h})
765
+ $$
766
+
767
+ Proof. We wish to upper bound $\epsilon_{c}$ , which is given by:
768
+
769
+ $$
770
+ \epsilon_ {c} = c \left(a _ {i +}, h _ {i +}\right) - c \left(a _ {i -}, h _ {i -}\right)
771
+ $$
772
+
773
+ such that:
774
+
775
+ $$
776
+ i + = \operatorname {argmax} _ {i \in [ N ]} c \left(a _ {i}, h _ {i}\right) \quad i - = \operatorname {argmin} _ {i \in [ N ]} c \left(a _ {i}, h _ {i}\right)
777
+ $$
778
+
779
+ Note that by Assumption 2, we must have:
780
+
781
+ $$
782
+ c \left(a _ {i +}, h _ {i +}\right) \leq \max \left(a _ {i +}, h _ {i +}\right) \text { and } c \left(a _ {i -}, h _ {i -}\right) \geq \min \left(a _ {i -}, h _ {i -}\right)
783
+ $$
784
+
785
+ The statement of this lemma gives two cases, which we will consider in turn.
786
+
787
+ In Case 1, we assume that:
788
+
789
+ $$
790
+ h _ {i +} \leq a _ {i +} \text { and } h _ {i -} \geq a _ {i -}
791
+ $$
792
+
793
+ In this case, we know that:
794
+
795
+ $$
796
+ c \left(a _ {i +}, h _ {i +}\right) - c \left(a _ {i -}, h _ {i -}\right) \leq a _ {i +} - a _ {i -} \leq \epsilon_ {a} \leq \max \left(\epsilon_ {a}, \epsilon_ {h}\right)
797
+ $$
798
+
799
+ In Case 2, we assume that:
800
+
801
+ $$
802
+ h _ {i +} \geq a _ {i +} \text {a n d} h _ {i -} \leq a _ {i -}
803
+ $$
804
+
805
+ In this case, we know that:
806
+
807
+ $$
808
+ c (a _ {i +}, h _ {i +}) - c (a _ {i -}, h _ {i -}) \leq h _ {i +} - h _ {i -} \leq \epsilon_ {h} \leq \max (\epsilon_ {a}, \epsilon_ {h})
809
+ $$
810
+
811
+ In either case, $\epsilon_c \leq \max (\epsilon_a, \epsilon_h)$ .
812
+
813
+ ![](images/b44fde5bfcb67ac7090758baf340920b55ea209fa3a3d68a156fbbd0cab89d23.jpg)
814
+
815
+ Lemma 11. Consider the exemplar weighting function with $N = 2$ and $w_{h}(a_{1}, h_{1}), w_{h}(a_{2}, h_{2}) < 1$ , and where $A \leq H$ . Then, the system exhibits complementarity whenever:
816
+
817
+ $$
818
+ \sqrt {H - A} \cdot \sqrt {\frac {1 - p}{p} \cdot \left(\frac {1 - b}{m} - (H - A)\right)} < | \delta_ {a} - \delta_ {h} |
819
+ $$
820
+
821
+ Proof. This lemma is a more specific version of Lemma 5, so we start with an intermediate result from that proof. Complementarity occurs whenever:
822
+
823
+ $$
824
+ p \cdot w _ {h} \left(a _ {1}, h _ {1}\right) \cdot \left(h _ {1} - a _ {1}\right) + (1 - p) \cdot w _ {h} \left(a _ {2}, h _ {2}\right) \cdot \left(h _ {2} - a _ {2}\right) < 0
825
+ $$
826
+
827
+ For the exemplar combining rule, we have that:
828
+
829
+ $$
830
+ w _ {h} \left(a _ {i}, h _ {i}\right) = b - m \cdot \left(h _ {i} - a _ {i}\right)
831
+ $$
832
+
833
+ Plugging in for the values of $w_{h}(a_{1}, h_{1})$ , $w_{h}(a_{2}, h_{2})$ gives:
834
+
835
+ $$
836
+ \begin{array}{l} p \cdot (b - m \cdot (h _ {1} - a _ {1})) \cdot (h _ {1} - a _ {1}) + (1 - p) \cdot (b - m \cdot (h _ {2} - a _ {2})) \cdot (h _ {2} - a _ {2}) < 0 \\ b \cdot (p \cdot (h _ {1} - a _ {1}) + (1 - p) \cdot (h _ {2} - a _ {2})) - m \cdot (p \cdot (h _ {1} - a _ {1}) ^ {2} + (1 - p) \cdot (h _ {2} - a _ {2}) ^ {2}) < 0 \\ b \cdot (p \cdot h _ {1} - p \cdot a _ {1} + (1 - p) \cdot h _ {2} - (1 - p) \cdot a _ {2}) - m \cdot (p \cdot (h _ {1} - a _ {1}) ^ {2} + (1 - p) \cdot (h _ {2} - a _ {2}) ^ {2}) < 0 \\ b \cdot (H - A) - m \left(p \cdot \left(h _ {1} - a _ {1}\right) ^ {2} + (1 - p) \cdot \left(h _ {2} - a _ {2}\right) ^ {2}\right) < 0 \\ \end{array}
837
+ $$
838
+
839
+ We can analyze the term with the $m$ coefficient by plugging in for values of $a_{i}, h_{i}$ .
840
+
841
+ $$
842
+ \begin{array}{l} p \cdot (h _ {1} - a _ {1}) ^ {2} + (1 - p) \cdot (h _ {2} - a _ {2}) ^ {2} \\ = p \cdot (H + \delta_ {h} - A - \delta_ {a}) ^ {2} + (1 - p) \cdot \left(H - \frac {p}{1 - p} \cdot \delta_ {h} - A + \frac {p}{1 - p} \cdot \delta_ {a}\right) ^ {2} \\ \end{array}
843
+ $$
844
+
845
+ We expand out each to get:
846
+
847
+ $$
848
+ \begin{array}{l} = p \cdot (H - A) ^ {2} + p \cdot (\delta_ {h} - \delta_ {a}) ^ {2} + 2 p \cdot (H - A) \cdot (\delta_ {h} - \delta_ {a}) + (1 - p) \cdot (H - A) ^ {2} \\ + (1 - p) \cdot \frac {p ^ {2}}{(1 - p) ^ {2}} \cdot (\delta_ {a} - \delta_ {h}) ^ {2} + 2 \cdot (1 - p) \cdot (H - A) \cdot \frac {p}{1 - p} \cdot (\delta_ {a} - \delta_ {h}) \\ \end{array}
849
+ $$
850
+
851
+ We note that two of the terms cancel:
852
+
853
+ $$
854
+ \begin{array}{l} 2 p \cdot (H - A) \cdot (\delta_ {h} - \delta_ {a}) + 2 \cdot (1 - p) \cdot (H - A) \cdot \frac {p}{1 - p} \cdot (\delta_ {a} - \delta_ {h}) \\ = 2 p \cdot (H - A) \cdot (\delta_ {h} - \delta_ {a}) + 2 \cdot p \cdot (H - A) \cdot (\delta_ {a} - \delta_ {h}) \\ = 0 \\ \end{array}
855
+ $$
856
+
857
+ Next, we can simplify the other terms:
858
+
859
+ $$
860
+ \begin{array}{l} = p \cdot (H - A) ^ {2} + p \cdot (\delta_ {h} - \delta_ {a}) ^ {2} + (1 - p) \cdot (H - A) ^ {2} + (1 - p) \cdot \frac {p ^ {2}}{(1 - p) ^ {2}} \cdot (\delta_ {a} - \delta_ {h}) ^ {2} \\ = (H - A) ^ {2} + p (\delta_ {h} - \delta_ {a}) ^ {2} + \frac {p ^ {2}}{1 - p} \cdot (\delta_ {a} - \delta_ {h}) ^ {2} \\ = (H - A) ^ {2} + \left(\delta_ {h} - \delta_ {a}\right) ^ {2} \cdot \left(p + \frac {p ^ {2}}{1 - p}\right) \\ = (H - A) ^ {2} + (\delta_ {h} - \delta_ {a}) ^ {2} \cdot \frac {p}{1 - p} \\ \end{array}
861
+ $$
862
+
866
+
867
+ where we have used that $p + \frac{p^2}{1 - p} = \frac{p - p^2 + p^2}{1 - p} = \frac{p}{1 - p}$ . We can combine this with the inequality we were analyzing earlier to get:
868
+
869
+ $$
870
+ (1 - b) \cdot (H - A) - m \cdot \left((H - A) ^ {2} + (\delta_ {h} - \delta_ {a}) ^ {2} \cdot \frac {p}{1 - p}\right) < 0
871
+ $$
872
+
873
+ $$
874
+ (1 - b) \cdot (H - A) - m \cdot (H - A) ^ {2} < m \cdot (\delta_ {h} - \delta_ {a}) ^ {2} \cdot \frac {p}{1 - p}
875
+ $$
876
+
877
+ $$
878
+ \frac {1 - p}{p} \left(\frac {1 - b}{m} \cdot (H - A) - (H - A) ^ {2}\right) < (\delta_ {h} - \delta_ {a}) ^ {2}
879
+ $$
880
+
881
+ where we have used the assumption that $0 < p < 1$ and $m > 0$ . If the lefthand side of the inequality is positive, we can take the square root of both sides to get:
882
+
883
+ $$
884
+ \sqrt {H - A} \cdot \sqrt {\frac {1 - p}{p} \left(\frac {1 - b}{m} - (H - A)\right)} < | \delta_ {h} - \delta_ {a} |
885
+ $$
886
+
887
+ Finally, we will show that the term under the square root is nonnegative, given the assumptions of this lemma. The term under the square root is negative if:
888
+
889
+ $$
890
+ \frac {1 - b}{m} - (H - A) < 0
891
+ $$
892
+
893
+ or:
894
+
895
+ $$
896
+ 1 < b - m \cdot (H - A) \tag {2}
897
+ $$
898
+
899
+ We will show that, if this happens, we must have $w_{h}(a_{1}, h_{1}) > 1$ or $w_{h}(a_{2}, h_{2}) > 1$ (either of which violates the assumptions of this lemma). For this combining rule,
900
+
901
+ $$
902
+ w _ {h} (a _ {1}, h _ {1}) = b - m \cdot (h _ {1} - a _ {1}) = b - m \cdot (H + \delta_ {h} - A - \delta_ {a}) = b - m \cdot (H - A) + m \cdot (\delta_ {a} - \delta_ {h})
903
+ $$
904
+
905
+ In the event that $\delta_a > \delta_h$ , the above expression exceeds the righthand side of Equation 2. Therefore, if the righthand side of Equation 2 is greater than 1, then $w_h(a_1, h_1)$ is also greater than 1, which violates the assumptions of the lemma. Similarly, we can write:
906
+
907
+ $$
908
+ w _ {h} (a _ {2}, h _ {2}) = b - m \cdot (h _ {2} - a _ {2}) = b - m \cdot \left(H - \delta_ {h} \cdot \frac {p}{1 - p} - A + \delta_ {a} \cdot \frac {p}{1 - p}\right) = b - m \cdot (H - A) + m \cdot \frac {p}{1 - p} \cdot (\delta_ {h} - \delta_ {a})
909
+ $$
910
+
911
+ In the event that $\delta_h > \delta_a$ , the above expression exceeds the righthand side of Equation 2. Therefore, if the righthand side of Equation 2 is greater than 1, then $w_h(a_2, h_2)$ is also greater than 1, which violates the assumptions of the lemma.
2202.08xxx/2202.08821/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b2a5a7840e3935352c19b4ac0bd688bc30307a4a5bc41412f315206d0253d78
3
+ size 823816
2202.08xxx/2202.08821/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08827/5a452966-07a3-4cce-816d-8e579346dd6c_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08827/5a452966-07a3-4cce-816d-8e579346dd6c_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08827/5a452966-07a3-4cce-816d-8e579346dd6c_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9b76b3a984a2e1cfbdf37dcc7892683dae101d31df91d72ba30b367691ac90d4
3
+ size 467454
2202.08xxx/2202.08827/full.md ADDED
@@ -0,0 +1,411 @@
 
 
 
 
1
+ # LAMP: Extracting Text from Gradients with Language Model Priors
2
+
3
+ Mislav Balunović, Dimitar I. Dimitrov, Nikola Jovanović, Martin Vechev
4
+
5
+ {mislav.balunovic,dimitar.iliev.dimitrov,nikola.jovanovic,martin.vechev}@inf.ethz.ch
8
+
9
+ Department of Computer Science
10
+
11
+ ETH Zurich
12
+
13
+ # Abstract
14
+
15
+ Recent work shows that sensitive user data can be reconstructed from gradient updates, breaking the key privacy promise of federated learning. While success was demonstrated primarily on image data, these methods do not directly transfer to other domains such as text. In this work, we propose LAMP, a novel attack tailored to textual data, that successfully reconstructs original text from gradients. Our attack is based on two key insights: (i) modeling prior text probability with an auxiliary language model, guiding the search towards more natural text, and (ii) alternating continuous and discrete optimization, which minimizes reconstruction loss on embeddings, while avoiding local minima by applying discrete text transformations. Our experiments demonstrate that LAMP is significantly more effective than prior work: it reconstructs 5x more bigrams and $23\%$ longer subsequences on average. Moreover, we are the first to recover inputs from batch sizes larger than 1 for textual models. These findings indicate that gradient updates of models operating on textual data leak more information than previously thought.
16
+
17
+ # 1 Introduction
18
+
19
+ Federated learning [24] (FL) is a widely adopted framework for training machine learning models in a decentralized way. Conceptually, FL aims to enable training of highly accurate models without compromising client data privacy, as the raw data never leaves client machines. However, recent work [28, 43, 41] has shown that the server can in fact recover the client data, by applying a reconstruction attack on the gradient updates sent from the client during training. Such attacks typically start from a randomly sampled input and modify it such that its corresponding gradients match the gradient update originally sent by the client. While most works focus on reconstruction attacks in computer vision, there has comparatively been little work in the text domain, despite the fact that some of the most prominent applications of FL involve learning over textual data, e.g., next-word prediction on mobile phones [30]. A key component of successful attacks in vision has been the use of image priors such as total variation [7]. These priors guide the reconstruction towards natural images, which are more likely to correspond to client data. However, the use of priors has so far been missing from attacks on text [43, 3], limiting their ability to reconstruct real client data.
20
+
21
+ This work: private text reconstruction with priors In this work, we propose LAMP, a new reconstruction attack which leverages language model priors to extract private text from gradients. The overview of our attack is given in Fig. 1. The attacker has access to a snapshot of the transformer network being trained in a federated manner (e.g., BERT), and a gradient $\nabla_{\theta}\mathcal{L}(\boldsymbol{x}^{*},y^{*})$ which the client has computed on that snapshot, using their private data. The attack starts by sampling token
22
+
23
+ ![](images/ca13fc57b4c1f399a5c66a1e71e4c899d6ec44e80f042c9540eb13a432100794.jpg)
24
+ Figure 1: An overview of LAMP. We initialize the reconstruction by sampling from a Gaussian distribution, and alternate between continuous and discrete optimization. Continuous optimization minimizes the reconstruction loss with an embedding regularization term. Discrete optimization forms candidates by applying transformations, and chooses the best candidate based on a combination of reconstruction loss and perplexity, as measured by an auxiliary language model (e.g., GPT-2).
25
+
26
+ embeddings from a Gaussian distribution to create the initial reconstruction. Then, at each step, we improve the reconstruction (shown in yellow) by alternating between continuous (blue) and discrete optimization (green). The continuous part minimizes the reconstruction loss, which measures how close the gradients of the current reconstruction are to the observed client gradients, together with an embedding regularization term. However, this is insufficient as the gradient descent can get stuck in a local optimum due to its inability to make discrete changes to the reconstruction. We address this issue by introducing a discrete step—namely, we generate a list of candidate sentences using several transformations on the sequence of tokens (e.g., moving a token) and select a candidate that minimizes the combined reconstruction loss and perplexity, which measures the likelihood of observing the text in a natural distribution. We use GPT-2 [29] as an auxiliary language model to measure the perplexity of each candidate (however, our method allows using other models). Our final reconstruction is computed by setting each embedding to its nearest neighbor from the vocabulary.
27
+
28
+ A key component of our reconstruction attack is the use of a language model prior combined with a search that alternates continuous and discrete optimization steps. Our experimental evaluation demonstrates the effectiveness of this approach: LAMP is able to extract text from state-of-the-art transformer models on several common datasets, reconstructing up to 5 times more bigrams than prior work. Moreover, we are the first to perform text reconstruction in more complex settings such as batch sizes larger than 1, fine-tuned models, and defended models. Overall, across all settings we demonstrate that LAMP is effective in reconstructing large portions of private text.
29
+
30
+ # Main contributions Our main contributions are:
31
+
32
+ - LAMP, a novel attack for recovering input text from gradients, which leverages an auxiliary language model to guide the search towards natural text, and a search procedure which alternates continuous and discrete optimization.
33
+ - An implementation of LAMP and its extensive experimental evaluation, demonstrating that it can reconstruct significantly more private text than prior work. We make our code publicly available at https://github.com/eth-sri/lamp.
34
+ - The first thorough experimental evaluation of text attacks in more complex settings such as larger batch sizes, fine-tuned models and defended models.
35
+
36
+ # 2 Related Work
37
+
38
+ Federated learning [24, 18] has attracted substantial interest [16] due to its ability to train deep learning models in a decentralized way, such that individual user data is not shared during training. Instead, individual clients calculate local gradient updates on their private data, and share them with a centralized server, which aggregates them to update the model [24]. The underlying assumption is that user data cannot be recovered from gradient updates. Recently, several works [28, 43, 41, 42]
39
+
40
+ demonstrated that gradients can in fact still leak information, invalidating the fundamental privacy assumption. Moreover, recent work achieved near-perfect image reconstruction from gradients [7, 40, 14]. Interestingly, prior work showed that an auxiliary model [14] or prior information [2] can significantly improve reconstruction quality. Finally, Huang et al. [12] noticed that gradient leakage attacks often make strong assumptions, namely that batch normalization statistics and ground truth labels are known. In our work, we do not assume knowledge of batch normalization statistics and as we focus on binary classification tasks, we can simply enumerate all possible labels.
41
+
42
+ Despite substantial progress on image reconstruction, attacks in other domains remain challenging, as the techniques used for images rely extensively on domain-specific knowledge. In the domain of text, in particular, where federated learning is often applied [32], only a handful of works exist [43, 3, 22]. DLG [43] was the first to attempt reconstruction from gradients coming from a transformer; TAG [3] extended DLG by adding an $L_{1}$ term to the reconstruction loss; finally, unlike TAG and DLG, which are optimization-based techniques, APRIL [22] recently demonstrated an exact gradient leakage technique applicable to transformer networks. However, APRIL assumes a batch size of 1 and learnable positional embeddings, which makes it simple to defend against. Another attack on NLP is given in Fowl et al. [6], but it relies on the stronger assumption that the server can send malicious updates to the clients. Furthermore, there is concurrent work [11] on reconstructing text from transformers, but it is limited to the case when token embeddings are trained together with the network.
43
+
44
+ Finally, there have been several works attempting to protect against gradient leakage. Works based on heuristics [34, 31] lack privacy guarantees and have been shown ineffective against stronger attacks [2], while those based on differential privacy do train models with formal guarantees [1], but typically hurt the accuracy of the trained models as they require adding noise to the gradients. We remark that we also evaluate LAMP on defended networks.
45
+
46
+ # 3 Background
47
+
48
+ In this section, we introduce the background necessary to understand our work.
49
+
50
+ # 3.1 Federated Learning
51
+
52
+ In federated learning, $C$ clients aim to jointly optimize a neural network $f$ with parameters $\theta$ on their private data. At iteration $k$ , the parameters $\theta^k$ are sent to all clients, where each client $c$ executes a gradient update $\nabla_{\theta^k} \mathcal{L}(\boldsymbol{x}_c^*, y_c^*)$ on a sample $(\boldsymbol{x}_c^*, y_c^*)$ from their dataset $(\mathcal{X}_c, \mathcal{Y}_c)$ . The updates are sent back to the server and aggregated. While in Sec. 5 we experiment with both FedSGD [24] and FedAvg [19] client updates, throughout the text we assume that clients use FedSGD updates:
53
+
54
+ $$
55
+ \boldsymbol {\theta} ^ {k + 1} = \boldsymbol {\theta} ^ {k} - \frac {\lambda}{C} \sum_ {c = 1} ^ {C} \nabla_ {\boldsymbol {\theta} ^ {k}} \mathcal {L} \left(\boldsymbol {x} _ {c} ^ {*}, y _ {c} ^ {*}\right).
56
+ $$
57
+
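+ For concreteness, the following is a minimal sketch (not from the paper) of this aggregation step, treating the parameters and each client's gradients as matching lists of tensors; all names are illustrative:
+
+ ```python
+ import torch
+
+ def fedsgd_round(theta, client_grads, lam):
+     """One FedSGD round: average the client gradients and take a step."""
+     C = len(client_grads)
+     return [t - (lam / C) * sum(gs)
+             for t, gs in zip(theta, zip(*client_grads))]
+
+ # Toy usage: two clients, a single 2x2 parameter tensor.
+ theta = [torch.zeros(2, 2)]
+ grads = [[torch.ones(2, 2)], [3 * torch.ones(2, 2)]]
+ print(fedsgd_round(theta, grads, lam=0.1))   # -0.2 everywhere
+ ```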
58
+ Gradient leakage attacks A gradient leakage attack is an attack executed by the server (or a party which compromised it) that tries to obtain the private data $(\pmb{x}_c^*, y_c^*)$ of a client using the gradient updates $\nabla_{\theta^k} \mathcal{L}(\pmb{x}_c^*, y_c^*)$ sent to the server. Gradient leakage attacks usually assume honest-but-curious servers which are not allowed to modify the federated training protocol outlined above. A common approach, adopted by Zhu et al. [43], Zhao et al. [41], Deng et al. [3] as well as our work, is to obtain the private data by solving the optimization problem:
59
+
60
+ $$
61
+ \operatorname * {a r g m i n} _ {\left(\boldsymbol {x} _ {c}, y _ {c}\right)} \delta \left(\nabla_ {\boldsymbol {\theta} ^ {k}} \mathcal {L} \left(\boldsymbol {x} _ {c} ^ {*}, y _ {c} ^ {*}\right), \nabla_ {\boldsymbol {\theta} ^ {k}} \mathcal {L} \left(\boldsymbol {x} _ {c}, y _ {c}\right)\right),
62
+ $$
63
+
64
+ where $\delta$ is some distance measure and $(\pmb{x}_c, y_c)$ denotes dummy data optimized using gradient descent to have similar gradients $\nabla_{\pmb{\theta}^k} \mathcal{L}(\pmb{x}_c, y_c)$ to true data $(\pmb{x}_c^*, y_c^*)$ . Common choices for $\delta$ are $L_2$ [43], $L_1$ [3] and cosine distances [7]. When the true label $y_c^*$ is known, the problem reduces to
65
+
66
+ $$
67
+ \underset {\boldsymbol {x} _ {c}} {\arg \min } \delta \left(\nabla_ {\boldsymbol {\theta} ^ {k}} \mathcal {L} \left(\boldsymbol {x} _ {c} ^ {*}, y _ {c} ^ {*}\right), \nabla_ {\boldsymbol {\theta} ^ {k}} \mathcal {L} \left(\boldsymbol {x} _ {c}, y _ {c} ^ {*}\right)\right),
68
+ $$
69
+
70
+ which was shown [41, 12] to be simpler to solve with gradient descent approaches.
71
+
72
+ # 3.2 Transformer Networks
73
+
74
+ In this paper, we focus on the problem of gradient leakage of text on transformers [35]. Given some input text, the first step is to tokenize it into tokens from some fixed vocabulary of size $V$ . Each token is then converted to a 1-hot vector denoted $\pmb{t}_1,\pmb{t}_2,\dots,\pmb{t}_n\in \mathbb{R}^V$ , where $n$ is the number of tokens in the text. The tokens are then converted to embedding vectors $\pmb{x}_1,\pmb{x}_2,\dots,\pmb{x}_n\in \mathbb{R}^d$ , where $d$ is a chosen embedding size, by multiplying by a trained embedding matrix $\pmb{W}_{\mathrm{embed}}\in \mathbb{R}^{V\times d}$ [4]. The rows of $\pmb{W}_{\mathrm{embed}}$ represent the embeddings of the tokens, and we denote them as $e_1,e_2,\ldots ,e_V\in \mathbb{R}^d$ . In addition to tokens, their positions in the sequence are also encoded using the positional embedding matrix $\pmb{W}_{\mathrm{pos}}\in \mathbb{R}^{P\times d}$ , where $P$ is the longest allowed token sequence. The resulting positional embeddings are denoted $\pmb{p}_1,\pmb{p}_2,\dots,\pmb{p}_n$ . For notational simplicity, we denote $e = e_{1},e_{2},\dots,e_{V}$ , $\pmb {x} = x_{1},x_{2},\dots,x_{n}$ and $\pmb {p} = p_{1},p_{2},\dots,p_{n}$ . We use the token-wise sum of the embeddings $\pmb{x}$ and $\pmb{p}$ as an input to a sequence of self-attention layers [35]. The final classification output is obtained by passing the first output of the last self-attention layer through a final linear layer, followed by a tanh.
75
+
76
+ # 3.3 Calculating Perplexity on Pretrained Models
77
+
78
+ In this work, we rely on large pretrained language models, such as GPT-2 [29], to assess the quality of the text produced by the continuous part of our optimization. Such models are typically trained to calculate the probability $P(\pmb{t}_n \mid \pmb{t}_1, \pmb{t}_2, \dots, \pmb{t}_{n-1})$ of appending a token $\pmb{t}_n$ from the vocabulary to the end of the sequence of tokens $\pmb{t}_1, \pmb{t}_2, \dots, \pmb{t}_{n-1}$ . Therefore, such models can be leveraged to calculate the likelihood of a sequence of tokens $\pmb{t}_1, \pmb{t}_2, \dots, \pmb{t}_n$ , as follows:
79
+
80
+ $$
81
+ P \left(\boldsymbol {t} _ {1}, \boldsymbol {t} _ {2}, \dots , \boldsymbol {t} _ {n}\right) = \prod_ {l = 0} ^ {n - 1} P \left(\boldsymbol {t} _ {l + 1} \mid \boldsymbol {t} _ {1}, \boldsymbol {t} _ {2}, \dots , \boldsymbol {t} _ {l}\right).
82
+ $$
83
+
84
+ One can use the likelihood, or the closely-related negative log-likelihood, as a measure of the quality of a produced sequence. However, the likelihood depends on the length of the sequence, as probability decreases with length. To this end, we use the perplexity measure [13], defined as:
85
+
86
+ $$
87
+ \mathcal {L} _ {\mathrm {l m}} \left(\boldsymbol {t} _ {1}, \boldsymbol {t} _ {2}, \dots , \boldsymbol {t} _ {n}\right) = - \frac {1}{n} \sum_ {l = 0} ^ {n - 1} \log P \left(\boldsymbol {t} _ {l + 1} \mid \boldsymbol {t} _ {1}, \boldsymbol {t} _ {2}, \dots , \boldsymbol {t} _ {l}\right).
88
+ $$
89
+
90
+ In the discrete part of our optimization, we rely on this measure to assess the quality of reconstructed sequences produced by the continuous part.
91
+
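+ As a concrete sketch, the quantity $\mathcal{L}_{\mathrm{lm}}$ above can be computed with the Hugging Face GPT-2 implementation as follows (the helper name `l_lm` is ours, and this is not the paper's released code; the paper's auxiliary model is the GPT-2 of Guo et al. [10]):
+
+ ```python
+ import torch
+ from transformers import GPT2LMHeadModel, GPT2TokenizerFast
+
+ tok = GPT2TokenizerFast.from_pretrained("gpt2")
+ lm = GPT2LMHeadModel.from_pretrained("gpt2").eval()
+
+ def l_lm(text: str) -> float:
+     """Mean token negative log-likelihood (the L_lm term above)."""
+     ids = tok(text, return_tensors="pt").input_ids
+     with torch.no_grad():
+         # With labels=input_ids, the causal-LM head returns the mean
+         # cross-entropy of predicting each token from its prefix.
+         return lm(ids, labels=ids).loss.item()
+
+ print(l_lm("weather is nice"), l_lm("nice weather is ."))
+ ```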
92
+ # 4 Extracting Text with LAMP
93
+
94
+ In this section we describe the details of our attack which alternates between continuous optimization using gradient descent, presented in Sec. 4.2, and discrete optimization using language models to guide the search towards more natural text reconstruction, presented in Sec. 4.3.
95
+
96
+ # 4.1 Notation
97
+
98
+ We denote the attacked neural network and its parameters with $f$ and $\theta$ , respectively. Further, we denote the client token sequence and its label as $(t^{*}, y^{*})$ , and our reconstructions as $(t, y)$ . For each token $t_{i}^{*}$ in $t^{*}$ and $t_{i}$ in $t$ , we denote their embeddings with $x_{i}^{*} \in \mathbb{R}^{d}$ and $x_{i} \in \mathbb{R}^{d}$ , respectively. Moreover, for each token in our vocabulary, we denote the embedding with $e_{i} \in \mathbb{R}^{d}$ . We collect the individual embeddings $x_{i}, x_{i}^{*}$ , and $e_{i}$ into the matrices $x \in \mathbb{R}^{d \times n}$ , $x^{*} \in \mathbb{R}^{d \times n}$ and $e \in \mathbb{R}^{d \times V}$ , where $n$ is the number of tokens in $t^{*}$ and $V$ is the size of the vocabulary.
99
+
100
+ # 4.2 Continuous Optimization
101
+
102
+ We now describe the continuous part of our attack (blue in Fig. 1). Throughout the paper, we assume knowledge of the ground truth label $y^{*}$ of the client token sequence we aim to reconstruct, meaning $y = y^{*}$ . This assumption is not a significant restriction as we mainly focus on binary classification, with batch sizes such that trying all possible combinations of labels is feasible. Moreover, prior work [9, 40] has demonstrated that labels can easily be recovered for basic network architectures, which can be adapted for transformers in future work. We initialize our reconstruction candidate by sampling embeddings from a Gaussian and pick the one with the smallest reconstruction loss.
103
+
104
+ Reconstruction loss A key component of our attack is a loss measuring how close the reconstructed gradient is to the true gradient. Assuming an $l$ -layer network, where $\theta_{i}$ denotes the parameters of layer $i$ , an option is to use the combination of $L_{2}$ and $L_{1}$ loss proposed by Deng et al. [3],
105
+
106
+ $$
107
+ \mathcal {L} _ {\mathrm {t a g}} (\boldsymbol {x}) = \sum_ {i = 1} ^ {l} | | \nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x} ^ {*}, y ^ {*}) - \nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x}, y) | | _ {2} + \alpha_ {\mathrm {t a g}} | | \nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x} ^ {*}, y ^ {*}) - \nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x}, y) | | _ {1}.
108
+ $$
109
+
110
+ where $\alpha_{\mathrm{tag}}$ is a hyperparameter. Another option is to use the cosine reconstruction loss proposed by Geiping et al. [7] in the image domain:
111
+
112
+ $$
113
+ \mathcal {L} _ {\cos} (\boldsymbol {x}) = 1 - \frac {1}{l} \sum_ {i = 1} ^ {l} \frac {\nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x} ^ {*} , y ^ {*}) \cdot \nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x} , y)}{\| \nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x} ^ {*} , y ^ {*}) \| _ {2} \| \nabla_ {\boldsymbol {\theta} _ {i}} f (\boldsymbol {x} , y) \| _ {2}}.
114
+ $$
115
+
116
+ Naturally, LAMP can also be instantiated using any other loss. Interestingly, we find that there is no loss that is universally better, and the effectiveness is dataset dependent. Intuitively, $L_{1}$ loss is less sensitive to outliers, while cosine loss is independent of the gradient norm, so it works well for small gradients. Thus, we set the gradient loss $\mathcal{L}_{\mathrm{grad}}$ to either $\mathcal{L}_{\mathrm{tag}}$ or $\mathcal{L}_{\mathrm{cos}}$ , depending on the setting.
117
+
118
+ **Embedding regularization** In the process of optimizing the reconstruction loss, we observe the resulting embedding vectors $\boldsymbol{x}_i$ often steadily grow in length. We believe this behavior is due to the self-attention layers in transformer networks that rely predominantly on dot product operations. As a result, the optimization process focuses on optimizing the direction of individual embeddings $\boldsymbol{x}_i$ , disregarding their length. To address this, we propose an embedding length regularization term:
119
+
120
+ $$
121
+ \mathcal {L} _ {\mathrm {r e g}} (\boldsymbol {x}) = \left(\frac {1}{n} \sum_ {i = 1} ^ {n} \| \boldsymbol {x} _ {i} \| _ {2} - \frac {1}{V} \sum_ {j = 1} ^ {V} \| \boldsymbol {e} _ {j} \| _ {2}\right) ^ {2}.
122
+ $$
123
+
124
+ The regularizer forces the mean length of the embeddings of the reconstructed sequence to be close to the mean length of the embeddings in the vocabulary. The final gradient reconstruction error optimized in LAMP is given by:
125
+
126
+ $$
127
+ \mathcal {L} _ {\operatorname {r e c}} (\boldsymbol {x}) = \mathcal {L} _ {\operatorname {g r a d}} (\boldsymbol {x}) + \alpha_ {\operatorname {r e g}} \mathcal {L} _ {\operatorname {r e g}} (\boldsymbol {x}),
128
+ $$
129
+
130
+ where $\alpha_{\mathrm{reg}}$ is a regularization weighting factor.
131
+
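+ A minimal PyTorch sketch of this regularizer (names are ours, not the released implementation):
+
+ ```python
+ import torch
+
+ def l_reg(x: torch.Tensor, e: torch.Tensor) -> torch.Tensor:
+     """Embedding-length regularizer.
+
+     x: (n, d) reconstructed embeddings; e: (V, d) vocabulary embeddings.
+     Penalizes the squared gap between the two mean embedding norms.
+     """
+     return (x.norm(dim=1).mean() - e.norm(dim=1).mean()) ** 2
+ ```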
132
+ **Optimization** We summarize how the described components work together in the setting of continuous optimization. To reconstruct the token sequence $\pmb{t}^*$ , we first randomly initialize a sequence of dummy token embeddings $\pmb{x} = \pmb{x}_1\pmb{x}_2\ldots \pmb{x}_n$ , with $\pmb{x}_i \in \mathbb{R}^d$ . Following prior work on text reconstruction from gradients [3, 43], we apply gradient descent on $\pmb{x}$ to minimize the reconstruction loss $\mathcal{L}_{\mathrm{rec}}(\pmb{x})$ . To this end, a second-order derivative needs to be computed, as $\mathcal{L}_{\mathrm{rec}}(\pmb{x})$ depends on the network gradient at $\pmb{x}$ . Similar to prior work [3, 43], we achieve this using automatic differentiation in PyTorch [27].
133
+
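+ The following is a minimal PyTorch sketch of one such continuous update, under simplifying assumptions: the model maps an embedding sequence directly to logits, the label is known, and only the per-layer $L_2$ part of the gradient distance is shown; `l_reg` is the regularizer sketched above, and all names are illustrative:
+
+ ```python
+ import torch
+
+ def continuous_step(x, model, true_grads, y, loss_fn, opt, alpha_reg, e):
+     """One gradient-descent step on the dummy embeddings x.
+
+     Expects x.requires_grad == True and an optimizer over [x] only,
+     e.g. opt = torch.optim.Adam([x], lr=0.01).
+     """
+     opt.zero_grad()
+     task_loss = loss_fn(model(x), y)
+     grads = torch.autograd.grad(task_loss, tuple(model.parameters()),
+                                 create_graph=True)  # keep graph: 2nd order
+     rec = sum(((g - tg) ** 2).sum().sqrt()
+               for g, tg in zip(grads, true_grads))
+     (rec + alpha_reg * l_reg(x, e)).backward()  # differentiates through
+     opt.step()                                  # the gradient computation
+ ```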
134
+ # 4.3 Discrete Optimization
135
+
136
+ Next, we describe the discrete part of our optimization (green in Fig. 1). While continuous optimization can often successfully recover token embeddings close to the original, the recovered tokens can be in the wrong order, depending on how much positional embeddings influence the output. For example, reconstructions corresponding to the sentences "weather is nice" and "nice weather is." might result in a similar reconstruction loss, though the first reconstruction has a higher likelihood of being natural text. To address this issue, we perform several discrete sequence transformations, and choose the one with both a low reconstruction loss and a low perplexity under the auxiliary language model.
137
+
138
+ Generating candidates Given the current reconstruction $\pmb{x} = \pmb{x}_1\pmb{x}_2\dots \pmb{x}_n$ , we generate candidates for the new reconstruction $\pmb{x}'$ using one of the following transformations:
139
+
140
+ - Swap: We select two positions $i$ and $j$ in the sequence uniformly at random, and swap the tokens $\pmb{x}_i$ and $\pmb{x}_j$ at these two positions to obtain a new candidate sequence $\pmb{x}' = \pmb{x}_1\pmb{x}_2\ldots \pmb{x}_{i - 1}\pmb{x}_j\pmb{x}_{i + 1}\ldots \pmb{x}_{j - 1}\pmb{x}_i\pmb{x}_{j + 1}\ldots \pmb{x}_n$ .
141
+
142
+ - MoveToken: Similarly, we select two positions $i$ and $j$ in the sequence uniformly at random, and move the token $\pmb{x}_i$ after the position $j$ in the sequence, thus obtaining $\pmb{x}' = \pmb{x}_1\pmb{x}_2\ldots \pmb{x}_{i - 1}\pmb{x}_{i + 1}\ldots \pmb{x}_{j - 1}\pmb{x}_j\pmb{x}_i\pmb{x}_{j + 1}\ldots \pmb{x}_n$ .
143
+ - MoveSubseq: We select three positions $i, j$ and $p$ (where $i < j$ ) uniformly at random, and move the subsequence of tokens between $i$ and $j$ after position $p$ . The new sequence is thus $\boldsymbol{x}' = \boldsymbol{x}_1 \boldsymbol{x}_2 \ldots \boldsymbol{x}_{i-1} \boldsymbol{x}_{j+1} \ldots \boldsymbol{x}_p \boldsymbol{x}_i \ldots \boldsymbol{x}_j \boldsymbol{x}_{p+1} \ldots \boldsymbol{x}_n$ .
144
+ - MovePrefix: We select a position $i$ uniformly at random, and move the prefix of the sequence ending at position $i$ to the end of the sequence. The modified sequence then is $\pmb{x}^{\prime} = \pmb{x}_{i + 1}\dots \pmb{x}_{n}\pmb{x}_{1}\pmb{x}_{2}\dots \pmb{x}_{i}$
145
+
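+ These transformations are straightforward to implement; the following is a direct sketch (ours, with simplified index bookkeeping) on a Python list of tokens or embeddings of length at least 2:
+
+ ```python
+ import random
+
+ def swap(xs):
+     """Swap the tokens at two random positions."""
+     i, j = random.sample(range(len(xs)), 2)
+     ys = list(xs)
+     ys[i], ys[j] = ys[j], ys[i]
+     return ys
+
+ def move_token(xs):
+     """Move a random token to a random new position."""
+     i, j = random.sample(range(len(xs)), 2)
+     ys = list(xs)
+     ys.insert(j, ys.pop(i))
+     return ys
+
+ def move_subseq(xs):
+     """Move a random subsequence after a random position."""
+     i, j = sorted(random.sample(range(len(xs)), 2))
+     ys, seg = list(xs), list(xs[i:j + 1])
+     del ys[i:j + 1]
+     k = random.randrange(len(ys) + 1)
+     return ys[:k] + seg + ys[k:]
+
+ def move_prefix(xs):
+     """Rotate: move a random-length prefix to the end."""
+     i = random.randrange(len(xs))
+     return list(xs[i + 1:]) + list(xs[:i + 1])
+ ```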
146
+ Next, we use a language model to check if generated candidates improve over the current sequence.
147
+
148
+ Using a language model to select candidates We accept the new reconstruction $x'$ if it improves the combination of the reconstruction loss and perplexity:
149
+
150
+ $$
151
+ \mathcal {L} _ {\operatorname {r e c}} \left(\boldsymbol {x} ^ {\prime}\right) + \alpha_ {\mathrm {l m}} \mathcal {L} _ {\mathrm {l m}} \left(\boldsymbol {t} ^ {\prime}\right) < \mathcal {L} _ {\operatorname {r e c}} (\boldsymbol {x}) + \alpha_ {\mathrm {l m}} \mathcal {L} _ {\mathrm {l m}} (\boldsymbol {t})
152
+ $$
153
+
154
+ Here $t$ and $t'$ denote sequences of tokens obtained by mapping each embedding of $x$ and $x'$ to the nearest neighbor in the vocabulary according to the cosine distance. The term $\mathcal{L}_{\mathrm{rec}}$ is the reconstruction loss introduced in Sec. 4.2, while $\mathcal{L}_{\mathrm{lm}}$ denotes the perplexity measured by an auxiliary language model, such as GPT-2. The parameter $\alpha_{\mathrm{lm}}$ determines the trade-off between $\mathcal{L}_{\mathrm{rec}}$ and $\mathcal{L}_{\mathrm{lm}}$ : if it is too low then the attack will not utilize the language model, and if it is too high then the attack will disregard the reconstruction loss and only focus on the perplexity. Going back to our example, assume that our reconstruction equals the second sequence "nice weather is." Then, at some point, we might use the MoveToken transformation to move the word "nice" behind the word "is" which would presumably keep the reconstruction loss similar, but drastically improve perplexity.
155
+
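+ The acceptance rule itself then reduces to a single comparison; in the sketch below, `l_rec` and `l_lm` stand for caller-supplied functions computing $\mathcal{L}_{\mathrm{rec}}$ and $\mathcal{L}_{\mathrm{lm}}$ (the latter as sketched in Sec. 3.3); the names are ours:
+
+ ```python
+ def accept_candidate(l_rec, l_lm, alpha_lm, x_new, t_new, x_cur, t_cur):
+     """Greedy acceptance: keep the candidate only if it improves the
+     combined reconstruction-loss + perplexity objective."""
+     score = lambda x, t: l_rec(x) + alpha_lm * l_lm(t)
+     return score(x_new, t_new) < score(x_cur, t_cur)
+ ```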
156
+ # 4.4 Complete Reconstruction Attack
157
+
158
+ We present our end-to-end attack in Algorithm 1. We initialize the reconstruction $\pmb{x}$ by sampling from a Gaussian distribution $n_{\mathrm{init}}$ times, and choose the sample with minimal reconstruction loss as our initial reconstruction. Then, at each step we alternate between continuous and discrete optimization. We first perform $n_c$ steps of continuous optimization to minimize the reconstruction loss (Lines 4-6, see Sec. 4.2). Then, we perform $n_d$ steps of discrete optimization to minimize the combination of reconstruction loss and perplexity (Lines 10-17, see Sec. 4.3). Finally, in Line 20 we project the continuous embeddings $x$ to their respective nearest tokens, according to cosine similarity.
161
+
162
+ Algorithm 1 Extracting text with LAMP
163
+ 1: $\pmb{x}^{(k)} \sim \mathcal{N}(0, I)$ , where $k = 1, \dots, n_{\mathrm{init}}$
164
+ 2: $\pmb{x} \gets \arg \min_{\pmb{x}^{(k)}} \mathcal{L}_{\mathrm{rec}}(\pmb{x}^{(k)})$
165
+ 3: for $i = 1$ to $it$ do
166
+ 4: for $j = 1$ to $n_c$ do
167
+ 5: $\pmb{x} \gets \pmb{x} - \lambda \nabla_{\pmb{x}} \mathcal{L}_{\mathrm{rec}}(\pmb{x})$
168
+ 6: end for
169
+ 7: $\pmb{x}_{\mathrm{best}} \gets \pmb{x}$
170
+ 8: $t_{\mathrm{best}} \gets \mathrm{PROJECTTOVOCAB}(\pmb{x}_{\mathrm{best}})$
171
+ 9: $L_{\mathrm{best}} \gets \mathcal{L}_{\mathrm{rec}}(\pmb{x}_{\mathrm{best}}) + \alpha_{\mathrm{lm}} \mathcal{L}_{\mathrm{lm}}(\pmb{t}_{\mathrm{best}})$
172
+ 10: for $j = 1$ to $n_d$ do
173
+ 11: $\pmb{x}' \gets \mathrm{TRANSFORM}(\pmb{x})$
174
+ 12: $t' \gets \mathrm{PROJECTTOVOCAB}(\pmb{x}')$
175
+ 13: $L' \gets \mathcal{L}_{\mathrm{rec}}(\pmb{x}') + \alpha_{\mathrm{lm}} \mathcal{L}_{\mathrm{lm}}(\pmb{t}')$
176
+ 14: if $L' < L_{\mathrm{best}}$ then
177
+ 15: $\pmb{x}_{\mathrm{best}}, \pmb{t}_{\mathrm{best}}, L_{\mathrm{best}} \gets \pmb{x}', \pmb{t}', L'$
178
+ 16: end if
179
+ 17: end for
180
+ 18: $\pmb{x} \gets \pmb{x}_{\mathrm{best}}$
181
+ 19: end for
182
+ 20: return $\mathrm{PROJECTTOVOCAB}(\pmb{x})$
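+
+ Put together, the loop of Algorithm 1 can be sketched in PyTorch as below. The helpers `rec_loss`, `lm_loss`, `transform`, and `project_to_vocab` correspond to $\mathcal{L}_{\mathrm{rec}}$, $\mathcal{L}_{\mathrm{lm}}$, the discrete transformations, and the projection sketched above; the defaults follow the hyperparameters reported in App. D, and this is an illustrative skeleton rather than the released code:
+
+ ```python
+ import torch
+
+ def lamp(rec_loss, lm_loss, transform, project_to_vocab,
+          seq_len, dim, n_init=500, it=30, n_c=75, n_d=200,
+          lr=0.01, alpha_lm=0.2):
+     # Lines 1-2: best-of-n_init Gaussian initialization.
+     x = min((torch.randn(seq_len, dim) for _ in range(n_init)),
+             key=lambda c: float(rec_loss(c)))
+     x = x.clone().requires_grad_(True)
+     opt = torch.optim.Adam([x], lr=lr)
+     for _ in range(it):
+         for _ in range(n_c):               # Lines 4-6: continuous steps.
+             opt.zero_grad()
+             rec_loss(x).backward()
+             opt.step()
+         with torch.no_grad():              # Lines 7-9: score the candidate.
+             t_best = project_to_vocab(x)
+             l_best = float(rec_loss(x)) + alpha_lm * lm_loss(t_best)
+             x_best = x.detach().clone()
+             for _ in range(n_d):           # Lines 10-17: discrete search.
+                 x_new = transform(x)
+                 t_new = project_to_vocab(x_new)
+                 l_new = float(rec_loss(x_new)) + alpha_lm * lm_loss(t_new)
+                 if l_new < l_best:
+                     x_best, t_best, l_best = x_new, t_new, l_new
+             x.copy_(x_best)                # Line 18: keep the best candidate.
+     return project_to_vocab(x.detach())    # Line 20
+ ```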
183
+
184
+ # 5 Experimental Evaluation
185
+
186
+ We now discuss our experimental results, demonstrating the effectiveness of LAMP compared to prior work in a wide range of settings. We present reconstruction results on several datasets, architectures, and batch sizes, together with an additional ablation study and an evaluation of different defenses and training methods.
187
+
188
+ Datasets Prior work [3] has demonstrated that text length is a key factor for the success of reconstruction from gradients. To this end, in our experiments we consider three binary classification datasets of increasing complexity: CoLA [37] and SST-2 [33] from GLUE [36], with typical sequence lengths between 5 and 9 words and between 3 and 13 words, respectively, and RottenTomatoes [26], with typical sequence lengths between 14 and 27 words. The CoLA dataset contains English sentences from language books annotated with binary labels describing whether the sentences are grammatically correct, while SST-2 and RottenTomatoes contain movie reviews annotated with a binary sentiment. For all experiments, we evaluate the methods on 100 random sequences from the respective training sets. We remark that attacking in the binary classification setting is a more difficult task than in the masking setting considered by prior work [43], where the attacker can utilize strictly more information.
191
+
192
+ Models Our experiments are performed on different target models based on the BERT [4] architecture. The main model we consider is $\mathrm{BERT}_{\mathrm{BASE}}$, which has 12 layers, 768 hidden units, a feed-forward filter size of 3072, and 12 attention heads. To illustrate the generality of our approach with respect to model size, we additionally consider a larger model, $\mathrm{BERT}_{\mathrm{LARGE}}$, which has 24 layers, 1024 hidden units, a feed-forward filter size of 4096, and 16 attention heads, as well as a smaller model, $\mathrm{TinyBERT}_6$ from Jiao et al. [15], with 6 layers, 768 hidden units, a feed-forward filter size of 3072, and 12 attention heads. All models were taken from Hugging Face [39]. The $\mathrm{BERT}_{\mathrm{BASE}}$ and $\mathrm{BERT}_{\mathrm{LARGE}}$ models were pretrained on the Wikipedia [5] and BookCorpus [44] datasets, while $\mathrm{TinyBERT}_6$ was distilled from $\mathrm{BERT}_{\mathrm{BASE}}$. We perform our main experiments on pretrained models, as this is the most common setting for training classification models from text [25]. For the auxiliary language model we use the pretrained GPT-2 provided by Guo et al. [10], which was trained with the same tokenizer used to pretrain our target BERT models.
193
+
194
+ Metrics Following TAG [3], we measure the success of our methods based on the ROUGE family of metrics [20]. In particular, we report the aggregated F-scores on ROUGE-1, ROUGE-2 and ROUGE-L, which measure the recovered unigrams, the recovered bigrams, and the ratio of the length of the longest matching subsequence to the length of the whole sequence. When evaluating batch sizes greater than 1, we exclude the padding tokens, used to pad shorter sequences, from the reconstruction and from the ROUGE metric computation.
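+
+ As an example, these F-scores can be computed with the `rouge_score` package, assuming its default scoring matches the variant used by TAG; the sentences below are illustrative:
+
+ ```python
+ from rouge_score import rouge_scorer
+
+ scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"])
+ ref = "i have six too many marbles ."
+ hyp = "have six . too many marbles ."
+ scores = scorer.score(ref, hyp)
+ print({name: round(s.fmeasure, 3) for name, s in scores.items()})
+ ```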
195
+
196
+ Experimental setup In all settings we consider, we compare our method with the baselines DLG [43] and TAG [3] discussed in Sec. 2. As TAG does not have public code, we use our own implementation, and remark that the results obtained using our implementation are similar to or better than those reported in Deng et al. [3]. We consider two variants of our attack, $\mathrm{LAMP_{cos}}$ and $\mathrm{LAMP}_{L_2 + L_1}$, that use the $\mathcal{L}_{\mathrm{cos}}$ and $\mathcal{L}_{\mathrm{tag}}$ gradient matching losses for the continuous optimization. For the $\mathrm{BERT}_{\mathrm{BASE}}$ and $\mathrm{TinyBERT}_6$ experiments, we run our attack with $it = 30$, $n_c = 75$ and $n_d = 200$, and stop the optimization early once we reach a total of 2000 continuous optimization steps. For the $\mathrm{BERT}_{\mathrm{LARGE}}$ model, whose larger number of parameters makes the optimization harder, we use $it = 25$ and $n_c = 200$ instead, resulting in 5000 continuous optimization steps. We run DLG and TAG for 10000 optimization steps on $\mathrm{BERT}_{\mathrm{LARGE}}$ and 2500 on all other models. For the continuous optimization, we use Adam [17] with a learning rate decay factor $\gamma$ applied every 50 steps for all methods and experiments, except for the $\mathrm{BERT}_{\mathrm{LARGE}}$ ones where, following Geiping et al. [8], we use AdamW [21] and a linear learning rate decay schedule applied every step. We picked the hyperparameters for TAG, $\mathrm{LAMP_{cos}}$ and $\mathrm{LAMP}_{L_2 + L_1}$ separately on CoLA and RottenTomatoes using grid search on $\mathrm{BERT}_{\mathrm{BASE}}$ and applied them to all networks. As the optimal hyperparameters for RottenTomatoes exactly matched the ones on CoLA, we used the same hyperparameters on SST-2 as well. To account for the different optimizer used for $\mathrm{BERT}_{\mathrm{LARGE}}$ models, we further tuned the learning rate $\lambda$ for the $\mathrm{BERT}_{\mathrm{LARGE}}$ experiments separately, keeping the other hyperparameters fixed. Additionally, for our methods we applied a two-step initialization procedure. We first initialized the embedding vectors with 500 random samples from a standard Gaussian distribution and picked the best one according to $\mathcal{L}_{\mathrm{grad}}(\boldsymbol{x})$. We then computed 500 permutations of the best initialization and chose the best one in the same way. The effect of this procedure is investigated in App. C.3. Further details on our experimental setup are shown in App. D.
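+
+ A sketch of the two-step initialization, with `grad_loss` a hypothetical stand-in for $\mathcal{L}_{\mathrm{grad}}(\boldsymbol{x})$:
+
+ ```python
+ import torch
+
+ def two_step_init(grad_loss, seq_len, dim, n_init=500):
+     # Step 1: best of n_init Gaussian samples by gradient-matching loss.
+     best = min((torch.randn(seq_len, dim) for _ in range(n_init)),
+                key=lambda x: float(grad_loss(x)))
+     # Step 2: best of n_init random token-order permutations of it.
+     return min((best[torch.randperm(seq_len)] for _ in range(n_init)),
+                key=lambda x: float(grad_loss(x)))
+ ```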
197
+
198
+ Main experiments We evaluate the two variants of LAMP against DLG [43] and TAG [3] on BERT<sub>BASE</sub>, BERT<sub>LARGE</sub>, and TinyBERT<sub>6</sub>. Additionally, we evaluate attacks after BERT<sub>BASE</sub> has already been fine-tuned for 2 epochs on each task (following Devlin et al. [4]), as Balunović et al. [2] showed that in the vision domain it is significantly more difficult to attack already trained networks. For both the baselines and our attacks, we assume for simplicity that the lengths of the sequences are known, as otherwise an adversary can simply run the attack for all possible lengths. In the first experiment we consider the setting where the batch size is equal to 1. The results are shown in Table 1.
199
+
200
+ Table 1: Main results of text reconstruction from gradients with LAMP, for various datasets and architectures in the setting with batch size equal to 1. FT denotes a fine-tuned model. R-1, R-2, and R-L denote the ROUGE-1, ROUGE-2, and ROUGE-L scores, respectively.
201
+
202
+ <table><tr><td rowspan="2" colspan="2"></td><td colspan="3">BERTBASE</td><td colspan="3">BERTBASE-FT</td><td colspan="3">TinyBERT6</td><td colspan="3">BERTLARGE</td></tr><tr><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td rowspan="4">CoLA</td><td>DLG</td><td>59.3</td><td>7.7</td><td>46.2</td><td>36.2</td><td>2.0</td><td>30.4</td><td>37.7</td><td>3.0</td><td>33.7</td><td>82.7</td><td>10.5</td><td>55.8</td></tr><tr><td>TAG</td><td>78.9</td><td>10.2</td><td>53.3</td><td>40.2</td><td>3.1</td><td>32.3</td><td>43.9</td><td>3.8</td><td>37.4</td><td>82.9</td><td>14.6</td><td>55.5</td></tr><tr><td>LAMPcos</td><td>89.6</td><td>51.9</td><td>76.2</td><td>85.8</td><td>46.2</td><td>73.1</td><td>93.9</td><td>59.3</td><td>80.2</td><td>92.0</td><td>56.0</td><td>78.8</td></tr><tr><td>LAMPL2+L1</td><td>87.5</td><td>47.5</td><td>73.2</td><td>40.3</td><td>9.3</td><td>35.2</td><td>94.5</td><td>52.1</td><td>76.0</td><td>91.2</td><td>47.8</td><td>75.4</td></tr><tr><td rowspan="4">SST-2</td><td>DLG</td><td>65.4</td><td>17.7</td><td>54.2</td><td>36.0</td><td>2.7</td><td>33.9</td><td>42.0</td><td>5.4</td><td>39.6</td><td>78.4</td><td>18.1</td><td>59.0</td></tr><tr><td>TAG</td><td>75.6</td><td>18.9</td><td>57.4</td><td>40.0</td><td>5.7</td><td>36.6</td><td>43.5</td><td>9.4</td><td>40.9</td><td>80.8</td><td>16.8</td><td>59.1</td></tr><tr><td>LAMPcos</td><td>88.8</td><td>56.9</td><td>77.7</td><td>87.6</td><td>54.1</td><td>76.1</td><td>91.6</td><td>58.2</td><td>79.7</td><td>88.5</td><td>55.9</td><td>76.5</td></tr><tr><td>LAMPL2+L1</td><td>88.6</td><td>57.4</td><td>75.7</td><td>41.6</td><td>10.9</td><td>39.3</td><td>89.7</td><td>53.2</td><td>75.4</td><td>89.3</td><td>55.5</td><td>75.9</td></tr><tr><td rowspan="4">Rotten Tomatoes</td><td>DLG</td><td>38.6</td><td>1.4</td><td>26.0</td><td>20.1</td><td>0.4</td><td>15.2</td><td>20.4</td><td>1.1</td><td>17.7</td><td>66.8</td><td>3.1</td><td>35.4</td></tr><tr><td>TAG</td><td>60.3</td><td>3.5</td><td>33.6</td><td>26.7</td><td>0.9</td><td>18.2</td><td>25.8</td><td>1.5</td><td>20.2</td><td>73.6</td><td>4.4</td><td>36.8</td></tr><tr><td>LAMPcos</td><td>64.7</td><td>16.3</td><td>43.1</td><td>63.4</td><td>13.8</td><td>42.6</td><td>76.0</td><td>28.6</td><td>55.8</td><td>73.4</td><td>15.7</td><td>45.4</td></tr><tr><td>LAMPL2+L1</td><td>51.4</td><td>10.2</td><td>34.3</td><td>17.2</td><td>1.0</td><td>14.7</td><td>74.0</td><td>19.4</td><td>46.7</td><td>77.6</td><td>16.6</td><td>45.8</td></tr></table>
203
+
204
+ From the ROUGE-1 metric, we can observe that we recover more tokens than the baselines in all settings. Moreover, the main advantage of LAMP is that the order of tokens in the reconstructed sequences matches the order in the target sequences much more closely, as evidenced by the large increase in ROUGE-2 ($5\times$ on CoLA). This observation is further backed by the ROUGE-L metric, which shows we are on average able to reconstruct up to $23\%$ longer subsequences on the BERT<sub>BASE</sub> model compared to the baselines. These results confirm our intuition that guiding the search with GPT-2 allows us to reconstruct sequences that are a much closer match to the original sequences. We point out that Table 1 reaffirms the observations first made in Deng et al. [3], that DLG is consistently worse in all metrics compared to both TAG and LAMP, and that the significantly longer sequences in RottenTomatoes still pose challenges to good reconstruction.
205
+
206
+ Our results show that smaller and fine-tuned models also leak a significant amount of client information. In particular, $\mathrm{TinyBERT}_6$ is even more vulnerable than $\mathrm{BERT}_{\mathrm{BASE}}$, and $\mathrm{BERT}_{\mathrm{BASE}}$-FT is shown to be only slightly worse in reconstruction compared to $\mathrm{BERT}_{\mathrm{BASE}}$, which is surprising given the prior results in the image domain. This shows that smaller models cannot resolve the privacy issue, despite previous suggestions in Deng et al. [3]. Additionally, our $\mathrm{BERT}_{\mathrm{LARGE}}$ experiments reaffirm the observation in Deng et al. [3] that the model is highly vulnerable to all attacks.
207
+
208
+ Further, we examine the variability of our $\mathrm{LAMP}_{\mathrm{cos}}$ method with respect to random initialization. To this end, we ran the $\mathrm{BERT}_{\mathrm{BASE}}$ experiment on CoLA with 10 random seeds, which produced R-1, R-2 and R-L scores of $88.2 \pm 1.02$, $50.0 \pm 2.37$, and $75.0 \pm 1.21$, respectively, suggesting that our results are consistent. Additionally, we assess the variability with respect to the sentence choice in App. C.1.
209
+
210
+ Larger batch sizes Unlike prior work, we also evaluate the different attacks on updates computed on batch sizes greater than 1 on the $\mathrm{BERT}_{\mathrm{BASE}}$ model to investigate whether we can reconstruct some sequences in this more challenging setting. The results are shown in Table 2. Similarly to the results in Table 1, we observe that we obtain better results than the baselines on all ROUGE metrics in all experiments, except on RottenTomatoes with batch size 2, where TAG obtains a slightly better ROUGE-1. Our experiments show that for larger batch sizes we can also reconstruct significant portions of text (see experiments on CoLA and SST-2). To the best of our knowledge, we are the first to show this, suggesting that gradient leakage can be a realistic security threat in practice. Comparing the results for $\mathrm{LAMP}_{L_2 + L_1}$ and $\mathrm{LAMP}_{\mathrm{cos}}$, we observe that $\mathcal{L}_{\mathrm{cos}}$ is better than $\mathcal{L}_{\mathrm{tag}}$ in almost all metrics at batch size 1, across models, but the trend reverses as the batch size is increased.
211
+
212
+ Sample reconstructions We show sample sequence reconstructions from both LAMP and the TAG baseline on CoLA with $B = 1$ in Table 3, marking the correctly reconstructed bigrams in green and correct unigrams in yellow. We can observe that our reconstruction is more coherent, and that it qualitatively outperforms the baseline. In App. B, we show the convergence rate of our method compared to the baselines on an example sequence, suggesting that LAMP often converges faster.
213
+
214
+ Table 2: Text reconstruction from gradients for different batch sizes $B$ on the $\mathrm{BERT}_{\mathrm{BASE}}$ model. R-1, R-2, and R-L denote the ROUGE-1, ROUGE-2, and ROUGE-L scores, respectively.
215
+
216
+ <table><tr><td rowspan="2" colspan="2"></td><td colspan="3">B=1</td><td colspan="3">B=2</td><td colspan="3">B=4</td></tr><tr><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td rowspan="4">CoLA</td><td>DLG</td><td>59.3</td><td>7.7</td><td>46.2</td><td>49.7</td><td>5.7</td><td>41.0</td><td>37.6</td><td>1.7</td><td>34.0</td></tr><tr><td>TAG</td><td>78.9</td><td>10.2</td><td>53.3</td><td>68.8</td><td>7.6</td><td>49.0</td><td>56.2</td><td>6.7</td><td>44.0</td></tr><tr><td>LAMPcos</td><td>89.6</td><td>51.9</td><td>76.2</td><td>74.4</td><td>29.5</td><td>61.9</td><td>55.2</td><td>14.5</td><td>48.0</td></tr><tr><td>LAMPL2+L1</td><td>87.5</td><td>47.5</td><td>73.2</td><td>78.0</td><td>31.4</td><td>63.7</td><td>66.2</td><td>21.8</td><td>55.2</td></tr><tr><td rowspan="4">SST-2</td><td>DLG</td><td>65.4</td><td>17.7</td><td>54.2</td><td>57.7</td><td>11.7</td><td>48.2</td><td>43.1</td><td>6.8</td><td>39.4</td></tr><tr><td>TAG</td><td>75.6</td><td>18.9</td><td>57.4</td><td>71.8</td><td>16.1</td><td>54.4</td><td>61.0</td><td>12.3</td><td>48.4</td></tr><tr><td>LAMPcos</td><td>88.8</td><td>56.9</td><td>77.7</td><td>72.2</td><td>37.0</td><td>63.6</td><td>57.9</td><td>23.4</td><td>52.3</td></tr><tr><td>LAMPL2+L1</td><td>88.6</td><td>57.4</td><td>75.7</td><td>82.5</td><td>45.8</td><td>70.8</td><td>69.5</td><td>32.5</td><td>59.9</td></tr><tr><td rowspan="4">Rotten Tomatoes</td><td>DLG</td><td>38.6</td><td>1.4</td><td>26.0</td><td>29.2</td><td>1.1</td><td>23.0</td><td>21.2</td><td>0.5</td><td>18.6</td></tr><tr><td>TAG</td><td>60.3</td><td>3.5</td><td>33.6</td><td>47.4</td><td>2.7</td><td>29.5</td><td>32.3</td><td>1.4</td><td>23.5</td></tr><tr><td>LAMPcos</td><td>64.7</td><td>16.3</td><td>43.1</td><td>37.4</td><td>5.6</td><td>29.0</td><td>25.7</td><td>1.8</td><td>22.1</td></tr><tr><td>LAMPL2+L1</td><td>51.4</td><td>10.2</td><td>34.3</td><td>46.3</td><td>7.6</td><td>32.7</td><td>35.1</td><td>4.2</td><td>27.2</td></tr></table>
217
+
218
+ Table 3: The result of text reconstruction on several examples from the dataset (for BERT<sub>BASE</sub> with $B = 1$). We show only TAG (the stronger baseline) and LAMP<sub>cos</sub>, as it is superior in these cases.
219
+
220
+ <table><tr><td></td><td colspan="10">Sequence</td></tr><tr><td rowspan="3">CoLA</td><td>Reference</td><td colspan="9">mary has never kissed a man who is taller than john.</td></tr><tr><td>TAG</td><td colspan="9">man seem taller than mary . kissed has john mph never</td></tr><tr><td>LAMPcos</td><td colspan="9">mary has never kissed a man who is taller than john.</td></tr><tr><td rowspan="3">SST-2</td><td>Reference</td><td colspan="9">i also believe that resident evil is not it.</td></tr><tr><td>TAG</td><td colspan="9">resident . or. is pack down believe i evil</td></tr><tr><td>LAMPcos</td><td colspan="9">i also believe that resident resident evil not it.</td></tr><tr><td rowspan="3">Rotten Tomatoes</td><td>Reference</td><td colspan="9">a well - made and often lovely depiction of the mysteries of friendship.</td></tr><tr><td>TAG</td><td colspan="9">- the friendship taken and lovely a made often depiction of well mysteries</td></tr><tr><td>LAMPcos</td><td colspan="9">a well often made - and lovely depiction mysteries of mysteries of friendship.</td></tr></table>
221
+
222
+ Ablation studies In the next experiment, we perform ablation studies to examine the influence of each proposed component of our method. We compare the following variants of LAMP: (i) with cosine loss, (ii) with $L_{1} + L_{2}$ loss, (iii) with $L_{2}$ loss, (iv) without the language model $(\alpha_{\mathrm{lm}} = 0)$, (v) without embedding regularization $(\alpha_{\mathrm{reg}} = 0)$, (vi) without alternating the discrete and continuous optimization steps, i.e., executing $it \cdot n_c$ continuous optimization steps first, followed by $it$ discrete optimization phases with $n_d$ steps each, and (vii) without discrete transformations $(n_d = 0)$. For this experiment, we use the CoLA dataset and BERT<sub>BASE</sub> with $B = 1$. We show the results in Table 4. We observe that LAMP achieves good results with both losses, though cosine is generally better for batch size 1. More importantly, dropping any of the proposed features makes ROUGE-1 and ROUGE-2 significantly worse. We note that the most significant drop in ROUGE-2 reconstruction quality happens when using transformations without the language model $(\mathrm{LAMP}_{\alpha_{\mathrm{lm}}=0})$, which performs even worse than doing no transformations $(\mathrm{LAMP}_{\mathrm{NoDiscrete}})$ at all. This suggests that the use of the language model is crucial to obtaining good results. Further, we observe that our proposed scheme for alternating the continuous and discrete optimization steps is important, as doing the discrete optimization at the end $(\mathrm{LAMP}_{\mathrm{DiscreteAtEnd}})$ for the same number of steps results in reconstructions that are only marginally better (in ROUGE-2) than the reconstructions obtained without any discrete optimization $(\mathrm{LAMP}_{\mathrm{NoDiscrete}})$. The experiments also confirm the usefulness of other features such as embedding regularization.
223
+
224
+ Table 4: An ablation study with the $\mathrm{BERT}_{\mathrm{BASE}}$ ($B = 1$) model. We restate the results for $\mathrm{LAMP}_{\mathrm{cos}}$ and $\mathrm{LAMP}_{L_2 + L_1}$ from Table 1 and introduce five ablations, done on the better of the two variants of LAMP, in these cases $\mathrm{LAMP}_{\mathrm{cos}}$.
+
+ <table><tr><td></td><td colspan="3">CoLA</td><td colspan="3">SST-2</td><td colspan="3">Rotten Tomatoes</td></tr><tr><td></td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td>LAMPcos</td><td>89.6</td><td>51.9</td><td>76.2</td><td>88.8</td><td>56.9</td><td>77.7</td><td>64.7</td><td>16.3</td><td>43.1</td></tr><tr><td>LAMPL2+L1</td><td>87.5</td><td>47.5</td><td>73.2</td><td>88.6</td><td>57.4</td><td>75.7</td><td>51.4</td><td>10.2</td><td>34.3</td></tr><tr><td>LAMPL2</td><td>69.4</td><td>30.1</td><td>58.8</td><td>72.4</td><td>44.1</td><td>65.4</td><td>31.9</td><td>5.5</td><td>25.7</td></tr><tr><td>LAMPαlm=0</td><td>86.7</td><td>26.6</td><td>66.9</td><td>82.6</td><td>37.0</td><td>68.4</td><td>64.0</td><td>9.9</td><td>40.3</td></tr><tr><td>LAMPαreg=0</td><td>84.5</td><td>38.0</td><td>69.1</td><td>83.3</td><td>44.7</td><td>71.9</td><td>57.8</td><td>11.1</td><td>38.3</td></tr><tr><td>LAMPDiscreteAtEnd</td><td>87.4</td><td>28.6</td><td>66.9</td><td>85.4</td><td>42.4</td><td>71.0</td><td>65.0</td><td>11.4</td><td>42.3</td></tr><tr><td>LAMPNoDiscrete</td><td>86.6</td><td>29.6</td><td>67.4</td><td>84.1</td><td>40.0</td><td>70.0</td><td>61.5</td><td>10.2</td><td>40.8</td></tr></table>
+
+ Attacking defended networks So far, all experiments assumed that clients have not defended against data leakage. Following work on vision attacks [43, 38], we now consider the defense of adding Gaussian noise to gradients (with additional clipping this would correspond to DP-SGD [1]). Note that, as usual, there is a trade-off between privacy and accuracy: adding more noise leads to better privacy, but worse accuracy. We measure the performance of the fine-tuned models on CoLA using the MCC metric [23], for which higher values are better. The fine-tuning was done for 2 epochs with different Gaussian noise levels $\sigma$, and we obtained the MCC scores depicted in Table 5. We did not explore noise levels $\sigma > 0.01$ due to the significant drop in MCC from 0.557 for the undefended model to 0.364. The results of our experiments on these defended networks are presented in Table 5. While all methods' reconstruction metrics degrade, as expected, we see that most text is still recoverable at the chosen noise levels. Moreover, our method still outperforms the baselines, which shows the importance of evaluating defenses with strong reconstruction attacks. In App. C.2 we show that LAMP is also useful against a defense which masks some percentage of gradients.
+
+ Table 5: Evaluation on gradients defended with Gaussian noise, with BERT<sub>BASE</sub> ($B = 1$) on the CoLA dataset.
+
+ <table><tr><td></td><td colspan="3">σ = 0.001<br>MCC = 0.551</td><td colspan="3">σ = 0.002<br>MCC = 0.526</td><td colspan="3">σ = 0.005<br>MCC = 0.464</td><td colspan="3">σ = 0.01<br>MCC = 0.364</td></tr><tr><td></td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td>DLG</td><td>60.0</td><td>7.2</td><td>46.3</td><td>61.3</td><td>7.5</td><td>47.0</td><td>58.8</td><td>8.0</td><td>46.4</td><td>56.4</td><td>6.3</td><td>44.8</td></tr><tr><td>TAG</td><td>70.7</td><td>6.0</td><td>50.8</td><td>67.1</td><td>8.4</td><td>49.9</td><td>64.1</td><td>6.5</td><td>47.6</td><td>59.6</td><td>6.5</td><td>46.2</td></tr><tr><td>LAMPcos</td><td>81.2</td><td>42.7</td><td>69.4</td><td>70.6</td><td>29.5</td><td>60.9</td><td>43.3</td><td>9.45</td><td>39.7</td><td>27.7</td><td>2.0</td><td>27.6</td></tr><tr><td>LAMPL2+L1</td><td>79.2</td><td>32.8</td><td>64.1</td><td>74.3</td><td>31.0</td><td>61.9</td><td>73.5</td><td>29.7</td><td>60.9</td><td>69.6</td><td>29.4</td><td>60.6</td></tr></table>
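+
+ For reference, a sketch of this client-side defense, with `sigma` matching the values in Table 5; the optional clipping shown here is our addition to indicate the DP-SGD-like variant:
+
+ ```python
+ import torch
+
+ def defend_gradients(grads, sigma=0.005, clip=None):
+     # Optionally clip the total gradient norm, then add Gaussian
+     # noise to every shared gradient tensor.
+     if clip is not None:
+         total = torch.sqrt(sum(g.pow(2).sum() for g in grads))
+         grads = [g * min(1.0, clip / float(total + 1e-12)) for g in grads]
+     return [g + sigma * torch.randn_like(g) for g in grads]
+ ```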
239
+
240
+ Attacking FedAvg So far, we have only considered attacking the FedSGD algorithm. In this experiment, we apply our attack on the commonly used FedAvg [19] algorithm. As NLP models are often fine-tuned using small learning rates (2e-5 to 5e-5 in the original BERT paper), we find that FedAvg reconstruction performance is close to FedSGD performance with the batch size multiplied by the number of FedAvg steps. We experimented with attacking FedAvg with 4 steps using $B = 1$ per step and $lr = 5\mathrm{e}{-5}$ on CoLA and $\mathrm{BERT}_{\mathrm{BASE}}$ with $\mathrm{LAMP}_{L_2 + L_1}$. We obtained R-1, R-2 and R-L of 66.5, 21.0, and 55.1, respectively, comparable to the reported results on FedSGD with $B = 4$.
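+
+ A sketch of the client computation attacked in this setting (4 local steps with $B = 1$): for such small learning rates the observed update is close to a sum of per-step gradients, i.e., to FedSGD with a larger effective batch. The helper names are ours:
+
+ ```python
+ import torch
+
+ def fedavg_client_update(model, batches, loss_fn, lr=5e-5):
+     # Run local SGD steps and return the weight delta that the
+     # server observes, rescaled to a pseudo-gradient.
+     w0 = [p.detach().clone() for p in model.parameters()]
+     opt = torch.optim.SGD(model.parameters(), lr=lr)
+     for inputs, labels in batches:
+         opt.zero_grad()
+         loss_fn(model(inputs), labels).backward()
+         opt.step()
+     return [(a - p.detach()) / lr for a, p in zip(w0, model.parameters())]
+ ```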
241
+
242
+ # 6 Conclusion
243
+
244
+ In this paper, we presented LAMP, a new method for reconstructing private text data from gradients by leveraging language model priors and alternating discrete and continuous optimization. Our extensive experimental evaluation showed that LAMP consistently outperforms prior work on datasets of varying complexity and models of different sizes. Further, we established that LAMP is able to reconstruct private data in a number of challenging settings, including bigger batch sizes, noise-defended gradients, and fine-tuned models. Our work highlights that private text data is not sufficiently protected by federated learning algorithms and that more work is needed to alleviate this issue.
245
+
246
+ # References
247
+
248
+ [1] Martin Abadi, Andy Chu, Ian Goodfellow, H. Brendan McMahan, Ilya Mironov, Kunal Talwar, and Li Zhang. Deep learning with differential privacy. In Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security (ACM CCS), pages 308-318, 2016. doi: 10.1145/2976749.2978318.
249
+ [2] Mislav Balunović, Dimitar I Dimitrov, Robin Staab, and Martin Vechev. Bayesian framework for gradient leakage. arXiv preprint arXiv:2111.04706, 2021.
250
+ [3] Jieren Deng, Yijue Wang, Ji Li, Chenghong Wang, Chao Shang, Hang Liu, Sanguthevar Rajasekaran, and Caiwen Ding. TAG: Gradient attack on transformer-based language models. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 3600-3610, Punta Cana, Dominican Republic, November 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.findings-emnlp.305.
251
+ [4] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
252
+ [5] Wikimedia Foundation. Wikimedia downloads. URL https://dumps.wikimedia.org.
253
+ [6] Liam Fowl, Jonas Geiping, Steven Reich, Yuxin Wen, Wojtek Czaja, Micah Goldblum, and Tom Goldstein. Decepticons: Corrupted transformers breach privacy in federated learning for language models. arXiv preprint arXiv:2201.12675, 2022.
254
+ [7] Jonas Geiping, Hartmut Bauermeister, Hannah Droge, and Michael Moeller. Inverting gradients - how easy is it to break privacy in federated learning? In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 16937-16947. Curran Associates, Inc., 2020.
255
+ [8] Jonas Geiping, Liam Fowl, and Yuxin Wen. Breaching - a framework for attacks against privacy in federated learning. 2022. URL https://github.com/JonasGeiping/breaching.
256
+ [9] Jiahui Geng, Yongli Mou, Feifei Li, Qing Li, Oya Bayan, Stefan Decker, and Chunming Rong. Towards general deep leakage in federated learning. arXiv preprint arXiv:2110.09074, 2021.
257
+ [10] Chuan Guo, Alexandre Sablayrolles, Hervé Jégou, and Douwe Kiela. Gradient-based adversarial attacks against text transformers. arXiv preprint arXiv:2104.13733, 2021.
258
+ [11] Samyak Gupta, Yangsibo Huang, Zexuan Zhong, Tianyu Gao, Kai Li, and Danqi Chen. Recovering private text in federated learning of language models. arXiv preprint arXiv:2205.08514, 2022.
259
+ [12] Yangsibo Huang, Samyak Gupta, Zhao Song, Kai Li, and Sanjeev Arora. Evaluating gradient inversion attacks and defenses in federated learning. Advances in Neural Information Processing Systems, 34, 2021.
260
+ [13] Fred Jelinek, Robert L Mercer, Lalit R Bahl, and James K Baker. Perplexity—a measure of the difficulty of speech recognition tasks. The Journal of the Acoustical Society of America, 62(S1): S63-S63, 1977.
261
+ [14] Jinwoo Jeon, Kangwook Lee, Sewoong Oh, Jungseul Ok, et al. Gradient inversion with generative image prior. Advances in Neural Information Processing Systems, 34, 2021.
262
+ [15] Xiaoqi Jiao, Yichun Yin, Lifeng Shang, Xin Jiang, Xiao Chen, Linlin Li, Fang Wang, and Qun Liu. Tinybert: Distilling bert for natural language understanding. arXiv preprint arXiv:1909.10351, 2019.
263
+ [16] Peter Kairouz, H Brendan McMahan, Brendan Avent, Aurélien Bellet, Mehdi Bennis, Arjun Nitin Bhagoji, Keith Bonawitz, Zachary Charles, Graham Cormode, Rachel Cummings, et al. Advances and open problems in federated learning. arXiv preprint arXiv:1912.04977, 2019.
264
+
265
+ [17] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
266
+ [18] Jakub Konečný, H. Brendan McMahan, Daniel Ramage, and Peter Richtárik. Federated optimization: Distributed machine learning for on-device intelligence. arXiv preprint arXiv:1610.02527, 2016.
267
+ [19] Jakub Konečný, H Brendan McMahan, Felix X Yu, Peter Richtárik, Ananda Theertha Suresh, and Dave Bacon. Federated learning: Strategies for improving communication efficiency. arXiv preprint arXiv:1610.05492, 2016.
268
+ [20] Chin-Yew Lin. Rouge: A package for automatic evaluation of summaries. In Text summarization branches out, pages 74-81, 2004.
269
+ [21] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In International Conference on Learning Representations, 2019. URL https://openreview.net/forum?id=Bkg6RiCqY7.
270
+ [22] Jiahao Lu, Xi Sheryl Zhang, Tianli Zhao, Xiangyu He, and Jian Cheng. April: Finding the achilles' heel on privacy for vision transformers. arXiv preprint arXiv:2112.14087, 2021.
271
+ [23] Brian W Matthews. Comparison of the predicted and observed secondary structure of t4 phage lysozyme. Biochimica et Biophysica Acta (BBA)-Protein Structure, 405(2):442-451, 1975.
272
+ [24] Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Agüera y Arcas. Communication-efficient learning of deep networks from decentralized data. In AISTATS, 2017.
273
+ [25] Shervin Minaee, Nal Kalchbrenner, Erik Cambria, Narjes Nikzad, Meysam Chenaghlu, and Jianfeng Gao. Deep learning-based text classification: A comprehensive review. ACM Computing Surveys (CSUR), 54(3):1-40, 2021.
274
+ [26] Bo Pang and Lillian Lee. Seeing stars: Exploiting class relationships for sentiment categorization with respect to rating scales. In Proceedings of the ACL, 2005.
275
+ [27] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems 32, pages 8024-8035. Curran Associates, Inc., 2019. URL http://papers.neurips.cc/paper/9015-pytorch-an-imperative-style-high-performance-deep-learning-library.pdf.
276
+ [28] Le Trieu Phong, Yoshinori Aono, Takuya Hayashi, Lihua Wang, and Shiho Moriai. Privacy-preserving deep learning: Revisited and enhanced. In ATIS, volume 719 of Communications in Computer and Information Science, pages 100-110. Springer, 2017.
277
+ [29] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.
278
+ [30] Swaroop Ramaswamy, Rajiv Mathews, Kanishka Rao, and Françoise Beaufays. Federated learning for emoji prediction in a mobile keyboard. CoRR, abs/1906.04329, 2019.
279
+ [31] Daniel Scheliga, Patrick Mäder, and Marco Seeland. Precode - a generic model extension to prevent deep gradient leakage, 2021.
280
+ [32] Virat Shejwalkar, Amir Houmansadr, Peter Kairouz, and Daniel Ramage. Back to the drawing board: A critical evaluation of poisoning attacks on federated learning. arXiv preprint arXiv:2108.10241, 2021.
281
+ [33] Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D Manning, Andrew Y Ng, and Christopher Potts. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 conference on empirical methods in natural language processing, pages 1631-1642, 2013.
282
+
283
+ [34] Jingwei Sun, Ang Li, Binghui Wang, Huanrui Yang, Hai Li, and Yiran Chen. Soteria: Provable defense against privacy leakage in federated learning from representation perspective. In CVPR, pages 9311-9319. Computer Vision Foundation / IEEE, 2021.
284
+ [35] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in neural information processing systems, pages 5998-6008, 2017.
285
+ [36] Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R Bowman. Glue: A multi-task benchmark and analysis platform for natural language understanding. arXiv preprint arXiv:1804.07461, 2018.
286
+ [37] Alex Warstadt, Amanpreet Singh, and Samuel R Bowman. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641, 2019.
287
+ [38] Wenqi Wei, Ling Liu, Margaret Loper, Ka-Ho Chow, Mehmet Emre Gursoy, Stacey Truex, and Yanzhao Wu. A framework for evaluating gradient leakage attacks in federated learning. arXiv preprint arXiv:2004.10397, 2020.
288
+ [39] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online, October 2020. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2020.emnlp-demos.6.
289
+ [40] Hongxu Yin, Arun Mallya, Arash Vahdat, Jose M. Alvarez, Jan Kautz, and Pavlo Molchanov. See through gradients: Image batch recovery via grad inversion. In CVPR, 2021.
290
+ [41] Bo Zhao, Konda Reddy Mopuri, and Hakan Bilen. idlg: Improved deep leakage from gradients, 2020.
291
+ [42] Junyi Zhu and Matthew B. Blaschko. R-GAP: recursive gradient attack on privacy. In ICLR, 2021.
292
+ [43] Ligeng Zhu, Zhijian Liu, and Song Han. Deep leakage from gradients. In NeurIPS, 2019.
293
+ [44] Yukun Zhu, Ryan Kiros, Rich Zemel, Ruslan Salakhutdinov, Raquel Urtasun, Antonio Torralba, and Sanja Fidler. Aligning books and movies: Towards story-like visual explanations by watching movies and reading books. In The IEEE International Conference on Computer Vision (ICCV), December 2015.
294
+
295
+ # Checklist
296
+
297
+ 1. For all authors...
298
+
299
+ (a) Do the main claims made in the abstract and introduction accurately reflect the paper's contributions and scope? [Yes]
300
+ (b) Did you describe the limitations of your work? [Yes] We demonstrate the effectiveness of existing defenses against our attack in Sec. 5; further, we outline that our work does not deal with reconstructing labels, which is left for future work, as described in Sec. 4.
301
+ (c) Did you discuss any potential negative societal impacts of your work? [Yes] We provide a discussion in App. F.
302
+ (d) Have you read the ethics review guidelines and ensured that your paper conforms to them? [Yes]
303
+
304
+ 2. If you are including theoretical results...
305
+
306
+ (a) Did you state the full set of assumptions of all theoretical results? [N/A]
307
+ (b) Did you include complete proofs of all theoretical results? [N/A]
308
+
309
+ 3. If you ran experiments...
310
+
311
+ (a) Did you include the code, data, and instructions needed to reproduce the main experimental results (either in the supplemental material or as a URL)? [Yes]
312
+ (b) Did you specify all the training details (e.g., data splits, hyperparameters, how they were chosen)? [Yes]
313
+ (c) Did you report error bars (e.g., with respect to the random seed after running experiments multiple times)? [Yes]
314
+ (d) Did you include the total amount of compute and the type of resources used (e.g., type of GPUs, internal cluster, or cloud provider)? [Yes] We have reported the type of resources used in the Sec. 5. We have reported the total amount of compute in App. E.
315
+
316
+ 4. If you are using existing assets (e.g., code, data, models) or curating/releasing new assets...
317
+
318
+ (a) If your work uses existing assets, did you cite the creators? [Yes]
319
+ (b) Did you mention the license of the assets? [No] We use standard datasets and cite the authors instead.
320
+ (c) Did you include any new assets either in the supplemental material or as a URL? [No]
321
+ (d) Did you discuss whether and how consent was obtained from people whose data you're using/curating? [N/A]
322
+ (e) Did you discuss whether the data you are using/curating contains personally identifiable information or offensive content? [No] We use standard datasets. We refer the reader to the original authors of the dataset for this discussion.
323
+
324
+ 5. If you used crowdsourcing or conducted research with human subjects...
325
+
326
+ (a) Did you include the full text of instructions given to participants and screenshots, if applicable? [N/A]
327
+ (b) Did you describe any potential participant risks, with links to Institutional Review Board (IRB) approvals, if applicable? [N/A]
328
+ (c) Did you include the estimated hourly wage paid to participants and the total amount spent on participant compensation? [N/A]
329
+
330
+ # Supplementary Material
331
+
332
+ # A Discussion
333
+
334
+ # A.1 Threat Model Discussion
335
+
336
+ In this section, we further discuss the threat model chosen by LAMP and compare it to the related work. To make our attack as generic as possible, we relax common assumptions that have previously been exploited in the literature to reconstruct clients' data. In particular, LAMP is applicable even if:
337
+
338
+ - The model's word embeddings are not fine-tuned. As the gradients of the word embedding vectors are non-zero for words that are contained in the client's training data and zero otherwise, revealing these gradients to the server would allow it to easily obtain the client's sequence up to reordering (see the sketch after this list). This constitutes a serious breach of clients' privacy and, thus, we assume the word embeddings are not trainable.
339
+ - The model's positional embeddings are not fine-tuned. Similarly to the word-embedding gradients, Lu et al. [22] have recently demonstrated that for batch size $B = 1$ the positional embedding gradients can leak the client's full sequence. To this end, we assume models without trainable positional embeddings.
340
+ - The model's transformer blocks contain no bias terms. Lu et al. [22] have also shown that the popular attack by Phong et al. [28] can be applied on the bias terms of transformer blocks to leak the client's data. To this end, we assume models without transformer block biases.
341
+ - The transformer model is fine-tuned on a classification task. As language modeling tasks are usually self-supervised, they often feed the same data to the model both as inputs and outputs. Based on this observation, Fowl et al. [6] have recently shown that label reconstruction algorithms can be used to obtain the client's word counts. We thus assume the more challenging binary classification setting.
342
+ - The server is honest-but-curious, i.e., it aims to learn as much as possible about clients' data from gradients but does not tamper with the learning protocol. While prior work has shown that a malicious server can force a client to leak much more data [6], this is orthogonal to our work. We focus on the honest-server setting instead, which is the harder setting to attack.
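+
+ To illustrate the first point, a sketch of how trainable word embeddings would leak the client's tokens (up to order) from a single shared gradient, assuming a Hugging Face-style tokenizer:
+
+ ```python
+ import torch
+
+ def tokens_from_embedding_grad(emb_grad, tokenizer):
+     # Rows of the word-embedding gradient are non-zero exactly for
+     # tokens that occur in the client's batch.
+     ids = torch.nonzero(emb_grad.abs().sum(dim=1) > 0).flatten()
+     return tokenizer.convert_ids_to_tokens(ids.tolist())
+ ```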
343
+
344
+ Note that the assumptions we make for our transformer networks can result in a small amount of accuracy loss on the final fine-tuned model, but preserve the client's data privacy much better. We emphasize that while LAMP focuses on attacks in the harder setting, it is also applicable to the simpler settings without modification.
345
+
346
+ # A.2 Improvements over Prior Work
347
+
348
+ In this section, we outline the differences between LAMP and TAG, and discuss how these differences allow LAMP to reconstruct text data from gradients significantly better than TAG.
349
+
350
+ A major difference between the two methods is the introduction of our discrete optimization that takes advantage of a GPT-2 language model to help reconstruct the token order better than TAG. Our discrete optimization step is novel in several ways:
351
+
352
+ - It is based on a set of discrete transformations that fix common token reordering problems arising from the continuous reconstruction.
353
+ - We take advantage of the perplexity computed by the existing language models such as GPT-2 to evaluate the quality of different discrete transformations.
354
+
355
+ Table 6: Visualization of intermediate steps of text reconstruction from gradients, on a sequence from the CoLA dataset. Note that TAG performs 2500 steps, as opposed to $\mathrm{LAMP}_{\mathrm{cos}}$, which terminates at 2000 steps, as this is usually sufficient for convergence.
356
+
357
+ <table><tr><td>Iteration</td><td>TAG</td><td>LAMPcos</td></tr><tr><td>0</td><td>billie icaohwatch press former spirit technical</td><td>trinity jessie maps extended evidence private peerage whatever</td></tr><tr><td>500</td><td>enough stadium six too 20 le was,</td><td>many marbles have six. too.</td></tr><tr><td>1000</td><td>have stadium seven too three le. marble</td><td>; have six too many marbles.</td></tr><tr><td>1500</td><td>have respect six too manys, marble</td><td>have six. too many marbles.</td></tr><tr><td>2000</td><td>have... six too many i, marble</td><td>have six. too many marbles.</td></tr><tr><td>2500</td><td>have... six too many manys, marble</td><td></td></tr><tr><td>Reference</td><td>i have six too many marbles.</td><td>i have six too many marbles.</td></tr></table>
358
+
359
+ Table 7: In this experiment we reconstruct 100 randomly selected sentences with our methods and the baselines on the CoLA dataset and the BERT<sub>BASE</sub> ($B = 1$) model, repeating the experiment 10 times with 10 different randomly selected sets of sentences. We report the mean and standard deviation of all ROUGE metrics.
360
+
361
+ <table><tr><td></td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td>DLG</td><td>56.2 ± 5.0</td><td>6.5 ± 1.6</td><td>45.0 ± 2.6</td></tr><tr><td>TAG</td><td>74.4 ± 3.1</td><td>10.7 ± 1.8</td><td>53.0 ± 2.1</td></tr><tr><td>LAMPcos</td><td>87.8 ± 2.6</td><td>48.4 ± 5.5</td><td>74.6 ± 2.9</td></tr><tr><td>LAMPL2+L1</td><td>83.1 ± 3.7</td><td>40.7 ± 5.7</td><td>69.3 ± 3.6</td></tr></table>
362
+
363
+ - Finally, our discrete optimization is alternated with the continuous one, allowing each to take advantage of the result of the other, which ultimately results in better token-order reconstruction (see our ablation in Sec. 5).
364
+
365
+ The other major difference between our method and existing work is the choice of the error function $\mathcal{L}_{\mathrm{rec}}$ used in the continuous part of the optimization. Our choice of reconstruction loss results in better reconstruction of individual tokens and thus increases R-1. In particular, we show that the cosine error function, previously applied in the image domain, can often outperform the error function suggested by TAG for text reconstruction, and we introduce a regularization term $\mathcal{L}_{\mathrm{reg}}$ that helps the continuous optimization converge faster to more accurate embeddings using prior knowledge about the norms of the embedding vectors.
366
+
367
+ # B Detailed Text Reconstruction Example
368
+
369
+ In Table 6, we show the intermediate steps of text reconstruction for a real example taken from our experiments presented in Table 3. We can observe that $\mathrm{LAMP}_{\mathrm{Cos}}$ reaches convergence significantly faster than the TAG baseline, and that after only 500 iterations most words are already reconstructed by our method.
370
+
371
+ # C Additional Experiments
372
+
373
+ # C.1 Dependency of Experimental Results to the Chosen Sentences
374
+
375
+ Throughout this paper, we conducted our experiments on the same 100 sequences randomly chosen from the test portion of the datasets we attack. In this experiment, we show that our results are consistent when different sets of 100 sequences are used. To achieve this, we ran the BERT<sub>BASE</sub> CoLA experiment with $B = 1$ on 10 additional different sets of 100 randomly chosen sentences from the CoLA test set. We report the mean ± one standard deviation of the resulting R-1, R-2 and R-L metrics averaged across the 10 sets in Table 7. We see that the results are consistent with our original findings.
376
+
377
+ Table 8: This experiment shows the trade-off between the final network accuracy (measured by MCC) and the reconstruction quality from gradients, for different percentages of zeroed-out gradient entries, on the CoLA dataset with $\mathrm{BERT}_{\mathrm{BASE}}$ ($B = 1$).
378
+
379
+ <table><tr><td>Zeroed %</td><td>MCC</td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td>0</td><td>0.557</td><td>89.6</td><td>51.9</td><td>76.2</td></tr><tr><td>75</td><td>0.557</td><td>79.4</td><td>34.5</td><td>66.3</td></tr><tr><td>90</td><td>0.534</td><td>61.9</td><td>20.1</td><td>53.9</td></tr><tr><td>95</td><td>0.515</td><td>39.0</td><td>5.8</td><td>37.4</td></tr><tr><td>99</td><td>0.371</td><td>24.7</td><td>0.0</td><td>24.7</td></tr></table>
380
+
381
+ Table 9: This experiment shows the effect of the chosen number of initializations $n_{\mathrm{init}}$ used in $\mathrm{LAMP}_{\mathrm{cos}}$ on reconstruction, on all 3 datasets for $\mathrm{BERT}_{\mathrm{BASE}}$ ($B = 1$).
382
+
383
+ <table><tr><td rowspan="2">ninit</td><td colspan="3">CoLA</td><td colspan="3">SST-2</td><td colspan="3">RottenTomatoes</td></tr><tr><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td><td>R-1</td><td>R-2</td><td>R-L</td></tr><tr><td>1</td><td>87.3</td><td>48.1</td><td>73.2</td><td>87.4</td><td>60.8</td><td>78.8</td><td>63.7</td><td>16.6</td><td>43.8</td></tr><tr><td>500</td><td>89.6</td><td>51.9</td><td>76.2</td><td>88.8</td><td>56.9</td><td>77.7</td><td>64.7</td><td>16.3</td><td>43.1</td></tr></table>
384
+
385
+ # C.2 Attacking Gradient Masking Defense
386
+
387
+ We experimented with a defense which zeroes out a percentage of the elements in the gradient vector. In Table 8 we vary the percentage and report the MCC, R-1, R-2, and R-L. While zeroing out most gradients weakens the attack, it also reduces the utility (MCC) of the model.
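+
+ A sketch of this defense; the selection rule is our assumption, as one common variant of gradient pruning keeps only the largest-magnitude entries:
+
+ ```python
+ import torch
+
+ def mask_gradient(g, zeroed_frac=0.9):
+     # Zero out a fraction of the entries, here the ones with the
+     # smallest magnitude.
+     k = int(g.numel() * (1.0 - zeroed_frac))
+     out = torch.zeros_like(g).flatten()
+     if k > 0:
+         idx = g.abs().flatten().topk(k).indices
+         out[idx] = g.flatten()[idx]
+     return out.view_as(g)
+ ```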
388
+
389
+ # C.3 Dependence on the Number of Initializations
390
+
391
+ In this section, we investigate the influence of our proposed initialization on the reconstructions of $\mathrm{LAMP}_{\mathrm{cos}}$ by comparing a single random initialization ($n_{\mathrm{init}} = 1$) with our two-step initialization procedure with $n_{\mathrm{init}} = 500$ on the $\mathrm{BERT}_{\mathrm{BASE}}$ model and a batch size of 1. The results are shown in Table 9. We observe that the two-step initialization scheme consistently improves individual token recovery (measured in terms of R-1) but may in some cases slightly degrade token ordering results (measured in terms of R-2). Even though we used the two-step initialization in the paper (it is strictly better on one dataset and non-comparable to a single random initialization on the remaining datasets), it is indeed sometimes possible to get slightly better R-2 results with a single random initialization.
392
+
393
+ # D Additional Experimental Details
394
+
395
+ We run all of our experiments on a single NVIDIA RTX 2080 Ti GPU with 11 GB of memory, except for the experiments on $\mathrm{BERT}_{\mathrm{LARGE}}$, for which we used a single NVIDIA RTX 3090 Ti GPU with 24 GB of memory instead.
396
+
397
+ As we explain in Sec. 5, we choose the hyperparameters of our methods using a grid search approach on the CoLA and RottenTomatoes datasets. For CoLA, we first evaluated 50 hyperparameter combinations on 10 randomly selected (in a stratified way with respect to length) sequences from the training set (after removing the 100 test sequences). Then, we further evaluated the best 10 combinations on 20 different sequences from the training set. For RottenTomatoes, we picked the hyperparameters from the same 10 best combinations and evaluated them on the same 20 additional sequences. For both $\mathrm{LAMP}_{\mathrm{cos}}$ and the baselines, we investigated the following ranges for the hyperparameters: $\alpha_{\mathrm{lm}} \in [0.05, 0.2]$, $\alpha_{\mathrm{reg}} \in [0.01, 1]$, $\lambda \in [0.001, 0.5]$, $\gamma \in [0.8, 1]$, and $\alpha_{\mathrm{tag}} \in [10^{-5}, 10^{2}]$. For $\mathrm{LAMP}_{L_2 + L_1}$, we consider $\alpha_{\mathrm{lm}} \in [30, 240]$ and $\alpha_{\mathrm{reg}} \in [10, 100]$, as the scale of the loss values is orders of magnitude larger than in $\mathrm{LAMP}_{\mathrm{cos}}$. We experimentally found that our algorithm is robust with respect to the exact values of $n_c$ and $n_d$, provided that they are sufficiently large. To this end, we select $n_d = 200$ because we found that the selected token order from the 200 random transformations is close to the optimal one according to $\mathcal{L}_{\mathrm{rec}}(\boldsymbol{x}) + \alpha_{\mathrm{lm}} \mathcal{L}_{\mathrm{lm}}(\boldsymbol{t})$ for the sentence lengths present in our datasets. Similarly, we set $n_c = 75$ ($n_c = 200$ for $\mathrm{BERT}_{\mathrm{LARGE}}$), which allows our continuous optimization to significantly change the embeddings before the next discrete optimization phase. Finally, we also observed that the performance of our algorithm is robust with respect to $n_{\mathrm{init}}$, so we set it to 500 throughout the experiments. We note that, compared to TAG and DLG, the only additional hyperparameters we have to search over are $\alpha_{\mathrm{reg}}$ and $\alpha_{\mathrm{lm}}$, which makes the grid search feasible for our methods.
400
+
401
+ The resulting hyperparameters for $\mathrm{LAMP}_{\mathrm{Cos}}$ are $\alpha_{\mathrm{lm}} = 0.2$ , $\alpha_{\mathrm{reg}} = 1.0$ , $\lambda = 0.01$ , $\gamma = 0.89$ . In contrast, the best hyperparameters for $\mathrm{LAMP}_{L_2 + L_1}$ are $\alpha_{\mathrm{tag}} = 0.01$ , $\alpha_{\mathrm{lm}} = 60$ , $\alpha_{\mathrm{reg}} = 25$ , $\lambda = 0.01$ , $\gamma = 0.89$ , as the loss $\mathcal{L}_{\mathrm{tag}}$ is on a different order of magnitude compared to $\mathcal{L}_{\mathrm{cos}}$ . In our experiments, TAG's best hyperparameters are $\alpha_{\mathrm{tag}} = 0.01$ , $\lambda = 0.1$ , $\gamma = 1.0$ (no decay), which we also use for DLG (with $\alpha_{\mathrm{tag}} = 0.0$ ).
402
+
403
+ To account for the different optimizer used in $\mathrm{BERT}_{\mathrm{LARGE}}$ experiments, we tuned the learning rate $\lambda$ for all methods separately in this setting by evaluating each method on 5 different learning rates in the range [0.01, 0.1] on 10 randomly selected sentences from the CoLA dataset. This resulted in $\lambda = 0.1$ for DLG, and $\lambda = 0.01$ for TAG, $\mathrm{LAMP}_{\mathrm{Cos}}$ , and $\mathrm{LAMP}_{L_2 + L_1}$ . We applied the chosen values of $\lambda$ to all 3 datasets. Additionally, following Geiping et al. [8] we clip the gradient magnitudes $\| \nabla_{\pmb{x}}\mathcal{L}_{\mathrm{rec}}(\pmb {x})\| _2$ for our $\mathrm{BERT}_{\mathrm{LARGE}}$ experiments to 1.0 for DLG and TAG and 0.5 for $\mathrm{LAMP}_{\mathrm{Cos}}$ and $\mathrm{LAMP}_{L_2 + L_1}$ .
404
+
405
+ # E Total Runtime of the Experiments
406
+
407
+ The $\mathrm{BERT}_{\mathrm{LARGE}}$ model experiments in Table 1 were the most computationally expensive to execute. They took around 50 hours per experiment for the $\mathrm{LAMP}_{\mathrm{cos}}$ and $\mathrm{LAMP}_{L_2 + L_1}$ methods and around 70 hours for TAG, which executes two times more continuous optimization steps. Our experiments on the rest of the networks, for both the baselines and our methods, at batch size 1 ($B = 1$) all took between 8 and 16 hours to execute on a single GPU, with our methods being up to 2x slower due to the additional computational cost of our discrete optimization. Additionally, our experiments at batch size 4 ($B = 4$) took between 8 and 36 hours to execute on a single GPU, with our methods being up to 4x slower due to the additional computational cost of our discrete optimization.
408
+
409
+ # F Potential Negative Societal Impact of This Work
410
+
411
+ Our work is closely related to existing work on gradient leakage attacks (see Sec. 2), which is capable of breaking the privacy promise of FL, e.g., Zhao et al. [41], Geiping et al. [7], Yin et al. [40], Deng et al. [3]. Similar to these works, LAMP can be used to compromise the privacy of client data in real-world FL setups, especially when no defenses are used by the clients. Our attack emphasizes that text data, which is commonly used in federated settings [30], is highly vulnerable to gradient leakage attacks, similarly to data in other domains, and that when FL is applied in practice extra steps need to be taken to mitigate the potential risks. Further, in line with the related work, we study a range of possible mitigations to our attack in Tables 1, 5 and 8 in the paper, thus promoting practical FL implementations that will be less vulnerable, including those defended with Gaussian noise and gradient pruning, and those using bigger batch sizes.
2202.08xxx/2202.08827/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40abb8a45ae542df7a8c052b3fa4fd21fe5dc58007a67d803fdf4b673aafb2d5
3
+ size 507554
2202.08xxx/2202.08827/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08832/ac21a927-1452-4896-b2be-adb7e1ec7eaa_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08832/ac21a927-1452-4896-b2be-adb7e1ec7eaa_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08832/ac21a927-1452-4896-b2be-adb7e1ec7eaa_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff203b6792605770600428195b03c91f7fb96d98b9576153f68d99690021d9c6
3
+ size 1264577
2202.08xxx/2202.08832/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08832/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5ca9200fd3da020d3b94400f8d114beb736e5a9e80e34b1d43490925ff3fe463
3
+ size 5597285
2202.08xxx/2202.08832/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.08xxx/2202.08862/e4a7970f-80af-4e53-a91b-15ca008317bb_content_list.json ADDED
The diff for this file is too large to render. See raw diff