SlowGuess committed (verified)
Commit 88e7143 · 1 parent: 75ac3b9

Add Batch af11bfe7-5fcb-4d2b-b592-081bea749677

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full list.
Files changed (50)
  1. .gitattributes +64 -0
  2. 2202.13xxx/2202.13090/6a21c459-5fae-422a-9e3d-7a71e1e6fcb9_content_list.json +0 -0
  3. 2202.13xxx/2202.13090/6a21c459-5fae-422a-9e3d-7a71e1e6fcb9_model.json +0 -0
  4. 2202.13xxx/2202.13090/6a21c459-5fae-422a-9e3d-7a71e1e6fcb9_origin.pdf +3 -0
  5. 2202.13xxx/2202.13090/full.md +548 -0
  6. 2202.13xxx/2202.13090/images.zip +3 -0
  7. 2202.13xxx/2202.13090/layout.json +0 -0
  8. 2202.13xxx/2202.13094/519c68f3-731e-4525-ae69-803e7821e6d1_content_list.json +0 -0
  9. 2202.13xxx/2202.13094/519c68f3-731e-4525-ae69-803e7821e6d1_model.json +0 -0
  10. 2202.13xxx/2202.13094/519c68f3-731e-4525-ae69-803e7821e6d1_origin.pdf +3 -0
  11. 2202.13xxx/2202.13094/full.md +472 -0
  12. 2202.13xxx/2202.13094/images.zip +3 -0
  13. 2202.13xxx/2202.13094/layout.json +0 -0
  14. 2202.13xxx/2202.13121/3415fbcf-d5e0-4509-b486-bd2d3fe9e7e4_content_list.json +0 -0
  15. 2202.13xxx/2202.13121/3415fbcf-d5e0-4509-b486-bd2d3fe9e7e4_model.json +0 -0
  16. 2202.13xxx/2202.13121/3415fbcf-d5e0-4509-b486-bd2d3fe9e7e4_origin.pdf +3 -0
  17. 2202.13xxx/2202.13121/full.md +0 -0
  18. 2202.13xxx/2202.13121/images.zip +3 -0
  19. 2202.13xxx/2202.13121/layout.json +0 -0
  20. 2202.13xxx/2202.13142/edddf239-62ff-407e-929c-73252ebf5a33_content_list.json +0 -0
  21. 2202.13xxx/2202.13142/edddf239-62ff-407e-929c-73252ebf5a33_model.json +0 -0
  22. 2202.13xxx/2202.13142/edddf239-62ff-407e-929c-73252ebf5a33_origin.pdf +3 -0
  23. 2202.13xxx/2202.13142/full.md +822 -0
  24. 2202.13xxx/2202.13142/images.zip +3 -0
  25. 2202.13xxx/2202.13142/layout.json +0 -0
  26. 2202.13xxx/2202.13162/747f2044-56ac-435c-b2d3-916c376e22a8_content_list.json +2226 -0
  27. 2202.13xxx/2202.13162/747f2044-56ac-435c-b2d3-916c376e22a8_model.json +0 -0
  28. 2202.13xxx/2202.13162/747f2044-56ac-435c-b2d3-916c376e22a8_origin.pdf +3 -0
  29. 2202.13xxx/2202.13162/full.md +459 -0
  30. 2202.13xxx/2202.13162/images.zip +3 -0
  31. 2202.13xxx/2202.13162/layout.json +0 -0
  32. 2202.13xxx/2202.13169/909a7465-0b93-460b-9a57-ce6ae5e551db_content_list.json +1657 -0
  33. 2202.13xxx/2202.13169/909a7465-0b93-460b-9a57-ce6ae5e551db_model.json +2162 -0
  34. 2202.13xxx/2202.13169/909a7465-0b93-460b-9a57-ce6ae5e551db_origin.pdf +3 -0
  35. 2202.13xxx/2202.13169/full.md +247 -0
  36. 2202.13xxx/2202.13169/images.zip +3 -0
  37. 2202.13xxx/2202.13169/layout.json +0 -0
  38. 2202.13xxx/2202.13200/ea734711-65b5-4be7-8325-31204388aeab_content_list.json +0 -0
  39. 2202.13xxx/2202.13200/ea734711-65b5-4be7-8325-31204388aeab_model.json +0 -0
  40. 2202.13xxx/2202.13200/ea734711-65b5-4be7-8325-31204388aeab_origin.pdf +3 -0
  41. 2202.13xxx/2202.13200/full.md +450 -0
  42. 2202.13xxx/2202.13200/images.zip +3 -0
  43. 2202.13xxx/2202.13200/layout.json +0 -0
  44. 2202.13xxx/2202.13239/57c352a6-ea56-4510-913e-a92f5c7acb7d_content_list.json +1347 -0
  45. 2202.13xxx/2202.13239/57c352a6-ea56-4510-913e-a92f5c7acb7d_model.json +1719 -0
  46. 2202.13xxx/2202.13239/57c352a6-ea56-4510-913e-a92f5c7acb7d_origin.pdf +3 -0
  47. 2202.13xxx/2202.13239/full.md +282 -0
  48. 2202.13xxx/2202.13239/images.zip +3 -0
  49. 2202.13xxx/2202.13239/layout.json +0 -0
  50. 2202.13xxx/2202.13257/df5b9240-81ca-4fc3-8c6d-62c6bd827cf6_content_list.json +1747 -0
.gitattributes CHANGED
@@ -7608,3 +7608,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
7608
  2203.02xxx/2203.02281/05253353-a338-486d-a8b3-daf1aeec7597_origin.pdf filter=lfs diff=lfs merge=lfs -text
7609
  2203.02xxx/2203.02284/46c87e48-7456-48de-98bf-d1e9acc6f213_origin.pdf filter=lfs diff=lfs merge=lfs -text
7610
  2203.04xxx/2203.04115/c158179d-b2cd-4e0f-80c2-0d334ac6b806_origin.pdf filter=lfs diff=lfs merge=lfs -text
7611
+ 2202.13xxx/2202.13090/6a21c459-5fae-422a-9e3d-7a71e1e6fcb9_origin.pdf filter=lfs diff=lfs merge=lfs -text
7612
+ 2202.13xxx/2202.13094/519c68f3-731e-4525-ae69-803e7821e6d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
7613
+ 2202.13xxx/2202.13121/3415fbcf-d5e0-4509-b486-bd2d3fe9e7e4_origin.pdf filter=lfs diff=lfs merge=lfs -text
7614
+ 2202.13xxx/2202.13142/edddf239-62ff-407e-929c-73252ebf5a33_origin.pdf filter=lfs diff=lfs merge=lfs -text
7615
+ 2202.13xxx/2202.13162/747f2044-56ac-435c-b2d3-916c376e22a8_origin.pdf filter=lfs diff=lfs merge=lfs -text
7616
+ 2202.13xxx/2202.13169/909a7465-0b93-460b-9a57-ce6ae5e551db_origin.pdf filter=lfs diff=lfs merge=lfs -text
7617
+ 2202.13xxx/2202.13200/ea734711-65b5-4be7-8325-31204388aeab_origin.pdf filter=lfs diff=lfs merge=lfs -text
7618
+ 2202.13xxx/2202.13239/57c352a6-ea56-4510-913e-a92f5c7acb7d_origin.pdf filter=lfs diff=lfs merge=lfs -text
7619
+ 2202.13xxx/2202.13257/df5b9240-81ca-4fc3-8c6d-62c6bd827cf6_origin.pdf filter=lfs diff=lfs merge=lfs -text
7620
+ 2202.13xxx/2202.13288/ba17457e-0e63-4cc9-aa74-ad4c71ec13c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
7621
+ 2202.13xxx/2202.13290/7d442c3b-6e00-41d3-b833-4a5eed6713cb_origin.pdf filter=lfs diff=lfs merge=lfs -text
7622
+ 2202.13xxx/2202.13296/2746a0e7-8f0c-40a1-a236-356ba1519b30_origin.pdf filter=lfs diff=lfs merge=lfs -text
7623
+ 2202.13xxx/2202.13330/611162ce-ddb5-4094-82fa-4da129ca9651_origin.pdf filter=lfs diff=lfs merge=lfs -text
7624
+ 2202.13xxx/2202.13347/c10ac5d0-d8b4-4475-a436-245240fa4e7c_origin.pdf filter=lfs diff=lfs merge=lfs -text
7625
+ 2202.13xxx/2202.13393/c44550d5-022f-4d06-b5a1-7e89d314365f_origin.pdf filter=lfs diff=lfs merge=lfs -text
7626
+ 2202.13xxx/2202.13437/88586a7e-f169-47b2-a6fc-a4117323dbbe_origin.pdf filter=lfs diff=lfs merge=lfs -text
7627
+ 2202.13xxx/2202.13460/ba1cd297-b316-4352-be00-4a6d3fa49202_origin.pdf filter=lfs diff=lfs merge=lfs -text
7628
+ 2202.13xxx/2202.13469/0f866330-9105-4e77-a0a4-38ab11cea981_origin.pdf filter=lfs diff=lfs merge=lfs -text
7629
+ 2202.13xxx/2202.13514/038e3e8d-14bf-4beb-81f7-be79e9addac8_origin.pdf filter=lfs diff=lfs merge=lfs -text
7630
+ 2202.13xxx/2202.13517/df9d19a2-0cf2-461d-bb26-3577051f74db_origin.pdf filter=lfs diff=lfs merge=lfs -text
7631
+ 2202.13xxx/2202.13547/f185b98c-491c-4287-bf8b-19f5dee8242b_origin.pdf filter=lfs diff=lfs merge=lfs -text
7632
+ 2202.13xxx/2202.13556/494cb90c-8e86-4bcb-9cf0-929ab7d9da7e_origin.pdf filter=lfs diff=lfs merge=lfs -text
7633
+ 2202.13xxx/2202.13589/54dd7dee-3c6f-4a23-8a81-361d432038cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
7634
+ 2202.13xxx/2202.13617/a6a62640-4c6f-4635-b42a-4cee461b5c12_origin.pdf filter=lfs diff=lfs merge=lfs -text
7635
+ 2202.13xxx/2202.13641/e32b7897-120b-4d2e-92ea-9ab340aca896_origin.pdf filter=lfs diff=lfs merge=lfs -text
7636
+ 2202.13xxx/2202.13669/c842c9bc-99e0-478d-a9ac-ef8910b443cb_origin.pdf filter=lfs diff=lfs merge=lfs -text
7637
+ 2202.13xxx/2202.13670/1c5dbda9-d074-4e0a-bb70-cb8d156560bd_origin.pdf filter=lfs diff=lfs merge=lfs -text
7638
+ 2202.13xxx/2202.13675/7cce9d18-739b-49bd-ae70-079302de9842_origin.pdf filter=lfs diff=lfs merge=lfs -text
7639
+ 2202.13xxx/2202.13708/d55ceaaa-a088-4b79-b150-a2f7144a2396_origin.pdf filter=lfs diff=lfs merge=lfs -text
7640
+ 2202.13xxx/2202.13711/5c5099a0-9695-4f50-ae77-4e030b97da3d_origin.pdf filter=lfs diff=lfs merge=lfs -text
7641
+ 2202.13xxx/2202.13734/f6f9cf62-601d-4fad-b0f8-bbc23365ac33_origin.pdf filter=lfs diff=lfs merge=lfs -text
7642
+ 2202.13xxx/2202.13843/ac954be2-935e-44d4-b14d-9a5e210a5b23_origin.pdf filter=lfs diff=lfs merge=lfs -text
7643
+ 2202.13xxx/2202.13852/85cbcc10-30d0-4db2-8861-3b69803c0ad8_origin.pdf filter=lfs diff=lfs merge=lfs -text
7644
+ 2202.13xxx/2202.13876/8d20dee6-504b-4f53-954b-af0ddb12cbef_origin.pdf filter=lfs diff=lfs merge=lfs -text
7645
+ 2202.13xxx/2202.13890/5a09614c-aabc-4c3f-93bf-cc37427ac8ca_origin.pdf filter=lfs diff=lfs merge=lfs -text
7646
+ 2202.13xxx/2202.13903/eda3e891-f853-48b8-9724-bee1a52248ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
7647
+ 2202.13xxx/2202.13939/a515cfe7-4f43-4e33-871a-6467a5d1e3ad_origin.pdf filter=lfs diff=lfs merge=lfs -text
7648
+ 2202.13xxx/2202.13953/22f58035-3359-4957-aa35-07e9960eecdf_origin.pdf filter=lfs diff=lfs merge=lfs -text
7649
+ 2202.14xxx/2202.14009/2cd96611-e104-46fe-8598-965c10d85bdb_origin.pdf filter=lfs diff=lfs merge=lfs -text
7650
+ 2202.14xxx/2202.14020/e53e853f-195c-495f-ae21-0377b68651cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
7651
+ 2202.14xxx/2202.14026/86d9c1ca-c10a-4da9-8405-bc88a70fbb13_origin.pdf filter=lfs diff=lfs merge=lfs -text
7652
+ 2202.14xxx/2202.14037/340b123f-bbe0-44e8-9c21-c946467deb23_origin.pdf filter=lfs diff=lfs merge=lfs -text
7653
+ 2203.00xxx/2203.00031/f8228d1b-7a40-46de-811d-b33e6746d43d_origin.pdf filter=lfs diff=lfs merge=lfs -text
7654
+ 2203.00xxx/2203.00048/401673e6-ff99-429b-abcf-ccd491850016_origin.pdf filter=lfs diff=lfs merge=lfs -text
7655
+ 2203.00xxx/2203.00112/9105f531-2af4-4326-b151-72fb6bb28b4e_origin.pdf filter=lfs diff=lfs merge=lfs -text
7656
+ 2203.00xxx/2203.00130/e6715391-8b5a-4726-b59b-c90c478fd2b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
7657
+ 2203.00xxx/2203.00131/1aed4a9c-f5de-47a3-98fe-3f888458f4ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
7658
+ 2203.00xxx/2203.00199/9fd48599-bb07-4149-8ed1-e8dc1470c1b6_origin.pdf filter=lfs diff=lfs merge=lfs -text
7659
+ 2203.00xxx/2203.00211/fa2faebe-cca5-4d22-8e50-2e6c3b5f616f_origin.pdf filter=lfs diff=lfs merge=lfs -text
7660
+ 2203.00xxx/2203.00235/e0f122b4-ff35-435c-8bbc-c7c8c29355b9_origin.pdf filter=lfs diff=lfs merge=lfs -text
7661
+ 2203.00xxx/2203.00236/9f8fbffd-19bb-4cb4-b8e0-08f0b8f19253_origin.pdf filter=lfs diff=lfs merge=lfs -text
7662
+ 2203.00xxx/2203.00241/7aaea4a0-6fdd-496c-b918-8376d8c644a4_origin.pdf filter=lfs diff=lfs merge=lfs -text
7663
+ 2203.00xxx/2203.00255/57f81941-2f86-471c-b232-f10fe476db19_origin.pdf filter=lfs diff=lfs merge=lfs -text
7664
+ 2203.00xxx/2203.00259/a238fa2d-d79e-4e50-92a6-4cb0d3fa2f0c_origin.pdf filter=lfs diff=lfs merge=lfs -text
7665
+ 2203.00xxx/2203.00263/24eeac16-18db-425f-b24c-a5197e64eb03_origin.pdf filter=lfs diff=lfs merge=lfs -text
7666
+ 2203.00xxx/2203.00274/dc03d707-0f96-42f3-99c8-e4c043bf6ab3_origin.pdf filter=lfs diff=lfs merge=lfs -text
7667
+ 2203.00xxx/2203.00343/21468612-481b-4410-9801-838c2a2d7e19_origin.pdf filter=lfs diff=lfs merge=lfs -text
7668
+ 2203.00xxx/2203.00386/fffbb4ff-2423-4ece-91f8-d1f1c7c2adaa_origin.pdf filter=lfs diff=lfs merge=lfs -text
7669
+ 2203.00xxx/2203.00497/2f919215-f390-4e74-bafc-467f0bf7add7_origin.pdf filter=lfs diff=lfs merge=lfs -text
7670
+ 2203.00xxx/2203.00555/d7bd9426-546e-4206-ab48-1dd78a3e22db_origin.pdf filter=lfs diff=lfs merge=lfs -text
7671
+ 2203.00xxx/2203.00585/4ef4d89a-c0e1-4192-bfe3-95ff8604071b_origin.pdf filter=lfs diff=lfs merge=lfs -text
7672
+ 2203.00xxx/2203.00633/5d99a0f8-f903-4027-90ee-37267742c2cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
7673
+ 2203.00xxx/2203.00638/86aa3a66-bece-4d13-88cb-8d77ee5dbbda_origin.pdf filter=lfs diff=lfs merge=lfs -text
7674
+ 2203.00xxx/2203.00663/cd5f8cbd-3f25-42ff-a4bf-4bdde5095917_origin.pdf filter=lfs diff=lfs merge=lfs -text
2202.13xxx/2202.13090/6a21c459-5fae-422a-9e3d-7a71e1e6fcb9_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13090/6a21c459-5fae-422a-9e3d-7a71e1e6fcb9_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13090/6a21c459-5fae-422a-9e3d-7a71e1e6fcb9_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aeae9624276d729ecf734b72c6a1fabcd790784c48ce48846361f38685cd6112
3
+ size 890145
2202.13xxx/2202.13090/full.md ADDED
@@ -0,0 +1,548 @@
1
+ # Disentangling Long and Short-Term Interests for Recommendation
2
+
3
+ Yu Zheng $^{1*}$ , Chen Gao $^{1\dagger}$ , Jianxin Chang $^{2}$ , Yanan Niu $^{2}$ , Yang Song $^{2}$ , Depeng Jin $^{1}$ , Yong Li $^{1}$
4
+
5
+ $^{1}$ Beijing National Research Center for Information Science and Technology,
6
+
7
+ Department of Electronic Engineering, Tsinghua University
8
+
9
+ $^{2}$ Beijing Kuaishou Technology Co., Ltd.
10
+
11
+ # ABSTRACT
12
+
13
+ Modeling users' long-term and short-term interests is crucial for accurate recommendation. However, since there is no manually annotated label for user interests, existing approaches always follow the paradigm of entangling these two aspects, which may lead to inferior recommendation accuracy and interpretability. In this paper, to address this issue, we propose a Contrastive learning framework to disentangle Long and Short-term interests for Recommendation (CLSR) with self-supervision. Specifically, we first propose two separate encoders to independently capture user interests of different time scales. We then extract long-term and short-term interests proxies from the interaction sequences, which serve as pseudo labels for user interests. Pairwise contrastive tasks are then designed to supervise the similarity between interest representations and their corresponding interest proxies. Finally, since the importance of long-term and short-term interests is dynamically changing, we propose to adaptively aggregate them through an attention-based network for prediction. We conduct experiments on two large-scale real-world datasets for e-commerce and short-video recommendation. Empirical results show that our CLSR consistently outperforms all state-of-the-art models with significant improvements: GAUC is improved by over 0.01, and NDCG is improved by over $4\%$. Further counterfactual evaluations demonstrate that stronger disentanglement of long and short-term interests is successfully achieved by CLSR. The code and data are available at https://github.com/tsinghua-fib-lab/CLSR.
14
+
15
+ # CCS CONCEPTS
16
+
17
+ - Information systems $\rightarrow$ Personalization.
18
+
19
+ # KEYWORDS
20
+
21
+ Recommendation, Long and Short-Term Interests, Self-supervised Learning, Disentanglement Learning
22
+
23
+ # ACM Reference Format:
24
+
25
+ Yu Zheng, Chen Gao, Jianxin Chang, Yanan Niu, Yang Song, Depeng Jin, Yong Li. 2022. Disentangling Long and Short-Term Interests for Recommendation. In Proceedings of the ACM Web Conference 2022 (WWW'22), April 25-29, 2022, Virtual Event, Lyon, France. ACM, New York, NY, USA, 12 pages. https://doi.org/10.1145/3485447.3512098
26
+
27
+ # 1 INTRODUCTION
28
+
29
+ With the deluge of information growing rapidly, recommender systems have been playing crucial roles in numerous online services, such as news [2], e-commerce [51], videos [10, 27], etc. Specifically, recommender systems provide personalized content by first inferring users' interests from their historical interactions and then retrieving items that meet these interests. In practice, however, users' interests are difficult to track since they tend to have both stable long-term interests and dynamic short-term interests. For example, a tech-savvy user may always be willing to browse electronics (long-term interest), while he may also exhibit interest in clothes in a short period (short-term interest). As a result, accurately modeling and distinguishing users' long and short-term (LS-term) interests is critical.
30
+
31
+ Let us first review the literature. Collaborative filtering (CF) based recommenders [15, 16, 23, 35, 51] mainly capture the long-term interests and ignore sequential features; thus they are limited in modeling the dynamic short-term interests. Consequently, sequential models [17, 41, 50, 55] were proposed to exploit convolutional neural networks or recurrent neural networks to learn sequential features of user interests. However, those methods tend to have short-term memory and hence recommend items that are more relevant to users' recent behaviors. As a result, a series of approaches [2, 29, 47, 48] were recently proposed to combine CF-based recommenders and sequential recommenders to cover both long-term and short-term interests. Specifically, in these approaches, CF-based models such as matrix factorization are adopted for long-term interests, and sequential models are utilized to learn short-term interests. However, whether LS-term interests can be effectively captured by the corresponding models is not guaranteed, since they impose no explicit supervision on the learned LS-term interests. In other words, the learned LS-term interests in those methods can be entangled with each other [28].
32
+
33
+ Overall, disentangling LS-term interests faces the following challenges.
34
+
35
+ - First, LS-term interests reflect quite different aspects of user preferences. Specifically, long-term interests can be regarded as a user's overall preferences, which can remain stable for a long period of time, while short-term interests indicate a user's dynamic
36
+
37
+ preferences that evolve rapidly according to recent interactions. Therefore, learning a unified representation of LS-term interests is insufficient to capture such differences. Instead, it is more appropriate to model the two aspects separately.
38
+
39
+ - Second, it is hard to obtain labeled data for learning LS-term interests. The collected behavior log data always contains only users' implicit feedback, such as clicks. Hence the separate modeling of LS-term interests lacks explicit supervision for distinguishing the two aspects.
40
+ - Last, for the final prediction of users' future interactions, both long and short-term interests should be taken into consideration. Nevertheless, the importance of the two kinds of interests varies across different user-item interactions. For example, users' short-term interests are more important when they continuously browse similar items, while users' behaviors are largely driven by long-term interests when they switch to quite different items. Therefore, it is critical but challenging to adaptively fuse these two aspects for predicting future interactions.
41
+
42
+ To address the above challenges, we propose a contrastive learning framework that disentangles LS-term interests, leveraging the interaction sequences to build self-supervision signals. Specifically, in order to independently capture LS-term interests, we propose to decompose each interaction into three mechanisms: long-term interests representation, short-term interests evolution, and interaction prediction. We design two separate encoders with different dynamics over time to model LS-term interests respectively, which addresses the first challenge. To overcome the key challenge of lacking labeled data for LS-term interests, we propose to use self-supervision [7]. We first generate proxy representations for long/short-term interests by extracting users' entire historical interactions and recent interactions, respectively. We then supervise the interest representations obtained from the two separate encoders to be more similar to their corresponding proxies than the opposite proxies, in a contrastive manner. Different from existing methods which impose no explicit supervision on the learned LS-term interests [2, 47], our self-supervised approach can learn better-disentangled representations for LS-term interests and remove the dependency on labeled data. With the disentangled interest representations, we design an attention-based fusion network that adaptively aggregates the two aspects for prediction, which solves the last challenge.
43
+
44
+ We evaluate the recommendation performance of our method on two real-world datasets. Experimental results illustrate that CLSR outperforms state-of-the-art (SOTA) methods with significant improvements. Specifically, AUC and GAUC are improved by over 0.02, and NDCG is improved by over $10.7\%$, which is considered a quite promising gain by existing works [39, 47]. To further investigate the effectiveness of the self-supervised disentanglement design, we conduct counterfactual evaluations with intervened historical interaction sequences that block long- or short-term interests. The results demonstrate that CLSR achieves steadily stronger disentanglement of LS-term interests than SOTA methods.
45
+
46
+ In summary, the main contributions of this paper are as follows:
47
+
48
+ - We highlight the different dynamics of users' long and short-term interests, and take a pioneering step by showing that disentangling the two factors is critical for accurate recommendation.
49
+
50
+ - We propose a contrastive learning framework to separately capture LS-term interests. Disentangled representations are learned with self-supervision by comparing with proxy representations constructed from the original interaction sequences. An attention-based fusion network is further designed which adaptively aggregates LS-term interests to predict interactions.
51
+ - We conduct extensive experiments on real-world datasets. Experimental results validate that our proposed CLSR achieves significant improvements against SOTA methods. Further counterfactual analyses illustrate that much stronger disentanglement of LS-term interests can be achieved by CLSR.
52
+
53
+ The remainder of the paper is organized as follows. We first formulate the problem in Section 2 and introduce the proposed method in Section 3. We then conduct experiments in Section 4, and review the related works in Section 5. Finally, we conclude the paper in Section 6.
54
+
55
+ # 2 PROBLEM FORMULATION
56
+
57
+ Notations. Let $M$ denote the number of users, and $\{x^u\}_{u = 1}^M$ denote the interaction sequences for all users. Each sequence $x^{u} = [x_{1}^{u},x_{2}^{u},\dots,x_{T_{u}}^{u}]$ denotes a list of items which are ordered by the corresponding interaction timestamps. Here $T_{u}$ denotes the length of user $u$ 's interaction history, and each item $x_{t}^{u}$ is in $[1,N]$ , where $N$ denotes the number of items.
58
+
59
+ Since a user's interaction history $x^{u}$ reflects both long and short-term interests, the recommender system will first learn LS-term interests from $x^{u}$ , and then predict future interactions based on the two aspects. We then can formulate the problem of learning LS-term interests for recommendation as follows:
60
+
61
+ Input: The historical interaction sequences for all users, $\{x^u\}_{u = 1}^M$.
+ Output: A predictive model that estimates the probability of whether a user will click an item, considering both LS-term interests.
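To make the assumed data layout concrete, here is a tiny illustrative Python sketch; the dictionary format, function name, and item ids are hypothetical and not part of the released datasets.

```python
# Hypothetical illustration of the input: for each user u, a sequence x^u of
# item ids ordered by interaction timestamp.
interaction_sequences = {
    0: [5, 12, 7, 33],  # user 0 interacted with items 5, 12, 7, 33 in that order
    1: [2, 2, 19],      # user 1's history (repeated items are allowed)
}

def training_examples(sequences):
    """Yield (user, observed history, next item) pairs: the model should
    assign a high click probability to next_item given the history."""
    for user, seq in sequences.items():
        for t in range(1, len(seq)):
            yield user, seq[:t], seq[t]

for user, history, target in training_examples(interaction_sequences):
    print(user, history, "->", target)
```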
62
+
63
+ # 3 METHODOLOGY
64
+
65
+ In this section, we elaborate on the proposed Contrastive learning framework of Long and Short-term interests for Recommendation (CLSR).
66
+
67
+ # 3.1 User Interests Modeling
68
+
69
+ Since users' LS-term interests are quite different in terms of the dynamics over time, it is more appropriate to model the two aspects separately instead of using a unified representation to express them. Specifically, long-term interests are relatively stable, while short-term interests are dynamic and changing frequently. Meanwhile, each interaction is determined by both aspects as well as the target item. Therefore, we propose to frame user interests modeling as the following three separate mechanisms:
70
+
71
+ $$
72
+ \zeta = \left\{ \begin{array}{l l} U_{l} = f_{1}(U), & (1) \\ U_{s}^{(t)} = f_{2}(U_{s}^{(t-1)}, V^{(t-1)}, Y^{(t-1)}, U), & (2) \\ Y^{(t)} = f_{3}(U_{l}, U_{s}^{(t)}, V^{(t)}, U), & (3) \end{array} \right.
73
+ $$
74
+
75
+ where $f_{1}, f_{2}$ and $f_{3}$ are the underlying functions for user $U$'s long-term interests $(U_{l})$, short-term interests $(U_{s}^{(t)})$, and interaction $(Y^{(t)})$ with item $V^{(t)}$. Current and last timestamps are denoted as $t$ and
76
+
77
+ ![](images/efccc950c4b2b6fe21b24f329fb4f0d23fb69b8ba486517004eee71e90e1febb.jpg)
78
+ Figure 1: User interests modeling $\zeta$ (best viewed in color) which consists of three mechanisms, namely long-term interests representation (red edges), short-term interests evolution (blue edges) and interaction prediction (yellow edges).
79
+
80
+ $t - 1$, respectively. It is worth noting that $U$ denotes the user profile, which contains the user ID and the interaction history $x^{u}$.
81
+
82
+ The proposed user interests modeling $\zeta$ decomposes each interaction into three mechanisms: long-term interests representation ($f_{1}$), short-term interests evolution ($f_{2}$), and interaction prediction ($f_{3}$), which are briefly illustrated in Figure 1. We now explain the details of the three mechanisms.
83
+
84
+ - Long-term Interests Representation in Eqn (1). Long-term interests reflect a holistic view of user preferences, and hence they are stable and less affected by recent interactions. In other words, long-term interests can be inferred from the entire historical interaction sequence, thus we include $U$, which contains the interaction history $x^{u}$, as the input of $f_{1}$.
85
+ - Short-term Interests Evolution in Eqn (2). Short-term interests are evolving as users continuously interact with recommended items [50]. For example, users may establish new interests after clicking an item. Meanwhile, users may also gradually lose certain interests. That is to say, short-term interests are time-dependent variables, and thus in $f_{2}$, short-term interests $U_{s}^{(t)}$ at timestamp $t$ evolve recursively from $U_{s}^{(t - 1)}$, affected by the last interaction $Y^{(t - 1)}$ with item $V^{(t - 1)}$.
86
+ - Interaction Prediction in Eqn (3). When predicting future interactions, whether long or short-term interests play a more important role depends on a wide variety of aspects, including the target item $V^{(t)}$ and the interaction history $x^{\pmb{u}}$ of $U$ [47]. Therefore, we fuse $U_{l}$ and $U_{s}^{(t)}$ according to $V^{(t)}$ and $U$ in an adaptive manner to accurately predict interactions.
87
+
88
+ Disentangling LS-term interests means that $U_{l}$ only captures long-term interests and $U_{s}$ models pure short-term interests. Such disentanglement is helpful for achieving interpretable and controllable recommendation, since we can track and tune the importance of each aspect by adjusting the fusion weights. Meanwhile, effective adjustment of LS-term interests requires the learned representations to only contain the information of the desired aspect. Taking the linear case as a toy example, suppose a recommendation model entangles LS-term interests as follows,
89
+
90
+ $$
91
+ U _ {l} ^ {\prime} = 0. 6 U _ {l} + 0. 4 U _ {s}, \quad U _ {s} ^ {\prime} = 0. 4 U _ {l} + 0. 6 U _ {s}, \tag {4}
92
+ $$
93
+
94
+ ![](images/5d406f8a3cc67d46ae0a41c6938ac0c7cfae420e1b90e5ed0b091dd38ac1b253.jpg)
95
+ Figure 2: Our proposed CLSR framework based on self-supervised learning. A) contrastive tasks on the similarity between representations and proxies of LS-term interests to enhance disentanglement; B) long-term interests encoder $\phi$ ; C) short-term interests encoder $\psi$ ; D) adaptive fusion of LS-term interests with attention on the target item and historical interactions; E) interaction prediction network.
96
+
97
+ where $U_{l}^{\prime}$ and $U_{s}^{\prime}$ are the learned entangled interests. Given the fusion weights (importance) of LS-term interests as 0.8 and 0.2 respectively, the actual fused interests are computed as follows,
98
+
99
+ $$
100
+ U _ {f u s e} ^ {\prime} = 0. 8 U _ {l} ^ {\prime} + 0. 2 U _ {s} ^ {\prime} = 0. 5 6 U _ {l} + 0. 4 4 U _ {s}, \tag {5}
101
+ $$
102
+
103
+ which is quite different from the desired interests.
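The mismatch is easy to verify numerically; a short Python check of Eqn (4)-(5):

```python
# Toy check of Eqn (4)-(5): fusing the entangled representations with
# weights 0.8 / 0.2 yields 0.56*U_l + 0.44*U_s instead of 0.8*U_l + 0.2*U_s.
w_long, w_short = 0.8, 0.2
coeff_long = w_long * 0.6 + w_short * 0.4   # contribution of the true U_l
coeff_short = w_long * 0.4 + w_short * 0.6  # contribution of the true U_s
print(coeff_long, coeff_short)              # 0.56 0.44
```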
104
+
105
+ However, disentangling LS-term interests is challenging since there is no labeled data for $U_{l}$ and $U_{s}$ . We now elaborate on our contrastive learning framework which can achieve strong disentanglement with self-supervision.
106
+
107
+ # 3.2 Our Self-supervised Implementation
108
+
109
+ In this section, we first provide two separate encoders to implement $f_{1}$ and $f_{2}$, which learn representations of LS-term interests. Then we introduce our designed contrastive tasks to achieve disentanglement with self-supervision. Last, we introduce the attention-based adaptive fusion model to accomplish $f_{3}$. The overview of CLSR is illustrated in Figure 2.
110
+
111
+ 3.2.1 Generating Query Vectors for LS-term Interests. Motivated by recent works [2, 29, 47, 48] that learn LS-term interests separately with two different models, we design two separate attentive encoders, $\phi$ and $\psi$ , to capture the two aspects, respectively. First, we generate query vectors for LS-term interests as follows,
112
+
113
+ $$
114
+ \boldsymbol {q} _ {l} ^ {\boldsymbol {u}} = \operatorname {E m b e d} (u), \tag {6}
115
+ $$
116
+
117
+ $$
118
+ \boldsymbol {q} _ {s} ^ {\boldsymbol {u}, t} = \operatorname {G R U} \left(\left\{x _ {1} ^ {\boldsymbol {u}}, \dots , x _ {t} ^ {\boldsymbol {u}} \right\}\right), \tag {7}
119
+ $$
120
+
121
+ where we use a look-up embedding table and a Gated Recurrent Unit (GRU) [9] to capture different dynamics over time. In order to impose extra self-supervision on embedding similarity, all the embeddings need to be in the same semantic space. Thus, we use the historical sequence of items as keys of the attentive encoders, so that the obtained LS-term interests representations are in the same
122
+
123
+ item embedding space as follows,
124
+
125
+ $$
126
+ \boldsymbol {u} _ {l} ^ {t} = \phi \left(\boldsymbol {q} _ {l} ^ {u}, \left\{x _ {1} ^ {u}, \dots , x _ {t} ^ {u} \right\}\right), \tag {8}
127
+ $$
128
+
129
+ $$
130
+ \boldsymbol {u} _ {s} ^ {t} = \psi \left(\boldsymbol {q} _ {s} ^ {u, t}, \left\{x _ {1} ^ {u}, \dots , x _ {t} ^ {u} \right\}\right), \tag {9}
131
+ $$
132
+
133
+ where $\boldsymbol{u}_l^t$ and $\boldsymbol{u}_s^t$ are the learned representations of LS-term interests. We now introduce the proposed encoders for LS-term interests.
134
+
135
+ 3.2.2 Long-term Interests Encoder. Figure 2 (B) illustrates the proposed long-term interests encoder $\phi$ . We use attention pooling to extract long-term interests representations, and the attention score of each item $x_{j}^{u}$ can be computed as follows,
136
+
137
+ $$
138
+ \boldsymbol{v}_{j} = \boldsymbol{W}_{l} E\left(x_{j}^{u}\right), \tag {10}
139
+ $$
140
+
141
+ $$
142
+ \alpha_ {j} = \tau_ {l} \left(\boldsymbol {v} _ {j} \| \boldsymbol {q} _ {l} ^ {\boldsymbol {u}} \| \left(\boldsymbol {v} _ {j} - \boldsymbol {q} _ {l} ^ {\boldsymbol {u}}\right) \| \left(\boldsymbol {v} _ {j} \cdot \boldsymbol {q} _ {l} ^ {\boldsymbol {u}}\right)\right), \tag {11}
143
+ $$
144
+
145
+ $$
146
+ a _ {j} = \frac {\exp \left(\alpha_ {j}\right)}{\sum_ {i = 1} ^ {t} \exp \left(\alpha_ {i}\right)}, \tag {12}
147
+ $$
148
+
149
+ where $W_{l}$ is a transformation matrix, $\tau_{l}$ is a multi-layer perceptron (MLP) network, and $\parallel$ denotes the concatenation of embeddings. The final learned long-term interests representation is a weighted aggregation of the entire interaction history, with weights computed from the above attentive network, formulated as follows,
150
+
151
+ $$
152
+ \boldsymbol {u} _ {l} ^ {t} = \sum_ {j = 1} ^ {t} a _ {j} \cdot \boldsymbol {E} \left(x _ {j} ^ {u}\right). \tag {13}
153
+ $$
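As an illustration of Eqn (10)-(13), a minimal PyTorch-style sketch of the attention pooling is given below; it is not the authors' released implementation, and the layer sizes, the element-wise treatment of $v_j \cdot q_l^u$, and all names are assumptions.

```python
import torch
import torch.nn as nn

class LongTermEncoder(nn.Module):
    """Attention pooling over the whole history (sketch of Eqn (10)-(13))."""
    def __init__(self, dim):
        super().__init__()
        self.W_l = nn.Linear(dim, dim, bias=False)            # Eqn (10): key transform
        self.tau_l = nn.Sequential(nn.Linear(4 * dim, dim),   # Eqn (11): MLP on
                                   nn.ReLU(),                 # [v ; q ; v-q ; v*q]
                                   nn.Linear(dim, 1))

    def forward(self, item_emb, query):
        # item_emb: (B, T, d) embeddings E(x_1..x_t); query: (B, d) query vector q_l^u
        v = self.W_l(item_emb)                                 # (B, T, d)
        q = query.unsqueeze(1).expand_as(v)                    # broadcast query over T
        feats = torch.cat([v, q, v - q, v * q], dim=-1)        # Eqn (11) concatenation
        a = torch.softmax(self.tau_l(feats).squeeze(-1), -1)   # Eqn (12): weights a_j
        return torch.bmm(a.unsqueeze(1), item_emb).squeeze(1)  # Eqn (13): sum_j a_j E(x_j)
```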
154
+
155
+ 3.2.3 Short-term Interests Encoder. Sequential patterns of user interaction play a crucial role in short-term interests modeling, thus we utilize another attentive network on top of a recurrent neural network (RNN). Specifically, we feed the historical item embeddings to an RNN and use the outputs of the RNN as the keys, which can be formulated as follows,
156
+
157
+ $$
158
+ \left\{\boldsymbol {o} _ {1} ^ {\boldsymbol {u}}, \dots , \boldsymbol {o} _ {t} ^ {\boldsymbol {u}} \right\} = \rho \left(\left\{E \left(x _ {1} ^ {\boldsymbol {u}}\right), \dots , E \left(x _ {t} ^ {\boldsymbol {u}}\right) \right\}\right), \tag {14}
159
+ $$
160
+
161
+ $$
162
+ v _ {j} = W _ {s} o _ {j} ^ {u}, \tag {15}
163
+ $$
164
+
165
+ where $W_{s}$ is a transformation matrix and $\rho$ represents an RNN model. In Section 4, we conduct experiments to evaluate different implementations of the RNN model, including LSTM [18], GRU [9] and Time4LSTM [47]. Similar to Eqn (11) and (12), we use $q_{s}^{u,t}$ as the query vector and obtain attention scores $b_{j}$. Then the learned representation for short-term interests can be computed as follows,
166
+
167
+ $$
168
+ \boldsymbol {u} _ {s} ^ {t} = \sum_ {j = 1} ^ {t} b _ {j} \cdot \boldsymbol {o} _ {j} ^ {u}. \tag {16}
169
+ $$
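The short-term encoder of Eqn (14)-(16) can be sketched by running a GRU over the item embeddings and reusing the attention block from the previous sketch on its outputs; again this is illustrative, not the released code, and the choice of GRU for $\rho$ is only one of the variants the paper evaluates.

```python
class ShortTermEncoder(nn.Module):
    """GRU over the history, then attention pooling over the RNN outputs
    (sketch of Eqn (14)-(16)); LSTM or Time4LSTM could replace the GRU."""
    def __init__(self, dim):
        super().__init__()
        self.rnn = nn.GRU(dim, dim, batch_first=True)  # rho in Eqn (14)
        self.attn = LongTermEncoder(dim)               # same attention form; its W_l / tau_l
                                                       # play the roles of W_s / tau_s here

    def forward(self, item_emb, query):
        # item_emb: (B, T, d); query: (B, d) recurrent query q_s^{u,t}
        outputs, _ = self.rnn(item_emb)                # o_1..o_t
        return self.attn(outputs, query)               # Eqn (16): sum_j b_j o_j
```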
170
+
171
+ Although separate encoders are adopted, disentanglement of LS-term interests is not guaranteed since $\boldsymbol{u}_l^t$ and $\boldsymbol{u}_s^t$ are extracted in an unsupervised manner [28]. Particularly, there is no labeled data to supervise the learned interests representations. Therefore, we propose to design contrastive tasks which can achieve disentanglement with self-supervision and overcome the challenge of lacking labeled data.
172
+
173
+ # 3.2.4 Self-supervised Disentanglement of LS-Term Interests.
174
+
175
+ As introduced previously, long-term interests provide a holistic view of user preferences which summarize the entire historical interactions, while short-term interests evolve dynamically over time which reflect recent interactions. Therefore, we can obtain proxies for LS-term interests from the interaction sequences themselves to
176
+
177
+ supervise the two interests encoders. Specifically, we calculate the mean representation of the entire interaction history as the proxy for long-term interests, and use the average representation of recent $k$ interactions as the proxy for short-term interests. Formally, the proxies of LS-term interests for a given user $u$ at timestamp $t$ can be calculated as follows,
178
+
179
+ $$
180
+ \boldsymbol {p} _ {l} ^ {u, t} = \mathbf {M E A N} \left(\left\{x _ {1} ^ {u}, \dots , x _ {t} ^ {u} \right\}\right) = \frac {1}{t} \sum_ {j = 1} ^ {t} E \left(x _ {j} ^ {u}\right), \tag {17}
181
+ $$
182
+
183
+ $$
184
+ \boldsymbol {p} _ {s} ^ {u, t} = \mathbf {M E A N} \left(\left\{x _ {t - k + 1} ^ {u}, \dots , x _ {t} ^ {u} \right\}\right) = \frac {1}{k} \sum_ {j = 1} ^ {k} E \left(x _ {t - j + 1} ^ {u}\right), \tag {18}
185
+ $$
186
+
187
+ where $E(x)$ means the embedding of item $x$. Note that we only calculate proxies when the sequence length is longer than a threshold $l_{t}$, since there is no need to distinguish long and short-term interests if the whole sequence only contains a few items [26]. The threshold $l_{t}$ and the length of the recent-behavior sequence $k$ are hyper-parameters in our method. Furthermore, we use mean pooling here for its simplicity, and the performance turns out to be good enough. In fact, our self-supervised paradigm is capable of exploiting more complex designs for proxies, which we leave for future work.
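A minimal sketch of the proxy computation in Eqn (17)-(18), assuming `item_emb` is a tensor holding the embeddings $E(x_1^u),\dots,E(x_t^u)$ for a batch of sequences (names are illustrative):

```python
def interest_proxies(item_emb, k):
    """Mean of the whole history as the long-term proxy (Eqn (17)) and
    mean of the last k interactions as the short-term proxy (Eqn (18))."""
    # item_emb: (B, T, d) torch tensor of historical item embeddings
    p_long = item_emb.mean(dim=1)               # (B, d), averages all t items
    p_short = item_emb[:, -k:, :].mean(dim=1)   # (B, d), averages the most recent k items
    return p_long, p_short
```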
188
+
189
+ With proxies serving as labels, we can utilize them to supervise the disentanglement of LS-term interests. Specifically, we perform contrastive learning between the encoder outputs and proxies, which requires the learned representations of LS-term interests to be more similar to their corresponding proxies than the opposite proxies. We illustrate the contrastive tasks in Figure 2 (A). Formally, there are four contrastive tasks as follows,
190
+
191
+ $$
192
+ \operatorname {s i m} \left(\boldsymbol {u} _ {l} ^ {t}, \boldsymbol {p} _ {l} ^ {\boldsymbol {u}, t}\right) > \operatorname {s i m} \left(\boldsymbol {u} _ {l} ^ {t}, \boldsymbol {p} _ {s} ^ {\boldsymbol {u}, t}\right), \tag {19}
193
+ $$
194
+
195
+ $$
196
+ \operatorname {s i m} \left(\boldsymbol {p} _ {l} ^ {u, t}, \boldsymbol {u} _ {l} ^ {t}\right) > \operatorname {s i m} \left(\boldsymbol {p} _ {l} ^ {u, t}, \boldsymbol {u} _ {s} ^ {t}\right), \tag {20}
197
+ $$
198
+
199
+ $$
200
+ \operatorname{sim}\left(\boldsymbol{u}_{s}^{t}, \boldsymbol{p}_{s}^{\boldsymbol{u}, t}\right) > \operatorname{sim}\left(\boldsymbol{u}_{s}^{t}, \boldsymbol{p}_{l}^{\boldsymbol{u}, t}\right), \tag {21}
201
+ $$
202
+
203
+ $$
204
+ \operatorname {s i m} \left(\boldsymbol {p} _ {s} ^ {\boldsymbol {u}, t}, \boldsymbol {u} _ {s} ^ {t}\right) > \operatorname {s i m} \left(\boldsymbol {p} _ {s} ^ {\boldsymbol {u}, t}, \boldsymbol {u} _ {l} ^ {t}\right), \tag {22}
205
+ $$
206
+
207
+ where Eqn (19)-(20) supervise long-term interests, Eqn (21)-(22) supervise short-term interests, and $sim(\cdot ,\cdot)$ measures embedding similarity. Taking long-term interests modeling as an example, Eqn (19) encourages the learned long-term interests representation, $\pmb{u}_l^t$, to be more similar to the long-term proxy, $\pmb{p}_l^{u,t}$, than to the short-term proxy, $\pmb{p}_s^{u,t}$. Meanwhile, Eqn (20) requires that $\pmb{u}_l^t$ is closer to $\pmb{p}_l^{u,t}$ compared with the short-term interests representation, $\pmb{u}_s^t$. With four symmetric contrastive tasks on the similarity between encoder outputs and proxies, we add self-supervision on LS-term interests modeling, which can achieve stronger disentanglement compared with existing unsupervised approaches.
208
+
209
+ We implement two pairwise loss functions based on Bayesian Personalized Ranking (BPR) [35] and triplet loss to accomplish contrastive learning in Eqn (19)-(22). Formally, the two loss functions, which use inner product and Euclidean distance to capture embedding similarity, are computed as follows,
210
+
211
+ $$
212
+ \mathcal {L} _ {\mathrm {b p r}} (a, p, q) = \sigma (\langle a, q \rangle - \langle a, p \rangle), \tag {23}
213
+ $$
214
+
215
+ $$
216
+ \mathcal {L} _ {\text {t r i}} (a, p, q) = \max \left\{d (a, p) - d (a, q) + m, 0 \right\}, \tag {24}
217
+ $$
218
+
219
+ where $\sigma$ is the softplus activation function, $\langle \cdot ,\cdot \rangle$ denotes inner product of two embeddings, $d$ denotes the Euclidean distance, and $m$
220
+
221
+ denotes a positive margin value. Both $\mathcal{L}_{bpr}$ and $\mathcal{L}_{tri}$ are designed for making the anchor $a$ more similar to the positive sample $p$ than the negative sample $q$ . Thus the contrastive loss for self-supervised disentanglement of LS-term interests can be computed as follows,
222
+
223
+ $$
224
+ \mathcal {L} _ {\text {c o n}} ^ {u, t} = f \left(\boldsymbol {u} _ {l}, \boldsymbol {p} _ {l}, \boldsymbol {p} _ {s}\right) + f \left(\boldsymbol {p} _ {l}, \boldsymbol {u} _ {l}, \boldsymbol {u} _ {s}\right) + f \left(\boldsymbol {u} _ {s}, \boldsymbol {p} _ {s}, \boldsymbol {p} _ {l}\right) + f \left(\boldsymbol {p} _ {s}, \boldsymbol {u} _ {s}, \boldsymbol {u} _ {l}\right) \tag {25}
225
+ $$
226
+
227
+ where we omit the superscript of interest representations and proxies, and $f$ can be either $\mathcal{L}_{bpr}$ or $\mathcal{L}_{tri}$ .
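The two pairwise losses and the combined contrastive objective of Eqn (23)-(25) can be sketched as follows; the margin default and the function names are assumptions, and the inner product / Euclidean distance follow the text above.

```python
import torch.nn.functional as F

def l_bpr(a, p, q):
    # Eqn (23): softplus of (similarity to negative) minus (similarity to positive),
    # with inner-product similarity
    return F.softplus((a * q).sum(-1) - (a * p).sum(-1))

def l_tri(a, p, q, margin=1.0):
    # Eqn (24): triplet loss with Euclidean distance and margin m
    d_pos = (a - p).norm(dim=-1)
    d_neg = (a - q).norm(dim=-1)
    return F.relu(d_pos - d_neg + margin)

def l_con(u_l, u_s, p_l, p_s, f=l_bpr):
    # Eqn (25): four symmetric terms; f is either l_bpr or l_tri
    return (f(u_l, p_l, p_s) + f(p_l, u_l, u_s)
            + f(u_s, p_s, p_l) + f(p_s, u_s, u_l)).mean()
```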
228
+
229
+ Remark. Users' LS-term interests can also overlap with each other to some extent. For example, a user who only purchases clothes on an e-commerce application tends to have consistent LS-term interests. Therefore, unlike existing disentangled recommendation approaches [43, 49] which add an independence constraint forcing the learned disentangled factors to be dissimilar to each other, we do not include such a regularization term and only supervise the learned representations of LS-term interests to be similar to their corresponding proxies. This is also why we do not use loss functions like InfoNCE [33], which impose too strong a penalty on the similarity between opposite encoders and proxies.
230
+
231
+ In summary, we implement two separate encoders $\phi$ and $\psi$ to learn representations for LS-term interests, respectively. In order to achieve disentanglement of LS-term interests, we compute proxies from the historical interaction sequences. We further propose contrastive-learning loss functions that guide the two encoders only to capture the desired aspect in a self-supervised manner.
232
+
233
+ 3.2.5 Adaptive Fusion for Interaction Prediction. With the learned disentangled representations by self-supervised learning, how to aggregate the two aspects to predict interactions remains a challenge. Simple aggregators, such as sum and concatenation, assume that contributions of LS-term interests are fixed, which is invalid in many cases. In fact, whether long or short-term one is more important depends on the historical sequence. For example, users are mainly driven by short-term interests when they are continuously browsing items from the same category. Meanwhile, the importance of LS-term interests also depends on the target item. For instance, a sports lover may still click on a recommended bicycle due to long-term interests, even after he/she browses several books. Therefore, we include both the historical sequence and the target item as input of the aggregator, where historical sequence is compressed with a GRU. The proposed attention-based adaptive fusion model is illustrated in Figure 2 (D), which dynamically determines the importance of LS-term interests to aggregate $\boldsymbol{u}_l^t$ and $\boldsymbol{u}_s^t$ . Formally, the final fused interests are obtained as follows,
234
+
235
+ $$
236
+ \boldsymbol {h} _ {t} ^ {\boldsymbol {u}} = \operatorname {G R U} \left(\left\{E \left(x _ {1} ^ {\boldsymbol {u}}\right), \dots , E \left(x _ {t} ^ {\boldsymbol {u}}\right) \right\}\right), \tag {26}
237
+ $$
238
+
239
+ $$
240
+ \alpha = \sigma\left(\tau_{f}\left(\boldsymbol{h}_{t}^{u} \| E\left(x_{t + 1}^{u}\right) \| \boldsymbol{u}_{l}^{t} \| \boldsymbol{u}_{s}^{t}\right)\right), \tag {27}
241
+ $$
242
+
243
+ $$
244
+ \boldsymbol{u}^{t} = \alpha \cdot \boldsymbol{u}_{l}^{t} + (1 - \alpha) \cdot \boldsymbol{u}_{s}^{t}, \tag {28}
245
+ $$
246
+
247
+ where $\sigma$ is the sigmoid activation function, and $\tau_f$ is an MLP for fusion. Here $\alpha$ denotes the estimated fusion weight based on the historical interactions, the target item, and the user's LS-term interests.
248
+
249
+ To predict the interaction, we use the widely adopted two-layer MLP [47] shown in Figure 2 (E). Then the estimated score given a user $u$ and an item $v$ at timestamp $t + 1$ can be predicted as follows,
250
+
251
+ $$
252
+ \hat{y}_{u, v}^{t + 1} = \operatorname{MLP}\left(\boldsymbol{u}^{t} \| E(v)\right). \tag {29}
253
+ $$
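A hedged sketch of the fusion network and prediction head in Eqn (26)-(29); the final sigmoid and the hidden sizes are assumptions made so the output can be read as a click probability for Eqn (30), not the released implementation.

```python
import torch
import torch.nn as nn

class FusionAndPredict(nn.Module):
    """Eqn (26)-(29), sketch: GRU summary of the history, fusion weight alpha,
    adaptive mixing of u_l / u_s, and a two-layer MLP scoring head."""
    def __init__(self, dim):
        super().__init__()
        self.gru = nn.GRU(dim, dim, batch_first=True)          # Eqn (26)
        self.tau_f = nn.Sequential(nn.Linear(4 * dim, dim),    # Eqn (27)
                                   nn.ReLU(), nn.Linear(dim, 1))
        self.mlp = nn.Sequential(nn.Linear(2 * dim, dim),      # Eqn (29)
                                 nn.ReLU(), nn.Linear(dim, 1))

    def forward(self, item_emb, target_emb, u_long, u_short):
        h, _ = self.gru(item_emb)
        h_t = h[:, -1, :]                                       # history summary h_t^u
        alpha = torch.sigmoid(self.tau_f(
            torch.cat([h_t, target_emb, u_long, u_short], -1)))  # Eqn (27): fusion weight
        u = alpha * u_long + (1 - alpha) * u_short              # Eqn (28)
        score = torch.sigmoid(self.mlp(torch.cat([u, target_emb], -1)))  # Eqn (29) + sigmoid
        return score.squeeze(-1), alpha.squeeze(-1)
```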
254
+
255
+ Table 1: Statistics of the two datasets used in experiments.
256
+
257
+ | Dataset | Users | Items | Instances | Average Length |
+ |---|---|---|---|---|
+ | Taobao | 36,915 | 64,138 | 1,471,155 | 39.85 |
+ | Kuaishou | 60,813 | 292,286 | 14,952,659 | 245.88 |
258
+
259
+ Following the settings of existing works [47], we use the negative log-likelihood loss function as follows,
260
+
261
+ $$
262
+ \mathcal{L}_{\mathrm{rec}}^{u, t} = - \frac{1}{N} \sum_{v \in O} \left[ y_{u, v}^{t + 1} \log\left(\hat{y}_{u, v}^{t + 1}\right) + \left(1 - y_{u, v}^{t + 1}\right) \log\left(1 - \hat{y}_{u, v}^{t + 1}\right) \right], \tag {30}
263
+ $$
264
+
265
+ where $O$ is the set composed of training pairs of one positive item $x_{t + 1}^{u}$ and $N - 1$ sampled negative items. We train the model in an end-to-end manner with multi-task learning on two objectives. Specifically, the joint loss function with a hyper-parameter $\beta$ to balance objectives, can be formulated as follows,
266
+
267
+ $$
268
+ \mathcal {L} = \sum_ {u = 1} ^ {M} \sum_ {t = 1} ^ {T _ {u}} \left(\mathcal {L} _ {\text {r e c}} ^ {u, t} + \beta \mathcal {L} _ {\text {c o n}} ^ {u, t}\right) + \lambda \| \Theta \| _ {2}, \tag {31}
269
+ $$
270
+
271
+ where $\lambda \| \Theta \|_2$ denotes the $L_2$ regularization for addressing overfitting. The computational complexity of our implementation is $\mathcal{O}((M + N)d + Q)$, where $Q$ denotes the complexity of the MLP and GRU, which is on par with the state-of-the-art SLi-Rec method [47].
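Putting the pieces together, the joint objective of Eqn (30)-(31) can be assembled roughly as below, reusing `l_con` from the earlier sketch; the default values of `beta` and `lam` are placeholders, not the paper's tuned hyper-parameters.

```python
import torch.nn.functional as F

def joint_loss(y_hat, y_true, u_l, u_s, p_l, p_s, params, beta=0.1, lam=1e-4):
    """Sketch of Eqn (30)-(31): recommendation loss + beta * contrastive loss + L2."""
    rec = F.binary_cross_entropy(y_hat, y_true)   # Eqn (30) over positives and sampled negatives
    con = l_con(u_l, u_s, p_l, p_s)               # Eqn (25), from the earlier sketch
    l2 = sum((w ** 2).sum() for w in params)      # lambda * ||Theta||_2 term
    return rec + beta * con + lam * l2
```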
272
+
273
+ # 4 EXPERIMENTS
274
+
275
+ In this section, we conduct experiments to show the effectiveness of the proposed contrastive learning framework. Specifically, we aim to answer the following research questions,
276
+
277
+ - RQ1: How does the proposed framework perform compared with state-of-the-art recommendation models?
278
+ - RQ2: Can CLSR achieve stronger disentanglement of LS-term interests than existing unsupervised baselines?
279
+ - RQ3: What is the effect of different components in CLSR?
280
+
281
+ Datasets. We conduct experiments on two datasets collected from real-world e-commerce and video platforms, Taobao<sup>1</sup> and Kuaishou<sup>2</sup>. Basic statistics of the two datasets are summarized in Table 1, where Average Length indicates the average length of user interaction sequences. We leave the details of the datasets in Section A.1.
282
+
283
+ Baselines and Metrics. We compare CLSR with state-of-the-art methods. With respect to long-term interests modeling, we include NCF [16], DIN [51] and LightGCN [14]. For short-term interests modeling, we compare with Caser [41], GRU4REC [17], DIEN [50], SASRec [20] and SURGE [6]. We also include SLi-Rec [47], which is the state-of-the-art model for LS-term interests modeling. We evaluate the models with two widely adopted accuracy metrics, AUC and GAUC [51], as well as two commonly used ranking metrics, MRR and NDCG@K. We leave the details of baselines, implementations, and hyper-parameters in Section A.2-A.3.
284
+
285
+ # 4.1 Overall Performance Comparison (RQ1)
286
+
287
+ We illustrate the overall performance on two adopted datasets in Table 2. From the results, we have the following observations:
288
+
289
+ Table 2: Overall performance on Taobao and Kuaishou datasets. Underline means the best two baselines, bold means $p$ -value $< 0.05$ , * means $p$ -value $< 0.01$ , and ** means $p$ -value $< 0.001$ .
290
+
291
+ | Category | Method | AUC (Taobao) | GAUC (Taobao) | MRR (Taobao) | NDCG@2 (Taobao) | AUC (Kuaishou) | GAUC (Kuaishou) | MRR (Kuaishou) | NDCG@2 (Kuaishou) |
+ |---|---|---|---|---|---|---|---|---|---|
+ | Long-term | NCF | 0.7128 | 0.7221 | 0.1446 | 0.0829 | 0.5559 | 0.5531 | 0.7734 | 0.8327 |
+ | Long-term | DIN | 0.7637 | 0.8524 | 0.3091 | 0.2352 | 0.6160 | 0.7483 | 0.8863 | 0.9160 |
+ | Long-term | LightGCN | 0.7483 | 0.7513 | 0.1669 | 0.1012 | 0.6403 | 0.6407 | 0.8175 | 0.8653 |
+ | Short-term | Caser | 0.8312 | 0.8499 | 0.3508 | 0.2890 | 0.7795 | 0.8097 | 0.9100 | 0.9336 |
+ | Short-term | GRU4REC | 0.8635 | 0.8680 | 0.3993 | 0.3422 | 0.8156 | 0.8298 | 0.9166 | 0.9384 |
+ | Short-term | DIEN | 0.8477 | 0.8745 | 0.4011 | 0.3404 | 0.7037 | 0.7800 | 0.9030 | 0.9284 |
+ | Short-term | SASRec | 0.8598 | 0.8635 | 0.3915 | 0.3340 | 0.8199 | 0.8293 | 0.9161 | 0.9380 |
+ | Short-term | SURGE | 0.8906 | 0.8888 | 0.4228 | 0.3625 | 0.8525 | 0.8610 | 0.9316 | 0.9495 |
+ | LS-term | SLi-Rec | 0.8664 | 0.8669 | 0.3617 | 0.2971 | 0.7978 | 0.8128 | 0.9075 | 0.9318 |
+ | LS-term | Ours | 0.8953** | 0.8936** | 0.4372** | 0.3788** | 0.8563 | 0.8718 | 0.9382* | 0.9544* |
292
+
293
+ - Short-term models generally perform better than long-term models. Long-term models fail to capture temporal patterns of user interactions, hence their performance is rather poor. From the results, we can observe that the AUCs of NCF, DIN, and LightGCN are all less than 0.8 on Taobao dataset and less than 0.7 on Kuaishou dataset. On the other hand, short-term models outperform long-term models in most cases. For example, SURGE is the best baseline on both datasets, which uses graph convolutional propagation and graph pooling to capture the dynamics of user interests. The better performance of short-term models comes from their ability to capture the sequential pattern of user interactions. In fact, we conduct data analysis on the interaction sequences and discover that, on average, over $31\%$ of interacted items are of the same category as the previous item, which verifies the sequential pattern and explains the better performance of short-term models.
294
+
295
+ - Joint modeling of LS-term interests does not always bring performance gains. SLi-Rec is the SOTA approach that models both LS-term interests. However, the two aspects are entangled with each other, which increases model redundancy and leads to inferior accuracy. Results demonstrate that SLi-Rec is not consistently effective across different metrics and datasets. For example, SLi-Rec is the best baseline on Taobao dataset with respect to AUC, but its ranking performance is poorer than GRU4REC by about $10\%$ , indicating that it is insufficient to disentangle LS-term interests with no explicit supervision.
296
+ - Disentangled modeling of LS-term interests can achieve significant improvements. CLSR outperforms baselines with significant progress. Specifically, CLSR improves GAUC by about 0.005 ($p$-value $< 0.001$) on Taobao dataset and 0.01 ($p$-value $< 0.05$) on Kuaishou dataset, against SOTA methods. Besides, NDCG is improved by about $5\%$ on Taobao dataset. The consistent and significant improvements indicate that disentangling LS-term interests is critical for accurate recommendation.
297
+
298
+ # 4.2 Study on Disentanglement of Long and Short-Term Interests (RQ2)
299
+
300
+ Both SLi-Rec and CLSR explicitly model LS-term interests; however, CLSR achieves the best performance while SLi-Rec shows inferior accuracy. We argue that this is because SLi-Rec entangles LS-term
301
+
302
+ ![](images/0307e7adca681a02ca28b4d8cd59081c60122f13e31f1bc9a04a8c716908017f.jpg)
303
+ Figure 3: Comparison of using single and both interests between CLSR and SLi-Rec.
304
+
305
+ ![](images/f4e6eb761689f4dda09c52199f1d5018da594ee774d094e6d803767ee1d5c471.jpg)
306
+
307
+ interests which increases the internal dependency of the model and leads to poor performance. On the contrary, CLSR disentangles LS-term interests with the help of self-supervision. In this section, we empirically prove that stronger disentanglement of LS-term interests is indeed achieved by CLSR.
308
+
309
+ 4.2.1 Performance of One-side Interests. In CLSR, we utilize two separate representations for LS-term interests. Therefore, it is crucial that each side only captures the desired single aspect. In order to evaluate the effectiveness of each side, we keep one side of the interests and discard the other side for both CLSR and SLi-Rec. Results on two datasets are illustrated in Figure 3, from which we can observe that CLSR outperforms SLi-Rec in all cases. Specifically, on Taobao dataset, CLSR improves AUC against SLi-Rec by about 0.03 with short-term interests and full interests. On Kuaishou dataset, the improvements of AUC are about 0.1, 0.2, and 0.4 for long-term interests, short-term interests, and full interests, respectively. This indicates that CLSR attains more meaningful representations for both LS-term interests. Moreover, for both methods on both datasets, combining LS-term interests achieves better performance than using one-side interests. This further supports our motivation to model both long and short-term interests for accurate recommendation.
310
+
311
+ 4.2.2 Counterfactual Evaluation. Learning disentangled representations of underlying factors is very helpful especially when the importance of different factors changes [37, 38, 49]. For example, behaviors of higher costs, such as purchase (cost of money) in Taobao dataset and like (cost of time) in Kuaishou dataset, tend to be more driven by users' long-term interests, and behaviors of lower costs such as click in both datasets indicate more about short-term interests, which has been acknowledged by existing works [11]. Therefore, to investigate whether CLSR achieves disentanglement of LS-term interests, we design counterfactual evaluations where
312
+
313
+ Table 3: Comparison between CLSR and SLi-Rec on predicting click and purchase/like.
314
+
315
+ | Dataset | Method | AUC (Click) | AVG(α) (Click) | AUC (Purchase/Like) | AVG(α) (Purchase/Like) |
+ |---|---|---|---|---|---|
+ | Taobao | SLi-Rec | 0.8572 | 0.4651 | 0.8288 | 0.4350 (-6.47%) |
+ | Taobao | CLSR | 0.8885 | 0.3439 | 0.8616 | 0.3568 (+3.75%) |
+ | Kuaishou | SLi-Rec | 0.8153 | 0.7259 | 0.7924 | 0.7543 (+3.91%) |
+ | Kuaishou | CLSR | 0.8618 | 0.2528 | 0.7946 | 0.2757 (+9.06%) |
316
+
317
+ Table 4: Counterfactual evaluation under shuffle protocol.
318
+
319
+ | Dataset | Method | AUC (Click) | MRR (Click) | AUC (Purchase/Like) | MRR (Purchase/Like) |
+ |---|---|---|---|---|---|
+ | Taobao | SLi-Rec | 0.8092 | 0.2292 | 0.8480 | 0.3151 |
+ | Taobao | CLSR | 0.8413 | 0.2744 | 0.8790 | 0.4194 |
+ | Kuaishou | SLi-Rec | 0.7992 | 0.9088 | 0.8165 | 0.9113 |
+ | Kuaishou | CLSR | 0.8431 | 0.9380 | 0.8197 | 0.9167 |
320
+
321
+ the importance of different interests changes. Specifically, we use models well-trained on click data to predict both clicked items and purchased/liked items, where the importance of LS-term interests is different. Since purchase/like behavior reflects more long-term interests, the importance of long-term interests is supposed to be higher when predicting purchase/like than click. In other words, when the model predicts purchase/like behavior, the attention weight for long-term interests when fusing the two aspects, i.e., $\alpha$, is expected to be larger than when predicting click behavior.
322
+
323
+ Table 3 illustrates the AUC and the average of $\alpha$ for clicked items and purchased/liked items. We have the following findings:
324
+
325
+ - CLSR outperforms SLi-Rec for all behaviors. Although predicting purchase/like with models trained on click data is challenging, the AUC of CLSR is significantly larger than that of SLi-Rec by over 0.03. Meanwhile, the average $\alpha$ of CLSR is much lower in all cases, unlike SLi-Rec, whose average $\alpha$ is even over 0.7 on Kuaishou dataset. In fact, low $\alpha$ in CLSR is consistent with previous findings in Table 2 that long-term interests are less important than short-term interests, which means that LS-term interests are successfully disentangled in CLSR. On the contrary, high $\alpha$ in SLi-Rec indicates that the learned long-term interests representations contain much information of the undesired short-term interests, i.e., the two aspects entangle with each other.
326
+ - Since purchase/like reflects more long-term interests than click, $\alpha$ is supposed to be larger when predicting purchase/like. On Taobao dataset, $\alpha$ of CLSR for purchase behavior is larger than click by about $4\%$. However, for SLi-Rec, $\alpha$ for purchase is even less than click by over $6\%$. On Kuaishou dataset, though $\alpha$ for like is larger than click in both SLi-Rec and CLSR, the relative increment of $\alpha$ for CLSR is over two times larger than that of SLi-Rec ($+9.06\%$ vs. $+3.91\%$). This further validates that CLSR achieves much stronger disentanglement of LS-term interests.
327
+
328
+ Meanwhile, we also evaluate under special cases where long or short-term interests are blocked by re-arranging interaction sequences with two protocols, namely shuffle and truncate, as illustrated in Figure 4. The details are as follows.
329
+
330
+ - Shuffle: The historical sequence is randomly shuffled, and thus short-term interests are removed under this protocol.
331
+
332
+ ![](images/ba7792319ee9c63bb37024965ed59c152d1daf147825db55fc10c62dc4e6d1c9.jpg)
333
+ factual: original
334
+
335
+ ![](images/c77046349c1ffd64785aa8abfd2c83a79b7e1058c8263cd21ef137c17eb4c502.jpg)
336
+ counterfactual: shuffle
337
+
338
+ ![](images/9b89ca71c24a4ff6839ad6699ff3e6e572cff3bc21dd0e2042db79c51c8f3702.jpg)
339
+ counterfactual: truncate
340
+
341
+ ![](images/bbe5f735aab637734e2437f5d589477b511c19025cedb5d8173049849dec10b1.jpg)
342
+ Figure 4: Counterfactual evaluation. Shuffle: short-term interests are removed by shuffling. Truncate: long-term interests are weakened by discarding early history.
343
+
344
+ ![](images/671e36998799b122a9a8b2f4c0cbcce9271ef13bd19a34fa8b01fe3d56e12c8b.jpg)
345
+ Figure 5: Counterfactual evaluation under truncate protocol. (a) CLSR. (b) CLSR with only long-term interests.
346
+
347
+ - Truncate: Early history is discarded and only recent history is available. Thus long-term interests are weakened.
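+ For concreteness, the two protocols above amount to simple re-arrangements of a user's interaction sequence. Below is a minimal sketch; the list-of-item-ids representation and the cut-off length are illustrative assumptions, not the authors' exact pipeline:
+
+ ```python
+ import random
+
+ def shuffle_protocol(history, seed=0):
+     """Counterfactual 'shuffle': destroy the sequential order so that
+     short-term (recency-based) interests can no longer take effect."""
+     shuffled = list(history)
+     random.Random(seed).shuffle(shuffled)
+     return shuffled
+
+ def truncate_protocol(history, k):
+     """Counterfactual 'truncate': keep only the k most recent interactions,
+     weakening long-term interests that live in the early history."""
+     return list(history)[-k:]
+
+ history = [101, 205, 33, 47, 980, 512, 64]   # toy item-id sequence
+ print(shuffle_protocol(history))             # order destroyed
+ print(truncate_protocol(history, k=3))       # only recent history kept
+ ```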
348
+
349
+ Table 4 shows the results under the shuffle protocol on the two datasets. Since the shuffling operation blocks short-term interests, predicting click behavior is much more difficult than in the original case, while predicting purchase behavior is relatively easier. We can observe that the results in Table 4, compared with Table 3, are consistent with this expectation. Specifically, for both SLi-Rec and CLSR, AUC decreases by over 0.04 on the click-prediction task and increases by about 0.02 on the purchase/like-prediction task. Meanwhile, CLSR improves the AUC of click prediction by over 0.04, and improves the MRR of purchase prediction by over $30\%$ , against SLi-Rec. Although short-term interests are invalid under this protocol, CLSR can still achieve better performance since LS-term interests are disentangled and long-term interests can still take effect.
350
+
351
+ We further present the results of CLSR under the truncate protocol with varying available length $(k)$ of historical sequences in Figure 5 (a). We can observe that the performance of purchase prediction improves significantly as $k$ grows. Meanwhile, the performance of click prediction increases much more slowly when $k$ grows larger. This observation verifies our assumption that short-term interests can be effectively captured by mining the recent history, while for long-term interests, it is essential to take the entire history into consideration. In addition, we also show the performance of only using the long-term interests representation under the truncate protocol in Figure 5 (b). We can find that the accuracy of purchase prediction increases drastically as $k$ gets larger, while the accuracy of click prediction barely changes. The different trends of the click and purchase tasks confirm that the learned long-term interest representations only capture the desired long-term interests and filter out short-term interests.
352
+
353
+ In summary, by comparing CLSR and SLi-Rec, which both explicitly model LS-term interests, we empirically show that disentanglement of the two aspects is the reason for the better recommendation performance. Moreover, disentangling LS-term interests in an unsupervised way is insufficient, and CLSR effectively overcomes the challenge of lacking labeled data with self-supervision.
354
+
355
+ ![](images/b08fbb7b1a1c02c8fee107187dd20573430752513d4368ba8b4a008b07ecd1c2.jpg)
356
+
357
+ ![](images/42e6195baad0decccef757a2c3923173588df622d11612569f3eac3685c80c59.jpg)
358
+ Figure 6: (a) Ablation study of contrastive loss. (b) Hyperparameter study of $\beta$ .
359
+
360
+ ![](images/e1b6c1a50b4609e16a346daca0432697906c194c1407a731043cf0fa4bd4200b.jpg)
361
+ Figure 7: Comparison between adaptive and fixed fusion.
362
+
363
+ ![](images/97f948fbda33155c50358f2ab613c5c290be47f084f9c8e09047481be604a15e.jpg)
364
+
365
+ # 4.3 Ablation and Hyper-parameter Study (RQ3)
366
+
367
+ 4.3.1 Contrastive Learning. Contrastive tasks on the similarity between the learned representations and the proxies for LS-term interests help achieve stronger disentanglement than existing unsupervised methods. We conduct an ablation study to compare the performance of CLSR with and without the contrastive loss $\mathcal{L}_{\mathrm{con}}$ . In addition, we also evaluate the performance of replacing the short-term interests encoder $\psi$ with DIEN. Figure 6 (a) illustrates the results on the Kuaishou dataset. We can find that the GAUC of CLSR drops by over 0.01 after removing the contrastive tasks, which verifies the necessity of self-supervision. Meanwhile, adding self-supervision can also significantly improve the performance of DIEN, which means that CLSR can serve as a general framework to disentangle LS-term interests for existing recommendation models. We also investigate the performance under different loss weights of $\mathcal{L}_{\mathrm{con}}$ . Figure 6 (b) illustrates the results on the Kuaishou dataset. We can observe that 0.1 is an optimal value, and an overly large $\beta$ may conflict with the main interaction prediction task, which leads to low accuracy.
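+ As a rough illustration of how the self-supervised term enters training, the sketch below combines a main recommendation loss with a BPR-style contrastive loss between the learned LS-term representations and their proxies, weighted by $\beta$ . The tensor names and the exact pairing of positives and negatives are illustrative assumptions, not the paper's exact implementation:
+
+ ```python
+ import numpy as np
+
+ def bpr_contrastive(anchor, positive, negative):
+     """BPR-style term: the anchor representation should score higher with
+     its own proxy (positive) than with the other interest's proxy (negative)."""
+     pos = np.sum(anchor * positive, axis=-1)
+     neg = np.sum(anchor * negative, axis=-1)
+     return -np.mean(np.log(1.0 / (1.0 + np.exp(-(pos - neg))) + 1e-8))
+
+ def joint_loss(rec_loss, u_long, u_short, proxy_long, proxy_short, beta=0.1):
+     """Main interaction-prediction loss plus the weighted contrastive term;
+     beta = 0.1 is the best value reported in Figure 6 (b)."""
+     l_con = (bpr_contrastive(u_long, proxy_long, proxy_short)
+              + bpr_contrastive(u_short, proxy_short, proxy_long))
+     return rec_loss + beta * l_con
+ ```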
368
+
369
+ 4.3.2 Adaptive Fusion of LS-Term Interests. In CLSR, we propose to aggregate LS-term interests adaptively according to the target item and the historical sequence. Here we investigate whether this adaptive fusion is effective. To be specific, we compare with a static version, which uses a fixed $\alpha$ when combining the two aspects. Figure 7 shows the recommendation performance on the two datasets, where the dashed line represents the performance of adaptive fusion. We can discover that adaptive fusion outperforms all fixed values of $\alpha$ . These results verify the necessity of adaptive fusion of LS-term interests, and our proposed attention-based network successfully accomplishes this goal.
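+ The difference between the two variants can be summarized as follows; the single-layer predictor standing in for the paper's attention MLP and all tensor shapes are illustrative assumptions:
+
+ ```python
+ import numpy as np
+
+ def sigmoid(x):
+     return 1.0 / (1.0 + np.exp(-x))
+
+ def fixed_fusion(u_long, u_short, alpha=0.5):
+     """Static baseline: one hand-picked, constant weight for long-term interests."""
+     return alpha * u_long + (1.0 - alpha) * u_short
+
+ def adaptive_fusion(u_long, u_short, seq_state, target_emb, w, b=0.0):
+     """Adaptive fusion: alpha is predicted per instance from the target item,
+     a summary of the historical sequence, and both interest representations."""
+     feat = np.concatenate([seq_state, target_emb, u_long, u_short], axis=-1)
+     alpha = sigmoid(feat @ w + b)                    # shape: (batch, 1)
+     return alpha * u_long + (1.0 - alpha) * u_short
+
+ # toy usage with 40-d representations, matching the embedding size in A.3
+ rng = np.random.default_rng(0)
+ batch, d = 4, 40
+ u_l, u_s, s, t = (rng.normal(size=(batch, d)) for _ in range(4))
+ w = rng.normal(size=(4 * d, 1)) * 0.01
+ print(adaptive_fusion(u_l, u_s, s, t, w).shape)      # (4, 40)
+ ```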
370
+
371
+ To conclude, we conduct extensive experiments to show the superior performance of the proposed CLSR model. Counterfactual evaluations demonstrate that LS-term interests are successfully disentangled. More experimental results are left in Section A.4.
372
+
373
+ # 5 RELATED WORK
374
+
375
+ LS-Term Interests Modeling in Recommendation. Traditional Markov chain-based methods [36] and advanced deep learning models [17, 20, 24, 25, 30, 40, 41, 50, 55] fail to distinguish between LS-term interests, since a unified representation is insufficient to fully capture user interests. Therefore, several methods [2, 11, 19, 29, 47, 48] were proposed that explicitly differentiate between LS-term interests. For example, Zhao et al. [48] use matrix factorization for long-term interests and an RNN for short-term interests. Yu et al. [47] develop a variant of LSTM for short-term interests and adopt asymmetric SVD [22] for long-term interests. However, disentanglement of LS-term interests is not guaranteed, since these approaches impose no supervision on the learned interest representations. Unlike existing unsupervised approaches, we propose a self-supervised method that attains stronger disentanglement of long- and short-term interests.
378
+
379
+ Self-supervised Learning in Recommendation. Self-supervised learning [4, 7, 8, 12, 13] was recently adopted by several recommendation algorithms [32, 45, 46, 52]. For example, Zhou et al. [52] developed a self-supervised sequential recommender based on mutual information maximization, and Ma et al. [32] proposed to supervise sequential encoders with latent intention prototypes. However, those methods ignore the differences between long- and short-term interests, which are crucial for accurate recommendation. In our paper, we design a self-supervised learning method to disentangle long- and short-term interests for recommendation.
380
+
381
+ Disentanglement in Recommendation. Disentangled representation learning in recommendation was largely unexplored until recently [31, 42, 43, 49]. Ma et al. [31] propose to learn users' multiple preferences based on Variational Auto-Encoders. Wang et al. [42] leverage a Knowledge Graph to learn different user intentions and regularize them to differ from each other. However, most of these works fail to impose specific semantics on the learned representations because of the lack of labeled data, i.e. they perform unsupervised disentanglement, which has been shown to be ineffective [28]. In this paper, we propose to disentangle with self-supervision by designing contrastive tasks between the learned representations and interest proxies extracted from the original interaction sequences.
382
+
383
+ # 6 CONCLUSION AND FUTURE WORK
384
+
385
+ In this paper, we propose to disentangle long and short-term interests for recommendation with a contrastive learning framework, CLSR. Extensive experiments and counterfactual evaluations on two large-scale datasets demonstrate that CLSR consistently outperforms SOTA baselines with significant improvements. More importantly, we empirically show that unsupervised LS-term interests modeling can easily entangle the two aspects and lead to even poorer performance. With the help of self-supervision, CLSR can effectively disentangle LS-term interests and achieve much better performance. As for future work, CLSR can be easily extended since it is a highly general framework. For example, other designs of encoders or proxies can be explored. Deploying the proposed method to industrial systems is another important future work.
386
+
387
+ # ACKNOWLEDGMENTS
388
+
389
+ This work is supported in part by The National Key Research and Development Program of China under grant 2020AAA0106000. This work is also supported in part by the National Natural Science Foundation of China under 61972223, U1936217, 61971267 and U20B2060.
390
+
391
+ # REFERENCES
392
+
393
+ [1] Martin Abadi, Paul Barham, Jianmin Chen, Zhifeng Chen, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Geoffrey Irving, Michael Isard, et al. 2016. Tensorflow: A system for large-scale machine learning. In 12th {USENIX} symposium on operating systems design and implementation ( {OSDI} 16). 265-283.
394
+ [2] Mingxiao An, Fangzhao Wu, Chuhan Wu, Kun Zhang, Zheng Liu, and Xing Xie. 2019. Neural news recommendation with long-and short-term user representations. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics. 336-345.
395
+ [3] Andreas Argyriou, Miguel González-Fierro, and Le Zhang. 2020. Microsoft Recommenders: Best Practices for Production-Ready Recommendation Systems. In Companion Proceedings of the Web Conference 2020. 50-51.
396
+ [4] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. 2020. Unsupervised learning of visual features by contrasting cluster assignments. arXiv preprint arXiv:2006.09882 (2020).
397
+ [5] Yukuo Cen, Jianwei Zhang, Xu Zou, Chang Zhou, Hongxia Yang, and Jie Tang. 2020. Controllable multi-interest framework for recommendation. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 2942-2951.
398
+ [6] Jianxin Chang, Chen Gao, Yu Zheng, Yiqun Hui, Yanan Niu, Yang Song, Depeng Jin, and Yong Li. 2021. Sequential Recommendation with Graph Neural Networks. In Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 378-387.
399
+ [7] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. 2020. A simple framework for contrastive learning of visual representations. arXiv preprint arXiv:2002.05709 (2020).
400
+ [8] Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. 2020. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297 (2020).
401
+ [9] Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014).
402
+ [10] Paul Covington, Jay Adams, and Emre Sargin. 2016. Deep neural networks for youtube recommendations. In Proceedings of the 10th ACM conference on recommender systems. 191-198.
403
+ [11] Mihajlo Grbovic and Haibin Cheng. 2018. Real-time personalization using embeddings for search ranking at Airbnb. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 311-320.
404
+ [12] Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. 2020. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733 (2020).
405
+ [13] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. 2020. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 9729-9738.
406
+ [14] Xiangnan He, Kuan Deng, Xiang Wang, Yan Li, Yongdong Zhang, and Meng Wang. 2020. Lightgcn: Simplifying and powering graph convolution network for recommendation. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 639-648.
407
+ [15] Xiangnan He, Xiaoyu Du, Xiang Wang, Feng Tian, Jinhui Tang, and Tat-Seng Chua. 2018. Outer product-based neural collaborative filtering. arXiv preprint arXiv:1808.03912 (2018).
408
+ [16] Xiangnan He, Lizi Liao, Hanwang Zhang, Liqiang Nie, Xia Hu, and Tat-Seng Chua. 2017. Neural collaborative filtering. In Proceedings of the 26th international conference on world wide web. 173-182.
409
+ [17] Balázs Hidasi, Alexandros Karatzoglou, Linas Baltrunas, and Domonkos Tikk. 2015. Session-based recommendations with recurrent neural networks. arXiv preprint arXiv:1511.06939 (2015).
410
+ [18] Sepp Hochreiter and Jürgen Schmidhuber. 1997. Long short-term memory. Neural computation 9, 8 (1997), 1735-1780.
411
+ [19] Linmei Hu, Chen Li, Chuan Shi, Cheng Yang, and Chao Shao. 2020. Graph neural news recommendation with long-term and short-term interest modeling. Information Processing & Management 57, 2 (2020), 102142.
412
+ [20] Wang-Cheng Kang and Julian McAuley. 2018. Self-attentive sequential recommendation. In 2018 IEEE International Conference on Data Mining (ICDM). IEEE, 197-206.
413
+ [21] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014).
414
+ [22] Yehuda Koren. 2008. Factorization meets the neighborhood: a multifaceted collaborative filtering model. In Proceedings of the 14th ACM SIGKDD international conference on Knowledge discovery and data mining, 426-434.
415
+ [23] Yehuda Koren, Robert Bell, and Chris Volinsky. 2009. Matrix factorization techniques for recommender systems. Computer 42, 8 (2009), 30-37.
416
+ [24] Jing Li, Pengjie Ren, Zhumin Chen, Zhaochun Ren, Tao Lian, and Jun Ma. 2017. Neural attentive session-based recommendation. In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management. 1419-1428.
417
+ [25] Jiacheng Li, Yujie Wang, and Julian McAuley. 2020. Time interval aware self-attention for sequential recommendation. In Proceedings of the 13th International Conference on Web Search and Data Mining. 322-330.
420
+ [26] Lei Li, Li Zheng, and Tao Li. 2011. Logo: a long-short user interest integration in personalized news recommendation. In Proceedings of the fifth ACM conference on Recommender systems. 317-320.
421
+ [27] Yongqi Li, Meng Liu, Jianhua Yin, Chaoran Cui, Xin-Shun Xu, and Liqiang Nie. 2019. Routing micro-videos via a temporal graph-guided recommendation system. In Proceedings of the 27th ACM International Conference on Multimedia. 1464-1472.
422
+ [28] Francesco Locatello, Stefan Bauer, Mario Lucic, Gunnar Raetsch, Sylvain Gelly, Bernhard Scholkopf, and Olivier Bachem. 2019. Challenging common assumptions in the unsupervised learning of disentangled representations. In international conference on machine learning. PMLR, 4114-4124.
423
+ [29] Fuyu Lv, Taiwei Jin, Changlong Yu, Fei Sun, Quan Lin, Keping Yang, and Wilfred Ng. 2019. SDM: Sequential deep matching model for online large-scale recommender system. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 2635-2643.
424
+ [30] Chen Ma, Liheng Ma, Yingxue Zhang, Jianing Sun, Xue Liu, and Mark Coates. 2020. Memory augmented graph neural networks for sequential recommendation. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 34. 5045-5052.
425
+ [31] Jianxin Ma, Chang Zhou, Peng Cui, Hongxia Yang, and Wenwu Zhu. 2019. Learning disentangled representations for recommendation. In Proceedings of the 33rd International Conference on Neural Information Processing Systems. 5711-5722.
426
+ [32] Jianxin Ma, Chang Zhou, Hongxia Yang, Peng Cui, Xin Wang, and Wenwu Zhu. 2020. Disentangled Self-Supervision in Sequential Recommenders. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 483-491.
427
+ [33] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 2018. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748 (2018).
428
+ [34] Qi Pi, Weijie Bian, Guorui Zhou, Xiaoqiang Zhu, and Kun Gai. 2019. Practice on long sequential user behavior modeling for click-through rate prediction. In Proceedings of the 25th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 2671-2679.
429
+ [35] Steffen Rendle, Christoph Freudenthaler, Zeno Gantner, and Lars Schmidt-Thieme. 2012. BPR: Bayesian personalized ranking from implicit feedback. arXiv preprint arXiv:1205.2618 (2012).
430
+ [36] Steffen Rendle, Christoph Freudenthaler, and Lars Schmidt-Thieme. 2010. Factorizing personalized markov chains for next-basket recommendation. In Proceedings of the 19th international conference on World wide web. 811-820.
431
+ [37] Bernhard Schölkopf. 2019. Causality for machine learning. arXiv preprint arXiv:1911.10500 (2019).
432
+ [38] Bernhard Scholkopf, Francesco Locatello, Stefan Bauer, Nan Rosemary Ke, Nal Kalchbrenner, Anirudh Goyal, and Yoshua Bengio. 2021. Toward causal representation learning. Proc. IEEE 109, 5 (2021), 612-634.
433
+ [39] Weiping Song, Chence Shi, Zhiping Xiao, Zhijian Duan, Yewen Xu, Ming Zhang, and Jian Tang. 2019. Autoint: Automatic feature interaction learning via self-attentive neural networks. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 1161–1170.
434
+ [40] Fei Sun, Jun Liu, Jian Wu, Changhua Pei, Xiao Lin, Wenwu Ou, and Peng Jiang. 2019. BERT4Rec: Sequential recommendation with bidirectional encoder representations from transformer. In Proceedings of the 28th ACM International Conference on Information and Knowledge Management. 1441-1450.
435
+ [41] Jiaxi Tang and Ke Wang. 2018. Personalized top-n sequential recommendation via convolutional sequence embedding. In Proceedings of the Eleventh ACM International Conference on Web Search and Data Mining, 565-573.
436
+ [42] Xiang Wang, Tinglin Huang, Dingxian Wang, Yancheng Yuan, Zhenguang Liu, Xiangnan He, and Tat-Seng Chua. 2021. Learning Intentions behind Interactions with Knowledge Graph for Recommendation. In Proceedings of the Web Conference 2021. 878-887.
437
+ [43] Xiang Wang, Hongye Jin, An Zhang, Xiangnan He, Tong Xu, and Tat-Seng Chua. 2020. Disentangled graph collaborative filtering. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval. 1001-1010.
438
+ [44] Yifan Wang, Suyao Tang, Yuntong Lei, Weiping Song, Sheng Wang, and Ming Zhang. 2020. Disenhan: Disentangled heterogeneous graph attention network for recommendation. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management. 1605-1614.
439
+ [45] Xu Xie, Fei Sun, Zhaoyang Liu, Jinyang Gao, Bolin Ding, and Bin Cui. 2020. Contrastive Pre-training for Sequential Recommendation. arXiv e-prints (2020), arXiv-2010.
440
+ [46] Xin Xin, Alexandros Karatzoglou, Ioannis Arapakis, and Joemon M Jose. 2020. Self-Supervised Reinforcement Learning for Recommender Systems. arXiv preprint arXiv:2006.05779 (2020).
441
+ [47] Zeping Yu, Jianxun Lian, Ahmad Mahmoody, Gongshen Liu, and Xing Xie. 2019. Adaptive User Modeling with Long and Short-Term Preferences for Personalized Recommendation. In IJCAI. 4213-4219.
442
+ [48] Wei Zhao, Benyou Wang, Jianbo Ye, Yongqiang Gao, Min Yang, and Xiaojun Chen. 2018. PLASTIC: Prioritize Long and Short-term Information in Top-n Recommendation using Adversarial Training. In IJCAI. 3676-3682.
443
+
444
+ [49] Yu Zheng, Chen Gao, Xiang Li, Xiangnan He, Yong Li, and Depeng Jin. 2021. Disentangling User Interest and Conformity for Recommendation with Causal Embedding. In Proceedings of the Web Conference 2021. 2980-2991.
445
+ [50] Guorui Zhou, Na Mou, Ying Fan, Qi Pi, Weijie Bian, Chang Zhou, Xiaoqiang Zhu, and Kun Gai. 2019. Deep interest evolution network for click-through rate prediction. In Proceedings of the AAAI conference on artificial intelligence, Vol. 33, 5941-5948.
446
+ [51] Guorui Zhou, Xiaoqiang Zhu, Chenru Song, Ying Fan, Han Zhu, Xiao Ma, Yanghui Yan, Junqi Jin, Han Li, and Kun Gai. 2018. Deep interest network for click-through rate prediction. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 1059-1068.
447
+ [52] Kun Zhou, Hui Wang, Wayne Xin Zhao, Yutao Zhu, Sirui Wang, Fuzheng Zhang, Zhongyuan Wang, and Ji-Rong Wen. 2020. S3-rec: Self-supervised learning for sequential recommendation with mutual information maximization. In Proceedings of the 29th ACM International Conference on Information & Knowledge Management. 1893-1902.
450
+ [53] Han Zhu, Daqing Chang, Ziru Xu, Pengye Zhang, Xiang Li, Jie He, Han Li, Jian Xu, and Kun Gai. 2019. Joint optimization of tree-based index and deep model for recommender systems. In Advances in Neural Information Processing Systems. 3971-3980.
451
+ [54] Han Zhu, Xiang Li, Pengye Zhang, Guozheng Li, Jie He, Han Li, and Kun Gai. 2018. Learning tree-based deep model for recommender systems. In Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. 1079-1088.
452
+ [55] Yu Zhu, Hao Li, Yikang Liao, Beidou Wang, Ziyu Guan, Haifeng Liu, and Deng Cai. 2017. What to Do Next: Modeling User Behaviors by Time-LSTM.. In IJCAI, Vol. 17. 3602-3608.
453
+
454
+ # A APPENDIX
455
+
456
+ # A.1 Datasets
457
+
458
+ We use two datasets to conduct experiments, including a public e-commerce dataset and an industrial short-video dataset, which are also adopted by the SOTA sequential recommendation model, SURGE [6]. Both are million-scale and collected from real-world applications.
459
+
460
+ The details of the adopted datasets are introduced as follows,
461
+
462
+ - Taobao<sup>3</sup>. This dataset [54] is collected from the largest e-commerce platform in China, and it is widely used as a benchmark dataset for recommendation research [34, 53, 54]. It contains the user behaviors, including click, cart, and purchase from November 25 to December 3, 2017. We use the click data and adopt 10-core settings to filter out inactive entities. To evaluate the recommendation performance, we use all the instances till December 1 as training data. We use the instances on December 2 for validation and evaluate the final performance with the instances on December 3.
463
+
464
+ - Kuaishou<sup>4</sup>. This industrial dataset is collected from Kuaishou APP, one of the largest short-video platforms in China. Users can browse short videos uploaded by other users. We extract a subset of the logs from October 22 to October 28, 2020. The dataset contains user interactions with short videos, including click, like, follow (subscribe), and forward. We use the click data and also adopt 10-core settings to guarantee data quality. We keep the instances of the first 6 days as training set, and reserve the last day for validation (before $12\mathrm{pm}$ ) and test (after $12\mathrm{pm}$ ).
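+ The preprocessing described above can be sketched as follows; the column names (`user_id`, `item_id`, `timestamp`) are assumptions about the raw logs rather than the released schema:
+
+ ```python
+ import pandas as pd
+
+ def k_core_filter(df, k=10, user_col="user_id", item_col="item_id"):
+     """Iteratively drop users and items with fewer than k interactions
+     (the '10-core' setting) until the interaction table stops shrinking."""
+     while True:
+         before = len(df)
+         df = df.groupby(user_col).filter(lambda g: len(g) >= k)
+         df = df.groupby(item_col).filter(lambda g: len(g) >= k)
+         if len(df) == before:
+             return df
+
+ def temporal_split(df, train_end, valid_end, time_col="timestamp"):
+     """Time-based split, e.g. Taobao: train until Dec 1, validate on Dec 2,
+     test on Dec 3; Kuaishou instead splits the last day at 12pm."""
+     train = df[df[time_col] <= train_end]
+     valid = df[(df[time_col] > train_end) & (df[time_col] <= valid_end)]
+     test = df[df[time_col] > valid_end]
+     return train, valid, test
+ ```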
465
+
466
+ Table 5 shows the statistics of the two datasets after splitting.
467
+
468
+ # A.2 Baselines
469
+
470
+ We compare the proposed approach with the following competitive recommenders:
471
+
472
+ - NCF [16]: This method is the state-of-the-art general recommender which combines matrix factorization and multi-layer perceptrons to capture the non-linearity of user interactions.
473
+ - DIN [51]: This method uses attention mechanism to aggregate the historical interaction sequences. Attention weights are computed according to the target item.
474
+ - LightGCN [14]: This method is the state-of-the-art GCN based recommender and it utilizes neighborhood aggregation to capture the collaborative filtering effect.
475
+ - Caser [41]: This method regards the sequence of items as images and extract sequential patterns with a convolutional network.
476
+ - GRU4REC [17]: This is the first approach that applies RNNs to session-based recommendation, with modified mini-batch training and a ranking loss.
477
+ - DIEN [50]: This method improves DIN by combining attention with GRU to model the sequential pattern of user interests, and takes interests evolution into consideration.
478
+ - SASRec [20]: This method is the state-of-the-art sequential recommendation model which utilizes self-attention to capture sequential preferences.
479
+
480
+ Table 5: Statistics of the datasets.
481
+
482
+ <table><tr><td>dataset</td><td>train</td><td>validation</td><td>test</td></tr><tr><td>Taobao</td><td>1,094,775</td><td>191,946</td><td>184,434</td></tr><tr><td>Kuaishou</td><td>12,925,390</td><td>641,580</td><td>1,385,689</td></tr></table>
483
+
484
+ Table 6: Performance of different $k$ on Taobao dataset.
485
+
486
+ <table><tr><td>k</td><td>AUC</td><td>GAUC</td><td>MRR</td><td>NDCG@2</td></tr><tr><td>1</td><td>0.8975</td><td>0.8927</td><td>0.4306</td><td>0.3717</td></tr><tr><td>2</td><td>0.8956</td><td>0.8938</td><td>0.4364</td><td>0.3798</td></tr><tr><td>3</td><td>0.8953</td><td>0.8936</td><td>0.4372</td><td>0.3788</td></tr><tr><td>4</td><td>0.8936</td><td>0.8924</td><td>0.4331</td><td>0.3747</td></tr></table>
487
+
488
+ - SURGE [6]: This is the state-of-the-art recommendation approach which utilizes graph convolutional networks (GCN) to model user interest from sequential interactions.
489
+ - SLi-Rec [47]: This is the state-of-the-art algorithm which captures long-term interests with asymmetric-SVD and models short-term interests with a modified LSTM.
490
+
491
+ # A.3 Implementation Details
492
+
493
+ We implement all the models with the Microsoft Recommenders framework [3] based on TensorFlow [1]. We use the Adam optimizer [21]. Embedding size $d$ is set as 40. We use a two-layer MLP with hidden size [100, 64] for interaction estimation. Batch normalization is enabled for the MLP, and the activation function is ReLU. The maximum length for user interaction sequences is 50 for the Taobao dataset and 250 for the Kuaishou dataset. We use grid search to find the best hyper-parameters. The optimal settings for our proposed implementation are: $L_{2}$ regularization weight is 1e-6. Batch size is 500. Learning rate is 0.001. $\beta$ is 0.1. $l_{t}$ is 5 for the Taobao dataset and 10 for the Kuaishou dataset. $k$ is 3 for the Taobao dataset and 5 for the Kuaishou dataset. $\mathcal{L}_{\mathrm{con}}$ is $\mathcal{L}_{\mathrm{tri}}$ for the Taobao dataset and $\mathcal{L}_{\mathrm{bpr}}$ for the Kuaishou dataset.
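+ For readability, the optimal settings listed above could be gathered into a single configuration, for example (values copied from the text; the key names are only illustrative):
+
+ ```python
+ CLSR_CONFIG = {
+     "embedding_dim": 40,
+     "mlp_hidden_sizes": [100, 64],      # interaction-estimation MLP
+     "activation": "relu",
+     "batch_norm": True,
+     "optimizer": "adam",
+     "learning_rate": 1e-3,
+     "l2_weight": 1e-6,
+     "batch_size": 500,
+     "beta": 0.1,                        # weight of the contrastive loss
+     "max_seq_len": {"taobao": 50, "kuaishou": 250},
+     "l_t": {"taobao": 5, "kuaishou": 10},
+     "k": {"taobao": 3, "kuaishou": 5},  # short-term proxy window
+     "contrastive_loss": {"taobao": "triplet", "kuaishou": "bpr"},
+ }
+ ```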
494
+
495
+ # A.4 More Studies on the Proposed Method
496
+
497
+ In this section, we conduct experiments to investigate how the proposed method performs under different values of several introduced hyper-parameters. We also include further ablation studies on several components.
498
+
499
+ Short-term Proxy $k$ . In the proposed method, we use mean pooling of the recent $k$ interacted items as the proxy representation for short-term interests. Table 6 illustrates the results of different $k$ on the Taobao dataset. We can observe that setting $k$ to 1 achieves poorer performance except for AUC, which means that only using the last interacted item as the proxy for short-term interests is not a good choice, since a single interaction is likely to be noise.
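+ A minimal sketch of this proxy (the embedding-matrix layout is an assumption; a long-term counterpart would pool over the whole history in the same way):
+
+ ```python
+ import numpy as np
+
+ def short_term_proxy(item_embeddings, k=3):
+     """Proxy for short-term interests: mean pooling over the embeddings of the
+     k most recent interactions (k = 3 on Taobao, 5 on Kuaishou, cf. Table 6)."""
+     return item_embeddings[-k:].mean(axis=0)
+
+ history = np.random.randn(10, 40)            # 10 interacted items, 40-d embeddings
+ print(short_term_proxy(history, k=3).shape)  # (40,)
+ ```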
500
+
501
+ Interests Evolution. Short-term interests are quite different from long-term interests with respect to their dynamics over time; thus we utilize a GRU to generate query vectors in Eqn (7), which simulates the evolution of short-term interests. We study the effect of interests evolution and the results are shown in Table 7. We can
502
+
503
+ Table 7: Study of interests evolution.
504
+
505
+ <table><tr><td>Dataset</td><td>Evolution</td><td>AUC</td><td>GAUC</td><td>MRR</td><td>NDCG@2</td></tr><tr><td rowspan="2">Taobao</td><td>yes</td><td>0.8953</td><td>0.8936</td><td>0.4372</td><td>0.3788</td></tr><tr><td>no</td><td>0.8847</td><td>0.8884</td><td>0.4320</td><td>0.3735</td></tr><tr><td rowspan="2">Kuaishou</td><td>yes</td><td>0.8563</td><td>0.8718</td><td>0.9382</td><td>0.9544</td></tr><tr><td>no</td><td>0.8202</td><td>0.8333</td><td>0.9226</td><td>0.9429</td></tr></table>
506
+
507
+ Table 8: Comparison of different design choices.
508
+
509
+ <table><tr><td>Dataset</td><td>LSTM</td><td>GRU</td><td>Time4LSTM</td><td>BPR</td><td>Triplet</td></tr><tr><td>Taobao</td><td>0.8872</td><td>0.8860</td><td>0.8953</td><td>0.8909</td><td>0.8953</td></tr><tr><td>Kuaishou</td><td>0.8240</td><td>0.8259</td><td>0.8563</td><td>0.8563</td><td>0.8102</td></tr></table>
510
+
511
+ Table 9: Study of fusion predictor GRU on Taobao dataset.
512
+
513
+ <table><tr><td>Method</td><td>AUC</td><td>GAUC</td><td>MRR</td><td>NDCG@2</td></tr><tr><td>w/ GRU</td><td>0.8953</td><td>0.8936</td><td>0.4372</td><td>0.3788</td></tr><tr><td>w/o GRU</td><td>0.8817</td><td>0.8853</td><td>0.4275</td><td>0.3692</td></tr></table>
514
+
515
+ observe that removing interests evolution causes a significant decrease in accuracy on both datasets, which confirms the necessity of modeling the different semantics of LS-term interests.
516
+
517
+ Study of Different Design Choices. We further compare different design choices in CLSR. Specifically, we investigate different options for the short-term interests encoder and the contrastive loss function in Eqn (14) and (25). For the RNN $\rho$ in the short-term interests encoder $\psi$ , we compare LSTM [18], GRU [9], and Time4LSTM proposed by SLi-Rec [47]. For $\mathcal{L}_{\mathrm{con}}$ , we compare the BPR loss and the triplet loss. Table 8 shows the results of the different design choices. We can observe that Time4LSTM outperforms LSTM and GRU on both datasets, indicating that the time interval feature, which is ignored by LSTM and GRU, is helpful for LS-term interests modeling. As for the contrastive loss, neither loss function consistently outperforms the other, which can be explained by the different scales of the two datasets. In fact, CLSR is a highly general framework in which many sequential encoders and loss functions can be utilized. We leave the further study as future work.
518
+
519
+ Fusion Predictor GRU. In the proposed adaptive fusion model based on the attention technique, we incorporate both the target item and the historical sequence to predict whether the next interaction is driven by long- or short-term interests. Specifically, we adopt a separate GRU that takes the historical sequence as input, and we use its final state as the input of the MLP. We conduct experiments to investigate whether taking the historical sequence into consideration is necessary. Table 9 illustrates the results of the proposed method with and without the fusion predictor GRU. We can observe that removing the fusion predictor GRU makes the recommendation performance drop significantly, which confirms that the importance of long- or short-term interests is largely determined by the historical sequence.
520
+
521
+ Attentive Encoder. As introduced in Equation (11), the proposed attentive encoder adopts an MLP to compute attention weights. The input of the MLP is composed of the key vector, the query vector, and their element-wise difference and product. We compare the MLP-based attention with a simple inner-product-based attention.
522
+
523
+ Table 10: Study of attentive encoder on Taobao dataset.
524
+
525
+ <table><tr><td>Attention</td><td>AUC</td><td>GAUC</td><td>MRR</td><td>NDCG@2</td></tr><tr><td>MLP</td><td>0.8953</td><td>0.8936</td><td>0.4372</td><td>0.3788</td></tr><tr><td>Inner Product</td><td>0.8684</td><td>0.8706</td><td>0.4051</td><td>0.3480</td></tr></table>
526
+
527
+ ![](images/9b99602997f0315ea0043e194c5946405f88cd080a33b81b69c87fbbc789bfd1.jpg)
528
+ Figure 8: Counterfactual (truncate) evaluation of the proposed method on the Kuaishou dataset.
529
+
530
+ ![](images/cf98833e843dfe868ca33157fe539c69411d4607d93c47e2fef931ed14fddf82.jpg)
531
+
532
+ Table 11: Training time cost on Taobao dataset.
533
+
534
+ <table><tr><td>Method</td><td>GRU4REC</td><td>SLi-Rec</td><td>CLSR</td></tr><tr><td>Time</td><td>27.8min</td><td>26.7min</td><td>28.2min</td></tr></table>
535
+
536
+ The inner-product-based attention is defined as:
537
+
538
+ $$
539
+ \alpha_ {k} ^ {\prime} = \left\langle \boldsymbol {v} _ {k}, \boldsymbol {u} _ {l} \right\rangle . \tag {32}
540
+ $$
541
+
542
+ Table 10 illustrates the comparison of the MLP-based and inner-product-based attention. The results show that MLP-based attention outperforms inner-product-based attention by a significant margin, which indicates that the relation between user interests and historical sequences is non-linear and cannot be well captured by linear operators like the inner product.
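+ The two scoring functions being compared can be sketched as below; the hidden size and weight names are placeholders, and the inner-product variant is Eqn (32):
+
+ ```python
+ import numpy as np
+
+ def inner_product_scores(keys, query):
+     """Eqn (32): a purely linear score between each key v_k and the query u_l."""
+     return keys @ query                                    # shape: (num_keys,)
+
+ def mlp_scores(keys, query, w1, b1, w2, b2):
+     """MLP-based score over [key, query, key - query, key * query]; the ReLU
+     hidden layer is what lets the model capture non-linear relations."""
+     q = np.broadcast_to(query, keys.shape)
+     feat = np.concatenate([keys, q, keys - q, keys * q], axis=-1)
+     hidden = np.maximum(feat @ w1 + b1, 0.0)
+     return (hidden @ w2 + b2).squeeze(-1)                  # shape: (num_keys,)
+ ```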
543
+
544
+ Discrepancy Supervision. In our proposed method, we do not add an extra discrepancy loss on the LS-term interests to make them independent of each other as in other works [43, 49], since we believe self-supervision is enough to accomplish disentanglement. In our experiments, we tried adding such an independence loss between the two interests, and AUC dropped by 0.01, which verifies our point. It is worth noting that many existing works [5, 32, 44] also do not use an independence loss.
545
+
546
+ More Counterfactual Evaluations. Figure 8 illustrates the AUC of click and like on the Kuaishou dataset with the available history length $k$ varying from 50 to 250. The results on the Kuaishou dataset are in line with the results on the Taobao dataset in Figure 5(a). Specifically, the AUC of like is more sensitive to the length of the available history and improves drastically as $k$ increases, while the AUC of click does not improve much as we increase $k$ . Since like reflects more of the user's long-term interest, it is necessary to have access to the entire user interaction sequence. Meanwhile, click is more about short-term interest, and thus it can be largely captured from the recent history, and looking back at early history brings no further gains.
547
+
548
+ Complexity. We use a single GPU to compare the complexity. The training times of CLSR and typical baselines on the Taobao dataset are shown in Table 11. The parameter scale of CLSR is comparable with that of SLi-Rec (both take 4.1 GB of GPU memory).
2202.13xxx/2202.13090/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ada8e25f02ca484100c14cd93aa3feee269eac31d4dd9d037605f605bf8bc858
3
+ size 579123
2202.13xxx/2202.13090/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13094/519c68f3-731e-4525-ae69-803e7821e6d1_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13094/519c68f3-731e-4525-ae69-803e7821e6d1_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13094/519c68f3-731e-4525-ae69-803e7821e6d1_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:995e8f5acc194e406e5a01167cd8f855919cb1587cf87a7d819835af73858f35
3
+ size 2391124
2202.13xxx/2202.13094/full.md ADDED
@@ -0,0 +1,472 @@
1
+ # RIConv++: Effective Rotation Invariant Convolutions for 3D Point Clouds Deep Learning
2
+
3
+ Zhiyuan Zhang · Binh-Son Hua · Sai-Kit Yeung
4
+
5
+ Received: date / Accepted: date
6
+
7
+ Abstract 3D point clouds deep learning is a promising field of research that allows a neural network to learn features of point clouds directly, making it a robust tool for solving 3D scene understanding tasks. While recent works show that point cloud convolutions can be invariant to translation and point permutation, investigations of the rotation invariance property for point cloud convolution have so far been scarce. Some existing methods perform point cloud convolutions with rotation-invariant features, but they generally do not perform as well as their translation-invariant-only counterparts. In this work, we argue that a key reason is that, compared to point coordinates, the rotation-invariant features consumed by point cloud convolution are not as distinctive. To address this problem, we propose a simple yet effective convolution operator that enhances feature distinction by designing powerful rotation invariant features from the local regions. We consider the relationship between the point of interest and its neighbors as well as the internal relationship of the neighbors to largely improve the feature descriptiveness. Our network architecture can capture both local and global context by simply tuning the neighborhood size in each convolution layer. We conduct several experiments on synthetic and real-world point cloud classification, part segmentation, and shape retrieval to evaluate our method, which achieves the state-of-the-art accuracy under challenging rotations.
10
+
11
+ Keywords 3D Point Cloud $\cdot$ Convolutional Neural Networks $\cdot$ Deep Learning $\cdot$ Rotation Invariance
12
+
13
+ # 1 Introduction
14
+
15
+ 3D scene understanding is a challenging problem in computer vision. With the wide availability of consumer-grade RGB-D and LiDAR sensors, acquiring 3D scenes has become easier and cheaper, resulting in many mid- and large-scale 3D datasets (Wu et al. 2015a; Chang et al. 2015; Hua et al. 2016; Dai et al. 2017; Armeni et al. 2016; Yi et al. 2016; Uy et al. 2019). One of the popular representations of such 3D data is the 3D point cloud representation. The recent advances of deep learning with 3D point clouds have led to opportunities to revisit and tackle 3D scene understanding from a new perspective.
16
+
17
+ The basic idea of 3D point clouds deep learning is to let a neural network consume a point cloud directly. A point cloud is a mathematical set and so it fundamentally differs from an image, rendering traditional neural networks unsuitable for 3D point clouds. It is therefore necessary to design a convolution-equivalent operator in the 3D domain that can take a point cloud as input and output its per-point features. In the past few years, significant efforts have been made with promising results along this direction (Qi et al. 2017a,b; Hua et al. 2018; Li et al. 2018; Xu et al. 2018; Zhang et al. 2019b; Zhao et al. 2020).
18
+
19
+ Despite such research efforts, a property often overlooked in point cloud convolution is rotation invariance. This property arises from the fact that 3D data can have three degrees of freedom for rotation, making rotation invariance more challenging to achieve. In the 2D domain, a viable solution is to augment the training data with random rotations. However, in 3D, such data augmentation becomes less effective due to the additional degrees of freedom, which can make training prohibitively expensive.
22
+
23
+ Some previous works have been proposed to learn rotation-invariant features (Zhang et al. 2020, 2019a; Rao et al. 2019; Poulenard et al. 2019; Deng et al. 2018; Chen et al. 2019), which lead to consistent predictions given arbitrarily rotated point clouds. We observe that state-of-the-art methods can improve feature learning by using a local reference frame (LRF) to encode both local and global information (Zhang et al. 2020; Kim et al. 2020b; Thomas 2020). However, the LRF usually suffers from a sign-flipping problem in the $x$ and $y$ axes, which makes rotation-invariant convolutions built upon the LRF yield features that are not as distinctive as those of a translation-invariant convolution. This can be demonstrated by the results of the object classification task: performing classification with aligned 3D shapes (using translation-invariant convolution) is more accurate than performing the same task with shapes under arbitrary rotations (using rotation-invariant convolution). For example, state-of-the-art methods with rotation invariance (Zhang et al. 2019a; Poulenard et al. 2019; Zhang et al. 2020; Kim et al. 2020b) reported classification accuracies of about $86\% - 89\%$ on ModelNet40 (Wu et al. 2015a), while methods without rotation invariance can reach accuracies as high as $93\%$ (Wang et al. 2019; Zhang et al. 2019b). This motivates us to address the limitation of the LRF and design a convolution that gives more informative features to increase the overall performance.
24
+
25
+ Particularly, we propose an effective and lightweight approach to perform rotation-invariant convolution for point clouds, which is an extension of our previous work (Zhang et al. 2019a, 2020). We propose to make rotation-invariant features more informative by using a local reference axis (LRA) and by considering point-point relations, which further improves feature distinction. Compared to the LRF, we show that our LRA is more stable. To the best of our knowledge, our method performs consistent predictions across rotations while reducing the performance gap compared to translation-invariant convolutions.
26
+
27
+ In summary, the main contributions of this work are:
28
+
29
+ - RIConv++, an enhanced version of our previous convolution RIConv (Zhang et al. 2019a). We leverage a local reference axis to achieve a stable rotation-invariant representation, and we extract informative rotation invariant features by considering the relationship between the point of interest and its neighbors as well as the internal relationship among the neighbors;
30
+
31
+ - A neural network architecture that stacks RIConv++ for learning rotation-invariant features for 3D point clouds. The network can sense local, semi-global and global context by simply adjusting the neighborhood size and achieves consistent performance across different rotation scenarios;
32
+
33
+ - Extensive experiments of our method on object classification, object part segmentation and shape retrieval that achieve the state-of-the-art performance under challenging scenarios, including an analysis of rotation-invariant features and an ablation study of our neural network.
34
+
35
+ # 2 Related Works
36
+
37
+ 3D deep learning began with a focus on regular and structured representations of 3D scenes such as multiple 2D images (Su et al. 2015; Qi et al. 2016; Esteves et al. 2019), 3D volumes (Qi et al. 2016; Li et al. 2016), and hierarchical data structures like octrees (Riegler et al. 2017) or kd-trees (Klokov and Lempitsky 2017; Wang et al. 2017). Such representations yield good performance, but they face challenges from a practical point of view: they require large amounts of memory, represent geometry imprecisely, and do not scale to high-resolution data. Many recent works in 3D deep learning instead leverage 3D point clouds, a more compact and intuitive representation compared to volumes and image sets, for feature learning.
38
+
39
+ A daunting task in deep learning with 3D point clouds is how to let a neural network consume a point cloud properly: mathematically, a point cloud is a set, so a valid convolution for a point cloud must be invariant to the permutation of the point set. PointNet (Qi et al. 2017a) pioneered the first point cloud convolution with global features by max-pooling per-point features from MLPs. Several follow-up works focus on designing convolutions that can learn local features for a point cloud efficiently (Hua et al. 2018; Qi et al. 2017b; Li et al. 2018; Xu et al. 2018; Wang et al. 2019; Zhang et al. 2019b). Interested readers could refer to the survey by Guo et al. (Guo et al. 2020) for a comprehensive overview of deep learning techniques for 3D point clouds.
40
+
41
+ However, a missing property in the previously mentioned convolutions for point clouds is rotation invariance. To handle rotations, a common approach is to augment the training data with arbitrary rotations, but a disadvantage of this approach is that generalizing the predictions to unseen rotations is challenging, not to mention that the training time becomes longer due to the increased amount of training data. Instead, it is desirable to design a specific convolution with rotation-invariant features. In the 2D domain, rotation invariance is an appealing property in feature learning where various methods have been proposed, such as learning steerable filters (Weiler et al. 2018), performing a log-polar transform of the input (Esteves et al. 2018b) with cylindrical convolutional layers (Kim et al. 2020a), or implementing transformation invariant pooling operators (Laptev et al. 2016). However, these methods are not directly applicable to 3D point clouds due to their difference in both data representation and data dimensionality.
44
+
45
+ In the 3D domain, rotation invariance has also been specifically built for feature learning of point clouds. (Rao et al. 2019) mapped a point cloud to a spherical domain to define a rotation-invariant convolution. However, the learned features are not purely rotation invariant as the discretized sphere by itself is sensitive to global rotations, resulting in a notable performance drop for objects with arbitrary rotations. To improve the rotation invariant capacity, (Poulenard et al. 2019) proposed to integrate spherical harmonics into the convolution. (Chen et al. 2019) introduced a hierarchical clustering scheme that encodes the relative angles between two point vectors and uses the vector norm to keep rotation invariance. (Zhang et al. 2019a) proposed a simple convolution that operates on handcrafted features built from Euclidean distances and angles that are rotation invariant by nature. While consistent results are achieved for arbitrary rotations, only local features are considered, which are less descriptive and can cause accuracy degradation. Their follow-up work (Zhang et al. 2020) addressed this limitation by building a global context aware convolution based on anchors and the Local Reference Frame (LRF) to achieve rotation invariance. (Kim et al. 2020b) learned rotation invariant local descriptors to aggregate local features based on the LRF and applied graph convolutional neural networks. (Thomas 2020) also relied on the LRF and used a multiple-alignment scheme to gain better results. (Li et al. 2021) presented an effective framework to construct both local and global features based on distances, angles and reference points. However, neither the LRF nor the global reference is stable enough under noise or outliers, limiting their overall performance.
46
+
47
+ A great benefit of having the rotation invariance property during feature learning is that it allows consistent predictions across training/testing scenarios with or without rotations applied to the data, and such models can generalize robustly to inputs with unseen rotations. However, we found that the existing techniques share a common drawback: their performance is inferior to that of a translation-invariant point cloud convolution. This is well reflected in the accuracy of the object classification task on the ModelNet40 dataset (Wu et al. 2015a).
48
+
49
+ State-of-the-art translation-invariant convolutions such as PointNet (Qi et al. 2017a), PointNet++ (Qi et al. 2017b), PointCNN (Li et al. 2018), or ShellNet (Zhang et al. 2019b) report $89\% - 93\%$ accuracy, while techniques with rotation-invariant convolutions only report up to $86\% - 89\%$ accuracy (Zhang et al. 2019a; Poulenard et al. 2019; Zhang et al. 2020; Kim et al. 2020b; Li et al. 2021). Our work in this paper is dedicated to analyzing and reducing this performance gap.
50
+
51
+ # 3 Our Method
52
+
53
+ # 3.1 Problem Definition
54
+
55
+ Our goal is to seek a simple but efficient way to perform convolution on a point set such that the translation and rotation invariance property is preserved. Mathematically, let $P$ be the point set; we aim for
56
+
57
+ $$
58
+ \operatorname{conv}(\pi(P)) = \operatorname{conv}(P) \tag{1}
59
+ $$
60
+
61
+ where $\pi()$ denotes an arbitrary rotation, translation, or point permutation. The traditional convolution is translation invariant by definition. Convolution for a point cloud is specially designed to achieve the permutation invariance property (Qi et al. 2017a). However, techniques that allow point cloud convolution with the rotation invariance property have so far been scarce.
62
+
63
+ An effective solution is to make the input features of $P$ invariant to both translations and rotations, and then design a convolution operator that achieves permutation invariance. In this work, we propose to achieve rotation invariance for point cloud convolution with state-of-the-art performance by directly leveraging rotation invariant features drawn from low-level geometric cues in the Euclidean space. For completeness, let us first discuss the design of rotation-invariant features in an early version of this work (RIConv (Zhang et al. 2019a)), and then discuss an improved version (RIConv++) with more distinctive features.
64
+
65
+ # 3.2 Rotation Invariant Local Features
66
+
67
+ The design of the rotation-invariant features used in RIConv is illustrated in Figure 1. Given a reference point $p$ (red), $K$ nearest neighbors are determined to construct a local point set. The centroid of the point set is denoted as $m$ (blue). We use the vector $\overrightarrow{pm}$ as a reference to extract translation and rotation invariant features for all points in the local point set.
68
+
69
+ Particularly, for a point $x$ in this set, its features are defined as
70
+
71
+ $$
72
+ \operatorname{RIF}(x) = \left[ d_0, d_1, \alpha_0, \alpha_1 \right]. \tag{2}
73
+ $$
74
+
75
+ ![](images/34716a1bd8090d483f2b20e9116f1af26df138533ef36c5e8e877b8ef600a039.jpg)
76
+ Fig. 1 Rotation invariant features at a point $x$ used by RIConv (Zhang et al. 2019a). The distances and angles are constructed based on the reference vector $\overrightarrow{pm}$ , where $p$ is the representative point and $m$ the centroid of the local point set. Such features give good performance in the classification task, but are not as distinctive as features learned from translation-invariant convolutions.
77
+
78
+ Here, $d_0$ and $d_1$ represent the distances from $x$ to $p$ and to $m$ , respectively. $\alpha_0$ and $\alpha_1$ represent the angles from $x$ towards $p$ and $m$ , as shown in Figure 1. Since such low-level geometric features are invariant under rigid transformations, they are well suited to our need for a translation-invariant convolution with the rotation invariance property. Note that the reference vector $\overrightarrow{pm}$ can also serve as a local orientation indicator, and we can use it to build a local coordinate system for convolution. Such rotation invariant features have been used in RIConv (Zhang et al. 2019a), which achieves good classification results on the ModelNet40 dataset (Wu et al. 2015a).
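+ A small numerical sketch of Eqn (2) may be helpful; the exact orientation conventions of the two angles are one plausible reading of Figure 1, not necessarily the implementation used in RIConv:
+
+ ```python
+ import numpy as np
+
+ def angle(u, v, eps=1e-8):
+     """Angle between two vectors via arccos of the normalized inner product."""
+     c = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v) + eps)
+     return np.arccos(np.clip(c, -1.0, 1.0))
+
+ def rif(x, p, neighbors):
+     """RIF(x) = [d0, d1, a0, a1] built from the reference point p and the
+     centroid m of the local neighborhood, as in Eqn (2)."""
+     m = neighbors.mean(axis=0)
+     d0, d1 = np.linalg.norm(x - p), np.linalg.norm(x - m)
+     a0 = angle(m - p, x - p)   # angle at p between pm and px (assumed reading)
+     a1 = angle(p - m, x - m)   # angle at m between mp and mx (assumed reading)
+     return np.array([d0, d1, a0, a1])
+ ```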
79
+
80
+ A caveat of the feature extraction scheme above is the stability of the vector $\overrightarrow{pm}$ : when the centroid $m$ changes, $\overrightarrow{pm}$ changes as well, which makes it unstable. In this work, we introduce the local reference axis (LRA), a more stable reference vector based on the theory of the local reference frame (LRF) for rotation-invariant shape descriptors. Similar to the vector $\overrightarrow{pm}$ , the LRA can be used for extracting rotation-invariant features and for indicating the orientation of the local neighborhood to define the convolution subsequently.
81
+
82
+ Local Reference Axis. Given a point $p$ and its neighbors $x_{i} \in \Omega_{p}$ , the local reference axis (LRA) at $p$ is defined as the eigenvector corresponding to the smallest eigenvalue of the covariance matrix:
83
+
84
+ $$
85
+ \Sigma = \sum_{i = 1}^{N_{\mathrm{sub}}} w_i \left(x_i - p\right) \left(x_i - p\right)^{\top}, \tag{3}
86
+ $$
87
+
88
+ where $N_{sub}$ is the number of points in the local region and $x_{i} \in \Omega_{p}$ , and
89
+
90
+ $$
91
+ w_i = \frac{m - \| x_i - p \|}{\sum_{j = 1}^{N_{\mathrm{sub}}} \left( m - \| x_j - p \| \right)}, \tag{4}
92
+ $$
93
+
94
+ where $m = \max_{i=1..N_{sub}}(\|x_i - p\|)$ . Intuitively, this weight allows nearby points of $p$ to have large contributions to the covariance matrix, and thus greatly affect the LRA. Points further away from $p$ however can contribute globally to the robustness of the LRA. Such weighted LRA construction is a fundamental step in 3D hand-crafted features (Tombari et al. 2010), which can be easily integrated into our proposed convolution.
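+ A compact NumPy sketch of Eqns (3)-(4) is given below; note that the sign of the returned axis is left ambiguous in this sketch, and the helper name is illustrative:
+
+ ```python
+ import numpy as np
+
+ def local_reference_axis(p, neighbors):
+     """LRA at p: eigenvector of the weighted covariance of Eqn (3) associated
+     with the smallest eigenvalue; Eqn (4) gives nearby neighbors larger weights."""
+     diff = neighbors - p                                  # (N_sub, 3)
+     dist = np.linalg.norm(diff, axis=1)
+     w = dist.max() - dist
+     w = w / max(w.sum(), 1e-8)
+     cov = (w[:, None] * diff).T @ diff                    # 3x3 weighted covariance
+     _, eigvecs = np.linalg.eigh(cov)                      # eigenvalues ascending
+     return eigvecs[:, 0]
+
+ # toy usage: a nearly planar patch, where the LRA approximates the surface normal
+ pts = np.random.randn(64, 3) * np.array([1.0, 1.0, 0.01])
+ print(local_reference_axis(pts.mean(axis=0), pts))
+ ```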
95
+
96
+ Note that the construction of the LRA is very similar to that of the local reference frame (LRF) used in traditional hand-crafted shape descriptors (Tombari et al. 2010). The LRA can be regarded as the most stable part of the LRF (see Section 4.2 for the comparison experiment); in most cases (e.g., locally flat surfaces), the LRA is highly similar to the normal vector of the surface. Empirically, we found that features learned with the LRA perform as well as those learned with normal vectors. Note that, compared to the LRF, we do not make use of the two axes tangential to the surface because they could be ambiguous and unstable.
97
+
98
+ Informative Rotation Invariant Features. Given the definition of the LRA, we propose more powerful rotation-invariant features as follows. Given a reference point $p$ , recall that RIConv (Zhang et al. 2019a) only considers the relation between $p$ and its neighbors, measuring Euclidean distances and angles as rotation-invariant features. We propose to additionally consider the distances and angles among the neighbors themselves. An illustration of our proposed features is shown in Figure 2. In this design, the features are kept rotation invariant by definition, and so the framework of RIConv (Zhang et al. 2019a) can work as is. We name such features Informative Rotation Invariant Features (IRIF). IRIF transforms each neighbor point $x_{i}$ into a tuple of eight attributes:
99
+
100
+ $$
101
+ \operatorname{IRIF}(x_i) = \left[ d, \varphi, \alpha_0, \alpha_1, \alpha_2, \beta_0, \beta_1, \beta_2 \right] \tag{5}
102
+ $$
103
+
104
+ where $d$ , $\alpha_0$ , $\alpha_{1}$ and $\alpha_{2}$ measure the relationship between neighbor point $x_{i}$ and the reference point $p$ (radial direction):
105
+
106
+ $$
107
+ d = \left\| x _ {i} - p \right\|, \tag {6}
108
+ $$
109
+
110
+ $$
111
+ \alpha_ {0} = \angle \left(L R A _ {x _ {i}}, \overrightarrow {x _ {i} p}\right),
112
+ $$
113
+
114
+ $$
115
+ \alpha_1 = \angle \left( LRA_p, \overrightarrow{x_i p} \right),
116
+ $$
117
+
118
+ $$
119
+ \alpha_ {2} = S _ {a} \cdot \angle \left(L R A _ {x _ {i}}, L R A _ {p}\right).
120
+ $$
121
+
122
+ and $\varphi$ , $\beta_0$ , $\beta_1$ , $\beta_2$ encode the relationship between $x_i$ and its adjacent neighbor $x_{i+1}$ (clockwise direction):
123
+
124
+ $$
125
+ \varphi = \angle (\overrightarrow {x _ {i + 1} p}, \overrightarrow {x _ {i} p}), \tag {7}
126
+ $$
127
+
128
+ $$
129
+ \beta_0 = \angle \left( LRA_{x_i}, \overrightarrow{x_i x_{i+1}} \right),
130
+ $$
131
+
132
+ $$
133
+ \beta_ {1} = \angle \left(L R A _ {x _ {i + 1}}, \overrightarrow {x _ {i} x _ {i + 1}}\right),
134
+ $$
135
+
136
+ $$
137
+ \beta_ {2} = S _ {b} \cdot \angle \left(L R A _ {x _ {i}}, L R A _ {x _ {i + 1}}\right).
138
+ $$
139
+
140
+ ![](images/6c069e820f73c84b95f60a16a5a2f70908830a9577bfe0ffc6d413058a58cd6f.jpg)
141
+ Fig. 2 Informative rotation invariant features (IRIF). The local reference axis (LRA) indicates the local orientation. For a point set with the red point $p$ as reference and grey points as its neighbors (a), clockwise ordering is imposed by projection of the points onto the local tangent disk (b). At each neighbor point, we compute the informative rotation invariant features from the relations between $p$ and its neighbors as well as the relations between the neighbors (c).
142
+
143
+ ![](images/794b459ac27988ebc390bdc42138326625dd7ebed0106cc9a19ce6d35d7259f2.jpg)
144
+ Fig. 3 Illustration of signed angles. Although the absolute angle values of $LRA_{x_{i+1}}$ and $LRA_{x_i}$ relative to $LRA_p$ are the same, their directions are opposite. So the signs are used to indicate the directions.
145
+
146
+ Here, $\angle$ is computed as the arccos of the dot product of the normalized vectors. Since arccos returns values in $[0,\pi ]$ , it has a sign ambiguity, as shown in Figure 3: although the absolute angle values are the same, the directions with regard to $LRA_{p}$ are completely different. To differentiate these cases, the signed angles in Equation 5 encode both the angle and the direction between two vectors. We define the signs $S_{a}$ and $S_{b}$ as
147
+
148
+ $$
149
+ S_a = \begin{cases} +1, & \text{if } \alpha_0 \leq \alpha_1 \\ -1, & \text{otherwise} \end{cases}, \quad S_b = \begin{cases} +1, & \text{if } \beta_0 \leq \beta_1 \\ -1, & \text{otherwise} \end{cases}.
150
+ $$
151
+
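+ As an illustration, below is a minimal NumPy sketch that computes the IRIF attributes of Equations 5-7 for one neighbor $x_i$ and its clockwise-adjacent neighbor $x_{i+1}$, assuming the neighbors have already been ordered as described in Section 3.3; the helper names `angle` and `irif_attributes` are our own.
+
+ ```python
+ import numpy as np
+
+ def angle(u, v):
+     """Unsigned angle in [0, pi] between two vectors (arccos of the normalized dot product)."""
+     c = np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-12)
+     return np.arccos(np.clip(c, -1.0, 1.0))
+
+ def irif_attributes(p, lra_p, x_i, lra_i, x_j, lra_j):
+     """IRIF tuple (Eq. 5) for neighbor x_i, with x_j = x_{i+1} its clockwise-adjacent neighbor."""
+     # Radial direction (Eq. 6): relations between x_i and the reference point p.
+     d = np.linalg.norm(x_i - p)
+     a0 = angle(lra_i, p - x_i)            # angle(LRA_{x_i}, x_i -> p)
+     a1 = angle(lra_p, p - x_i)            # angle(LRA_p,     x_i -> p)
+     s_a = 1.0 if a0 <= a1 else -1.0       # sign S_a
+     a2 = s_a * angle(lra_i, lra_p)
+     # Clockwise direction (Eq. 7): relations between x_i and x_{i+1}.
+     phi = angle(p - x_j, p - x_i)         # angle(x_{i+1} -> p, x_i -> p)
+     b0 = angle(lra_i, x_j - x_i)          # angle(LRA_{x_i},     x_i -> x_{i+1})
+     b1 = angle(lra_j, x_j - x_i)          # angle(LRA_{x_{i+1}}, x_i -> x_{i+1})
+     s_b = 1.0 if b0 <= b1 else -1.0       # sign S_b
+     b2 = s_b * angle(lra_i, lra_j)
+     return np.array([d, phi, a0, a1, a2, b0, b1, b2])
+ ```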
152
+ Uniqueness of IRIF. During the IRIF construction, it is expected that each point is converted to a unique position in the feature space. The attributes $d$ , $\alpha_0$ , and $\varphi$ in Equation 5 correspond to the radial distance, polar angle, and azimuthal angle respectively, which uniquely define a 3D point in the local system. The other attributes encode second-order properties, making our features more informative. Note that, for each neighbor point $x_i$ , the attribute $\varphi$ is the angle to its clockwise adjacent neighbor $x_{i+1}$ ; its angle to any other neighbor can be obtained by chaining (summing) the pairwise angles. So, in theory, each neighbor point has a unique position in the local spherical system. However, there is a special case in which all neighbor points are uniformly distributed along the azimuthal direction with exactly the same radial distances and the same polar angles (e.g., on a sphere). In this case, IRIF is no longer unique, but it still functions as a feature with reduced descriptiveness.
155
+
156
+ Additionally, for tasks that target whole objects, such as classification and retrieval, global uniqueness is also important. This is achieved by enlarging the neighborhood size to include all the points of interest. For example, the input of the last convolution layer usually contains fewer points with higher-dimensional features, so we can take all the input points into account in the last layer to achieve global uniqueness.
157
+
158
+ # 3.3 Rotation-Invariant Convolution
159
+
160
+ From the recipes of informative rotation invariant features and local reference axis, we are now ready to define our convolution. The main steps are detailed in Figure 4.
161
+
162
+ Particularly, we start by sampling a set of representative points with a farthest point sampling strategy, which generates uniformly distributed points. From each representative point, we perform a K-nearest-neighbor query to obtain a local point set. Let us consider a local point set $\Omega = \{x_{i}\}$ , where $x_{i}$ represents the 3D coordinates of point $i$ . We define the convolution that learns the features of $\Omega$ as
163
+
164
+ $$
165
+ \mathbf {f} (\Omega) = \sigma (\mathcal {A} \left(\{\mathcal {T} \left(\mathbf {f} _ {x _ {i}}\right): \forall i \}\right)) \tag {8}
166
+ $$
167
+
168
+ This formula indicates that features of each point in the point set are first transformed by $\mathcal{T}$ before being aggregated by the aggregation function $\mathcal{A}$ and passed to an activation function $\sigma$ . We set the input features to our informative rotation-invariant features $\mathbf{f}_{x_i} = \mathrm{IRIF}(x_i)$ . We define the transformation function as
169
+
170
+ $$
171
+ \mathcal {T} \left(\mathbf {f} _ {x _ {i}}\right) = \mathbf {w} _ {i} \cdot \mathbf {f} _ {x _ {i}} = \mathbf {f} _ {x _ {i}} ^ {\prime} \tag {9}
172
+ $$
173
+
174
+ ![](images/e3d454e9555b260bfe711753a0b4cecc54e64ffa1608494bc41cced9b9dbf30e.jpg)
175
+ Fig. 4 RIConv++ operator. For an input point cloud, representative points (red dots) are sampled via farthest point sampling. For a reference point $p$ (pink), the K nearest neighbors are queried to yield a local point set. Then, we compute the informative rotation invariant features (Section 3.2), which are lifted to a high-dimensional space by a shared multi-layer perceptron (MLP). Concatenated with the previous layer features (if any), the features of these local points are further passed to a pointwise convolution and finally summarized by maxpooling (Section 3.3). The convolution is applied to all representative points, resulting in output features at these points (denoted as thicker red points).
176
+
177
+ # Algorithm 1 RIConv++ operator.
178
+
179
+ Input: Reference point $p$ , point set $\Omega$ , point features $\mathbf{f}_{prev}$ from the previous layer (if any), local reference axes LRA
+ Output: Convolved features $\mathbf{f}$
180
+
181
+ 1: $\mathbf{f}\gets \{\mathrm{IRIF}(x_i):\forall x_i\in \Omega \}$ ▷ construct informative rotation invariant features with the LRA axes (Section 3.2)
+ 2: $\mathbf{f}\gets \mathrm{MLP}(\mathbf{f})$ ▷ lift each feature to a high-dimensional feature
+ 3: $\mathbf{f}_{in}\gets [\mathbf{f}_{prev},\mathbf{f}]$ ▷ concatenate the features from the local and the previous layer (if any)
+ 4: $\mathbf{f}_{out} \gets \mathrm{conv}(\mathbf{f}_{in})$ ▷ 1D convolution with proper ordering
+ 5: return $\mathrm{maxpool}(\mathbf{f}_{out})$ ▷ maxpool the features and return
196
+
197
+ where $\cdot$ indicates the element-wise product, and $\mathbf{w}_i$ is the weight parameter to be learned by the network. Our transformation function is similar to PointNet (Qi et al. 2017a), but applied locally.
198
+
199
+ A popular choice of the aggregation function $\mathcal{A}$ is maxpooling, which supports permutation invariance in the orders of the input point features (Qi et al. 2017a). Our aggregation function differs in that it includes a 1D convolution kernel and an ordering function to maintain rotation invariance. This has been used in our previous work (Zhang et al. 2019a,b):
200
+
201
+ $$
202
+ \mathcal{A}\left(\left\{\mathbf{f}_{x_i}^{\prime}\right\}\right) = \mathrm{maxpool}\left(\mathrm{K} \star \mathrm{order}\left(\left\{\mathbf{f}_{x_i}^{\prime}\right\}\right)\right) \tag{10}
203
+ $$
204
+
205
+ where order is a function that sorts the points in clockwise order based on projecting $x_{i}$ onto the local tangent disk, and $\mathrm{K}$ is the 1D convolution kernel. To obtain the ordering, we select one neighbor point as the starting point (e.g., $x_0$ ) and set $\overrightarrow{x_0 p}$ as the reference on the projected disk. Then, we compute the angles between $\overrightarrow{x_i p}$ and $\overrightarrow{x_0 p}$ ; the ordering is determined by sorting these angles from 0 to 360 degrees. In our implementation, we simply select $x_0$ as the point in the local neighborhood farthest from the reference point $p$ . A benefit of using the farthest point is that its distance to the reference point is unlikely to be zero, so the IRIF features can be validly computed. Note that when the kernel size is 1, point ordering is only used for feature encoding (Equation 7) and is not necessary for the convolution itself, and the maxpooling makes the resulting features invariant to point permutation. The detailed steps of the convolution are shown in Algorithm 1.
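+
+ To make Algorithm 1 concrete, the following is a minimal PyTorch sketch of one RIConv++ layer with kernel size 1, where the per-neighbor IRIF (already ordered) is assumed to be precomputed; the class name, layer widths, and the exact normalization layout are illustrative assumptions, not the authors' released implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class RIConvPP(nn.Module):
+     """One RIConv++ layer (Algorithm 1): IRIF -> shared MLP -> concat -> pointwise conv -> maxpool."""
+     def __init__(self, irif_dims=8, lift_dims=32, prev_dims=0, out_dims=64):
+         super().__init__()
+         # Shared MLP lifting the 8-D IRIF of every neighbor to a higher dimension (step 2).
+         self.lift = nn.Sequential(nn.Conv2d(irif_dims, lift_dims, 1),
+                                   nn.BatchNorm2d(lift_dims), nn.ReLU())
+         # Pointwise (1x1) convolution over lifted + previous-layer features (step 4).
+         self.conv = nn.Sequential(nn.Conv2d(lift_dims + prev_dims, out_dims, 1),
+                                   nn.BatchNorm2d(out_dims), nn.ReLU())
+
+     def forward(self, irif, f_prev=None):
+         """
+         irif:   (B, 8, M, K)       IRIF of the K ordered neighbors of M representative points
+         f_prev: (B, C_prev, M, K)  neighbor features from the previous layer, or None
+         returns (B, out_dims, M)   one feature vector per representative point
+         """
+         f = self.lift(irif)                    # step 2: lift each feature
+         if f_prev is not None:
+             f = torch.cat([f_prev, f], dim=1)  # step 3: concatenate with previous features
+         f = self.conv(f)                       # step 4: pointwise convolution
+         return f.max(dim=-1).values            # step 5: maxpool over the K neighbors
+
+ # Usage sketch: a batch of 16 clouds, 256 representative points, 32 neighbors each.
+ layer = RIConvPP(prev_dims=0, out_dims=64)
+ out = layer(torch.randn(16, 8, 256, 32))       # -> (16, 64, 256)
+ ```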
208
+
209
+ Compared to RIConv (Zhang et al. 2019a) and GCA-Conv (Zhang et al. 2020), our convolution is simpler as it does not require binning. In fact, one of the effects of binning is to fix the instability of the $\overrightarrow{pm}$ vector. In our case, since the LRA is sufficiently stable, we simply apply the pointwise convolution as described.
210
+
211
+ In addition, traditional convolutional neural networks often allow downsampling/upsampling to manipulate the spatial resolution of the input. We build this
212
+
213
+ ![](images/4aa6d772627c379ea011972ce3ee62f0d54ab821d60fb24ca52da4df2427d42b.jpg)
214
+ Fig. 5 Our network architecture comprises five convolution layers to extract point cloud features, followed by fully connected layers for the object classification and shape retrieval tasks. We add a decoder with skip connections for the segmentation task.
215
+
216
+ strategy into our convolution by simply treating the sampled point set as the downsampling/upsampling points.
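+
+ As a reference, below is a minimal NumPy sketch of the farthest point sampling and K-nearest-neighbor grouping used to obtain the representative points and their local point sets; the function names are our own, and a practical implementation would typically use batched GPU kernels instead.
+
+ ```python
+ import numpy as np
+
+ def farthest_point_sampling(points, n_samples):
+     """Greedy FPS: repeatedly pick the point farthest from the already selected set."""
+     n = points.shape[0]
+     selected = [np.random.randint(n)]
+     dist = np.full(n, np.inf)
+     for _ in range(n_samples - 1):
+         dist = np.minimum(dist, np.linalg.norm(points - points[selected[-1]], axis=1))
+         selected.append(int(dist.argmax()))
+     return points[selected]
+
+ def knn_group(points, queries, k):
+     """For each representative (query) point, gather its k nearest neighbors."""
+     d = np.linalg.norm(queries[:, None, :] - points[None, :, :], axis=-1)  # (M, N)
+     idx = np.argsort(d, axis=1)[:, :k]
+     return points[idx]                                                     # (M, k, 3)
+ ```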
217
+
218
+ # 3.4 Network Architecture
219
+
220
+ We use the proposed convolution to design neural networks for object classification, object part segmentation, semantic segmentation, and shape retrieval. The architecture is shown in Figure 5 and Table 1. Our classification network has a standard architecture and uses five consecutive convolution layers (with point downsampling) followed by fully connected layers to output the probability map. As our convolution operator is already designed to handle arbitrary rotations and point orders, we can simply place the convolutions one after another. By default, each convolution is followed by batch normalization and a ReLU activation.
221
+
222
+ The segmentation network follows an encoder-decoder architecture with skip connections similar to U-Net (Ronneberger et al. 2015). We use an MLP after a skip connection to unify and transform the combined features to have a valid size before applying a deconvolution.
223
+
224
+ The classification network acts as the encoder, yielding the features in the latent space that can be subsequently decoded into part labels. Unless otherwise mentioned, we use 1024 points for classification/retrieval, 2048 points for part segmentation, and 4096 points for semantic segmentation, respectively.
225
+
226
+ Our deconvolution is detailed as follows. We define deconvolution in the same way as the RIConv++ convolution. The difference is that the convolution outputs a point subset with more feature channels, while the deconvolution outputs a point set with more points and fewer feature channels than its input. Particularly, suppose that deconvolution begins with a set of $N_{l}$ points at layer $l$ . The RIConv++ operator is applied, and the features of the $N_{l}$ points are upsampled to a set of $N_{l + 1}$ points at the next layer $l + 1$ . The deconvolution is repeatedly applied until the point cloud reaches the original number of points $N$ . Note that, as we already have the point subsets from the downsampling in the encoder, we do not need to generate points during upsampling; we simply reuse the subsets and propagate the features by interpolation.
229
+
230
+ Neighborhood Size. Our framework is able to integrate both local and global features by simply adjusting the neighborhood size. For the classification and retrieval tasks, global information is more important, so we set the nearest-neighbor sizes to 8, 16, 32, 64, and 128 for the five convolution layers in the encoder to extract features from local to global. For segmentation, we use the same setting for the encoder but carry the features from the 4th layer to the decoder to focus more on local features, and set the neighborhood sizes to 8, 16, 32, and 32 for the decoder layers.
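+
+ The neighborhood sizes above can be summarized in a small configuration sketch (the dictionary name and structure are illustrative assumptions):
+
+ ```python
+ # K-nearest-neighbor sizes per RIConv++ layer, as stated above.
+ NEIGHBOR_K = {
+     "encoder": [8, 16, 32, 64, 128],  # classification/retrieval and segmentation encoder
+     "decoder": [8, 16, 32, 32],       # segmentation decoder
+ }
+ ```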
231
+
232
+ # 4 Experiments
233
+
234
+ We report our evaluation results in this section. We implemented our network in PyTorch, and use a batch size of 16 for all the tasks in training. The optimization is done with an Adam optimizer. The initial learning rate
235
+
236
+ Table 1 The details of our neural network. Refer to Figure 5 for an illustration of the network architecture, and to Algorithm 1 for the steps of the RIConv++ operator. Here, K is the number of categories, and N is the number of input points.
237
+
238
+ <table><tr><td>Module</td><td>Output shape</td></tr><tr><td>RICConv++</td><td></td></tr><tr><td>Input tensor</td><td>in_dims × inpoints</td></tr><tr><td>RICConv++ operator</td><td>out_dims × outpoints</td></tr><tr><td>BatchNorm</td><td>out_dims × outpoints</td></tr><tr><td>ReLU</td><td>out_dims × outpoints</td></tr><tr><td>Classification / Retrieval</td><td></td></tr><tr><td>Input tensor</td><td>3 × N</td></tr><tr><td>RICConv++</td><td>32 × 1024</td></tr><tr><td>RICConv++</td><td>64 × 512</td></tr><tr><td>RICConv++</td><td>128 × 256</td></tr><tr><td>RICConv++</td><td>256 × 128</td></tr><tr><td>RICConv++</td><td>512 × 1</td></tr><tr><td>Fully connected</td><td>512 × 1</td></tr><tr><td>Fully connected</td><td>256 × 1</td></tr><tr><td>Softmax</td><td>K × 1</td></tr><tr><td>Segmentation</td><td></td></tr><tr><td>Input tensor</td><td>3 × N</td></tr><tr><td>RICConv++</td><td>64 × 512</td></tr><tr><td>RICConv++</td><td>128 × 256</td></tr><tr><td>RICConv++</td><td>256 × 128</td></tr><tr><td>RICConv++</td><td>512 × 64</td></tr><tr><td>RICConv++</td><td>512 × 128</td></tr><tr><td>Skip connection</td><td>768 × 128</td></tr><tr><td>MLP</td><td>512 × 128</td></tr><tr><td>RICConv++</td><td>512 × 256</td></tr><tr><td>Skip connection</td><td>640 × 256</td></tr><tr><td>MLP</td><td>256 × 256</td></tr><tr><td>RICConv++</td><td>256 × 512</td></tr><tr><td>Skip connection</td><td>320 × 512</td></tr><tr><td>MLP</td><td>128 × 512</td></tr><tr><td>RICConv++</td><td>K × N</td></tr></table>
239
+
240
+ is set to 0.001. Our training is executed on a computer with an Intel(R) Core(TM) i7-10700K CPU and an NVIDIA RTX 2080 Ti GPU.
241
+
242
+ We evaluate our method with object classification, shape retrieval, object part segmentation, and semantic segmentation. For object classification and retrieval, we train for 200 epochs, and the network usually converges within 150 epochs. For object part segmentation, we train for 200 epochs, and the network usually converges within 150 epochs. For semantic segmentation, we train for 40 epochs. It takes about 2 hours for the training to converge for classification, about 15 hours for part segmentation, and about 40 hours for semantic segmentation.
243
+
244
+ Following Esteves et al. (2018a), we perform experiments in three cases: (1) training and testing with data augmented with rotations about the gravity axis (z/z), (2) training and testing with data augmented with arbitrary SO3 rotations (SO3/SO3), and (3) training with data augmented with z-rotations and testing with data augmented with SO3 rotations (z/SO3). The first case is commonly used for evaluating translation-invariant point cloud learning methods, and the last two cases are for evaluating rotation invariance. In general, a convolution with rotation invariance is expected to work well in case (3) even though the network is not trained with data augmented with SO3 rotations.
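+
+ For reference, the following is a minimal sketch of the two augmentation schemes (random z-rotation versus a rotation drawn uniformly from SO(3)); the use of SciPy for uniform SO(3) sampling is our own implementation choice.
+
+ ```python
+ import numpy as np
+ from scipy.spatial.transform import Rotation
+
+ def augment_z(points):
+     """Rotate an (N, 3) point cloud by a random angle about the gravity (z) axis."""
+     theta = np.random.uniform(0.0, 2.0 * np.pi)
+     c, s = np.cos(theta), np.sin(theta)
+     Rz = np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
+     return points @ Rz.T
+
+ def augment_so3(points):
+     """Rotate an (N, 3) point cloud by a rotation sampled uniformly from SO(3)."""
+     R = Rotation.random().as_matrix()
+     return points @ R.T
+ ```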
247
+
248
+ In general, our results demonstrate the effectiveness of the proposed rotation invariant convolution. Our networks yield very consistent results even though they are trained with a limited set of rotated point clouds and tested with arbitrary rotations. To the best of our knowledge, no previous work on point cloud learning achieves the same level of accuracy with the same level of consistency, although some methods (Esteves et al. 2018a; Rao et al. 2019) demonstrate good performance when trained with a particular set of rotations. We detail our evaluations below.
249
+
250
+ # 4.1 Object Classification
251
+
252
+ The classification task is trained on the ModelNet40 variant of the ModelNet dataset (Wu et al. 2015b). ModelNet40 contains CAD models from 40 categories such as airplane, car, bottle, and dresser. Following Qi et al. (2017b), we use the preprocessed 9,843 models for training and 2,468 models for testing. The input point cloud size is 1024, with each point having the attributes $(x,y,z,nx,ny,nz)$ , i.e., 3D coordinates and 3D normals in Euclidean space.
253
+
254
+ We use the encoder layers of Figure 5, which output one feature vector, to train the classifier. Particularly, our network outputs a feature vector of length 512 to the classifier, which is then passed through an MLP implemented with fully connected layers, resulting in $128 \times 40$ category predictions.
255
+
256
+ We use two criteria for evaluation: accuracy and accuracy standard deviation. Accuracy is a common metric to measure the performance of the classification task. In addition, the accuracy deviation measures the consistency of the accuracy scores across the three tested cases. In general, rotation-invariant methods are expected to be insensitive to the rotations used in the training and testing data and therefore to have a low deviation in accuracy.
257
+
258
+ The evaluation results are shown in Table 2. As can be seen, our method achieves the state-of-the-art performance in all cases. More importantly, our method has almost zero accuracy deviation. Non-rotation invariant point cloud learning methods exhibit large accuracy deviations especially in the extreme z/SO3 case. This case
259
+
260
+ Table 2 Comparisons of the classification accuracy (%) on the ModelNet40 dataset. On average, our method has the best accuracy and lowest accuracy deviation in all cases including with and without normals. Here, 'nor' means 'normal'.
261
+
262
+ <table><tr><td></td><td>Method</td><td>Format</td><td>Input Size</td><td>Params.</td><td>z/z↑</td><td>SO3/SO3↑</td><td>z/SO3↑</td><td>Std.↓</td></tr><tr><td rowspan="8">Traditional</td><td>VoxNet (Maturana and Scherer 2015)</td><td>voxel</td><td>30³</td><td>0.90M</td><td>83.0</td><td>87.3</td><td>-</td><td>3.0</td></tr><tr><td>SubVolSup (Qi et al. 2016)</td><td>voxel</td><td>30³</td><td>17.00M</td><td>88.5</td><td>82.7</td><td>36.6</td><td>28.4</td></tr><tr><td>MVCNN 80x (Su et al. 2015)</td><td>view</td><td>80 × 224²</td><td>99.00M</td><td>90.2</td><td>86.0</td><td>81.5</td><td>4.3</td></tr><tr><td>PointNet (Qi et al. 2017a)</td><td>xyz</td><td>1024 × 3</td><td>3.50M</td><td>87.0</td><td>80.3</td><td>21.6</td><td>41.0</td></tr><tr><td>PointCNN (Li et al. 2018)</td><td>xyz</td><td>1024 × 3</td><td>0.60M</td><td>91.3</td><td>84.5</td><td>41.2</td><td>27.2</td></tr><tr><td>PointNet++ (Qi et al. 2017b)</td><td>xyz + nor</td><td>1024 × 6</td><td>1.40M</td><td>89.3</td><td>85.0</td><td>28.6</td><td>33.8</td></tr><tr><td>DGCNN (Wang et al. 2019)</td><td>xyz</td><td>1024 × 3</td><td>1.84M</td><td>92.2</td><td>81.1</td><td>20.6</td><td>38.5</td></tr><tr><td>RS-CNN (Liu et al. 2019)</td><td>xyz</td><td>1024 × 3</td><td>1.41M</td><td>90.3</td><td>82.6</td><td>48.7</td><td>22.1</td></tr><tr><td rowspan="11">Rotation-invariant</td><td>Spherical CNN (Esteves et al. 2018a)</td><td>voxel</td><td>2 × 64²</td><td>0.50M</td><td>88.9</td><td>86.9</td><td>78.6</td><td>5.5</td></tr><tr><td>RICNV (Zhang et al. 2019a)</td><td>xyz</td><td>1024 ×3</td><td>0.70M</td><td>86.5</td><td>86.4</td><td>86.4</td><td>0.1</td></tr><tr><td>SPHNet (Poulenard et al. 2019)</td><td>xyz</td><td>1024 ×3</td><td>2.90M</td><td>87.0</td><td>87.6</td><td>86.6</td><td>0.5</td></tr><tr><td>SFCNN (Rao et al. 2019)</td><td>xyz</td><td>1024 ×3</td><td>-</td><td>91.4</td><td>90.1</td><td>84.8</td><td>3.5</td></tr><tr><td>ClusterNet (Chen et al. 2019)</td><td>xyz</td><td>1024 ×3</td><td>1.40M</td><td>87.1</td><td>87.1</td><td>87.1</td><td>0.0</td></tr><tr><td>GCACNV (Zhang et al. 2020)</td><td>xyz</td><td>1024 ×3</td><td>0.41M</td><td>89.0</td><td>89.2</td><td>89.1</td><td>0.0</td></tr><tr><td>RI-GCN (Kim et al. 2020b)</td><td>xyz</td><td>1024 ×3</td><td>4.38M</td><td>89.5</td><td>89.5</td><td>89.5</td><td>0.0</td></tr><tr><td>RIF (Li et al. 2021)</td><td>xyz</td><td>1024 ×3</td><td>-</td><td>89.4</td><td>89.3</td><td>89.4</td><td>0.0</td></tr><tr><td>Ours</td><td>xyz</td><td>1024 ×3</td><td>0.42M</td><td>91.2</td><td>91.2</td><td>91.2</td><td>0.0</td></tr><tr><td>RI-GCN (Kim et al. 2020b)</td><td>xyz + nor</td><td>1024 ×6</td><td>4.38M</td><td>91.0</td><td>91.0</td><td>91.0</td><td>0.0</td></tr><tr><td>Ours</td><td>xyz + nor</td><td>1024 ×6</td><td>0.42M</td><td>91.3</td><td>91.3</td><td>91.3</td><td>0.0</td></tr></table>
263
+
264
+ is exceptionally hard for methods that rely on data augmentation to handle rotations (Qi et al. 2017a,b). In our observation, such techniques only generalize within the type of rotation they are trained with and generally fail in the z/SO3 test. This applies to both voxel-based and point-based learning techniques. By contrast, our method shows almost no performance difference across the three test cases, which confirms the robustness of the rotation invariant geometric cues in our convolution. The success of our method is attributed to two factors: the informative rotation-invariant features (Section 3.2), and the use of a large neighborhood in the last layer of the network, which naturally widens the receptive field and captures global features.
265
+
266
+ Table 2 also includes a comparison on the use of normal vectors for feature learning. For our method with input format $xyz$ , we use the LRA as the reference axis, while with input format $xyz + nor$ , we use normal vectors as the reference axis. This is different from PointNet++ and RI-GCN, which treat normals as extra features. In both cases, our method performs almost identically (0.1% accuracy difference), which means that our LRA is as descriptive as normal vectors. Our method also outperforms both PointNet++ and RI-GCN in both cases. Note that we achieve this performance without voting, a test-time augmentation scheme used by RI-GCN to boost the classification result. When voting is disabled, RI-GCN has a 1% accuracy drop. Voting also slows down inference in real applications.
267
+
268
+ Network Parameters. The capability to handle rotation invariance also has a great effect on the number of network parameters. Networks that rely on data augmentation to handle rotations require more parameters to 'memorize' the rotations. Networks that are designed to be rotation invariant, such as spherical CNN (Esteves et al. 2018a) and ours, have very compact representations. Our network has 0.4 million (0.4M) trainable parameters, which makes it the most compact network in our evaluations. Among the tested methods, only spherical CNN (Esteves et al. 2018a) (0.5M) and PointCNN (0.6M) have similar compactness. Our network has about $9 \times$ fewer parameters than PointNet (3.5M) and more than $3 \times$ fewer than PointNet++ (1.4M). The good balance between trainable parameters, accuracy, and accuracy deviation makes our method more robust for practical use.
269
+
270
+ # 4.2 An Analysis of Rotation-Invariant Features
271
+
272
+ We compare the performance with RIConv (Zhang et al. 2019a) and GCAConv (Zhang et al. 2020) using the same number of layers and the same number of representative points in each layer. Here, we set the total number of layers to 3, with 512, 128, and 32 representative points and 64, 32, and 16 nearest neighbors, respectively. The results are shown in Table 3. In general, we observe that our informative rotation invariant features (IRIF) are more accurate than the original features used in RIConv (Zhang et al. 2019a) and GCAConv (Zhang et al. 2020).
273
+
274
+ Table 3 Performance comparisons (%) with our previous work, RIConv (Zhang et al. 2019a) and GCAConv (Zhang et al. 2020), on the ModelNet40 dataset with the same representative points and the same number of layers. It shows the effectiveness of our proposed features.
275
+
276
+ <table><tr><td>Method</td><td>Core Features</td><td>OA</td></tr><tr><td>RICConv (Zhang et al. 2019a)</td><td>RIF</td><td>86.5</td></tr><tr><td>GCACnv (Zhang et al. 2020)</td><td>LRF Transform</td><td>89.0</td></tr><tr><td>Ours (w/o normal)</td><td>IRIF</td><td>89.8</td></tr><tr><td>Ours (w/ normal)</td><td>IRIF</td><td>90.3</td></tr></table>
277
+
278
+ ![](images/a78b5d34e1fc54b1928744b0404a5da1fee434b8927eeea07d13b6baacc832d7.jpg)
279
+ Fig. 6 t-SNE comparisons of the latent features for PointNet++ (Qi et al. 2017b), RIConv (Zhang et al. 2019a), GCA-Conv (Zhang et al. 2020), and our method under three different rotation settings. For good decisions in object classification, it is desirable that the cluster boundaries in the t-SNE plots are as separated as possible.
280
+
281
+ We further visualize the latent space learned by the neural networks using t-SNE (van der Maaten and Hinton 2008). The results are shown in Figure 6. We measure the clustering quality by two metrics, the normalized mutual information (NMI) and purity (Manning et al. 2008). We demonstrate three scenarios for object classification: $\mathrm{z} / \mathrm{z}$ , SO3/SO3, and $\mathrm{z} / \mathrm{SO3}$ . As can be seen, latent space learned by rotation-invariant convolution such as RICnv (Zhang et al. 2019a) does not exhibit good discrimination among classes. The main difference
282
+
283
+ ![](images/3617f370f2474c0551f2fb96608206fa0d763446825f49acb4e4f81e8d6651d8.jpg)
284
+ Fig. 7 Performance of our method in the presence of additive Gaussian noise on the ModelNet40 dataset. Our method outperforms all existing rotation-invariant convolutions.
285
+
286
+ between such convolutions and traditional point cloud convolutions is that they no longer work with point coordinates from the start. In the case of RIConv, the points are transformed into Euclidean-based features including distances and angles, which are not as unique as point coordinates since many points can share the same distances and angles. This is well reflected in the t-SNE in the first column (z/z) in Figure 6. PointNet++ (Qi et al. 2017b) has a good separation among the clusters, while RIConv (Zhang et al. 2019a) has more condensed clusters in the center, resulting in more ambiguities during classification. The clusters of GCAConv (Zhang et al. 2020) and our method are more similar to those of PointNet++, which explains their similar performance.
287
+
288
+ Similarly, in the second column (SO3/SO3), all methods have similar clustering, which explains their similar performance in the classification (see more quantitative comparisons in Table 2). Finally, the third column (z/SO3) highlights the strength of rotation-invariant convolutions as they can still maintain consistent predictions and generalize well to unseen conditions. In this case, the t-SNEs show that PointNet++ cannot generalize effectively.
289
+
290
+ # 4.3 Robustness to Noise
291
+
292
+ In real applications, the point cloud data produced from scanners usually contains noise. Here we conduct an experiment to verify the robustness of existing rotation-invariant convolutions to noise. We sample and add noise from zero-mean Gaussian distributions $N(0,\sigma^2)$ to the data, and compare the classification performance of our method with RICnv (Zhang et al. 2019a), GCA-Conv (Zhang et al. 2020) and RI-GCN (Kim et al. 2020b). The noise level can be controlled by adjusting $\sigma^2$ .
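+
+ As a reference, the following is a minimal sketch of the noise perturbation used in this experiment (adding zero-mean Gaussian noise of a chosen standard deviation to every point); the function name is our own.
+
+ ```python
+ import numpy as np
+
+ def add_gaussian_noise(points, sigma):
+     """Perturb an (N, 3) point cloud with zero-mean Gaussian noise N(0, sigma^2)."""
+     return points + np.random.normal(0.0, sigma, size=points.shape)
+ ```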
293
+
294
+ In Figure 7, we plot the accuracy against different noise levels $\sigma$ . It can be seen that our method outperforms all the other methods. In RIConv, the use of the vector $\overrightarrow{pm}$ (from the representative point to the local centroid) as the reference axis is unstable under noise. GCAConv and RI-GCN use the LRF, which is also vulnerable to noise.
295
+
296
+ # 4.4 LRA/LRF Comparison
297
+
298
+ As the LRA is the basis for defining the Informative Rotation Invariant Features (IRIF), it is necessary to analyze the stability of the LRA separately. A popular way to evaluate the robustness of an LRA or LRF is to benchmark its repeatability. We follow Guo et al. (2013) (see their Section 3.3) to conduct this experiment. Note that there are also methods that compute LRFs for meshes, such as MeshHOG (Zaharescu et al. 2009) and RoPS (Guo et al. 2013). In this study, we assume that no normal vectors or triangle faces are available, so we omit such methods from our comparison. We use six models from the Stanford 3D Scanning Repository (Curless and Levoy 1996) (Figure 8, top). The scenes are created by resampling the models down to $1/2$ of their original mesh resolution and adding Gaussian noise (0.5 mesh resolution).
299
+
300
+ From each model, 1000 points are randomly selected and their correspondences in the scene are obtained by searching the closest point in the Euclidean space. Let's denote the pair of points as $(p_{si}, p_{mi})$ from scene and model respectively. The LRFs or LRAs for these two points are computed as $V_{si}$ and $V_{mi}$ . To measure the similarity between them, we use the error evaluation metric provided by Mian et al. (2010):
301
+
302
+ $$
303
+ e_i = \arccos \left( \frac{\operatorname{Tr}\left( V_{s_i} V_{m_i} \right) - 1}{2} \right) \frac{180}{\pi}. \tag{11}
304
+ $$
305
+
306
+ Ideally, $e_i$ is zero when there is no error. We compare with the LRF used in SHOT (Tombari et al. 2010) and the $\overrightarrow{pm}$ vector used in RIConv (Zhang et al. 2019a). The results are shown in Figure 8, where the horizontal axis indicates the angular error range and the vertical axis represents the percentage of points; the more points fall into the lower error ranges, the better the method. As can be seen, the LRA has many more points in the low error ranges than the other methods and significantly fewer in the high error ranges.
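+
+ Below is a minimal NumPy sketch of the error metric in Equation 11, assuming $V_{s_i}$ and $V_{m_i}$ are given as $3 \times 3$ matrices; the simpler axis-angle variant for single reference axes is our own addition for the LRA case.
+
+ ```python
+ import numpy as np
+
+ def lrf_error_deg(V_s, V_m):
+     """Angular error of Eq. 11 between two 3x3 reference frames, in degrees."""
+     c = (np.trace(V_s @ V_m) - 1.0) / 2.0
+     return np.degrees(np.arccos(np.clip(c, -1.0, 1.0)))
+
+ def lra_error_deg(a_s, a_m):
+     """Angle between two reference axes in degrees (assumed variant for the LRA case)."""
+     c = np.dot(a_s, a_m) / (np.linalg.norm(a_s) * np.linalg.norm(a_m) + 1e-12)
+     return np.degrees(np.arccos(np.clip(c, -1.0, 1.0)))
+ ```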
307
+
308
+ # 4.5 Ablation Studies
309
+
310
+ IRIF Design. We first experiment by turning on/off the different rotation invariant components used in the IRIF construction (Section 3.2). The results of this experiment are shown
311
+
312
+ ![](images/3226f10233c5ddd64fcadd1a6357522985707ca16bd5adeaee61e98c67e13500.jpg)
313
+ Asia Dragon
314
+
315
+ ![](images/8c4e2fed22578cc1ee9ba36b6424230931085544398265a57b6d27edd6afae6c.jpg)
316
+ Dragon
317
+
318
+ ![](images/4857e8bd7bdcaf7178b3ced6d6a52694fd75a7304abcbad51a83713920f0e26f.jpg)
319
+ Happy Buddha
320
+
321
+ ![](images/5d822505e66c1664ff2bfe541fe5d305515900e40185385b75756d95aeb812e0.jpg)
322
+ Armadillo
323
+
324
+ ![](images/336b7a96770729a3c331efc2e58f7097f99864e6d64e18b4ebdef13d84c3eddd.jpg)
325
+ Bunny
326
+
327
+ ![](images/4a248543ba8ba706642af8d1d861ab9ad69f764ddbaebc2083c49343a37bc1da.jpg)
328
+ Thai Statue
329
+
330
+ ![](images/c9d7c4d5bdbab499f08c7d0035fba0acaf0a4d0c24ff20baa075ee03e1710dd2.jpg)
331
+ Fig. 8 Repeatability of the reference axis. We plot the histogram of errors of our LRA, the LRF (Tombari et al. 2010), and the $\overrightarrow{pm}$ vector in RIConv (Zhang et al. 2019a) for six models from the Stanford 3D Scanning Repository (Curless and Levoy 1996). LRA has the most points in the low error range.
332
+
333
+ Table 4 An evaluation of our features design. By combining distances and angles, relations between the representative points and the neighbors, relations among the neighbors, Model A with IRIF can achieve performance on ModelNet40 similar to PointCNN (Li et al. 2018) using xyz coordinates.
334
+
335
+ <table><tr><td>Model</td><td>φ</td><td>β0</td><td>β1</td><td>β2</td><td>d</td><td>α0</td><td>α1</td><td>α2</td><td>Acc.</td></tr><tr><td>A</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>91.2</td></tr><tr><td>B</td><td></td><td>✓</td><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>90.5</td></tr><tr><td>C</td><td>✓</td><td></td><td></td><td></td><td>✓</td><td></td><td></td><td></td><td>83.3</td></tr><tr><td>D</td><td></td><td></td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>88.4</td></tr></table>
336
+
337
+ in Table 4. Model A is our baseline setting with all rotation invariant features activated. Model B has only angle features, and Model C has only distance features. From the comparison, we can see that turning off either feature type deteriorates the results. When only distances are employed (Model C), the accuracy decreases to $83.3\%$ . In Model D, we only keep the features along the radial direction, i.e., $d$ , $\alpha_0$ , $\alpha_1$ , and $\alpha_2$ , such that
338
+
339
+ Table 5 Performance comparisons with different kernel sizes. Kernel size 1 has the best speed-accuracy trade-off.
340
+
341
+ <table><tr><td>Kernel Size</td><td>Time / Epoch</td><td>Params.</td><td>Acc.</td></tr><tr><td>1</td><td>40s</td><td>0.40M</td><td>91.2</td></tr><tr><td>3</td><td>63s</td><td>0.83M</td><td>91.2</td></tr><tr><td>5</td><td>73s</td><td>1.29M</td><td>91.0</td></tr><tr><td>7</td><td>85s</td><td>1.73M</td><td>91.0</td></tr></table>
342
+
343
+ Table 6 Application of IRIF features on different network architecture.
344
+
345
+ <table><tr><td>Method</td><td>z/z</td><td>SO3/SO3</td><td>z/SO3</td><td>Acc. Std</td></tr><tr><td>PN++</td><td>89.3</td><td>85.0</td><td>28.6</td><td>33.8</td></tr><tr><td>PN++ (IRIF)</td><td>89.1</td><td>89.1</td><td>89.0</td><td>0.0</td></tr><tr><td>DGCNN</td><td>92.2</td><td>81.1</td><td>20.6</td><td>38.5</td></tr><tr><td>DGCNN (IRIF)</td><td>90.2</td><td>90.1</td><td>90.2</td><td>0.0</td></tr></table>
346
+
347
+ only the relations between the reference point and its neighbors are considered. This setting is used in RIConv (Zhang et al. 2019a). The comparison with Model A shows that our proposed features are more effective than those of RIConv, and the improvement is explained by the additional consideration of the relations among the neighbor points.
348
+
349
+ Kernel Size. For the pointwise convolution in Algorithm 1, different kernel sizes can be chosen. We compare the performance with different kernel sizes in Table 5. With larger kernels, the running time and the number of parameters also increase. We observe that larger kernels (5 and 7) do not lead to better performance because our rotation-invariant features only involve a point $x_{i}$ and its immediate neighbors, which makes a kernel of size up to 3 sufficient to capture the local features. In our experiments, we choose kernel size 1 for the best speed-accuracy trade-off.
350
+
351
+ IRIF on Different Network Architectures. IRIF can be applied to other architectures to realize rotation invariance. Here, we experiment with PointNet++ (Qi et al. 2017b) and DGCNN (Wang et al. 2019) by replacing their xyz coordinate inputs with IRIF. The results are shown in Table 6. We can see that rotation invariance is achieved.
352
+
353
+ Training Statistics. We further analyze the training convergence and measure the accuracy on the test set at different training epochs, using the z/SO3 setting. The plot is shown in Figure 9. It can be seen that RIConv (Zhang et al. 2019a) converges to a flat accuracy curve after about 20 epochs. Our new method converges at a higher accuracy, and its accuracy keeps increasing until around
354
+
355
+ ![](images/5292eaa55053363c74fc02aceaf11508684f54641e54c5f996eb4e20e1562144.jpg)
356
+ Fig. 9 Overall accuracy versus training epochs plot of object classification under z/SO3 mode on the ModelNet40 dataset.
357
+
358
+ 100 epochs. Both methods outperform standard point cloud methods (Qi et al. 2017a,b; Li et al. 2018) by a large margin as expected for the z/SO3 setting.
359
+
360
+ # 4.6 Real World 3D Point Cloud Classification
361
+
362
+ Real-world point cloud data usually contains missing data, occlusions, and non-uniform density. We evaluate the classification performance on ScanObjectNN (Uy et al. 2019), a real-world dataset of 3D point clouds captured by an RGB-D camera. The data comprises 2,902 objects in 15 categories sampled from real-world indoor scenes. For our evaluation, we use the processed files and choose the easier variants OBJ_ONLY and OBJ_BG (object only, without/with background points, and without rotation, translation, or scaling) and the hardest variant PB_T50_RS (with $50\%$ bounding box translation, rotation around the gravity axis, and random scaling, resulting in rotated and partial data). The results are shown in Table 7. It can be seen that our method outperforms the existing approaches in nearly all scenarios and is only slightly worse than PointCNN under the z/z scenario. This verifies that RIConv++ is also effective on real-world datasets. Note that we only test the 'w/o normal' case, as normal vectors are not provided in the processed files of this dataset.
363
+
364
+ The experiment with ScanObjectNN also demonstrates the robustness of our method in the presence of occlusion or background points. Firstly, we can see that our method works well for objects in ScanObjectNN even though they are incomplete scans due to occlusion, which demonstrates the effectiveness of our rotation-invariant features (IRIF) and the construction of the local
365
+
366
+ Table 7 Comparisons of real world 3D point cloud classification on ScanObjectNN (Uy et al. 2019) dataset. We tested on the OBJ_ONLY (object without background), OBJ_BG (object with background) and PB_T50_RS (hardest) variant.
367
+
368
+ <table><tr><td rowspan="2">Method</td><td colspan="3">OBJ_ONLY</td><td colspan="3">OBJ_BG</td><td colspan="3">PB_T50_RS</td></tr><tr><td>z/z</td><td>SO3/SO3</td><td>z/SO3</td><td>z/z</td><td>SO3/SO3</td><td>z/SO3</td><td>z/z</td><td>SO3/SO3</td><td>z/SO3</td></tr><tr><td>PointNet (Qi et al. 2017a)</td><td>79.2</td><td>57.5</td><td>28.7</td><td>73.3</td><td>54.7</td><td>16.7</td><td>68.2</td><td>42.2</td><td>17.1</td></tr><tr><td>PointNet++ (Qi et al. 2017b)</td><td>84.3</td><td>57.1</td><td>25.6</td><td>82.3</td><td>47.4</td><td>15.0</td><td>77.9</td><td>60.1</td><td>15.8</td></tr><tr><td>PointCNN (Li et al. 2018)</td><td>85.5</td><td>66.9</td><td>21.9</td><td>86.1</td><td>63.7</td><td>14.6</td><td>78.5</td><td>51.8</td><td>14.9</td></tr><tr><td>DGCNN (Wang et al. 2019)</td><td>86.2</td><td>73.6</td><td>20.7</td><td>82.8</td><td>71.8</td><td>17.7</td><td>78.1</td><td>63.4</td><td>16.1</td></tr><tr><td>RIConv (Zhang et al. 2019a)</td><td>79.8</td><td>79.8</td><td>79.8</td><td>78.4</td><td>78.2</td><td>78.4</td><td>68.1</td><td>68.3</td><td>68.3</td></tr><tr><td>GCAConv (Zhang et al. 2020)</td><td>80.1</td><td>80.3</td><td>80.1</td><td>78.2</td><td>78.1</td><td>78.2</td><td>69.8</td><td>70.0</td><td>69.8</td></tr><tr><td>Ours (w/o normal)</td><td>86.2</td><td>86.2</td><td>86.2</td><td>85.6</td><td>85.6</td><td>85.6</td><td>80.3</td><td>80.3</td><td>80.3</td></tr></table>
369
+
370
+ Table 8 Comparisons of 3D shape retrieval on the ShapeNet Core (Wu et al. 2015b). The accuracy $(\%)$ is reported based on the standard evaluation metrics including precision, recall, f-score, mean average precision (mAP) and normalized discounted cumulative gain (NDCG).
371
+
372
+ <table><tr><td rowspan="2">Method</td><td colspan="5">micro</td><td colspan="5">macro</td><td rowspan="2">Score</td></tr><tr><td>PN</td><td>R@N</td><td>F1@N</td><td>mAP</td><td>NDCG</td><td>PN</td><td>R@N</td><td>F1@N</td><td>mAP</td><td>NDCG</td></tr><tr><td>Furuya and Ohbuchi (2016)</td><td>81.4</td><td>68.3</td><td>70.6</td><td>65.6</td><td>75.4</td><td>60.7</td><td>53.9</td><td>50.3</td><td>47.6</td><td>56.0</td><td>56.6</td></tr><tr><td>Tatsuma and Aono (2009)</td><td>70.5</td><td>76.9</td><td>71.9</td><td>69.6</td><td>78.3</td><td>42.4</td><td>56.3</td><td>43.4</td><td>41.8</td><td>47.9</td><td>55.7</td></tr><tr><td>Bai et al. (2016)</td><td>66.0</td><td>65.0</td><td>64.3</td><td>56.7</td><td>70.1</td><td>44.3</td><td>50.8</td><td>43.7</td><td>40.6</td><td>51.3</td><td>48.7</td></tr><tr><td>Esteves et al. (2018a)</td><td>71.7</td><td>73.7</td><td>-</td><td>68.5</td><td>-</td><td>45.0</td><td>55.0</td><td>-</td><td>44.4</td><td>-</td><td>56.5</td></tr><tr><td>SFCNN (Rao et al. 2019)</td><td>77.8</td><td>75.1</td><td>75.2</td><td>70.5</td><td>81.3</td><td>65.6</td><td>53.9</td><td>53.6</td><td>48.3</td><td>58.0</td><td>59.4</td></tr><tr><td>GCACnv (Zhang et al. 2020)</td><td>82.9</td><td>76.3</td><td>74.8</td><td>70.8</td><td>81.3</td><td>66.8</td><td>55.9</td><td>51.2</td><td>49.0</td><td>58.2</td><td>61.2</td></tr><tr><td>RIF (Li et al. 2021)</td><td>82.1</td><td>73.7</td><td>74.1</td><td>70.7</td><td>80.5</td><td>51.2</td><td>66.4</td><td>55.8</td><td>51.0</td><td>56.0</td><td>60.9</td></tr><tr><td>Ours</td><td>83.2</td><td>77.2</td><td>75.2</td><td>71.3</td><td>81.8</td><td>68.1</td><td>58.2</td><td>53.7</td><td>50.2</td><td>58.8</td><td>60.7</td></tr></table>
373
+
374
+ reference axes (LRA). Secondly, when additional background points are added to the objects, as in the OBJ_BG variant, comparing OBJ_ONLY and OBJ_BG shows that our method is more tolerant and has a smaller accuracy drop $(-0.6\%)$ than previous methods (RIConv drops by $1.4\%$ and GCAConv by $1.9\%$ in the z/z case).
375
+
376
+ # 4.7 Shape Retrieval
377
+
378
+ Another popular task to evaluate rotation invariance on 3D shape is shape retrieval (Savva et al. 2016). Here we conducted experiments on ShapeNet Core (Wu et al. 2015b), following the perturbed protocol of the SHREC'17 3D shape retrieval contest (Savva et al. 2016) and the experiment setting of SFCNN (Rao et al. 2019). We use the same output features from the bottleneck layer in the network (similar to features used in the classification task; see Figure 5). We compare our method with methods proposed in SHREC'17 (Furuya and Ohbuchi 2016; Tatsuma and Aono 2009; Bai et al. 2016) and two recent methods on rotation-invariant convolution (Esteves et al. 2018a; Rao et al. 2019). The results are shown in Table 8. Similar to the classification task, our
379
+
380
+ method achieves the state-of-the-art result, outperforming previous methods for most evaluation metrics.
381
+
382
+ # 4.8 Object Part Segmentation on ShapeNet
383
+
384
+ We also evaluated our method with the object part segmentation where each point of the input point cloud is predicted with a part label. We train and test with the ShapeNet dataset (Chang et al. 2015) that contains 16,880 CAD models in 16 categories. Each model is annotated with 2 to 6 parts, resulting in a total of 50 object parts. We follow the standard train/test split with 14,006 models for training and 2,874 models for testing, respectively.
385
+
386
+ The evaluation results are shown in Table 9. Our method significantly outperforms translation-invariant convolution methods in the SO3/SO3 and z/SO3 scenarios. Compared to previous rotation-invariant convolution methods, our method also has better performance. This result aligns well with the performance reported in the object classification task. Our method also has consistent performance in both rotation cases, which empirically confirms the rotation invariance of our convolution. We illustrate the qualitative results with error maps in Figure 10. By comparing with the
387
+
388
+ Table 9 Comparisons of object part segmentation performed on ShapeNet dataset (Chang et al. 2015). The mean per-class IoU (mIoU, %) is used to measure the accuracy under two challenging rotation modes: SO3/SO3 and z/SO3.
389
+
390
+ <table><tr><td>Method</td><td>SO3/SO3</td><td>z/SO3</td></tr><tr><td>PointNet (Qi et al. 2017a)</td><td>74.4</td><td>37.8</td></tr><tr><td>PointCNN (Li et al. 2018)</td><td>71.4</td><td>34.7</td></tr><tr><td>DGCNN (Wang et al. 2019)</td><td>73.3</td><td>37.4</td></tr><tr><td>RS-CNN (Liu et al. 2019)</td><td>72.5</td><td>36.5</td></tr><tr><td>RICConv (Zhang et al. 2019a)</td><td>75.5</td><td>75.3</td></tr><tr><td>GCAConv (Zhang et al. 2020)</td><td>77.3</td><td>77.2</td></tr><tr><td>RI-GCN (Kim et al. 2020b)</td><td>77.0</td><td>77.0</td></tr><tr><td>RIF (Li et al. 2021)</td><td>79.4</td><td>79.2</td></tr><tr><td>Ours (w/o normal)</td><td>80.3</td><td>80.3</td></tr><tr><td>PointNet++ (Qi et al. 2017b)</td><td>76.7</td><td>48.2</td></tr><tr><td>SpiderCNN (Xu et al. 2018)</td><td>72.3</td><td>42.9</td></tr><tr><td>Ours (w/ normal)</td><td>80.5</td><td>80.5</td></tr></table>
391
+
392
+ ![](images/0303007fffc1bdf04e1e5727ad081c0086c6d5577724435495d1786fc27a45ad.jpg)
393
+ Fig. 10 Comparison of part segmentation error maps on ShapeNet dataset under z/SO3 mode. The red points indicate wrong segmentations.
394
+
395
+ ground truth, we plot the wrongly segmented points in red. The plot clearly shows that our predictions are the closest to the ground truth.
396
+
397
+ # 4.9 Large-scale scene segmentation on S3DIS
398
+
399
+ We conduct an experiment on scene segmentation with the S3DIS dataset (Armeni et al. 2016), a large-scale indoor scene dataset comprising 3D scans from Matterport scanners over 6 indoor areas with 271 rooms, where each point is annotated with one of 13 semantic labels. In this experiment, we use Area-5 for testing to better measure the generalization ability, and report the results in the SO3/SO3 and z/SO3 scenarios. The results are shown in Table 10. From
400
+
401
+ Table 10 Comparisons of the semantic segmentation accuracy (mIoU, %) on the S3DIS dataset (Area-5).
402
+
403
+ <table><tr><td>Method</td><td>SO3/SO3</td><td>z/SO3</td></tr><tr><td>PointNet (Qi et al. 2017a)</td><td>35.2</td><td>20.8</td></tr><tr><td>PointCNN (Li et al. 2018)</td><td>43.5</td><td>23.6</td></tr><tr><td>RICConv (Zhang et al. 2019a)</td><td>53.3</td><td>53.2</td></tr><tr><td>GCACConv (Zhang et al. 2020)</td><td>55.8</td><td>55.7</td></tr><tr><td>Ours (w/o normal)</td><td>57.0</td><td>57.1</td></tr></table>
404
+
405
+ the results, we can see that RIConv++ outperforms RIConv (Zhang et al. 2019a) and GCAConv (Zhang et al. 2020), and works consistently in both scenarios.
406
+
407
+ # 5 Conclusion
408
+
409
+ In this work, we revisited the design of rotation-invariant features for 3D point cloud convolution and proposed RIConv++, a 3D point cloud convolution that satisfies the rotation invariance property. The effectiveness of RIConv++ comes from its powerful and stable rotation invariant features and the flexible neighborhood sizes employed in the convolution. We use the local reference axis (LRA) instead of the commonly used local reference frame (LRF) to construct stable rotation invariant features. The relationships between the representative point and its neighbors and among the neighbors themselves are used in tandem to make our convolution more informative. Local and global information can be captured by simply using a different neighborhood size in each convolution. Such a design achieves state-of-the-art results in various tasks such as object classification, shape retrieval, and object part segmentation.
410
+
411
+ As our newly proposed convolution can closely match the performance of state-of-the-art translation-invariant convolutions, our work opens up opportunities to further reduce the performance gap between rotation- and translation-invariant convolutions, especially in the presence of noise. Our method also shows that handcrafted rotation-invariant features, when used properly, can lead to compelling results in 3D deep learning. An open question remains: it would be of great interest to design a convolution that can learn rotation-invariant features automatically, without the need for handcrafted features, which could extend the applications of rotation-invariant features to more data domains.
412
+
413
+ Acknowledgements We thank the anonymous reviewers for their constructive comments. This research project is supported by the grant from Ningbo Research Institute of Zhejiang University (1149957B20210125), and partially supported by an internal grant from HKUST (R9429).
414
+
415
+ # References
416
+
417
+ Armeni I, Sener O, Zamir AR, Jiang H, Brilakis I, Fischer M, Savarese S (2016) 3d semantic parsing of large-scale indoor spaces. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1534-1543 1, 14
418
+ Bai S, Bai X, Zhou Z, Zhang Z, Jan Latecki L (2016) Gift: A real-time and scalable 3d shape search engine. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 5023-5032 13
419
+ Chang AX, Funkhouser TA, Guibas LJ, Hanrahan P, Huang QX, Li Z, Savarese S, Savva M, Song S, Su H, Xiao J, Yi L, Yu F (2015) Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:151203012 1, 13, 14
420
+ Chen C, Li G, Xu R, Chen T, Wang M, Lin L (2019) Cluster network: Deep hierarchical cluster network with rigorously rotation-invariant representation for point cloud analysis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 4994-5002 2, 3, 9
421
+ Curless B, Levoy M (1996) A volumetric method for building complex models from range images. In: Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pp 303-312 11
422
+ Dai A, Chang AX, Savva M, Halber M, Funkhouser T, Niessner M (2017) Scannet: Richly-annotated 3d reconstructions of indoor scenes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 5828-5839
423
+ Deng H, Birdal T, Ilic S (2018) Ppf-foldnet: Unsupervised learning of rotation invariant 3d local descriptors. In: Proceedings of the European Conference on Computer Vision, pp 602-618 2
424
+ Esteves C, Allen-Blanchette C, Makadia A, Daniilidis K (2018a) Learning so (3) equivariant representations with spherical cnns. In: Proceedings of the European Conference on Computer Vision (ECCV), pp 52-68 8, 9, 13
425
+ Esteves C, Allen-Blanchette C, Zhou X, Daniilidis K (2018b) Polar transformer networks. In: International Conference on Learning Representations 3
426
+ Esteves C, Xu Y, Allen-Blanchette C, Daniilidis K (2019) Equivariant multi-view networks. In: Proceedings of the IEEE International Conference on Computer Vision, pp 1568-1577 2
427
+ Furuya T, Ohbuchi R (2016) Deep aggregation of local 3d geometric features for 3d model retrieval. In: BMVC, vol 7, p 8 13
428
+ Guo Y, Sohel F, Bennamoun M, Lu M, Wan J (2013) Rotational projection statistics for 3d local surface description and object recognition. International journal of computer vision 105(1):63-86 11
429
+ Guo Y, Wang H, Hu Q, Liu H, Liu L, Bennamoun M (2020) Deep learning for 3d point clouds: A survey. IEEE transactions on pattern analysis and machine intelligence 43(12):4338-4364 2
430
+ Hua BS, Pham QH, Nguyen DT, Tran MK, Yu LF, Yeung SK (2016) Scenenn: A scene meshes dataset with annotations. In: 2016 Fourth International Conference on 3D Vision, pp 92-101 1
431
+ Hua BS, Tran MK, Yeung SK (2018) Pointwise convolutional neural networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 984-993 1, 2
432
+ Kim J, Jung W, Kim H, Lee J (2020a) Cycnn: a rotation invariant cnn using polar mapping and cylindrical convolution layers. arXiv preprint arXiv:200710588 3
433
+
434
+ Kim S, Park J, Han B (2020b) Rotation-invariant local-to-global representation learning for 3d point cloud. Advances in Neural Information Processing Systems 33:8174-8185 2, 3, 9, 10, 14
435
+ Klokov R, Lempitsky V (2017) Escape from cells: Deep kd-networks for the recognition of 3d point cloud models. In: Proceedings of the IEEE International Conference on Computer Vision, pp 863-872 2
436
+ Laptev D, Savinov N, Buhmann JM, Pollefeys M (2016) TiPooling: transformation-invariant pooling for feature learning in convolutional neural networks. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 289-297 3
437
+ Li X, Li R, Chen G, Fu CW, Cohen-Or D, Heng PA (2021) A rotation-invariant framework for deep point cloud analysis. IEEE Transactions on Visualization and Computer Graphics 3, 9, 13, 14
438
+ Li Y, Pirk S, Su H, Qi CR, Guibas LJ (2016) Fpnn: Field probing neural networks for 3d data. In: Advances in Neural Information Processing Systems, pp 307-315 2
439
+ Li Y, Bu R, Sun M, Wu W, Di X, Chen B (2018) Pointcnn: Convolution on x-transformed points. In: Advances in neural information processing systems, pp 820-830 1, 2, 3, 9, 11, 12, 13, 14
440
+ Liu Y, Fan B, Xiang S, Pan C (2019) Relation-shape convolutional neural network for point cloud analysis. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 8895-8904 9, 14
441
+ van der Maaten L, Hinton G (2008) Visualizing high-dimensional data using t-sne. Journal of Machine Learning Research 10
442
+ Manning CD, Raghavan P, Schütze H (2008) Introduction to Information Retrieval. Cambridge University Press, URL http://nlp.stanford.edu/IR-book/information-retrieval-book.html 10
443
+ Maturana D, Scherer S (2015) Voxnet: A 3d convolutional neural network for real-time object recognition. In: 2015 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pp 922-928 9
444
+ Mian A, Bennamoun M, Owens R (2010) On the repeatability and quality of keypoints for local feature-based 3d object retrieval from cluttered scenes. International Journal of Computer Vision 89(2-3):348-361 11
445
+ Poulenard A, Rakotosaona MJ, Ponty Y, Ovsjanikov M (2019) Effective rotation-invariant point cnn with spherical harmonics kernels. International Conference on 3D Vision 2, 3, 9
446
+ Qi CR, Su H, Nießner M, Dai A, Yan M, Guibas LJ (2016) Volumetric and multi-view cnns for object classification on 3d data. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 5648-5656 2, 9
447
+ Qi CR, Su H, Mo K, Guibas LJ (2017a) Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE conference on computer vision and pattern recognition, pp 652-660 1, 2, 3, 6, 9, 12, 13, 14
448
+ Qi CR, Yi L, Su H, Guibas LJ (2017b) Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: Advances in Neural Information Processing Systems, pp 5105-5114 1, 2, 3, 8, 9, 10, 12, 13, 14
449
+ Rao Y, Lu J, Zhou J (2019) Spherical fractal convolutional neural networks for point cloud recognition. In: Computer Vision and Pattern Recognition 2, 3, 8, 9, 13
450
+ Riegler G, Osman Ulusoy A, Geiger A (2017) Octnet: Learning deep 3d representations at high resolutions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 3577-3586 2
453
+ Ronneberger O, Fischer P, Brox T (2015) U-net: Convolutional networks for biomedical image segmentation. In: International Conference on Medical image computing and computer-assisted intervention, pp 234-241 7
454
+ Savva M, Yu F, Su H, Aono M, Chen B, Cohen-Or D, Deng W, Su H, Bai S, Bai X, et al. (2016) Shrec16 track: largescale 3d shape retrieval from shapenet core55. In: Proceedings of the eurographics workshop on 3D object retrieval, vol 10 13
455
+ Su H, Maji S, Kalogerakis E, Learned-Miller E (2015) Multiview convolutional neural networks for 3d shape recognition. In: Proceedings of the IEEE international conference on computer vision, pp 945-953 2, 9
456
+ Tatsuma A, Aono M (2009) Multi-fourier spectra descriptor and augmentation with spectral clustering for 3d shape retrieval. The Visual Computer 25(8):785-804 13
457
+ Thomas H (2020) Rotation-invariant point convolution with multiple equivariant alignments. In: 2020 International Conference on 3D Vision (3DV), pp 504-513 2, 3
458
+ Tombari F, Salti S, Di Stefano L (2010) Unique signatures of histograms for local surface description. In: Proceedings of the European Conference on Computer Vision, Springer, pp 356-369 4, 11
459
+ Uy MA, Pham QH, Hua BS, Nguyen DT, Yeung SK (2019) Revisiting point cloud classification: A new benchmark dataset and classification model on real-world data. In: International Conference on Computer Vision 1, 12, 13
460
+ Wang PS, Liu Y, Guo YX, Sun CY, Tong X (2017) O-cnn: Octree-based convolutional neural networks for 3d shape analysis. ACM Transactions on Graphics 36(4):1-11 2
461
+ Wang Y, Sun Y, Liu Z, Sarma SE, Bronstein MM, Solomon JM (2019) Dynamic graph cnn for learning on point clouds. ACM Transactions on Graphics 2, 9, 12, 13, 14
462
+ Weiler M, Hamprecht FA, Storath M (2018) Learning steerable filters for rotation equivariant cnns. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 849-858 3
463
+ Wu Z, Song S, Khosla A, Yu F, Zhang L, Tang X, Xiao J (2015a) 3d shapenets: A deep representation for volumetric shapes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1912-1920 1, 2, 3, 4
464
+ Wu Z, Song S, Khosla A, Yu F, Zhang L, Tang X, Xiao J (2015b) 3d shapenets: A deep representation for volumetric shapes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp 1912-1920 8, 13
465
+ Xu Y, Fan T, Xu M, Zeng L, Qiao Y (2018) SpiderCNN: Deep learning on point sets with parameterized convolutional filters. In: Proceedings of the European Conference on Computer Vision, pp 87-102 1, 2, 14
466
+ Yi L, Kim VG, Ceylan D, Shen IC, Yan M, Su H, Lu C, Huang Q, Sheffer A, Guibas L (2016) A scalable active framework for region annotation in 3d shape collections. ACM Transactions on Graphics 35(6):1-12 1
467
+ Zaharescu A, Boyer E, Varanasi K, Horaud R (2009) Surface feature detection and description with applications to mesh matching. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, IEEE, pp 373-380 11
468
+ Zhang Z, Hua BS, Rosen DW, Yeung SK (2019a) Rotation invariant convolutions for 3d point clouds deep learning. In: International Conference on 3D Vision, pp 204-213 2, 3, 4, 6, 9, 10, 11, 12, 13, 14
469
+
470
+ Zhang Z, Hua BS, Yeung SK (2019b) Shellnet: Efficient point cloud convolutional neural networks using concentric shells statistics. In: Proceedings of the IEEE International Conference on Computer Vision, pp 1607-1616 1, 2, 3, 6
471
+ Zhang Z, Hua BS, Chen W, Tian Y, Yeung SK (2020) Global context aware convolutions for 3d point cloud understanding. In: International Conference on 3D Vision 2, 3, 6, 9, 10, 13, 14
472
+ Zhao Y, Birdal T, Lenssen JE, Menegatti E, Guibas L, Tombari F (2020) Quaternion equivariant capsule networks for 3d point clouds. In: European Conference on Computer Vision, Springer, pp 1-19 1
2202.13xxx/2202.13094/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b9a511ef403f75dff99e67c8b2fdc730cda7e9d8e00f8774bac7ba0656c3ce7
3
+ size 1030101
2202.13xxx/2202.13094/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13121/3415fbcf-d5e0-4509-b486-bd2d3fe9e7e4_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13121/3415fbcf-d5e0-4509-b486-bd2d3fe9e7e4_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13121/3415fbcf-d5e0-4509-b486-bd2d3fe9e7e4_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0fd44fe466423c0d2e95cabe061fceeda689e824111c08a7f5e1b52a7e66929b
3
+ size 4539869
2202.13xxx/2202.13121/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13121/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7eaea9ffaf119f312d853b209ca70efa26ed5e128a1e66727ac447920cd9994b
3
+ size 3814952
2202.13xxx/2202.13121/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13142/edddf239-62ff-407e-929c-73252ebf5a33_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13142/edddf239-62ff-407e-929c-73252ebf5a33_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13142/edddf239-62ff-407e-929c-73252ebf5a33_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f28282b0d552dde4237cfc1ee1fc7cad42302ea3cdc0715cec256f46a6941c3
3
+ size 29753997
2202.13xxx/2202.13142/full.md ADDED
@@ -0,0 +1,822 @@
1
+ # Real-World Blind Super-Resolution via Feature Matching with Implicit High-Resolution Priors
2
+
3
+ Chaofeng Chen*
4
+ chaofenghust@gmail.com
5
+ School of Informatics, Xiamen University
6
+ Xiamen, China
7
+
8
+ Xinyu Shi\* x98shi@uwaterloo.ca School of Informatics, Xiamen University Xiamen, China
9
+
10
+ Yipeng Qin
11
+ QinY16@cardiff.ac.uk
12
+ School of Computer Science and Informatics, Cardiff University
13
+ Cardiff, England
14
+
15
+ Xiaoming Li
16
+ csxml@gmail.com
17
+ Faculty of Computing, Harbin Institute of Technology
18
+ Harbin, China
19
+
20
+ Tao Yang yangtao9009@gmail.com DAMO Academy, Alibaba Group Shenzhen, China
21
+
22
+ Xiaoguang Han
23
+ hanxiaoguang@cuhk.edu.cn
24
+ SSE, The Chinese
25
+ University of Hong Kong
26
+ Shenzhen, China
27
+
28
+ Shihui Guo†
29
+ guoshihui@xmu.edu.cn
30
+ School of Informatics, Xiamen University
31
+ Xiamen, China
32
+
33
+ ![](images/8ee8abf1b52fff3e1766601be26685e8da75208e6805cded661faaf3807afb89.jpg)
34
+ Our SR Result / Low Resolution
35
+
36
+ ![](images/fda1911278e656e30fea8e230f18d8dcb3feb6c68b078fa2967fddff3ca3e12c.jpg)
37
+ Figure 1: Comparison between our FeMaSR and two latest works, Real-ESRGAN+ [45] and SwinIR-GAN [28] on a low resolution image with complex blind degradations. Our method can recover realistic hairs for the squirrel thanks to the implicit high-resolution priors. Please zoom in for the best view.
38
+
39
+ ![](images/a5e95d2828a4ed6ebc965640b65b6a4ef81f0a3adfadbd719803fc70a07b11cb.jpg)
40
+ Bicubic
41
+
42
+ ![](images/81d4ca33effb052c1a85acf1fec75f9b96a7bda95d94c29824e7834b917e3a6e.jpg)
43
+ Real-ESRGAN+ [45]
44
+
45
+ ![](images/2ec62a848cc0998c15fec1912a8c3b80b290a877238e28d1c37dc51238a77293.jpg)
46
+ SwinIR-GAN [28]
47
+
48
+ ![](images/06f01805edbdde482667fd016610e8f0405669f4313026da1f558815f987b4a0.jpg)
49
+ FeMaSR (Ours)
50
+
51
+ # ABSTRACT
52
+
53
+ A key challenge of real-world image super-resolution (SR) is to recover the missing details in low-resolution (LR) images with complex unknown degradations (e.g., downsampling, noise and compression). Most previous works restore such missing details in the image space. To cope with the high diversity of natural images, they either rely on the unstable GANs that are difficult to train and prone to artifacts, or resort to explicit references from high-resolution (HR)
+ images that are usually unavailable. In this work, we propose Feature Matching SR (FeMaSR), which restores realistic HR images in a much more compact feature space. Unlike image-space methods, our FeMaSR restores HR images by matching distorted LR image features to their distortion-free HR counterparts in our pretrained HR priors, and decoding the matched features to obtain realistic HR images. Specifically, our HR priors contain a discrete feature codebook and its associated decoder, which are pretrained on HR images with a Vector Quantized Generative Adversarial Network (VQGAN). Notably, we incorporate a novel semantic regularization in VQGAN to improve the quality of reconstructed images. For the feature matching, we first extract LR features with an LR encoder consisting of several Swin Transformer blocks and then follow a simple nearest neighbour strategy to match them with the pretrained codebook. In particular, we equip the LR encoder with residual shortcut connections to the decoder, which is critical to the optimization of feature matching loss and also helps to complement the possible feature matching errors. Experimental results show that our approach produces more realistic HR images than previous methods. Codes are released at https://github.com/chaofengc/FeMaSR.
66
+
67
+ # CCS CONCEPTS
68
+
69
+ Computing methodologies $\rightarrow$ Computational photography.
70
+
71
+ # KEYWORDS
72
+
73
+ Blind Super-Resolution, FeMaSR, Feature Matching, High-Resolution Prior, VQGAN
74
+
75
+ # ACM Reference Format:
76
+
77
+ Chaofeng Chen, Xinyu Shi, Yipeng Qin, Xiaoming Li, Tao Yang, Xiaoguang Han, and Shihui Guo. 2022. Real-World Blind Super-Resolution via Feature Matching with Implicit High-Resolution Priors. In Proceedings of ACM International Conference on Multimedia (ACM Multimedia). ACM, New York, NY, USA, 18 pages. https://doi.org/XXXXXXXXX.XXXXXXXXXX
78
+
79
+ # 1 INTRODUCTION
80
+
81
+ Single image super-resolution (SISR) is a fundamental task in low-level vision, aiming to restore high-resolution (HR) images from their low-resolution (LR) counterparts. Due to the incorporation of deep neural networks, previous works [6, 7, 28, 29, 35, 58] have made significant progress on non-blind SR, which assumes a known degradation process, e.g., bicubic downsampling. However, these methods usually fail in real-world SR tasks where the degradations are unknown, i.e., blind SR.
82
+
83
+ Blind SR is intrinsically an ill-posed problem because the complex and unknown distortions in the LR inputs have disrupted many details. Some works [13, 41, 54, 63] exploited assumptions of the classical degradation model to explicitly estimate the blur kernel and noise. As a result, most of them can only handle several simplified cases of the classical degradation model, and are a far cry from real-world SR solutions. Other works [45, 48, 53, 57] resort to the synthesis power of Generative Adversarial Networks (GANs) to generate the missing textures. Although effective, these approaches are prone to artifacts due to the notoriously unstable GAN training. Instead of "guessing" the missing textures, another line of research [20, 49, 61, 62] takes advantage of reference images. Their performance is therefore determined by the reference HR images, which are not always available. Addressing this issue, recent works [37, 44] turned to implicit high-resolution priors implemented by pretrained GANs. Although bypassing the need for explicit HR references, these methods are limited to the domain of the pretrained GANs (e.g., face images [3, 50]) and cannot generalize to natural images with diverse contents<sup>1</sup>.
84
+
85
+ In this paper, we propose a novel SR framework based on feature matching, namely FeMaSR, for blind SR of real-world images. The distinct advantage of our framework is that it addresses the aforementioned limitations of previous works by matching LR features to a set of HR features in the pretrained implicit HR priors (HRP). Inspired by the recent VQ-VAE [36, 40] and VQGAN [9], we define our HRP as the combination of a discrete codebook consisting of a pre-defined number of feature vectors and the corresponding pretrained decoder. The feature vectors contain the information of realistic textures that can be decoded into the target HR images. In this way, we break blind SR into two sub-tasks: i) learning a high-quality HRP; ii) mapping the features of LR inputs to the codebook in HRP for distortion removal and detail recovery. For the first sub-task, we pre-train our HRP with a VQGAN that aims to reconstruct the input HR patches. However, instead of using the vanilla VQGAN, we incorporate semantic information into HRP via L2 regularization with perceptual features from VGG19, thereby enhancing the correlation between semantics and codebook features. For the second sub-task, we follow SwinIR [28] and utilize several Swin Transformer blocks to encode the LR inputs. The LR encoder is then trained with losses between LR features and ground truth HR features selected from the pretrained codebook. In particular, we found that the feature matching loss is difficult to optimize with a fixed HRP. To solve this problem, we introduce multi-scale residual shortcut connections from the LR feature space to the decoder features. These residual connections enable direct gradient flow from the pretrained decoder to the LR encoder, thus making it easier to optimize the LR encoder. Besides, they also help to complement possible feature matching errors. Since HRP contains rich semantic-aware HR information of natural images, the proposed FeMaSR is able to recover higher quality textures; see Fig. 1. Our contributions can be summarized as follows:
88
+
89
+ - We propose a novel framework FeMaSR for blind SR using HRP encoded by a pretrained VQGAN network. Compared with previous works, the FeMaSR formulates SR as a feature matching problem between LR features and distortion-free HR priors, and therefore enables the generation of more realistic images with less artifacts for real-world SR.
90
+ - We introduce semantic regularization for the pretrain of semantic-aware HRP. Such a regularization enhances the correlation between semantics and HRP, thereby facilitating the generation of more realistic textures.
91
+ - We design a LR encoder with residual shortcut connections to the HRP for feature matching. The proposed framework can better match the LR features with distortion free HR features, and also complement the matching errors.
92
+
93
+ # 2 RELATED WORK
94
+
95
+ Single Image Super-Resolution (SISR) Starting from the pioneer SRCNN [8], deep neural networks have dominated the design of modern SR algorithms. Since then, various network architectures have been proposed to improve the performance of SISR. For example, Kim et al. [23] proposed a deep version of SRCNN, named VDSR. Thanks to the residual [16] and residual dense blocks [17] that enable training deeper and wider networks, EDSR [29] and RDN [60] were proposed and boosted the performance of SISR. After that, the attention mechanism is also introduced to SISR, such as channel attention [58], spatial attention [4, 35], non-local attention [59], etc. Latest works [6, 28] achieve state-of-the-art performance by employing vision image transformers [30]. These models are trained and evaluated in a non-blind manner, e.g., bicubic downsampling and blurring with known parameters, thereby making it difficult to generalize to SISR with the same degradation type but unseen parameters, let alone those with other degradation types. Addressing this issue, Zhang et al. developed a series of methods [52, 54, 55] for conditional image restoration, where users can control the outputs by changing the conditioned degradation parameters.
96
+
97
+ ![](images/402571964a9a651269f0f4de9504ce3c6447af390897e38aef62178a2042d7ca.jpg)
98
+ Figure 2: Framework of the proposed FeMaSR. It contains two stages: pretrain of high-resolution prior, and super-resolution via feature matching. We first pretrain a VQGAN to learn an implicit representation of high-resolution patches, i.e., the codebook $\mathcal{Z}$ and decoder $G$ . Then the LR encoder $E_{l}$ is optimized to find the best matching features of the LR inputs $x$ in the codebook $\mathcal{Z}$ . Since $\mathcal{Z}$ and $G$ are pretrained to reconstruct high resolution patches, FeMaSR is able to generate clearer results with less artifacts.
99
+
100
+ Blind SISR Upon the performance saturation of non-blind SISR, recent works turned to the more challenging real-world SISR with unknown degradation (a.k.a. blind SISR). In general, they model complex real-world degradations in either an implicit or an explicit way. Between them, implicit methods [10, 32, 42, 43, 48] aim to learn a degradation network from real-world LR images. In the absence of corresponding ground truth HR images, most of them employed unsupervised image-to-image translation (e.g., Cycle-GAN [65]) while some recent works [51] resort to contrastive learning. On the contrary, explicit methods aim to synthesize "real" LR images by a manually designed degradation process. Specifically, BSRGAN[53] and Real-ESRGAN[45] describe different ways to improve the common image degradation pipeline. Both of them demonstrate much better visual quality than implicit methods in blind SISR. Nevertheless, both implicit and explicit methods rely on the generative power of GANs to generate textures. However, GANs are known to have difficulties in distinguishing some real-world textures from similar degradation patterns, which usually lead to unrealistic textures or over-smoothed regions in the resulting HR images.
101
+
102
+ Prior-based SISR Since SISR is intrinsically an ill-posed problem, prior-based SISR methods take advantage of extra image priors either explicitly or implicitly. Methods based on explicit priors (a.k.a. RefSR) rely on one or multiple reference HR images which share the same or similar content with the input LR image. To locate the best reference images, various approaches were proposed, including cross-scale correspondences [62], texture transfer [61], transformer networks [49], teacher-student [20], internal graph [64], etc. Li et al. [25-27] narrow the image space to faces and achieve impressive performance. Although effective, explicit priors (i.e., HR reference images) are not always available for a given real-world LR image.
103
+
104
+ Therefore, prior-based SISR is more promisingly achieved with a prior distribution (i.e., implicit prior) learnt from a large amount of HR images through GANs or VAEs. Menon et al. [33] first proposed to upscale LR faces by searching the latent space of a pretrained StyleGAN generator [22]. Gu et al. [14] improved it by introducing more latent codes. Pan et al. [38] exploited a BigGAN generator [2] as a prior for versatile image restoration. Although these methods can generate realistic images, they all contain a time-consuming optimization process. Addressing this issue, [3, 44, 50] propose to learn a posterior distribution with a pretrained StyleGAN generator. Specifically, they learn an encoder to project LR images to a latent space shared with the pretrained generator that outputs HR images. Although this approach demonstrates exciting performance for face SR, it hardly works for natural images because learning a GAN for natural images remains a challenging task. In this work, we address the above-mentioned challenge following VQGAN [9] that shows outstanding performance in natural image synthesis and can be regarded as high-quality priors for image synthesis.
105
+
106
+ # 3 METHODOLOGY
107
+
108
+ # 3.1 Framework Overview
109
+
110
+ Given an input LR image $x$ with unknown degradations, we aim to restore the corresponding high-resolution image with realistic textures. As shown in Fig. 2, we employ a two-stage framework to pretrain the High-Resolution Priors (HRP) and conduct feature matching sequentially:
111
+
112
+ - Stage I, Pretraining of High-Resolution Priors. We use HR patches to pretrain a VQGAN [9] consisting of an encoder $E$ , a discrete codebook $\mathcal{Z}$ , and a decoder $G$ . Inspired by [46], we train the VQGAN with semantic guidance that enhances the correlation of textures and semantics. We call the codebook $\mathcal{Z}$ and decoder $G$ HRP. After pretraining, our HRP approximately encodes the complete information of HR images and allows the reconstruction of them by feeding their corresponding feature codes $z\in \mathcal{Z}$ to $G$ .
115
+
116
+ - Stage II, Super-Resolution via Feature Matching. Given the HRP (i.e., $\mathcal{Z}$ and $G$ ) obtained in Stage I, we argue that blind SR is equivalent to a feature matching problem that aims to match the feature codes of LR inputs $\hat{z}^l$ to those of their HR counterparts $z \in \mathcal{Z}$ . By feeding $G$ with the correctly matched HR feature codes $z$ , we can obtain the clean and realistic HR images required in blind SR. To address the optimization challenges posed by the quantization process of VQGAN, we further propose the incorporation of a residual shortcut module to the LR encoder. This not only facilitates training but also complements the feature matching errors, which further boosts the quality of the resulting HR images.
117
+
118
+ Details are described in the following sections.
119
+
120
+ # 3.2 Pretraining of High-Resolution Priors
121
+
122
+ We first make a brief review of VQGAN. As illustrated in Fig. 2, the input HR image $\pmb{y} \in \mathbb{R}^{H \times W \times 3}$ is first passed through the encoder $E$ to produce its output feature $\hat{z} = E(\pmb{y}) \in \mathbb{R}^{h \times w \times n_z}$ , where $n_z$ is the feature dimension. Then the discrete representation of $\hat{z}$ is calculated by finding the nearest neighbours of each element $\hat{z}_i \in \mathbb{R}^{n_z}$ , in the codebook $\mathcal{Z} \in \mathbb{R}^{K \times n_z}$ as follows:
123
+
124
+ $$
125
+ z _ {i} = \mathcal {Z} _ {k}, \quad k = \underset {j} {\arg \min } \| \hat {z} _ {i} - \mathcal {Z} _ {j} \| _ {2}, \tag {1}
126
+ $$
127
+
128
+ where $z \in \mathbb{R}^{h \times w \times n_z}$ , $K$ is the codebook size, $i \in \{1, 2, \dots, h \times w\}$ , and $j \in \{1, 2, \dots, K\}$ . After that, $\pmb{y}$ is reconstructed by $z$ with the decoder $G$ :
129
+
130
+ $$
131
+ \boldsymbol {y} ^ {\prime} = G (z) \approx \boldsymbol {y}, \tag {2}
132
+ $$
133
+
134
+ Since the feature quantization operation of Eq. (1) is non-differentiable, we follow [9, 36] and simply copy the gradients from $G$ to $E$ for backpropagation. Therefore, the model and codebook can be trained end-to-end with the following objective function:
135
+
136
+ $$
137
+ \begin{array}{l} \mathcal {L} _ {V Q} (E, G, \mathcal {Z}) = \| \boldsymbol {y} ^ {\prime} - \boldsymbol {y} \| _ {1} + \| \operatorname {s g} [ \hat {z} ] - z \| _ {2} ^ {2} \\ + \beta \| \operatorname {s g} [ z ] - \hat {z} \| _ {2} ^ {2}, \tag {3} \\ \end{array}
138
+ $$
139
+
140
+ where $\mathrm{sg}[\cdot ]$ is the stop-gradient operation, and $\beta = 0.25$ according to [9, 36]. With the pretrained VQGAN, any high resolution images $\pmb{y}$ from the training set can be reconstructed with their corresponding feature vectors in $\mathcal{Z}$ and the decoder $G$ . We therefore call them HRP in this work.
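The quantization of Eq. (1), the straight-through gradient copy, and the codebook/commitment terms of Eq. (3) can be summarized in a short PyTorch-style sketch. This is illustrative only; the tensor layout, function name and `beta` default are assumptions of the sketch rather than the released implementation, and the reconstruction, perceptual and adversarial terms are added outside this routine.

```python
import torch
import torch.nn.functional as F

def quantize(z_hat, codebook, beta=0.25):
    """Nearest-neighbour quantization of Eq. (1) plus the codebook/commitment
    terms of Eq. (3).

    z_hat:    (B, n_z, h, w) continuous encoder output E(y)
    codebook: (K, n_z) code vectors Z
    """
    B, n_z, h, w = z_hat.shape
    flat = z_hat.permute(0, 2, 3, 1).reshape(-1, n_z)            # (B*h*w, n_z)
    dist = torch.cdist(flat, codebook)                            # distance to every Z_j
    idx = dist.argmin(dim=1)                                      # k = argmin_j ||z_hat_i - Z_j||_2
    z_q = codebook[idx].reshape(B, h, w, n_z).permute(0, 3, 1, 2)

    # ||sg[z_hat] - z||^2 updates the codebook; beta * ||sg[z] - z_hat||^2 commits the encoder
    loss_vq = F.mse_loss(z_q, z_hat.detach()) + beta * F.mse_loss(z_hat, z_q.detach())

    # straight-through estimator: gradients are copied from the decoder input back to E
    z_q = z_hat + (z_q - z_hat).detach()
    return z_q, idx, loss_vq
```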
141
+
142
+ Semantic Guidance As indicated by the vanilla setting in Eq. (3), the codebook $\mathcal{Z}$ is learned purely by gradient descent where similar patterns are clustered independent of their semantics. Meanwhile, Wang et al. [46] pointed out that semantic guidance leads to better texture restoration. This motivates us to incorporate semantic information in the pretraining of VQGAN. To be specific, we regularize the training of codebook $\mathcal{Z}$ with perceptual features from a pretrained VGG19 network by adding a regularization term $\mathcal{L}_r$ to $\mathcal{L}_{VQ}$ and have
143
+
144
+ $$
145
+ \mathcal {L} _ {V Q} ^ {\prime} = \mathcal {L} _ {V Q} + \mathcal {L} _ {r} = \mathcal {L} _ {V Q} + \gamma \| \operatorname {C O N V} (z) - \phi (\boldsymbol {y}) \| _ {2} ^ {2} \tag {4}
146
+ $$
147
+
148
+ where CONV denotes a simple convolution layer to match the dimension of $z$ and $\phi (\pmb {y})$ , $\phi$ denotes the pretrained VGG19, and $\gamma$ is a weighting factor empirically set to 0.1. Note that we follow [9] and also use perceptual loss and adversarial loss in the pretraining.
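A minimal sketch of the semantic regularization term $\mathcal{L}_r$ is given below. The VGG19 layer that is tapped (relu4_4 here) and the codebook dimension `n_z` are assumptions of this sketch, since the paper only specifies perceptual features from a pretrained VGG19.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision

class SemanticReg(nn.Module):
    """L_r = gamma * ||CONV(z) - phi(y)||_2^2 (Eq. 4); layer choice and n_z are assumptions."""
    def __init__(self, n_z=256, gamma=0.1):
        super().__init__()
        vgg = torchvision.models.vgg19(weights="IMAGENET1K_V1").features[:27].eval()
        for p in vgg.parameters():
            p.requires_grad_(False)
        self.vgg, self.gamma = vgg, gamma
        self.proj = nn.Conv2d(n_z, 512, kernel_size=1)   # the CONV layer matching dimensions

    def forward(self, z_q, y):
        with torch.no_grad():
            target = self.vgg(y)                          # phi(y)
        pred = self.proj(z_q)                             # CONV(z)
        pred = F.interpolate(pred, size=target.shape[-2:], mode="bilinear", align_corners=False)
        return self.gamma * F.mse_loss(pred, target)
```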
149
+
150
+ In summary, our semantic-guided HRP pretraining encourages the texture restoration to be conditioned on semantics, thereby enabling the restoration of more realistic and natural textures.
151
+
152
+ # 3.3 Super-Resolution via Feature Matching
153
+
154
+ With the pretrained HRP, i.e., $\mathcal{Z}$ and $G$ , the SR task is turned into a feature matching problem between LR inputs $\pmb{x}$ and $\mathcal{Z}$ . Denoting the LR encoder as $E_{l}$ , the problem can be formulated as
155
+
156
+ $$
157
+ \underset {\theta} {\arg \min } \mathcal {L} (G (\mathbf {q} [ E _ {l} (\boldsymbol {x}, \theta), \mathcal {Z} ]), \boldsymbol {y}), \tag {5}
158
+ $$
159
+
160
+ where $\theta$ is the learnable parameter of $E_{l}$ , $\mathbf{q}[\cdot]$ denotes the same feature matching process as in Eq. (1), and $\mathcal{L}$ denotes the loss functions (described in the following section). We first briefly discuss why we transform the SR task into a feature matching process and how it helps:
161
+
162
+ As we know, image degradation is inherently a one-to-many mapping subject to different types and levels of degradation. From a mathematical point of view, these degradations can be regarded as offsets of high-quality local features in some feature space, where the type and level of degradation correspond to the direction and distance of the offset respectively. Such offsets overlap with each other, thereby making it difficult to find the correct high-quality correspondence of a degraded feature in the feature space. Heuristically, we address this challenge by mapping a degraded feature to its Euclidean nearest neighbour in a given set of pre-defined high-quality features (i.e., the pretrained codebook $\mathcal{Z}$ ). Intuitively, the codebook with discrete features partitions the feature space into non-overlapping cells that form a degradation-based Voronoi diagram. As demonstrated in Fig. 2, we define the $K$ feature vectors $z_{k}$ in $\mathcal{Z}$ as the centers of $K$ Voronoi cells. Given an LR feature $\hat{z}_i^l$ , we compute the Euclidean distance between $\hat{z}_i^l$ and all centers $z_{k}$ to determine which cell $\hat{z}_i^l$ belongs to $^2$ , i.e., which $z_{k}$ it maps to. In this way, realistic and rich textures can be generated as the decoder inputs are mapped to expressive HR features $z_{k}$ instead of the raw LR features $\hat{z}_i^l$ .
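Under this view, the Stage II forward pass of Eq. (5) reduces to encoding, snapping each LR feature to its nearest code vector, and decoding. A sketch is given below, reusing the `quantize` routine from the earlier sketch and omitting the residual shortcuts introduced later; module names are illustrative.

```python
def sr_forward(x, lr_encoder, codebook, decoder):
    """Stage II forward pass implied by Eq. (5): each LR feature is snapped to the
    nearest code vector (the centre of its Voronoi cell) before decoding.
    The codebook Z and decoder G are the frozen HRP; only lr_encoder carries theta."""
    z_hat_l = lr_encoder(x)                          # E_l(x, theta), shape (B, n_z, h, w)
    z_matched, _, _ = quantize(z_hat_l, codebook)    # q[E_l(x), Z]
    return decoder(z_matched)                        # realistic HR estimate fed to the losses
```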
163
+
164
+ Despite the advantages of feature matching, the optimization of Eq. (5) is quite challenging because of the complex LR inputs. For this purpose, we introduce a powerful LR encoder $E_{l}$ consisting of two parts: a feature extraction module and a residual shortcut module. Feature Extraction As shown in Fig. 2, the design of the feature extraction module basically follows SwinIR [28]. It is composed of a shallow feature extraction head and a deep feature extraction block. The deep feature extraction block applies the same stack of residual Swin Transformer layers as SwinIR, while the shallow feature extraction block is slightly different. Since the HRP is fixed, the final upscaling factor $S_{up}$ of the input LR image is controlled by the downscaling factor $S_{down}$ of the shallow feature encoder block. In this work, we have $S_{up} = S_{down} \times 8$ as the decoder $G$ upscales $z \in \mathbb{R}^{h\times w}$ by $\times 8$ . Denoting the feature extraction module as $H_{F}$ , we have:
167
+
168
+ $$
169
+ \hat {z} ^ {l} = H _ {F} (x), \tag {6}
170
+ $$
171
+
172
+ where $\hat{z}^l\in \mathbb{R}^{h\times w\times n_z}$ are the LR features used for feature matching. Residual Shortcut Module To better utilize the HRP, we further introduce multi-scale residual connections between $\hat{z}^l$ and the decoder $G$ , as shown in Fig. 2. To be specific, we use several upsampling blocks $H_{up}$ to upscale LR features $\hat{z}^l$ and add them as residuals to the decoder $G$ , i.e.,
173
+
174
+ $$
175
+ f _ {0} = z, \hat {f} _ {0} = \hat {z} ^ {l} \tag {7}
176
+ $$
177
+
178
+ $$
179
+ f _ {i} = G _ {u p} ^ {i} \left(f _ {i - 1}\right) + H _ {u p} ^ {i} \left(\hat {f} _ {i - 1}\right), i \in \{1, 2, 3, \dots \} \tag {8}
180
+ $$
181
+
182
+ where $G_{up}^{i}$ and $H_{up}^{i}$ are the $i$ -th upsampling blocks in $G$ and $H_{F}$ respectively, $f_{i-1}$ and $\hat{f}_{i-1}$ are the input features to them.
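A sketch of the residual shortcut module described by Eqs. (7)-(8) is shown below. The layout of the upsampling blocks $H_{up}^i$ and the assumption that the shortcut features are carried forward as $\hat{f}_i = H_{up}^i(\hat{f}_{i-1})$ are illustrative choices of this sketch, not the released architecture.

```python
import torch.nn as nn

class ShortcutUp(nn.Module):
    """One x2 upsampling block H_up^i for the LR-feature shortcut (layout assumed)."""
    def __init__(self, c_in, c_out):
        super().__init__()
        self.body = nn.Sequential(
            nn.Upsample(scale_factor=2, mode="nearest"),
            nn.Conv2d(c_in, c_out, 3, padding=1),
            nn.LeakyReLU(0.2, inplace=True),
        )

    def forward(self, x):
        return self.body(x)

def decode_with_shortcuts(z_matched, z_hat_l, g_up_blocks, h_up_blocks, to_rgb):
    """Eqs. (7)-(8): f_0 = z, f_hat_0 = z_hat_l and f_i = G_up^i(f_{i-1}) + H_up^i(f_hat_{i-1})."""
    f, f_hat = z_matched, z_hat_l
    for g_up, h_up in zip(g_up_blocks, h_up_blocks):   # e.g. three x2 stages for the x8 decoder
        f_hat = h_up(f_hat)
        f = g_up(f) + f_hat                            # residual shortcut into the frozen G
    return to_rgb(f)
```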
183
+
184
+ Our residual shortcut module has two main benefits. First, it sidesteps the non-differentiable quantization process in VQGAN, thus allowing gradients to be backpropagated directly from $G$ to $E_{l}$ , which greatly eases the optimization difficulty. Second, we observed that these extra residual connections have also learned to complement the potential errors in feature matching and can further boost the performance of blind SR.
185
+
186
+ # 3.4 Training Objectives
187
+
188
+ The gradients to update $E_{l}$ come from three parts: feature matching losses, image reconstruction losses, and adversarial loss.
189
+
190
+ Feature Matching Loss This loss is dedicated to the training of $E_{l}$ . We first obtain the ground truth latent representation of $\pmb{y}$ , i.e., $z_{gt} = \mathbf{q}[E(\pmb{y}),\mathcal{Z}]$ , and then calculate the L2 loss and the Gram matrix loss for LR features
191
+
192
+ $$
193
+ \mathcal {L} _ {f e m a} = \beta \| \hat {z} ^ {l} - z _ {g t} \| _ {2} ^ {2} + \alpha \| \psi (\hat {z} ^ {l}) - \psi (z _ {g t}) \| _ {2} ^ {2}, \tag {9}
194
+ $$
195
+
196
+ where $\psi$ calculates the Gram matrix of features, and $\alpha$ is its weight. The Gram matrix loss, also called style loss, has been shown to be helpful to restore textures [12].
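A compact sketch of $\mathcal{L}_{fema}$ in Eq. (9) follows; the Gram-matrix normalization by the feature size is a common convention and an assumption of this sketch.

```python
import torch.nn.functional as F

def gram(feat):
    """Gram matrix psi(.) of a (B, C, H, W) feature map, normalised by its size."""
    B, C, H, W = feat.shape
    f = feat.reshape(B, C, H * W)
    return f @ f.transpose(1, 2) / (C * H * W)

def feature_matching_loss(z_hat_l, z_gt, alpha=1.0, beta=0.25):
    """L_fema of Eq. (9): L2 between LR features and the ground-truth codes
    z_gt = q[E(y), Z], plus the Gram-matrix (style) term."""
    return beta * F.mse_loss(z_hat_l, z_gt) + alpha * F.mse_loss(gram(z_hat_l), gram(z_gt))
```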
197
+
198
+ Reconstruction Loss We follow [9, 44] and employ L1 and perceptual losses as our reconstruction loss, formulated as
199
+
200
+ $$
201
+ \mathcal {L} _ {\text {r e c}} = \lambda_ {L 1} \| \hat {\boldsymbol {y}} - \boldsymbol {y} \| _ {1} + \lambda_ {p e r} \| \phi (\hat {\boldsymbol {y}}) - \phi (\boldsymbol {y}) \| _ {2} ^ {2} \tag {10}
202
+ $$
203
+
204
+ where $\phi$ is a pretrained VGG-16 network, $\lambda_{L1}$ and $\lambda_{per}$ are weights of the L1 and perceptual losses respectively.
205
+
206
+ Adversarial Loss Although our HRP already contains rich texture information, we still need an adversarial loss to help us find better-matching features in the feature matching process. We follow [45] and adopt a U-Net discriminator $D$ with spectral normalization [34]. Similar to [5], we use a hinge loss and define the generator loss as
207
+
208
+ $$
209
+ \mathcal {L} _ {a d v} = \lambda_ {a d v} \sum_ {i} - \mathbb {E} [ D (\hat {\boldsymbol {y}} _ {i}) ] \tag {11}
210
+ $$
211
+
212
+ For simplicity, the discriminator loss is omitted here.
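For concreteness, the generator side of Eq. (11) and the omitted discriminator loss can be sketched as below, assuming the standard hinge formulation; the reduction over the U-Net discriminator outputs is taken as a mean here, which is an assumption of the sketch.

```python
import torch.nn.functional as F

def g_adv_loss(d_fake, lambda_adv=0.1):
    """Generator side of Eq. (11): -E[D(y_hat)] over the discriminator outputs."""
    return -lambda_adv * d_fake.mean()

def d_adv_loss(d_real, d_fake):
    """Standard hinge loss for the discriminator (omitted in the text for brevity)."""
    return F.relu(1.0 - d_real).mean() + F.relu(1.0 + d_fake).mean()
```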
213
+
214
+ Overall Loss The overall loss is defined as
215
+
216
+ $$
217
+ \mathcal {L} _ {\text {t o t a l}} = \mathcal {L} _ {\text {f e m a}} + \mathcal {L} _ {\text {r e c}} + \mathcal {L} _ {\text {a d v}} \tag {12}
218
+ $$
219
+
220
+ where the weights for each loss are set as: $\alpha = \lambda_{L1} = \lambda_{per} = 1, \beta = 0.25, \lambda_{adv} = 0.1$ .
221
+
222
+ # 4 IMPLEMENTATION DETAILS
223
+
224
+ # 4.1 Datasets and Evaluation Metrics
225
+
226
+ Training Dataset We follow BSRGAN [53] and build a training set that includes DIV2K [1], Flickr2K [29], DIV8K [15] and 10,000 face images from FFHQ [21]. We use the following ways to generate the training patches: (1) crop non-overlapping $512 \times 512$ patches; (2) filter patches with few textures; (3) for well-aligned faces in FFHQ, we perform random resize with scale factors between [0.5, 1.0] before cropping to avoid content bias. More details are provided in the supplementary material. The final training dataset contains 136,205 HR patches of size $512 \times 512$ . We use the same degradation model as BSRGAN<sup>3</sup> to generate corresponding LR images.
227
+
228
+ Synthetic Testing Dataset To ensure a fair comparison, we use a mixed degradation model of two recent works, BSRGAN and Real-ESRGAN, denoted as bsrgan_plus<sup>5</sup>, to generate LR test sets for the DIV2K validation set and 5 classical benchmarks, i.e., Set5, Set14, BSD100, Urban100 and Manga109. The diversity of test images guarantees a comprehensive evaluation of model performance.
229
+
230
+ Real-world Testing Dataset We test our model on three recent real-world benchmarks, including RealSR [44], DRealSR [47] and DPED-iphone [18]. We test models with an upscale factor of 4 for these real-world datasets. Images from RealSR and DRealSR are captured by DSLR cameras, and contain 100 and 93 images respectively. DPED-iphone includes 100 LR images captured by smartphone cameras. The LR images in DPED-iphone are usually more corrupted than those from RealSR and DRealSR.
231
+
232
+ Evaluation Metrics For synthetic test datasets with ground truth images, we employ the well-known perceptual metric LPIPS [56] to evaluate the perceptual quality of generated images. We also report the widely used PSNR and SSIM scores for reference. For real-world benchmarks, there are usually no ground truth images, so we adopt the well-known no-reference metric NIQE for quantitative comparison.
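As an example of how the LPIPS score can be computed, a sketch using the commonly available lpips package is given below; the backbone choice and preprocessing are assumptions, not the exact protocol behind the reported numbers.

```python
import torch
import lpips  # https://github.com/richzhang/PerceptualSimilarity

# The AlexNet backbone is the package default; whether it matches the paper's
# evaluation setting is an assumption.
lpips_fn = lpips.LPIPS(net='alex')

def lpips_score(sr, hr):
    """Lower is better; sr/hr are (B, 3, H, W) RGB tensors scaled to [-1, 1]."""
    with torch.no_grad():
        return lpips_fn(sr, hr).mean().item()
```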
233
+
234
+ # 4.2 Training Details
235
+
236
+ In both the HRP pretraining and SR training, we use an Adam [24] optimizer with $\beta_{1} = 0.9$ , $\beta_{2} = 0.99$ . The learning rates for both the generator and discriminator are fixed at 0.0001 throughout the training. During the feature matching stage, the codebook $\mathcal{Z}$ and decoder $G$ are fixed. Both our HRP and SR networks are trained with a batch size of 16, and the HR image size is fixed as $256 \times 256$ for both $\times 2$ and $\times 4$ upscale factors. We implemented our model with PyTorch [39]. The model pretraining stage takes about 3 days on 2 GeForce RTX 3090 GPUs and the SR stage takes about 4 days on the same device.
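A sketch of the Stage II optimization set-up with the hyper-parameters above; the module names (`lr_encoder`, `decoder`, `codebook`, `discriminator`) and the storage of $\mathcal{Z}$ as a parameter tensor are illustrative assumptions.

```python
import torch

def build_stage2_optimizers(lr_encoder, decoder, codebook, discriminator):
    """Freeze the HRP (codebook Z and decoder G) and set up Adam as stated above."""
    codebook.requires_grad_(False)            # assuming Z is stored as an nn.Parameter
    for p in decoder.parameters():
        p.requires_grad_(False)
    optim_g = torch.optim.Adam(lr_encoder.parameters(), lr=1e-4, betas=(0.9, 0.99))
    optim_d = torch.optim.Adam(discriminator.parameters(), lr=1e-4, betas=(0.9, 0.99))
    return optim_g, optim_d
```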
237
+
238
+ # 5 EXPERIMENTS
239
+
240
+ # 5.1 Visualization of HRP
241
+
242
+ In this experiment, we visualize the features in the codebook $\mathcal{Z}$ with the pretrained $G$ , which facilitates the understanding of the proposed framework by answering two questions: i) what priors are encoded in HRP? ii) how are they correlated with semantics?
243
+
244
+ Table 1: Quantitative comparison with state-of-the-art methods on synthetic benchmarks. LR images are generated with a mixed degradation model of BSRGAN [53] and Real-ESRGAN [45]. PSNR/SSIM ↑: the higher, the better; LPIPS ↓: the lower, the better. LPIPS scores can better reflect texture quality, and the best and second performance are marked in red and blue.
245
+
246
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Scale</td><td colspan="2">DIV2K Valid</td><td colspan="2">Set5</td><td colspan="2">Set14</td><td colspan="2">BSD100</td><td colspan="2">Urban100</td><td colspan="2">Manga109</td></tr><tr><td>PSNR SSIM</td><td>LPIPS</td><td>PSNR SSIM</td><td>LPIPS</td><td>PSNR SSIM</td><td>LPIPS</td><td>PSNR SSIM</td><td>LPIPS</td><td>PSNR SSIM</td><td>LPIPS</td><td>PSNR SSIM</td><td>LPIPS</td></tr><tr><td>CDC</td><td>×2</td><td>24.93</td><td>0.6293</td><td>0.6588</td><td>25.35</td><td>0.6747</td><td>0.5153</td><td>22.74</td><td>0.5347</td><td>0.6229</td><td>23.64</td><td>0.5282</td><td>0.7073</td></tr><tr><td>DAN</td><td>×2</td><td>24.69</td><td>0.5729</td><td>0.6219</td><td>25.27</td><td>0.6278</td><td>0.4658</td><td>22.79</td><td>0.5083</td><td>0.5639</td><td>23.46</td><td>0.4923</td><td>0.6384</td></tr><tr><td>DASR(W)</td><td>×2</td><td>24.74</td><td>0.5767</td><td>0.6304</td><td>25.31</td><td>0.6312</td><td>0.4735</td><td>22.81</td><td>0.5110</td><td>0.5720</td><td>23.49</td><td>0.4958</td><td>0.6508</td></tr><tr><td>BSRGAN</td><td>×2</td><td>26.60</td><td>0.7073</td><td>0.3182</td><td>27.65</td><td>0.7799</td><td>0.2027</td><td>24.59</td><td>0.6475</td><td>0.3013</td><td>24.88</td><td>0.5967</td><td>0.3769</td></tr><tr><td>Real-ESRGAN+</td><td>×2</td><td>25.50</td><td>0.6963</td><td>0.2993</td><td>26.73</td><td>0.7771</td><td>0.2157</td><td>23.65</td><td>0.6299</td><td>0.3023</td><td>24.11</td><td>0.5860</td><td>0.3433</td></tr><tr><td>SwinIR-GAN</td><td>×2</td><td>25.33</td><td>0.6886</td><td>0.3313</td><td>27.07</td><td>0.7793</td><td>0.2093</td><td>23.76</td><td>0.6364</td><td>0.3128</td><td>23.83</td><td>0.5717</td><td>0.3707</td></tr><tr><td>Ours</td><td>×2</td><td>25.26</td><td>0.6680</td><td>0.2753</td><td>26.46</td><td>0.7470</td><td>0.1964</td><td>23.38</td><td>0.5982</td><td>0.2852</td><td>23.83</td><td>0.5599</td><td>0.3264</td></tr><tr><td>CDC</td><td>×4</td><td>23.11</td><td>0.5850</td><td>0.7132</td><td>19.99</td><td>0.5077</td><td>0.7168</td><td>20.38</td><td>0.4551</td><td>0.7377</td><td>21.75</td><td>0.4800</td><td>0.7707</td></tr><tr><td>DAN</td><td>×4</td><td>24.22</td><td>0.5929</td><td>0.6881</td><td>20.85</td><td>0.5319</td><td>0.6771</td><td>21.44</td><td>0.4937</td><td>0.6758</td><td>22.52</td><td>0.4818</td><td>0.7438</td></tr><tr><td>DASR(W)</td><td>×4</td><td>24.19</td><td>0.5920</td><td>0.7021</td><td>20.87</td><td>0.5336</td><td>0.6972</td><td>21.43</td><td>0.4953</td><td>0.6950</td><td>22.49</td><td>0.4818</td><td>0.7576</td></tr><tr><td>BSRGAN</td><td>×4</td><td>24.91</td><td>0.6500</td><td>0.3596</td><td>21.63</td><td>0.5573</td><td>0.4683</td><td>22.17</td><td>0.5165</td><td>0.4173</td><td>22.95</td><td>0.5042</td><td>0.4405</td></tr><tr><td>Real-ESRGAN+</td><td>×4</td><td>23.80</td><td>0.6414</td><td>0.3696</td><td>21.31</td><td>0.5449</td><td>0.5068</td><td>21.54</td><td>0.5288</td><td>0.4271</td><td>22.43</td><td>0.5035</td><td>0.4693</td></tr><tr><td>SwinIR-GAN</td><td>×4</td><td>24.13</td><td>0.6479</td><td>0.3543</td><td>20.91</td><td>0.5128</td><td>0.5115</td><td>21.58</td><td>0.5041</td><td>0.4487</td><td>22.23</td><td>0.4925</td><td>0.4447</td></tr><tr><td>Ours</td><td>×4</td><td>23.77</td><td>0.6203</td><td>0.3298</td><td>20.45</td><td>0.4863</td><td>0.4942</td><td>21.24</td><td>0.4809</td><td>0.3801</td><td>22.11</td><td>0.4830</td><td>0.4143</td></tr></table>
247
+
248
+ Table 2: Quantitative comparison with state-of-the-art methods on real-world benchmarks. NIQE $\downarrow$ : the lower, the better. The best and second performance are marked in red and blue. Some numbers of competitive methods are taken from [45].
249
+
250
+ <table><tr><td>Datasets</td><td>Bicubic</td><td>DAN</td><td>RealSR</td><td>CDC</td><td>DASR(W)</td><td>BSRGAN</td><td>Real-ESRGAN+</td><td>SwinIR-GAN</td><td>Ours</td></tr><tr><td>RealSR [44]</td><td>6.2438</td><td>6.5673</td><td>6.8041</td><td>6.2376</td><td>8.1918</td><td>5.7355</td><td>4.7832</td><td>4.7644</td><td>4.7434</td></tr><tr><td>DRealSR [47]</td><td>6.5766</td><td>7.0720</td><td>7.7213</td><td>6.6359</td><td>9.1446</td><td>6.1362</td><td>4.8458</td><td>4.7053</td><td>4.1987</td></tr><tr><td>DPED-iphone [18]</td><td>6.0121</td><td>6.1414</td><td>5.5855</td><td>6.2738</td><td>6.9887</td><td>5.9906</td><td>5.2631</td><td>4.9468</td><td>5.1066</td></tr></table>
251
+
252
+ ![](images/61e455a4d904b59f0fc24b5e1373e34e2a2e087788d2334d6d460b0a6d7ddf6f.jpg)
253
+ (a) Textures generated with tiled single code. The tiled feature size are: $1 \times 1, 2 \times 2, 3 \times 3, 4 \times 4$ (from top to bottom)
254
+ Figure 3: Visualization of texture priors encoded with pretrained codebook $\mathcal{Z}$ . Semantic textures emerge when different codes are combined, such as $①$ grass, $②$ plant and $③$ water.
255
+
256
+ ![](images/0ea3c087e08893e4d4c9dd28264a57b677943ace64ce68031af73788357b0dad.jpg)
257
+ (b) Textures generated with random combination with different number of codes. The size of combined feature map is $16 \times 16$ .
258
+
259
+ As shown in Fig. 3, we visualize the priors encoded in $\mathcal{Z}$ by projecting features to RGB pixel space with the pretrained decoder $G$ . In other words, we obtain the RGB patch of each vector $z_{j} \in \mathcal{Z}$ with $G(z_{j})$ , where the size of the RGB patches is $8 \times 8$ . Specifically, we explore how textures are encoded by single codes and combinations of different codes:
262
+
263
+ - Fig. 3(a) shows that individual codes alone can represent some basic texture elements. However, when the same code is tiled onto a bigger feature map, e.g., $4 \times 4$ , the decoder tends to preserve the color while producing a smooth image. This implies that a single code is not enough to represent complex textures.
264
+ - Fig. 3(b) shows that complex and realistic textures can be generated by combining several different code samples, which indicates that the pretrained $\mathcal{Z}$ indeed learns to encode rich texture priors. In addition, it can be observed that different combinations of code samples correspond to different semantics, such as, $①$ grass, $②$ plant and $③$ water. Please see the supplementary materials for more examples.
265
+
266
+ Based on the above discussion, we conjecture that the individual codes in $\mathcal{Z}$ represent simple texture elements, while the diverse semantics are encoded in the combinations of multiple codes.
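The visualization in Fig. 3(a) can be reproduced in sketch form by tiling a single code vector and decoding it with the pretrained $G$ ; the function and argument names are illustrative.

```python
import torch

def visualize_code(decoder, codebook, j, tile=1):
    """Decode a single code vector Z_j tiled onto a (tile x tile) feature map, as in
    Fig. 3(a); tile=1 yields the 8x8 RGB patch of one code, larger tiles reproduce
    the tiling experiment."""
    z = codebook[j].view(1, -1, 1, 1).repeat(1, 1, tile, tile)   # (1, n_z, tile, tile)
    with torch.no_grad():
        return decoder(z)                                        # (1, 3, 8*tile, 8*tile)
```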
267
+
268
+ # 5.2 Comparison with Existing Methods
269
+
270
+ We compare the proposed FeMaSR with several state-of-the-art methods for blind SR, including CDC [47], DAN [31], DASR(W) [43], RealSR [19], BSRGAN [53], Real-ESRGAN+ [45] and SwinIR-GAN
271
+
272
+ ![](images/2a1b5b45b9a1d2334e5c538dc796f090f836ed701c1948d6cf2b5df2255721d2.jpg)
273
+
274
+ ![](images/25613f83b32d4f4c72f5e7c1c543f254f273a71630cb5d3d10f784d249814524.jpg)
275
+ Bicubic (x2)
276
+
277
+ ![](images/bece1dfceba093983362a7a61b1dd767538e39fe90108a4eacdc7d550efe59c5.jpg)
278
+ DAN
279
+
280
+ ![](images/a0cdc4d7398ed5e7250ebb72db3004b84c69f116e39c955b19eaf9e8926a9cd7.jpg)
281
+ DASR(W)
282
+
283
+ ![](images/79c2fcd0c1b1bc34fd57670a21679a44663a26dfb56c7ffb591dfb8368929e25.jpg)
284
+ BSRGAN
285
+
286
+ ![](images/230f7260998c06b4bf3b0b8c0e61cef9fb943654659bddb30a19699b339b3d48.jpg)
287
+
288
+ ![](images/5239ff61aa6f9adaca7474a3cfb5e5e8f04bafe6624f194cac8d95fa48abf6e9.jpg)
289
+ Bicubic (x4)
290
+
291
+ ![](images/72f650fb173a26036ca4c232de01405b38a1448884c867ec12f2f2895a277366.jpg)
292
+ Real-ESRGAN+
293
+
294
+ ![](images/75ec58c90b08c719c21387cf522b24a914d768c6a28b1632879509ae44957472.jpg)
295
+ DAN
296
+
297
+ ![](images/3e72d9d8d0a92b5628cf4bb1871ce4312fad670cba0b9daeec6503f38d59b409.jpg)
298
+ SwinIR-GAN
299
+
300
+ ![](images/c83e3b27983a92a1847f66abb8180890d0f2b6fbcb34555788753565ccd67399.jpg)
301
+ DASR(W)
302
+
303
+ ![](images/15dc96ffc5381ca609221cc8e507bcf4d099336d84cc4023cb117e200de83a71.jpg)
304
+ Ours
305
+
306
+ ![](images/8d69e4e68a640097194d07959c2d87df7a746d17cf1060584cb1046e93f8ae58.jpg)
307
+ Ground Truth
308
+
309
+ ![](images/376817fea78eda8746c22e067d3e3a25d0c48cdc41c8176227823643581b2ce7.jpg)
310
+ BSRGAN
311
+
312
+ ![](images/4552f5ac8baa722a95ccd21667e835277f72234015c52ced325b7fb3885ad8cc.jpg)
313
+ Ground Truth
314
+
315
+ ![](images/b60a251c7aedb68edbbdfb5256b5ec055b37641d05d244311cd5bcffe2eebb3d.jpg)
316
+ Figure 4: Visual comparisons on two examples from synthetic benchmarks with upscale factors of 2 (first row) and 4 (second row). Thanks to the HRP, our model is able to restore realistic and faithful textures even when the inputs are severely corrupted. As for the competing works, some have difficulty removing the degradation, i.e., DAN and DASR(W), and the others generate artifacts or tend to be over-smooth, i.e., BSRGAN, Real-ESRGAN+, SwinIR-GAN. Please zoom in for the best view.
317
+
318
+ ![](images/c3b2cc825ad0abfe9865a15ca13ece8b76cedb08ac3ea136615e1580f7f58720.jpg)
319
+ Bicubic
320
+
321
+ ![](images/a3afeb76d8838515733ba463186818440f2f727559096b39555936842521734e.jpg)
322
+ BSRGAN
323
+
324
+ ![](images/2373dde6487285abf5d3be355bba3ec3d52f690bfece2fde0f83e205e666d049.jpg)
325
+ DAN
326
+
327
+ ![](images/93248194f56e31237071042cf922fcf5365bbfa080881377154e56a91050f504.jpg)
328
+ Real-ESRGAN+
329
+
330
+ ![](images/cd1c76dcc1cf6fbee6b3a73e1691aae8d72306a5c6c51b8aed62d25ac25689e9.jpg)
331
+ CDC
332
+
333
+ ![](images/72852eeed3a94e2b4c119b2d8351f11269e3d3ceb4ef182edc90214bfdf3088c.jpg)
334
+ SwinIR-GAN
335
+
336
+ ![](images/98b965fdec27b1b0c49651fd937f8e1747eb1ce75bfc00b632a7f4ed46e8ad0b.jpg)
337
+ DASR(W)
338
+
339
+ ![](images/770fed18547c740fecd902b04614e8acd83223f5bfab532288f92a93abbc4980.jpg)
340
+ Ours
341
+
342
+ ![](images/0f34cb9b70d7755a4ce57f4f82979aacb22fca11d58a5f26dd9889b38a1b5ad5.jpg)
343
+ Figure 5: Visual comparisons on two real-world examples with upscale factor 4. Our model can remove degradations and generate feasible details at the same time, while other GAN-based methods tend to be either over-textured (first row) or over-smooth (second row). Please zoom in for the best view.
344
+
345
+ ![](images/fa166e41b6b6b5598a2900f17f098b0591ada49db131903204f63a0e0328cbf8.jpg)
346
+ Bicubic
347
+
348
+ ![](images/cebfbc1e8e5d029e563a42d19eec289b8824daf980ac21f86e658b132555209a.jpg)
349
+ BSRGAN
350
+
351
+ ![](images/460225697ddd5671c1e860201c2be660470b80c3a7aa9f1457a3a87f579ae401.jpg)
352
+ DAN
353
+
354
+ ![](images/0290991ae94ed9a17f11f86462319e810890293c2209bfb33a1ae6e3479eaacd.jpg)
355
+ Real-ESRGAN+
356
+
357
+ ![](images/19b68c92e1cc461d8784dcfba74aab76ec47786e55753da7c35d2fd72959641f.jpg)
358
+ CDC
359
+
360
+ ![](images/63d807c9c54d1c4e944afe6ba28ae4837285d70da3efd28e84f0c6090734f4ce.jpg)
361
+ SwinIR-GAN
362
+
363
+ ![](images/3ff0204d309d414b7989ef05f6c6851dbb6f2cf32462ec2b6750fde3d6bd93eb.jpg)
364
+ DASR(W)
365
+
366
+ ![](images/d158d1029061f85722bdab3cdd6151e7a6828b0da140a22379ca7e1cac3de37a.jpg)
367
+ Ours
368
+
369
+ [28]. Specifically, CDC proposed a divide-and-conquer architecture; DAN, DASR(W) and RealSR learned degradation models from LR inputs; BSRGAN, Real-ESRGAN+ and SwinIR-GAN used synthetic training data generated by handcrafted degradation models. We use the original codes and weights from the official public github repositories for all competing methods. Quantitative and qualitative results on both synthetic and real-world benchmarks are reported as follows.
370
+
371
+ Comparison on Synthetic Benchmarks As Tab. 1 shows, our FeMaSR outperforms competing methods in LPIPS scores on most benchmarks (5 out of 6). Note that we focus on the LPIPS scores as LPIPS better captures perceptual quality than other metrics (e.g., PSNR/SSIM) [44, 45, 50, 53, 56]. In addition, it can be observed that, in general, methods that learn the degradations, such as DAN and DASR(W), perform much worse than those using manually designed degradation models, which indicates the difficulties in learning complex real-world degradations. Furthermore, we compare the SR results qualitatively through visual inspection in Fig. 4. It can be observed that in the first row, BSRGAN, Real-ESRGAN and SwinIR-GAN mistake the feather textures for noise and remove them. In the second row, although the distortions are removed successfully, they all fail to generate feasible textures for the trees. In contrast, thanks to the semantic-aware HRP, our method does not have such problems and generates higher quality results.
372
+
373
+ Comparison on Real-world Benchmarks To make a fair comparison, we compare our method against state-of-the-art ones on three large real-world benchmarks and evaluate the results using a standard no-reference IQA metric NIQE. As Tab. 2 shows, our method outperforms competing methods in 2 out of 3 real-world benchmarks, which clearly demonstrates the effectiveness of our framework. In Fig. 5, it can be observed that our FeMaSR produces sharp and clear textures without generating artifacts, while the other methods either fail to remove degradations or tend to be over-textured and over-smooth. Please see the supplementary materials for more results.
374
+
375
+ # 5.3 Ablation Study
376
+
377
+ We conduct ablation experiments on four variations of our framework, as shown in Tab. 3, to validate our design: Model-A, a baseline network obtained by discarding Stage I, feature matching and residual shortcuts; it has a similar architecture to SwinIR and is trained with a GAN from scratch. Model-B, Model-A with the pretrained decoder; Model-C, Model-B with the pretrained codebook and feature matching; FeMaSR, the full model with HRP and residual shortcuts; Model-D, FeMaSR built on an HRP pretrained without semantic guidance.
378
+
379
+ Effectiveness of Residual Shortcut As claimed in Sec. 3, the residual shortcut helps the optimization of the feature matching process and complements possible matching errors. We verify these claims by removing the residual shortcut in the training (Model-C) and testing stages, respectively. As we can see in Fig. 6(a), the feature matching loss $\mathcal{L}_{fema}$ decreases much faster with the residual shortcut. This indicates that the residual shortcut is essential for the optimization of $\mathcal{L}_{fema}$ . We can also observe a clear performance drop for Model-C without the residual shortcut in Tab. 3 and Fig. 7. We further demonstrate how the residual shortcut helps to complement feature matching errors in Fig. 6(b). We can notice that the model with the residual shortcut disabled
380
+
381
+ ![](images/8e00c9afdea01fcac334b346e8c3b8629f25ffa53c103feba32fc37b48b6448a.jpg)
382
+ $\mathcal{L}_{fema}$
383
+
384
+ ![](images/3aa0e36f5cb5be4eb233e357f5944cce194493c82ff664252cfa8671143ed3be.jpg)
385
+ Lrec
386
+
387
+ ![](images/3397a4c0730c86687bf3ff341643ba8f8c32440a721751ec129682ee26f5b2a5.jpg)
388
+ (a) Stage II training loss curve w/ and w/o residual shortcut.
389
+
390
+ ![](images/f21283e28056d5eb1236fa75ec37f4c0b4ab368b453ffcf549441d072da0b25a.jpg)
391
+ LR input
392
+
393
+ ![](images/d1a4f95f144af66547546646b77015b7b6506174f80a2f930fb1aa1663a06d4a.jpg)
394
+ Full results
395
+ Figure 6: Effectiveness of residual shortcut.
396
+
397
+ ![](images/a6a24c1d27efd39503aeb8e23ca18abd455c9ac27c698877aece9c210a38e0f5.jpg)
398
+ Disable residual
399
+ Intensity difference
400
+
401
+ ![](images/259a44cb5976047f37bce8e008b8a3db3c03ba58233de955c4f2c993c1acf4c6.jpg)
402
+ (b) Results of disabling residual shortcut in test stage.
403
+
404
+ Table 3: Ablation study on synthetic benchmark DIV2K Valid with upscale factor of 2.
405
+
406
+ <table><tr><td>Model ID</td><td>Model Variations</td><td>LPIPS↓</td></tr><tr><td>A</td><td>w/o HRP</td><td>0.3025</td></tr><tr><td>B</td><td>+ pretrained decoder</td><td>0.2944</td></tr><tr><td>C</td><td>+ pretrained codebook (with feature matching)</td><td>0.3358</td></tr><tr><td>FeMaSR</td><td>+ residual</td><td>0.2753</td></tr><tr><td>D</td><td>FeMaSR w/o semantic</td><td>0.2887</td></tr></table>
407
+
408
+ ![](images/3102da32ea87748706c3daa7195ad21a50600a5462000d06bb1538e0a17564d1.jpg)
409
+
410
+ ![](images/e5e8e14191cb9bf496a353af926c93fe240498c8879479a36df68164c4d55830.jpg)
411
+
412
+ ![](images/87d7d89c98bd310af448b79d6bb2e1519fefead7ec4251eef71fe52682395e14.jpg)
413
+
414
+ ![](images/c3dd39d2495ae1abf08a022c06945ec9ecf7f6dba540e730f87c9e08a7e72bbc.jpg)
415
+
416
+ ![](images/8745a7c38bdb6c1bf657d59a55ba6d3ae27663ee68b52942383d2a8489e4cffe.jpg)
417
+ LR input
418
+ Model-C
419
+ Figure 7: Visual examples of different model variations. Please zoom in for best view.
420
+
421
+ ![](images/7f12ded39a57fc131a0af5961f914aaa5d01091d5b25beac30bb9ff152310a57.jpg)
422
+ Bicubic $(\times 2)$
423
+ Model-D
424
+
425
+ ![](images/092c753efead8aba007624c2a24840af09041286abb1fef0b2824c2e5a859143.jpg)
426
+ Model-A
427
+ FeMaSR
428
+
429
+ ![](images/16128c9b5b97a3fce19dd7ab0a20728dedc080f51188ae30d442e2169905afad.jpg)
430
+ Model-B
431
+ GT
432
+
433
+ can already remove the distortions to a large extent. The residual shortcut mainly complements the color and edges.
434
+
435
+ Effectiveness of HRP Models A, B and FeMaSR validate the necessity of $\mathcal{Z}$ and $G$ in HRP. As discussed above, the performance drop of Model-C is mainly due to the optimization difficulty brought
436
+
437
+ ![](images/154cace1f2c13383153ddf18aec1b6185e3101dc0fa619dd73793f52ffc585cc.jpg)
438
+ (a) Stage I reconstruction loss curve
439
+
440
+ <table><tr><td>VQGAN</td><td>Reconstruction LPIPS↓</td></tr><tr><td>w/o semantic</td><td>0.2032</td></tr><tr><td>w/ semantic</td><td>0.1893</td></tr></table>
441
+
442
+ (b) Reconstruction quality on Div2k Valid
443
+
444
+ Figure 8: Effectiveness of semantic guidance.
445
+
446
+ by feature matching. Therefore, we do not use it to validate HRP. It can be observed that Model-B is better than Model-A since the pretrained decoder helps to stabilize GAN training. However, both Model-A and Model-B cannot handle complex distortions without feature matching and tend to generate artifacts, see Fig. 7. Meanwhile, the full model, FeMaSR, can make full use of HRP in both $G$ and $\mathcal{Z}$ , and thereby has the best performance.
447
+
448
+ Effectiveness of Semantic Guidance We provide the Stage I reconstruction training loss curve and LPIPS scores to show the benefits of semantic guidance. It can be seen that the VQGAN with semantic guidance converges faster and performs better, resulting in a better HRP. This in turn improves the restoration performance; see Tab. 3 and Fig. 7.
449
+
450
+ # 6 CONCLUSION
451
+
452
+ In this paper, we have investigated the usage of implicit high-resolution priors (HRP) encoded in the codebook and associated decoder of a pretrained VQGAN for real-world blind SR. In particular, we formulate the SR task as a feature matching problem between the LR features and a distortion-free HR feature codebook. Because the HRP is distortion free and fixed during the SR stage, our FeMaSR is able to generate more realistic results with fewer artifacts than previous GAN-based approaches. To train a better HRP, we integrate semantic information into the HRP using features from a pretrained VGG19 network. To facilitate the optimization of the feature matching loss, we introduce multi-scale residual shortcut connections to the pretrained decoder. Quantitative and qualitative experiments on both synthetic and real-world benchmarks demonstrate the superiority of the proposed FeMaSR for real-world LR images.
453
+
454
+ # 7 ACKNOWLEDGEMENT
455
+
456
+ This work was done at GAP Lab, CUHKSZ which is directed by Prof. Xiaoguang Han, and is supported by Alibaba Innovative Research, National Natural Science Foundation of China (62072383, 61702433, 62077039), the Fundamental Research Funds for the Central Universities (20720210044, 20720190006).
457
+
458
+ # REFERENCES
459
+
460
+ [1] Eirikur Agustsson and Radu Timofte. 2017. NTIRE 2017 Challenge on Single Image Super-Resolution: Dataset and Study. In CVPRW.
461
+ [2] Andrew Brock, Jeff Donahue, and Karen Simonyan. 2019. Large Scale GAN Training for High Fidelity Natural Image Synthesis. In ICLR.
462
+ [3] Kelvin CK Chan, Xintao Wang, Xiangyu Xu, Jinwei Gu, and Chen Change Loy. 2021. Glean: Generative latent bank for large-factor image super-resolution. In CVPR. 14245-14254.
463
+ [4] Chaofeng Chen, Dihong Gong, Hao Wang, Zhifeng Li, and Kwan-Yee K. Wong. 2020. Learning Spatial Attention for Face Super-Resolution. In IEEE TIP.
464
+ [5] Chaofeng Chen, Xiaoming Li, Yang Lingbo, Xianhui Lin, Lei Zhang, and KYW Wong. 2021. Progressive Semantic-Aware Style Transformation for Blind Face Restoration. In CVPR.
465
+ [6] Hanting Chen, Yunhe Wang, Tianyu Guo, Chang Xu, Yiping Deng, Zhenhua Liu, Siwei Ma, Chunjing Xu, Chao Xu, and Wen Gao. 2021. Pre-Trained Image Processing Transformer. In CVPR.
466
+
467
+ [7] Tao Dai, Jianrui Cai, Yongbing Zhang, Shu-Tao Xia, and Lei Zhang. 2019. Second-order Attention Network for Single Image Super-Resolution. In CVPR. 11065–11074.
468
+ [8] Chao Dong, Chen Change Loy, Kaiming He, and Xiaoou Tang. 2014. Learning a deep convolutional network for image super-resolution. In ECCV. Springer, 184-199.
469
+ [9] Patrick Esser, Robin Rombach, and Bjorn Ommer. 2021. Taming transformers for high-resolution image synthesis. In CVPR. 12873-12883.
470
+ [10] Manuel Fritsche, Shuhang Gu, and Radu Timofte. 2019. Frequency separation for real-world super-resolution. In ICCVW. 3599-3608.
471
+ [11] Leon Gatys, Alexander S Ecker, and Matthias Bethge. 2015. Texture synthesis using convolutional neural networks. NeurIPS 28 (2015), 262-270.
472
+ [12] Muhammad Waleed Gondal, Bernhard Schölkopf, and Michael Hirsch. 2018. The unreasonable effectiveness of texture transfer for single image super-resolution. In ECCVW. Springer, 80-97.
473
+ [13] Jinjin Gu, Hannan Lu, Wangmeng Zuo, and Chao Dong. 2019. Blind superresolution with iterative kernel correction. In CVPR. 1604-1613.
474
+ [14] Jinjin Gu, Yujun Shen, and Bolei Zhou. 2020. Image processing using multi-code gan prior. In CVPR. 3012-3021.
475
+ [15] Shuhang Gu, Andreas Lugmayr, Martin Danelljan, Manuel Fritsche, Julien Lamour, and Radu Timofte. 2019. Div8k: Diverse 8k resolution image dataset. In ICCVW. IEEE, 3512-3516.
476
+ [16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In CVPR. 770-778.
477
+ [17] Gao Huang, Zhuang Liu, Laurens Van Der Maaten, and Kilian Q Weinberger. 2017. Densely connected convolutional networks. In CVPR. 4700-4708.
478
+ [18] Andrey Ignatov, Nikolay Kobyshev, Radu Timofte, Kenneth Vanhoey, and Luc Van Gool. 2017. Dslr-quality photos on mobile devices with deep convolutional networks. In ICCV. 3277-3285.
479
+ [19] Xiaozhong Ji, Yun Cao, Ying Tai, Chengjie Wang, Jilin Li, and Feiyue Huang. 2020. Real-world super-resolution via kernel estimation and noise injection. In CVPRW. 466-467.
480
+ [20] Yuming Jiang, Kelvin CK Chan, Xintao Wang, Chen Change Loy, and Ziwei Liu. 2021. Robust Reference-based Super-Resolution via C2-Matching. In CVPR. 2103–2112.
481
+ [21] Tero Karras, Timo Aila, Samuli Laine, and Jaakko Lehtinen. 2018. Progressive growing of gans for improved quality, stability, and variation. ICLR (2018).
482
+ [22] Tero Karras, Samuli Laine, Miika Aittala, Janne Hellsten, Jaakko Lehtinen, and Timo Aila. 2020. Analyzing and improving the image quality of stylegan. In CVPR. 8110-8119.
483
+ [23] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. 2016. Accurate image superresolution using very deep convolutional networks. In CVPR. 1646-1654.
484
+ [24] Diederik P Kingma and Jimmy Ba. 2014. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014).
485
+ [25] Xiaoming Li, Chaofeng Chen, Shangchen Zhou, Xianhui Lin, Wangmeng Zuo, and Lei Zhang. 2020. Blind face restoration via deep multi-scale component dictionaries. In ECCV. Springer, 399-415.
486
+ [26] Xiaoming Li, Wenyu Li, Dongwei Ren, Hongzhi Zhang, Meng Wang, and Wangmeng Zuo. 2020. Enhanced blind face restoration with multi-exemplar images and adaptive spatial feature fusion. In CVPR. 2706-2715.
487
+ [27] Xiaoming Li, Ming Liu, Yuting Ye, Wangmeng Zuo, Liang Lin, and Ruigang Yang. 2018. Learning warped guidance for blind face restoration. In ECCV. 272-289.
488
+ [28] Jingyun Liang, Jiezhang Cao, Guolei Sun, Kai Zhang, Luc Van Gool, and Radu Timofte. 2021. SwinIR: Image Restoration Using Swin Transformer. In ICCVW.
489
+ [29] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. 2017. Enhanced deep residual networks for single image super-resolution. In CVPRW, 136-144.
490
+ [30] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. 2021. Swin transformer: Hierarchical vision transformer using shifted windows. ICCV (2021).
491
+ [31] Zhengxiong Luo, Yan Huang, Shang Li, Liang Wang, and Tieniu Tan. 2020. Unfolding the Alternating Optimization for Blind Super Resolution. NeurIPS 33 (2020).
492
+ [32] Shunta Maeda. 2020. Unpaired image super-resolution using pseudo-supervision. In CVPR. 291-300.
493
+ [33] Sachit Menon, Alexandru Damian, Shijia Hu, Nikhil Ravi, and Cynthia Rudin. 2020. Pulse: Self-supervised photo upsampling via latent space exploration of generative models. In CVPR. 2437-2445.
494
+ [34] Takeru Miyato, Toshiki Kataoka, Masanori Koyama, and Yuichi Yoshida. 2018. Spectral Normalization for Generative Adversarial Networks. In ICLR.
495
+ [35] Ben Niu, Weilei Wen, Wenqi Ren, Xiangde Zhang, Lianping Yang, Shuzhen Wang, Kaihao Zhang, Xiaochun Cao, and Haifeng Shen. 2020. Single image super-resolution via a holistic attention network. In ECCV. Springer, 191-207.
496
+ [36] Aaron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. 2017. Neural discrete representation learning. NeurIPS (2017).
497
+ [37] Xingang Pan, Xiaohang Zhan, Bo Dai, Dahua Lin, Chen Change Loy, and Ping Luo. 2020. Exploiting Deep Generative Prior for Versatile Image Restoration and Manipulation. In ECCV.
498
+
499
+ [38] Xingang Pan, Xiaohang Zhan, Bo Dai, Dahua Lin, Chen Change Loy, and Ping Luo. 2020. Exploiting deep generative prior for versatile image restoration and manipulation. In ECCV. Springer, 262-277.
500
+ [39] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In NeurIPS, Vol. 32. 8026-8037.
501
+ [40] Ali Razavi, Aaron van den Oord, and Oriol Vinyals. 2019. Generating diverse high-fidelity images with vq-vae-2. In NeurIPS. 14866-14876.
502
+ [41] Assaf Shocher, Nadav Cohen, and Michal Irani. 2018. "zero-shot" super-resolution using deep internal learning. In CVPR. 3118-3126.
503
+ [42] Ziyu Wan, Bo Zhang, Dongdong Chen, Pan Zhang, Dong Chen, Jing Liao, and Fang Wen. 2020. Bringing old photos back to life. In CVPR. 2747-2757.
504
+ [43] Longguang Wang, Yingqian Wang, Xiaoyu Dong, Qingyu Xu, Jungang Yang, Wei An, and Yulan Guo. 2021. Unsupervised Degradation Representation Learning for Blind Super-Resolution. In CVPR. 10581-10590.
505
+ [44] Xintao Wang, Yu Li, Honglun Zhang, and Ying Shan. 2021. Towards Real-World Blind Face Restoration with Generative Facial Prior. In CVPR. 9168-9178.
506
+ [45] Xintao Wang, Liangbin Xie, Chao Dong, and Ying Shan. 2021. Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. ICCVW (2021).
507
+ [46] Xintao Wang, Ke Yu, Chao Dong, and Chen Change Loy. 2018. Recovering realistic texture in image super-resolution by deep spatial feature transform. In CVPR.
508
+ [47] Pengxu Wei, Ziwei Xie, Hannan Lu, Zongyuan Zhan, Qixiang Ye, Wangmeng Zuo, and Liang Lin. 2020. Component divide-and-conquer for real-world image super-resolution. In ECCV. Springer, 101-117.
509
+ [48] Yunxuan Wei, Shuhang Gu, Yawei Li, Radu Timofte, Longcun Jin, and Hengjie Song. 2021. Unsupervised real-world image super resolution via domain-distance aware training. In CVPR. 13385-13394.
510
+ [49] Fuzhi Yang, Huan Yang, Jianlong Fu, Hongtao Lu, and Baining Guo. 2020. Learning Texture Transformer Network for Image Super-Resolution. In CVPR.
511
+ [50] Tao Yang, Peiran Ren, Xuansong Xie, and Lei Zhang. 2021. GAN Prior Embedded Network for Blind Face Restoration in the Wild. In CVPR. 672-681.
512
+ [51] Jiahui Zhang, Shijian Lu, Fangneng Zhan, and Yingchen Yu. 2021. Blind Image Super-Resolution via Contrastive Representation Learning. arXiv preprint arXiv:2107.00708 (2021).
513
+ [52] Kai Zhang, Luc Van Gool, and Radu Timofte. 2020. Deep unfolding network for image super-resolution. In CVPR. 3217-3226.
514
+ [53] Kai Zhang, Jingyun Liang, Luc Van Gool, and Radu Timofte. 2021. Designing a practical degradation model for deep blind image super-resolution. ICCV (2021).
515
+ [54] Kai Zhang, Wangmeng Zuo, and Lei Zhang. 2018. Learning a single convolutional super-resolution network for multiple degradations. In CVPR. 3262-3271.
516
+ [55] Kai Zhang, Wangmeng Zuo, and Lei Zhang. 2019. Deep plug-and-play superresolution for arbitrary blur kernels. In CVPR. 1671-1681.
517
+ [56] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. 2018. The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In CVPR.
518
+ [57] Wenlong Zhang, Yihao Liu, Chao Dong, and Yu Qiao. 2019. Ranksrgan: Generative adversarial networks with ranker for image super-resolution. In ICCV. 3096-3105.
519
+ [58] Yulun Zhang, Kunpeng Li, Kai Li, Lichen Wang, Bineng Zhong, and Yun Fu. 2018. Image super-resolution using very deep residual channel attention networks. In ECCV. 286-301.
520
+ [59] Yulun Zhang, Kunpeng Li, Kai Li, Bineng Zhong, and Yun Fu. 2019. Residual Non-local Attention Networks for Image Restoration. In ICLR.
521
+ [60] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. 2018. Residual dense network for image super-resolution. In CVPR. 2472-2481.
522
+ [61] Zhifei Zhang, Zhaowen Wang, Zhe Lin, and Hairong Qi. 2019. Image superresolution by neural texture transfer. In CVPR. 7982-7991.
523
+ [62] Haitian Zheng, Mengqi Ji, Haoqian Wang, Yebin Liu, and Lu Fang. 2018. CrossNet: An End-to-end Reference-based Super Resolution Network using Cross-scale Warping. In ECCV. 88-104.
524
+ [63] Ruofan Zhou and Sabine Susstrunk. 2019. Kernel modeling super-resolution on real low-resolution images. In CVPR. 2433-2443.
525
+ [64] Shangchen Zhou, Jiawei Zhang, Wangmeng Zuo, and Chen Change Loy. 2020. Cross-Scale Internal Graph Neural Network for Image Super-Resolution. In NeurIPS.
526
+ [65] Jun-Yan Zhu, Taesung Park, Phillip Isola, and Alexei A Efros. 2017. Unpaired image-to-image translation using cycle-consistent adversarial networks. In ICCV. 2223-2232.
527
+
528
+ # A MORE IMPLEMENTATION DETAILS
529
+
530
+ # A.1 Network Architectures of VQGAN
531
+
532
+ Complementary to the simplified network architecture in the paper, we provide the detailed hyper-parameters of the VQGAN encoder and decoder in Fig. 9. We use a codebook $\mathcal{Z}$ of size $1024 \times 512$ in our experiments. The input image is of size $256 \times 256$ and is downsampled into $32 \times 32$ feature maps. Convolution layers with "k3n#s1" are used to match the feature channels with the codebook feature dimension before and after the feature quantization process.
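To make the feature quantization step concrete, the following is a minimal PyTorch sketch of nearest-neighbor quantization of a $32 \times 32$ feature map against a $1024 \times 512$ codebook. The function and variable names are illustrative and not taken from our released code.

```python
import torch

def quantize(feat, codebook):
    """Nearest-neighbor quantization of encoder features against a codebook.

    feat:     (B, C, H, W) feature map, e.g. C=512, H=W=32
    codebook: (K, C) code entries, e.g. K=1024
    Returns the quantized features and the selected code indices.
    """
    B, C, H, W = feat.shape
    flat = feat.permute(0, 2, 3, 1).reshape(-1, C)           # (B*H*W, C)
    # Squared L2 distance between every feature vector and every code entry.
    dist = (flat.pow(2).sum(1, keepdim=True)
            - 2 * flat @ codebook.t()
            + codebook.pow(2).sum(1))                         # (B*H*W, K)
    idx = dist.argmin(dim=1)                                  # nearest code per position
    quant = codebook[idx].reshape(B, H, W, C).permute(0, 3, 1, 2)
    return quant, idx.reshape(B, H, W)

# Usage with random tensors standing in for real encoder outputs.
codebook = torch.randn(1024, 512)
feat = torch.randn(2, 512, 32, 32)
quant, idx = quantize(feat, codebook)
```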
533
+
534
+ We perform experiments to select a suitable codebook size for our model; see Tab. 4 for the results. It can be observed that more codes generally lead to better reconstruction performance, but the improvement is marginal once the number is sufficiently large. We empirically select 1024 as a good balance between performance and computation cost.
535
+
536
+ Table 4: VQGAN reconstruction performance with different code numbers.
537
+
538
+ <table><tr><td>Codebook number</td><td>256</td><td>512</td><td>1024</td><td>2048</td></tr><tr><td>Reconstruction LPIPS↓</td><td>0.2165</td><td>0.2044</td><td>0.1893</td><td>0.1837</td></tr></table>
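The reconstruction LPIPS reported in Tab. 4 can be computed with the publicly available lpips package, as sketched below. The AlexNet backbone shown here is the package default; the backbone choice and the [-1, 1] input range are assumptions of this sketch rather than details confirmed in the main text.

```python
import lpips
import torch

# AlexNet-backed LPIPS; note that the backbone choice affects absolute scores.
loss_fn = lpips.LPIPS(net='alex')

@torch.no_grad()
def reconstruction_lpips(recon, target):
    """recon, target: (B, 3, H, W) RGB tensors scaled to [-1, 1]."""
    return loss_fn(recon, target).mean().item()
```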
539
+
540
+ # A.2 Details of Synthetic Dataset
541
+
542
+ Generation of training HR patches. As described in the paper, we use high-resolution images from DIV2K [1], Flickr2K [29], DIV8K [15] and 10,000 face images from FFHQ [21] to generate HR training patches of size $512 \times 512$ . An overall summary of the training images is given in Tab. 5, and some examples are shown in Fig. 10. The details of obtaining the patches are as follows:
543
+
544
+ For the first three datasets, which contain natural images, we crop patches with the following steps:
545
+
546
+ (1) Crop non-overlapping $512 \times 512$ patches;
547
+ (2) Filter out patches with few textures (or edges). For this purpose, we first calculate the Sobel edge map of the patch, and then compute the mean and variance of the edge map, denoted as $\mu$ and $\sigma^2$ . Because the edge map is sparse, more edges mean larger $\sigma^2$ and $\mu$ ; we therefore empirically discard patches whose $\sigma^2 < 10$ . A minimal code sketch of this filtering step is given below.
548
+
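A minimal sketch of the texture-based filtering step, using OpenCV to compute the Sobel edge map. The threshold of 10 assumes a particular intensity scaling of the edge map, and the function name is illustrative.

```python
import cv2
import numpy as np

def keep_patch(patch, var_thresh=10.0):
    """Return True if a 512x512 BGR patch contains enough texture/edges.

    The edge map is the gradient magnitude from Sobel filters; patches whose
    edge-map variance falls below `var_thresh` are discarded as too flat.
    The effective threshold depends on the intensity scale (here uint8 [0, 255]).
    """
    gray = cv2.cvtColor(patch, cv2.COLOR_BGR2GRAY).astype(np.float32)
    gx = cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=3)
    gy = cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=3)
    edge = np.sqrt(gx ** 2 + gy ** 2)
    return edge.var() >= var_thresh
```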
549
+ For the FFHQ face dataset, the images are well-aligned faces of size $1024 \times 1024$ . The non-overlapping cropping described above would cause content bias on this dataset. Therefore, we first randomly resize each image with a scale factor between [0.5, 1.0], and then randomly crop only one patch from each image.
550
+
551
+ Online generation of training pairs. We use the same degradation model as BSRGAN [53] to generate the corresponding LR images online. To be specific, the input HR patches are first randomly cropped to $256 \times 256$ , and then degraded with the degradation_bsrgan $^4$ function with scale factors 2 and 4 to generate training pairs.
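The sketch below illustrates such online pair generation with a PyTorch dataset. It assumes a BSRGAN-style degradation callable (an HR image and a scale factor in, an LR/HR pair out); the class and argument names are illustrative rather than taken from our released code.

```python
import random
from torch.utils.data import Dataset

class OnlinePairDataset(Dataset):
    """Generates (LR, HR) training pairs on the fly from 512x512 HR patches.

    `degrade_fn` is assumed to behave like BSRGAN's degradation function:
    it takes an HR image in [0, 1] and a scale factor, and returns the
    degraded LR image together with the (possibly re-cropped) HR image.
    """

    def __init__(self, hr_patches, degrade_fn, crop=256, scales=(2, 4)):
        self.hr_patches = hr_patches      # list of HxWx3 float arrays in [0, 1]
        self.degrade_fn = degrade_fn
        self.crop = crop
        self.scales = scales

    def __len__(self):
        return len(self.hr_patches)

    def __getitem__(self, i):
        hr = self.hr_patches[i]
        # Random 256x256 crop from the 512x512 HR patch.
        y = random.randint(0, hr.shape[0] - self.crop)
        x = random.randint(0, hr.shape[1] - self.crop)
        hr = hr[y:y + self.crop, x:x + self.crop]
        sf = random.choice(self.scales)
        lr, hr = self.degrade_fn(hr, sf)
        return lr, hr
```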
552
+
553
+ Generation of synthetic testing benchmarks. For a fair comparison, we use a mixed degradation model of BSRGAN and Real-ESRGAN to synthesize testing LR images. Specifically, we use
554
+
555
+ the degradation_bsrgan_plus $^5$ function with scale factors 2 and 4 to generate testing pairs with a fixed random seed of 123.
556
+
557
+ # B MORE RESULTS
558
+
559
+ # B.1 Visualization of HRP
560
+
561
+ In this part, we show more empirical visualizations of the learned high-resolution priors (HRP). First, we show an overview of all the 1024 codes in Fig. 12. Then, we visualize semantic-related codes with the help of a semantic texture dataset, i.e., the OST dataset [46], with the following steps (a minimal code sketch follows the list):
562
+
563
+ - Obtain the code indices of each image with the pretrained VQGAN. Figure 13 shows some examples of the reconstruction results with our model.
564
+ - Calculate the distribution of each texture category over the codebook $\mathcal{Z}$ , as shown in Fig. 14.
565
+ - Sample codes from the distribution of each class and randomly arrange them to compose an $8 \times 8$ latent feature $z$ , and then decode it into a $64 \times 64$ RGB texture patch.
566
+
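The per-category statistics and texture sampling used for Fig. 14 and Fig. 15 can be sketched as follows. Here, encode_indices and decode_latent stand in for the pretrained VQGAN's encoding (image to a NumPy integer index map) and decoding (latent feature grid to RGB); they are assumed helpers, not actual function names from our code.

```python
import numpy as np

NUM_CODES = 1024

def code_histogram(images, encode_indices):
    """Empirical distribution of code indices for one OST texture category."""
    counts = np.zeros(NUM_CODES)
    for img in images:
        idx = encode_indices(img)            # integer index map, e.g. (H/32, W/32)
        counts += np.bincount(idx.flatten(), minlength=NUM_CODES)
    return counts / counts.sum()

def sample_texture(prob, codebook, decode_latent, rng=None):
    """Sample an 8x8 grid of codes from a category distribution and decode it
    into a 64x64 RGB texture patch."""
    rng = rng or np.random.default_rng()
    idx = rng.choice(NUM_CODES, size=(8, 8), p=prob)
    z = codebook[idx]                        # (8, 8, C) latent feature grid
    return decode_latent(z)
```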
567
+ Finally, Figure 15 empirically visualizes the learned HRP on the OST dataset. We can observe that codes sampled from different texture distributions generate textures similar to the corresponding semantics, which demonstrates the effectiveness of the HRP. Please note that the texture shapes are closely related to the ordering of the latent codes; Fig. 15 only shows empirical statistical visualizations with randomly arranged codes.
568
+
569
+ # B.2 Failure Cases
570
+
571
+ By observing the results, we empirically discovered a limitation of the proposed FeMaSR: it favors natural textures over artificial textures, e.g., the straight lines that dominate building images. Our method usually generates curved lines instead (see Fig. 11). A similar phenomenon also occurs in neural texture synthesis [11]. We leave the solution of this problem to future work.
572
+
573
+ # B.3 Qualitative Results
574
+
575
+ We show more results on synthetic datasets in Fig. 16 and on real-world test images in Fig. 17.
576
+
577
+ ![](images/df8000804acb2138c807124eb609d7cd739e4ad0dee01a570adc7cc7749ae90b.jpg)
578
+ Figure 9: Details of the hyper-parameters for VQGAN. "k3n64s1" denotes a single convolution layer with kernel size $3 \times 3$ , 64 output channels, and stride 1; "#" stands for the corresponding value. We use group normalization with 32 groups and LeakyReLU with a negative slope of 0.2. The upsample convolution is a nearest-neighbor upsampling layer with scale factor 2 followed by a convolution layer.
579
+
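For clarity, the snippet below is a minimal PyTorch sketch of one upsample convolution unit following the caption of Fig. 9 (nearest-neighbor $\times 2$ upsampling, a $3 \times 3$ convolution, group normalization with 32 groups, and LeakyReLU with negative slope 0.2). The exact placement of normalization and activation relative to the convolution is an assumption of this sketch.

```python
import torch.nn as nn

def upsample_conv_block(in_ch, out_ch):
    """Nearest x2 upsample -> 3x3 conv ('k3n{out_ch}s1') -> GroupNorm(32) -> LeakyReLU(0.2).

    out_ch is assumed to be divisible by 32 so that GroupNorm(32, out_ch) is valid.
    """
    return nn.Sequential(
        nn.Upsample(scale_factor=2, mode='nearest'),
        nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=1, padding=1),
        nn.GroupNorm(32, out_ch),
        nn.LeakyReLU(0.2, inplace=True),
    )
```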
580
+ Table 5: Details of HR training datasets.
581
+
582
+ <table><tr><td>Dataset</td><td>DIV2K</td><td>DIV8K</td><td>Flickr2K</td><td>FFHQ</td><td>Total</td></tr><tr><td>Number of full image</td><td>800</td><td>1500</td><td>2550</td><td>10,000</td><td>14,850</td></tr><tr><td>Typical image size W × H</td><td>2032 × 1344</td><td>6720 × 3840</td><td>2032 × 1344</td><td>1024 × 1024</td><td>-</td></tr><tr><td>Number of cropped patches</td><td>8257</td><td>90,892</td><td>27,056</td><td>10,000</td><td>136,205</td></tr></table>
583
+
584
+ ![](images/2f220858912a0db95f0d4a815b8519138f32952df989007098a4d4b1ef48788e.jpg)
585
+ Selected patch and edge map
586
+
587
+ ![](images/b0448da8858dca99e3e96d713d66758795f0fd383946c40c2c56b9ab5e0284cb.jpg)
588
+ Filtered patch and edge map
589
+
590
+ ![](images/5d125f8a17cf218f7a455d24f65150802b6dddfc304f050eeb75c97807b73f2e.jpg)
591
+ FFHQ face images
592
+
593
+ ![](images/61d71269e20faba4999532b7ec85e3cfb0d7028163dd143f47558ee1d7aa6489.jpg)
594
+ Cropped face patches
595
+
596
+ ![](images/54d855fb9a30318178f57bf9880d673e1181fedc5479146b549adf4763832cae.jpg)
597
+ LR Input
598
+
599
+ ![](images/227cdf31a8c4cd61fbaff35e93ba9f84bf63802776b087427b4d3c8d04e90f75.jpg)
600
+ Figure 10: Examples of cropped training HR patches.
601
+ FeMaSR
602
+
603
+ ![](images/bb49ab5734815e9979df4b6d0dab8ea97e2e67f706016d4ff1ef359f063f125c.jpg)
604
+ Ground Truth
605
+ Figure 11: Failure case: a building image with straight lines.
606
+
607
+ ![](images/6a11c8c1ae89aa9c6d9c8f248a2368988a2ba93ca82b6fadc458adc22d7bc67d.jpg)
608
+ Figure 12: Visualization overview of all the 1024 codes in HRP.
609
+
610
+ ![](images/4f861deeb6e45d3eb0c10958a58537f1e489cb051d65f78e634cace6585f8fe8.jpg)
611
+ Original images
612
+ Figure 13: Reconstruction examples from OST dataset.
613
+
614
+ ![](images/0e27306a011978083ad8fe0bd31120b9444bcaee7f5c19063c34df659fe6e228.jpg)
615
+ Reconstructed images with VQGAN
616
+
617
+ ![](images/c8f23b87f55b1a36ab181191aeeeca2c09cbfc303dd7b65865aa562c3f805304.jpg)
618
+
619
+ ![](images/cb01b4281336e42fdb6a95f3495d54131762196adcb452bbaecf9efe8839caea.jpg)
620
+
621
+ ![](images/9a4b9608fe87e87a33949131e2a71b5976d412fd3d23535cb22df6c003544577.jpg)
622
+
623
+ ![](images/c25c7d76031817022fbe49a422930eb7341658cc49779fce2fbe42eecc1a2148.jpg)
624
+
625
+ ![](images/c42f6697d4e8c90743fb1ebca66d41e65dbf94cd690ef8952351e68d0a5695c2.jpg)
626
+
627
+ ![](images/e6d68f576693ce13edb1fe5941fa04946faf11fbca72111ee0798606652a84fa.jpg)
628
+
629
+ ![](images/6c8ea1d7d0bae465ec07807fac1ba06e0945e6811cca88d0c2e351dec87ab0a0.jpg)
630
+ Figure 14: Code index distribution for 7 texture categories in OST dataset.
631
+
632
+ ![](images/df9b336e02a4baad2be8c99db90a9f8f8cf7b715e5036db0bce2beb0a3a92cca.jpg)
633
+ (a) Animal
634
+
635
+ ![](images/102f5b8c41018a1998937d5bbb14294b7f48f7b3691ef17bce8998be355d4c43.jpg)
636
+ (b) Building
637
+
638
+ ![](images/0062c4212a9e4f769e9dd10b392511d2d5d224e11ea4f9f17764085817e78386.jpg)
639
+ (c) Grass
640
+
641
+ ![](images/4343b713266572dc1cb1af6be80ed9feb6357155692f65fccc426f4b8e91803c.jpg)
642
+ (d) Mountain
643
+
644
+ ![](images/dcfe747797b60ff7b2632aa2f4dd362ac6e467d134ae6109d8928f5dc5e24aac.jpg)
645
+ (e) Plant
646
+
647
+ ![](images/eaee8e33781b925240f4aff738b0e86e6c47376903789dce991cb1eb27572249.jpg)
648
+ (f) Sky
649
+
650
+ ![](images/45c36f4351fee8510adb9af8a849d70c9285db3a9f59aff0951381d808a72eed.jpg)
651
+ (g) Water
652
+ Figure 15: Visualization of different semantic-related textures encoded in HRP.
653
+
654
+ ![](images/4c29794eef4f4bf0b422b1da950b819becaa3fad5bbaa50af607fa80f44d9562.jpg)
655
+ Bicubic
656
+
657
+ ![](images/853f379f765119ffbb1dd47c5aa167ea2bd664d492292624388559269cd94a95.jpg)
658
+ DAN
659
+
660
+ ![](images/a4d080d6ba11c1b278f119697eac18cc2ce5d6b57109d1f29e7cc532270a095b.jpg)
661
+
662
+ ![](images/466fd7d4e48077a0e07cac6945c998f3ddcd02f83104c6439494c95e2e7844a8.jpg)
663
+
664
+ ![](images/d436aad33a7749ac69e842251f4f5b65f58f9e8feb28f5d5ec615a46ffe06330.jpg)
665
+ Real-ESRGAN+
666
+
667
+ ![](images/8907d53088ed62847bbd11fd92f356856183356f2371fadddec5c17225aa9c1b.jpg)
668
+ SwinIR-GAN
669
+
670
+ ![](images/791fda809c8cb9e7b0f26050deb7ebe71d555db08d9295d25827532493ee071f.jpg)
671
+ DASR(W)
672
+
673
+ ![](images/145fc2fe95e07d29a5250a99a61ede9223cbcbf7612e863ed7f311f6cb77cde9.jpg)
674
+ BSRGAN
675
+ Ground Truth
676
+
677
+ ![](images/9fdd2cff57fd4472b998e85ceef9872e4d693f4eff19d4cd7391042feb92044c.jpg)
678
+
679
+ ![](images/be26a48316ab4885478e99df5e795418ae8a0cddb2085663ef29bba058e416b1.jpg)
680
+ Bicubic
681
+
682
+ ![](images/b15d6ea022d49a9b069dc3153961f29facba1dcb8a14514fb0ed0e45a5a7db78.jpg)
683
+ Ours
684
+ DAN
685
+
686
+ ![](images/47a8621fc2c4773ab656d95d06c5f16d8ac330341ce6b070a02a1b2389f04f30.jpg)
687
+ DASR(W)
688
+
689
+ ![](images/a7c70779e99b368cafa47f2d20db57f23438922ce58617d1b1d01a3d8153db13.jpg)
690
+ BSRGAN
691
+
692
+ ![](images/d607f3c3e994d697662561181d1012481225feb80011ff1e99a2ac049e1ed7f3.jpg)
693
+ Real-ESRGAN+
694
+
695
+ ![](images/6158661a5186f1ab941306a1adea1e13ba5ddb869efb7401194f3062a6960ba9.jpg)
696
+
697
+ ![](images/01214758c5bf79e8fb411d500124776c203d71c7d06a6a498dcfc1ad96e1d031.jpg)
698
+
699
+ ![](images/797152e441124ed31fc85cc7c1ced7f057e168f92aa76afdca1f69f922045d4a.jpg)
700
+
701
+ ![](images/98fedf5cdfdca9f6c286cd83bc032ebbf72b3d64f0245134ef99edde8e830a16.jpg)
702
+
703
+ ![](images/388e72b2ae9c3dd96d8e40a9a5dd3a395e25f4df29484329f54e515991c7ed49.jpg)
704
+ Bicubic
705
+
706
+ ![](images/74d0d88df6c4be55da1b6e51a5d930fd93a2f362a12bf95701e0527021b87f56.jpg)
707
+ SwinIR-GAN
708
+ DAN
709
+
710
+ ![](images/3ae5e0542795e2e389193f328e067d52f66f5c2f737c710eeaf825681a2ec73f.jpg)
711
+ Ours
712
+ DASR(W)
713
+
714
+ ![](images/d2a7c83a40475f423086c56e0b81403a7f65166e91a20d9ca1d5b1a520f82b1d.jpg)
715
+ Ground Truth
716
+ BSRGAN
717
+
718
+ ![](images/2652d05a48da7dcd58a09e32e644de1531272cee12d94b17dcc5f1294e438968.jpg)
719
+ Real-ESRGAN+
720
+
721
+ ![](images/1140f12563a06de9a4aa0482f35b53b583e6e8ff6000a802748c61d3f6624102.jpg)
722
+ SwinIR-GAN
723
+
724
+ ![](images/b29444fa19d41537660794ddfb5c8760c5919450cd959999dd7d83184f054fc9.jpg)
725
+ Ours
726
+
727
+ ![](images/e9ed70039fc6340b2141979fa2897306766d5e1791dcd175f4134df5b267cd4c.jpg)
728
+ Ground Truth
729
+
730
+ ![](images/abc054741705f24060b706a353cee5a6eebc746c05839085e5b681c115a02cce.jpg)
731
+ Real-ESRGAN+
732
+
733
+ ![](images/dd737cbe4c3b8111cbede613d06de8439f79b7ad4574e839dad9062ef1ac32f4.jpg)
734
+ Bicubic
735
+
736
+ ![](images/213971175978bd0db2e10e771434da45b92b4510d3fe6155224df0ecf28ee000.jpg)
737
+ DAN
738
+
739
+ ![](images/e7ff57e987986048e674592129e490ba809a62bd420e8282fc456db5a62d6e92.jpg)
740
+ DASR(W)
741
+
742
+ ![](images/f97b5f4f5cbbdba7530ab5625fc85de061909f4ff698590dfc02881889656e6b.jpg)
743
+ BSRGAN
744
+
745
+ ![](images/4eed91ea0613bc2a138d2c8745c4c9e521e1512c175bc44463ab3c79fe8dc451.jpg)
746
+ Figure 16: More results on synthetic benchmarks.
747
+
748
+ Real-ESRGAN+
749
+
750
+ ![](images/90e10cfcd8dd578482972cdcc1e96453725297e9582accbe0db981bc96b2913e.jpg)
751
+ SwinIR-GAN
752
+
753
+ ![](images/baa58e6b9239ffe08e5e540f50df9cea51d3795c1c18e5522ac4e72765d936d6.jpg)
754
+ Ours
755
+
756
+ ![](images/281395676697cbe5139ce57113e18686f2edbc0fdeacda27443d1ef69d679fca.jpg)
757
+ Ground Truth
758
+
759
+ ![](images/ab3dfa3ff26d2d33fb49d14c4fc0adefe932d367bc91f770973708709d3ab68a.jpg)
760
+
761
+ ![](images/2daeefa7c05fa55499914f375554bb7b8a0226afc7b90e10c2191fd6e603eaf2.jpg)
762
+ Bicubic
763
+
764
+ ![](images/9eb41baf152c2a6ddb8c44281bab403d18929f6cd997fa3ded836308a127a265.jpg)
765
+ DAN
766
+
767
+ ![](images/44153905c04234069d061d04ebb01b31c48c243dc60c86763337d5edf57c0992.jpg)
768
+ CDC
769
+
770
+ ![](images/db895e6ab1fd81ce5db21122d7976c98eba33de4e295d9c70ef2e1817f7ca4b3.jpg)
771
+ DASR(W)
772
+
773
+ ![](images/557c1612da3a1920d946ce03db472ef4374942c4a13e7f24070618e897f839c0.jpg)
774
+ BSRGAN
775
+
776
+ ![](images/2c61a6f02a0018ca92dabe19c6c06ba5ccb69490cc6b0bc0772a93ccedd0a5ab.jpg)
777
+ Real-ESRGAN+
778
+
779
+ ![](images/7f7c77b25e46ca9be66bc933733a8de3ee4da59b62d727256e4349f43e96ee31.jpg)
780
+ SwinIR-GAN
781
+
782
+ ![](images/82a123e35ed03a4ba48cc5eead51845a058ef68f32cb1ba87a1b1605533e5002.jpg)
783
+ Ours
784
+
785
+ ![](images/2433012d43ede0edeb62a1da12ea3de2455bfaffcdd9046f2cb5175475b59142.jpg)
786
+
787
+ ![](images/173265ccad740a896d125ff402ce635338e9b31f881533159ec79f8b474151c2.jpg)
788
+ Bicubic
789
+
790
+ ![](images/6685099492690fcda5c2fde0c19af20a59fa4c1a010c798b20f1088134bf825e.jpg)
791
+ DAN
792
+
793
+ ![](images/a99de3a604fbc5e6c391a92b016f05e020d96b772d117a3ea95a003314464ab7.jpg)
794
+ CDC
795
+
796
+ ![](images/a4e3967068cfbca31af97747a331a187c0524dfdf18657dbd6592b268afa886e.jpg)
797
+ DASR(W)
798
+
799
+ ![](images/5c340be848484ba1409c1adea340a9b28a194e4ee1745883eda500c3debb730a.jpg)
800
+ BSRGAN
801
+
802
+ ![](images/f99c25b1a83802c2c209cf982b8b9c02b52f330866acafc71a77a37725578f45.jpg)
803
+ Real-ESRGAN+
804
+
805
+ ![](images/d0b8e3fcb2aea898d46cad1c37b7256873bc3dabe5a23395551acc6c5165d197.jpg)
806
+ SwinIR-GAN
807
+
808
+ ![](images/943ebf61b31d5a1872c7e0ec782b21cf77b9b7861c6b3f111c0750d3acf9ed89.jpg)
809
+ Ours
810
+
811
+ ![](images/380283428b85817fdbd701e614c1b7e11cb29d18fde660307dd372c2f230a208.jpg)
812
+ Bicubic
813
+
814
+ ![](images/967ecb266b206844c6ef4ef4ac3909b6d13f287b05a6a9f00e74b4d66e6a5ad8.jpg)
815
+ Real-ESRGAN+
816
+
817
+ ![](images/b147d8f1b063288333f5b7f8ab6c7786057c61cf8127448129a09f8b8b3eeafe.jpg)
818
+ SwinIR-GAN
819
+ Figure 17: More results on real-world test images.
820
+
821
+ ![](images/ae3700d9bc312c96fa9d50a83856ae3505890f1ecc6594c32b27b2f14fc72eef.jpg)
822
+ Ours
2202.13xxx/2202.13142/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2659ad926e61d4818efc6b9ea03eb023f43f8bf1aed8d06f47060875a9a592e5
3
+ size 2999986
2202.13xxx/2202.13142/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13162/747f2044-56ac-435c-b2d3-916c376e22a8_content_list.json ADDED
@@ -0,0 +1,2226 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Pix2NeRF: Unsupervised Conditional $\\pi$ -GAN for Single Image to Neural Radiance Fields Translation",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 117,
8
+ 128,
9
+ 849,
10
+ 172
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Shengqu Cai \nETH Zürich",
17
+ "bbox": [
18
+ 158,
19
+ 204,
20
+ 263,
21
+ 237
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Anton Obukhov ETH Zürich",
28
+ "bbox": [
29
+ 310,
30
+ 204,
31
+ 439,
32
+ 237
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Dengxin Dai \nMPI for Informatics \nETH Zürich",
39
+ "bbox": [
40
+ 486,
41
+ 204,
42
+ 648,
43
+ 255
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Luc Van Gool \nETH Zürich \nKU Leuven",
50
+ "bbox": [
51
+ 696,
52
+ 204,
53
+ 810,
54
+ 255
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "image",
60
+ "img_path": "images/03b5d36de9f518895308aa61c743c5ca190b453155e662f8799786dd520dcc23.jpg",
61
+ "image_caption": [],
62
+ "image_footnote": [],
63
+ "bbox": [
64
+ 98,
65
+ 290,
66
+ 246,
67
+ 364
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "image",
73
+ "img_path": "images/bac52eb184382fb931f6e620ad144cce4f5f74a4e0bdd23a040e0ea71dc7d7e8.jpg",
74
+ "image_caption": [],
75
+ "image_footnote": [],
76
+ "bbox": [
77
+ 264,
78
+ 316,
79
+ 354,
80
+ 349
81
+ ],
82
+ "page_idx": 0
83
+ },
84
+ {
85
+ "type": "image",
86
+ "img_path": "images/db632d04adcc584b493c37e7db9fed6da69f0537fafd8471cb6a72db68f94f2a.jpg",
87
+ "image_caption": [],
88
+ "image_footnote": [],
89
+ "bbox": [
90
+ 375,
91
+ 295,
92
+ 444,
93
+ 359
94
+ ],
95
+ "page_idx": 0
96
+ },
97
+ {
98
+ "type": "image",
99
+ "img_path": "images/b1d2cf56cf0e7175c7a9fd1d778875e9532fd7b0e311e8a02ee12b33e7ee5940.jpg",
100
+ "image_caption": [],
101
+ "image_footnote": [],
102
+ "bbox": [
103
+ 496,
104
+ 290,
105
+ 645,
106
+ 364
107
+ ],
108
+ "page_idx": 0
109
+ },
110
+ {
111
+ "type": "image",
112
+ "img_path": "images/92f69b457de2745ae32395d8d87c6a4e8c82d541c3fa0f6333f34d8871f15101.jpg",
113
+ "image_caption": [],
114
+ "image_footnote": [],
115
+ "bbox": [
116
+ 656,
117
+ 319,
118
+ 746,
119
+ 349
120
+ ],
121
+ "page_idx": 0
122
+ },
123
+ {
124
+ "type": "image",
125
+ "img_path": "images/dc67c9256da1e44df7fa8242787fcbe7229810bb1c31f62a3f7941733c0ed8fd.jpg",
126
+ "image_caption": [],
127
+ "image_footnote": [],
128
+ "bbox": [
129
+ 754,
130
+ 310,
131
+ 852,
132
+ 349
133
+ ],
134
+ "page_idx": 0
135
+ },
136
+ {
137
+ "type": "image",
138
+ "img_path": "images/cb2a0126e80eb1d4ee0be2db00bd953132b166b0d572948b8e45dede7b5d7cde.jpg",
139
+ "image_caption": [],
140
+ "image_footnote": [],
141
+ "bbox": [
142
+ 98,
143
+ 382,
144
+ 166,
145
+ 435
146
+ ],
147
+ "page_idx": 0
148
+ },
149
+ {
150
+ "type": "image",
151
+ "img_path": "images/577d28cf142ad5bbf06ed0454ed46258170927a3c4f3ecbf7f61865e65a327d0.jpg",
152
+ "image_caption": [
153
+ "Figure 1. Overview of Pix2NeRF: We propose a method for unsupervised learning of neural representations of scenes, sharing a common pose prior. At test time, Pix2NeRF disentangles pose and content from an input image and renders novel views of the content. Top: $\\pi$ -GAN is trained on a dataset without pose supervision. Bottom: a trained model is conditioned on a single image to obtain pose-dependent views."
154
+ ],
155
+ "image_footnote": [],
156
+ "bbox": [
157
+ 166,
158
+ 421,
159
+ 287,
160
+ 469
161
+ ],
162
+ "page_idx": 0
163
+ },
164
+ {
165
+ "type": "image",
166
+ "img_path": "images/39614bc39f8834b323b605baf922b22887f0a081b3e9a24c6a26850b71dd412e.jpg",
167
+ "image_caption": [],
168
+ "image_footnote": [],
169
+ "bbox": [
170
+ 294,
171
+ 383,
172
+ 454,
173
+ 506
174
+ ],
175
+ "page_idx": 0
176
+ },
177
+ {
178
+ "type": "image",
179
+ "img_path": "images/65fcae6a18554eedba038be3bbcf1e391e914fe6eec7e136ddcdff029ecfe722.jpg",
180
+ "image_caption": [],
181
+ "image_footnote": [],
182
+ "bbox": [
183
+ 486,
184
+ 380,
185
+ 566,
186
+ 435
187
+ ],
188
+ "page_idx": 0
189
+ },
190
+ {
191
+ "type": "image",
192
+ "img_path": "images/a4ec2bd3d24e482824b0390d81413c855402842364962edccc0bbdfa373d4d7b.jpg",
193
+ "image_caption": [],
194
+ "image_footnote": [],
195
+ "bbox": [
196
+ 566,
197
+ 422,
198
+ 687,
199
+ 469
200
+ ],
201
+ "page_idx": 0
202
+ },
203
+ {
204
+ "type": "image",
205
+ "img_path": "images/210e355cea14ee6b87e8c857e9c70e0faac3f900ef9527925b76fb5bb10b75d8.jpg",
206
+ "image_caption": [],
207
+ "image_footnote": [],
208
+ "bbox": [
209
+ 692,
210
+ 381,
211
+ 852,
212
+ 508
213
+ ],
214
+ "page_idx": 0
215
+ },
216
+ {
217
+ "type": "text",
218
+ "text": "Abstract",
219
+ "text_level": 1,
220
+ "bbox": [
221
+ 233,
222
+ 593,
223
+ 310,
224
+ 609
225
+ ],
226
+ "page_idx": 0
227
+ },
228
+ {
229
+ "type": "text",
230
+ "text": "We propose a pipeline to generate Neural Radiance Fields (NeRF) of an object or a scene of a specific class, conditioned on a single input image. This is a challenging task, as training NeRF requires multiple views of the same scene, coupled with corresponding poses, which are hard to obtain. Our method is based on $\\pi$ -GAN, a generative model for unconditional 3D-aware image synthesis, which maps random latent codes to radiance fields of a class of objects. We jointly optimize (1) the $\\pi$ -GAN objective to utilize its high-fidelity 3D-aware generation and (2) a carefully designed reconstruction objective. The latter includes an encoder coupled with $\\pi$ -GAN generator to form an autoencoder. Unlike previous few-shot NeRF approaches, our pipeline is unsupervised, capable of being trained with independent images without 3D, multi-view, or pose supervision. Applications of our pipeline include 3d avatar generation, object-centric novel view synthesis with a single input image, and 3d-aware super-resolution, to name a few.",
231
+ "bbox": [
232
+ 73,
233
+ 628,
234
+ 473,
235
+ 902
236
+ ],
237
+ "page_idx": 0
238
+ },
239
+ {
240
+ "type": "text",
241
+ "text": "1. Introduction",
242
+ "text_level": 1,
243
+ "bbox": [
244
+ 500,
245
+ 625,
246
+ 630,
247
+ 641
248
+ ],
249
+ "page_idx": 0
250
+ },
251
+ {
252
+ "type": "text",
253
+ "text": "Following the success of Neural Radiance Fields (NeRF) [23], encoding scenes as weights of multi-layer perceptrons (MLPs) has emerged as a promising research direction. Novel View Synthesis is an important application: given sparse sample views of a scene, the task is to synthesize novel views from unseen camera poses. NeRF addresses it by encoding color and volume density at each point of the 3D scene into a neural network and uses traditional volume rendering to compose 2D views.",
254
+ "bbox": [
255
+ 496,
256
+ 651,
257
+ 893,
258
+ 787
259
+ ],
260
+ "page_idx": 0
261
+ },
262
+ {
263
+ "type": "text",
264
+ "text": "While NeRF is capable of synthesizing novel views with high fidelity, it is often impractical due to being \"overfitted\" to a given scene and requiring multiple views of the scene to train. Several follow-up works attempt to address these limitations via making NeRF generalize to new scenes.",
265
+ "bbox": [
266
+ 496,
267
+ 787,
268
+ 893,
269
+ 864
270
+ ],
271
+ "page_idx": 0
272
+ },
273
+ {
274
+ "type": "aside_text",
275
+ "text": "arXiv:2202.13162v1 [cs.CV] 26 Feb 2022",
276
+ "bbox": [
277
+ 22,
278
+ 263,
279
+ 57,
280
+ 705
281
+ ],
282
+ "page_idx": 0
283
+ },
284
+ {
285
+ "type": "page_footnote",
286
+ "text": "Corresponding author: Shengqu Cai (shecai@student.ethz.ch)",
287
+ "bbox": [
288
+ 521,
289
+ 875,
290
+ 851,
291
+ 887
292
+ ],
293
+ "page_idx": 0
294
+ },
295
+ {
296
+ "type": "page_footnote",
297
+ "text": "Code (coming soon): https://github.com/HexagonPrime/Pix2NeRF",
298
+ "bbox": [
299
+ 524,
300
+ 888,
301
+ 877,
302
+ 898
303
+ ],
304
+ "page_idx": 0
305
+ },
306
+ {
307
+ "type": "text",
308
+ "text": "Major progress has been made in training a general NeRF capable of encoding a scene given only one or a handful of views [5, 7, 16, 40, 41, 46]. However, these works are designed to work well only with multi-view images during either training or both training and inference.",
309
+ "bbox": [
310
+ 75,
311
+ 90,
312
+ 470,
313
+ 167
314
+ ],
315
+ "page_idx": 1
316
+ },
317
+ {
318
+ "type": "text",
319
+ "text": "One reason why single-shot NeRF, or in general single-shot novel view synthesis is challenging, is the incomplete content information within a single image. For example, given a frontal image of a car, there is very little information to infer a novel view from the back directly. Bringing back the traditional inverse graphics and 3D reconstruction pipelines, [44] addresses this issue by making an additional assumption on the symmetry of the scene to interpolate potentially missing geometry information within a single image. However, this technique is limited to scenes where symmetry can be introduced and does not tackle the general case.",
320
+ "bbox": [
321
+ 75,
322
+ 167,
323
+ 470,
324
+ 335
325
+ ],
326
+ "page_idx": 1
327
+ },
328
+ {
329
+ "type": "text",
330
+ "text": "Therefore, a natural follow-up question is how does a human brain address such a challenging task? One of the approaches we use unconsciously is learning a prior implicit model for object categories and mapping what we observe to the learned model. This line of thinking is already explored in prior works [40, 46]. An essential part missing from these works is ensuring that novel views also meet our expectation of the object class, and due to the lack of supervision from a sole image, this is normally done via imagination.",
331
+ "bbox": [
332
+ 75,
333
+ 335,
334
+ 470,
335
+ 473
336
+ ],
337
+ "page_idx": 1
338
+ },
339
+ {
340
+ "type": "text",
341
+ "text": "One of the closest forms of imagination developed by the machine learning community is Generative Adversarial Networks [13]. GANs have been very successful in image synthesis and transformation. Beyond 2D, studies have shown GAN's capability of synthesizing 3D content [24] from natural images. This suggests another approach to address 3D reconstruction without multi-view images via 3D GAN inversion. Such a strategy bypasses the problem of missing information within one sole image due to GAN's adversarial training. Existing works [31, 47] utilize such a method based on HoloGAN [24], StyleGAN [47], and others, but one of the drawbacks naturally from these 3D-aware generative models is their relatively weak 3D consistency.",
342
+ "bbox": [
343
+ 75,
344
+ 474,
345
+ 470,
346
+ 671
347
+ ],
348
+ "page_idx": 1
349
+ },
350
+ {
351
+ "type": "text",
352
+ "text": "With the rapid increase of NeRF [23] popularity, corresponding generative models are also gaining attention. GRAF [35] and $\\pi$ -GAN [2] follow traditional GAN settings by mapping latent codes to category-specific radiance fields. These generative models typically have high 3D consistency due to the built-in volumetric rendering design. This observation suggests the possibility of few-shot 3D reconstruction using adversarial training and radiance fields.",
353
+ "bbox": [
354
+ 75,
355
+ 671,
356
+ 470,
357
+ 792
358
+ ],
359
+ "page_idx": 1
360
+ },
361
+ {
362
+ "type": "text",
363
+ "text": "In this paper, we formulate the task of translating an input image of a given category to NeRF as an end-to-end pipeline termed Pix2NeRF (Fig. 1). The method can perform novel view synthesis given a single image, without the need of pretraining, annotation, or fine-tuning. Pix2NeRF can be trained with natural images – without explicit 3D supervision, in an end-to-end fashion. Inspired by prior works [31,40,46],",
364
+ "bbox": [
365
+ 75,
366
+ 795,
367
+ 470,
368
+ 902
369
+ ],
370
+ "page_idx": 1
371
+ },
372
+ {
373
+ "type": "text",
374
+ "text": "we introduce an encoder mapping a given image to a latent space. We jointly optimize several objectives. First, we train $\\pi$ -GAN and the added encoder to map generated images back to the latent space. Second, we adapt the encoder coupled with $\\pi$ -GAN's generator to form a conditional GAN, trained with both adversarial and reconstruction loss. We show that merely doing $\\pi$ -GAN inversion is challenging and insufficient to complete our goal, and adaptation is important for calibrating learned representations of the encoder and generator. Our framework is able to instantiate NeRF in a single shot manner while naturally preserving the ability to synthesize novel views with high fidelity, comparable to state-of-the-art generative NeRF models.",
375
+ "bbox": [
376
+ 496,
377
+ 90,
378
+ 893,
379
+ 287
380
+ ],
381
+ "page_idx": 1
382
+ },
383
+ {
384
+ "type": "text",
385
+ "text": "Contributions.",
386
+ "text_level": 1,
387
+ "bbox": [
388
+ 500,
389
+ 300,
390
+ 607,
391
+ 314
392
+ ],
393
+ "page_idx": 1
394
+ },
395
+ {
396
+ "type": "list",
397
+ "sub_type": "text",
398
+ "list_items": [
399
+ "- We propose Pix2NeRF, the first unsupervised single-shot NeRF model, that can learn scene radiance fields from images without 3D, multi-view, or pose supervision.",
400
+ "- Our pipeline is the first work on conditional GAN-based NeRF, or in general, NeRF-based GAN inversion. We expect our pipeline to become a strong baseline for future works towards these research directions.",
401
+ "- We demonstrate the superiority of our method compared with naive GAN inversion methods and conduct an extensive ablation studies to justify our design choices."
402
+ ],
403
+ "bbox": [
404
+ 496,
405
+ 325,
406
+ 893,
407
+ 487
408
+ ],
409
+ "page_idx": 1
410
+ },
411
+ {
412
+ "type": "text",
413
+ "text": "2. Related works",
414
+ "text_level": 1,
415
+ "bbox": [
416
+ 500,
417
+ 501,
418
+ 643,
419
+ 517
420
+ ],
421
+ "page_idx": 1
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "Our work can be classified as a category-specific 3D-aware neural novel view synthesis method, which is strongly based on NeRF [23] and $\\pi$ -GAN [2].",
426
+ "bbox": [
427
+ 496,
428
+ 527,
429
+ 893,
430
+ 574
431
+ ],
432
+ "page_idx": 1
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "Neural scene representations. The field of encoding a scene into neural networks has proven to be a promising research direction. This includes, but is not limited to: parameterizing the geometry of a scene via signed distance functions or occupancy [6, 22, 28, 36], encoding both geometry and appearance [18, 26, 33, 38], etc. Recently, the impressive performance of Neural Radiance Fields (NeRF) [23] has drawn extensive attention to this field. It encodes a scene as a multivariable vector-valued function $f(x,y,z,\\theta ,\\phi) = (r,g,b,\\sigma)$ approximated by MLP, where $(x,y,z)$ denotes spatial coordinates, $(\\theta ,\\phi)$ denotes viewing direction, and $(r,g,b,\\sigma)$ corresponds to color and volume density. This function is then called repeatedly by any of the volume rendering techniques to produce novel views. The outstanding performance of NeRF inspired follow-up works to extend it towards alternative settings, such as training from unconstrained images [20], training without poses [21, 43], etc.",
437
+ "bbox": [
438
+ 496,
439
+ 585,
440
+ 895,
441
+ 843
442
+ ],
443
+ "page_idx": 1
444
+ },
445
+ {
446
+ "type": "text",
447
+ "text": "NeRF-based GANs. Following the developments of GANs and NeRFs, several works tried combining them to form generative models producing NeRFs. One of the first attempts",
448
+ "bbox": [
449
+ 496,
450
+ 854,
451
+ 893,
452
+ 902
453
+ ],
454
+ "page_idx": 1
455
+ },
456
+ {
457
+ "type": "page_number",
458
+ "text": "2",
459
+ "bbox": [
460
+ 478,
461
+ 924,
462
+ 491,
463
+ 936
464
+ ],
465
+ "page_idx": 1
466
+ },
467
+ {
468
+ "type": "text",
469
+ "text": "in this direction is GRAF [35]; it performs category-specific radiance fields generation by conditioning NeRF on shape and appearance code. Following the NeRF pipeline, the generator can synthesize an image given a random code and a view direction. The generated image is passed into the discriminator together with real images, thus implementing a GAN. GRAF is an unsupervised model, since it does not require ground truth camera poses; therefore, it can be trained using \"in the wild\" images. This is done by introducing a pose prior relative to a canonical view frame of reference, e.g., Gaussian distribution to describe head pitch and yaw relative to a front face view. $\\pi$ -GAN [2] is similar to GRAF, but conditions on a single latent code and utilizes FiLM [10, 30] SIREN [37] layers instead of simple MLPs. More recently, several works improved synthesis quality with high resolutions [14], better 3D shapes [45], and precise control [25, 48].",
470
+ "bbox": [
471
+ 75,
472
+ 90,
473
+ 472,
474
+ 349
475
+ ],
476
+ "page_idx": 2
477
+ },
478
+ {
479
+ "type": "text",
480
+ "text": "Few-shot NeRF. The main property of NeRFs is the ability to bake in a 3D scene into MLP weights. However, this is also a limitation since it must be retrained for each new scene, which takes a lot of time and money. To lift this constraint, PixelNeRF [46] and GRF [40] condition MLPs on pixel-aligned features extracted by a CNN encoder. During the novel view rendering phase, 3D points along the rays are projected onto the extracted feature grid to get aligned features, then fed into an MLP with the points. More recently, CodeNeRF [16] suggested training NeRF with learnable latent codes and utilizing test-time optimization to find the best latent codes (and camera poses) given an image. However, these methods still require multi-view supervision during training, which constrains their usage in real-world settings, where multi-view datasets are challenging to collect.",
481
+ "bbox": [
482
+ 75,
483
+ 380,
484
+ 472,
485
+ 608
486
+ ],
487
+ "page_idx": 2
488
+ },
489
+ {
490
+ "type": "text",
491
+ "text": "Therefore, single-shot NeRF without additional supervision (e.g., 3D objects, multi-view image collections) remains an under-explored research direction. In this paper, we bridge this gap by incorporating an auto-encoder architecture into an existing $\\pi$ -GAN NeRF framework to obtain a conditional single-shot NeRF model, retaining the best properties of all components. We note that the concurrent work [31] shares similar ideas. The key differences are a different backbone network (HoloGAN [24]) and its lack of 3D consistency, which the authors point out. Contrary, we utilize the newly-proposed NeRF-based GAN method called $\\pi$ -GAN [2], which naturally provides stronger 3D consistency by design. We demonstrate that merely applying the approach of [31] is insufficient to obtain an accurate mapping from image to latent space with $\\pi$ -GAN as a backbone. Nevertheless, our framework can be viewed as [31] specifically improved towards NeRF-based GAN models, or CodeNeRF [16] combined with GANs.",
492
+ "bbox": [
493
+ 75,
494
+ 628,
495
+ 472,
496
+ 901
497
+ ],
498
+ "page_idx": 2
499
+ },
500
+ {
501
+ "type": "text",
502
+ "text": "3. Method",
503
+ "text_level": 1,
504
+ "bbox": [
505
+ 500,
506
+ 89,
507
+ 591,
508
+ 106
509
+ ],
510
+ "page_idx": 2
511
+ },
512
+ {
513
+ "type": "text",
514
+ "text": "Pix2NeRF consists of three neural networks, a Generator $G$ , a Discriminator $D$ , together forming a Generative Adversarial Network, and an Encoder $E$ forming an auto-encoder together with $G$ . The generator is conditioned on the output view pose $d$ and a latent code $z$ , broadly describing content variations, such as color or shape. It employs 3D-volume rendering techniques and outputs a single parameterized scene view as RGB image $I$ . The discriminator $D$ is a CNN, which simultaneously predicts distribution origin of the input RGB image via logit $l$ (real – “in the wild”, or fake – generated by $G$ ), and the corresponding scene pose $d$ . The encoder $E$ is a CNN tasked to map an input image onto the latent manifold, learned by $G$ , and at the same time predict the input's pose:",
515
+ "bbox": [
516
+ 496,
517
+ 114,
518
+ 893,
519
+ 313
520
+ ],
521
+ "page_idx": 2
522
+ },
523
+ {
524
+ "type": "equation",
525
+ "text": "\n$$\nG: z, d \\to I\n$$\n",
526
+ "text_format": "latex",
527
+ "bbox": [
528
+ 647,
529
+ 323,
530
+ 740,
531
+ 337
532
+ ],
533
+ "page_idx": 2
534
+ },
535
+ {
536
+ "type": "equation",
537
+ "text": "\n$$\nD: I \\rightarrow l, d \\tag {1}\n$$\n",
538
+ "text_format": "latex",
539
+ "bbox": [
540
+ 650,
541
+ 340,
542
+ 890,
543
+ 357
544
+ ],
545
+ "page_idx": 2
546
+ },
547
+ {
548
+ "type": "equation",
549
+ "text": "\n$$\nE: I \\to z, d.\n$$\n",
550
+ "text_format": "latex",
551
+ "bbox": [
552
+ 651,
553
+ 361,
554
+ 741,
555
+ 375
556
+ ],
557
+ "page_idx": 2
558
+ },
559
+ {
560
+ "type": "text",
561
+ "text": "Functionally, Pix2NeRF extends $\\pi$ -GAN [2] with the encoder $E$ trained jointly with the GAN to allow mapping images back to the latent manifold. Because the encoder $E$ disentangles the content $z$ and the pose $d$ of the input $I$ , content can be further used to condition the $\\pi$ -GAN generator $G$ and obtain novel views by varying the rendered pose $d$ .",
562
+ "bbox": [
563
+ 496,
564
+ 387,
565
+ 893,
566
+ 477
567
+ ],
568
+ "page_idx": 2
569
+ },
570
+ {
571
+ "type": "text",
572
+ "text": "Having defined network modules, we turn to specifying the inputs and outputs of the modules. The latent code $z$ comes from a simple prior distribution $p_{z}$ (multivariate uniform in our case) - it makes sampling random codes $z_{\\mathrm{rand}}$ easy and lets us design $E$ such that it can encode any input image $I$ into some $z_{\\mathrm{pred}}$ within the support of $p_{z}$ . Following prior art [2,35], the unsupervised setting we operate in assumes we have access to the prior distribution of poses $p_{d}$ of real images $I_{\\mathrm{real}} \\sim p_{\\mathrm{real}}$ used for training. Depending on the dataset and choice of pose coordinates, it can be multivariate Gaussian with diagonal covariance (for images of faces) or uniform on a (hemi-)sphere (for images of cars). Parameters of this distribution must be known to allow easy sampling random poses $d_{\\mathrm{rand}}$ for the generator, and that $p_{d}$ is representative of poses of real images $I_{\\mathrm{real}}$ .",
573
+ "bbox": [
574
+ 496,
575
+ 478,
576
+ 893,
577
+ 703
578
+ ],
579
+ "page_idx": 2
580
+ },
581
+ {
582
+ "type": "text",
583
+ "text": "Simply training the encoder $E$ to map an image $I$ into GAN latent space (as in Stage 1 of [31]) simultaneously with training GAN is challenging. This is because the encoder needs to correctly map images of the same scene from different views to a single latent code. This is especially hard when these views contain variations of fine details due to occlusions. As seen from Eq. 1 and the design Fig. 2, our method disentangles latent representation of image mapped by the encoder and generator input into content $z$ and pose $d$ , which undergo separate treatment.",
584
+ "bbox": [
585
+ 496,
586
+ 704,
587
+ 893,
588
+ 854
589
+ ],
590
+ "page_idx": 2
591
+ },
592
+ {
593
+ "type": "text",
594
+ "text": "Given an input image, Pix2NeRF disentangles pose and content and produces a radiance field of the content, which is (1) consistent with the input under the disentangled pose",
595
+ "bbox": [
596
+ 496,
597
+ 854,
598
+ 893,
599
+ 902
600
+ ],
601
+ "page_idx": 2
602
+ },
603
+ {
604
+ "type": "page_number",
605
+ "text": "3",
606
+ "bbox": [
607
+ 478,
608
+ 924,
609
+ 491,
610
+ 936
611
+ ],
612
+ "page_idx": 2
613
+ },
614
+ {
615
+ "type": "image",
616
+ "img_path": "images/8df12082cf8d982f7ff6aa0c016f4dbe4f6c50014f577a99bc620788443e0c04.jpg",
617
+ "image_caption": [
618
+ "Figure 2. Overview of building blocks and objectives, used in Pix2NeRF. GAN objectives follow $\\pi$ -GAN [2] and ensure that NeRF outputs match the distribution of real images $p_{\\mathrm{real}}$ under the latent prior $p_z$ and pose prior $p_d$ . Reconstruction and GAN inversion objectives ensure calibrated latent representations, such that $E$ and $G$ can operate as an auto-encoder, similar to [31]. The conditional adversarial objective enables learning better representations without explicit pose supervision. Legend: green - trained module, blue - frozen, gradient - warm-up."
619
+ ],
620
+ "image_footnote": [],
621
+ "bbox": [
622
+ 99,
623
+ 90,
624
+ 867,
625
+ 224
626
+ ],
627
+ "page_idx": 3
628
+ },
629
+ {
630
+ "type": "text",
631
+ "text": "and (2) consistent and realistic under different poses from $p_d$ . To achieve these properties, we devise several training objectives for (1) generator, (2) discriminator, (3) GAN inversion, (4) reconstruction, and (5) conditional adversarial training.",
632
+ "bbox": [
633
+ 75,
634
+ 333,
635
+ 470,
636
+ 407
637
+ ],
638
+ "page_idx": 3
639
+ },
640
+ {
641
+ "type": "text",
642
+ "text": "These objectives are used to compute gradients for parameters of $G$ , $D$ , and $E$ within a single optimization process. However, certain parts remain \"frozen\" during optimizer updates (such as $G$ during $D$ updates and vice-versa); we denote them with an asterisk in equations (e.g., $G^{*}$ ) and blue color in Fig. 2. We empirically find that training encoder from the start has a detrimental effect on the whole pipeline and employ a warm-up strategy (denoted with green-blue transitions), explained further.",
643
+ "bbox": [
644
+ 75,
645
+ 411,
646
+ 473,
647
+ 547
648
+ ],
649
+ "page_idx": 3
650
+ },
651
+ {
652
+ "type": "text",
653
+ "text": "3.1. GAN generator objective",
654
+ "text_level": 1,
655
+ "bbox": [
656
+ 76,
657
+ 563,
658
+ 307,
659
+ 579
660
+ ],
661
+ "page_idx": 3
662
+ },
663
+ {
664
+ "type": "text",
665
+ "text": "The generator is trained to \"fool\" the discriminator by serving it progressively realistic images. Pix2NeRF follows the same procedure of training the generator as $\\pi$ -GAN: it samples latent codes $z_{\\mathrm{rand}} \\sim p_z$ and random poses $d_{\\mathrm{rand}} \\sim p_d$ in pairs, which are then passed through the generator to obtain fake generated images:",
666
+ "bbox": [
667
+ 75,
668
+ 589,
669
+ 468,
670
+ 679
671
+ ],
672
+ "page_idx": 3
673
+ },
674
+ {
675
+ "type": "equation",
676
+ "text": "\n$$\nI _ {\\text {g e n}} = G \\left(z _ {\\text {r a n d}}, d _ {\\text {r a n d}}\\right), \\tag {2}\n$$\n",
677
+ "text_format": "latex",
678
+ "bbox": [
679
+ 191,
680
+ 696,
681
+ 468,
682
+ 714
683
+ ],
684
+ "page_idx": 3
685
+ },
686
+ {
687
+ "type": "text",
688
+ "text": "which are further fed into the frozen discriminator:",
689
+ "bbox": [
690
+ 76,
691
+ 729,
692
+ 413,
693
+ 744
694
+ ],
695
+ "page_idx": 3
696
+ },
697
+ {
698
+ "type": "equation",
699
+ "text": "\n$$\nl _ {\\text {g e n}}, d _ {\\text {g e n}} = D ^ {*} \\left(I _ {\\text {g e n}}\\right). \\tag {3}\n$$\n",
700
+ "text_format": "latex",
701
+ "bbox": [
702
+ 194,
703
+ 762,
704
+ 468,
705
+ 779
706
+ ],
707
+ "page_idx": 3
708
+ },
709
+ {
710
+ "type": "text",
711
+ "text": "Following [2], another component helpful to the stability and performance of GAN training is MSE supervision of predicted poses $d_{\\mathrm{gen}}$ of images generated with $d_{\\mathrm{rand}}$ . It penalizes the generator if the image pose recovered by the discriminator does not correspond to the sampled pose, thus setting the goal of learning a \"canonical\" 3D space. This is especially helpful if the pose distribution of real data is",
712
+ "bbox": [
713
+ 75,
714
+ 794,
715
+ 470,
716
+ 900
717
+ ],
718
+ "page_idx": 3
719
+ },
720
+ {
721
+ "type": "text",
722
+ "text": "noisy, such as seen in CelebA [19].",
723
+ "bbox": [
724
+ 500,
725
+ 333,
726
+ 733,
727
+ 349
728
+ ],
729
+ "page_idx": 3
730
+ },
731
+ {
732
+ "type": "equation",
733
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {G A N}} (G) = \\underset { \\begin{array}{c} z _ {\\text {r a n d}} \\sim p _ {z} \\\\ d _ {\\text {r a n d}} \\sim p _ {d} \\end{array} } {\\mathbb {E}} \\left[ \\text {s o f t p l u s} (- l _ {\\text {g e n}}) + \\right. \\tag {4}\n$$\n",
734
+ "text_format": "latex",
735
+ "bbox": [
736
+ 539,
737
+ 359,
738
+ 890,
739
+ 398
740
+ ],
741
+ "page_idx": 3
742
+ },
743
+ {
744
+ "type": "equation",
745
+ "text": "\n$$\n\\lambda_ {\\mathrm {p o s}} \\left\\| d _ {\\mathrm {r a n d}} - d _ {\\mathrm {g e n}} \\right\\| _ {2} ^ {2} \\biggr ],\n$$\n",
746
+ "text_format": "latex",
747
+ "bbox": [
748
+ 694,
749
+ 398,
750
+ 851,
751
+ 425
752
+ ],
753
+ "page_idx": 3
754
+ },
755
+ {
756
+ "type": "text",
757
+ "text": "where $\\lambda_{\\mathrm{pos}}$ is a tuned weighting factor.",
758
+ "bbox": [
759
+ 500,
760
+ 436,
761
+ 758,
762
+ 452
763
+ ],
764
+ "page_idx": 3
765
+ },
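For readers implementing Sec. 3.1, a minimal PyTorch-style sketch of this generator objective is given below. The interfaces are assumptions for illustration only, not the authors' released code: `G(z, d)` is assumed to render an image batch, and `D(img)` to return a realness logit and a pose estimate.

```python
# Hedged sketch of the generator objective (Eqs. 2-4), assuming G(z, d) -> image
# and D(img) -> (logit, pose); D is kept frozen (D*) during this update.
import torch.nn.functional as F

def generator_loss(G, D, z_rand, d_rand, lambda_pos=15.0):
    img_gen = G(z_rand, d_rand)                            # Eq. (2)
    l_gen, d_gen = D(img_gen)                              # Eq. (3)
    adv = F.softplus(-l_gen).mean()                        # non-saturating GAN term
    pose = F.mse_loss(d_gen, d_rand)                       # MSE pose supervision
    return adv + lambda_pos * pose                         # Eq. (4)
```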
766
+ {
767
+ "type": "text",
768
+ "text": "3.2. GAN discriminator objective",
769
+ "text_level": 1,
770
+ "bbox": [
771
+ 500,
772
+ 462,
773
+ 758,
774
+ 479
775
+ ],
776
+ "page_idx": 3
777
+ },
778
+ {
779
+ "type": "text",
780
+ "text": "The discriminator is trained to distinguish between the generated fake samples and real data sampled from the dataset. Pix2NeRF follows the exact procedure of training the discriminator in $\\pi$ -GAN: it samples latent codes $z_{\\mathrm{rand}} \\sim p_z$ and random poses $d_{\\mathrm{rand}} \\sim p_d$ in pairs, which are then passed through the frozen generator to obtain fake generated images:",
781
+ "bbox": [
782
+ 498,
783
+ 486,
784
+ 893,
785
+ 592
786
+ ],
787
+ "page_idx": 3
788
+ },
789
+ {
790
+ "type": "equation",
791
+ "text": "\n$$\nI _ {\\text {g e n}} = G ^ {*} \\left(z _ {\\text {r a n d}}, d _ {\\text {r a n d}}\\right). \\tag {5}\n$$\n",
792
+ "text_format": "latex",
793
+ "bbox": [
794
+ 611,
795
+ 604,
796
+ 890,
797
+ 622
798
+ ],
799
+ "page_idx": 3
800
+ },
801
+ {
802
+ "type": "text",
803
+ "text": "The discriminator is then trained using these generated images $I_{\\mathrm{gen}}$ and real images $I_{\\mathrm{real}} \\sim p_{\\mathrm{real}}$ :",
804
+ "bbox": [
805
+ 498,
806
+ 633,
807
+ 893,
808
+ 666
809
+ ],
810
+ "page_idx": 3
811
+ },
812
+ {
813
+ "type": "equation",
814
+ "text": "\n$$\nl _ {\\text {r e a l}}, d _ {\\text {r e a l}} = D \\left(I _ {\\text {r e a l}}\\right), \\tag {6}\n$$\n",
815
+ "text_format": "latex",
816
+ "bbox": [
817
+ 617,
818
+ 678,
819
+ 890,
820
+ 700
821
+ ],
822
+ "page_idx": 3
823
+ },
824
+ {
825
+ "type": "equation",
826
+ "text": "\n$$\nl _ {\\mathrm {g e n}}, d _ {\\mathrm {g e n}} = D (I _ {\\mathrm {g e n}}).\n$$\n",
827
+ "text_format": "latex",
828
+ "bbox": [
829
+ 622,
830
+ 698,
831
+ 769,
832
+ 715
833
+ ],
834
+ "page_idx": 3
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "The discriminator objective modified to take into account MSE supervision over the known pose can then be formulated as follows:",
839
+ "bbox": [
840
+ 498,
841
+ 727,
842
+ 893,
843
+ 770
844
+ ],
845
+ "page_idx": 3
846
+ },
847
+ {
848
+ "type": "equation",
849
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {G A N}} (D) = \\underset {I _ {\\text {r e a l}} \\sim p _ {\\text {r e a l}}} {\\mathbb {E}} \\left[ \\text {s o f t p l u s} (- l _ {\\text {r e a l}}) \\right] +\n$$\n",
850
+ "text_format": "latex",
851
+ "bbox": [
852
+ 524,
853
+ 780,
854
+ 831,
855
+ 806
856
+ ],
857
+ "page_idx": 3
858
+ },
859
+ {
860
+ "type": "equation",
861
+ "text": "\n$$\n\\mathbb {E} _ {\\substack {z _ {\\text {rand}} \\sim p _ {z} \\\\ d _ {\\text {rand}} \\sim p _ {d}}} \\left[ \\text {softplus} \\left(l _ {\\text {gen}}\\right) + \\right. \\tag{7}\n$$\n",
862
+ "text_format": "latex",
863
+ "bbox": [
864
+ 619,
865
+ 809,
866
+ 890,
867
+ 845
868
+ ],
869
+ "page_idx": 3
870
+ },
871
+ {
872
+ "type": "equation",
873
+ "text": "\n$$\n\\left. \\lambda_ {\\mathrm {p o s}} \\left\\| d _ {\\mathrm {r a n d}} - d _ {\\mathrm {g e n}} \\right\\| _ {2} ^ {2} \\right],\n$$\n",
874
+ "text_format": "latex",
875
+ "bbox": [
876
+ 686,
877
+ 848,
878
+ 844,
879
+ 875
880
+ ],
881
+ "page_idx": 3
882
+ },
883
+ {
884
+ "type": "text",
885
+ "text": "where $\\lambda_{\\mathrm{pos}}$ is a tuned weighting factor.",
886
+ "bbox": [
887
+ 500,
888
+ 886,
889
+ 758,
890
+ 901
891
+ ],
892
+ "page_idx": 3
893
+ },
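A matching sketch of the discriminator update (Eqs. 5-7) follows; as above, the module interfaces are assumed for illustration, and the generator is detached so that only $D$ receives gradients.

```python
# Hedged sketch of the discriminator objective (Eqs. 5-7); G is frozen (G*).
import torch
import torch.nn.functional as F

def discriminator_loss(G, D, img_real, z_rand, d_rand, lambda_pos=15.0):
    with torch.no_grad():
        img_gen = G(z_rand, d_rand)                        # Eq. (5), frozen generator
    l_real, d_real = D(img_real)                           # Eq. (6)
    l_gen, d_gen = D(img_gen)
    adv = F.softplus(-l_real).mean() + F.softplus(l_gen).mean()
    pose = F.mse_loss(d_gen, d_rand)                       # pose MSE on generated images
    return adv + lambda_pos * pose                         # Eq. (7)
```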
894
+ {
895
+ "type": "page_number",
896
+ "text": "4",
897
+ "bbox": [
898
+ 478,
899
+ 924,
900
+ 491,
901
+ 935
902
+ ],
903
+ "page_idx": 3
904
+ },
905
+ {
906
+ "type": "text",
907
+ "text": "3.3. GAN inversion objective",
908
+ "text_level": 1,
909
+ "bbox": [
910
+ 76,
911
+ 90,
912
+ 302,
913
+ 107
914
+ ],
915
+ "page_idx": 4
916
+ },
917
+ {
918
+ "type": "text",
919
+ "text": "The encoder $E$ is jointly optimized with the discriminator $D$ and reuses $I_{\\mathrm{gen}}$ computed for GAN discriminator objective Eq. (5):",
920
+ "bbox": [
921
+ 76,
922
+ 113,
923
+ 470,
924
+ 159
925
+ ],
926
+ "page_idx": 4
927
+ },
928
+ {
929
+ "type": "equation",
930
+ "text": "\n$$\nz _ {\\text {p r e d}}, d _ {\\text {p r e d}} = E \\left(I _ {\\text {g e n}}\\right). \\tag {8}\n$$\n",
931
+ "text_format": "latex",
932
+ "bbox": [
933
+ 192,
934
+ 170,
935
+ 468,
936
+ 186
937
+ ],
938
+ "page_idx": 4
939
+ },
940
+ {
941
+ "type": "text",
942
+ "text": "This objective aims to ensure consistency between the sampled content and pose and those extracted from the generated image by the encoder. This is done using the MSE loss:",
943
+ "bbox": [
944
+ 76,
945
+ 196,
946
+ 470,
947
+ 243
948
+ ],
949
+ "page_idx": 4
950
+ },
951
+ {
952
+ "type": "equation",
953
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {G A N} ^ {- 1}} (E) = \\underset { \\begin{array}{c} z _ {\\text {r a n d}} \\sim p _ {z} \\\\ d _ {\\text {r a n d}} \\sim p _ {d} \\end{array} } {\\mathbb {E}} \\left[ \\| z _ {\\text {p r e d}} - z _ {\\text {r a n d}} \\| _ {2} ^ {2} + \\right. \\tag {9}\n$$\n",
954
+ "text_format": "latex",
955
+ "bbox": [
956
+ 116,
957
+ 252,
958
+ 468,
959
+ 287
960
+ ],
961
+ "page_idx": 4
962
+ },
963
+ {
964
+ "type": "equation",
965
+ "text": "\n$$\n\\left. \\left\\| d _ {\\mathrm {p r e d}} - d _ {\\mathrm {r a n d}} \\right\\| _ {2} ^ {2} \\right].\n$$\n",
966
+ "text_format": "latex",
967
+ "bbox": [
968
+ 290,
969
+ 287,
970
+ 419,
971
+ 309
972
+ ],
973
+ "page_idx": 4
974
+ },
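The GAN inversion objective of Eqs. (8)-(9) reduces to two MSE terms on the encoder outputs; a short sketch under the same assumed interfaces:

```python
# Hedged sketch of the GAN inversion objective (Eqs. 8-9) for the encoder E,
# reusing the images generated for the discriminator update (Eq. 5).
import torch.nn.functional as F

def gan_inversion_loss(E, img_gen, z_rand, d_rand):
    z_pred, d_pred = E(img_gen)                            # Eq. (8)
    return F.mse_loss(z_pred, z_rand) + F.mse_loss(d_pred, d_rand)   # Eq. (9)
```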
975
+ {
976
+ "type": "text",
977
+ "text": "Up until now, the objectives only ensured a generative mapping from the latent space to radiance fields and some basic form of consistency to learn auto-encoder. However, our experiments show that optimizing just these three objectives does not produce a reasonable mapping. Therefore, Pix2NeRF adds two more objectives to address reconstruction quality and 3D consistency in the unsupervised setting.",
978
+ "bbox": [
979
+ 76,
980
+ 316,
981
+ 470,
982
+ 422
983
+ ],
984
+ "page_idx": 4
985
+ },
986
+ {
987
+ "type": "text",
988
+ "text": "3.4. Reconstruction objective",
989
+ "text_level": 1,
990
+ "bbox": [
991
+ 76,
992
+ 431,
993
+ 303,
994
+ 448
995
+ ],
996
+ "page_idx": 4
997
+ },
998
+ {
999
+ "type": "text",
1000
+ "text": "While the GAN inversion objective promotes consistency in latent space, nothing so far directly promotes consistency in the image space. To this end, we condition the generator $G$ on a real image by extracting its latent code and pose prediction using the encoder, and then render its view using the predicted pose:",
1001
+ "bbox": [
1002
+ 76,
1003
+ 455,
1004
+ 468,
1005
+ 546
1006
+ ],
1007
+ "page_idx": 4
1008
+ },
1009
+ {
1010
+ "type": "equation",
1011
+ "text": "\n$$\nz _ {\\text {p r e d}}, d _ {\\text {p r e d}} = E \\left(I _ {\\text {r e a l}}\\right) \\tag {10}\n$$\n",
1012
+ "text_format": "latex",
1013
+ "bbox": [
1014
+ 165,
1015
+ 556,
1016
+ 468,
1017
+ 580
1018
+ ],
1019
+ "page_idx": 4
1020
+ },
1021
+ {
1022
+ "type": "equation",
1023
+ "text": "\n$$\nI _ {\\text {r e c o n}} = G \\left(z _ {\\text {p r e d}}, d _ {\\text {p r e d}}\\right).\n$$\n",
1024
+ "text_format": "latex",
1025
+ "bbox": [
1026
+ 204,
1027
+ 575,
1028
+ 377,
1029
+ 592
1030
+ ],
1031
+ "page_idx": 4
1032
+ },
1033
+ {
1034
+ "type": "text",
1035
+ "text": "Ideally, we expect to get back the original image. However, using MSE loss alone in the image space is known to promote structural inconsistencies and blur. In line with [31], we employ Structural Similarity Index Measure loss (SSIM [42]) with weighting factor $\\lambda_{\\mathrm{ssim}}$ and a perceptual loss (VGG [44]) with weighting factor $\\lambda_{\\mathrm{vgg}}$ . We can therefore aggregate the reconstruction loss as follows:",
1036
+ "bbox": [
1037
+ 76,
1038
+ 601,
1039
+ 468,
1040
+ 705
1041
+ ],
1042
+ "page_idx": 4
1043
+ },
1044
+ {
1045
+ "type": "equation",
1046
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {r e c o n}} (G, E) = \\underset {I _ {\\mathrm {r e a l}} \\sim p _ {\\mathrm {r e a l}}} {\\mathbb {E}} \\left[ \\| I _ {\\mathrm {r e c o n}} - I _ {\\mathrm {r e a l}} \\| _ {2} ^ {2} + \\right.\n$$\n",
1047
+ "text_format": "latex",
1048
+ "bbox": [
1049
+ 99,
1050
+ 714,
1051
+ 419,
1052
+ 744
1053
+ ],
1054
+ "page_idx": 4
1055
+ },
1056
+ {
1057
+ "type": "equation",
1058
+ "text": "\n$$\n\\lambda_ {\\text {s s i m}} \\mathcal {L} _ {\\text {s s i m}} \\left(I _ {\\text {r e c o n}}, I _ {\\text {r e a l}}\\right) + \\tag {11}\n$$\n",
1059
+ "text_format": "latex",
1060
+ "bbox": [
1061
+ 233,
1062
+ 746,
1063
+ 468,
1064
+ 763
1065
+ ],
1066
+ "page_idx": 4
1067
+ },
1068
+ {
1069
+ "type": "equation",
1070
+ "text": "\n$$\n\\left. \\lambda_ {\\mathrm {v g g}} \\mathcal {L} _ {\\mathrm {v g g}} \\left(I _ {\\mathrm {r e c o n}}, I _ {\\mathrm {r e a l}}\\right) \\right].\n$$\n",
1071
+ "text_format": "latex",
1072
+ "bbox": [
1073
+ 246,
1074
+ 766,
1075
+ 418,
1076
+ 792
1077
+ ],
1078
+ "page_idx": 4
1079
+ },
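A sketch of the reconstruction objective is shown below. The SSIM and perceptual terms are passed in as callables (`ssim_loss`, `vgg_loss`), which are placeholders for the SSIM [42] and VGG-based [44] losses rather than references to any specific library API.

```python
# Hedged sketch of the reconstruction objective (Eqs. 10-11); ssim_loss and
# vgg_loss are assumed callables returning scalar losses (e.g., 1 - SSIM and a
# perceptual distance between the two images).
import torch.nn.functional as F

def reconstruction_loss(G, E, img_real, ssim_loss, vgg_loss,
                        lambda_ssim=1.0, lambda_vgg=1.0):
    z_pred, d_pred = E(img_real)                           # Eq. (10)
    img_recon = G(z_pred, d_pred)                          # render from the predicted pose
    return (F.mse_loss(img_recon, img_real)
            + lambda_ssim * ssim_loss(img_recon, img_real)
            + lambda_vgg * vgg_loss(img_recon, img_real))  # Eq. (11)
```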
1080
+ {
1081
+ "type": "text",
1082
+ "text": "3.5. Conditional adversarial objective",
1083
+ "text_level": 1,
1084
+ "bbox": [
1085
+ 76,
1086
+ 801,
1087
+ 367,
1088
+ 816
1089
+ ],
1090
+ "page_idx": 4
1091
+ },
1092
+ {
1093
+ "type": "text",
1094
+ "text": "The reconstruction objective promotes good reconstruction quality for just one view extracted by the encoder $E$ . This may push the combination of networks towards either predicting trivial poses or unrealistic reconstructions for other poses from $p_d$ . To alleviate that, we further apply an",
1095
+ "bbox": [
1096
+ 76,
1097
+ 825,
1098
+ 470,
1099
+ 901
1100
+ ],
1101
+ "page_idx": 4
1102
+ },
1103
+ {
1104
+ "type": "text",
1105
+ "text": "adversarial objective while conditioning the generator on an image $I_{\\mathrm{real}}$ when it is rendered from random poses. Reusing results from Eq. (10),",
1106
+ "bbox": [
1107
+ 498,
1108
+ 90,
1109
+ 890,
1110
+ 136
1111
+ ],
1112
+ "page_idx": 4
1113
+ },
1114
+ {
1115
+ "type": "equation",
1116
+ "text": "\n$$\nl _ {\\text {c o n d}}, d _ {\\text {c o n d}} = D ^ {*} \\left(G \\left(z _ {\\text {p r e d}}, d _ {\\text {r a n d}}\\right)\\right)\n$$\n",
1117
+ "text_format": "latex",
1118
+ "bbox": [
1119
+ 534,
1120
+ 142,
1121
+ 774,
1122
+ 159
1123
+ ],
1124
+ "page_idx": 4
1125
+ },
1126
+ {
1127
+ "type": "equation",
1128
+ "text": "\n$$\n\\mathcal {L} _ {\\text {c o n d}} (G, E) = \\underset { \\begin{array}{l} I _ {\\text {r e a l}} \\sim p _ {\\text {r e a l}} \\\\ d _ {\\text {r a n d}} \\sim p _ {d} \\end{array} } {\\mathbb {E}} \\left[ \\text {s o f t p l u s} (- l _ {\\text {c o n d}}) \\right]. \\tag {12}\n$$\n",
1129
+ "text_format": "latex",
1130
+ "bbox": [
1131
+ 527,
1132
+ 161,
1133
+ 890,
1134
+ 196
1135
+ ],
1136
+ "page_idx": 4
1137
+ },
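The conditional adversarial term only differs from the unconditional generator loss in where the latent code comes from; a brief sketch under the same assumed interfaces:

```python
# Hedged sketch of the conditional adversarial objective (Eq. 12): condition on a
# real image through E, render under a random pose, and score with the frozen D*.
import torch.nn.functional as F

def conditional_adversarial_loss(G, D, E, img_real, d_rand):
    z_pred, _ = E(img_real)                                # reuses Eq. (10)
    l_cond, _ = D(G(z_pred, d_rand))
    return F.softplus(-l_cond).mean()                      # Eq. (12)
```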
1138
+ {
1139
+ "type": "text",
1140
+ "text": "3.6. Encoder warm-up",
1141
+ "text_level": 1,
1142
+ "bbox": [
1143
+ 500,
1144
+ 202,
1145
+ 676,
1146
+ 218
1147
+ ],
1148
+ "page_idx": 4
1149
+ },
1150
+ {
1151
+ "type": "text",
1152
+ "text": "As pointed out in [31], reconstruction loss may easily dominate and cause the model overfitting towards input views while losing its ability to represent 3D. We, therefore, introduce a simple \"warm-up\" strategy to counter this issue. For the first half iterations of the training protocol, we freeze the encoder while optimizing reconstruction and conditional adversarial loss and optimize only the generator for these two objectives. This serves as a warm-up for the generator to roughly learn the correspondence between encoder outputs and encoded images. The encoder is then unfrozen, enabling further distillation of its learned representations.",
1153
+ "bbox": [
1154
+ 496,
1155
+ 224,
1156
+ 893,
1157
+ 390
1158
+ ],
1159
+ "page_idx": 4
1160
+ },
1161
+ {
1162
+ "type": "text",
1163
+ "text": "After the warm-up stage, the encoder and generator directly form a pre-trained auto-encoder capable of producing 3D representations close to ground truth, bypassing the cumbersome early-stage reconstruction objective, which is extremely hard to balance with GAN objectives. We show the necessity of this strategy and comparison with merely assigning a smaller weight for reconstruction loss in the ablation studies.",
1164
+ "bbox": [
1165
+ 496,
1166
+ 391,
1167
+ 893,
1168
+ 510
1169
+ ],
1170
+ "page_idx": 4
1171
+ },
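One plausible way to realize this warm-up is to detach the encoder outputs in the reconstruction and conditional adversarial objectives during the first half of training, so that only the generator learns from them while the encoder is still trained by the GAN inversion objective; the exact mechanism below is an assumption for illustration, not a statement of how the released code implements it.

```python
# Hedged sketch of the encoder warm-up (Sec. 3.6): detach E's outputs for the
# reconstruction / conditional objectives until half of training has elapsed.
def encode_for_reconstruction(E, img_real, iteration, total_iterations):
    z_pred, d_pred = E(img_real)
    if iteration < total_iterations // 2:                  # warm-up phase: E stays frozen here
        z_pred, d_pred = z_pred.detach(), d_pred.detach()
    return z_pred, d_pred
```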
1172
+ {
1173
+ "type": "text",
1174
+ "text": "3.7. Training and Inference",
1175
+ "text_level": 1,
1176
+ "bbox": [
1177
+ 500,
1178
+ 518,
1179
+ 714,
1180
+ 535
1181
+ ],
1182
+ "page_idx": 4
1183
+ },
1184
+ {
1185
+ "type": "text",
1186
+ "text": "The objectives mentioned above can be trained jointly; however, we optimize them in alternative iterations due to GPU memory constraints. The discriminator and GAN inversion objectives are optimized upon every iteration; the GAN generator objective is optimized on even iterations; reconstruction and conditional adversarial objectives are optimized jointly during odd iterations with weighting factor $\\lambda_{\\mathrm{recon}}$ :",
1187
+ "bbox": [
1188
+ 496,
1189
+ 542,
1190
+ 893,
1191
+ 662
1192
+ ],
1193
+ "page_idx": 4
1194
+ },
1195
+ {
1196
+ "type": "equation",
1197
+ "text": "\n$$\n\\mathcal {L} _ {\\text {o d d}} = \\mathcal {L} _ {\\text {c o n d}} + \\lambda_ {\\text {r e c o n}} \\mathcal {L} _ {\\text {r e c o n}}. \\tag {13}\n$$\n",
1198
+ "text_format": "latex",
1199
+ "bbox": [
1200
+ 591,
1201
+ 670,
1202
+ 890,
1203
+ 686
1204
+ ],
1205
+ "page_idx": 4
1206
+ },
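Putting the schedule together, a simplified training step might look as follows; `losses` is a hypothetical dict of closures wrapping the objectives sketched above, and the optimizers are standard torch.optim instances. Details such as warm-up gating are omitted.

```python
# Hedged sketch of the alternating schedule (Sec. 3.7): the discriminator and the
# GAN inversion objective every iteration, the GAN generator objective on even
# iterations, and Eq. (13) on odd iterations.
def training_step(it, losses, opt_G, opt_D, opt_E, lambda_recon=5.0):
    opt_D.zero_grad(); opt_E.zero_grad()
    (losses["discriminator"]() + losses["inversion"]()).backward()
    opt_D.step(); opt_E.step()

    opt_G.zero_grad()
    if it % 2 == 0:
        losses["generator"]().backward()                   # even iterations
    else:
        (losses["conditional"]()
         + lambda_recon * losses["reconstruction"]()).backward()   # Eq. (13), odd iterations
    opt_G.step()
```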
1207
+ {
1208
+ "type": "text",
1209
+ "text": "During the inference stage, Pix2NeRF only requires a single input image, which can be fed into the encoder $E$ and then generator $G$ , coupled with arbitrarily selected poses for novel view synthesis. At the same time, instead of obtaining the latent code $z$ from the encoder, it is possible to sample it from the prior distribution $p_{z}$ , to make the model synthesize novel samples like a $\\pi$ -GAN.",
1210
+ "bbox": [
1211
+ 496,
1212
+ 691,
1213
+ 890,
1214
+ 797
1215
+ ],
1216
+ "page_idx": 4
1217
+ },
1218
+ {
1219
+ "type": "text",
1220
+ "text": "4. Experiments",
1221
+ "text_level": 1,
1222
+ "bbox": [
1223
+ 500,
1224
+ 809,
1225
+ 630,
1226
+ 825
1227
+ ],
1228
+ "page_idx": 4
1229
+ },
1230
+ {
1231
+ "type": "text",
1232
+ "text": "4.1. Evaluation",
1233
+ "text_level": 1,
1234
+ "bbox": [
1235
+ 500,
1236
+ 834,
1237
+ 620,
1238
+ 848
1239
+ ],
1240
+ "page_idx": 4
1241
+ },
1242
+ {
1243
+ "type": "text",
1244
+ "text": "Datasets. We train and evaluate our pipeline on several 3D datasets listed below. CelebA [19] is a dataset of over 200k",
1245
+ "bbox": [
1246
+ 500,
1247
+ 869,
1248
+ 890,
1249
+ 900
1250
+ ],
1251
+ "page_idx": 4
1252
+ },
1253
+ {
1254
+ "type": "page_number",
1255
+ "text": "5",
1256
+ "bbox": [
1257
+ 480,
1258
+ 924,
1259
+ 488,
1260
+ 936
1261
+ ],
1262
+ "page_idx": 4
1263
+ },
1264
+ {
1265
+ "type": "image",
1266
+ "img_path": "images/9b8fc887f4777b10a53a9c91c6cb7bfacaca6895d4d2c0e4e5caa9b2aa824eec.jpg",
1267
+ "image_caption": [
1268
+ "input reconstruction",
1269
+ "novel views",
1270
+ "Figure 3. Reconstructed and novel views on CARLA [8], CelebA [19], and ShapeNet-SRN [4,38] chairs. See Appendix for more results."
1271
+ ],
1272
+ "image_footnote": [],
1273
+ "bbox": [
1274
+ 107,
1275
+ 114,
1276
+ 859,
1277
+ 585
1278
+ ],
1279
+ "page_idx": 5
1280
+ },
1281
+ {
1282
+ "type": "text",
1283
+ "text": "images of celebrity faces. We use its \"aligned\" version and apply center cropping to keep the face area roughly. We hold out 8k images as the test set. CARLA [8] contains 10k images of 16 car models rendered with Carla driving simulator with random textures. ShapeNet-SRN is a dataset hosted by the authors of SRN [38], from which we use the \"chairs\" split for the comparison with prior multi-view methods. The dataset contains 50 rendered views from ShapeNet [4] with Archimedean spiral camera poses for each of the 6591 instances. As the ShapeNet-SRN dataset does not include the lower hemisphere in its validation and test sets, we filter the training set to contain only the upper hemisphere as well.",
1284
+ "bbox": [
1285
+ 75,
1286
+ 659,
1287
+ 472,
1288
+ 840
1289
+ ],
1290
+ "page_idx": 5
1291
+ },
1292
+ {
1293
+ "type": "text",
1294
+ "text": "Evaluation metrics. Pix2NeRF is evaluated in two modes: unconditional, which assumes sampling directly from $p_z$ and $p_d$ , and conditional, which corresponds to using $z =$",
1295
+ "bbox": [
1296
+ 76,
1297
+ 854,
1298
+ 470,
1299
+ 902
1300
+ ],
1301
+ "page_idx": 5
1302
+ },
1303
+ {
1304
+ "type": "text",
1305
+ "text": "$E(I_{\\mathrm{real}})$ , $I_{\\mathrm{real}} \\sim p_{\\mathrm{real}}$ , while still sampling from $p_d$ . For \"in the wild\" datasets, as we do not possess multi-view ground truth images, we resort to reporting generative metrics: Inception Score (IS) [34], Frechet Inception Distance (FID) [15], and Kernel Inception Distance (KID) [1] with scaling factor $\\times 100$ following the steps of prior works [2,35] using the implementation [27]. To compare with multi-view-based novel view synthesis methods on Shapenet-SRN, we follow the evaluation protocols in pixelNeRF and CodeNeRF and report PSNR (Peak Signal to Noise Ratio) and SSIM (Structural Similarity Index Measure) [42].",
1306
+ "bbox": [
1307
+ 496,
1308
+ 659,
1309
+ 893,
1310
+ 825
1311
+ ],
1312
+ "page_idx": 5
1313
+ },
1314
+ {
1315
+ "type": "text",
1316
+ "text": "Technical details. We choose the latent code prior distribution $p_{z}$ as a multivariate uniform on $[-1, 1]$ . We build our model on top of the $\\pi$ -GAN implementation in PyTorch [29], re-using its released generator and discriminator architec",
1317
+ "bbox": [
1318
+ 496,
1319
+ 839,
1320
+ 893,
1321
+ 900
1322
+ ],
1323
+ "page_idx": 5
1324
+ },
1325
+ {
1326
+ "type": "page_number",
1327
+ "text": "6",
1328
+ "bbox": [
1329
+ 478,
1330
+ 924,
1331
+ 491,
1332
+ 936
1333
+ ],
1334
+ "page_idx": 5
1335
+ },
1336
+ {
1337
+ "type": "table",
1338
+ "img_path": "images/f84d39c5129f815a9ee245d09ce60b339616aa15b5a26071800881852e12647b.jpg",
1339
+ "table_caption": [],
1340
+ "table_footnote": [],
1341
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">64 × 64</td><td colspan=\"3\">128 × 128</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td></tr><tr><td>HoloGAN [24]</td><td>-</td><td>2.87</td><td>-</td><td>39.7</td><td>2.91</td><td>1.89</td></tr><tr><td>GRAF [35]</td><td>-</td><td>-</td><td>-</td><td>41.1</td><td>2.29</td><td>2.34</td></tr><tr><td>π-GAN [2]</td><td>5.15</td><td>0.09</td><td>2.28</td><td>14.7</td><td>0.39</td><td>2.62</td></tr><tr><td>Pix2NeRF unconditional</td><td>6.25</td><td>0.16</td><td>2.29</td><td>14.82</td><td>0.91</td><td>2.47</td></tr><tr><td>Pix2NeRF conditional</td><td>24.64</td><td>1.93</td><td>2.24</td><td>30.98</td><td>2.29</td><td>2.20</td></tr></table>",
1342
+ "bbox": [
1343
+ 80,
1344
+ 88,
1345
+ 467,
1346
+ 186
1347
+ ],
1348
+ "page_idx": 6
1349
+ },
1350
+ {
1351
+ "type": "table",
1352
+ "img_path": "images/3e74416a712fdc28d29f00e98a4eb647e650ae239628eda060e7a2c81af9eaed.jpg",
1353
+ "table_caption": [
1354
+ "Table 1. Quantitative results on CelebA [19]."
1355
+ ],
1356
+ "table_footnote": [],
1357
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">64 × 64</td><td colspan=\"3\">128 × 128</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td></tr><tr><td>HoloGAN [24]</td><td>134</td><td>9.70</td><td>-</td><td>67.5</td><td>3.95</td><td>3.52</td></tr><tr><td>GRAF [35]</td><td>30</td><td>0.91</td><td>-</td><td>41.7</td><td>2.43</td><td>3.70</td></tr><tr><td>π-GAN [2]</td><td>13.59</td><td>0.34</td><td>3.85</td><td>29.2</td><td>1.36</td><td>4.27</td></tr><tr><td>Pix2NeRF unconditional</td><td>10.54</td><td>0.37</td><td>3.95</td><td>27.23</td><td>1.43</td><td>4.38</td></tr><tr><td>Pix2NeRF conditional</td><td>12.06</td><td>0.44</td><td>3.81</td><td>38.51</td><td>2.37</td><td>3.89</td></tr></table>",
1358
+ "bbox": [
1359
+ 80,
1360
+ 229,
1361
+ 467,
1362
+ 328
1363
+ ],
1364
+ "page_idx": 6
1365
+ },
1366
+ {
1367
+ "type": "table",
1368
+ "img_path": "images/cfd40973f45b4673bc20272a93efb3aa43ac7db0ed3d20e940194270284cc19f.jpg",
1369
+ "table_caption": [
1370
+ "Table 2. Quantitative results on CARLA [8]."
1371
+ ],
1372
+ "table_footnote": [],
1373
+ "table_body": "<table><tr><td>Method</td><td colspan=\"2\">PSNR ↑</td><td>SSIM ↑</td></tr><tr><td>GRF* [40]</td><td colspan=\"2\">21.25</td><td>0.86</td></tr><tr><td>TCO* [39]</td><td colspan=\"2\">21.27</td><td>0.88</td></tr><tr><td>dGQN* [12]</td><td colspan=\"2\">21.59</td><td>0.87</td></tr><tr><td>ENR* [11]</td><td colspan=\"2\">22.83</td><td>-</td></tr><tr><td>SRN** [38]</td><td colspan=\"2\">22.89</td><td>0.89</td></tr><tr><td>PixelNeRF* [46]</td><td colspan=\"2\">23.72</td><td>0.91</td></tr><tr><td>CodeNeRF** [16]</td><td colspan=\"2\">22.39</td><td>0.87</td></tr><tr><td>Pix2NeRF conditional</td><td colspan=\"2\">18.14</td><td>0.84</td></tr><tr><td>Method</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td></tr><tr><td>HoloGAN [24]</td><td>-</td><td>1.54</td><td>-</td></tr><tr><td>π-GAN [2]</td><td>15.47</td><td>0.55</td><td>4.62</td></tr><tr><td>Pix2NeRF unconditional</td><td>14.31</td><td>0.51</td><td>4.62</td></tr><tr><td>Pix2NeRF conditional</td><td>17.55</td><td>0.59</td><td>4.36</td></tr></table>",
1374
+ "bbox": [
1375
+ 148,
1376
+ 369,
1377
+ 397,
1378
+ 577
1379
+ ],
1380
+ "page_idx": 6
1381
+ },
1382
+ {
1383
+ "type": "text",
1384
+ "text": "Table 3. Quantitative results on ShapeNet-SRN [4, 38] chairs. Top: reconstruction metrics $(128\\times 128)$ . Bottom: generative metrics $(64\\times 64)$ . Legend: * - requires multi-view training data; ** - requires multi-view training data and test time optimization.",
1385
+ "bbox": [
1386
+ 75,
1387
+ 588,
1388
+ 470,
1389
+ 643
1390
+ ],
1391
+ "page_idx": 6
1392
+ },
1393
+ {
1394
+ "type": "text",
1395
+ "text": "tures. We also use the discriminator architecture as the backbone of our encoder, where we add a tanh at the end of the latent code head. All models are optimized with Adam [17] optimizer for 300k iterations, which is approximately the same computational cost to obtain a $\\pi$ -GAN model. CelebA [19] models are trained with batch size 48 on resolution $64 \\times 64$ , where we sample 24 points per ray. We use learning rates of 2e-4, 6e-5, and 2e-4 for discriminator, generator, and encoder, respectively. For all other models, we utilized $\\pi$ -GAN [2]'s progressive training strategy, starting with training on resolution $32 \\times 32$ with learning rates 4e-5, 4e-4, and 4e-4 for generator, discriminator, and encoder, respectively, with 96 sampled points per ray. We increase to resolution $64 \\times 64$ with learning rates 2e-5, 2e-4, and 2e-4 for generator, discriminator, and encoder, respec",
1396
+ "bbox": [
1397
+ 75,
1398
+ 672,
1399
+ 470,
1400
+ 900
1401
+ ],
1402
+ "page_idx": 6
1403
+ },
1404
+ {
1405
+ "type": "text",
1406
+ "text": "tively, and sample 72 points per ray after 50k iterations. We empirically set $\\lambda_{\\mathrm{recon}} = 5$ , $\\lambda_{\\mathrm{ssim}} = 1$ and $\\lambda_{\\mathrm{vgg}} = 1$ for all datasets. For CelebA [19], we follow [2] and set $\\lambda_{\\mathrm{pos}} = 15$ . For CARLA [8] and ShapeNet-SRN [4, 38], we set $\\lambda_{\\mathrm{pos}} = 0$ as we do not observe significant difference. We use $|z| = 512$ for CelebA [19] and $|z| = 256$ for CARLA [8] and Shapenet-SRN [4, 38].",
1407
+ "bbox": [
1408
+ 496,
1409
+ 90,
1410
+ 890,
1411
+ 196
1412
+ ],
1413
+ "page_idx": 6
1414
+ },
1415
+ {
1416
+ "type": "text",
1417
+ "text": "Quantitative results. We show the evaluation on CelebA [19] and CARLA [8] in Tables 1 and 2 respectively. We also show evaluation with the same generative metrics on ShapeNet-SRN in Table 3 (bottom). We observe that even though our model's conditional synthesis is not as good as our backbone $\\pi$ -GAN (especially on CelebA), it is on par with other prior 3D view generation methods [24, 35].",
1418
+ "bbox": [
1419
+ 496,
1420
+ 215,
1421
+ 890,
1422
+ 321
1423
+ ],
1424
+ "page_idx": 6
1425
+ },
1426
+ {
1427
+ "type": "text",
1428
+ "text": "Since we do not explicitly enforce prior distribution $p_{z}$ on the encoded samples $E(I_{\\mathrm{real}})$ from $p_{\\mathrm{real}}$ , the image of $p_{\\mathrm{real}}$ resulting from the encoder mapping may occupy a small portion in $p_{z}$ . Thus, conditioning on $p_{\\mathrm{real}}$ naturally leads to a smaller variation in samples from $p_{z}$ , and hence, smaller diversity of NeRF outputs. For this reason, directly sampling randomly from $p_{z}$ (unconditionally) achieves better performance as measured by the generative metrics. Additionally, our generator outperforms $\\pi$ -GAN on most metrics on CARLA [8] and ShapeNet-SRN [4, 38]. Results on CelebA [19] are less consistent due to dataset noise (background, geometry, pose noise, artifacts, etc.), encouraging GANs to converge towards the mean as a trade-off to variations. These observations can be related to manifold learning [9], where we enforce the existence of a latent code for each real image in the train set.",
1429
+ "bbox": [
1430
+ 496,
1431
+ 325,
1432
+ 893,
1433
+ 568
1434
+ ],
1435
+ "page_idx": 6
1436
+ },
1437
+ {
1438
+ "type": "text",
1439
+ "text": "We compare our method with other single-image 3D inference methods in Table 3 on ShapeNet-SRN [4, 38] in $128 \\times 128$ resolution. Since our model assumes a strictly-spherical camera parameterization model, which does not correspond well to the ground truth poses of ShapeNet-SRN [4, 38], we use our encoder to extract poses from the images.",
1440
+ "bbox": [
1441
+ 496,
1442
+ 573,
1443
+ 890,
1444
+ 665
1445
+ ],
1446
+ "page_idx": 6
1447
+ },
1448
+ {
1449
+ "type": "text",
1450
+ "text": "Despite being generative, unsupervised, and not requiring test time optimization in contrast to all other methods, our model's performance does not drop much below the competition. Considering that other models were trained on 128, while our models were trained on $64 \\times 64$ but rendered at $128 \\times 128$ resolution, we observe a super-resolution effect.",
1451
+ "bbox": [
1452
+ 496,
1453
+ 670,
1454
+ 890,
1455
+ 761
1456
+ ],
1457
+ "page_idx": 6
1458
+ },
1459
+ {
1460
+ "type": "text",
1461
+ "text": "Qualitative results. We show some qualitative results of our model's performance on CARLA [8] and CelebA [19] in Fig. 3. We can see that our model can synthesize novel views with good quality while existing few-shot NeRF methods [16, 40, 46] are not able to train on these \"in the wild\" datasets due to the lack of multi-view supervision. Our model can also produce decent 3D representations even under extreme poses and artifacts (see row 5).",
1462
+ "bbox": [
1463
+ 496,
1464
+ 779,
1465
+ 890,
1466
+ 900
1467
+ ],
1468
+ "page_idx": 6
1469
+ },
1470
+ {
1471
+ "type": "page_number",
1472
+ "text": "7",
1473
+ "bbox": [
1474
+ 478,
1475
+ 924,
1476
+ 488,
1477
+ 935
1478
+ ],
1479
+ "page_idx": 6
1480
+ },
1481
+ {
1482
+ "type": "image",
1483
+ "img_path": "images/4bcc4e98fba0b0b6781beeabd5d2defe5ba6e3a1d4975e79da82da69c36ccdd7.jpg",
1484
+ "image_caption": [
1485
+ "Figure 4. Qualitative results of ablation studies, obtained with an image from the test split of CelebA [19]. $\\lambda_{\\mathrm{recon}}$ is set to 1 for lower reconstruction weights instead of the warm-up ablation. See Appendix for results obtained by using other $\\lambda_{\\mathrm{recon}}$ values."
1486
+ ],
1487
+ "image_footnote": [],
1488
+ "bbox": [
1489
+ 80,
1490
+ 89,
1491
+ 467,
1492
+ 301
1493
+ ],
1494
+ "page_idx": 7
1495
+ },
1496
+ {
1497
+ "type": "text",
1498
+ "text": "4.2. Ablation studies",
1499
+ "text_level": 1,
1500
+ "bbox": [
1501
+ 76,
1502
+ 402,
1503
+ 238,
1504
+ 419
1505
+ ],
1506
+ "page_idx": 7
1507
+ },
1508
+ {
1509
+ "type": "text",
1510
+ "text": "We perform a thorough ablation study to verify our design choices by removing the key components one by one and training models under identical settings as the full model. Qualitative results for the following ablations are in Fig. 4; refer to Appendix for the corresponding quantitative results.",
1511
+ "bbox": [
1512
+ 75,
1513
+ 426,
1514
+ 472,
1515
+ 503
1516
+ ],
1517
+ "page_idx": 7
1518
+ },
1519
+ {
1520
+ "type": "text",
1521
+ "text": "Naive GAN inversion. We compare Pix2NeRF with naive GAN inversion: having a pre-trained GAN, we freeze its weights and train an encoder to map images to their corresponding latent codes. The results show that the encoder can learn an approximate mapping from images to latent code. However, due to the lack of joint distillation, the reconstruction is off from the input image.",
1522
+ "bbox": [
1523
+ 75,
1524
+ 515,
1525
+ 470,
1526
+ 621
1527
+ ],
1528
+ "page_idx": 7
1529
+ },
1530
+ {
1531
+ "type": "text",
1532
+ "text": "Auto-encoder. Another potential approach is to utilize $\\pi$ -GAN's architecture as an auto-encoder, in which the latent space is dropped from the pipeline and training the reconstruction and conditional adversarial objectives only. Under this setting, while the reconstruction achieves decent quality, we can observe visible 3D inconsistency, suggesting difficulty of optimization with the remaining objectives.",
1533
+ "bbox": [
1534
+ 75,
1535
+ 633,
1536
+ 470,
1537
+ 741
1538
+ ],
1539
+ "page_idx": 7
1540
+ },
1541
+ {
1542
+ "type": "text",
1543
+ "text": "No GAN inversion. We proceed with ablations by removing the GAN inversion step from the pipeline. The visual results turn out to be blurry and uncanny compared with full settings. One possible explanation is that this step is a connection between $\\pi$ -GAN training and reconstruction, which significantly affects the overall performance.",
1544
+ "bbox": [
1545
+ 75,
1546
+ 752,
1547
+ 470,
1548
+ 843
1549
+ ],
1550
+ "page_idx": 7
1551
+ },
1552
+ {
1553
+ "type": "text",
1554
+ "text": "No conditional adversarial objective. We further deactivate the conditional adversarial loss and retrain the model. As a result, the renderings become incomplete and have clear",
1555
+ "bbox": [
1556
+ 75,
1557
+ 854,
1558
+ 472,
1559
+ 900
1560
+ ],
1561
+ "page_idx": 7
1562
+ },
1563
+ {
1564
+ "type": "text",
1565
+ "text": "visual artifacts. In addition, 3D consistency degrades significantly, which justifies this objective in the given setting.",
1566
+ "bbox": [
1567
+ 498,
1568
+ 90,
1569
+ 893,
1570
+ 122
1571
+ ],
1572
+ "page_idx": 7
1573
+ },
1574
+ {
1575
+ "type": "text",
1576
+ "text": "Warm-up. To verify the effect of the warm-up strategy, we train three separate models and compare their performances: without warm-up, without unfreezing encoder (always warm-up), and assigning a lower weight for reconstruction instead of the warm-up. Without the warm-up strategy, the model tends to overfit the input view and cannot produce meaningful content from novel poses. If we only use the warm-up strategy and never unfreeze the encoder, the distillation is relatively weak, which results in few fine details. With lower reconstruction weight instead of the warm-up, the balance between reconstruction and adversarial objective is missing, resulting in mode collapse for novel view synthesis.",
1577
+ "bbox": [
1578
+ 496,
1579
+ 135,
1580
+ 893,
1581
+ 316
1582
+ ],
1583
+ "page_idx": 7
1584
+ },
1585
+ {
1586
+ "type": "text",
1587
+ "text": "5. Conclusions",
1588
+ "text_level": 1,
1589
+ "bbox": [
1590
+ 500,
1591
+ 333,
1592
+ 627,
1593
+ 349
1594
+ ],
1595
+ "page_idx": 7
1596
+ },
1597
+ {
1598
+ "type": "text",
1599
+ "text": "In this paper, we introduced Pix2NeRF, a novel unsupervised single-shot framework capable of translating an input image of a scene into a neural radiance field (NeRF), thereby performing single-shot novel view synthesis. The key idea of Pix2NeRF is to utilize generative NeRF models to interpolate missing geometry information. This is accomplished by jointly training an encoder that maps images to a latent space, which disentangles content and pose, and the generative NeRF model while keeping these two parts dependent on each other. Pix2NeRF can go beyond the auto-encoder setting and perform novel scene generation by sampling random content and pose and passing through the generator. Our framework demonstrates high reconstruction quality and 3D consistency, on par and better than previous works.",
1600
+ "bbox": [
1601
+ 496,
1602
+ 359,
1603
+ 893,
1604
+ 571
1605
+ ],
1606
+ "page_idx": 7
1607
+ },
1608
+ {
1609
+ "type": "text",
1610
+ "text": "Limitations and future work. The current setting in consideration is limited to one category per dataset and cannot directly generalize beyond the chosen category. Alternative research directions include local conditional fields similar to PixelNeRF [46] and GRF [40], which can generalize to unseen categories, multi-instance, and even real-world scenes. Being a general framework, Pix2NeRF is not limited to using $\\pi$ -GAN as its backbone. Newer generative NeRF models, e.g. EG3D [3] could potentially achieve better visual quality. Additionally, architecture search, especially with respect to the encoder remains a challenging problem. Utilizing more mature encoder architectures from 2D GAN feed-forward inversion literature, e.g. pixel2style2pixel [32], could potentially improve the performance of Pix2NeRF significantly.",
1611
+ "bbox": [
1612
+ 496,
1613
+ 585,
1614
+ 893,
1615
+ 797
1616
+ ],
1617
+ "page_idx": 7
1618
+ },
1619
+ {
1620
+ "type": "text",
1621
+ "text": "Ethical consideration. As with most modern conditional generative models, Pix2NeRF can be misused by generating content to spread misinformation or perform targeted attacks. The growing popularity of deepfake celebrity accounts in social media suggests that new use cases, markets, and novel ways of monetizing this kind of data will follow.",
1622
+ "bbox": [
1623
+ 496,
1624
+ 809,
1625
+ 893,
1626
+ 900
1627
+ ],
1628
+ "page_idx": 7
1629
+ },
1630
+ {
1631
+ "type": "page_number",
1632
+ "text": "8",
1633
+ "bbox": [
1634
+ 480,
1635
+ 924,
1636
+ 488,
1637
+ 936
1638
+ ],
1639
+ "page_idx": 7
1640
+ },
1641
+ {
1642
+ "type": "text",
1643
+ "text": "References",
1644
+ "text_level": 1,
1645
+ "bbox": [
1646
+ 78,
1647
+ 89,
1648
+ 173,
1649
+ 104
1650
+ ],
1651
+ "page_idx": 8
1652
+ },
1653
+ {
1654
+ "type": "list",
1655
+ "sub_type": "ref_text",
1656
+ "list_items": [
1657
+ "[1] Mikołaj Binkowski, Danica J. Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans, 2021. 6, 1",
1658
+ "[2] Eric Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In arXiv, 2020. 2, 3, 4, 6, 7",
1659
+ "[3] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In arXiv, 2021. 8",
1660
+ "[4] Angel X. Chang, Thomas A. Funkhouser, Leonidas J. Guibas, Pat Hanrahan, Qi-Xing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. Shapenet: An information-rich 3d model repository. CoRR, abs/1512.03012, 2015. 6, 7, 1, 4",
1661
+ "[5] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo, 2021. 2",
1662
+ "[6] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2",
1663
+ "[7] Julian Chibane, Aayush Bansal, Verica Lazova, and Gerard Pons-Moll. Stereo radiance fields (srf): Learning view synthesis for sparse views of novel scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7911-7920, June 2021. 2",
1664
+ "[8] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. CARLA: An open urban driving simulator. In Proceedings of the 1st Annual Conference on Robot Learning, pages 1-16, 2017. 6, 7, 1, 5",
1665
+ "[9] Yilun Du, Katherine M. Collins, Joshua B. Tenenbaum, and Vincent Sitzmann. Learning signal-agnostic manifolds of neural fields, 2021. 7",
1666
+ "[10] Vincent Dumoulin, Ethan Perez, Nathan Schucher, Florian Strub, Harm de Vries, Aaron Courville, and Yoshua Bengio. Feature-wise transformations. Distill, 2018. https://distill.pub/2018/feature-wise-transformations.3",
1667
+ "[11] Emilien Dupont, Miguel Bautista Martin, Alex Colburn, Aditya Sankar, Josh Susskind, and Qi Shan. Equivariant neural rendering. In International Conference on Machine Learning, pages 2761-2770. PMLR, 2020. 7",
1668
+ "[12] SMA Eslami, DJ Rezende, F Besse, F Viola, AS Morcos, M Garnelo, A Ruderman, AA Rusu, I Danihelka, K Gregor, et al. Neural scene representation and rendering. Science, 360(6394):1204-, 2018. 7",
1669
+ "[13] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Y. Bengio. Generative adversarial networks. Advances in Neural Information Processing Systems, 3, 06 2014. 2",
1670
+ "[14] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis, 2021. 3"
1671
+ ],
1672
+ "bbox": [
1673
+ 78,
1674
+ 114,
1675
+ 470,
1676
+ 892
1677
+ ],
1678
+ "page_idx": 8
1679
+ },
1680
+ {
1681
+ "type": "list",
1682
+ "sub_type": "ref_text",
1683
+ "list_items": [
1684
+ "[15] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Günter Klambauer, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a nash equilibrium. CoRR, abs/1706.08500, 2017. 6, 1",
1685
+ "[16] Wonbong Jang and Lourdes Agapito. Codenerf: Disentangled neural radiance fields for object categories, 2021. 2, 3, 7",
1686
+ "[17] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations, 12 2014. 7",
1687
+ "[18] Chen-Hsuan Lin, Chaoyang Wang, and Simon Lucey. Sdfsrn: Learning signed distance 3d object reconstruction from static images. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 1",
1688
+ "[19] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), December 2015. 4, 5, 6, 7, 8, 1, 2, 3",
1689
+ "[20] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In CVPR, 2021. 2",
1690
+ "[21] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. arXiv preprint arXiv:2103.15606, 2021. 2",
1691
+ "[22] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2",
1692
+ "[23] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 1, 2",
1693
+ "[24] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In The IEEE International Conference on Computer Vision (ICCV), Nov 2019. 2, 3, 7",
1694
+ "[25] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 3",
1695
+ "[26] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2",
1696
+ "[27] Anton Obukhov, Maximilian Seitzer, Po-Wei Wu, Semen Zhydenko, Jonathan Kyl, and Elvis Yu-Jing Lin. High-fidelity performance metrics for generative models in pytorch, 2020. Version: 0.3.0, DOI: 10.5281/zenodo.4957738.6",
1697
+ "[28] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 2"
1698
+ ],
1699
+ "bbox": [
1700
+ 501,
1701
+ 92,
1702
+ 893,
1703
+ 896
1704
+ ],
1705
+ "page_idx": 8
1706
+ },
1707
+ {
1708
+ "type": "page_number",
1709
+ "text": "9",
1710
+ "bbox": [
1711
+ 478,
1712
+ 924,
1713
+ 491,
1714
+ 936
1715
+ ],
1716
+ "page_idx": 8
1717
+ },
1718
+ {
1719
+ "type": "list",
1720
+ "sub_type": "ref_text",
1721
+ "list_items": [
1722
+ "[29] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Köpf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. CoRR, abs/1912.01703, 2019. 6",
1723
+ "[30] Ethan Perez, Florian Strub, Harm de Vries, Vincent Dumoulin, and Aaron C. Courville. Film: Visual reasoning with a general conditioning layer. CoRR, abs/1709.07871, 2017. 3",
1724
+ "[31] Pierluigi Zama Ramirez, Alessio Tonioni, and Federico Tombari. Unsupervised novel view synthesis from a single image. CoRR, abs/2102.03285, 2021. 2, 3, 4, 5, 1",
1725
+ "[32] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: a stylegan encoder for image-to-image translation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2021. 8",
1726
+ "[33] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In The IEEE International Conference on Computer Vision (ICCV), October 2019. 2",
1727
+ "[34] Tim Salimans, Ian J. Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. CoRR, abs/1606.03498, 2016. 6, 1",
1728
+ "[35] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 3, 6, 7",
1729
+ "[36] Vincent Sitzmann, Eric R. Chan, Richard Tucker, Noah Snavely, and Gordon Wetzstein. Metasdf: Meta-learning signed distance functions. In arXiv, 2020. 2",
1730
+ "[37] Vincent Sitzmann, Julien N.P. Martel, Alexander W. Bergman, David B. Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. In Proc. NeurIPS, 2020. 3",
1731
+ "[38] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure"
1732
+ ],
1733
+ "bbox": [
1734
+ 78,
1735
+ 90,
1736
+ 470,
1737
+ 650
1738
+ ],
1739
+ "page_idx": 9
1740
+ },
1741
+ {
1742
+ "type": "list",
1743
+ "sub_type": "ref_text",
1744
+ "list_items": [
1745
+ "aware neural scene representations. In Advances in Neural Information Processing Systems, 2019. 2, 6, 7, 1, 4",
1746
+ "[39] Maxim Tatarchenko, Alexey Dosovitskiy, and Thomas Brox. Multi-view 3d models from single images with a convolutional network, 2016. 7",
1747
+ "[40] Alex Trevithick and Bo Yang. Grf: Learning a general radiance field for 3d scene representation and rendering. In arXiv:2010.04595, 2020. 2, 3, 7, 8",
1748
+ "[41] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo MartinBrualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In CVPR, 2021. 2",
1749
+ "[42] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 5, 6, 1",
1750
+ "[43] Zirui Wang, Shangzhe Wu, Weidi Xie, Min Chen, and Victor Adrian Prisacariu. NeRF-: Neural radiance fields without known camera parameters. https://arxiv.org/abs/2102.07064, 2021.2",
1751
+ "[44] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In CVPR, 2020. 2, 5",
1752
+ "[45] Xudong Xu, Xingang Pan, Dahua Lin, and Bo Dai. Generative occupancy fields for 3d surface-aware image synthesis. In Advances in Neural Information Processing Systems(NeurIPS), 2021. 3",
1753
+ "[46] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2, 3, 7, 8",
1754
+ "[47] Yuxuan Zhang, Wenzheng Chen, Huan Ling, Jun Gao, Yinan Zhang, Antonio Torralba, and Sanja Fidler. Image gans meet differentiable rendering for inverse graphics and interpretable 3d neural rendering. In International Conference on Learning Representations, 2021. 2",
1755
+ "[48] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis, 2021. 3"
1756
+ ],
1757
+ "bbox": [
1758
+ 501,
1759
+ 92,
1760
+ 893,
1761
+ 642
1762
+ ],
1763
+ "page_idx": 9
1764
+ },
1765
+ {
1766
+ "type": "page_number",
1767
+ "text": "10",
1768
+ "bbox": [
1769
+ 477,
1770
+ 924,
1771
+ 495,
1772
+ 936
1773
+ ],
1774
+ "page_idx": 9
1775
+ },
1776
+ {
1777
+ "type": "text",
1778
+ "text": "Pix2NeRF: Unsupervised Conditional $\\pi$ -GAN for Single Image to Neural Radiance Fields Translation",
1779
+ "text_level": 1,
1780
+ "bbox": [
1781
+ 117,
1782
+ 85,
1783
+ 851,
1784
+ 128
1785
+ ],
1786
+ "page_idx": 10
1787
+ },
1788
+ {
1789
+ "type": "text",
1790
+ "text": "Supplementary Material",
1791
+ "bbox": [
1792
+ 367,
1793
+ 141,
1794
+ 601,
1795
+ 162
1796
+ ],
1797
+ "page_idx": 10
1798
+ },
1799
+ {
1800
+ "type": "table",
1801
+ "img_path": "images/8bef392e36d5d06d3681b9fe1c38d83ae7962a8fb39a0fbb9f62d43def98bd58.jpg",
1802
+ "table_caption": [],
1803
+ "table_footnote": [],
1804
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">128 × 128</td><td colspan=\"2\">64 × 64</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>PSNR ↑</td><td>SSIM ↑</td></tr><tr><td>Pix2NeRF unconditional</td><td>26.45</td><td>1.18</td><td>4.39</td><td>-</td><td>-</td></tr><tr><td>Pix2NeRF conditional</td><td>26.81</td><td>1.23</td><td>4.27</td><td>18.75</td><td>0.82</td></tr></table>",
1805
+ "bbox": [
1806
+ 81,
1807
+ 172,
1808
+ 464,
1809
+ 234
1810
+ ],
1811
+ "page_idx": 10
1812
+ },
1813
+ {
1814
+ "type": "table",
1815
+ "img_path": "images/1fa03174e53c26ee2777a04f3a89e563f3cb79655fbe93700914bcc5e2b3cabe.jpg",
1816
+ "table_caption": [
1817
+ "Table 4. Additional quantitative results on ShapeNet-SRN [4, 38]."
1818
+ ],
1819
+ "table_footnote": [],
1820
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"3\">CelebA 64 × 64</td><td colspan=\"5\">ShapeNet-SRN 64 × 64</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>PSNR ↑</td><td>SSIM ↑</td></tr><tr><td>A</td><td>28.90</td><td>2.99</td><td>1.62</td><td>34.01</td><td>1.73</td><td>3.65</td><td>15.91</td><td>0.71</td></tr><tr><td>B</td><td>43.19</td><td>2.84</td><td>1.33</td><td>43.06</td><td>2.49</td><td>2.92</td><td>16.27</td><td>0.71</td></tr><tr><td>C</td><td>39.42</td><td>3.07</td><td>1.65</td><td>41.47</td><td>2.80</td><td>2.96</td><td>15.14</td><td>0.68</td></tr><tr><td>D</td><td>33.92</td><td>2.84</td><td>1.87</td><td>35.72</td><td>1.74</td><td>3.75</td><td>16.81</td><td>0.77</td></tr><tr><td>E</td><td>31.31</td><td>2.75</td><td>1.95</td><td>21.67</td><td>0.89</td><td>4.35</td><td>18.03</td><td>0.79</td></tr><tr><td>F</td><td>39.86</td><td>3.18</td><td>1.73</td><td>27.70</td><td>1.22</td><td>4.09</td><td>16.98</td><td>0.77</td></tr><tr><td>G</td><td>73.52</td><td>7.47</td><td>1.91</td><td>27.10</td><td>1.31</td><td>4.26</td><td>17.77</td><td>0.79</td></tr><tr><td>H</td><td>73.03</td><td>7.08</td><td>1.97</td><td>41.11</td><td>2.27</td><td>3.34</td><td>14.98</td><td>0.74</td></tr><tr><td>I</td><td>140.25</td><td>16.33</td><td>1.79</td><td>184.10</td><td>17.19</td><td>2.55</td><td>10.95</td><td>0.59</td></tr><tr><td>J</td><td>168.59</td><td>18.89</td><td>1.50</td><td>266.64</td><td>30.29</td><td>1.98</td><td>10.28</td><td>0.47</td></tr><tr><td>Full</td><td>24.64</td><td>1.93</td><td>2.24</td><td>17.55</td><td>0.59</td><td>4.36</td><td>18.75</td><td>0.82</td></tr></table>",
1821
+ "bbox": [
1822
+ 80,
1823
+ 270,
1824
+ 465,
1825
+ 426
1826
+ ],
1827
+ "page_idx": 10
1828
+ },
1829
+ {
1830
+ "type": "table",
1831
+ "img_path": "images/3d44897acbcbfcb4ab918c05c0e332bc7a31d0a13b95bed0aae0c04b6fb91b48.jpg",
1832
+ "table_caption": [
1833
+ "Table 5. Quantitative results of ablation study on CelebA [19] and ShapeNet-SRN [4,38]. \"Full\" denotes Pix2NeRF conditional setup.",
1834
+ "Table 6. Input view reconstruction (PSNR, SSIM) on a test set, and novel view synthesis (FID, KID $\\times$ 100, IS)."
1835
+ ],
1836
+ "table_footnote": [],
1837
+ "table_body": "<table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>FID↓</td><td>KID↓</td><td>IS↑</td></tr><tr><td>Pix2NeRF E + frozen π-GAN G</td><td>13.04</td><td>0.46</td><td>28.25</td><td>2.97</td><td>1.52</td></tr><tr><td>π-GAN optimization (200 iterations)</td><td>23.42</td><td>0.80</td><td>16.09</td><td>0.83</td><td>2.10</td></tr><tr><td>π-GAN optimization (700 iterations)</td><td>24.21</td><td>0.82</td><td>17.14</td><td>0.72</td><td>2.14</td></tr><tr><td>Pix2NeRF (feed-forward)</td><td>17.95</td><td>0.67</td><td>24.82</td><td>1.93</td><td>2.21</td></tr><tr><td>Pix2NeRF (200 iterations)</td><td>27.12</td><td>0.89</td><td>12.86</td><td>0.64</td><td>2.27</td></tr><tr><td>Pix2NeRF (1000 iterations)</td><td>27.73</td><td>0.90</td><td>12.01</td><td>0.62</td><td>2.30</td></tr></table>",
1838
+ "bbox": [
1839
+ 81,
1840
+ 516,
1841
+ 465,
1842
+ 601
1843
+ ],
1844
+ "page_idx": 10
1845
+ },
1846
+ {
1847
+ "type": "text",
1848
+ "text": "A. Additional qualitative results",
1849
+ "text_level": 1,
1850
+ "bbox": [
1851
+ 76,
1852
+ 619,
1853
+ 349,
1854
+ 637
1855
+ ],
1856
+ "page_idx": 10
1857
+ },
1858
+ {
1859
+ "type": "text",
1860
+ "text": "We demonstrate additional qualitative results achieved by Pix2NeRF on three datasets: CelebA [19], Shapenet-SRN chairs [4, 38], and CARLA [8] in Figures 6, 7, and 8 respectively.",
1861
+ "bbox": [
1862
+ 75,
1863
+ 645,
1864
+ 468,
1865
+ 705
1866
+ ],
1867
+ "page_idx": 10
1868
+ },
1869
+ {
1870
+ "type": "text",
1871
+ "text": "B. Additional quantitative results",
1872
+ "text_level": 1,
1873
+ "bbox": [
1874
+ 76,
1875
+ 717,
1876
+ 359,
1877
+ 734
1878
+ ],
1879
+ "page_idx": 10
1880
+ },
1881
+ {
1882
+ "type": "text",
1883
+ "text": "Table 4 provides additional quantitative results on ShapeNet-SRN [4, 38] with generative metrics computed on $128 \\times 128$ resolution, and reconstruction metrics computed on $64 \\times 64$ resolution. We do not report PSNR and SSIM for CelebA [19] as there is no ground truth novel views.",
1884
+ "bbox": [
1885
+ 75,
1886
+ 742,
1887
+ 468,
1888
+ 832
1889
+ ],
1890
+ "page_idx": 10
1891
+ },
1892
+ {
1893
+ "type": "text",
1894
+ "text": "C. Additional ablation study",
1895
+ "text_level": 1,
1896
+ "bbox": [
1897
+ 76,
1898
+ 844,
1899
+ 318,
1900
+ 862
1901
+ ],
1902
+ "page_idx": 10
1903
+ },
1904
+ {
1905
+ "type": "text",
1906
+ "text": "We provide quantitative results of each ablation study on CelebA [19] and Shapenet-SRN [4, 38] to further verify our",
1907
+ "bbox": [
1908
+ 76,
1909
+ 869,
1910
+ 468,
1911
+ 900
1912
+ ],
1913
+ "page_idx": 10
1914
+ },
1915
+ {
1916
+ "type": "text",
1917
+ "text": "design choices. As in the ablation study in our main paper, we report FID [15], KID [1] and IS [34] for CelebA [19], and additionally report PSNR and SSIM [42] on Shapenet-SRN [4,18]. We measure results after inference on resolution $64 \\times 64$ . We show quantitative ablation results in Table 5. Legend: A - naive GAN inversion; B - auto-encoder; C - no GAN inversion; D - no conditional adversarial objective; E - no warm-up; F - always warm-up; G, H, I, J - lower weights for reconstruction instead of warm-up, with $\\lambda_{\\mathrm{recon}} = 1, 0.1, 0.01, 0.001$ respectively. Note that since the encoder output is not enforced to strictly follow $p_z$ , naive GAN inversion (stage 1 in [31]) failed completely due to bad initialization. We therefore use a \"warmed-up\" version of the generator trained for 300k iterations.",
1918
+ "bbox": [
1919
+ 496,
1920
+ 175,
1921
+ 893,
1922
+ 387
1923
+ ],
1924
+ "page_idx": 10
1925
+ },
1926
+ {
1927
+ "type": "text",
1928
+ "text": "D. Input reconstruction and hybrid optimization",
1929
+ "text_level": 1,
1930
+ "bbox": [
1931
+ 498,
1932
+ 400,
1933
+ 893,
1934
+ 433
1935
+ ],
1936
+ "page_idx": 10
1937
+ },
1938
+ {
1939
+ "type": "text",
1940
+ "text": "We ran extra ablations and summarized our model performance by providing both input reconstruction (cols 2,3) and novel view synthesis (cols 4,5,6) results in Tab. 6 (row 4). We show $\\pi$ -GAN latent optimization on an input image for 700 iterations, as recommended by its authors in row 3. Note that it requires time-consuming per-instance optimization due to the NeRF's rendering mechanism. Additionally, we use the Pix2NeRF encoder's output as a starting point and perform latent optimization with a frozen Pix2NeRF generator for only 200 iterations, shown in row 5. A qualitative comparison is shown in Fig. 5. Note that our model does not overfit the input view even with 1000 iterations of input view optimization (row 6), while $\\pi$ -GAN shows strong artifacts and requires a search for the optimal number of iterations.",
1941
+ "bbox": [
1942
+ 496,
1943
+ 443,
1944
+ 893,
1945
+ 652
1946
+ ],
1947
+ "page_idx": 10
1948
+ },
1949
+ {
1950
+ "type": "text",
1951
+ "text": "E. Necessity of generator distilling",
1952
+ "text_level": 1,
1953
+ "bbox": [
1954
+ 500,
1955
+ 665,
1956
+ 790,
1957
+ 683
1958
+ ],
1959
+ "page_idx": 10
1960
+ },
1961
+ {
1962
+ "type": "text",
1963
+ "text": "We trained the encoder with a pretrained frozen $\\pi$ -GAN generator using all the losses. As can be seen from the results in Tab. 6 Row 1, the model struggles to capture details accurately without fine-tuning the generator jointly.",
1964
+ "bbox": [
1965
+ 496,
1966
+ 690,
1967
+ 890,
1968
+ 752
1969
+ ],
1970
+ "page_idx": 10
1971
+ },
1972
+ {
1973
+ "type": "text",
1974
+ "text": "F. Linear interpolation",
1975
+ "text_level": 1,
1976
+ "bbox": [
1977
+ 500,
1978
+ 763,
1979
+ 696,
1980
+ 782
1981
+ ],
1982
+ "page_idx": 10
1983
+ },
1984
+ {
1985
+ "type": "text",
1986
+ "text": "We interpolate novel views between two different input images by predicting their corresponding latent codes and poses, then applying linear interpolation to get the intermediate codes and poses. We show the results interpolating five images in Figure 9.",
1987
+ "bbox": [
1988
+ 496,
1989
+ 789,
1990
+ 893,
1991
+ 866
1992
+ ],
1993
+ "page_idx": 10
1994
+ },
1995
+ {
1996
+ "type": "page_number",
1997
+ "text": "1",
1998
+ "bbox": [
1999
+ 480,
2000
+ 924,
2001
+ 488,
2002
+ 935
2003
+ ],
2004
+ "page_idx": 10
2005
+ },
2006
+ {
2007
+ "type": "image",
2008
+ "img_path": "images/163c2cb75f0f6580593868d080cce7290887dbe3808719d95a12dfd86f93f645.jpg",
2009
+ "image_caption": [
2010
+ "Figure 5. Qualitative comparison on CelebA. Top - input, middle - reconstruction, bottom - novel view synthesis."
2011
+ ],
2012
+ "image_footnote": [],
2013
+ "bbox": [
2014
+ 86,
2015
+ 93,
2016
+ 450,
2017
+ 263
2018
+ ],
2019
+ "page_idx": 11
2020
+ },
2021
+ {
2022
+ "type": "text",
2023
+ "text": "G. Limitations and failure cases",
2024
+ "text_level": 1,
2025
+ "bbox": [
2026
+ 500,
2027
+ 89,
2028
+ 769,
2029
+ 106
2030
+ ],
2031
+ "page_idx": 11
2032
+ },
2033
+ {
2034
+ "type": "text",
2035
+ "text": "Despite training on images without pose or 3D supervision, Pix2NeRF can reconstruct objects from a single image and achieve decent quality. However, the methodology of using an encoder to encode an entire image into a single latent code is quite challenging, especially when the dataset is noisy, such as CelebA [19]. Pix2NeRF cannot always capture fine details accurately. We observe failure cases when the input is out-of-distribution relative to that of the training set $p_{\\mathrm{real}}$ , as shown in Figure 10. It might be possible to improve these hard cases by introducing pixel-wise features instead of (or, in addition to) the global latent code, as done in Pix2NeRF [46] and GRF [40].",
2036
+ "bbox": [
2037
+ 496,
2038
+ 114,
2039
+ 893,
2040
+ 297
2041
+ ],
2042
+ "page_idx": 11
2043
+ },
2044
+ {
2045
+ "type": "page_number",
2046
+ "text": "2",
2047
+ "bbox": [
2048
+ 478,
2049
+ 924,
2050
+ 491,
2051
+ 936
2052
+ ],
2053
+ "page_idx": 11
2054
+ },
2055
+ {
2056
+ "type": "image",
2057
+ "img_path": "images/ea4ceed7db54088b46f5b6072efcae6cfe293e638b455444f1110d4f61d324ed.jpg",
2058
+ "image_caption": [
2059
+ "input reconstruction",
2060
+ "novel views",
2061
+ "Figure 6. Further reconstructions and novel views on CelebA [19]."
2062
+ ],
2063
+ "image_footnote": [],
2064
+ "bbox": [
2065
+ 89,
2066
+ 109,
2067
+ 883,
2068
+ 813
2069
+ ],
2070
+ "page_idx": 12
2071
+ },
2072
+ {
2073
+ "type": "page_number",
2074
+ "text": "3",
2075
+ "bbox": [
2076
+ 480,
2077
+ 924,
2078
+ 488,
2079
+ 935
2080
+ ],
2081
+ "page_idx": 12
2082
+ },
2083
+ {
2084
+ "type": "image",
2085
+ "img_path": "images/5f882f63b67a9dd2226d173970188a8b9aaf3c26e34d8127b9fc56382293ae8d.jpg",
2086
+ "image_caption": [
2087
+ "input reconstruction",
2088
+ "novel views",
2089
+ "Figure 7. Further reconstructions and novel views on ShapeNet-SRN [4, 38]."
2090
+ ],
2091
+ "image_footnote": [],
2092
+ "bbox": [
2093
+ 106,
2094
+ 109,
2095
+ 870,
2096
+ 801
2097
+ ],
2098
+ "page_idx": 13
2099
+ },
2100
+ {
2101
+ "type": "page_number",
2102
+ "text": "4",
2103
+ "bbox": [
2104
+ 478,
2105
+ 924,
2106
+ 491,
2107
+ 935
2108
+ ],
2109
+ "page_idx": 13
2110
+ },
2111
+ {
2112
+ "type": "image",
2113
+ "img_path": "images/fdf8be2d5cb597e79d5853dbe733d4cf232227b601473d3ca2abf15190b4256f.jpg",
2114
+ "image_caption": [
2115
+ "input reconstruction",
2116
+ "novel views",
2117
+ "Figure 8. Further reconstructions and novel views on CARLA [8]."
2118
+ ],
2119
+ "image_footnote": [],
2120
+ "bbox": [
2121
+ 81,
2122
+ 112,
2123
+ 885,
2124
+ 834
2125
+ ],
2126
+ "page_idx": 14
2127
+ },
2128
+ {
2129
+ "type": "page_number",
2130
+ "text": "5",
2131
+ "bbox": [
2132
+ 480,
2133
+ 924,
2134
+ 488,
2135
+ 935
2136
+ ],
2137
+ "page_idx": 14
2138
+ },
2139
+ {
2140
+ "type": "image",
2141
+ "img_path": "images/4e3e5aeaf617c9ff3b6505dbb498b0b0b3ba4956d4d79934a56cac5fdf95a42b.jpg",
2142
+ "image_caption": [
2143
+ "image 1",
2144
+ "Linear interpolation",
2145
+ "image 2",
2146
+ "Figure 9. Linear interpolation on CelebA [19]."
2147
+ ],
2148
+ "image_footnote": [],
2149
+ "bbox": [
2150
+ 89,
2151
+ 112,
2152
+ 877,
2153
+ 578
2154
+ ],
2155
+ "page_idx": 15
2156
+ },
2157
+ {
2158
+ "type": "image",
2159
+ "img_path": "images/cf7178b4f6bd7728acc660901217a2127fc389f4acef0ca6b032c4684493bbee.jpg",
2160
+ "image_caption": [
2161
+ "input"
2162
+ ],
2163
+ "image_footnote": [],
2164
+ "bbox": [
2165
+ 405,
2166
+ 657,
2167
+ 470,
2168
+ 704
2169
+ ],
2170
+ "page_idx": 15
2171
+ },
2172
+ {
2173
+ "type": "image",
2174
+ "img_path": "images/1f23088284e61bd675b773eca2a05772f2a8a71819086934e1813cc59d29f55c.jpg",
2175
+ "image_caption": [
2176
+ "reconstruction"
2177
+ ],
2178
+ "image_footnote": [],
2179
+ "bbox": [
2180
+ 509,
2181
+ 657,
2182
+ 570,
2183
+ 705
2184
+ ],
2185
+ "page_idx": 15
2186
+ },
2187
+ {
2188
+ "type": "image",
2189
+ "img_path": "images/469856648e3d010e510bb642e82964d8ae819b1783f9323fe7fa21ef0995436b.jpg",
2190
+ "image_caption": [],
2191
+ "image_footnote": [],
2192
+ "bbox": [
2193
+ 383,
2194
+ 713,
2195
+ 486,
2196
+ 790
2197
+ ],
2198
+ "page_idx": 15
2199
+ },
2200
+ {
2201
+ "type": "image",
2202
+ "img_path": "images/e70bfc5cfb09f620cc35751093472ac052bdb1debba1df0d5998df31e118cddc.jpg",
2203
+ "image_caption": [
2204
+ "Figure 10. Failure cases on CelebA [19] and ShapeNet-SRN [4,38]."
2205
+ ],
2206
+ "image_footnote": [],
2207
+ "bbox": [
2208
+ 488,
2209
+ 713,
2210
+ 586,
2211
+ 791
2212
+ ],
2213
+ "page_idx": 15
2214
+ },
2215
+ {
2216
+ "type": "page_number",
2217
+ "text": "6",
2218
+ "bbox": [
2219
+ 480,
2220
+ 925,
2221
+ 488,
2222
+ 935
2223
+ ],
2224
+ "page_idx": 15
2225
+ }
2226
+ ]
2202.13xxx/2202.13162/747f2044-56ac-435c-b2d3-916c376e22a8_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13162/747f2044-56ac-435c-b2d3-916c376e22a8_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99cdc2fbc96205e78cb16d14f71ec1bfa34bb01fb1f2ab3100ba070853244cdf
3
+ size 2323843
2202.13xxx/2202.13162/full.md ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Pix2NeRF: Unsupervised Conditional $\pi$ -GAN for Single Image to Neural Radiance Fields Translation
2
+
3
+ Shengqu Cai
4
+ ETH Zürich
5
+
6
+ Anton Obukhov ETH Zürich
7
+
8
+ Dengxin Dai
9
+ MPI for Informatics
10
+ ETH Zürich
11
+
12
+ Luc Van Gool
13
+ ETH Zürich
14
+ KU Leuven
15
+
16
+ ![](images/03b5d36de9f518895308aa61c743c5ca190b453155e662f8799786dd520dcc23.jpg)
17
+
18
+ ![](images/bac52eb184382fb931f6e620ad144cce4f5f74a4e0bdd23a040e0ea71dc7d7e8.jpg)
19
+
20
+ ![](images/db632d04adcc584b493c37e7db9fed6da69f0537fafd8471cb6a72db68f94f2a.jpg)
21
+
22
+ ![](images/b1d2cf56cf0e7175c7a9fd1d778875e9532fd7b0e311e8a02ee12b33e7ee5940.jpg)
23
+
24
+ ![](images/92f69b457de2745ae32395d8d87c6a4e8c82d541c3fa0f6333f34d8871f15101.jpg)
25
+
26
+ ![](images/dc67c9256da1e44df7fa8242787fcbe7229810bb1c31f62a3f7941733c0ed8fd.jpg)
27
+
28
+ ![](images/cb2a0126e80eb1d4ee0be2db00bd953132b166b0d572948b8e45dede7b5d7cde.jpg)
29
+
30
+ ![](images/577d28cf142ad5bbf06ed0454ed46258170927a3c4f3ecbf7f61865e65a327d0.jpg)
31
+ Figure 1. Overview of Pix2NeRF: We propose a method for unsupervised learning of neural representations of scenes, sharing a common pose prior. At test time, Pix2NeRF disentangles pose and content from an input image and renders novel views of the content. Top: $\pi$ -GAN is trained on a dataset without pose supervision. Bottom: a trained model is conditioned on a single image to obtain pose-dependent views.
32
+
33
+ ![](images/39614bc39f8834b323b605baf922b22887f0a081b3e9a24c6a26850b71dd412e.jpg)
34
+
35
+ ![](images/65fcae6a18554eedba038be3bbcf1e391e914fe6eec7e136ddcdff029ecfe722.jpg)
36
+
37
+ ![](images/a4ec2bd3d24e482824b0390d81413c855402842364962edccc0bbdfa373d4d7b.jpg)
38
+
39
+ ![](images/210e355cea14ee6b87e8c857e9c70e0faac3f900ef9527925b76fb5bb10b75d8.jpg)
40
+
41
+ # Abstract
42
+
43
+ We propose a pipeline to generate Neural Radiance Fields (NeRF) of an object or a scene of a specific class, conditioned on a single input image. This is a challenging task, as training NeRF requires multiple views of the same scene, coupled with corresponding poses, which are hard to obtain. Our method is based on $\pi$ -GAN, a generative model for unconditional 3D-aware image synthesis, which maps random latent codes to radiance fields of a class of objects. We jointly optimize (1) the $\pi$ -GAN objective to utilize its high-fidelity 3D-aware generation and (2) a carefully designed reconstruction objective. The latter includes an encoder coupled with $\pi$ -GAN generator to form an autoencoder. Unlike previous few-shot NeRF approaches, our pipeline is unsupervised, capable of being trained with independent images without 3D, multi-view, or pose supervision. Applications of our pipeline include 3d avatar generation, object-centric novel view synthesis with a single input image, and 3d-aware super-resolution, to name a few.
44
+
45
+ # 1. Introduction
46
+
47
+ Following the success of Neural Radiance Fields (NeRF) [23], encoding scenes as weights of multi-layer perceptrons (MLPs) has emerged as a promising research direction. Novel View Synthesis is an important application: given sparse sample views of a scene, the task is to synthesize novel views from unseen camera poses. NeRF addresses it by encoding color and volume density at each point of the 3D scene into a neural network and uses traditional volume rendering to compose 2D views.
48
+
49
+ While NeRF is capable of synthesizing novel views with high fidelity, it is often impractical due to being "overfitted" to a given scene and requiring multiple views of the scene to train. Several follow-up works attempt to address these limitations via making NeRF generalize to new scenes.
50
+
51
+ Major progress has been made in training a general NeRF capable of encoding a scene given only one or a handful of views [5, 7, 16, 40, 41, 46]. However, these works are designed to work well only with multi-view images during either training or both training and inference.
52
+
53
+ One reason why single-shot NeRF, or in general single-shot novel view synthesis is challenging, is the incomplete content information within a single image. For example, given a frontal image of a car, there is very little information to infer a novel view from the back directly. Bringing back the traditional inverse graphics and 3D reconstruction pipelines, [44] addresses this issue by making an additional assumption on the symmetry of the scene to interpolate potentially missing geometry information within a single image. However, this technique is limited to scenes where symmetry can be introduced and does not tackle the general case.
54
+
55
+ Therefore, a natural follow-up question is how the human brain addresses such a challenging task. One of the approaches we use unconsciously is learning a prior implicit model for object categories and mapping what we observe to the learned model. This line of thinking is already explored in prior works [40, 46]. An essential part missing from these works is ensuring that novel views also meet our expectation of the object class; due to the lack of supervision from a single image, this is normally done via imagination.
56
+
57
+ One of the closest forms of imagination developed by the machine learning community is Generative Adversarial Networks [13]. GANs have been very successful in image synthesis and transformation. Beyond 2D, studies have shown GAN's capability of synthesizing 3D content [24] from natural images. This suggests another approach to address 3D reconstruction without multi-view images via 3D GAN inversion. Such a strategy bypasses the problem of missing information within a single image due to GAN's adversarial training. Existing works [31, 47] utilize such a method based on HoloGAN [24], StyleGAN [47], and others, but a drawback inherent to these 3D-aware generative models is their relatively weak 3D consistency.
58
+
59
+ With the rapid increase of NeRF [23] popularity, corresponding generative models are also gaining attention. GRAF [35] and $\pi$ -GAN [2] follow traditional GAN settings by mapping latent codes to category-specific radiance fields. These generative models typically have high 3D consistency due to the built-in volumetric rendering design. This observation suggests the possibility of few-shot 3D reconstruction using adversarial training and radiance fields.
60
+
61
+ In this paper, we formulate the task of translating an input image of a given category to NeRF as an end-to-end pipeline termed Pix2NeRF (Fig. 1). The method can perform novel view synthesis given a single image, without the need of pretraining, annotation, or fine-tuning. Pix2NeRF can be trained with natural images – without explicit 3D supervision, in an end-to-end fashion. Inspired by prior works [31,40,46],
62
+
63
+ we introduce an encoder mapping a given image to a latent space. We jointly optimize several objectives. First, we train $\pi$ -GAN and the added encoder to map generated images back to the latent space. Second, we adapt the encoder coupled with $\pi$ -GAN's generator to form a conditional GAN, trained with both adversarial and reconstruction loss. We show that merely doing $\pi$ -GAN inversion is challenging and insufficient to complete our goal, and adaptation is important for calibrating learned representations of the encoder and generator. Our framework is able to instantiate NeRF in a single shot manner while naturally preserving the ability to synthesize novel views with high fidelity, comparable to state-of-the-art generative NeRF models.
64
+
65
+ # Contributions.
66
+
67
+ - We propose Pix2NeRF, the first unsupervised single-shot NeRF model, that can learn scene radiance fields from images without 3D, multi-view, or pose supervision.
68
+ - Our pipeline is the first work on conditional GAN-based NeRF, or in general, NeRF-based GAN inversion. We expect our pipeline to become a strong baseline for future works towards these research directions.
69
+ - We demonstrate the superiority of our method compared with naive GAN inversion methods and conduct extensive ablation studies to justify our design choices.
70
+
71
+ # 2. Related works
72
+
73
+ Our work can be classified as a category-specific 3D-aware neural novel view synthesis method, which is strongly based on NeRF [23] and $\pi$ -GAN [2].
74
+
75
+ Neural scene representations. The field of encoding a scene into neural networks has proven to be a promising research direction. This includes, but is not limited to: parameterizing the geometry of a scene via signed distance functions or occupancy [6, 22, 28, 36], encoding both geometry and appearance [18, 26, 33, 38], etc. Recently, the impressive performance of Neural Radiance Fields (NeRF) [23] has drawn extensive attention to this field. It encodes a scene as a multivariable vector-valued function $f(x,y,z,\theta ,\phi) = (r,g,b,\sigma)$ approximated by MLP, where $(x,y,z)$ denotes spatial coordinates, $(\theta ,\phi)$ denotes viewing direction, and $(r,g,b,\sigma)$ corresponds to color and volume density. This function is then called repeatedly by any of the volume rendering techniques to produce novel views. The outstanding performance of NeRF inspired follow-up works to extend it towards alternative settings, such as training from unconstrained images [20], training without poses [21, 43], etc.
76
+
77
+ NeRF-based GANs. Following the developments of GANs and NeRFs, several works tried combining them to form generative models producing NeRFs. One of the first attempts
78
+
79
+ in this direction is GRAF [35]; it performs category-specific radiance fields generation by conditioning NeRF on shape and appearance code. Following the NeRF pipeline, the generator can synthesize an image given a random code and a view direction. The generated image is passed into the discriminator together with real images, thus implementing a GAN. GRAF is an unsupervised model, since it does not require ground truth camera poses; therefore, it can be trained using "in the wild" images. This is done by introducing a pose prior relative to a canonical view frame of reference, e.g., Gaussian distribution to describe head pitch and yaw relative to a front face view. $\pi$ -GAN [2] is similar to GRAF, but conditions on a single latent code and utilizes FiLM [10, 30] SIREN [37] layers instead of simple MLPs. More recently, several works improved synthesis quality with high resolutions [14], better 3D shapes [45], and precise control [25, 48].
80
+
81
+ Few-shot NeRF. The main property of NeRFs is the ability to bake a 3D scene into MLP weights. However, this is also a limitation since it must be retrained for each new scene, which takes a lot of time and money. To lift this constraint, PixelNeRF [46] and GRF [40] condition MLPs on pixel-aligned features extracted by a CNN encoder. During the novel view rendering phase, 3D points along the rays are projected onto the extracted feature grid to get aligned features, then fed into an MLP with the points. More recently, CodeNeRF [16] suggested training NeRF with learnable latent codes and utilizing test-time optimization to find the best latent codes (and camera poses) given an image. However, these methods still require multi-view supervision during training, which constrains their usage in real-world settings, where multi-view datasets are challenging to collect.
82
+
83
+ Therefore, single-shot NeRF without additional supervision (e.g., 3D objects, multi-view image collections) remains an under-explored research direction. In this paper, we bridge this gap by incorporating an auto-encoder architecture into an existing $\pi$ -GAN NeRF framework to obtain a conditional single-shot NeRF model, retaining the best properties of all components. We note that the concurrent work [31] shares similar ideas. The key differences are a different backbone network (HoloGAN [24]) and its lack of 3D consistency, which the authors point out. In contrast, we utilize the newly-proposed NeRF-based GAN method called $\pi$ -GAN [2], which naturally provides stronger 3D consistency by design. We demonstrate that merely applying the approach of [31] is insufficient to obtain an accurate mapping from image to latent space with $\pi$ -GAN as a backbone. Nevertheless, our framework can be viewed as [31] adapted specifically to NeRF-based GAN models, or CodeNeRF [16] combined with GANs.
84
+
85
+ # 3. Method
86
+
87
+ Pix2NeRF consists of three neural networks, a Generator $G$ , a Discriminator $D$ , together forming a Generative Adversarial Network, and an Encoder $E$ forming an auto-encoder together with $G$ . The generator is conditioned on the output view pose $d$ and a latent code $z$ , broadly describing content variations, such as color or shape. It employs 3D-volume rendering techniques and outputs a single parameterized scene view as RGB image $I$ . The discriminator $D$ is a CNN, which simultaneously predicts distribution origin of the input RGB image via logit $l$ (real – “in the wild”, or fake – generated by $G$ ), and the corresponding scene pose $d$ . The encoder $E$ is a CNN tasked to map an input image onto the latent manifold, learned by $G$ , and at the same time predict the input's pose:
88
+
89
+ $$
90
+ G: z, d \to I
91
+ $$
92
+
93
+ $$
94
+ D: I \rightarrow l, d \tag {1}
95
+ $$
96
+
97
+ $$
98
+ E: I \to z, d.
99
+ $$
100
+
101
+ Functionally, Pix2NeRF extends $\pi$ -GAN [2] with the encoder $E$ trained jointly with the GAN to allow mapping images back to the latent manifold. Because the encoder $E$ disentangles the content $z$ and the pose $d$ of the input $I$ , content can be further used to condition the $\pi$ -GAN generator $G$ and obtain novel views by varying the rendered pose $d$ .
102
+
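+ To make the interfaces of Eq. (1) concrete, the following minimal PyTorch sketch spells them out. The module bodies are illustrative placeholders (single linear layers standing in for the actual $\pi$ -GAN generator and the CNN encoder/discriminator backbones), and the latent/pose dimensions are assumptions for illustration only.
+
+ ```python
+ # Illustrative interface sketch for Eq. (1); not the architectures used in the paper.
+ import torch
+ import torch.nn as nn
+
+ LATENT_DIM, POSE_DIM, IMG_RES = 256, 2, 64  # assumed sizes, for illustration only
+
+ class Generator(nn.Module):          # G: (z, d) -> I
+     def __init__(self):
+         super().__init__()
+         self.net = nn.Linear(LATENT_DIM + POSE_DIM, 3 * IMG_RES * IMG_RES)
+
+     def forward(self, z, d):
+         x = self.net(torch.cat([z, d], dim=-1))
+         return x.view(-1, 3, IMG_RES, IMG_RES)
+
+ class Discriminator(nn.Module):      # D: I -> (realness logit l, pose d)
+     def __init__(self):
+         super().__init__()
+         self.net = nn.Linear(3 * IMG_RES * IMG_RES, 1 + POSE_DIM)
+
+     def forward(self, img):
+         out = self.net(img.flatten(1))
+         return out[:, :1], out[:, 1:]
+
+ class Encoder(nn.Module):            # E: I -> (z, d)
+     def __init__(self):
+         super().__init__()
+         self.net = nn.Linear(3 * IMG_RES * IMG_RES, LATENT_DIM + POSE_DIM)
+
+     def forward(self, img):
+         out = self.net(img.flatten(1))
+         z = torch.tanh(out[:, :LATENT_DIM])  # tanh keeps z inside the [-1, 1] support of p_z
+         return z, out[:, LATENT_DIM:]
+ ```
+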
103
+ Having defined network modules, we turn to specifying the inputs and outputs of the modules. The latent code $z$ comes from a simple prior distribution $p_{z}$ (multivariate uniform in our case) - it makes sampling random codes $z_{\mathrm{rand}}$ easy and lets us design $E$ such that it can encode any input image $I$ into some $z_{\mathrm{pred}}$ within the support of $p_{z}$ . Following prior art [2,35], the unsupervised setting we operate in assumes we have access to the prior distribution of poses $p_{d}$ of real images $I_{\mathrm{real}} \sim p_{\mathrm{real}}$ used for training. Depending on the dataset and choice of pose coordinates, it can be multivariate Gaussian with diagonal covariance (for images of faces) or uniform on a (hemi-)sphere (for images of cars). Parameters of this distribution must be known to allow easy sampling random poses $d_{\mathrm{rand}}$ for the generator, and that $p_{d}$ is representative of poses of real images $I_{\mathrm{real}}$ .
104
+
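+ A minimal sketch of sampling from the two priors is given below; the pitch/yaw pose parameterization follows the description above, but the standard deviations and ranges are placeholders rather than the values used for any particular dataset.
+
+ ```python
+ # Sketch of sampling the content prior p_z and the pose prior p_d.
+ import math
+ import torch
+
+ def sample_z(batch, dim=256):
+     # p_z: multivariate uniform on [-1, 1].
+     return torch.rand(batch, dim) * 2.0 - 1.0
+
+ def sample_pose_gaussian(batch, std_pitch=0.15, std_yaw=0.3):
+     # p_d for "in the wild" faces: Gaussian pitch/yaw around the canonical frontal view
+     # (the standard deviations here are illustrative placeholders).
+     pitch = torch.randn(batch, 1) * std_pitch
+     yaw = torch.randn(batch, 1) * std_yaw
+     return torch.cat([pitch, yaw], dim=-1)
+
+ def sample_pose_hemisphere(batch):
+     # p_d for objects such as cars: uniform over the upper hemisphere.
+     yaw = torch.rand(batch, 1) * 2.0 * math.pi
+     pitch = torch.acos(torch.rand(batch, 1))  # uniform in cos(pitch) is area-uniform
+     return torch.cat([pitch, yaw], dim=-1)
+
+ z_rand, d_rand = sample_z(8), sample_pose_gaussian(8)
+ ```
+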
105
+ Simply training the encoder $E$ to map an image $I$ into GAN latent space (as in Stage 1 of [31]) simultaneously with training GAN is challenging. This is because the encoder needs to correctly map images of the same scene from different views to a single latent code. This is especially hard when these views contain variations of fine details due to occlusions. As seen from Eq. 1 and the design Fig. 2, our method disentangles latent representation of image mapped by the encoder and generator input into content $z$ and pose $d$ , which undergo separate treatment.
106
+
107
+ Given an input image, Pix2NeRF disentangles pose and content and produces a radiance field of the content, which is (1) consistent with the input under the disentangled pose
108
+
109
+ ![](images/8df12082cf8d982f7ff6aa0c016f4dbe4f6c50014f577a99bc620788443e0c04.jpg)
110
+ Figure 2. Overview of building blocks and objectives, used in Pix2NeRF. GAN objectives follow $\pi$ -GAN [2] and ensure that NeRF outputs match the distribution of real images $p_{\mathrm{real}}$ under the latent prior $p_z$ and pose prior $p_d$ . Reconstruction and GAN inversion objectives ensure calibrated latent representations, such that $E$ and $G$ can operate as an auto-encoder, similar to [31]. The conditional adversarial objective enables learning better representations without explicit pose supervision. Legend: green - trained module, blue - frozen, gradient - warm-up.
111
+
112
+ and (2) consistent and realistic under different poses from $p_d$ . To achieve these properties, we devise several training objectives for (1) generator, (2) discriminator, (3) GAN inversion, (4) reconstruction, and (5) conditional adversarial training.
113
+
114
+ These objectives are used to compute gradients for parameters of $G$ , $D$ , and $E$ within a single optimization process. However, certain parts remain "frozen" during optimizer updates (such as $G$ during $D$ updates and vice-versa); we denote them with an asterisk in equations (e.g., $G^{*}$ ) and blue color in Fig. 2. We empirically find that training encoder from the start has a detrimental effect on the whole pipeline and employ a warm-up strategy (denoted with green-blue transitions), explained further.
115
+
116
+ # 3.1. GAN generator objective
117
+
118
+ The generator is trained to "fool" the discriminator by serving it progressively more realistic images. Pix2NeRF follows the same procedure of training the generator as $\pi$ -GAN: it samples latent codes $z_{\mathrm{rand}} \sim p_z$ and random poses $d_{\mathrm{rand}} \sim p_d$ in pairs, which are then passed through the generator to obtain fake generated images:
119
+
120
+ $$
121
+ I_{\mathrm{gen}} = G\left(z_{\mathrm{rand}}, d_{\mathrm{rand}}\right), \tag{2}
122
+ $$
123
+
124
+ which are further fed into the frozen discriminator:
125
+
126
+ $$
127
+ l_{\mathrm{gen}}, d_{\mathrm{gen}} = D^{*}\left(I_{\mathrm{gen}}\right). \tag{3}
128
+ $$
129
+
130
+ Following [2], another component helpful to the stability and performance of GAN training is MSE supervision of predicted poses $d_{\mathrm{gen}}$ of images generated with $d_{\mathrm{rand}}$ . It penalizes the generator if the image pose recovered by the discriminator does not correspond to the sampled pose, thus setting the goal of learning a "canonical" 3D space. This is especially helpful if the pose distribution of real data is
131
+
132
+ noisy, such as seen in CelebA [19].
133
+
134
+ $$
135
+ \mathcal{L}_{\mathrm{GAN}}(G) = \mathbb{E}_{\substack{z_{\mathrm{rand}} \sim p_{z} \\ d_{\mathrm{rand}} \sim p_{d}}} \left[ \operatorname{softplus}(-l_{\mathrm{gen}}) + \lambda_{\mathrm{pos}} \left\| d_{\mathrm{rand}} - d_{\mathrm{gen}} \right\|_{2}^{2} \right], \tag{4}
136
+ $$
141
+
142
+ where $\lambda_{\mathrm{pos}}$ is a tuned weighting factor.
143
+
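+ In code, Eqs. (2)-(4) amount to a few lines. The sketch below assumes `G` and `D` expose the interfaces of Eq. (1) and that `z_rand`, `d_rand` are drawn from the priors; keeping the discriminator frozen simply means stepping only the generator optimizer on this loss. The default $\lambda_{\mathrm{pos}} = 15$ matches the CelebA setting reported in Sec. 4.1.
+
+ ```python
+ # Sketch of the GAN generator objective, Eqs. (2)-(4).
+ import torch.nn.functional as F
+
+ def generator_gan_loss(G, D, z_rand, d_rand, lambda_pos=15.0):
+     img_gen = G(z_rand, d_rand)                        # Eq. (2)
+     logit_gen, d_gen = D(img_gen)                      # Eq. (3); D acts as D* (not updated here)
+     loss = F.softplus(-logit_gen).mean()               # non-saturating adversarial term
+     return loss + lambda_pos * F.mse_loss(d_gen, d_rand)  # pose-consistency term
+ ```
+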
144
+ # 3.2. GAN discriminator objective
145
+
146
+ The discriminator is trained to distinguish between the generated fake samples and real data sampled from the dataset. Pix2NeRF follows the exact procedure of training the discriminator in $\pi$ -GAN: it samples latent codes $z_{\mathrm{rand}} \sim p_z$ and random poses $d_{\mathrm{rand}} \sim p_d$ in pairs, which are then passed through the frozen generator to obtain fake generated images:
147
+
148
+ $$
149
+ I_{\mathrm{gen}} = G^{*}\left(z_{\mathrm{rand}}, d_{\mathrm{rand}}\right). \tag{5}
150
+ $$
151
+
152
+ The discriminator is then trained using these generated images $I_{\mathrm{gen}}$ and real images $I_{\mathrm{real}} \sim p_{\mathrm{real}}$ :
153
+
154
+ $$
155
+ l_{\mathrm{real}}, d_{\mathrm{real}} = D\left(I_{\mathrm{real}}\right), \tag{6}
156
+ $$
157
+
158
+ $$
159
+ l_{\mathrm{gen}}, d_{\mathrm{gen}} = D\left(I_{\mathrm{gen}}\right).
160
+ $$
161
+
162
+ The discriminator objective modified to take into account MSE supervision over the known pose can then be formulated as follows:
163
+
164
+ $$
165
+ \mathcal{L}_{\mathrm{GAN}}(D) = \mathbb{E}_{I_{\mathrm{real}} \sim p_{\mathrm{real}}} \left[ \operatorname{softplus}(-l_{\mathrm{real}}) \right] + \mathbb{E}_{\substack{z_{\mathrm{rand}} \sim p_{z} \\ d_{\mathrm{rand}} \sim p_{d}}} \left[ \operatorname{softplus}(l_{\mathrm{gen}}) + \lambda_{\mathrm{pos}} \left\| d_{\mathrm{rand}} - d_{\mathrm{gen}} \right\|_{2}^{2} \right], \tag{7}
166
+ $$
175
+
176
+ where $\lambda_{\mathrm{pos}}$ is a tuned weighting factor.
177
+
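+ The corresponding sketch for Eqs. (5)-(7) mirrors the generator objective; wrapping the generator call in `torch.no_grad()` is one way to let it play the role of the frozen $G^{*}$ .
+
+ ```python
+ # Sketch of the GAN discriminator objective, Eqs. (5)-(7).
+ import torch
+ import torch.nn.functional as F
+
+ def discriminator_gan_loss(G, D, img_real, z_rand, d_rand, lambda_pos=15.0):
+     with torch.no_grad():
+         img_gen = G(z_rand, d_rand)                    # Eq. (5); G acts as G* here
+     logit_real, _ = D(img_real)                        # Eq. (6)
+     logit_gen, d_gen = D(img_gen)
+     loss = F.softplus(-logit_real).mean() + F.softplus(logit_gen).mean()
+     return loss + lambda_pos * F.mse_loss(d_gen, d_rand)  # pose supervision on generated images
+ ```
+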
178
+ # 3.3. GAN inversion objective
179
+
180
+ The encoder $E$ is jointly optimized with the discriminator $D$ and reuses $I_{\mathrm{gen}}$ computed for GAN discriminator objective Eq. (5):
181
+
182
+ $$
183
+ z_{\mathrm{pred}}, d_{\mathrm{pred}} = E\left(I_{\mathrm{gen}}\right). \tag{8}
184
+ $$
185
+
186
+ This objective aims to ensure consistency between the sampled content and pose and those extracted from the generated image by the encoder. This is done using the MSE loss:
187
+
188
+ $$
189
+ \mathcal{L}_{\mathrm{GAN}^{-1}}(E) = \mathbb{E}_{\substack{z_{\mathrm{rand}} \sim p_{z} \\ d_{\mathrm{rand}} \sim p_{d}}} \left[ \left\| z_{\mathrm{pred}} - z_{\mathrm{rand}} \right\|_{2}^{2} + \left\| d_{\mathrm{pred}} - d_{\mathrm{rand}} \right\|_{2}^{2} \right]. \tag{9}
190
+ $$
195
+
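+ A short sketch of Eqs. (8)-(9): the encoder is trained to recover the code and pose that produced a generated image; `img_gen` is assumed to be detached so that this term updates only `E` .
+
+ ```python
+ # Sketch of the GAN inversion objective, Eqs. (8)-(9).
+ import torch.nn.functional as F
+
+ def gan_inversion_loss(E, img_gen, z_rand, d_rand):
+     z_pred, d_pred = E(img_gen)                        # Eq. (8); img_gen reused from Eq. (5)
+     return F.mse_loss(z_pred, z_rand) + F.mse_loss(d_pred, d_rand)  # Eq. (9)
+ ```
+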
196
+ Up until now, the objectives have only ensured a generative mapping from the latent space to radiance fields and some basic form of consistency to learn an auto-encoder. However, our experiments show that optimizing just these three objectives does not produce a reasonable mapping. Therefore, Pix2NeRF adds two more objectives to address reconstruction quality and 3D consistency in the unsupervised setting.
197
+
198
+ # 3.4. Reconstruction objective
199
+
200
+ While the GAN inversion objective promotes consistency in latent space, nothing so far directly promotes consistency in the image space. To this end, we condition the generator $G$ on a real image by extracting its latent code and pose prediction using the encoder, and then render its view using the predicted pose:
201
+
202
+ $$
203
+ z_{\mathrm{pred}}, d_{\mathrm{pred}} = E\left(I_{\mathrm{real}}\right) \tag{10}
204
+ $$
205
+
206
+ $$
207
+ I_{\mathrm{recon}} = G\left(z_{\mathrm{pred}}, d_{\mathrm{pred}}\right).
208
+ $$
209
+
210
+ Ideally, we expect to get back the original image. However, using MSE loss alone in the image space is known to promote structural inconsistencies and blur. In line with [31], we employ Structural Similarity Index Measure loss (SSIM [42]) with weighting factor $\lambda_{\mathrm{ssim}}$ and a perceptual loss (VGG [44]) with weighting factor $\lambda_{\mathrm{vgg}}$ . We can therefore aggregate the reconstruction loss as follows:
211
+
212
+ $$
213
+ \mathcal{L}_{\mathrm{recon}}(G, E) = \mathbb{E}_{I_{\mathrm{real}} \sim p_{\mathrm{real}}} \left[ \left\| I_{\mathrm{recon}} - I_{\mathrm{real}} \right\|_{2}^{2} + \lambda_{\mathrm{ssim}} \mathcal{L}_{\mathrm{ssim}}\left(I_{\mathrm{recon}}, I_{\mathrm{real}}\right) + \lambda_{\mathrm{vgg}} \mathcal{L}_{\mathrm{vgg}}\left(I_{\mathrm{recon}}, I_{\mathrm{real}}\right) \right]. \tag{11}
214
+ $$
223
+
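+ Below is one way to sketch Eq. (11) in PyTorch. The SSIM term here is a simplified global-statistics variant (no sliding window), the perceptual term uses truncated torchvision VGG-16 features without the usual ImageNet normalization, and the layer choice is an assumption; the paper only specifies that SSIM [42] and a VGG perceptual loss are used.
+
+ ```python
+ # Sketch of the reconstruction objective, Eqs. (10)-(11).
+ import torch
+ import torch.nn.functional as F
+ from torchvision.models import vgg16
+
+ _vgg = vgg16(weights="IMAGENET1K_V1").features[:16].eval()  # illustrative feature extractor
+ for p in _vgg.parameters():
+     p.requires_grad_(False)
+
+ def ssim_loss(x, y, c1=0.01 ** 2, c2=0.03 ** 2):
+     # Simplified SSIM over whole images; returns 1 - SSIM so that lower is better.
+     mu_x, mu_y = x.mean(dim=(1, 2, 3)), y.mean(dim=(1, 2, 3))
+     var_x, var_y = x.var(dim=(1, 2, 3)), y.var(dim=(1, 2, 3))
+     cov = ((x - mu_x.view(-1, 1, 1, 1)) * (y - mu_y.view(-1, 1, 1, 1))).mean(dim=(1, 2, 3))
+     ssim = ((2 * mu_x * mu_y + c1) * (2 * cov + c2)) / \
+            ((mu_x ** 2 + mu_y ** 2 + c1) * (var_x + var_y + c2))
+     return (1.0 - ssim).mean()
+
+ def reconstruction_loss(G, z_pred, d_pred, img_real, lambda_ssim=1.0, lambda_vgg=1.0):
+     # z_pred, d_pred = E(img_real) as in Eq. (10); passed in so the caller controls
+     # whether gradients flow back into the encoder (see the warm-up in Sec. 3.6).
+     img_recon = G(z_pred, d_pred)
+     loss = F.mse_loss(img_recon, img_real)
+     loss = loss + lambda_ssim * ssim_loss(img_recon, img_real)
+     loss = loss + lambda_vgg * F.mse_loss(_vgg(img_recon), _vgg(img_real))
+     return loss
+ ```
+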
224
+ # 3.5. Conditional adversarial objective
225
+
226
+ The reconstruction objective promotes good reconstruction quality for just one view extracted by the encoder $E$ . This may push the combination of networks towards either predicting trivial poses or unrealistic reconstructions for other poses from $p_d$ . To alleviate that, we further apply an
227
+
228
+ adversarial objective while conditioning the generator on an image $I_{\mathrm{real}}$ when it is rendered from random poses. Reusing results from Eq. (10),
229
+
230
+ $$
231
+ l_{\mathrm{cond}}, d_{\mathrm{cond}} = D^{*}\left(G\left(z_{\mathrm{pred}}, d_{\mathrm{rand}}\right)\right)
232
+ $$
233
+
234
+ $$
235
+ \mathcal{L}_{\mathrm{cond}}(G, E) = \mathbb{E}_{\substack{I_{\mathrm{real}} \sim p_{\mathrm{real}} \\ d_{\mathrm{rand}} \sim p_{d}}} \left[ \operatorname{softplus}(-l_{\mathrm{cond}}) \right]. \tag{12}
236
+ $$
237
+
238
+ # 3.6. Encoder warm-up
239
+
240
+ As pointed out in [31], the reconstruction loss may easily dominate and cause the model to overfit to input views while losing its ability to represent 3D. We, therefore, introduce a simple "warm-up" strategy to counter this issue. For the first half of the training iterations, we freeze the encoder while optimizing the reconstruction and conditional adversarial losses, so that only the generator is optimized for these two objectives. This serves as a warm-up for the generator to roughly learn the correspondence between encoder outputs and encoded images. The encoder is then unfrozen, enabling further distillation of its learned representations.
241
+
242
+ After the warm-up stage, the encoder and generator directly form a pre-trained auto-encoder capable of producing 3D representations close to ground truth, bypassing the cumbersome early-stage reconstruction objective, which is extremely hard to balance with GAN objectives. We show the necessity of this strategy and comparison with merely assigning a smaller weight for reconstruction loss in the ablation studies.
243
+
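+ One way to implement the freeze is to cut the gradient path from these two objectives into the encoder during the first half of training, as in the sketch below; detaching the encoder outputs is an implementation assumption, since the paper only states that the encoder is frozen for these objectives and unfrozen afterwards.
+
+ ```python
+ # Sketch of the encoder warm-up rule from Sec. 3.6.
+ def encode_for_reconstruction(E, img_real, step, total_steps):
+     z_pred, d_pred = E(img_real)
+     if step < total_steps // 2:                  # warm-up: only the generator is trained
+         z_pred, d_pred = z_pred.detach(), d_pred.detach()
+     return z_pred, d_pred
+ ```
+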
244
+ # 3.7. Training and Inference
245
+
246
+ The objectives mentioned above can be trained jointly; however, we optimize them in alternating iterations due to GPU memory constraints. The discriminator and GAN inversion objectives are optimized at every iteration; the GAN generator objective is optimized on even iterations; reconstruction and conditional adversarial objectives are optimized jointly during odd iterations with weighting factor $\lambda_{\mathrm{recon}}$:
247
+
248
+ $$
249
+ \mathcal{L}_{\mathrm{odd}} = \mathcal{L}_{\mathrm{cond}} + \lambda_{\mathrm{recon}} \mathcal{L}_{\mathrm{recon}}. \tag{13}
250
+ $$
251
+
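+ Putting the pieces together, the alternating schedule can be sketched as below, reusing the illustrative helpers from the previous sections. The data iterator is a random-tensor stand-in, and the optimizer partitioning is an assumption; only the hyperparameters (Adam, 300k iterations, $\lambda_{\mathrm{recon}} = 5$ , the CelebA learning rates and batch size) follow Sec. 4.1. This is a simplification, not the authors' training script.
+
+ ```python
+ # Sketch of the alternating optimization of Sec. 3.7 (uses the helpers sketched above).
+ import torch
+ import torch.nn.functional as F
+
+ G, D, E = Generator(), Discriminator(), Encoder()
+ opt_G = torch.optim.Adam(G.parameters(), lr=6e-5)
+ opt_D = torch.optim.Adam(D.parameters(), lr=2e-4)
+ opt_E = torch.optim.Adam(E.parameters(), lr=2e-4)
+ lambda_recon, total_steps = 5.0, 300_000
+ real_image_batches = iter(lambda: torch.rand(48, 3, IMG_RES, IMG_RES), None)  # stand-in for a real data loader
+
+ for step in range(total_steps):
+     img_real = next(real_image_batches)
+     z_rand = sample_z(img_real.size(0), LATENT_DIM)
+     d_rand = sample_pose_gaussian(img_real.size(0))
+
+     # Every iteration: discriminator and GAN inversion objectives (Secs. 3.2, 3.3).
+     loss_D = discriminator_gan_loss(G, D, img_real, z_rand, d_rand)
+     loss_inv = gan_inversion_loss(E, G(z_rand, d_rand).detach(), z_rand, d_rand)
+     opt_D.zero_grad(); opt_E.zero_grad()
+     (loss_D + loss_inv).backward()
+     opt_D.step(); opt_E.step()
+
+     if step % 2 == 0:
+         # Even iterations: GAN generator objective (Sec. 3.1).
+         loss_G = generator_gan_loss(G, D, z_rand, d_rand)
+         opt_G.zero_grad(); loss_G.backward(); opt_G.step()
+     else:
+         # Odd iterations: conditional adversarial + weighted reconstruction, Eq. (13).
+         z_pred, d_pred = encode_for_reconstruction(E, img_real, step, total_steps)
+         logit_cond, _ = D(G(z_pred, d_rand))     # conditional adversarial term, Eq. (12)
+         loss_odd = F.softplus(-logit_cond).mean() \
+                    + lambda_recon * reconstruction_loss(G, z_pred, d_pred, img_real)
+         opt_G.zero_grad(); opt_E.zero_grad()
+         loss_odd.backward()
+         opt_G.step(); opt_E.step()               # E receives no gradient during warm-up
+ ```
+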
252
+ During the inference stage, Pix2NeRF only requires a single input image, which can be fed into the encoder $E$ and then generator $G$ , coupled with arbitrarily selected poses for novel view synthesis. At the same time, instead of obtaining the latent code $z$ from the encoder, it is possible to sample it from the prior distribution $p_{z}$ , to make the model synthesize novel samples like a $\pi$ -GAN.
253
+
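+ For completeness, a minimal inference sketch covering both modes is given below, assuming the same illustrative modules and prior samplers as above.
+
+ ```python
+ # Inference sketch: condition on a single image, then render arbitrary poses.
+ import torch
+
+ @torch.no_grad()
+ def render_novel_views(E, G, img, novel_poses):
+     z_pred, _ = E(img)                           # disentangle content; the input pose is not needed here
+     return [G(z_pred, d) for d in novel_poses]   # same content rendered under new poses
+
+ @torch.no_grad()
+ def sample_unconditionally(G, num):
+     # pi-GAN-style generation: draw content and pose from the priors instead of the encoder.
+     return G(sample_z(num, LATENT_DIM), sample_pose_gaussian(num))
+ ```
+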
254
+ # 4. Experiments
255
+
256
+ # 4.1. Evaluation
257
+
258
+ Datasets. We train and evaluate our pipeline on several 3D datasets listed below. CelebA [19] is a dataset of over 200k
259
+
260
+ ![](images/9b8fc887f4777b10a53a9c91c6cb7bfacaca6895d4d2c0e4e5caa9b2aa824eec.jpg)
261
+ input reconstruction
262
+ novel views
263
+ Figure 3. Reconstructed and novel views on CARLA [8], CelebA [19], and ShapeNet-SRN [4,38] chairs. See Appendix for more results.
264
+
265
+ images of celebrity faces. We use its "aligned" version and apply center cropping to roughly keep the face area. We hold out 8k images as the test set. CARLA [8] contains 10k images of 16 car models rendered with the CARLA driving simulator with random textures. ShapeNet-SRN is a dataset hosted by the authors of SRN [38], from which we use the "chairs" split for the comparison with prior multi-view methods. The dataset contains 50 rendered views from ShapeNet [4] with Archimedean spiral camera poses for each of the 6591 instances. As the ShapeNet-SRN dataset does not include the lower hemisphere in its validation and test sets, we filter the training set to contain only the upper hemisphere as well.
266
+
267
+ Evaluation metrics. Pix2NeRF is evaluated in two modes: unconditional, which assumes sampling directly from $p_z$ and $p_d$ , and conditional, which corresponds to using $z =$
268
+
269
+ $E(I_{\mathrm{real}})$ , $I_{\mathrm{real}} \sim p_{\mathrm{real}}$ , while still sampling from $p_d$ . For "in the wild" datasets, as we do not possess multi-view ground truth images, we resort to reporting generative metrics: Inception Score (IS) [34], Frechet Inception Distance (FID) [15], and Kernel Inception Distance (KID) [1] with scaling factor $\times 100$ following the steps of prior works [2,35] using the implementation [27]. To compare with multi-view-based novel view synthesis methods on Shapenet-SRN, we follow the evaluation protocols in pixelNeRF and CodeNeRF and report PSNR (Peak Signal to Noise Ratio) and SSIM (Structural Similarity Index Measure) [42].
270
+
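+ As a usage note, the generative metrics can be computed with the torch-fidelity package [27] roughly as follows; the directory names and flags are illustrative rather than the authors' exact evaluation configuration, and KID is reported scaled by $\times 100$ as stated above.
+
+ ```python
+ # Illustrative IS / FID / KID computation with torch-fidelity [27].
+ import torch_fidelity
+
+ metrics = torch_fidelity.calculate_metrics(
+     input1="renders/",        # folder of generated (or reconstructed) images
+     input2="test_images/",    # folder of real reference images
+     isc=True, fid=True, kid=True,
+     cuda=True, verbose=False,
+ )
+ print(metrics["inception_score_mean"],
+       metrics["frechet_inception_distance"],
+       metrics["kernel_inception_distance_mean"] * 100)  # scale KID by 100 as in the paper
+ ```
+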
271
+ Technical details. We choose the latent code prior distribution $p_{z}$ as a multivariate uniform on $[-1, 1]$ . We build our model on top of the $\pi$ -GAN implementation in PyTorch [29], re-using its released generator and discriminator architectures.
272
+
273
+ <table><tr><td rowspan="2">Method</td><td colspan="3">64 × 64</td><td colspan="3">128 × 128</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td></tr><tr><td>HoloGAN [24]</td><td>-</td><td>2.87</td><td>-</td><td>39.7</td><td>2.91</td><td>1.89</td></tr><tr><td>GRAF [35]</td><td>-</td><td>-</td><td>-</td><td>41.1</td><td>2.29</td><td>2.34</td></tr><tr><td>π-GAN [2]</td><td>5.15</td><td>0.09</td><td>2.28</td><td>14.7</td><td>0.39</td><td>2.62</td></tr><tr><td>Pix2NeRF unconditional</td><td>6.25</td><td>0.16</td><td>2.29</td><td>14.82</td><td>0.91</td><td>2.47</td></tr><tr><td>Pix2NeRF conditional</td><td>24.64</td><td>1.93</td><td>2.24</td><td>30.98</td><td>2.29</td><td>2.20</td></tr></table>
274
+
275
+ Table 1. Quantitative results on CelebA [19].
276
+
277
+ <table><tr><td rowspan="2">Method</td><td colspan="3">64 × 64</td><td colspan="3">128 × 128</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td></tr><tr><td>HoloGAN [24]</td><td>134</td><td>9.70</td><td>-</td><td>67.5</td><td>3.95</td><td>3.52</td></tr><tr><td>GRAF [35]</td><td>30</td><td>0.91</td><td>-</td><td>41.7</td><td>2.43</td><td>3.70</td></tr><tr><td>π-GAN [2]</td><td>13.59</td><td>0.34</td><td>3.85</td><td>29.2</td><td>1.36</td><td>4.27</td></tr><tr><td>Pix2NeRF unconditional</td><td>10.54</td><td>0.37</td><td>3.95</td><td>27.23</td><td>1.43</td><td>4.38</td></tr><tr><td>Pix2NeRF conditional</td><td>12.06</td><td>0.44</td><td>3.81</td><td>38.51</td><td>2.37</td><td>3.89</td></tr></table>
278
+
279
+ Table 2. Quantitative results on CARLA [8].
280
+
281
+ <table><tr><td>Method</td><td colspan="2">PSNR ↑</td><td>SSIM ↑</td></tr><tr><td>GRF* [40]</td><td colspan="2">21.25</td><td>0.86</td></tr><tr><td>TCO* [39]</td><td colspan="2">21.27</td><td>0.88</td></tr><tr><td>dGQN* [12]</td><td colspan="2">21.59</td><td>0.87</td></tr><tr><td>ENR* [11]</td><td colspan="2">22.83</td><td>-</td></tr><tr><td>SRN** [38]</td><td colspan="2">22.89</td><td>0.89</td></tr><tr><td>PixelNeRF* [46]</td><td colspan="2">23.72</td><td>0.91</td></tr><tr><td>CodeNeRF** [16]</td><td colspan="2">22.39</td><td>0.87</td></tr><tr><td>Pix2NeRF conditional</td><td colspan="2">18.14</td><td>0.84</td></tr><tr><td>Method</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td></tr><tr><td>HoloGAN [24]</td><td>-</td><td>1.54</td><td>-</td></tr><tr><td>π-GAN [2]</td><td>15.47</td><td>0.55</td><td>4.62</td></tr><tr><td>Pix2NeRF unconditional</td><td>14.31</td><td>0.51</td><td>4.62</td></tr><tr><td>Pix2NeRF conditional</td><td>17.55</td><td>0.59</td><td>4.36</td></tr></table>
282
+
283
+ Table 3. Quantitative results on ShapeNet-SRN [4, 38] chairs. Top: reconstruction metrics $(128\times 128)$ . Bottom: generative metrics $(64\times 64)$ . Legend: * - requires multi-view training data; ** - requires multi-view training data and test time optimization.
284
+
285
+ We also use the discriminator architecture as the backbone of our encoder, where we add a tanh at the end of the latent code head. All models are optimized with the Adam [17] optimizer for 300k iterations, which is approximately the same computational cost as obtaining a $\pi$ -GAN model. CelebA [19] models are trained with batch size 48 on resolution $64 \times 64$ , where we sample 24 points per ray. We use learning rates of 2e-4, 6e-5, and 2e-4 for discriminator, generator, and encoder, respectively. For all other models, we utilized $\pi$ -GAN [2]'s progressive training strategy, starting with training on resolution $32 \times 32$ with learning rates 4e-5, 4e-4, and 4e-4 for generator, discriminator, and encoder, respectively, with 96 sampled points per ray. We increase to resolution $64 \times 64$ with learning rates 2e-5, 2e-4, and 2e-4 for generator, discriminator, and encoder, respectively,
286
+
287
+ and sample 72 points per ray after 50k iterations. We empirically set $\lambda_{\mathrm{recon}} = 5$ , $\lambda_{\mathrm{ssim}} = 1$ and $\lambda_{\mathrm{vgg}} = 1$ for all datasets. For CelebA [19], we follow [2] and set $\lambda_{\mathrm{pos}} = 15$ . For CARLA [8] and ShapeNet-SRN [4, 38], we set $\lambda_{\mathrm{pos}} = 0$ as we do not observe a significant difference. We use $|z| = 512$ for CelebA [19] and $|z| = 256$ for CARLA [8] and Shapenet-SRN [4, 38].
288
+
289
+ Quantitative results. We show the evaluation on CelebA [19] and CARLA [8] in Tables 1 and 2 respectively. We also show evaluation with the same generative metrics on ShapeNet-SRN in Table 3 (bottom). We observe that even though our model's conditional synthesis is not as good as our backbone $\pi$ -GAN (especially on CelebA), it is on par with other prior 3D view generation methods [24, 35].
290
+
291
+ Since we do not explicitly enforce prior distribution $p_{z}$ on the encoded samples $E(I_{\mathrm{real}})$ from $p_{\mathrm{real}}$ , the image of $p_{\mathrm{real}}$ resulting from the encoder mapping may occupy a small portion in $p_{z}$ . Thus, conditioning on $p_{\mathrm{real}}$ naturally leads to a smaller variation in samples from $p_{z}$ , and hence, smaller diversity of NeRF outputs. For this reason, directly sampling randomly from $p_{z}$ (unconditionally) achieves better performance as measured by the generative metrics. Additionally, our generator outperforms $\pi$ -GAN on most metrics on CARLA [8] and ShapeNet-SRN [4, 38]. Results on CelebA [19] are less consistent due to dataset noise (background, geometry, pose noise, artifacts, etc.), encouraging GANs to converge towards the mean as a trade-off to variations. These observations can be related to manifold learning [9], where we enforce the existence of a latent code for each real image in the train set.
292
+
293
+ We compare our method with other single-image 3D inference methods in Table 3 on ShapeNet-SRN [4, 38] in $128 \times 128$ resolution. Since our model assumes a strictly-spherical camera parameterization model, which does not correspond well to the ground truth poses of ShapeNet-SRN [4, 38], we use our encoder to extract poses from the images.
294
+
295
+ Despite being generative, unsupervised, and not requiring test time optimization in contrast to all other methods, our model's performance does not drop much below the competition. Considering that other models were trained at $128 \times 128$ resolution, while our models were trained at $64 \times 64$ but rendered at $128 \times 128$ resolution, we observe a super-resolution effect.
296
+
297
+ Qualitative results. We show some qualitative results of our model's performance on CARLA [8] and CelebA [19] in Fig. 3. We can see that our model can synthesize novel views with good quality while existing few-shot NeRF methods [16, 40, 46] are not able to train on these "in the wild" datasets due to the lack of multi-view supervision. Our model can also produce decent 3D representations even under extreme poses and artifacts (see row 5).
298
+
299
+ ![](images/4bcc4e98fba0b0b6781beeabd5d2defe5ba6e3a1d4975e79da82da69c36ccdd7.jpg)
300
+ Figure 4. Qualitative results of ablation studies, obtained with an image from the test split of CelebA [19]. $\lambda_{\mathrm{recon}}$ is set to 1 for lower reconstruction weights instead of the warm-up ablation. See Appendix for results obtained by using other $\lambda_{\mathrm{recon}}$ values.
301
+
302
+ # 4.2. Ablation studies
303
+
304
+ We perform a thorough ablation study to verify our design choices by removing the key components one by one and training models under settings identical to the full model. Qualitative results for the following ablations are in Fig. 4; refer to the Appendix for the corresponding quantitative results.
305
+
306
+ Naive GAN inversion. We compare Pix2NeRF with naive GAN inversion: having a pre-trained GAN, we freeze its weights and train an encoder to map images to their corresponding latent codes. The results show that the encoder can learn an approximate mapping from images to latent code. However, due to the lack of joint distillation, the reconstruction is off from the input image.
307
+
308
+ Auto-encoder. Another potential approach is to utilize $\pi$ -GAN's architecture as an auto-encoder, in which the latent prior is dropped from the pipeline and only the reconstruction and conditional adversarial objectives are trained. Under this setting, while the reconstruction achieves decent quality, we can observe visible 3D inconsistency, suggesting difficulty of optimization with the remaining objectives.
309
+
310
+ No GAN inversion. We proceed with ablations by removing the GAN inversion step from the pipeline. The visual results turn out to be blurry and uncanny compared with full settings. One possible explanation is that this step is a connection between $\pi$ -GAN training and reconstruction, which significantly affects the overall performance.
311
+
312
+ No conditional adversarial objective. We further deactivate the conditional adversarial loss and retrain the model. As a result, the renderings become incomplete and have clear
313
+
314
+ visual artifacts. In addition, 3D consistency degrades significantly, which justifies this objective in the given setting.
315
+
316
+ Warm-up. To verify the effect of the warm-up strategy, we train three separate models and compare their performances: without warm-up, without unfreezing encoder (always warm-up), and assigning a lower weight for reconstruction instead of the warm-up. Without the warm-up strategy, the model tends to overfit the input view and cannot produce meaningful content from novel poses. If we only use the warm-up strategy and never unfreeze the encoder, the distillation is relatively weak, which results in few fine details. With lower reconstruction weight instead of the warm-up, the balance between reconstruction and adversarial objective is missing, resulting in mode collapse for novel view synthesis.
317
+
318
+ # 5. Conclusions
319
+
320
+ In this paper, we introduced Pix2NeRF, a novel unsupervised single-shot framework capable of translating an input image of a scene into a neural radiance field (NeRF), thereby performing single-shot novel view synthesis. The key idea of Pix2NeRF is to utilize generative NeRF models to interpolate missing geometry information. This is accomplished by jointly training an encoder that maps images to a latent space, which disentangles content and pose, and the generative NeRF model while keeping these two parts dependent on each other. Pix2NeRF can go beyond the auto-encoder setting and perform novel scene generation by sampling random content and pose and passing through the generator. Our framework demonstrates high reconstruction quality and 3D consistency, on par and better than previous works.
321
+
322
+ Limitations and future work. The current setting in consideration is limited to one category per dataset and cannot directly generalize beyond the chosen category. Alternative research directions include local conditional fields similar to PixelNeRF [46] and GRF [40], which can generalize to unseen categories, multi-instance, and even real-world scenes. Being a general framework, Pix2NeRF is not limited to using $\pi$ -GAN as its backbone. Newer generative NeRF models, e.g. EG3D [3] could potentially achieve better visual quality. Additionally, architecture search, especially with respect to the encoder remains a challenging problem. Utilizing more mature encoder architectures from 2D GAN feed-forward inversion literature, e.g. pixel2style2pixel [32], could potentially improve the performance of Pix2NeRF significantly.
323
+
324
+ Ethical consideration. As with most modern conditional generative models, Pix2NeRF can be misused by generating content to spread misinformation or perform targeted attacks. The growing popularity of deepfake celebrity accounts in social media suggests that new use cases, markets, and novel ways of monetizing this kind of data will follow.
325
+
326
+ # References
327
+
328
+ [1] Mikołaj Binkowski, Danica J. Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans, 2021. 6, 1
329
+ [2] Eric Chan, Marco Monteiro, Petr Kellnhofer, Jiajun Wu, and Gordon Wetzstein. pi-gan: Periodic implicit generative adversarial networks for 3d-aware image synthesis. In arXiv, 2020. 2, 3, 4, 6, 7
330
+ [3] Eric R. Chan, Connor Z. Lin, Matthew A. Chan, Koki Nagano, Boxiao Pan, Shalini De Mello, Orazio Gallo, Leonidas Guibas, Jonathan Tremblay, Sameh Khamis, Tero Karras, and Gordon Wetzstein. Efficient geometry-aware 3D generative adversarial networks. In arXiv, 2021. 8
331
+ [4] Angel X. Chang, Thomas A. Funkhouser, Leonidas J. Guibas, Pat Hanrahan, Qi-Xing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, Jianxiong Xiao, Li Yi, and Fisher Yu. Shapenet: An information-rich 3d model repository. CoRR, abs/1512.03012, 2015. 6, 7, 1, 4
332
+ [5] Anpei Chen, Zexiang Xu, Fuqiang Zhao, Xiaoshuai Zhang, Fanbo Xiang, Jingyi Yu, and Hao Su. Mvsnerf: Fast generalizable radiance field reconstruction from multi-view stereo, 2021. 2
333
+ [6] Zhiqin Chen and Hao Zhang. Learning implicit fields for generative shape modeling. Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 2
334
+ [7] Julian Chibane, Aayush Bansal, Verica Lazova, and Gerard Pons-Moll. Stereo radiance fields (srf): Learning view synthesis for sparse views of novel scenes. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 7911-7920, June 2021. 2
335
+ [8] Alexey Dosovitskiy, German Ros, Felipe Codevilla, Antonio Lopez, and Vladlen Koltun. CARLA: An open urban driving simulator. In Proceedings of the 1st Annual Conference on Robot Learning, pages 1-16, 2017. 6, 7, 1, 5
336
+ [9] Yilun Du, Katherine M. Collins, Joshua B. Tenenbaum, and Vincent Sitzmann. Learning signal-agnostic manifolds of neural fields, 2021. 7
337
+ [10] Vincent Dumoulin, Ethan Perez, Nathan Schucher, Florian Strub, Harm de Vries, Aaron Courville, and Yoshua Bengio. Feature-wise transformations. Distill, 2018. https://distill.pub/2018/feature-wise-transformations.3
338
+ [11] Emilien Dupont, Miguel Bautista Martin, Alex Colburn, Aditya Sankar, Josh Susskind, and Qi Shan. Equivariant neural rendering. In International Conference on Machine Learning, pages 2761-2770. PMLR, 2020. 7
339
+ [12] SMA Eslami, DJ Rezende, F Besse, F Viola, AS Morcos, M Garnelo, A Ruderman, AA Rusu, I Danihelka, K Gregor, et al. Neural scene representation and rendering. Science, 360(6394):1204-, 2018. 7
340
+ [13] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Y. Bengio. Generative adversarial networks. Advances in Neural Information Processing Systems, 3, 06 2014. 2
341
+ [14] Jiatao Gu, Lingjie Liu, Peng Wang, and Christian Theobalt. Stylenerf: A style-based 3d-aware generator for high-resolution image synthesis, 2021. 3
342
+
343
+ [15] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, Günter Klambauer, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a nash equilibrium. CoRR, abs/1706.08500, 2017. 6, 1
344
+ [16] Wonbong Jang and Lourdes Agapito. Codenerf: Disentangled neural radiance fields for object categories, 2021. 2, 3, 7
345
+ [17] Diederik Kingma and Jimmy Ba. Adam: A method for stochastic optimization. International Conference on Learning Representations, 12 2014. 7
346
+ [18] Chen-Hsuan Lin, Chaoyang Wang, and Simon Lucey. Sdfsrn: Learning signed distance 3d object reconstruction from static images. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 1
347
+ [19] Ziwei Liu, Ping Luo, Xiaogang Wang, and Xiaou Tang. Deep learning face attributes in the wild. In Proceedings of International Conference on Computer Vision (ICCV), December 2015. 4, 5, 6, 7, 8, 1, 2, 3
348
+ [20] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In CVPR, 2021. 2
349
+ [21] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. Gnerf: Gan-based neural radiance field without posed camera. arXiv preprint arXiv:2103.15606, 2021. 2
350
+ [22] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy networks: Learning 3d reconstruction in function space. In Proceedings IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2019. 2
351
+ [23] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 1, 2
352
+ [24] Thu Nguyen-Phuoc, Chuan Li, Lucas Theis, Christian Richardt, and Yong-Liang Yang. Hologan: Unsupervised learning of 3d representations from natural images. In The IEEE International Conference on Computer Vision (ICCV), Nov 2019. 2, 3, 7
353
+ [25] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2021. 3
354
+ [26] Michael Niemeyer, Lars Mescheder, Michael Oechsle, and Andreas Geiger. Differentiable volumetric rendering: Learning implicit 3d representations without 3d supervision. In Proc. IEEE Conf. on Computer Vision and Pattern Recognition (CVPR), 2020. 2
355
+ [27] Anton Obukhov, Maximilian Seitzer, Po-Wei Wu, Semen Zhydenko, Jonathan Kyl, and Elvis Yu-Jing Lin. High-fidelity performance metrics for generative models in pytorch, 2020. Version: 0.3.0, DOI: 10.5281/zenodo.4957738.6
356
+ [28] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. Deepsdf: Learning continuous signed distance functions for shape representation. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 2
357
+
358
+ [29] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, Alban Desmaison, Andreas Köpf, Edward Yang, Zach DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. Pytorch: An imperative style, high-performance deep learning library. CoRR, abs/1912.01703, 2019. 6
359
+ [30] Ethan Perez, Florian Strub, Harm de Vries, Vincent Dumoulin, and Aaron C. Courville. Film: Visual reasoning with a general conditioning layer. CoRR, abs/1709.07871, 2017. 3
360
+ [31] Pierluigi Zama Ramirez, Alessio Tonioni, and Federico Tombari. Unsupervised novel view synthesis from a single image. CoRR, abs/2102.03285, 2021. 2, 3, 4, 5, 1
361
+ [32] Elad Richardson, Yuval Alaluf, Or Patashnik, Yotam Nitzan, Yaniv Azar, Stav Shapiro, and Daniel Cohen-Or. Encoding in style: a stylegan encoder for image-to-image translation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2021. 8
362
+ [33] Shunsuke Saito, Zeng Huang, Ryota Natsume, Shigeo Morishima, Angjoo Kanazawa, and Hao Li. Pifu: Pixel-aligned implicit function for high-resolution clothed human digitization. In The IEEE International Conference on Computer Vision (ICCV), October 2019. 2
363
+ [34] Tim Salimans, Ian J. Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. CoRR, abs/1606.03498, 2016. 6, 1
364
+ [35] Katja Schwarz, Yiyi Liao, Michael Niemeyer, and Andreas Geiger. Graf: Generative radiance fields for 3d-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2, 3, 6, 7
365
+ [36] Vincent Sitzmann, Eric R. Chan, Richard Tucker, Noah Snavely, and Gordon Wetzstein. Metasdf: Meta-learning signed distance functions. In arXiv, 2020. 2
366
+ [37] Vincent Sitzmann, Julien N.P. Martel, Alexander W. Bergman, David B. Lindell, and Gordon Wetzstein. Implicit neural representations with periodic activation functions. In Proc. NeurIPS, 2020. 3
367
+ [38] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure
368
+
369
+ aware neural scene representations. In Advances in Neural Information Processing Systems, 2019. 2, 6, 7, 1, 4
370
+ [39] Maxim Tatarchenko, Alexey Dosovitskiy, and Thomas Brox. Multi-view 3d models from single images with a convolutional network, 2016. 7
371
+ [40] Alex Trevithick and Bo Yang. Grf: Learning a general radiance field for 3d scene representation and rendering. In arXiv:2010.04595, 2020. 2, 3, 7, 8
372
+ [41] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo MartinBrualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In CVPR, 2021. 2
373
+ [42] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 5, 6, 1
374
+ [43] Zirui Wang, Shangzhe Wu, Weidi Xie, Min Chen, and Victor Adrian Prisacariu. NeRF-: Neural radiance fields without known camera parameters. https://arxiv.org/abs/2102.07064, 2021. 2
375
+ [44] Shangzhe Wu, Christian Rupprecht, and Andrea Vedaldi. Unsupervised learning of probably symmetric deformable 3d objects from images in the wild. In CVPR, 2020. 2, 5
376
+ [45] Xudong Xu, Xingang Pan, Dahua Lin, and Bo Dai. Generative occupancy fields for 3d surface-aware image synthesis. In Advances in Neural Information Processing Systems (NeurIPS), 2021. 3
377
+ [46] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. In CVPR, 2021. 2, 3, 7, 8
378
+ [47] Yuxuan Zhang, Wenzheng Chen, Huan Ling, Jun Gao, Yinan Zhang, Antonio Torralba, and Sanja Fidler. Image gans meet differentiable rendering for inverse graphics and interpretable 3d neural rendering. In International Conference on Learning Representations, 2021. 2
379
+ [48] Peng Zhou, Lingxi Xie, Bingbing Ni, and Qi Tian. Cips3d: A 3d-aware generator of gans based on conditionally-independent pixel synthesis, 2021. 3
380
+
381
+ # Pix2NeRF: Unsupervised Conditional $\pi$ -GAN for Single Image to Neural Radiance Fields Translation
382
+
383
+ Supplementary Material
384
+
385
+ <table><tr><td rowspan="2">Method</td><td colspan="3">128 × 128</td><td colspan="2">64 × 64</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>PSNR ↑</td><td>SSIM ↑</td></tr><tr><td>Pix2NeRF unconditional</td><td>26.45</td><td>1.18</td><td>4.39</td><td>-</td><td>-</td></tr><tr><td>Pix2NeRF conditional</td><td>26.81</td><td>1.23</td><td>4.27</td><td>18.75</td><td>0.82</td></tr></table>
386
+
387
+ Table 4. Additional quantitative results on ShapeNet-SRN [4, 38].
388
+
389
+ <table><tr><td rowspan="2">Method</td><td colspan="3">CelebA 64 × 64</td><td colspan="5">ShapeNet-SRN 64 × 64</td></tr><tr><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>FID ↓</td><td>KID ↓</td><td>IS ↑</td><td>PSNR ↑</td><td>SSIM ↑</td></tr><tr><td>A</td><td>28.90</td><td>2.99</td><td>1.62</td><td>34.01</td><td>1.73</td><td>3.65</td><td>15.91</td><td>0.71</td></tr><tr><td>B</td><td>43.19</td><td>2.84</td><td>1.33</td><td>43.06</td><td>2.49</td><td>2.92</td><td>16.27</td><td>0.71</td></tr><tr><td>C</td><td>39.42</td><td>3.07</td><td>1.65</td><td>41.47</td><td>2.80</td><td>2.96</td><td>15.14</td><td>0.68</td></tr><tr><td>D</td><td>33.92</td><td>2.84</td><td>1.87</td><td>35.72</td><td>1.74</td><td>3.75</td><td>16.81</td><td>0.77</td></tr><tr><td>E</td><td>31.31</td><td>2.75</td><td>1.95</td><td>21.67</td><td>0.89</td><td>4.35</td><td>18.03</td><td>0.79</td></tr><tr><td>F</td><td>39.86</td><td>3.18</td><td>1.73</td><td>27.70</td><td>1.22</td><td>4.09</td><td>16.98</td><td>0.77</td></tr><tr><td>G</td><td>73.52</td><td>7.47</td><td>1.91</td><td>27.10</td><td>1.31</td><td>4.26</td><td>17.77</td><td>0.79</td></tr><tr><td>H</td><td>73.03</td><td>7.08</td><td>1.97</td><td>41.11</td><td>2.27</td><td>3.34</td><td>14.98</td><td>0.74</td></tr><tr><td>I</td><td>140.25</td><td>16.33</td><td>1.79</td><td>184.10</td><td>17.19</td><td>2.55</td><td>10.95</td><td>0.59</td></tr><tr><td>J</td><td>168.59</td><td>18.89</td><td>1.50</td><td>266.64</td><td>30.29</td><td>1.98</td><td>10.28</td><td>0.47</td></tr><tr><td>Full</td><td>24.64</td><td>1.93</td><td>2.24</td><td>17.55</td><td>0.59</td><td>4.36</td><td>18.75</td><td>0.82</td></tr></table>
390
+
391
+ Table 5. Quantitative results of ablation study on CelebA [19] and ShapeNet-SRN [4,38]. "Full" denotes Pix2NeRF conditional setup.
392
+ Table 6. Input view reconstruction (PSNR, SSIM) on a test set, and novel view synthesis (FID, KID $\times$ 100, IS).
393
+
394
+ <table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>FID↓</td><td>KID↓</td><td>IS↑</td></tr><tr><td>Pix2NeRF E + frozen π-GAN G</td><td>13.04</td><td>0.46</td><td>28.25</td><td>2.97</td><td>1.52</td></tr><tr><td>π-GAN optimization (200 iterations)</td><td>23.42</td><td>0.80</td><td>16.09</td><td>0.83</td><td>2.10</td></tr><tr><td>π-GAN optimization (700 iterations)</td><td>24.21</td><td>0.82</td><td>17.14</td><td>0.72</td><td>2.14</td></tr><tr><td>Pix2NeRF (feed-forward)</td><td>17.95</td><td>0.67</td><td>24.82</td><td>1.93</td><td>2.21</td></tr><tr><td>Pix2NeRF (200 iterations)</td><td>27.12</td><td>0.89</td><td>12.86</td><td>0.64</td><td>2.27</td></tr><tr><td>Pix2NeRF (1000 iterations)</td><td>27.73</td><td>0.90</td><td>12.01</td><td>0.62</td><td>2.30</td></tr></table>
395
+
396
+ # A. Additional qualitative results
397
+
398
+ We demonstrate additional qualitative results achieved by Pix2NeRF on three datasets: CelebA [19], ShapeNet-SRN chairs [4, 38], and CARLA [8] in Figures 6, 7, and 8, respectively.
399
+
400
+ # B. Additional quantitative results
401
+
402
+ Table 4 provides additional quantitative results on ShapeNet-SRN [4, 38], with generative metrics computed at $128 \times 128$ resolution and reconstruction metrics computed at $64 \times 64$ resolution. We do not report PSNR and SSIM for CelebA [19] as there are no ground-truth novel views.
403
+
404
+ # C. Additional ablation study
405
+
406
+ We provide quantitative results for each ablation on CelebA [19] and ShapeNet-SRN [4, 38] to further verify our
407
+
408
+ design choices. As in the ablation study in our main paper, we report FID [15], KID [1] and IS [34] for CelebA [19], and additionally report PSNR and SSIM [42] on ShapeNet-SRN [4, 38]. We measure all results after inference at $64 \times 64$ resolution. We show quantitative ablation results in Table 5. Legend: A - naive GAN inversion; B - auto-encoder; C - no GAN inversion; D - no conditional adversarial objective; E - no warm-up; F - always warm-up; G, H, I, J - lower weights for reconstruction instead of warm-up, with $\lambda_{\mathrm{recon}} = 1, 0.1, 0.01, 0.001$ respectively. Note that since the encoder output is not enforced to strictly follow $p_z$ , naive GAN inversion (stage 1 in [31]) failed completely due to bad initialization. We therefore use a "warmed-up" version of the generator trained for 300k iterations.
409
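+ 
+ For concreteness, the following is a minimal sketch (our own illustration, not the authors' evaluation code) of how the reconstruction metrics above, PSNR and SSIM at $64 \times 64$ resolution, can be computed with scikit-image for a batch of reconstructed/ground-truth pairs:
+ 
+ ```python
+ # Illustrative only: mean PSNR/SSIM over image pairs in [0, 1], shaped (N, 64, 64, 3).
+ # Function and variable names are ours; requires scikit-image >= 0.19 for channel_axis.
+ import numpy as np
+ from skimage.metrics import peak_signal_noise_ratio, structural_similarity
+ 
+ def reconstruction_metrics(pred, target):
+     psnr_vals, ssim_vals = [], []
+     for p, t in zip(pred, target):
+         psnr_vals.append(peak_signal_noise_ratio(t, p, data_range=1.0))
+         ssim_vals.append(structural_similarity(t, p, channel_axis=-1, data_range=1.0))
+     return float(np.mean(psnr_vals)), float(np.mean(ssim_vals))
+ ```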
+
410
+ # D. Input reconstruction and hybrid optimization
411
+
412
+ We ran extra ablations and summarized our model's performance by providing both input reconstruction (cols. 2, 3) and novel view synthesis (cols. 4, 5, 6) results in Tab. 6 (row 4). In row 3, we show $\pi$ -GAN latent optimization on an input image for 700 iterations, as recommended by its authors; note that it requires time-consuming per-instance optimization due to NeRF's rendering mechanism. Additionally, we use the Pix2NeRF encoder's output as a starting point and perform latent optimization with a frozen Pix2NeRF generator for only 200 iterations, shown in row 5. A qualitative comparison is shown in Fig. 5. Note that our model does not overfit the input view even with 1000 iterations of input-view optimization (row 6), while $\pi$ -GAN shows strong artifacts and requires a search for the optimal number of iterations.
413
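+ 
+ To make the procedure concrete, here is a minimal sketch of the hybrid inference described above: the encoder's output initializes the latent code, the generator stays frozen, and only the code is refined against the single input view. The `encoder` and `generator` callables, their signatures, and the hyperparameters are placeholders for illustration, not the released implementation.
+ 
+ ```python
+ # Hybrid optimization sketch: feed-forward prediction followed by a short latent
+ # refinement with a frozen generator (illustrative; interfaces are assumed).
+ import torch
+ import torch.nn.functional as F
+ 
+ def refine_latent(encoder, generator, image, pose, num_iters=200, lr=1e-2):
+     with torch.no_grad():
+         z = encoder(image)                  # feed-forward initialization
+     z = z.clone().requires_grad_(True)
+     for p in generator.parameters():        # keep the generator frozen
+         p.requires_grad_(False)
+     opt = torch.optim.Adam([z], lr=lr)
+     for _ in range(num_iters):
+         opt.zero_grad()
+         recon = generator(z, pose)          # render the input view
+         loss = F.mse_loss(recon, image)     # photometric reconstruction loss
+         loss.backward()
+         opt.step()
+     return z.detach()
+ ```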
+
414
+ # E. Necessity of generator distilling
415
+
416
+ We trained the encoder with a pretrained, frozen $\pi$ -GAN generator using all the losses. As can be seen in Tab. 6 (row 1), the model struggles to capture details accurately without fine-tuning the generator jointly.
417
+
418
+ # F. Linear interpolation
419
+
420
+ We interpolate novel views between two different input images by predicting their corresponding latent codes and poses, then linearly interpolating to obtain intermediate codes and poses. We show the results of interpolating between five images in Figure 9.
421
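+ 
+ A minimal sketch of this interpolation (our own illustration, with placeholder names): latent codes and pose parameters are blended with the same weight and then rendered as usual.
+ 
+ ```python
+ # Linear interpolation between two predicted (code, pose) pairs (illustrative).
+ import torch
+ 
+ def interpolate(z1, z2, pose1, pose2, steps=8):
+     codes, poses = [], []
+     for alpha in torch.linspace(0.0, 1.0, steps):
+         codes.append((1 - alpha) * z1 + alpha * z2)
+         poses.append((1 - alpha) * pose1 + alpha * pose2)
+     return codes, poses
+ ```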
+
422
+ ![](images/163c2cb75f0f6580593868d080cce7290887dbe3808719d95a12dfd86f93f645.jpg)
423
+ Figure 5. Qualitative comparison on CelebA. Top - input, middle - reconstruction, bottom - novel view synthesis.
424
+
425
+ # G. Limitations and failure cases
426
+
427
+ Despite being trained on images without pose or 3D supervision, Pix2NeRF can reconstruct objects from a single image with decent quality. However, encoding an entire image into a single latent code is challenging, especially on noisy datasets such as CelebA [19], and Pix2NeRF cannot always capture fine details accurately. We observe failure cases when the input is out-of-distribution relative to the training distribution $p_{\mathrm{real}}$ , as shown in Figure 10. It might be possible to improve these hard cases by introducing pixel-wise features instead of (or in addition to) the global latent code, as done in pixelNeRF [46] and GRF [40].
428
+
429
+ ![](images/ea4ceed7db54088b46f5b6072efcae6cfe293e638b455444f1110d4f61d324ed.jpg)
430
+ input reconstruction
431
+ novel views
432
+ Figure 6. Further reconstructions and novel views on CelebA [19].
433
+
434
+ ![](images/5f882f63b67a9dd2226d173970188a8b9aaf3c26e34d8127b9fc56382293ae8d.jpg)
435
+ input reconstruction
436
+ novel views
437
+ Figure 7. Further reconstructions and novel views on ShapeNet-SRN [4, 38].
438
+
439
+ ![](images/fdf8be2d5cb597e79d5853dbe733d4cf232227b601473d3ca2abf15190b4256f.jpg)
440
+ input reconstruction
441
+ novel views
442
+ Figure 8. Further reconstructions and novel views on CARLA [8].
443
+
444
+ ![](images/4e3e5aeaf617c9ff3b6505dbb498b0b0b3ba4956d4d79934a56cac5fdf95a42b.jpg)
445
+ image 1
446
+ Linear interpolation
447
+ image 2
448
+ Figure 9. Linear interpolation on CelebA [19].
449
+
450
+ ![](images/cf7178b4f6bd7728acc660901217a2127fc389f4acef0ca6b032c4684493bbee.jpg)
451
+ input
452
+
453
+ ![](images/1f23088284e61bd675b773eca2a05772f2a8a71819086934e1813cc59d29f55c.jpg)
454
+ reconstruction
455
+
456
+ ![](images/469856648e3d010e510bb642e82964d8ae819b1783f9323fe7fa21ef0995436b.jpg)
457
+
458
+ ![](images/e70bfc5cfb09f620cc35751093472ac052bdb1debba1df0d5998df31e118cddc.jpg)
459
+ Figure 10. Failure cases on CelebA [19] and ShapeNet-SRN [4,38].
2202.13xxx/2202.13162/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd03268bb7138e4a4c99900c019a2648b76d9a2531c8f11b1a04c3d4fbf5bc55
3
+ size 1567922
2202.13xxx/2202.13162/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13169/909a7465-0b93-460b-9a57-ce6ae5e551db_content_list.json ADDED
@@ -0,0 +1,1657 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "A SYSTEMATIC EVALUATION OF LARGE LANGUAGE MODELS OF CODE",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 171,
8
+ 99,
9
+ 823,
10
+ 146
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Frank F. Xu, Uri Alon, Graham Neubig, Vincent J. Hellendoorn",
17
+ "bbox": [
18
+ 179,
19
+ 170,
20
+ 630,
21
+ 184
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "School of Computer Science",
28
+ "bbox": [
29
+ 183,
30
+ 185,
31
+ 372,
32
+ 198
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Carnegie Mellon University",
39
+ "bbox": [
40
+ 183,
41
+ 198,
42
+ 369,
43
+ 213
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "{fangzhex,ualon,gneubig}@cs.cmu.edu,vhellendoorn@cmu.edu",
50
+ "bbox": [
51
+ 183,
52
+ 213,
53
+ 650,
54
+ 227
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "ABSTRACT",
61
+ "text_level": 1,
62
+ "bbox": [
63
+ 450,
64
+ 263,
65
+ 545,
66
+ 277
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "Large language models (LMs) of code have recently shown tremendous promise in completing code and synthesizing code from natural language descriptions. However, the current state-of-the-art code LMs (e.g., Codex (Chen et al., 2021)) are not publicly available, leaving many questions about their model and data design decisions. We aim to fill in some of these blanks through a systematic evaluation of the largest existing models: Codex, GPT-J, GPT-Neo, GPT-NeoX-20B, and CodeParrot, across various programming languages. Although Codex itself is not open-source, we find that existing open-source models do achieve close results in some programming languages, although targeted mainly for natural language modeling. We further identify an important missing piece in the form of a large open-source model trained exclusively on a multi-lingual corpus of code. We release a new model, PolyCoder, with 2.7B parameters based on the GPT-2 architecture, that was trained on 249GB of code across 12 programming languages on a single machine. In the C programming language, PolyCoder outperforms all models including Codex. Our trained models are open-source and publicly available at https://github.com/VHellendoorn/Code-LMs, which enables future research and application in this area.",
73
+ "bbox": [
74
+ 228,
75
+ 291,
76
+ 769,
77
+ 529
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "1 INTRODUCTION",
84
+ "text_level": 1,
85
+ "bbox": [
86
+ 173,
87
+ 551,
88
+ 336,
89
+ 566
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "Language models (LMs) assign probabilities to sequences of tokens, and are widely applied to natural language text (Bengio et al., 2003; Baevski & Auli, 2018; Brown et al., 2020). Recently, LMs have shown impressive performance in modeling also source code, written in programming languages (Hindle et al., 2016; Hellendoorn & Devanbu, 2017; Alon et al., 2020; Karampatsis et al., 2020). These models excel at useful downstream tasks like code completion (Raychev et al., 2014) and synthesizing code from natural language descriptions (Desai et al., 2016). The current state-of-the-art large language models for code, such as Austin et al. (2021), have shown significant progress for AI-based programming assistance. Most notably, one of the largest of these models, Codex (Chen et al., 2021) has been deployed in the real-world production tool GitHub Copilot<sup>1</sup>, as an in-IDE developer assistant that automatically generates code based on the user's context.",
96
+ "bbox": [
97
+ 169,
98
+ 580,
99
+ 826,
100
+ 722
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "Despite the great success of large language models of code, the strongest models are not publicly available. This prevents the application of these models outside of well-resourced companies and limits research in this field for low-resourced organizations. For example, Codex provides non-free access to the model's output through black-box API calls,[2] but the model's weights and training data are unavailable. This prevents researchers from fine-tuning and adapting this model to domains and tasks other than code completion. The lack of access to the model's internals also prevents the research community from studying other key aspects of these models, such as interpretability, distillation of the model for more efficient deployment, and incorporating additional components such as retrieval.",
107
+ "bbox": [
108
+ 169,
109
+ 727,
110
+ 826,
111
+ 852
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "Several medium to large-sized pre-trained language models are publicly available, such as GPT-Neo (Black et al., 2021), GPT-J (Wang & Komatsuzaki, 2021) and GPT-NeoX (Black et al., 2022).",
118
+ "bbox": [
119
+ 169,
120
+ 859,
121
+ 828,
122
+ 888
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "header",
128
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
129
+ "bbox": [
130
+ 171,
131
+ 32,
132
+ 537,
133
+ 47
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "page_footnote",
139
+ "text": "1https://copilot.github.com/",
140
+ "bbox": [
141
+ 189,
142
+ 896,
143
+ 401,
144
+ 910
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "page_footnote",
150
+ "text": "2https://openai.com/blog/openai-codex/",
151
+ "bbox": [
152
+ 192,
153
+ 910,
154
+ 473,
155
+ 922
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "aside_text",
161
+ "text": "arXiv:2202.13169v3 [cs.PL] 4 May 2022",
162
+ "bbox": [
163
+ 22,
164
+ 266,
165
+ 60,
166
+ 700
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "page_number",
172
+ "text": "1",
173
+ "bbox": [
174
+ 493,
175
+ 948,
176
+ 503,
177
+ 959
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "Despite being trained on a mixture of a wide variety of text including news articles, online forums, and just a modest selection of (GitHub) software repositories (Gao et al., 2020), these language models can be used to generate source code with a reasonable performance Chen et al. (2021). In addition, there are a few open-source language models that are trained solely on source code. For example, CodeParrot (Tunstall et al., 2022) was trained on 180 GB of Python code.",
184
+ "bbox": [
185
+ 169,
186
+ 103,
187
+ 826,
188
+ 174
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "Given the variety of model sizes and training schemes involved in these models and lack of comparisons between these, the impact of many modeling and training design decisions remains unclear. For instance, we do not know the precise selection of data on which Codex and other private models were trained; however, we do know that some public models (e.g., GPT-J) were trained on a mix of natural language and code in multiple programming languages, while other models (e.g., CodeParrot) were trained solely on code in one particular programming language. Multilingual models potentially provide better generalization, because different programming languages share similar keywords and properties, as shown by the success of multilingual models for natural language (Conneau & Lample, 2019) and for code (Zügner et al., 2021). This may hint that multilingual LMs can generalize across languages, outperform monolingual models and be useful for modeling low-resource programming languages, but this is yet to be verified empirically.",
195
+ "bbox": [
196
+ 169,
197
+ 180,
198
+ 826,
199
+ 335
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "In this paper, we present a systematic evaluation of existing models of code – Codex, GPT-J, GPT-Neo, GPT-NeoX, and CodeParrot – across various programming languages. We aim to shed more light on the landscape of code modeling design decisions by comparing and contrasting these models, as well as providing a key missing link: thus far, no large open-source language model was trained exclusively on code from multiple programming languages. We provide three such models, ranging from 160M to 2.7B parameters, which we release under the umbrella name “PolyCoder”. First, we perform an extensive comparison of the training and evaluation settings between PolyCoder, open-source models, and Codex. Second, we evaluate the models on the HumanEval benchmark (Chen et al., 2021) and compare how do models of different sizes and training steps scale, and how different temperatures affect the generation quality. Finally, since HumanEval only evaluates the natural language to Python synthesis, we curate an unseen evaluation dataset in each of the 12 languages, to evaluate the perplexity of different models. We find that although Codex is allegedly focused on Python (Chen et al. (2021) §3.1), Codex performs surprisingly well in other programming languages too, and even better than GPT-J and GPT-NeoX that were trained on the Pile (Gao et al., 2020). Nonetheless, in the C programming language, our PolyCoder model achieves a lower perplexity than all these models, including Codex.",
206
+ "bbox": [
207
+ 169,
208
+ 340,
209
+ 826,
210
+ 561
211
+ ],
212
+ "page_idx": 1
213
+ },
214
+ {
215
+ "type": "text",
216
+ "text": "Although most current models perform worse than Codex, we hope that this systematic study helps future research in this area to design more efficient and effective models. More importantly, through this systematic evaluation of different models, we encourage the community to study and release medium-large scale language models for code, in response to the concerns expressed by Hellendoorn & Sawant (2021):",
217
+ "bbox": [
218
+ 169,
219
+ 569,
220
+ 823,
221
+ 638
222
+ ],
223
+ "page_idx": 1
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "[...] this exploding trend in cost to achieve the state of the art has left the ability to train and test such models limited to a select few large technology companies—and way beyond the resources of virtually all academic labs.",
228
+ "bbox": [
229
+ 233,
230
+ 643,
231
+ 759,
232
+ 686
233
+ ],
234
+ "page_idx": 1
235
+ },
236
+ {
237
+ "type": "text",
238
+ "text": "We believe that our efforts are a significant step towards democratization of large language models of code.",
239
+ "bbox": [
240
+ 169,
241
+ 691,
242
+ 823,
243
+ 720
244
+ ],
245
+ "page_idx": 1
246
+ },
247
+ {
248
+ "type": "text",
249
+ "text": "2 RELATED WORK",
250
+ "text_level": 1,
251
+ "bbox": [
252
+ 171,
253
+ 743,
254
+ 346,
255
+ 758
256
+ ],
257
+ "page_idx": 1
258
+ },
259
+ {
260
+ "type": "text",
261
+ "text": "At the core of code modeling lies ongoing work on pretraining of language models (LMs). Large-scale pretraining of LMs has had an astounding impact on natural language processing in recent years (Han et al., 2021). Figure 1 provides an overview of how different models compare in size and availability.",
262
+ "bbox": [
263
+ 169,
264
+ 776,
265
+ 826,
266
+ 821
267
+ ],
268
+ "page_idx": 1
269
+ },
270
+ {
271
+ "type": "text",
272
+ "text": "2.1 PRETRAINING METHODS",
273
+ "text_level": 1,
274
+ "bbox": [
275
+ 171,
276
+ 839,
277
+ 388,
278
+ 853
279
+ ],
280
+ "page_idx": 1
281
+ },
282
+ {
283
+ "type": "text",
284
+ "text": "We discuss three popular pretraining methods used in code language modeling. An illustration of these methods are shown in Figure 2.",
285
+ "bbox": [
286
+ 169,
287
+ 864,
288
+ 823,
289
+ 895
290
+ ],
291
+ "page_idx": 1
292
+ },
293
+ {
294
+ "type": "header",
295
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
296
+ "bbox": [
297
+ 171,
298
+ 32,
299
+ 535,
300
+ 47
301
+ ],
302
+ "page_idx": 1
303
+ },
304
+ {
305
+ "type": "page_footnote",
306
+ "text": "3The exact training set that Codex was trained on is unknown.",
307
+ "bbox": [
308
+ 191,
309
+ 909,
310
+ 560,
311
+ 922
312
+ ],
313
+ "page_idx": 1
314
+ },
315
+ {
316
+ "type": "page_number",
317
+ "text": "2",
318
+ "bbox": [
319
+ 493,
320
+ 948,
321
+ 504,
322
+ 959
323
+ ],
324
+ "page_idx": 1
325
+ },
326
+ {
327
+ "type": "image",
328
+ "img_path": "images/8901ec7df14752b423b34e5c99e95a10731491311b938f7f2cf25cb96b17bd25.jpg",
329
+ "image_caption": [
330
+ "Figure 1: Existing language models of code, their sizes and availability (open source vs. not open-source)."
331
+ ],
332
+ "image_footnote": [],
333
+ "bbox": [
334
+ 246,
335
+ 109,
336
+ 750,
337
+ 247
338
+ ],
339
+ "page_idx": 2
340
+ },
341
+ {
342
+ "type": "image",
343
+ "img_path": "images/fb903304fc761cc4e48a0afedfb8cf30ade78db9160e691caf3bba079f453ebf.jpg",
344
+ "image_caption": [
345
+ "Figure 2: Three types of pretrained language models."
346
+ ],
347
+ "image_footnote": [],
348
+ "bbox": [
349
+ 173,
350
+ 323,
351
+ 825,
352
+ 417
353
+ ],
354
+ "page_idx": 2
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "Left-to-Right Language Models (Figure 2, left) Auto-regressive, Left-to-right LMs, predict the probability of a token given the previous tokens. In code modeling, CodeGPT (124M) (Lu et al., 2021), CodeParrot (1.5B) (Tunstall et al., 2022), GPT-Neo (2.7B) (Black et al., 2021), GPT-J (6B) (Wang & Komatsuzaki, 2021), Codex (12B) (Chen et al., 2021), GPT-NeoX (20B) (Black et al., 2022), and Google's (137B) (Austin et al., 2021) belong to this category. The left-to-right nature of these models makes them highly useful for program generation tasks, such as code completion. On the other hand, as code is usually not written in a single, left-to-write pass, it is not trivial to leverage context that appears \"after\" the location of the generation. In this paper, we focus on this family of models and will discuss the existing models in more detail in the following sections.",
359
+ "bbox": [
360
+ 169,
361
+ 483,
362
+ 826,
363
+ 609
364
+ ],
365
+ "page_idx": 2
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "Masked Language Models (Figure 2, middle) While auto-regressive language models are powerful for modeling the probability of sequences, their unidirectional nature makes them less suitable for producing effective whole-sequence representations for downstream tasks such as classification. One popular bidirectional objective function used widely in representation learning is masked language modeling (Devlin et al., 2018), where the aim is to predict masked text pieces based on surrounding context. CodeBERT (125M) (Feng et al., 2020) and CuBERT (345M) (Kanade et al., 2020) are examples of such models in code. In programming contexts, these methods provide useful representations of a sequence of code for downstream tasks such as code classification, clone detection, and defect detection.",
370
+ "bbox": [
371
+ 169,
372
+ 633,
373
+ 826,
374
+ 758
375
+ ],
376
+ "page_idx": 2
377
+ },
378
+ {
379
+ "type": "text",
380
+ "text": "Encoder-decoder Models (Figure 2, right) An encoder-decoder model first uses an encoder to encode an input sequence, and then uses a left-to-right LM to decode an output sequence conditioned on the input sequence. Popular pretraining objectives include masked span prediction (Raffel et al., 2019) where the input sequence is randomly masked with multiple masks and the output sequence are the masked contents in order, and denoising sequence reconstruction (Lewis et al., 2019) where the input is a corrupted sequence and the output is the original sequence. These pretrained models are useful in many sequence-to-sequence tasks (Raffel et al., 2019). In code, CodeT5 (220M) (Wang et al., 2021), and PLBART (406M) (Ahmad et al., 2021) use the two objectives mentioned above respectively, and performs well in conditional generation downstream tasks such as code commenting, or natural language to code generation.",
381
+ "bbox": [
382
+ 169,
383
+ 784,
384
+ 826,
385
+ 924
386
+ ],
387
+ "page_idx": 2
388
+ },
389
+ {
390
+ "type": "header",
391
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
392
+ "bbox": [
393
+ 171,
394
+ 32,
395
+ 535,
396
+ 47
397
+ ],
398
+ "page_idx": 2
399
+ },
400
+ {
401
+ "type": "page_number",
402
+ "text": "3",
403
+ "bbox": [
404
+ 493,
405
+ 948,
406
+ 503,
407
+ 959
408
+ ],
409
+ "page_idx": 2
410
+ },
411
+ {
412
+ "type": "text",
413
+ "text": "2.2 PRETRAINING DATA",
414
+ "text_level": 1,
415
+ "bbox": [
416
+ 171,
417
+ 103,
418
+ 356,
419
+ 118
420
+ ],
421
+ "page_idx": 3
422
+ },
423
+ {
424
+ "type": "text",
425
+ "text": "Some models (e.g. CodeParrot and CodeT5) are trained on GitHub code only, with corpora extracted using either Google BigQuery's GitHub dataset $^{4}$ , or CodeSearchNet (Husain et al., 2019). Others (e.g., GPT-Neo and GPT-J) are trained on \"the Pile\" (Gao et al., 2020), a large corpus containing a blend of natural language texts and code from various domains, including Stack Exchange dumps, software documentations, and popular ( $>100$ stars) GitHub repositories. The datasets on which other proprietary models (Codex, Google's) were trained on are unknown. One goal of our study is to try to shed light on what corpora might be the most useful for pretraining models of code.",
426
+ "bbox": [
427
+ 169,
428
+ 128,
429
+ 826,
430
+ 228
431
+ ],
432
+ "page_idx": 3
433
+ },
434
+ {
435
+ "type": "text",
436
+ "text": "3 EVALUATION SETTINGS",
437
+ "text_level": 1,
438
+ "bbox": [
439
+ 171,
440
+ 246,
441
+ 405,
442
+ 263
443
+ ],
444
+ "page_idx": 3
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "We evaluate all models using both extrinsic and intrinsic benchmarks, as described below.",
449
+ "bbox": [
450
+ 169,
451
+ 277,
452
+ 759,
453
+ 292
454
+ ],
455
+ "page_idx": 3
456
+ },
457
+ {
458
+ "type": "text",
459
+ "text": "Extrinsic Evaluation One of the most popular downstream tasks for code modeling is code generation given a natural language description. Following Chen et al. (2021), we evaluate all models on the HumanEval dataset. The dataset contains 164 prompts with descriptions in the form of code comments and function definitions, including argument names and function names, and test cases to judge whether the generated code is correct. To generate code given a prompt, we use the same sampling strategy as Chen et al. (2021), using softmax with a temperature parameter $\\text{softmax}(x / T)$ . We evaluate using a wide range of temperatures $T = [0.2, 0.4, 0.6, 0.8]$ to control for the confidence of the model's predictions. Similarly to Codex, we use nucleus sampling (Holtzman et al., 2019) with top- $p = 0.95$ . We sample tokens from the model until we encounter one of the following stop sequences that indicate the end of a method: 'nclass', 'ndef', '\\nif', '\\nif', or '\\nprint'. We randomly sample 100 examples per prompt in the evaluation dataset.",
460
+ "bbox": [
461
+ 169,
462
+ 306,
463
+ 826,
464
+ 460
465
+ ],
466
+ "page_idx": 3
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "Intrinsic Evaluation To evaluate the intrinsic performance of different models, we compute the perplexity for each language on an unseen set of GitHub repositories. To prevent training-to-test data leakage for models such as GPT-Neo and GPT-J, we remove repositories in our evaluation dataset that appeared in the GitHub portion of the Pile training dataset $^{6}$ . To evaluate Codex, we use OpenAI's API $^{7}$ , choosing the code-davinci-001 engine. We note that the data that this model was trained on is unknown, so we cannot prevent data leakage from the training to the test set for Codex. We sampled 100 random files for each of the 12 programming languages in our evaluation dataset. To make perplexity comparable across different tokenization methods used in different models, we use Pygments $^{8}$ to equally normalize the log-likelihood sum of each model, when computing perplexity.",
471
+ "bbox": [
472
+ 169,
473
+ 473,
474
+ 826,
475
+ 602
476
+ ],
477
+ "page_idx": 3
478
+ },
479
+ {
480
+ "type": "text",
481
+ "text": "4 COMPARED MODELS",
482
+ "text_level": 1,
483
+ "bbox": [
484
+ 171,
485
+ 618,
486
+ 382,
487
+ 633
488
+ ],
489
+ "page_idx": 3
490
+ },
491
+ {
492
+ "type": "text",
493
+ "text": "4.1 EXISTING MODELS",
494
+ "text_level": 1,
495
+ "bbox": [
496
+ 171,
497
+ 648,
498
+ 349,
499
+ 662
500
+ ],
501
+ "page_idx": 3
502
+ },
503
+ {
504
+ "type": "text",
505
+ "text": "As discussed in Section 2, we mainly focus on auto-regressive left-to-right pretrained language models, most suitable for code completion tasks.",
506
+ "bbox": [
507
+ 169,
508
+ 675,
509
+ 823,
510
+ 705
511
+ ],
512
+ "page_idx": 3
513
+ },
514
+ {
515
+ "type": "text",
516
+ "text": "We evaluate Codex, as it is currently deployed in real-world and has impressive performance in code completion (Chen et al., 2021). Codex uses the GPT-3 language model (Brown et al., 2020) as its underlying model architecture. Codex was trained on a dataset spanning 179GB (after deduplication) covering over 54 million public Python repositories obtained from GitHub on May 2020. As reflected in its impressive results in other programming languages than Python, we suspect that Codex was also trained on large corpora of additional programming languages. The model available for querying through a non-free API.",
517
+ "bbox": [
518
+ 169,
519
+ 710,
520
+ 825,
521
+ 808
522
+ ],
523
+ "page_idx": 3
524
+ },
525
+ {
526
+ "type": "header",
527
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
528
+ "bbox": [
529
+ 171,
530
+ 32,
531
+ 535,
532
+ 47
533
+ ],
534
+ "page_idx": 3
535
+ },
536
+ {
537
+ "type": "page_footnote",
538
+ "text": "4https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code",
539
+ "bbox": [
540
+ 171,
541
+ 815,
542
+ 826,
543
+ 840
544
+ ],
545
+ "page_idx": 3
546
+ },
547
+ {
548
+ "type": "page_footnote",
549
+ "text": "5The absence of whitespace, which is significant in Python, signals an exit from the method body.",
550
+ "bbox": [
551
+ 192,
552
+ 842,
553
+ 769,
554
+ 856
555
+ ],
556
+ "page_idx": 3
557
+ },
558
+ {
559
+ "type": "page_footnote",
560
+ "text": "<sup>6</sup>https://github.com/EleutherAI/github-download",
561
+ "bbox": [
562
+ 192,
563
+ 856,
564
+ 547,
565
+ 869
566
+ ],
567
+ "page_idx": 3
568
+ },
569
+ {
570
+ "type": "page_footnote",
571
+ "text": "<sup>7</sup>https://beta.openai.com/docs/engines/codex-series-private-beta",
572
+ "bbox": [
573
+ 192,
574
+ 869,
575
+ 660,
576
+ 883
577
+ ],
578
+ "page_idx": 3
579
+ },
580
+ {
581
+ "type": "page_footnote",
582
+ "text": "<sup>8</sup>https://pygments.org/docs/lexers/",
583
+ "bbox": [
584
+ 192,
585
+ 883,
586
+ 444,
587
+ 897
588
+ ],
589
+ "page_idx": 3
590
+ },
591
+ {
592
+ "type": "page_footnote",
593
+ "text": "Every model uses its original tokenizer for predicting the next token. We use the shared tokenizer only for computing the perplexity given the log-likelihood sum.",
594
+ "bbox": [
595
+ 169,
596
+ 897,
597
+ 823,
598
+ 924
599
+ ],
600
+ "page_idx": 3
601
+ },
602
+ {
603
+ "type": "page_number",
604
+ "text": "4",
605
+ "bbox": [
606
+ 493,
607
+ 948,
608
+ 504,
609
+ 959
610
+ ],
611
+ "page_idx": 3
612
+ },
613
+ {
614
+ "type": "text",
615
+ "text": "As for open-source models, we compare GPT-Neo, GPT-J and GPT-NeoX, the largest variants having 2.7, 6 and 20 billion parameters, respectively. GPT-NeoX is the largest open-source pretrained language models available. These models are trained on the Pile dataset, so they are a good representatives of models that were trained on both natural language texts from various domains and source code from GitHub. We also compare CodeParrot with at most 1.5 billion parameters, a model that was only trained on Python code from GitHub. CodeParrot follows the process used in Chen et al. (2021) that obtained over 20M files Python files from Google BigQuery Github database, resulting in a 180GB dataset, which is comparable to Codex's Python training data, but the model itself is much smaller.",
616
+ "bbox": [
617
+ 169,
618
+ 103,
619
+ 826,
620
+ 228
621
+ ],
622
+ "page_idx": 4
623
+ },
624
+ {
625
+ "type": "text",
626
+ "text": "There was no large open-source language model trained almost exclusively on code from multiple programming languages. To fill this gap, we train a 2.7 billion model, PolyCoder, on a mixture of repositories from GitHub in 12 different programming languages.",
627
+ "bbox": [
628
+ 169,
629
+ 234,
630
+ 823,
631
+ 280
632
+ ],
633
+ "page_idx": 4
634
+ },
635
+ {
636
+ "type": "table",
637
+ "img_path": "images/be1ea0b45464dcb77ab8a884a2fb0666810b4604c76a7958e794c7371d265303.jpg",
638
+ "table_caption": [],
639
+ "table_footnote": [],
640
+ "table_body": "<table><tr><td>Language</td><td>Repositories</td><td>Files</td><td>Size Before Filtering</td><td>Size After Filtering</td></tr><tr><td>C</td><td>10,749</td><td>3,037,112</td><td>221G</td><td>55G</td></tr><tr><td>C#</td><td>9,511</td><td>2,514,494</td><td>30G</td><td>21G</td></tr><tr><td>C++</td><td>13,726</td><td>4,289,506</td><td>115G</td><td>52G</td></tr><tr><td>Go</td><td>12,371</td><td>1,416,789</td><td>70G</td><td>15G</td></tr><tr><td>Java</td><td>15,044</td><td>5,120,129</td><td>60G</td><td>41G</td></tr><tr><td>JavaScript</td><td>25,144</td><td>1,774,174</td><td>66G</td><td>22G</td></tr><tr><td>PHP</td><td>9,960</td><td>1,714,058</td><td>21G</td><td>13G</td></tr><tr><td>Python</td><td>25,446</td><td>1,550,208</td><td>24G</td><td>16G</td></tr><tr><td>Ruby</td><td>5,826</td><td>674,343</td><td>5.0G</td><td>4.1G</td></tr><tr><td>Rust</td><td>4,991</td><td>304,842</td><td>5.2G</td><td>3.5G</td></tr><tr><td>Scala</td><td>1,497</td><td>245,100</td><td>2.2G</td><td>1.8G</td></tr><tr><td>TypeScript</td><td>12,830</td><td>1,441,926</td><td>12G</td><td>9.2G</td></tr><tr><td>Total</td><td>147,095</td><td>24,082,681</td><td>631.4G</td><td>253.6G</td></tr></table>",
641
+ "bbox": [
642
+ 222,
643
+ 289,
644
+ 777,
645
+ 489
646
+ ],
647
+ "page_idx": 4
648
+ },
649
+ {
650
+ "type": "text",
651
+ "text": "Table 1: Training corpus statistics.",
652
+ "bbox": [
653
+ 382,
654
+ 500,
655
+ 611,
656
+ 513
657
+ ],
658
+ "page_idx": 4
659
+ },
660
+ {
661
+ "type": "text",
662
+ "text": "4.2 POLYCODER'S DATA",
663
+ "text_level": 1,
664
+ "bbox": [
665
+ 171,
666
+ 537,
667
+ 361,
668
+ 551
669
+ ],
670
+ "page_idx": 4
671
+ },
672
+ {
673
+ "type": "text",
674
+ "text": "Raw Code Corpus Collection GitHub is an excellent source for publicly available source code of various programming languages. We cloned the most popular repositories for 12 popular programming languages with at least 50 stars (stopping at about 25K per language to avoid a too heavy skew towards popular programming languages) from GitHub in October 2021. For each project, each file belonging to the majority-language of that project was extracted, yielding the initial training set. This initial, unfiltered dataset spanned 631GB and 38.9M files.",
675
+ "bbox": [
676
+ 169,
677
+ 564,
678
+ 826,
679
+ 648
680
+ ],
681
+ "page_idx": 4
682
+ },
683
+ {
684
+ "type": "text",
685
+ "text": "Data Preprocessing The detailed data preprocessing strategy comparison with other models are analyzed in Table 2. In general, we tried to follow Codex's design decisions, although there is a fair bit of ambiguity in the description of its data preprocessing.",
686
+ "bbox": [
687
+ 169,
688
+ 662,
689
+ 823,
690
+ 707
691
+ ],
692
+ "page_idx": 4
693
+ },
694
+ {
695
+ "type": "text",
696
+ "text": "Deduplication and Filtering Similarly to Codex and CodeParrot, very large (>1MB) and very short (<100 tokens) files were filtered out, reducing the size of the dataset by $33\\%$ , from 631GB to 424GB. This only reduced the total number of files by $8\\%$ , showing that a small number of files were responsible for a large part of the corpus.[10]",
697
+ "bbox": [
698
+ 169,
699
+ 719,
700
+ 823,
701
+ 777
702
+ ],
703
+ "page_idx": 4
704
+ },
705
+ {
706
+ "type": "text",
707
+ "text": "Allamanis (2019) has shown that code duplication that commonly manifests in datasets of code adversely effects language modeling of code. Therefore, we deduplicated files based on a hash of their content, which reduced the number of files by nearly $30\\%$ , and the dataset size by additional $29\\%$ , leaving 24.1M files and 254GB of data.",
708
+ "bbox": [
709
+ 169,
710
+ 782,
711
+ 823,
712
+ 839
713
+ ],
714
+ "page_idx": 4
715
+ },
716
+ {
717
+ "type": "text",
718
+ "text": "Overall, the filtering of very large and very short files plus dedduplication, reduced the number of files by $38\\%$ , and the dataset size by $61\\%$ , roughly on par with the $70\\%$ dataset size reduction reported by CodeParrot. A key difference that remains is that other approaches use more fine-grained filtering",
719
+ "bbox": [
720
+ 169,
721
+ 845,
722
+ 823,
723
+ 888
724
+ ],
725
+ "page_idx": 4
726
+ },
727
+ {
728
+ "type": "header",
729
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
730
+ "bbox": [
731
+ 171,
732
+ 32,
733
+ 535,
734
+ 47
735
+ ],
736
+ "page_idx": 4
737
+ },
738
+ {
739
+ "type": "page_footnote",
740
+ "text": "10Codex additionally mentions removing \"auto-generated\" files, but the definition of this was not clear, so we omitted this step.",
741
+ "bbox": [
742
+ 169,
743
+ 897,
744
+ 823,
745
+ 924
746
+ ],
747
+ "page_idx": 4
748
+ },
749
+ {
750
+ "type": "page_number",
751
+ "text": "5",
752
+ "bbox": [
753
+ 493,
754
+ 948,
755
+ 504,
756
+ 959
757
+ ],
758
+ "page_idx": 4
759
+ },
760
+ {
761
+ "type": "table",
762
+ "img_path": "images/aa9aa0819cf50f5b5f822d6e4282591a319e17154509c9de260059839bd246cd.jpg",
763
+ "table_caption": [],
764
+ "table_footnote": [],
765
+ "table_body": "<table><tr><td></td><td>PolyCoder</td><td>CodeParrot</td><td>Codex</td></tr><tr><td>Dedup</td><td>Exact</td><td>Exact</td><td>Unclear, mentions “unique”</td></tr><tr><td>Filtering</td><td>Files &gt; 1 MB, &lt; 100 to-kens</td><td>Files &gt; 1MB, max line length &gt; 1000, mean line length &gt; 100, fraction of alphanumeric charac-ters &lt; 0.25, containing the word “auto-generated” or similar in the first 5 lines</td><td>Files &gt; 1MB, max line length &gt; 1000, mean line length &gt; 100, auto-generated (details unclear), contained small percentage of al-phenmeric characters (details unclear)</td></tr><tr><td>Tokenization</td><td>Trained GPT-2 tok-enizer on a random 5% subset (all languages)</td><td>Trained GPT-2 tokenizer on train split</td><td>GPT-3 tokenizer, add multi-whitespace tokens to reduce re-dundant whitespace tokens</td></tr></table>",
766
+ "bbox": [
767
+ 173,
768
+ 101,
769
+ 834,
770
+ 268
771
+ ],
772
+ "page_idx": 5
773
+ },
774
+ {
775
+ "type": "text",
776
+ "text": "Table 2: Comparison of data preprocessing strategies of different models.",
777
+ "bbox": [
778
+ 254,
779
+ 277,
780
+ 740,
781
+ 292
782
+ ],
783
+ "page_idx": 5
784
+ },
785
+ {
786
+ "type": "text",
787
+ "text": "strategies, such as limiting the maximum line length or average line length, filtering of probable auto-generated files, etc. For example, Chen et al. (2021) have filtered only $11\\%$ of their training data.",
788
+ "bbox": [
789
+ 169,
790
+ 349,
791
+ 826,
792
+ 380
793
+ ],
794
+ "page_idx": 5
795
+ },
796
+ {
797
+ "type": "text",
798
+ "text": "The dataset statistics are shown in Table 1, showcasing data sizes per language before and after filtering. Our dataset contains less Python code (only 16G) than Codex or CodeParrot, and instead covers many different programming languages.",
799
+ "bbox": [
800
+ 169,
801
+ 386,
802
+ 823,
803
+ 429
804
+ ],
805
+ "page_idx": 5
806
+ },
807
+ {
808
+ "type": "text",
809
+ "text": "Tokenizer We train a GPT-2 tokenizer (using BPE (Sennrich et al., 2015)) on a random $5\\%$ subset of all the pretraining data, containing all the languages. Codex uses an existing trained GPT-3 tokenizer, with the addition of multi-whitespace tokens to reduce the sequence length after tokenization, as consecutive whitespaces are more common in code than in text.",
810
+ "bbox": [
811
+ 169,
812
+ 477,
813
+ 826,
814
+ 535
815
+ ],
816
+ "page_idx": 5
817
+ },
818
+ {
819
+ "type": "text",
820
+ "text": "4.3 POLYCODER'S TRAINING",
821
+ "text_level": 1,
822
+ "bbox": [
823
+ 171,
824
+ 585,
825
+ 393,
826
+ 599
827
+ ],
828
+ "page_idx": 5
829
+ },
830
+ {
831
+ "type": "text",
832
+ "text": "Considering our budget, we chose the GPT-2 (Radford et al., 2019) as our model architecture. To study the effect of scaling of model size, we train 3 different sized models, with 2.7 billion, 400 million and 160 million parameters, as the largest 2.7B model being on par with GPT-Neo for fair comparison. The 2.7 billion model is a 32 layer, 2,560 dimensional Transformer model, with a max context window of 2048 tokens, trained with a batch size of 128 sequences (262K tokens). The model is trained for 150K steps. The 400 million model is a 24 layer, 1,024 dimensional variant, and the 160 million model is a 12 layer, 768 dimensional variant, otherwise idem. We use GPT-NeoX toolkit to train the model efficiently in parallel with 8 Nvidia RTX 8000 GPUs on a single machine. The wall time used to train the largest 2.7B model is about 6 weeks. In its default configuration, this model should train for 320K steps, which was not feasible with our resources. Instead, we adjusted the learning rate decay to half this number and trained for up to 150K steps (near-convergence). The training and validation loss curves for different sized models are shown in Figure 3. We see that even after training for 150K steps, the validation losses are still decreasing. This, combined with the shorter training schedule and faster learning rate decay, strongly signals that the models are still under-fitting and could benefit from longer training.",
833
+ "bbox": [
834
+ 169,
835
+ 625,
836
+ 826,
837
+ 834
838
+ ],
839
+ "page_idx": 5
840
+ },
841
+ {
842
+ "type": "text",
843
+ "text": "We compare the training setting and hyperparameters with CodeParrot and Codex in Table 3. Due to high computational costs, we were unable to perform hyperparameter search. Most hyperparameters are the same as those used in their respective GPT-2 model training $^{12}$ to provide a good default with regards to the corresponding model size. Some key differences include context window sizes to allow for more tokens as context, batch sizes and tokens trained, as well as model initialization with or without natural language knowledge.",
844
+ "bbox": [
845
+ 169,
846
+ 839,
847
+ 826,
848
+ 925
849
+ ],
850
+ "page_idx": 5
851
+ },
852
+ {
853
+ "type": "header",
854
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
855
+ "bbox": [
856
+ 171,
857
+ 32,
858
+ 535,
859
+ 47
860
+ ],
861
+ "page_idx": 5
862
+ },
863
+ {
864
+ "type": "page_number",
865
+ "text": "6",
866
+ "bbox": [
867
+ 493,
868
+ 948,
869
+ 504,
870
+ 959
871
+ ],
872
+ "page_idx": 5
873
+ },
874
+ {
875
+ "type": "table",
876
+ "img_path": "images/e629b8e4fbd8556c8a8b4aa29e27e63a98781b6b90b25ca696dfe9ec3fba0c85.jpg",
877
+ "table_caption": [],
878
+ "table_footnote": [],
879
+ "table_body": "<table><tr><td></td><td>PolyCoder (2.7B)</td><td>CodeParrot (1.5B)</td><td>Codex (12B)</td></tr><tr><td>Model Initialization</td><td>From scratch</td><td>From scratch</td><td>Initialized from GPT-3</td></tr><tr><td>NL Knowledge</td><td>Learned from comments in the code</td><td>Learned from comments in the code</td><td>Natural language knowledge from GPT-3</td></tr><tr><td>Learning Rate</td><td>1.6e-4</td><td>2.0e-4</td><td>1e-4</td></tr><tr><td>Optimizer</td><td>AdamW</td><td>AdamW</td><td>AdamW</td></tr><tr><td>Adam betas</td><td>0.9, 0.999</td><td>0.9, 0.999</td><td>0.9, 0.95</td></tr><tr><td>Adam eps</td><td>1e-8</td><td>1e-8</td><td>1e-8</td></tr><tr><td>Weight Decay</td><td>-</td><td>0.1</td><td>0.1</td></tr><tr><td>Warmup Steps</td><td>1600</td><td>750</td><td>175</td></tr><tr><td>Learning Rate Decay</td><td>Cosine</td><td>Cosine</td><td>Cosine</td></tr><tr><td>Batch Size (#tokens)</td><td>262K</td><td>524K</td><td>2M</td></tr><tr><td>Training Steps</td><td>150K steps, 39B tokens</td><td>50K steps, 26B tokens</td><td>100B tokens</td></tr><tr><td>Context Window</td><td>2048</td><td>1024</td><td>4096</td></tr></table>",
880
+ "bbox": [
881
+ 179,
882
+ 101,
883
+ 821,
884
+ 308
885
+ ],
886
+ "page_idx": 6
887
+ },
888
+ {
889
+ "type": "text",
890
+ "text": "Table 3: Comparison of design decisions and hyper-parameters in training different models of code.",
891
+ "bbox": [
892
+ 169,
893
+ 328,
894
+ 823,
895
+ 344
896
+ ],
897
+ "page_idx": 6
898
+ },
899
+ {
900
+ "type": "image",
901
+ "img_path": "images/ce4ec24158b3fc74f61e49ee28e94ba448bbbb9762fb2b6cac7ff6cd5a5f9402.jpg",
902
+ "image_caption": [
903
+ "(a) Training"
904
+ ],
905
+ "image_footnote": [],
906
+ "bbox": [
907
+ 181,
908
+ 364,
909
+ 491,
910
+ 489
911
+ ],
912
+ "page_idx": 6
913
+ },
914
+ {
915
+ "type": "image",
916
+ "img_path": "images/531b5a4ca06b7a48c158abde843453c21e2ce156e9fae318204a014701bc4f6f.jpg",
917
+ "image_caption": [
918
+ "(b) Validation",
919
+ "Figure 3: Training and validation loss during the 150K step training process."
920
+ ],
921
+ "image_footnote": [],
922
+ "bbox": [
923
+ 500,
924
+ 362,
925
+ 816,
926
+ 489
927
+ ],
928
+ "page_idx": 6
929
+ },
930
+ {
931
+ "type": "text",
932
+ "text": "5 RESULTS",
933
+ "text_level": 1,
934
+ "bbox": [
935
+ 171,
936
+ 569,
937
+ 282,
938
+ 584
939
+ ],
940
+ "page_idx": 6
941
+ },
942
+ {
943
+ "type": "text",
944
+ "text": "5.1 EXTRINSIC EVALUATION",
945
+ "text_level": 1,
946
+ "bbox": [
947
+ 171,
948
+ 602,
949
+ 387,
950
+ 616
951
+ ],
952
+ "page_idx": 6
953
+ },
954
+ {
955
+ "type": "text",
956
+ "text": "The overall results are shown in Table 4. $^{13}$ The numbers are obtained by sampling with different temperatures and picking the best value for each metric. Among existing models, PolyCoder is worse than similarly sized GPT-Neo and the even smaller Codex 300M. Overall, PolyCoder lies after Codex, GPT-Neo/J, while performing stronger than CodeParrot. PolyCoder, which was trained only on code, falls behind a similar sized model (GPT-Neo 2.7B) trained on the Pile, a blend of natural language texts and code. Looking at the rightmost columns in Table 4 offers a potential explanation: in terms of total Python tokens seen during training, all models substantially exceed ours. This in partly because they use a higher proportion of Python code (we aimed to balance data volume across programming languages), and in part because of resource limitations, which lead to PolyCoder not observing its entire training data. In addition, the natural language blend in the training corpus may help code language modeling as well, especially with code-related texts such as Stack Exchange dumps being included.",
957
+ "bbox": [
958
+ 169,
959
+ 627,
960
+ 826,
961
+ 792
962
+ ],
963
+ "page_idx": 6
964
+ },
965
+ {
966
+ "type": "text",
967
+ "text": "Compared to GPT-Neo (2.7B), PolyCoder has seen fewer Python tokens, but more code tokens in other programming languages, hinting that transfer from other languages to Python helps to achieve a similar performance. This suggests that future research could benefit from blending code in different programming languages, as well as natural language text.",
968
+ "bbox": [
969
+ 169,
970
+ 801,
971
+ 823,
972
+ 859
973
+ ],
974
+ "page_idx": 6
975
+ },
976
+ {
977
+ "type": "header",
978
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
979
+ "bbox": [
980
+ 171,
981
+ 32,
982
+ 535,
983
+ 47
984
+ ],
985
+ "page_idx": 6
986
+ },
987
+ {
988
+ "type": "page_footnote",
989
+ "text": "11https://github.com/EleutherAI/gpt-neox",
990
+ "bbox": [
991
+ 187,
992
+ 869,
993
+ 482,
994
+ 883
995
+ ],
996
+ "page_idx": 6
997
+ },
998
+ {
999
+ "type": "page_footnote",
1000
+ "text": "$^{12}$ https://github.com/EleutherAI/gpt-neox/tree/main/configs",
1001
+ "bbox": [
1002
+ 189,
1003
+ 883,
1004
+ 612,
1005
+ 897
1006
+ ],
1007
+ "page_idx": 6
1008
+ },
1009
+ {
1010
+ "type": "page_footnote",
1011
+ "text": "<sup>13</sup>Due to the large model size of GPT-NeoX (20B) and limited computational budget, we did not include it in the HumanEval experiment.",
1012
+ "bbox": [
1013
+ 174,
1014
+ 897,
1015
+ 823,
1016
+ 924
1017
+ ],
1018
+ "page_idx": 6
1019
+ },
1020
+ {
1021
+ "type": "page_number",
1022
+ "text": "7",
1023
+ "bbox": [
1024
+ 493,
1025
+ 948,
1026
+ 503,
1027
+ 959
1028
+ ],
1029
+ "page_idx": 6
1030
+ },
1031
+ {
1032
+ "type": "table",
1033
+ "img_path": "images/1728ca91345f17dd715cd1939c5cb9c79dca3a0c1c2b5a7bcc7eb2aa8911bee9.jpg",
1034
+ "table_caption": [],
1035
+ "table_footnote": [
1036
+ "*Codex is initialized with another pretrained model, GPT-3."
1037
+ ],
1038
+ "table_body": "<table><tr><td>Model</td><td>Pass@1</td><td>Pass@10</td><td>Pass@100</td><td>Tokens Trained</td><td>Code Tokens</td><td>Python Tokens</td></tr><tr><td>PolyCoder (160M)</td><td>2.13%</td><td>3.35%</td><td>4.88%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>PolyCoder (400M)</td><td>2.96%</td><td>5.29%</td><td>11.59%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>PolyCoder (2.7B)</td><td>5.59%</td><td>9.84%</td><td>17.68%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>CodeParrot (110M)</td><td>3.80%</td><td>6.57%</td><td>12.78%</td><td>26B</td><td>26B</td><td>26B</td></tr><tr><td>CodeParrot (1.5B)</td><td>3.58%</td><td>8.03%</td><td>14.96%</td><td>26B</td><td>26B</td><td>26B</td></tr><tr><td>GPT-Neo (125M)</td><td>0.75%</td><td>1.88%</td><td>2.97%</td><td>300B</td><td>22.8B</td><td>3.1B</td></tr><tr><td>GPT-Neo (1.3B)</td><td>4.79%</td><td>7.47%</td><td>16.30%</td><td>380B</td><td>28.8B</td><td>3.9B</td></tr><tr><td>GPT-Neo (2.7B)</td><td>6.41%</td><td>11.27%</td><td>21.37%</td><td>420B</td><td>31.9B</td><td>4.3B</td></tr><tr><td>GPT-J (6B)</td><td>11.62%</td><td>15.74%</td><td>27.74%</td><td>402B</td><td>30.5B</td><td>4.1B</td></tr><tr><td>Codex (300M)</td><td>13.17%</td><td>20.37%</td><td>36.27%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr><tr><td>Codex (2.5B)</td><td>21.36%</td><td>35.42%</td><td>59.50%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr><tr><td>Codex (12B)</td><td>28.81%</td><td>46.81%</td><td>72.31%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr></table>",
1039
+ "bbox": [
1040
+ 173,
1041
+ 101,
1042
+ 846,
1043
+ 297
1044
+ ],
1045
+ "page_idx": 7
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "text": "Table 4: Results of different models on the HumanEval benchmark, and the number of different types of tokens seen during the training process.",
1050
+ "bbox": [
1051
+ 169,
1052
+ 321,
1053
+ 823,
1054
+ 354
1055
+ ],
1056
+ "page_idx": 7
1057
+ },
1058
+ {
1059
+ "type": "image",
1060
+ "img_path": "images/fa7912260a4111029c9185fa123796d75bffb01e3e5c9091c7a753c35a7077f3.jpg",
1061
+ "image_caption": [
1062
+ "(a) Pass@1"
1063
+ ],
1064
+ "image_footnote": [],
1065
+ "bbox": [
1066
+ 181,
1067
+ 376,
1068
+ 385,
1069
+ 489
1070
+ ],
1071
+ "page_idx": 7
1072
+ },
1073
+ {
1074
+ "type": "image",
1075
+ "img_path": "images/dc20f2aa105d567d4ab5f926de020b293aeb95ee4a41df57fa27cd37db6e0029.jpg",
1076
+ "image_caption": [
1077
+ "(b) Pass@10"
1078
+ ],
1079
+ "image_footnote": [],
1080
+ "bbox": [
1081
+ 395,
1082
+ 376,
1083
+ 598,
1084
+ 489
1085
+ ],
1086
+ "page_idx": 7
1087
+ },
1088
+ {
1089
+ "type": "image",
1090
+ "img_path": "images/6b6e903178364401458cfb950fe83d4a1a5e44078a0144732feb2fac18362569.jpg",
1091
+ "image_caption": [
1092
+ "(c) Pass@100",
1093
+ "Figure 4: The scaling effect of HumanEval performance on different models."
1094
+ ],
1095
+ "image_footnote": [],
1096
+ "bbox": [
1097
+ 612,
1098
+ 376,
1099
+ 816,
1100
+ 489
1101
+ ],
1102
+ "page_idx": 7
1103
+ },
1104
+ {
1105
+ "type": "text",
1106
+ "text": "Scaling Effect To further understand the effect of the number of model parameters with respect to HumanEval code completion performance, we show the Pass@1, Pass@10 and Pass@100 percentage with respect to the model size in Figure 4. We can see that the performance of the Codex models are significantly better than all the other open-source models across all numbers of parameters. The performance on HumanEval benchmark increases linearly with the magnitude (log scale) of the number of parameters in the model. Similar scaling effects could be found on PolyCoder and GPT-Neo/J models. Interestingly, the CodeParrot models that are trained only on Python seem to have reached a saturating performance with respect to increasing number of parameters, where the training corpus being focused on Python may have some effect. With higher number of parameters (2.7B), PolyCoder's performance is trending worse than that of GPT-Neo/J. Comparing GPT-Neo/J that is trained on Pile dataset containing a blend of text, Stack Exchange dumps and GitHub data, with PolyCoder that are trained on only GitHub repositories of popular programming languages, we hypothesize that the added text, especially texts in technical and software engineering domains, may be crucial for the larger model to boost the performance. We also compare the performance difference between the model trained after 100K steps versus the model after 150K steps in Appendix A, and find that training for longer helps the larger model more as it is still under-fitted.",
1107
+ "bbox": [
1108
+ 169,
1109
+ 574,
1110
+ 826,
1111
+ 796
1112
+ ],
1113
+ "page_idx": 7
1114
+ },
1115
+ {
1116
+ "type": "text",
1117
+ "text": "Temperature Effect All the above results are obtained by sampling the language model with different temperatures and picking the best value for each metric. We are also interested in how different choices of temperature affects the final generation quality. We summarize the results in Figure 5. The general trend is for Pass@1, lower temperatures are better, and for Pass@100, a higher temperature will help, while for Pass@10 a temperature in the middle is better suited. We hypothesize that this is because a higher temperature during generation makes the model less confident in its predictions and thus allow for more exploration and more diverse outputs, resulting in better accuracy at Pass@100. Too high a temperature (0.8) is also hurtful if the model is capable enough.",
1118
+ "bbox": [
1119
+ 169,
1120
+ 816,
1121
+ 828,
1122
+ 931
1123
+ ],
1124
+ "page_idx": 7
1125
+ },
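The temperature discussed above rescales the next-token distribution as softmax(x/T) before nucleus (top-p = 0.95) sampling. The sketch below illustrates, with made-up logits, why a low temperature concentrates probability on the most likely token (helping Pass@1) while a high temperature spreads it out (helping Pass@100); it is a simplified stand-in, not the sampling code used in the paper.

```python
import numpy as np

def sample_next_token(logits, temperature, top_p=0.95, rng=None):
    """One step of temperature-scaled softmax with nucleus (top-p) sampling."""
    if rng is None:
        rng = np.random.default_rng()
    scaled = logits / temperature
    probs = np.exp(scaled - scaled.max())
    probs /= probs.sum()
    # Keep the smallest set of highest-probability tokens whose mass reaches top_p.
    order = np.argsort(probs)[::-1]
    cutoff = int(np.searchsorted(np.cumsum(probs[order]), top_p)) + 1
    keep = order[:cutoff]
    return int(rng.choice(keep, p=probs[keep] / probs[keep].sum()))

logits = np.array([4.0, 3.5, 1.0, 0.5])  # illustrative next-token logits
print([sample_next_token(logits, temperature=0.2) for _ in range(8)])  # almost always token 0
print([sample_next_token(logits, temperature=0.8) for _ in range(8)])  # mixes tokens 0 and 1
```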
1126
+ {
1127
+ "type": "header",
1128
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
1129
+ "bbox": [
1130
+ 171,
1131
+ 32,
1132
+ 535,
1133
+ 47
1134
+ ],
1135
+ "page_idx": 7
1136
+ },
1137
+ {
1138
+ "type": "page_number",
1139
+ "text": "8",
1140
+ "bbox": [
1141
+ 493,
1142
+ 948,
1143
+ 503,
1144
+ 959
1145
+ ],
1146
+ "page_idx": 7
1147
+ },
1148
+ {
1149
+ "type": "image",
1150
+ "img_path": "images/047da50880c13aa5511b30a163332f7695f6d9ca37e31c854adfedb2395f361f.jpg",
1151
+ "image_caption": [
1152
+ "Figure 6: Perplexity comparison on our evaluation dataset of different models on different programming languages. Note that the y-axis is capped at 4; CodeParrot's entropy on all languages other than Python is much higher than shown here (see Table 5)."
1153
+ ],
1154
+ "image_footnote": [],
1155
+ "bbox": [
1156
+ 191,
1157
+ 106,
1158
+ 816,
1159
+ 292
1160
+ ],
1161
+ "page_idx": 8
1162
+ },
1163
+ {
1164
+ "type": "text",
1165
+ "text": "On the contrary, a lower temperature makes the model output very confident in its prediction and thus will be better suited for generating very few correct examples, and thus the better performance for Pass@1. In Appendix B we repeat these experiments with the smaller models as well. This suggests the importance of temperature and the need to tune it individually for different generation scenarios.",
1166
+ "bbox": [
1167
+ 169,
1168
+ 372,
1169
+ 550,
1170
+ 482
1171
+ ],
1172
+ "page_idx": 8
1173
+ },
1174
+ {
1175
+ "type": "image",
1176
+ "img_path": "images/2b2795d720f91a54fffacee7de8ddb77c13b3ab9633ed1f6894dec0663d39faf.jpg",
1177
+ "image_caption": [
1178
+ "Figure 5: HumanEval performance with different softmax temperatures during generation."
1179
+ ],
1180
+ "image_footnote": [],
1181
+ "bbox": [
1182
+ 570,
1183
+ 380,
1184
+ 816,
1185
+ 518
1186
+ ],
1187
+ "page_idx": 8
1188
+ },
1189
+ {
1190
+ "type": "text",
1191
+ "text": "5.2 INTRINSIC EVALUATION",
1192
+ "text_level": 1,
1193
+ "bbox": [
1194
+ 171,
1195
+ 500,
1196
+ 383,
1197
+ 513
1198
+ ],
1199
+ "page_idx": 8
1200
+ },
1201
+ {
1202
+ "type": "text",
1203
+ "text": "The perplexity results on the evaluation datasets are shown in Figure 6, with detailed numbers in Appendix C. The plot caps the perplexity score to 4 as CodeParrot performs poorly in languages other than Python. It is important to note that although Codex's perplexities are lower than",
1204
+ "bbox": [
1205
+ 169,
1206
+ 527,
1207
+ 549,
1208
+ 595
1209
+ ],
1210
+ "page_idx": 8
1211
+ },
1212
+ {
1213
+ "type": "text",
1214
+ "text": "other models in most languages, Codex might have been trained on the test sets, and its results are thus over-optimistic.",
1215
+ "bbox": [
1216
+ 169,
1217
+ 595,
1218
+ 823,
1219
+ 625
1220
+ ],
1221
+ "page_idx": 8
1222
+ },
1223
+ {
1224
+ "type": "text",
1225
+ "text": "Notably, PolyCoder outperforms Codex and all other models in the C language. Comparing the open-source models only, PolyCoder performs better than the similarly sized GPT-Neo 2.7B in C, JavaScript, Rust, Scala and TypeScript.",
1226
+ "bbox": [
1227
+ 169,
1228
+ 631,
1229
+ 826,
1230
+ 675
1231
+ ],
1232
+ "page_idx": 8
1233
+ },
1234
+ {
1235
+ "type": "text",
1236
+ "text": "In the other 11 languages other than C, all other open-source models, including ours, are significantly worse (higher perplexity) than Codex. We hypothesize that this is due to the fact that PolyCoder is trained on an imbalanced mixture of different languages, with C and $\\mathrm{C + + }$ being closely related and the two most dominant in the entire training corpus (Section 4.2). Thus, the larger volume in total (because of long files) makes C the most \"favored\" language by PolyCoder. The reason why PolyCoder does not outperform Codex in $\\mathrm{C + + }$ is possibly due to the complexity of $\\mathrm{C + + }$ language and Codex's significantly longer context window size (4096, compared to PolyCoder's 2048), or because Codex is possibly trained on more $\\mathrm{C + + }$ training data.",
1237
+ "bbox": [
1238
+ 169,
1239
+ 680,
1240
+ 825,
1241
+ 792
1242
+ ],
1243
+ "page_idx": 8
1244
+ },
1245
+ {
1246
+ "type": "text",
1247
+ "text": "With the same pretraining corpus, the gain from a 2.7B model (GPT-Neo) to a 6B model (GPT-J) is significant over all languages. However, when increasing the model size further to 20B, the improvement varies across different languages. For example, the performance on Go, Java, Rust, Scala, JavaScript do not increase significantly when the model size increases by 3 times. This suggests that for some programming languages, and given the amounts of data, the capacity of GPT-J is sufficient. Interestingly, these languages seem to coincide with languages where PolyCoder outperforms a similarly sized model trained on Pile. This may hint that for the languages in which larger models do not provide additional gains, training the model only using code may be enough or slightly more helpful than training on both natural language and code.",
1248
+ "bbox": [
1249
+ 169,
1250
+ 797,
1251
+ 826,
1252
+ 925
1253
+ ],
1254
+ "page_idx": 8
1255
+ },
1256
+ {
1257
+ "type": "header",
1258
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
1259
+ "bbox": [
1260
+ 171,
1261
+ 32,
1262
+ 535,
1263
+ 47
1264
+ ],
1265
+ "page_idx": 8
1266
+ },
1267
+ {
1268
+ "type": "page_number",
1269
+ "text": "9",
1270
+ "bbox": [
1271
+ 493,
1272
+ 948,
1273
+ 503,
1274
+ 959
1275
+ ],
1276
+ "page_idx": 8
1277
+ },
1278
+ {
1279
+ "type": "text",
1280
+ "text": "We can see that comparing different models, perplexity trends for Python correlates well with the HumanEval benchmark performance of the extrinsic evaluation (Section 5.1). This suggests that perplexity is a useful and low-cost metric to estimate other, downstream, metrics.",
1281
+ "bbox": [
1282
+ 169,
1283
+ 103,
1284
+ 826,
1285
+ 148
1286
+ ],
1287
+ "page_idx": 9
1288
+ },
1289
+ {
1290
+ "type": "text",
1291
+ "text": "6 CONCLUSION",
1292
+ "text_level": 1,
1293
+ "bbox": [
1294
+ 171,
1295
+ 165,
1296
+ 320,
1297
+ 181
1298
+ ],
1299
+ "page_idx": 9
1300
+ },
1301
+ {
1302
+ "type": "text",
1303
+ "text": "In this paper, we perform a systematic evaluation of large language models for code. The performance generally benefits from larger models and longer training time. We also believe that the better results of GPT-Neo over PolyCoder in some languages show that training on natural language text and code can benefit the modeling of code. To help future research in the area, we release PolyCoder, a large open-source language model for code, trained exclusively on code in 12 different programming languages. In the C programming language, PolyCoder achieves lower perplexity than all models including Codex.",
1304
+ "bbox": [
1305
+ 169,
1306
+ 196,
1307
+ 826,
1308
+ 297
1309
+ ],
1310
+ "page_idx": 9
1311
+ },
1312
+ {
1313
+ "type": "text",
1314
+ "text": "REFERENCES",
1315
+ "text_level": 1,
1316
+ "bbox": [
1317
+ 173,
1318
+ 314,
1319
+ 287,
1320
+ 329
1321
+ ],
1322
+ "page_idx": 9
1323
+ },
1324
+ {
1325
+ "type": "list",
1326
+ "sub_type": "ref_text",
1327
+ "list_items": [
1328
+ "Wasi Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. Unified pre-training for program understanding and generation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2655–2668, Online, June 2021. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2021.naacl-main.211.",
1329
+ "Miltiadis Allamanis. The adverse effects of code duplication in machine learning models of code. In Proceedings of the 2019 ACM SIGPLAN International Symposium on New Ideas, New Paradigms, and Reflections on Programming and Software, pp. 143-153, 2019.",
1330
+ "Uri Alon, Roy Sadaka, Omer Levy, and Eran Yahav. Structural language models of code. In International Conference on Machine Learning, pp. 245-256. PMLR, 2020.",
1331
+ "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021.",
1332
+ "Alexei Baevski and Michael Auli. Adaptive input representations for neural language modeling. arXiv preprint arXiv:1809.10853, 2018.",
1333
+ "Yoshua Bengio, Réjean Ducharme, Pascal Vincent, and Christian Jauvin. A neural probabilistic language model. Journal of machine learning research, 3(Feb):1137-1155, 2003.",
1334
+ "Sid Black, Leo Gao, Phil Wang, Connor Leahy, and Stella Biderman. GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow, March 2021. URL https://doi.org/10.5281/zenodo.5297715. If you use this software, please cite it using these metadata.",
1335
+ "Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, and Samuel Weinbach. GPT-NeoX-20B: An open-source autoregressive language model. 2022.",
1336
+ "Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.",
1337
+ "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde, Jared Kaplan, Harri Edwards, Yura Burda, Nicholas Joseph, Greg Brockman, et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021.",
1338
+ "Alexis Conneau and Guillaume Lample. Cross-lingual language model pretraining. Advances in Neural Information Processing Systems, 32:7059-7069, 2019.",
1339
+ "Aditya Desai, Sumit Gulwani, Vineet Hingorani, Nidhi Jain, Amey Karkare, Mark Marron, and Subhajit Roy. Program synthesis using natural language. In Proceedings of the 38th International Conference on Software Engineering, pp. 345-356, 2016."
1340
+ ],
1341
+ "bbox": [
1342
+ 173,
1343
+ 337,
1344
+ 826,
1345
+ 924
1346
+ ],
1347
+ "page_idx": 9
1348
+ },
1349
+ {
1350
+ "type": "header",
1351
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
1352
+ "bbox": [
1353
+ 171,
1354
+ 32,
1355
+ 537,
1356
+ 47
1357
+ ],
1358
+ "page_idx": 9
1359
+ },
1360
+ {
1361
+ "type": "page_number",
1362
+ "text": "10",
1363
+ "bbox": [
1364
+ 490,
1365
+ 948,
1366
+ 508,
1367
+ 959
1368
+ ],
1369
+ "page_idx": 9
1370
+ },
1371
+ {
1372
+ "type": "list",
1373
+ "sub_type": "ref_text",
1374
+ "list_items": [
1375
+ "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.",
1376
+ "Zhangyin Feng, Daya Guo, Duyu Tang, Nan Duan, Xiaocheng Feng, Ming Gong, Linjun Shou, Bing Qin, Ting Liu, Daxin Jiang, et al. Codebert: A pre-trained model for programming and natural languages. arXiv preprint arXiv:2002.08155, 2020.",
1377
+ "Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020.",
1378
+ "Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Liang Zhang, Wentao Han, Minlie Huang, et al. Pre-trained models: Past, present and future. AI Open, 2021.",
1379
+ "Vincent J Hellendoorn and Premkumar Devanbu. Are deep neural networks the best choice for modeling source code? In Proceedings of the 2017 11th Joint Meeting on Foundations of Software Engineering, pp. 763-773, 2017.",
1380
+ "Vincent J. Hellendoorn and Anand Ashok Sawant. The growing cost of deep learning for source code. Commun. ACM, 65(1):31-33, dec 2021. ISSN 0001-0782. doi: 10.1145/3501261. URL https://doi.org/10.1145/3501261.",
1381
+ "Abram Hindle, Earl T Barr, Mark Gabel, Zhendong Su, and Premkumar Devanbu. On the naturalness of software. Communications of the ACM, 59(5):122-131, 2016.",
1382
+ "Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration. arXiv preprint arXiv:1904.09751, 2019.",
1383
+ "Hamel Husain, Ho-Hsiang Wu, Tiferet Gazit, Miltiadis Allamanis, and Marc Brockschmidt. Code-searchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436, 2019.",
1384
+ "Aditya Kanade, Petros Maniatis, Gogul Balakrishnan, and Kensen Shi. Learning and evaluating contextual embedding of source code. In International Conference on Machine Learning, pp. 5110-5121. PMLR, 2020.",
1385
+ "Rafael-Michael Karampatsis, Hlib Babii, Romain Robbes, Charles Sutton, and Andrea Janes. Big code! = big vocabulary: Open-vocabulary models for source code. In 2020 IEEE/ACM 42nd International Conference on Software Engineering (ICSE), pp. 1073-1085. IEEE, 2020.",
1386
+ "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461, 2019.",
1387
+ "Shuai Lu, Daya Guo, Shuo Ren, Junjie Huang, Alexey Svyatkovskiy, Ambrosio Blanco, Colin Clement, Dawn Drain, Daxin Jiang, Duyu Tang, Ge Li, Lidong Zhou, Linjun Shou, Long Zhou, Michele Tufano, MING GONG, Ming Zhou, Nan Duan, Neel Sundaresan, Shao Kun Deng, Shengyu Fu, and Shujie LIU. CodeXGLUE: A machine learning benchmark dataset for code understanding and generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. URL https://openreview.net/forum?id=61E4dQXaUcb.",
1388
+ "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.",
1389
+ "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683, 2019.",
1390
+ "Veselin Raychev, Martin Vechev, and Eran Yahav. Code completion with statistical language models. In Proceedings of the 35th ACM SIGPLAN Conference on Programming Language Design and Implementation, pp. 419-428, 2014."
1391
+ ],
1392
+ "bbox": [
1393
+ 171,
1394
+ 102,
1395
+ 825,
1396
+ 924
1397
+ ],
1398
+ "page_idx": 10
1399
+ },
1400
+ {
1401
+ "type": "header",
1402
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
1403
+ "bbox": [
1404
+ 171,
1405
+ 32,
1406
+ 537,
1407
+ 47
1408
+ ],
1409
+ "page_idx": 10
1410
+ },
1411
+ {
1412
+ "type": "page_number",
1413
+ "text": "11",
1414
+ "bbox": [
1415
+ 488,
1416
+ 946,
1417
+ 506,
1418
+ 960
1419
+ ],
1420
+ "page_idx": 10
1421
+ },
1422
+ {
1423
+ "type": "list",
1424
+ "sub_type": "ref_text",
1425
+ "list_items": [
1426
+ "Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015.",
1427
+ "Lewis Tunstall, Leandro von Werra, and Thomas Wolf. Natural Language Processing with Transformers.\" O'Reilly Media, Inc.\", 2022.",
1428
+ "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021.",
1429
+ "Yue Wang, Weishi Wang, Shafiq Joty, and Steven CH Hoi. Codet5: Identifier-aware unified pre-trained encoder-decoder models for code understanding and generation. arXiv preprint arXiv:2109.00859, 2021.",
1430
+ "Daniel Zügner, Tobias Kirschstein, Michele Catasta, Jure Leskovec, and Stephan Gunnemann. Language-agnostic representation learning of source code from structure and context. In International Conference on Learning Representations, 2021. URL https://openreview.net/for um?id=Xh5eMZVONGF."
1431
+ ],
1432
+ "bbox": [
1433
+ 171,
1434
+ 103,
1435
+ 826,
1436
+ 321
1437
+ ],
1438
+ "page_idx": 11
1439
+ },
1440
+ {
1441
+ "type": "text",
1442
+ "text": "A SCALING EFFECT: TRAINED LONGER",
1443
+ "text_level": 1,
1444
+ "bbox": [
1445
+ 171,
1446
+ 347,
1447
+ 522,
1448
+ 364
1449
+ ],
1450
+ "page_idx": 11
1451
+ },
1452
+ {
1453
+ "type": "text",
1454
+ "text": "We compare the performance difference between the model trained after 100K steps versus the model after 150K steps in Figure 7. We can see that in the larger 2.7B model, by training the model longer till 150K steps, the performance increases uniformly, with Pass@100 increasing the most. However, for a smaller model such as the 400M model, by training the model longer till 100K steps, the improvements are subdued and Pass@100 drops. This suggests that with the larger model, training for longer may provide additional boost in performance. This echoes with the observation from the training curve (Figure 3) as well.",
1455
+ "bbox": [
1456
+ 169,
1457
+ 378,
1458
+ 826,
1459
+ 476
1460
+ ],
1461
+ "page_idx": 11
1462
+ },
1463
+ {
1464
+ "type": "image",
1465
+ "img_path": "images/b92970c300f8517e1232a2d9abf89620fd7c495e523649649f42aec630eb88b1.jpg",
1466
+ "image_caption": [
1467
+ "(a) 2.7B Model"
1468
+ ],
1469
+ "image_footnote": [],
1470
+ "bbox": [
1471
+ 290,
1472
+ 494,
1473
+ 493,
1474
+ 609
1475
+ ],
1476
+ "page_idx": 11
1477
+ },
1478
+ {
1479
+ "type": "image",
1480
+ "img_path": "images/4999c3b042853defebb733dc6522b2753ab67ad5e6459e5752dc0ca3ca2e2ff9.jpg",
1481
+ "image_caption": [
1482
+ "(b) 400M Model",
1483
+ "Figure 7: HumanEval performance comparison after training the model for longer."
1484
+ ],
1485
+ "image_footnote": [],
1486
+ "bbox": [
1487
+ 503,
1488
+ 494,
1489
+ 707,
1490
+ 609
1491
+ ],
1492
+ "page_idx": 11
1493
+ },
1494
+ {
1495
+ "type": "text",
1496
+ "text": "B TEMPERATURE EFFECT: SMALLER MODELS",
1497
+ "text_level": 1,
1498
+ "bbox": [
1499
+ 171,
1500
+ 688,
1501
+ 576,
1502
+ 704
1503
+ ],
1504
+ "page_idx": 11
1505
+ },
1506
+ {
1507
+ "type": "text",
1508
+ "text": "We show how temperature affects HumanEval performance on model of all three sizes in Figure 8. We find that for a larger model, e.g., the 2.7B model, a temperature as high as 0.8 is actually hurting the performance for Pass@100, suggesting that if the model is good enough, a very high temperature may cause the outputs to be too diverse, thus hurting the correctness. This suggests the importance of temperature and the need to tune it individually for different model capacity and different generation scenarios.",
1509
+ "bbox": [
1510
+ 169,
1511
+ 719,
1512
+ 826,
1513
+ 801
1514
+ ],
1515
+ "page_idx": 11
1516
+ },
1517
+ {
1518
+ "type": "text",
1519
+ "text": "C DETAILED PERPLEXITY RESULTS",
1520
+ "text_level": 1,
1521
+ "bbox": [
1522
+ 171,
1523
+ 823,
1524
+ 486,
1525
+ 838
1526
+ ],
1527
+ "page_idx": 11
1528
+ },
1529
+ {
1530
+ "type": "text",
1531
+ "text": "We show the detailed perplexity of different models on different languages in Table 5. The number of tokens shown in the table is obtained after tokenizing the code in each language using their respective lexers, by Pygments. This number of tokens is used to normalize the perplexity scores to make them comparable across models. Note that CodeParrot is only trained on Python data and thus performs poorly in other languages.",
1532
+ "bbox": [
1533
+ 169,
1534
+ 854,
1535
+ 825,
1536
+ 925
1537
+ ],
1538
+ "page_idx": 11
1539
+ },
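The normalization described in this paragraph makes perplexities comparable by dividing each model's summed log-likelihood by a shared, tokenizer-independent token count obtained from a Pygments lexer. A minimal sketch of that computation is below; the function name, the whitespace-token filtering, and the example numbers are illustrative assumptions rather than the paper's exact procedure.

```python
import math
from pygments.lexers import get_lexer_by_name

def normalized_perplexity(total_log_likelihood: float, code: str, language: str) -> float:
    """Perplexity normalized by Pygments lexer tokens instead of model-specific BPE tokens.

    total_log_likelihood: sum of natural-log token probabilities a model assigns to `code`
    (each model still uses its own tokenizer to compute this sum).
    """
    lexer = get_lexer_by_name(language)
    n_tokens = sum(1 for _, text in lexer.get_tokens(code) if text.strip())  # skip whitespace tokens
    return math.exp(-total_log_likelihood / n_tokens)

snippet = "int main(void) { return 0; }"          # about 10 lexer tokens
print(normalized_perplexity(-40.0, snippet, "c"))  # roughly exp(40 / 10), i.e. around 55
```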
1540
+ {
1541
+ "type": "header",
1542
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
1543
+ "bbox": [
1544
+ 171,
1545
+ 32,
1546
+ 537,
1547
+ 47
1548
+ ],
1549
+ "page_idx": 11
1550
+ },
1551
+ {
1552
+ "type": "page_number",
1553
+ "text": "12",
1554
+ "bbox": [
1555
+ 488,
1556
+ 946,
1557
+ 508,
1558
+ 959
1559
+ ],
1560
+ "page_idx": 11
1561
+ },
1562
+ {
1563
+ "type": "image",
1564
+ "img_path": "images/e33241ef2a21c293287f32c85f0c9e2952d80eb80d541fc5c8beb257d965b88b.jpg",
1565
+ "image_caption": [
1566
+ "(a) 2.7B Model"
1567
+ ],
1568
+ "image_footnote": [],
1569
+ "bbox": [
1570
+ 189,
1571
+ 208,
1572
+ 390,
1573
+ 321
1574
+ ],
1575
+ "page_idx": 12
1576
+ },
1577
+ {
1578
+ "type": "image",
1579
+ "img_path": "images/5b509909fb3b14b2ed776f75a90d38440510d10ceddac64800d30bc9d64f8245.jpg",
1580
+ "image_caption": [
1581
+ "(b) 400M Model",
1582
+ "Figure 8: HumanEval performance using different softmax temperatures during generation."
1583
+ ],
1584
+ "image_footnote": [],
1585
+ "bbox": [
1586
+ 398,
1587
+ 208,
1588
+ 596,
1589
+ 321
1590
+ ],
1591
+ "page_idx": 12
1592
+ },
1593
+ {
1594
+ "type": "image",
1595
+ "img_path": "images/010d6157ca0c6fca469da4abe8737e53bc03cdd8cae6e8d9594ef8ab7b5a7a75.jpg",
1596
+ "image_caption": [
1597
+ "(c) 160M Model"
1598
+ ],
1599
+ "image_footnote": [],
1600
+ "bbox": [
1601
+ 607,
1602
+ 208,
1603
+ 805,
1604
+ 321
1605
+ ],
1606
+ "page_idx": 12
1607
+ },
1608
+ {
1609
+ "type": "table",
1610
+ "img_path": "images/18d2f4c096ca0e7618834564dedd29dbdd3469f1a7e4587f57fff75993eef358.jpg",
1611
+ "table_caption": [],
1612
+ "table_footnote": [
1613
+ "* Since the exact training set of Codex is unknown, it might have been trained on these test sets, and Codex's results are over-optimistic."
1614
+ ],
1615
+ "table_body": "<table><tr><td>Language</td><td>#tokens</td><td>Codex*</td><td>PolyCoder 2.7B</td><td>GPT-Neo 2.7B</td><td>GPT-J 6B</td><td>GPT-NeoX</td><td>CodeParrot</td></tr><tr><td>C</td><td>55,333</td><td>2.55</td><td>2.33</td><td>3.69</td><td>2.82</td><td>2.37</td><td>19.23</td></tr><tr><td>C#</td><td>67,306</td><td>1.72</td><td>2.58</td><td>2.49</td><td>2.20</td><td>2.12</td><td>7.16</td></tr><tr><td>C++</td><td>69,627</td><td>1.95</td><td>2.99</td><td>2.87</td><td>2.47</td><td>2.32</td><td>8.48</td></tr><tr><td>Go</td><td>79,947</td><td>1.39</td><td>2.57</td><td>2.19</td><td>1.89</td><td>1.85</td><td>10.00</td></tr><tr><td>Java</td><td>65,484</td><td>1.94</td><td>2.92</td><td>2.78</td><td>2.49</td><td>2.47</td><td>6.79</td></tr><tr><td>JavaScript</td><td>54,620</td><td>2.17</td><td>3.06</td><td>3.07</td><td>2.73</td><td>2.62</td><td>9.23</td></tr><tr><td>PHP</td><td>45,682</td><td>1.98</td><td>3.70</td><td>3.61</td><td>2.81</td><td>2.45</td><td>19.91</td></tr><tr><td>Python</td><td>79,653</td><td>1.47</td><td>3.18</td><td>3.00</td><td>2.68</td><td>2.61</td><td>2.95</td></tr><tr><td>Ruby</td><td>46,537</td><td>1.39</td><td>3.96</td><td>3.77</td><td>3.13</td><td>2.89</td><td>14.26</td></tr><tr><td>Rust</td><td>107,717</td><td>1.96</td><td>3.24</td><td>3.30</td><td>2.92</td><td>2.92</td><td>8.68</td></tr><tr><td>Scala</td><td>65,756</td><td>1.75</td><td>3.87</td><td>3.88</td><td>3.37</td><td>3.33</td><td>12.91</td></tr><tr><td>JavaScript</td><td>55,895</td><td>2.40</td><td>3.61</td><td>3.90</td><td>3.43</td><td>3.41</td><td>12.54</td></tr></table>",
1616
+ "bbox": [
1617
+ 173,
1618
+ 587,
1619
+ 859,
1620
+ 765
1621
+ ],
1622
+ "page_idx": 12
1623
+ },
1624
+ {
1625
+ "type": "text",
1626
+ "text": "Table 5: Perplexity of different models for different programming languages on our evaluation dataset.",
1627
+ "bbox": [
1628
+ 171,
1629
+ 801,
1630
+ 823,
1631
+ 816
1632
+ ],
1633
+ "page_idx": 12
1634
+ },
1635
+ {
1636
+ "type": "header",
1637
+ "text": "Published as a workshop paper at DL4C @ ICLR 2022",
1638
+ "bbox": [
1639
+ 171,
1640
+ 32,
1641
+ 535,
1642
+ 47
1643
+ ],
1644
+ "page_idx": 12
1645
+ },
1646
+ {
1647
+ "type": "page_number",
1648
+ "text": "13",
1649
+ "bbox": [
1650
+ 488,
1651
+ 946,
1652
+ 506,
1653
+ 959
1654
+ ],
1655
+ "page_idx": 12
1656
+ }
1657
+ ]
2202.13xxx/2202.13169/909a7465-0b93-460b-9a57-ce6ae5e551db_model.json ADDED
@@ -0,0 +1,2162 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "header",
5
+ "bbox": [
6
+ 0.173,
7
+ 0.033,
8
+ 0.538,
9
+ 0.049
10
+ ],
11
+ "angle": 0,
12
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.172,
18
+ 0.1,
19
+ 0.825,
20
+ 0.147
21
+ ],
22
+ "angle": 0,
23
+ "content": "A SYSTEMATIC EVALUATION OF LARGE LANGUAGE MODELS OF CODE"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.18,
29
+ 0.171,
30
+ 0.631,
31
+ 0.185
32
+ ],
33
+ "angle": 0,
34
+ "content": "Frank F. Xu, Uri Alon, Graham Neubig, Vincent J. Hellendoorn"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.184,
40
+ 0.186,
41
+ 0.374,
42
+ 0.199
43
+ ],
44
+ "angle": 0,
45
+ "content": "School of Computer Science"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.184,
51
+ 0.199,
52
+ 0.37,
53
+ 0.214
54
+ ],
55
+ "angle": 0,
56
+ "content": "Carnegie Mellon University"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.184,
62
+ 0.214,
63
+ 0.651,
64
+ 0.228
65
+ ],
66
+ "angle": 0,
67
+ "content": "{fangzhex,ualon,gneubig}@cs.cmu.edu,vhellendoorn@cmu.edu"
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.451,
73
+ 0.264,
74
+ 0.547,
75
+ 0.278
76
+ ],
77
+ "angle": 0,
78
+ "content": "ABSTRACT"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.23,
84
+ 0.292,
85
+ 0.77,
86
+ 0.53
87
+ ],
88
+ "angle": 0,
89
+ "content": "Large language models (LMs) of code have recently shown tremendous promise in completing code and synthesizing code from natural language descriptions. However, the current state-of-the-art code LMs (e.g., Codex (Chen et al., 2021)) are not publicly available, leaving many questions about their model and data design decisions. We aim to fill in some of these blanks through a systematic evaluation of the largest existing models: Codex, GPT-J, GPT-Neo, GPT-NeoX-20B, and CodeParrot, across various programming languages. Although Codex itself is not open-source, we find that existing open-source models do achieve close results in some programming languages, although targeted mainly for natural language modeling. We further identify an important missing piece in the form of a large open-source model trained exclusively on a multi-lingual corpus of code. We release a new model, PolyCoder, with 2.7B parameters based on the GPT-2 architecture, that was trained on 249GB of code across 12 programming languages on a single machine. In the C programming language, PolyCoder outperforms all models including Codex. Our trained models are open-source and publicly available at https://github.com/VHellendoorn/Code-LMs, which enables future research and application in this area."
90
+ },
91
+ {
92
+ "type": "title",
93
+ "bbox": [
94
+ 0.174,
95
+ 0.552,
96
+ 0.338,
97
+ 0.567
98
+ ],
99
+ "angle": 0,
100
+ "content": "1 INTRODUCTION"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.17,
106
+ 0.582,
107
+ 0.828,
108
+ 0.723
109
+ ],
110
+ "angle": 0,
111
+ "content": "Language models (LMs) assign probabilities to sequences of tokens, and are widely applied to natural language text (Bengio et al., 2003; Baevski & Auli, 2018; Brown et al., 2020). Recently, LMs have shown impressive performance in modeling also source code, written in programming languages (Hindle et al., 2016; Hellendoorn & Devanbu, 2017; Alon et al., 2020; Karampatsis et al., 2020). These models excel at useful downstream tasks like code completion (Raychev et al., 2014) and synthesizing code from natural language descriptions (Desai et al., 2016). The current state-of-the-art large language models for code, such as Austin et al. (2021), have shown significant progress for AI-based programming assistance. Most notably, one of the largest of these models, Codex (Chen et al., 2021) has been deployed in the real-world production tool GitHub Copilot<sup>1</sup>, as an in-IDE developer assistant that automatically generates code based on the user's context."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.17,
117
+ 0.728,
118
+ 0.828,
119
+ 0.853
120
+ ],
121
+ "angle": 0,
122
+ "content": "Despite the great success of large language models of code, the strongest models are not publicly available. This prevents the application of these models outside of well-resourced companies and limits research in this field for low-resourced organizations. For example, Codex provides non-free access to the model's output through black-box API calls,[2] but the model's weights and training data are unavailable. This prevents researchers from fine-tuning and adapting this model to domains and tasks other than code completion. The lack of access to the model's internals also prevents the research community from studying other key aspects of these models, such as interpretability, distillation of the model for more efficient deployment, and incorporating additional components such as retrieval."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.171,
128
+ 0.86,
129
+ 0.829,
130
+ 0.89
131
+ ],
132
+ "angle": 0,
133
+ "content": "Several medium to large-sized pre-trained language models are publicly available, such as GPT-Neo (Black et al., 2021), GPT-J (Wang & Komatsuzaki, 2021) and GPT-NeoX (Black et al., 2022)."
134
+ },
135
+ {
136
+ "type": "page_footnote",
137
+ "bbox": [
138
+ 0.191,
139
+ 0.897,
140
+ 0.403,
141
+ 0.911
142
+ ],
143
+ "angle": 0,
144
+ "content": "1https://copilot.github.com/"
145
+ },
146
+ {
147
+ "type": "page_footnote",
148
+ "bbox": [
149
+ 0.193,
150
+ 0.911,
151
+ 0.475,
152
+ 0.924
153
+ ],
154
+ "angle": 0,
155
+ "content": "2https://openai.com/blog/openai-codex/"
156
+ },
157
+ {
158
+ "type": "list",
159
+ "bbox": [
160
+ 0.191,
161
+ 0.897,
162
+ 0.475,
163
+ 0.924
164
+ ],
165
+ "angle": 0,
166
+ "content": null
167
+ },
168
+ {
169
+ "type": "aside_text",
170
+ "bbox": [
171
+ 0.023,
172
+ 0.267,
173
+ 0.061,
174
+ 0.701
175
+ ],
176
+ "angle": 270,
177
+ "content": "arXiv:2202.13169v3 [cs.PL] 4 May 2022"
178
+ },
179
+ {
180
+ "type": "page_number",
181
+ "bbox": [
182
+ 0.494,
183
+ 0.949,
184
+ 0.504,
185
+ 0.96
186
+ ],
187
+ "angle": 0,
188
+ "content": "1"
189
+ }
190
+ ],
191
+ [
192
+ {
193
+ "type": "header",
194
+ "bbox": [
195
+ 0.173,
196
+ 0.033,
197
+ 0.536,
198
+ 0.049
199
+ ],
200
+ "angle": 0,
201
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.171,
207
+ 0.104,
208
+ 0.827,
209
+ 0.175
210
+ ],
211
+ "angle": 0,
212
+ "content": "Despite being trained on a mixture of a wide variety of text including news articles, online forums, and just a modest selection of (GitHub) software repositories (Gao et al., 2020), these language models can be used to generate source code with a reasonable performance Chen et al. (2021). In addition, there are a few open-source language models that are trained solely on source code. For example, CodeParrot (Tunstall et al., 2022) was trained on 180 GB of Python code."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.171,
218
+ 0.181,
219
+ 0.828,
220
+ 0.336
221
+ ],
222
+ "angle": 0,
223
+ "content": "Given the variety of model sizes and training schemes involved in these models and lack of comparisons between these, the impact of many modeling and training design decisions remains unclear. For instance, we do not know the precise selection of data on which Codex and other private models were trained; however, we do know that some public models (e.g., GPT-J) were trained on a mix of natural language and code in multiple programming languages, while other models (e.g., CodeParrot) were trained solely on code in one particular programming language. Multilingual models potentially provide better generalization, because different programming languages share similar keywords and properties, as shown by the success of multilingual models for natural language (Conneau & Lample, 2019) and for code (Zügner et al., 2021). This may hint that multilingual LMs can generalize across languages, outperform monolingual models and be useful for modeling low-resource programming languages, but this is yet to be verified empirically."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.171,
229
+ 0.341,
230
+ 0.828,
231
+ 0.563
232
+ ],
233
+ "angle": 0,
234
+ "content": "In this paper, we present a systematic evaluation of existing models of code – Codex, GPT-J, GPT-Neo, GPT-NeoX, and CodeParrot – across various programming languages. We aim to shed more light on the landscape of code modeling design decisions by comparing and contrasting these models, as well as providing a key missing link: thus far, no large open-source language model was trained exclusively on code from multiple programming languages. We provide three such models, ranging from 160M to 2.7B parameters, which we release under the umbrella name “PolyCoder”. First, we perform an extensive comparison of the training and evaluation settings between PolyCoder, open-source models, and Codex. Second, we evaluate the models on the HumanEval benchmark (Chen et al., 2021) and compare how do models of different sizes and training steps scale, and how different temperatures affect the generation quality. Finally, since HumanEval only evaluates the natural language to Python synthesis, we curate an unseen evaluation dataset in each of the 12 languages, to evaluate the perplexity of different models. We find that although Codex is allegedly focused on Python (Chen et al. (2021) §3.1), Codex performs surprisingly well in other programming languages too, and even better than GPT-J and GPT-NeoX that were trained on the Pile (Gao et al., 2020). Nonetheless, in the C programming language, our PolyCoder model achieves a lower perplexity than all these models, including Codex."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.171,
240
+ 0.57,
241
+ 0.825,
242
+ 0.64
243
+ ],
244
+ "angle": 0,
245
+ "content": "Although most current models perform worse than Codex, we hope that this systematic study helps future research in this area to design more efficient and effective models. More importantly, through this systematic evaluation of different models, we encourage the community to study and release medium-large scale language models for code, in response to the concerns expressed by Hellendoorn & Sawant (2021):"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.235,
251
+ 0.645,
252
+ 0.761,
253
+ 0.688
254
+ ],
255
+ "angle": 0,
256
+ "content": "[...] this exploding trend in cost to achieve the state of the art has left the ability to train and test such models limited to a select few large technology companies—and way beyond the resources of virtually all academic labs."
257
+ },
258
+ {
259
+ "type": "text",
260
+ "bbox": [
261
+ 0.171,
262
+ 0.692,
263
+ 0.825,
264
+ 0.721
265
+ ],
266
+ "angle": 0,
267
+ "content": "We believe that our efforts are a significant step towards democratization of large language models of code."
268
+ },
269
+ {
270
+ "type": "title",
271
+ "bbox": [
272
+ 0.172,
273
+ 0.744,
274
+ 0.347,
275
+ 0.759
276
+ ],
277
+ "angle": 0,
278
+ "content": "2 RELATED WORK"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.171,
284
+ 0.777,
285
+ 0.827,
286
+ 0.822
287
+ ],
288
+ "angle": 0,
289
+ "content": "At the core of code modeling lies ongoing work on pretraining of language models (LMs). Large-scale pretraining of LMs has had an astounding impact on natural language processing in recent years (Han et al., 2021). Figure 1 provides an overview of how different models compare in size and availability."
290
+ },
291
+ {
292
+ "type": "title",
293
+ "bbox": [
294
+ 0.172,
295
+ 0.84,
296
+ 0.39,
297
+ 0.854
298
+ ],
299
+ "angle": 0,
300
+ "content": "2.1 PRETRAINING METHODS"
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.171,
306
+ 0.866,
307
+ 0.825,
308
+ 0.896
309
+ ],
310
+ "angle": 0,
311
+ "content": "We discuss three popular pretraining methods used in code language modeling. An illustration of these methods are shown in Figure 2."
312
+ },
313
+ {
314
+ "type": "page_footnote",
315
+ "bbox": [
316
+ 0.192,
317
+ 0.91,
318
+ 0.561,
319
+ 0.924
320
+ ],
321
+ "angle": 0,
322
+ "content": "3The exact training set that Codex was trained on is unknown."
323
+ },
324
+ {
325
+ "type": "page_number",
326
+ "bbox": [
327
+ 0.494,
328
+ 0.949,
329
+ 0.505,
330
+ 0.96
331
+ ],
332
+ "angle": 0,
333
+ "content": "2"
334
+ }
335
+ ],
336
+ [
337
+ {
338
+ "type": "header",
339
+ "bbox": [
340
+ 0.173,
341
+ 0.033,
342
+ 0.536,
343
+ 0.049
344
+ ],
345
+ "angle": 0,
346
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
347
+ },
348
+ {
349
+ "type": "image",
350
+ "bbox": [
351
+ 0.248,
352
+ 0.11,
353
+ 0.75,
354
+ 0.248
355
+ ],
356
+ "angle": 0,
357
+ "content": null
358
+ },
359
+ {
360
+ "type": "image_caption",
361
+ "bbox": [
362
+ 0.172,
363
+ 0.262,
364
+ 0.828,
365
+ 0.299
366
+ ],
367
+ "angle": 0,
368
+ "content": "Figure 1: Existing language models of code, their sizes and availability (open source vs. not open-source)."
369
+ },
370
+ {
371
+ "type": "image",
372
+ "bbox": [
373
+ 0.174,
374
+ 0.324,
375
+ 0.826,
376
+ 0.419
377
+ ],
378
+ "angle": 0,
379
+ "content": null
380
+ },
381
+ {
382
+ "type": "image_caption",
383
+ "bbox": [
384
+ 0.322,
385
+ 0.433,
386
+ 0.673,
387
+ 0.449
388
+ ],
389
+ "angle": 0,
390
+ "content": "Figure 2: Three types of pretrained language models."
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.171,
396
+ 0.484,
397
+ 0.827,
398
+ 0.61
399
+ ],
400
+ "angle": 0,
401
+ "content": "Left-to-Right Language Models (Figure 2, left) Auto-regressive, Left-to-right LMs, predict the probability of a token given the previous tokens. In code modeling, CodeGPT (124M) (Lu et al., 2021), CodeParrot (1.5B) (Tunstall et al., 2022), GPT-Neo (2.7B) (Black et al., 2021), GPT-J (6B) (Wang & Komatsuzaki, 2021), Codex (12B) (Chen et al., 2021), GPT-NeoX (20B) (Black et al., 2022), and Google's (137B) (Austin et al., 2021) belong to this category. The left-to-right nature of these models makes them highly useful for program generation tasks, such as code completion. On the other hand, as code is usually not written in a single, left-to-write pass, it is not trivial to leverage context that appears \"after\" the location of the generation. In this paper, we focus on this family of models and will discuss the existing models in more detail in the following sections."
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.171,
407
+ 0.635,
408
+ 0.827,
409
+ 0.76
410
+ ],
411
+ "angle": 0,
412
+ "content": "Masked Language Models (Figure 2, middle) While auto-regressive language models are powerful for modeling the probability of sequences, their unidirectional nature makes them less suitable for producing effective whole-sequence representations for downstream tasks such as classification. One popular bidirectional objective function used widely in representation learning is masked language modeling (Devlin et al., 2018), where the aim is to predict masked text pieces based on surrounding context. CodeBERT (125M) (Feng et al., 2020) and CuBERT (345M) (Kanade et al., 2020) are examples of such models in code. In programming contexts, these methods provide useful representations of a sequence of code for downstream tasks such as code classification, clone detection, and defect detection."
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.171,
418
+ 0.785,
419
+ 0.827,
420
+ 0.925
421
+ ],
422
+ "angle": 0,
423
+ "content": "Encoder-decoder Models (Figure 2, right) An encoder-decoder model first uses an encoder to encode an input sequence, and then uses a left-to-right LM to decode an output sequence conditioned on the input sequence. Popular pretraining objectives include masked span prediction (Raffel et al., 2019) where the input sequence is randomly masked with multiple masks and the output sequence are the masked contents in order, and denoising sequence reconstruction (Lewis et al., 2019) where the input is a corrupted sequence and the output is the original sequence. These pretrained models are useful in many sequence-to-sequence tasks (Raffel et al., 2019). In code, CodeT5 (220M) (Wang et al., 2021), and PLBART (406M) (Ahmad et al., 2021) use the two objectives mentioned above respectively, and performs well in conditional generation downstream tasks such as code commenting, or natural language to code generation."
424
+ },
425
+ {
426
+ "type": "page_number",
427
+ "bbox": [
428
+ 0.494,
429
+ 0.949,
430
+ 0.504,
431
+ 0.96
432
+ ],
433
+ "angle": 0,
434
+ "content": "3"
435
+ }
436
+ ],
437
+ [
438
+ {
439
+ "type": "header",
440
+ "bbox": [
441
+ 0.173,
442
+ 0.033,
443
+ 0.536,
444
+ 0.049
445
+ ],
446
+ "angle": 0,
447
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
448
+ },
449
+ {
450
+ "type": "title",
451
+ "bbox": [
452
+ 0.172,
453
+ 0.104,
454
+ 0.357,
455
+ 0.119
456
+ ],
457
+ "angle": 0,
458
+ "content": "2.2 PRETRAINING DATA"
459
+ },
460
+ {
461
+ "type": "text",
462
+ "bbox": [
463
+ 0.171,
464
+ 0.13,
465
+ 0.827,
466
+ 0.229
467
+ ],
468
+ "angle": 0,
469
+ "content": "Some models (e.g. CodeParrot and CodeT5) are trained on GitHub code only, with corpora extracted using either Google BigQuery's GitHub dataset \\(^{4}\\), or CodeSearchNet (Husain et al., 2019). Others (e.g., GPT-Neo and GPT-J) are trained on \"the Pile\" (Gao et al., 2020), a large corpus containing a blend of natural language texts and code from various domains, including Stack Exchange dumps, software documentations, and popular (\\(>100\\) stars) GitHub repositories. The datasets on which other proprietary models (Codex, Google's) were trained on are unknown. One goal of our study is to try to shed light on what corpora might be the most useful for pretraining models of code."
470
+ },
471
+ {
472
+ "type": "title",
473
+ "bbox": [
474
+ 0.172,
475
+ 0.247,
476
+ 0.406,
477
+ 0.264
478
+ ],
479
+ "angle": 0,
480
+ "content": "3 EVALUATION SETTINGS"
481
+ },
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.171,
486
+ 0.278,
487
+ 0.761,
488
+ 0.293
489
+ ],
490
+ "angle": 0,
491
+ "content": "We evaluate all models using both extrinsic and intrinsic benchmarks, as described below."
492
+ },
493
+ {
494
+ "type": "text",
495
+ "bbox": [
496
+ 0.171,
497
+ 0.307,
498
+ 0.828,
499
+ 0.462
500
+ ],
501
+ "angle": 0,
502
+ "content": "Extrinsic Evaluation One of the most popular downstream tasks for code modeling is code generation given a natural language description. Following Chen et al. (2021), we evaluate all models on the HumanEval dataset. The dataset contains 164 prompts with descriptions in the form of code comments and function definitions, including argument names and function names, and test cases to judge whether the generated code is correct. To generate code given a prompt, we use the same sampling strategy as Chen et al. (2021), using softmax with a temperature parameter \\(\\text{softmax}(x / T)\\). We evaluate using a wide range of temperatures \\(T = [0.2, 0.4, 0.6, 0.8]\\) to control for the confidence of the model's predictions. Similarly to Codex, we use nucleus sampling (Holtzman et al., 2019) with top-\\(p = 0.95\\). We sample tokens from the model until we encounter one of the following stop sequences that indicate the end of a method: 'nclass', 'ndef', '\\nif', '\\nif', or '\\nprint'. We randomly sample 100 examples per prompt in the evaluation dataset."
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.171,
508
+ 0.474,
509
+ 0.827,
510
+ 0.603
511
+ ],
512
+ "angle": 0,
513
+ "content": "Intrinsic Evaluation To evaluate the intrinsic performance of different models, we compute the perplexity for each language on an unseen set of GitHub repositories. To prevent training-to-test data leakage for models such as GPT-Neo and GPT-J, we remove repositories in our evaluation dataset that appeared in the GitHub portion of the Pile training dataset \\(^{6}\\). To evaluate Codex, we use OpenAI's API \\(^{7}\\), choosing the code-davinci-001 engine. We note that the data that this model was trained on is unknown, so we cannot prevent data leakage from the training to the test set for Codex. We sampled 100 random files for each of the 12 programming languages in our evaluation dataset. To make perplexity comparable across different tokenization methods used in different models, we use Pygments \\(^{8}\\) to equally normalize the log-likelihood sum of each model, when computing perplexity."
514
+ },
515
+ {
516
+ "type": "title",
517
+ "bbox": [
518
+ 0.172,
519
+ 0.619,
520
+ 0.383,
521
+ 0.635
522
+ ],
523
+ "angle": 0,
524
+ "content": "4 COMPARED MODELS"
525
+ },
526
+ {
527
+ "type": "title",
528
+ "bbox": [
529
+ 0.172,
530
+ 0.65,
531
+ 0.351,
532
+ 0.663
533
+ ],
534
+ "angle": 0,
535
+ "content": "4.1 EXISTING MODELS"
536
+ },
537
+ {
538
+ "type": "text",
539
+ "bbox": [
540
+ 0.171,
541
+ 0.676,
542
+ 0.825,
543
+ 0.706
544
+ ],
545
+ "angle": 0,
546
+ "content": "As discussed in Section 2, we mainly focus on auto-regressive left-to-right pretrained language models, most suitable for code completion tasks."
547
+ },
548
+ {
549
+ "type": "text",
550
+ "bbox": [
551
+ 0.171,
552
+ 0.711,
553
+ 0.826,
554
+ 0.809
555
+ ],
556
+ "angle": 0,
557
+ "content": "We evaluate Codex, as it is currently deployed in real-world and has impressive performance in code completion (Chen et al., 2021). Codex uses the GPT-3 language model (Brown et al., 2020) as its underlying model architecture. Codex was trained on a dataset spanning 179GB (after deduplication) covering over 54 million public Python repositories obtained from GitHub on May 2020. As reflected in its impressive results in other programming languages than Python, we suspect that Codex was also trained on large corpora of additional programming languages. The model available for querying through a non-free API."
558
+ },
559
+ {
560
+ "type": "page_footnote",
561
+ "bbox": [
562
+ 0.172,
563
+ 0.816,
564
+ 0.827,
565
+ 0.842
566
+ ],
567
+ "angle": 0,
568
+ "content": "4https://cloud.google.com/blog/topics/public-datasets/github-on-bigquery-analyze-all-the-open-source-code"
569
+ },
570
+ {
571
+ "type": "page_footnote",
572
+ "bbox": [
573
+ 0.194,
574
+ 0.843,
575
+ 0.77,
576
+ 0.857
577
+ ],
578
+ "angle": 0,
579
+ "content": "5The absence of whitespace, which is significant in Python, signals an exit from the method body."
580
+ },
581
+ {
582
+ "type": "page_footnote",
583
+ "bbox": [
584
+ 0.194,
585
+ 0.857,
586
+ 0.549,
587
+ 0.87
588
+ ],
589
+ "angle": 0,
590
+ "content": "<sup>6</sup>https://github.com/EleutherAI/github-download"
591
+ },
592
+ {
593
+ "type": "page_footnote",
594
+ "bbox": [
595
+ 0.194,
596
+ 0.87,
597
+ 0.661,
598
+ 0.884
599
+ ],
600
+ "angle": 0,
601
+ "content": "<sup>7</sup>https://beta.openai.com/docs/engines/codex-series-private-beta"
602
+ },
603
+ {
604
+ "type": "page_footnote",
605
+ "bbox": [
606
+ 0.194,
607
+ 0.884,
608
+ 0.445,
609
+ 0.898
610
+ ],
611
+ "angle": 0,
612
+ "content": "<sup>8</sup>https://pygments.org/docs/lexers/"
613
+ },
614
+ {
615
+ "type": "page_footnote",
616
+ "bbox": [
617
+ 0.171,
618
+ 0.898,
619
+ 0.825,
620
+ 0.925
621
+ ],
622
+ "angle": 0,
623
+ "content": "Every model uses its original tokenizer for predicting the next token. We use the shared tokenizer only for computing the perplexity given the log-likelihood sum."
624
+ },
625
+ {
626
+ "type": "list",
627
+ "bbox": [
628
+ 0.171,
629
+ 0.816,
630
+ 0.827,
631
+ 0.925
632
+ ],
633
+ "angle": 0,
634
+ "content": null
635
+ },
636
+ {
637
+ "type": "page_number",
638
+ "bbox": [
639
+ 0.494,
640
+ 0.949,
641
+ 0.505,
642
+ 0.96
643
+ ],
644
+ "angle": 0,
645
+ "content": "4"
646
+ }
647
+ ],
648
+ [
649
+ {
650
+ "type": "header",
651
+ "bbox": [
652
+ 0.173,
653
+ 0.033,
654
+ 0.536,
655
+ 0.049
656
+ ],
657
+ "angle": 0,
658
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
659
+ },
660
+ {
661
+ "type": "text",
662
+ "bbox": [
663
+ 0.17,
664
+ 0.104,
665
+ 0.827,
666
+ 0.229
667
+ ],
668
+ "angle": 0,
669
+ "content": "As for open-source models, we compare GPT-Neo, GPT-J and GPT-NeoX, the largest variants having 2.7, 6 and 20 billion parameters, respectively. GPT-NeoX is the largest open-source pretrained language models available. These models are trained on the Pile dataset, so they are a good representatives of models that were trained on both natural language texts from various domains and source code from GitHub. We also compare CodeParrot with at most 1.5 billion parameters, a model that was only trained on Python code from GitHub. CodeParrot follows the process used in Chen et al. (2021) that obtained over 20M files Python files from Google BigQuery Github database, resulting in a 180GB dataset, which is comparable to Codex's Python training data, but the model itself is much smaller."
670
+ },
671
+ {
672
+ "type": "text",
673
+ "bbox": [
674
+ 0.171,
675
+ 0.236,
676
+ 0.825,
677
+ 0.281
678
+ ],
679
+ "angle": 0,
680
+ "content": "There was no large open-source language model trained almost exclusively on code from multiple programming languages. To fill this gap, we train a 2.7 billion model, PolyCoder, on a mixture of repositories from GitHub in 12 different programming languages."
681
+ },
682
+ {
683
+ "type": "table",
684
+ "bbox": [
685
+ 0.223,
686
+ 0.29,
687
+ 0.778,
688
+ 0.49
689
+ ],
690
+ "angle": 0,
691
+ "content": "<table><tr><td>Language</td><td>Repositories</td><td>Files</td><td>Size Before Filtering</td><td>Size After Filtering</td></tr><tr><td>C</td><td>10,749</td><td>3,037,112</td><td>221G</td><td>55G</td></tr><tr><td>C#</td><td>9,511</td><td>2,514,494</td><td>30G</td><td>21G</td></tr><tr><td>C++</td><td>13,726</td><td>4,289,506</td><td>115G</td><td>52G</td></tr><tr><td>Go</td><td>12,371</td><td>1,416,789</td><td>70G</td><td>15G</td></tr><tr><td>Java</td><td>15,044</td><td>5,120,129</td><td>60G</td><td>41G</td></tr><tr><td>JavaScript</td><td>25,144</td><td>1,774,174</td><td>66G</td><td>22G</td></tr><tr><td>PHP</td><td>9,960</td><td>1,714,058</td><td>21G</td><td>13G</td></tr><tr><td>Python</td><td>25,446</td><td>1,550,208</td><td>24G</td><td>16G</td></tr><tr><td>Ruby</td><td>5,826</td><td>674,343</td><td>5.0G</td><td>4.1G</td></tr><tr><td>Rust</td><td>4,991</td><td>304,842</td><td>5.2G</td><td>3.5G</td></tr><tr><td>Scala</td><td>1,497</td><td>245,100</td><td>2.2G</td><td>1.8G</td></tr><tr><td>TypeScript</td><td>12,830</td><td>1,441,926</td><td>12G</td><td>9.2G</td></tr><tr><td>Total</td><td>147,095</td><td>24,082,681</td><td>631.4G</td><td>253.6G</td></tr></table>"
692
+ },
693
+ {
694
+ "type": "table_caption",
695
+ "bbox": [
696
+ 0.383,
697
+ 0.5,
698
+ 0.612,
699
+ 0.515
700
+ ],
701
+ "angle": 0,
702
+ "content": "Table 1: Training corpus statistics."
703
+ },
704
+ {
705
+ "type": "title",
706
+ "bbox": [
707
+ 0.172,
708
+ 0.539,
709
+ 0.362,
710
+ 0.552
711
+ ],
712
+ "angle": 0,
713
+ "content": "4.2 POLYCODER'S DATA"
714
+ },
715
+ {
716
+ "type": "text",
717
+ "bbox": [
718
+ 0.17,
719
+ 0.565,
720
+ 0.827,
721
+ 0.65
722
+ ],
723
+ "angle": 0,
724
+ "content": "Raw Code Corpus Collection GitHub is an excellent source for publicly available source code of various programming languages. We cloned the most popular repositories for 12 popular programming languages with at least 50 stars (stopping at about 25K per language to avoid a too heavy skew towards popular programming languages) from GitHub in October 2021. For each project, each file belonging to the majority-language of that project was extracted, yielding the initial training set. This initial, unfiltered dataset spanned 631GB and 38.9M files."
725
+ },
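To make the majority-language extraction step concrete, here is a minimal sketch under stated assumptions; it is not the authors' actual pipeline, and the extension-to-language mapping covers only a few of the 12 languages for illustration.

```python
import os

# Assumption: an illustrative extension-to-language mapping; the real pipeline
# would need to cover all 12 languages listed in Table 1.
EXT_TO_LANG = {".c": "C", ".py": "Python", ".java": "Java", ".go": "Go", ".rs": "Rust"}

def majority_language_files(repo_dir):
    """Collect the files written in the repository's majority language."""
    files_by_lang = {}
    for root, _, names in os.walk(repo_dir):
        for name in names:
            lang = EXT_TO_LANG.get(os.path.splitext(name)[1])
            if lang is not None:
                files_by_lang.setdefault(lang, []).append(os.path.join(root, name))
    if not files_by_lang:
        return []
    majority = max(files_by_lang, key=lambda lang: len(files_by_lang[lang]))
    return files_by_lang[majority]
```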
726
+ {
727
+ "type": "text",
728
+ "bbox": [
729
+ 0.171,
730
+ 0.663,
731
+ 0.825,
732
+ 0.708
733
+ ],
734
+ "angle": 0,
735
+ "content": "Data Preprocessing The detailed data preprocessing strategy comparison with other models are analyzed in Table 2. In general, we tried to follow Codex's design decisions, although there is a fair bit of ambiguity in the description of its data preprocessing."
736
+ },
737
+ {
738
+ "type": "text",
739
+ "bbox": [
740
+ 0.17,
741
+ 0.72,
742
+ 0.825,
743
+ 0.779
744
+ ],
745
+ "angle": 0,
746
+ "content": "Deduplication and Filtering Similarly to Codex and CodeParrot, very large (>1MB) and very short (<100 tokens) files were filtered out, reducing the size of the dataset by \\(33\\%\\), from 631GB to 424GB. This only reduced the total number of files by \\(8\\%\\), showing that a small number of files were responsible for a large part of the corpus.[10]"
747
+ },
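As an illustration of this filter, a minimal sketch follows; treating "tokens" as whitespace-separated tokens is an assumption, since the exact token definition used for the 100-token threshold is not specified.

```python
import os

MAX_BYTES = 1_000_000   # drop files larger than ~1MB
MIN_TOKENS = 100        # drop files shorter than 100 tokens

def passes_size_filter(path):
    """Return True if a file survives the very-large / very-short filter."""
    if os.path.getsize(path) > MAX_BYTES:
        return False
    with open(path, encoding="utf-8", errors="ignore") as f:
        text = f.read()
    # Assumption: tokens approximated by whitespace splitting; the paper does not
    # specify which tokenizer defines the 100-token minimum.
    return len(text.split()) >= MIN_TOKENS
```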
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.171,
752
+ 0.784,
753
+ 0.825,
754
+ 0.84
755
+ ],
756
+ "angle": 0,
757
+ "content": "Allamanis (2019) has shown that code duplication that commonly manifests in datasets of code adversely effects language modeling of code. Therefore, we deduplicated files based on a hash of their content, which reduced the number of files by nearly \\(30\\%\\), and the dataset size by additional \\(29\\%\\), leaving 24.1M files and 254GB of data."
758
+ },
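A minimal sketch of this exact-match deduplication is shown below; the paper only says files are deduplicated by a hash of their content, so the choice of SHA-256 is an assumption.

```python
import hashlib

def deduplicate(paths):
    """Keep the first file seen for each distinct content hash (exact deduplication)."""
    seen, unique = set(), []
    for path in paths:
        with open(path, "rb") as f:
            digest = hashlib.sha256(f.read()).hexdigest()
        if digest not in seen:
            seen.add(digest)
            unique.append(path)
    return unique
```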
759
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.171,
763
+ 0.846,
764
+ 0.825,
765
+ 0.89
766
+ ],
767
+ "angle": 0,
768
+ "content": "Overall, the filtering of very large and very short files plus dedduplication, reduced the number of files by \\(38\\%\\), and the dataset size by \\(61\\%\\), roughly on par with the \\(70\\%\\) dataset size reduction reported by CodeParrot. A key difference that remains is that other approaches use more fine-grained filtering"
769
+ },
770
+ {
771
+ "type": "page_footnote",
772
+ "bbox": [
773
+ 0.171,
774
+ 0.898,
775
+ 0.825,
776
+ 0.925
777
+ ],
778
+ "angle": 0,
779
+ "content": "10Codex additionally mentions removing \"auto-generated\" files, but the definition of this was not clear, so we omitted this step."
780
+ },
781
+ {
782
+ "type": "page_number",
783
+ "bbox": [
784
+ 0.494,
785
+ 0.949,
786
+ 0.505,
787
+ 0.96
788
+ ],
789
+ "angle": 0,
790
+ "content": "5"
791
+ }
792
+ ],
793
+ [
794
+ {
795
+ "type": "header",
796
+ "bbox": [
797
+ 0.173,
798
+ 0.033,
799
+ 0.536,
800
+ 0.049
801
+ ],
802
+ "angle": 0,
803
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
804
+ },
805
+ {
806
+ "type": "table",
807
+ "bbox": [
808
+ 0.174,
809
+ 0.102,
810
+ 0.835,
811
+ 0.27
812
+ ],
813
+ "angle": 0,
814
+ "content": "<table><tr><td></td><td>PolyCoder</td><td>CodeParrot</td><td>Codex</td></tr><tr><td>Dedup</td><td>Exact</td><td>Exact</td><td>Unclear, mentions “unique”</td></tr><tr><td>Filtering</td><td>Files &gt; 1 MB, &lt; 100 to-kens</td><td>Files &gt; 1MB, max line length &gt; 1000, mean line length &gt; 100, fraction of alphanumeric charac-ters &lt; 0.25, containing the word “auto-generated” or similar in the first 5 lines</td><td>Files &gt; 1MB, max line length &gt; 1000, mean line length &gt; 100, auto-generated (details unclear), contained small percentage of al-phenmeric characters (details unclear)</td></tr><tr><td>Tokenization</td><td>Trained GPT-2 tok-enizer on a random 5% subset (all languages)</td><td>Trained GPT-2 tokenizer on train split</td><td>GPT-3 tokenizer, add multi-whitespace tokens to reduce re-dundant whitespace tokens</td></tr></table>"
815
+ },
816
+ {
817
+ "type": "table_caption",
818
+ "bbox": [
819
+ 0.255,
820
+ 0.279,
821
+ 0.741,
822
+ 0.294
823
+ ],
824
+ "angle": 0,
825
+ "content": "Table 2: Comparison of data preprocessing strategies of different models."
826
+ },
827
+ {
828
+ "type": "text",
829
+ "bbox": [
830
+ 0.171,
831
+ 0.351,
832
+ 0.827,
833
+ 0.381
834
+ ],
835
+ "angle": 0,
836
+ "content": "strategies, such as limiting the maximum line length or average line length, filtering of probable auto-generated files, etc. For example, Chen et al. (2021) have filtered only \\(11\\%\\) of their training data."
837
+ },
838
+ {
839
+ "type": "text",
840
+ "bbox": [
841
+ 0.171,
842
+ 0.387,
843
+ 0.825,
844
+ 0.43
845
+ ],
846
+ "angle": 0,
847
+ "content": "The dataset statistics are shown in Table 1, showcasing data sizes per language before and after filtering. Our dataset contains less Python code (only 16G) than Codex or CodeParrot, and instead covers many different programming languages."
848
+ },
849
+ {
850
+ "type": "text",
851
+ "bbox": [
852
+ 0.171,
853
+ 0.478,
854
+ 0.827,
855
+ 0.536
856
+ ],
857
+ "angle": 0,
858
+ "content": "Tokenizer We train a GPT-2 tokenizer (using BPE (Sennrich et al., 2015)) on a random \\(5\\%\\) subset of all the pretraining data, containing all the languages. Codex uses an existing trained GPT-3 tokenizer, with the addition of multi-whitespace tokens to reduce the sequence length after tokenization, as consecutive whitespaces are more common in code than in text."
859
+ },
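A minimal sketch of training such a tokenizer with the HuggingFace tokenizers library is given below; the vocabulary size (GPT-2's 50,257) and the special token are assumptions, as the text only states that a GPT-2-style BPE tokenizer is trained on a random 5% subset of the files across all languages.

```python
import random
from tokenizers import ByteLevelBPETokenizer

def train_code_tokenizer(all_files, subset_fraction=0.05, vocab_size=50257):
    """Train a GPT-2-style byte-level BPE tokenizer on a random subset of the corpus."""
    random.seed(0)
    subset = random.sample(all_files, max(1, int(len(all_files) * subset_fraction)))
    tokenizer = ByteLevelBPETokenizer()
    # Assumption: vocab_size and special token mirror GPT-2; the paper does not state them.
    tokenizer.train(files=subset, vocab_size=vocab_size,
                    special_tokens=["<|endoftext|>"])
    return tokenizer
```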
860
+ {
861
+ "type": "title",
862
+ "bbox": [
863
+ 0.172,
864
+ 0.586,
865
+ 0.395,
866
+ 0.6
867
+ ],
868
+ "angle": 0,
869
+ "content": "4.3 POLYCODER'S TRAINING"
870
+ },
871
+ {
872
+ "type": "text",
873
+ "bbox": [
874
+ 0.17,
875
+ 0.625,
876
+ 0.827,
877
+ 0.835
878
+ ],
879
+ "angle": 0,
880
+ "content": "Considering our budget, we chose the GPT-2 (Radford et al., 2019) as our model architecture. To study the effect of scaling of model size, we train 3 different sized models, with 2.7 billion, 400 million and 160 million parameters, as the largest 2.7B model being on par with GPT-Neo for fair comparison. The 2.7 billion model is a 32 layer, 2,560 dimensional Transformer model, with a max context window of 2048 tokens, trained with a batch size of 128 sequences (262K tokens). The model is trained for 150K steps. The 400 million model is a 24 layer, 1,024 dimensional variant, and the 160 million model is a 12 layer, 768 dimensional variant, otherwise idem. We use GPT-NeoX toolkit to train the model efficiently in parallel with 8 Nvidia RTX 8000 GPUs on a single machine. The wall time used to train the largest 2.7B model is about 6 weeks. In its default configuration, this model should train for 320K steps, which was not feasible with our resources. Instead, we adjusted the learning rate decay to half this number and trained for up to 150K steps (near-convergence). The training and validation loss curves for different sized models are shown in Figure 3. We see that even after training for 150K steps, the validation losses are still decreasing. This, combined with the shorter training schedule and faster learning rate decay, strongly signals that the models are still under-fitting and could benefit from longer training."
881
+ },
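For reference, the architecture and schedule settings stated in this paragraph can be summarized as a small configuration table; fields the text does not mention (e.g., number of attention heads) are omitted rather than guessed.

```python
# Settings stated in the text; all PolyCoder variants use a 2048-token context window.
POLYCODER_CONFIGS = {
    "2.7B": {"layers": 32, "hidden_size": 2560, "context_window": 2048,
             "batch_sequences": 128, "batch_tokens": 128 * 2048,  # ~262K tokens
             "train_steps": 150_000},
    "400M": {"layers": 24, "hidden_size": 1024, "context_window": 2048},
    "160M": {"layers": 12, "hidden_size": 768, "context_window": 2048},
}
```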
882
+ {
883
+ "type": "text",
884
+ "bbox": [
885
+ 0.17,
886
+ 0.84,
887
+ 0.827,
888
+ 0.926
889
+ ],
890
+ "angle": 0,
891
+ "content": "We compare the training setting and hyperparameters with CodeParrot and Codex in Table 3. Due to high computational costs, we were unable to perform hyperparameter search. Most hyperparameters are the same as those used in their respective GPT-2 model training \\(^{12}\\) to provide a good default with regards to the corresponding model size. Some key differences include context window sizes to allow for more tokens as context, batch sizes and tokens trained, as well as model initialization with or without natural language knowledge."
892
+ },
893
+ {
894
+ "type": "page_number",
895
+ "bbox": [
896
+ 0.494,
897
+ 0.949,
898
+ 0.506,
899
+ 0.96
900
+ ],
901
+ "angle": 0,
902
+ "content": "6"
903
+ }
904
+ ],
905
+ [
906
+ {
907
+ "type": "header",
908
+ "bbox": [
909
+ 0.173,
910
+ 0.033,
911
+ 0.536,
912
+ 0.049
913
+ ],
914
+ "angle": 0,
915
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
916
+ },
917
+ {
918
+ "type": "table",
919
+ "bbox": [
920
+ 0.18,
921
+ 0.102,
922
+ 0.822,
923
+ 0.309
924
+ ],
925
+ "angle": 0,
926
+ "content": "<table><tr><td></td><td>PolyCoder (2.7B)</td><td>CodeParrot (1.5B)</td><td>Codex (12B)</td></tr><tr><td>Model Initialization</td><td>From scratch</td><td>From scratch</td><td>Initialized from GPT-3</td></tr><tr><td>NL Knowledge</td><td>Learned from comments in the code</td><td>Learned from comments in the code</td><td>Natural language knowledge from GPT-3</td></tr><tr><td>Learning Rate</td><td>1.6e-4</td><td>2.0e-4</td><td>1e-4</td></tr><tr><td>Optimizer</td><td>AdamW</td><td>AdamW</td><td>AdamW</td></tr><tr><td>Adam betas</td><td>0.9, 0.999</td><td>0.9, 0.999</td><td>0.9, 0.95</td></tr><tr><td>Adam eps</td><td>1e-8</td><td>1e-8</td><td>1e-8</td></tr><tr><td>Weight Decay</td><td>-</td><td>0.1</td><td>0.1</td></tr><tr><td>Warmup Steps</td><td>1600</td><td>750</td><td>175</td></tr><tr><td>Learning Rate Decay</td><td>Cosine</td><td>Cosine</td><td>Cosine</td></tr><tr><td>Batch Size (#tokens)</td><td>262K</td><td>524K</td><td>2M</td></tr><tr><td>Training Steps</td><td>150K steps, 39B tokens</td><td>50K steps, 26B tokens</td><td>100B tokens</td></tr><tr><td>Context Window</td><td>2048</td><td>1024</td><td>4096</td></tr></table>"
927
+ },
928
+ {
929
+ "type": "table_caption",
930
+ "bbox": [
931
+ 0.171,
932
+ 0.329,
933
+ 0.825,
934
+ 0.345
935
+ ],
936
+ "angle": 0,
937
+ "content": "Table 3: Comparison of design decisions and hyper-parameters in training different models of code."
938
+ },
939
+ {
940
+ "type": "image",
941
+ "bbox": [
942
+ 0.183,
943
+ 0.365,
944
+ 0.492,
945
+ 0.491
946
+ ],
947
+ "angle": 0,
948
+ "content": null
949
+ },
950
+ {
951
+ "type": "image_caption",
952
+ "bbox": [
953
+ 0.301,
954
+ 0.504,
955
+ 0.374,
956
+ 0.518
957
+ ],
958
+ "angle": 0,
959
+ "content": "(a) Training"
960
+ },
961
+ {
962
+ "type": "image",
963
+ "bbox": [
964
+ 0.5,
965
+ 0.363,
966
+ 0.817,
967
+ 0.491
968
+ ],
969
+ "angle": 0,
970
+ "content": null
971
+ },
972
+ {
973
+ "type": "image_caption",
974
+ "bbox": [
975
+ 0.619,
976
+ 0.504,
977
+ 0.702,
978
+ 0.517
979
+ ],
980
+ "angle": 0,
981
+ "content": "(b) Validation"
982
+ },
983
+ {
984
+ "type": "image_caption",
985
+ "bbox": [
986
+ 0.245,
987
+ 0.529,
988
+ 0.75,
989
+ 0.545
990
+ ],
991
+ "angle": 0,
992
+ "content": "Figure 3: Training and validation loss during the 150K step training process."
993
+ },
994
+ {
995
+ "type": "title",
996
+ "bbox": [
997
+ 0.172,
998
+ 0.57,
999
+ 0.283,
1000
+ 0.585
1001
+ ],
1002
+ "angle": 0,
1003
+ "content": "5 RESULTS"
1004
+ },
1005
+ {
1006
+ "type": "title",
1007
+ "bbox": [
1008
+ 0.172,
1009
+ 0.603,
1010
+ 0.388,
1011
+ 0.617
1012
+ ],
1013
+ "angle": 0,
1014
+ "content": "5.1 EXTRINSIC EVALUATION"
1015
+ },
1016
+ {
1017
+ "type": "text",
1018
+ "bbox": [
1019
+ 0.171,
1020
+ 0.628,
1021
+ 0.827,
1022
+ 0.794
1023
+ ],
1024
+ "angle": 0,
1025
+ "content": "The overall results are shown in Table 4.\\(^{13}\\) The numbers are obtained by sampling with different temperatures and picking the best value for each metric. Among existing models, PolyCoder is worse than similarly sized GPT-Neo and the even smaller Codex 300M. Overall, PolyCoder lies after Codex, GPT-Neo/J, while performing stronger than CodeParrot. PolyCoder, which was trained only on code, falls behind a similar sized model (GPT-Neo 2.7B) trained on the Pile, a blend of natural language texts and code. Looking at the rightmost columns in Table 4 offers a potential explanation: in terms of total Python tokens seen during training, all models substantially exceed ours. This in partly because they use a higher proportion of Python code (we aimed to balance data volume across programming languages), and in part because of resource limitations, which lead to PolyCoder not observing its entire training data. In addition, the natural language blend in the training corpus may help code language modeling as well, especially with code-related texts such as Stack Exchange dumps being included."
1026
+ },
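The Pass@k values follow the HumanEval protocol of Chen et al. (2021). For reference, their unbiased estimator, computed from n generated samples per problem of which c pass the unit tests, can be implemented as follows; this reproduces the standard metric, not necessarily the exact evaluation script used here.

```python
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimator from Chen et al. (2021).
    n: samples generated per problem, c: samples that pass the unit tests."""
    if n - c < k:
        return 1.0
    return float(1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))
```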
1027
+ {
1028
+ "type": "text",
1029
+ "bbox": [
1030
+ 0.171,
1031
+ 0.802,
1032
+ 0.825,
1033
+ 0.86
1034
+ ],
1035
+ "angle": 0,
1036
+ "content": "Compared to GPT-Neo (2.7B), PolyCoder has seen fewer Python tokens, but more code tokens in other programming languages, hinting that transfer from other languages to Python helps to achieve a similar performance. This suggests that future research could benefit from blending code in different programming languages, as well as natural language text."
1037
+ },
1038
+ {
1039
+ "type": "page_footnote",
1040
+ "bbox": [
1041
+ 0.189,
1042
+ 0.87,
1043
+ 0.483,
1044
+ 0.884
1045
+ ],
1046
+ "angle": 0,
1047
+ "content": "11https://github.com/EleutherAI/gpt-neox"
1048
+ },
1049
+ {
1050
+ "type": "page_footnote",
1051
+ "bbox": [
1052
+ 0.19,
1053
+ 0.884,
1054
+ 0.614,
1055
+ 0.898
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "\\(^{12}\\)https://github.com/EleutherAI/gpt-neox/tree/main/configs"
1059
+ },
1060
+ {
1061
+ "type": "page_footnote",
1062
+ "bbox": [
1063
+ 0.175,
1064
+ 0.898,
1065
+ 0.825,
1066
+ 0.925
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "<sup>13</sup>Due to the large model size of GPT-NeoX (20B) and limited computational budget, we did not include it in the HumanEval experiment."
1070
+ },
1071
+ {
1072
+ "type": "list",
1073
+ "bbox": [
1074
+ 0.175,
1075
+ 0.87,
1076
+ 0.825,
1077
+ 0.925
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": null
1081
+ },
1082
+ {
1083
+ "type": "page_number",
1084
+ "bbox": [
1085
+ 0.494,
1086
+ 0.949,
1087
+ 0.504,
1088
+ 0.96
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": "7"
1092
+ }
1093
+ ],
1094
+ [
1095
+ {
1096
+ "type": "header",
1097
+ "bbox": [
1098
+ 0.173,
1099
+ 0.033,
1100
+ 0.536,
1101
+ 0.049
1102
+ ],
1103
+ "angle": 0,
1104
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
1105
+ },
1106
+ {
1107
+ "type": "table",
1108
+ "bbox": [
1109
+ 0.174,
1110
+ 0.102,
1111
+ 0.847,
1112
+ 0.299
1113
+ ],
1114
+ "angle": 0,
1115
+ "content": "<table><tr><td>Model</td><td>Pass@1</td><td>Pass@10</td><td>Pass@100</td><td>Tokens Trained</td><td>Code Tokens</td><td>Python Tokens</td></tr><tr><td>PolyCoder (160M)</td><td>2.13%</td><td>3.35%</td><td>4.88%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>PolyCoder (400M)</td><td>2.96%</td><td>5.29%</td><td>11.59%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>PolyCoder (2.7B)</td><td>5.59%</td><td>9.84%</td><td>17.68%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>CodeParrot (110M)</td><td>3.80%</td><td>6.57%</td><td>12.78%</td><td>26B</td><td>26B</td><td>26B</td></tr><tr><td>CodeParrot (1.5B)</td><td>3.58%</td><td>8.03%</td><td>14.96%</td><td>26B</td><td>26B</td><td>26B</td></tr><tr><td>GPT-Neo (125M)</td><td>0.75%</td><td>1.88%</td><td>2.97%</td><td>300B</td><td>22.8B</td><td>3.1B</td></tr><tr><td>GPT-Neo (1.3B)</td><td>4.79%</td><td>7.47%</td><td>16.30%</td><td>380B</td><td>28.8B</td><td>3.9B</td></tr><tr><td>GPT-Neo (2.7B)</td><td>6.41%</td><td>11.27%</td><td>21.37%</td><td>420B</td><td>31.9B</td><td>4.3B</td></tr><tr><td>GPT-J (6B)</td><td>11.62%</td><td>15.74%</td><td>27.74%</td><td>402B</td><td>30.5B</td><td>4.1B</td></tr><tr><td>Codex (300M)</td><td>13.17%</td><td>20.37%</td><td>36.27%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr><tr><td>Codex (2.5B)</td><td>21.36%</td><td>35.42%</td><td>59.50%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr><tr><td>Codex (12B)</td><td>28.81%</td><td>46.81%</td><td>72.31%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr></table>"
1116
+ },
1117
+ {
1118
+ "type": "table_footnote",
1119
+ "bbox": [
1120
+ 0.48,
1121
+ 0.299,
1122
+ 0.835,
1123
+ 0.311
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "*Codex is initialized with another pretrained model, GPT-3."
1127
+ },
1128
+ {
1129
+ "type": "table_caption",
1130
+ "bbox": [
1131
+ 0.171,
1132
+ 0.323,
1133
+ 0.825,
1134
+ 0.355
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "Table 4: Results of different models on the HumanEval benchmark, and the number of different types of tokens seen during the training process."
1138
+ },
1139
+ {
1140
+ "type": "image",
1141
+ "bbox": [
1142
+ 0.182,
1143
+ 0.377,
1144
+ 0.386,
1145
+ 0.491
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": null
1149
+ },
1150
+ {
1151
+ "type": "image_caption",
1152
+ "bbox": [
1153
+ 0.248,
1154
+ 0.503,
1155
+ 0.317,
1156
+ 0.516
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "(a) Pass@1"
1160
+ },
1161
+ {
1162
+ "type": "image",
1163
+ "bbox": [
1164
+ 0.397,
1165
+ 0.377,
1166
+ 0.599,
1167
+ 0.49
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": null
1171
+ },
1172
+ {
1173
+ "type": "image_caption",
1174
+ "bbox": [
1175
+ 0.458,
1176
+ 0.503,
1177
+ 0.536,
1178
+ 0.516
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "(b) Pass@10"
1182
+ },
1183
+ {
1184
+ "type": "image",
1185
+ "bbox": [
1186
+ 0.614,
1187
+ 0.377,
1188
+ 0.817,
1189
+ 0.49
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": null
1193
+ },
1194
+ {
1195
+ "type": "image_caption",
1196
+ "bbox": [
1197
+ 0.673,
1198
+ 0.503,
1199
+ 0.756,
1200
+ 0.516
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": "(c) Pass@100"
1204
+ },
1205
+ {
1206
+ "type": "image_caption",
1207
+ "bbox": [
1208
+ 0.245,
1209
+ 0.528,
1210
+ 0.752,
1211
+ 0.543
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "Figure 4: The scaling effect of HumanEval performance on different models."
1215
+ },
1216
+ {
1217
+ "type": "text",
1218
+ "bbox": [
1219
+ 0.17,
1220
+ 0.575,
1221
+ 0.828,
1222
+ 0.797
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "Scaling Effect To further understand the effect of the number of model parameters with respect to HumanEval code completion performance, we show the Pass@1, Pass@10 and Pass@100 percentage with respect to the model size in Figure 4. We can see that the performance of the Codex models are significantly better than all the other open-source models across all numbers of parameters. The performance on HumanEval benchmark increases linearly with the magnitude (log scale) of the number of parameters in the model. Similar scaling effects could be found on PolyCoder and GPT-Neo/J models. Interestingly, the CodeParrot models that are trained only on Python seem to have reached a saturating performance with respect to increasing number of parameters, where the training corpus being focused on Python may have some effect. With higher number of parameters (2.7B), PolyCoder's performance is trending worse than that of GPT-Neo/J. Comparing GPT-Neo/J that is trained on Pile dataset containing a blend of text, Stack Exchange dumps and GitHub data, with PolyCoder that are trained on only GitHub repositories of popular programming languages, we hypothesize that the added text, especially texts in technical and software engineering domains, may be crucial for the larger model to boost the performance. We also compare the performance difference between the model trained after 100K steps versus the model after 150K steps in Appendix A, and find that training for longer helps the larger model more as it is still under-fitted."
1226
+ },
1227
+ {
1228
+ "type": "text",
1229
+ "bbox": [
1230
+ 0.17,
1231
+ 0.818,
1232
+ 0.829,
1233
+ 0.932
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": "Temperature Effect All the above results are obtained by sampling the language model with different temperatures and picking the best value for each metric. We are also interested in how different choices of temperature affects the final generation quality. We summarize the results in Figure 5. The general trend is for Pass@1, lower temperatures are better, and for Pass@100, a higher temperature will help, while for Pass@10 a temperature in the middle is better suited. We hypothesize that this is because a higher temperature during generation makes the model less confident in its predictions and thus allow for more exploration and more diverse outputs, resulting in better accuracy at Pass@100. Too high a temperature (0.8) is also hurtful if the model is capable enough."
1237
+ },
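To make the role of the temperature concrete, here is a minimal sketch of temperature-scaled sampling from next-token logits; it illustrates the mechanism discussed above rather than the exact generation code used in these experiments.

```python
import numpy as np

def sample_with_temperature(logits, temperature, rng=None):
    """Sample a token id after dividing the logits by the softmax temperature.
    Lower temperature -> sharper, more confident distribution (favors Pass@1);
    higher temperature -> flatter, more diverse samples (can help Pass@100)."""
    rng = rng or np.random.default_rng()
    scaled = np.asarray(logits, dtype=np.float64) / max(temperature, 1e-8)
    scaled -= scaled.max()                      # numerical stability
    probs = np.exp(scaled) / np.exp(scaled).sum()
    return int(rng.choice(len(probs), p=probs))
```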
1238
+ {
1239
+ "type": "page_number",
1240
+ "bbox": [
1241
+ 0.494,
1242
+ 0.949,
1243
+ 0.504,
1244
+ 0.96
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": "8"
1248
+ }
1249
+ ],
1250
+ [
1251
+ {
1252
+ "type": "header",
1253
+ "bbox": [
1254
+ 0.173,
1255
+ 0.033,
1256
+ 0.536,
1257
+ 0.049
1258
+ ],
1259
+ "angle": 0,
1260
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
1261
+ },
1262
+ {
1263
+ "type": "image",
1264
+ "bbox": [
1265
+ 0.192,
1266
+ 0.107,
1267
+ 0.817,
1268
+ 0.293
1269
+ ],
1270
+ "angle": 0,
1271
+ "content": null
1272
+ },
1273
+ {
1274
+ "type": "image_caption",
1275
+ "bbox": [
1276
+ 0.171,
1277
+ 0.302,
1278
+ 0.828,
1279
+ 0.348
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "Figure 6: Perplexity comparison on our evaluation dataset of different models on different programming languages. Note that the y-axis is capped at 4; CodeParrot's entropy on all languages other than Python is much higher than shown here (see Table 5)."
1283
+ },
1284
+ {
1285
+ "type": "text",
1286
+ "bbox": [
1287
+ 0.171,
1288
+ 0.373,
1289
+ 0.551,
1290
+ 0.483
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "On the contrary, a lower temperature makes the model output very confident in its prediction and thus will be better suited for generating very few correct examples, and thus the better performance for Pass@1. In Appendix B we repeat these experiments with the smaller models as well. This suggests the importance of temperature and the need to tune it individually for different generation scenarios."
1294
+ },
1295
+ {
1296
+ "type": "image",
1297
+ "bbox": [
1298
+ 0.571,
1299
+ 0.381,
1300
+ 0.817,
1301
+ 0.519
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": null
1305
+ },
1306
+ {
1307
+ "type": "image_caption",
1308
+ "bbox": [
1309
+ 0.56,
1310
+ 0.537,
1311
+ 0.825,
1312
+ 0.58
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "Figure 5: HumanEval performance with different softmax temperatures during generation."
1316
+ },
1317
+ {
1318
+ "type": "title",
1319
+ "bbox": [
1320
+ 0.172,
1321
+ 0.501,
1322
+ 0.384,
1323
+ 0.515
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "5.2 INTRINSIC EVALUATION"
1327
+ },
1328
+ {
1329
+ "type": "text",
1330
+ "bbox": [
1331
+ 0.171,
1332
+ 0.528,
1333
+ 0.55,
1334
+ 0.596
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "The perplexity results on the evaluation datasets are shown in Figure 6, with detailed numbers in Appendix C. The plot caps the perplexity score to 4 as CodeParrot performs poorly in languages other than Python. It is important to note that although Codex's perplexities are lower than"
1338
+ },
1339
+ {
1340
+ "type": "text",
1341
+ "bbox": [
1342
+ 0.171,
1343
+ 0.597,
1344
+ 0.825,
1345
+ 0.626
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": "other models in most languages, Codex might have been trained on the test sets, and its results are thus over-optimistic."
1349
+ },
1350
+ {
1351
+ "type": "text",
1352
+ "bbox": [
1353
+ 0.171,
1354
+ 0.632,
1355
+ 0.827,
1356
+ 0.676
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": "Notably, PolyCoder outperforms Codex and all other models in the C language. Comparing the open-source models only, PolyCoder performs better than the similarly sized GPT-Neo 2.7B in C, JavaScript, Rust, Scala and TypeScript."
1360
+ },
1361
+ {
1362
+ "type": "text",
1363
+ "bbox": [
1364
+ 0.171,
1365
+ 0.681,
1366
+ 0.826,
1367
+ 0.793
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": "In the other 11 languages other than C, all other open-source models, including ours, are significantly worse (higher perplexity) than Codex. We hypothesize that this is due to the fact that PolyCoder is trained on an imbalanced mixture of different languages, with C and \\(\\mathrm{C + + }\\) being closely related and the two most dominant in the entire training corpus (Section 4.2). Thus, the larger volume in total (because of long files) makes C the most \"favored\" language by PolyCoder. The reason why PolyCoder does not outperform Codex in \\(\\mathrm{C + + }\\) is possibly due to the complexity of \\(\\mathrm{C + + }\\) language and Codex's significantly longer context window size (4096, compared to PolyCoder's 2048), or because Codex is possibly trained on more \\(\\mathrm{C + + }\\) training data."
1371
+ },
1372
+ {
1373
+ "type": "text",
1374
+ "bbox": [
1375
+ 0.171,
1376
+ 0.799,
1377
+ 0.827,
1378
+ 0.926
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "With the same pretraining corpus, the gain from a 2.7B model (GPT-Neo) to a 6B model (GPT-J) is significant over all languages. However, when increasing the model size further to 20B, the improvement varies across different languages. For example, the performance on Go, Java, Rust, Scala, JavaScript do not increase significantly when the model size increases by 3 times. This suggests that for some programming languages, and given the amounts of data, the capacity of GPT-J is sufficient. Interestingly, these languages seem to coincide with languages where PolyCoder outperforms a similarly sized model trained on Pile. This may hint that for the languages in which larger models do not provide additional gains, training the model only using code may be enough or slightly more helpful than training on both natural language and code."
1382
+ },
1383
+ {
1384
+ "type": "page_number",
1385
+ "bbox": [
1386
+ 0.494,
1387
+ 0.949,
1388
+ 0.504,
1389
+ 0.96
1390
+ ],
1391
+ "angle": 0,
1392
+ "content": "9"
1393
+ }
1394
+ ],
1395
+ [
1396
+ {
1397
+ "type": "header",
1398
+ "bbox": [
1399
+ 0.173,
1400
+ 0.033,
1401
+ 0.538,
1402
+ 0.049
1403
+ ],
1404
+ "angle": 0,
1405
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
1406
+ },
1407
+ {
1408
+ "type": "text",
1409
+ "bbox": [
1410
+ 0.171,
1411
+ 0.104,
1412
+ 0.827,
1413
+ 0.149
1414
+ ],
1415
+ "angle": 0,
1416
+ "content": "We can see that comparing different models, perplexity trends for Python correlates well with the HumanEval benchmark performance of the extrinsic evaluation (Section 5.1). This suggests that perplexity is a useful and low-cost metric to estimate other, downstream, metrics."
1417
+ },
1418
+ {
1419
+ "type": "title",
1420
+ "bbox": [
1421
+ 0.173,
1422
+ 0.166,
1423
+ 0.321,
1424
+ 0.182
1425
+ ],
1426
+ "angle": 0,
1427
+ "content": "6 CONCLUSION"
1428
+ },
1429
+ {
1430
+ "type": "text",
1431
+ "bbox": [
1432
+ 0.171,
1433
+ 0.197,
1434
+ 0.828,
1435
+ 0.298
1436
+ ],
1437
+ "angle": 0,
1438
+ "content": "In this paper, we perform a systematic evaluation of large language models for code. The performance generally benefits from larger models and longer training time. We also believe that the better results of GPT-Neo over PolyCoder in some languages show that training on natural language text and code can benefit the modeling of code. To help future research in the area, we release PolyCoder, a large open-source language model for code, trained exclusively on code in 12 different programming languages. In the C programming language, PolyCoder achieves lower perplexity than all models including Codex."
1439
+ },
1440
+ {
1441
+ "type": "title",
1442
+ "bbox": [
1443
+ 0.174,
1444
+ 0.315,
1445
+ 0.289,
1446
+ 0.33
1447
+ ],
1448
+ "angle": 0,
1449
+ "content": "REFERENCES"
1450
+ },
1451
+ {
1452
+ "type": "ref_text",
1453
+ "bbox": [
1454
+ 0.174,
1455
+ 0.338,
1456
+ 0.827,
1457
+ 0.409
1458
+ ],
1459
+ "angle": 0,
1460
+ "content": "Wasi Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. Unified pre-training for program understanding and generation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2655–2668, Online, June 2021. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2021.naacl-main.211."
1461
+ },
1462
+ {
1463
+ "type": "ref_text",
1464
+ "bbox": [
1465
+ 0.174,
1466
+ 0.417,
1467
+ 0.827,
1468
+ 0.46
1469
+ ],
1470
+ "angle": 0,
1471
+ "content": "Miltiadis Allamanis. The adverse effects of code duplication in machine learning models of code. In Proceedings of the 2019 ACM SIGPLAN International Symposium on New Ideas, New Paradigms, and Reflections on Programming and Software, pp. 143-153, 2019."
1472
+ },
1473
+ {
1474
+ "type": "ref_text",
1475
+ "bbox": [
1476
+ 0.175,
1477
+ 0.468,
1478
+ 0.825,
1479
+ 0.497
1480
+ ],
1481
+ "angle": 0,
1482
+ "content": "Uri Alon, Roy Sadaka, Omer Levy, and Eran Yahav. Structural language models of code. In International Conference on Machine Learning, pp. 245-256. PMLR, 2020."
1483
+ },
1484
+ {
1485
+ "type": "ref_text",
1486
+ "bbox": [
1487
+ 0.174,
1488
+ 0.504,
1489
+ 0.827,
1490
+ 0.547
1491
+ ],
1492
+ "angle": 0,
1493
+ "content": "Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021."
1494
+ },
1495
+ {
1496
+ "type": "ref_text",
1497
+ "bbox": [
1498
+ 0.175,
1499
+ 0.555,
1500
+ 0.827,
1501
+ 0.584
1502
+ ],
1503
+ "angle": 0,
1504
+ "content": "Alexei Baevski and Michael Auli. Adaptive input representations for neural language modeling. arXiv preprint arXiv:1809.10853, 2018."
1505
+ },
1506
+ {
1507
+ "type": "ref_text",
1508
+ "bbox": [
1509
+ 0.175,
1510
+ 0.592,
1511
+ 0.825,
1512
+ 0.621
1513
+ ],
1514
+ "angle": 0,
1515
+ "content": "Yoshua Bengio, Réjean Ducharme, Pascal Vincent, and Christian Jauvin. A neural probabilistic language model. Journal of machine learning research, 3(Feb):1137-1155, 2003."
1516
+ },
1517
+ {
1518
+ "type": "ref_text",
1519
+ "bbox": [
1520
+ 0.174,
1521
+ 0.628,
1522
+ 0.825,
1523
+ 0.672
1524
+ ],
1525
+ "angle": 0,
1526
+ "content": "Sid Black, Leo Gao, Phil Wang, Connor Leahy, and Stella Biderman. GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow, March 2021. URL https://doi.org/10.5281/zenodo.5297715. If you use this software, please cite it using these metadata."
1527
+ },
1528
+ {
1529
+ "type": "ref_text",
1530
+ "bbox": [
1531
+ 0.174,
1532
+ 0.679,
1533
+ 0.827,
1534
+ 0.737
1535
+ ],
1536
+ "angle": 0,
1537
+ "content": "Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, and Samuel Weinbach. GPT-NeoX-20B: An open-source autoregressive language model. 2022."
1538
+ },
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.174,
1543
+ 0.744,
1544
+ 0.827,
1545
+ 0.787
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020."
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.174,
1554
+ 0.794,
1555
+ 0.827,
1556
+ 0.838
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde, Jared Kaplan, Harri Edwards, Yura Burda, Nicholas Joseph, Greg Brockman, et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021."
1560
+ },
1561
+ {
1562
+ "type": "ref_text",
1563
+ "bbox": [
1564
+ 0.174,
1565
+ 0.845,
1566
+ 0.825,
1567
+ 0.874
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": "Alexis Conneau and Guillaume Lample. Cross-lingual language model pretraining. Advances in Neural Information Processing Systems, 32:7059-7069, 2019."
1571
+ },
1572
+ {
1573
+ "type": "ref_text",
1574
+ "bbox": [
1575
+ 0.174,
1576
+ 0.882,
1577
+ 0.825,
1578
+ 0.925
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "Aditya Desai, Sumit Gulwani, Vineet Hingorani, Nidhi Jain, Amey Karkare, Mark Marron, and Subhajit Roy. Program synthesis using natural language. In Proceedings of the 38th International Conference on Software Engineering, pp. 345-356, 2016."
1582
+ },
1583
+ {
1584
+ "type": "list",
1585
+ "bbox": [
1586
+ 0.174,
1587
+ 0.338,
1588
+ 0.827,
1589
+ 0.925
1590
+ ],
1591
+ "angle": 0,
1592
+ "content": null
1593
+ },
1594
+ {
1595
+ "type": "page_number",
1596
+ "bbox": [
1597
+ 0.491,
1598
+ 0.949,
1599
+ 0.509,
1600
+ 0.96
1601
+ ],
1602
+ "angle": 0,
1603
+ "content": "10"
1604
+ }
1605
+ ],
1606
+ [
1607
+ {
1608
+ "type": "header",
1609
+ "bbox": [
1610
+ 0.173,
1611
+ 0.033,
1612
+ 0.538,
1613
+ 0.049
1614
+ ],
1615
+ "angle": 0,
1616
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
1617
+ },
1618
+ {
1619
+ "type": "ref_text",
1620
+ "bbox": [
1621
+ 0.173,
1622
+ 0.103,
1623
+ 0.826,
1624
+ 0.134
1625
+ ],
1626
+ "angle": 0,
1627
+ "content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018."
1628
+ },
1629
+ {
1630
+ "type": "ref_text",
1631
+ "bbox": [
1632
+ 0.173,
1633
+ 0.142,
1634
+ 0.826,
1635
+ 0.185
1636
+ ],
1637
+ "angle": 0,
1638
+ "content": "Zhangyin Feng, Daya Guo, Duyu Tang, Nan Duan, Xiaocheng Feng, Ming Gong, Linjun Shou, Bing Qin, Ting Liu, Daxin Jiang, et al. Codebert: A pre-trained model for programming and natural languages. arXiv preprint arXiv:2002.08155, 2020."
1639
+ },
1640
+ {
1641
+ "type": "ref_text",
1642
+ "bbox": [
1643
+ 0.173,
1644
+ 0.194,
1645
+ 0.826,
1646
+ 0.237
1647
+ ],
1648
+ "angle": 0,
1649
+ "content": "Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020."
1650
+ },
1651
+ {
1652
+ "type": "ref_text",
1653
+ "bbox": [
1654
+ 0.173,
1655
+ 0.245,
1656
+ 0.826,
1657
+ 0.275
1658
+ ],
1659
+ "angle": 0,
1660
+ "content": "Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Liang Zhang, Wentao Han, Minlie Huang, et al. Pre-trained models: Past, present and future. AI Open, 2021."
1661
+ },
1662
+ {
1663
+ "type": "ref_text",
1664
+ "bbox": [
1665
+ 0.173,
1666
+ 0.284,
1667
+ 0.826,
1668
+ 0.327
1669
+ ],
1670
+ "angle": 0,
1671
+ "content": "Vincent J Hellendoorn and Premkumar Devanbu. Are deep neural networks the best choice for modeling source code? In Proceedings of the 2017 11th Joint Meeting on Foundations of Software Engineering, pp. 763-773, 2017."
1672
+ },
1673
+ {
1674
+ "type": "ref_text",
1675
+ "bbox": [
1676
+ 0.173,
1677
+ 0.336,
1678
+ 0.826,
1679
+ 0.379
1680
+ ],
1681
+ "angle": 0,
1682
+ "content": "Vincent J. Hellendoorn and Anand Ashok Sawant. The growing cost of deep learning for source code. Commun. ACM, 65(1):31-33, dec 2021. ISSN 0001-0782. doi: 10.1145/3501261. URL https://doi.org/10.1145/3501261."
1683
+ },
1684
+ {
1685
+ "type": "ref_text",
1686
+ "bbox": [
1687
+ 0.173,
1688
+ 0.388,
1689
+ 0.826,
1690
+ 0.417
1691
+ ],
1692
+ "angle": 0,
1693
+ "content": "Abram Hindle, Earl T Barr, Mark Gabel, Zhendong Su, and Premkumar Devanbu. On the naturalness of software. Communications of the ACM, 59(5):122-131, 2016."
1694
+ },
1695
+ {
1696
+ "type": "ref_text",
1697
+ "bbox": [
1698
+ 0.173,
1699
+ 0.425,
1700
+ 0.826,
1701
+ 0.455
1702
+ ],
1703
+ "angle": 0,
1704
+ "content": "Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration. arXiv preprint arXiv:1904.09751, 2019."
1705
+ },
1706
+ {
1707
+ "type": "ref_text",
1708
+ "bbox": [
1709
+ 0.173,
1710
+ 0.463,
1711
+ 0.826,
1712
+ 0.506
1713
+ ],
1714
+ "angle": 0,
1715
+ "content": "Hamel Husain, Ho-Hsiang Wu, Tiferet Gazit, Miltiadis Allamanis, and Marc Brockschmidt. Code-searchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436, 2019."
1716
+ },
1717
+ {
1718
+ "type": "ref_text",
1719
+ "bbox": [
1720
+ 0.173,
1721
+ 0.515,
1722
+ 0.826,
1723
+ 0.558
1724
+ ],
1725
+ "angle": 0,
1726
+ "content": "Aditya Kanade, Petros Maniatis, Gogul Balakrishnan, and Kensen Shi. Learning and evaluating contextual embedding of source code. In International Conference on Machine Learning, pp. 5110-5121. PMLR, 2020."
1727
+ },
1728
+ {
1729
+ "type": "ref_text",
1730
+ "bbox": [
1731
+ 0.173,
1732
+ 0.567,
1733
+ 0.826,
1734
+ 0.611
1735
+ ],
1736
+ "angle": 0,
1737
+ "content": "Rafael-Michael Karampatsis, Hlib Babii, Romain Robbes, Charles Sutton, and Andrea Janes. Big code! = big vocabulary: Open-vocabulary models for source code. In 2020 IEEE/ACM 42nd International Conference on Software Engineering (ICSE), pp. 1073-1085. IEEE, 2020."
1738
+ },
1739
+ {
1740
+ "type": "ref_text",
1741
+ "bbox": [
1742
+ 0.173,
1743
+ 0.619,
1744
+ 0.826,
1745
+ 0.675
1746
+ ],
1747
+ "angle": 0,
1748
+ "content": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461, 2019."
1749
+ },
1750
+ {
1751
+ "type": "ref_text",
1752
+ "bbox": [
1753
+ 0.173,
1754
+ 0.685,
1755
+ 0.826,
1756
+ 0.783
1757
+ ],
1758
+ "angle": 0,
1759
+ "content": "Shuai Lu, Daya Guo, Shuo Ren, Junjie Huang, Alexey Svyatkovskiy, Ambrosio Blanco, Colin Clement, Dawn Drain, Daxin Jiang, Duyu Tang, Ge Li, Lidong Zhou, Linjun Shou, Long Zhou, Michele Tufano, MING GONG, Ming Zhou, Nan Duan, Neel Sundaresan, Shao Kun Deng, Shengyu Fu, and Shujie LIU. CodeXGLUE: A machine learning benchmark dataset for code understanding and generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. URL https://openreview.net/forum?id=61E4dQXaUcb."
1760
+ },
1761
+ {
1762
+ "type": "ref_text",
1763
+ "bbox": [
1764
+ 0.173,
1765
+ 0.792,
1766
+ 0.826,
1767
+ 0.822
1768
+ ],
1769
+ "angle": 0,
1770
+ "content": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019."
1771
+ },
1772
+ {
1773
+ "type": "ref_text",
1774
+ "bbox": [
1775
+ 0.173,
1776
+ 0.83,
1777
+ 0.826,
1778
+ 0.873
1779
+ ],
1780
+ "angle": 0,
1781
+ "content": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683, 2019."
1782
+ },
1783
+ {
1784
+ "type": "ref_text",
1785
+ "bbox": [
1786
+ 0.173,
1787
+ 0.882,
1788
+ 0.826,
1789
+ 0.925
1790
+ ],
1791
+ "angle": 0,
1792
+ "content": "Veselin Raychev, Martin Vechev, and Eran Yahav. Code completion with statistical language models. In Proceedings of the 35th ACM SIGPLAN Conference on Programming Language Design and Implementation, pp. 419-428, 2014."
1793
+ },
1794
+ {
1795
+ "type": "list",
1796
+ "bbox": [
1797
+ 0.173,
1798
+ 0.103,
1799
+ 0.826,
1800
+ 0.925
1801
+ ],
1802
+ "angle": 0,
1803
+ "content": null
1804
+ },
1805
+ {
1806
+ "type": "page_number",
1807
+ "bbox": [
1808
+ 0.49,
1809
+ 0.948,
1810
+ 0.508,
1811
+ 0.961
1812
+ ],
1813
+ "angle": 0,
1814
+ "content": "11"
1815
+ }
1816
+ ],
1817
+ [
1818
+ {
1819
+ "type": "header",
1820
+ "bbox": [
1821
+ 0.173,
1822
+ 0.033,
1823
+ 0.538,
1824
+ 0.049
1825
+ ],
1826
+ "angle": 0,
1827
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
1828
+ },
1829
+ {
1830
+ "type": "ref_text",
1831
+ "bbox": [
1832
+ 0.172,
1833
+ 0.104,
1834
+ 0.825,
1835
+ 0.133
1836
+ ],
1837
+ "angle": 0,
1838
+ "content": "Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015."
1839
+ },
1840
+ {
1841
+ "type": "ref_text",
1842
+ "bbox": [
1843
+ 0.173,
1844
+ 0.141,
1845
+ 0.826,
1846
+ 0.171
1847
+ ],
1848
+ "angle": 0,
1849
+ "content": "Lewis Tunstall, Leandro von Werra, and Thomas Wolf. Natural Language Processing with Transformers.\" O'Reilly Media, Inc.\", 2022."
1850
+ },
1851
+ {
1852
+ "type": "ref_text",
1853
+ "bbox": [
1854
+ 0.173,
1855
+ 0.178,
1856
+ 0.826,
1857
+ 0.208
1858
+ ],
1859
+ "angle": 0,
1860
+ "content": "Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021."
1861
+ },
1862
+ {
1863
+ "type": "ref_text",
1864
+ "bbox": [
1865
+ 0.173,
1866
+ 0.215,
1867
+ 0.826,
1868
+ 0.258
1869
+ ],
1870
+ "angle": 0,
1871
+ "content": "Yue Wang, Weishi Wang, Shafiq Joty, and Steven CH Hoi. Codet5: Identifier-aware unified pre-trained encoder-decoder models for code understanding and generation. arXiv preprint arXiv:2109.00859, 2021."
1872
+ },
1873
+ {
1874
+ "type": "ref_text",
1875
+ "bbox": [
1876
+ 0.173,
1877
+ 0.266,
1878
+ 0.827,
1879
+ 0.322
1880
+ ],
1881
+ "angle": 0,
1882
+ "content": "Daniel Zügner, Tobias Kirschstein, Michele Catasta, Jure Leskovec, and Stephan Gunnemann. Language-agnostic representation learning of source code from structure and context. In International Conference on Learning Representations, 2021. URL https://openreview.net/for um?id=Xh5eMZVONGF."
1883
+ },
1884
+ {
1885
+ "type": "list",
1886
+ "bbox": [
1887
+ 0.172,
1888
+ 0.104,
1889
+ 0.827,
1890
+ 0.322
1891
+ ],
1892
+ "angle": 0,
1893
+ "content": null
1894
+ },
1895
+ {
1896
+ "type": "title",
1897
+ "bbox": [
1898
+ 0.173,
1899
+ 0.348,
1900
+ 0.523,
1901
+ 0.365
1902
+ ],
1903
+ "angle": 0,
1904
+ "content": "A SCALING EFFECT: TRAINED LONGER"
1905
+ },
1906
+ {
1907
+ "type": "text",
1908
+ "bbox": [
1909
+ 0.171,
1910
+ 0.379,
1911
+ 0.827,
1912
+ 0.477
1913
+ ],
1914
+ "angle": 0,
1915
+ "content": "We compare the performance difference between the model trained after 100K steps versus the model after 150K steps in Figure 7. We can see that in the larger 2.7B model, by training the model longer till 150K steps, the performance increases uniformly, with Pass@100 increasing the most. However, for a smaller model such as the 400M model, by training the model longer till 100K steps, the improvements are subdued and Pass@100 drops. This suggests that with the larger model, training for longer may provide additional boost in performance. This echoes with the observation from the training curve (Figure 3) as well."
1916
+ },
1917
+ {
1918
+ "type": "image",
1919
+ "bbox": [
1920
+ 0.292,
1921
+ 0.495,
1922
+ 0.495,
1923
+ 0.61
1924
+ ],
1925
+ "angle": 0,
1926
+ "content": null
1927
+ },
1928
+ {
1929
+ "type": "image_caption",
1930
+ "bbox": [
1931
+ 0.345,
1932
+ 0.62,
1933
+ 0.44,
1934
+ 0.634
1935
+ ],
1936
+ "angle": 0,
1937
+ "content": "(a) 2.7B Model"
1938
+ },
1939
+ {
1940
+ "type": "image",
1941
+ "bbox": [
1942
+ 0.504,
1943
+ 0.495,
1944
+ 0.709,
1945
+ 0.61
1946
+ ],
1947
+ "angle": 0,
1948
+ "content": null
1949
+ },
1950
+ {
1951
+ "type": "image_caption",
1952
+ "bbox": [
1953
+ 0.555,
1954
+ 0.62,
1955
+ 0.657,
1956
+ 0.634
1957
+ ],
1958
+ "angle": 0,
1959
+ "content": "(b) 400M Model"
1960
+ },
1961
+ {
1962
+ "type": "image_caption",
1963
+ "bbox": [
1964
+ 0.226,
1965
+ 0.646,
1966
+ 0.771,
1967
+ 0.661
1968
+ ],
1969
+ "angle": 0,
1970
+ "content": "Figure 7: HumanEval performance comparison after training the model for longer."
1971
+ },
1972
+ {
1973
+ "type": "title",
1974
+ "bbox": [
1975
+ 0.172,
1976
+ 0.689,
1977
+ 0.577,
1978
+ 0.705
1979
+ ],
1980
+ "angle": 0,
1981
+ "content": "B TEMPERATURE EFFECT: SMALLER MODELS"
1982
+ },
1983
+ {
1984
+ "type": "text",
1985
+ "bbox": [
1986
+ 0.171,
1987
+ 0.72,
1988
+ 0.827,
1989
+ 0.803
1990
+ ],
1991
+ "angle": 0,
1992
+ "content": "We show how temperature affects HumanEval performance on model of all three sizes in Figure 8. We find that for a larger model, e.g., the 2.7B model, a temperature as high as 0.8 is actually hurting the performance for Pass@100, suggesting that if the model is good enough, a very high temperature may cause the outputs to be too diverse, thus hurting the correctness. This suggests the importance of temperature and the need to tune it individually for different model capacity and different generation scenarios."
1993
+ },
1994
+ {
1995
+ "type": "title",
1996
+ "bbox": [
1997
+ 0.172,
1998
+ 0.824,
1999
+ 0.488,
2000
+ 0.839
2001
+ ],
2002
+ "angle": 0,
2003
+ "content": "C DETAILED PERPLEXITY RESULTS"
2004
+ },
2005
+ {
2006
+ "type": "text",
2007
+ "bbox": [
2008
+ 0.171,
2009
+ 0.855,
2010
+ 0.826,
2011
+ 0.926
2012
+ ],
2013
+ "angle": 0,
2014
+ "content": "We show the detailed perplexity of different models on different languages in Table 5. The number of tokens shown in the table is obtained after tokenizing the code in each language using their respective lexers, by Pygments. This number of tokens is used to normalize the perplexity scores to make them comparable across models. Note that CodeParrot is only trained on Python data and thus performs poorly in other languages."
2015
+ },
2016
+ {
2017
+ "type": "page_number",
2018
+ "bbox": [
2019
+ 0.49,
2020
+ 0.948,
2021
+ 0.509,
2022
+ 0.96
2023
+ ],
2024
+ "angle": 0,
2025
+ "content": "12"
2026
+ }
2027
+ ],
2028
+ [
2029
+ {
2030
+ "type": "header",
2031
+ "bbox": [
2032
+ 0.173,
2033
+ 0.033,
2034
+ 0.536,
2035
+ 0.049
2036
+ ],
2037
+ "angle": 0,
2038
+ "content": "Published as a workshop paper at DL4C @ ICLR 2022"
2039
+ },
2040
+ {
2041
+ "type": "image",
2042
+ "bbox": [
2043
+ 0.191,
2044
+ 0.209,
2045
+ 0.391,
2046
+ 0.322
2047
+ ],
2048
+ "angle": 0,
2049
+ "content": null
2050
+ },
2051
+ {
2052
+ "type": "image_caption",
2053
+ "bbox": [
2054
+ 0.243,
2055
+ 0.332,
2056
+ 0.337,
2057
+ 0.345
2058
+ ],
2059
+ "angle": 0,
2060
+ "content": "(a) 2.7B Model"
2061
+ },
2062
+ {
2063
+ "type": "image",
2064
+ "bbox": [
2065
+ 0.399,
2066
+ 0.209,
2067
+ 0.597,
2068
+ 0.322
2069
+ ],
2070
+ "angle": 0,
2071
+ "content": null
2072
+ },
2073
+ {
2074
+ "type": "image_caption",
2075
+ "bbox": [
2076
+ 0.446,
2077
+ 0.332,
2078
+ 0.547,
2079
+ 0.345
2080
+ ],
2081
+ "angle": 0,
2082
+ "content": "(b) 400M Model"
2083
+ },
2084
+ {
2085
+ "type": "image",
2086
+ "bbox": [
2087
+ 0.609,
2088
+ 0.209,
2089
+ 0.807,
2090
+ 0.322
2091
+ ],
2092
+ "angle": 0,
2093
+ "content": null
2094
+ },
2095
+ {
2096
+ "type": "image_caption",
2097
+ "bbox": [
2098
+ 0.658,
2099
+ 0.332,
2100
+ 0.758,
2101
+ 0.345
2102
+ ],
2103
+ "angle": 0,
2104
+ "content": "(c) 160M Model"
2105
+ },
2106
+ {
2107
+ "type": "image_caption",
2108
+ "bbox": [
2109
+ 0.198,
2110
+ 0.358,
2111
+ 0.799,
2112
+ 0.373
2113
+ ],
2114
+ "angle": 0,
2115
+ "content": "Figure 8: HumanEval performance using different softmax temperatures during generation."
2116
+ },
2117
+ {
2118
+ "type": "table",
2119
+ "bbox": [
2120
+ 0.174,
2121
+ 0.588,
2122
+ 0.861,
2123
+ 0.766
2124
+ ],
2125
+ "angle": 0,
2126
+ "content": "<table><tr><td>Language</td><td>#tokens</td><td>Codex*</td><td>PolyCoder 2.7B</td><td>GPT-Neo 2.7B</td><td>GPT-J 6B</td><td>GPT-NeoX</td><td>CodeParrot</td></tr><tr><td>C</td><td>55,333</td><td>2.55</td><td>2.33</td><td>3.69</td><td>2.82</td><td>2.37</td><td>19.23</td></tr><tr><td>C#</td><td>67,306</td><td>1.72</td><td>2.58</td><td>2.49</td><td>2.20</td><td>2.12</td><td>7.16</td></tr><tr><td>C++</td><td>69,627</td><td>1.95</td><td>2.99</td><td>2.87</td><td>2.47</td><td>2.32</td><td>8.48</td></tr><tr><td>Go</td><td>79,947</td><td>1.39</td><td>2.57</td><td>2.19</td><td>1.89</td><td>1.85</td><td>10.00</td></tr><tr><td>Java</td><td>65,484</td><td>1.94</td><td>2.92</td><td>2.78</td><td>2.49</td><td>2.47</td><td>6.79</td></tr><tr><td>JavaScript</td><td>54,620</td><td>2.17</td><td>3.06</td><td>3.07</td><td>2.73</td><td>2.62</td><td>9.23</td></tr><tr><td>PHP</td><td>45,682</td><td>1.98</td><td>3.70</td><td>3.61</td><td>2.81</td><td>2.45</td><td>19.91</td></tr><tr><td>Python</td><td>79,653</td><td>1.47</td><td>3.18</td><td>3.00</td><td>2.68</td><td>2.61</td><td>2.95</td></tr><tr><td>Ruby</td><td>46,537</td><td>1.39</td><td>3.96</td><td>3.77</td><td>3.13</td><td>2.89</td><td>14.26</td></tr><tr><td>Rust</td><td>107,717</td><td>1.96</td><td>3.24</td><td>3.30</td><td>2.92</td><td>2.92</td><td>8.68</td></tr><tr><td>Scala</td><td>65,756</td><td>1.75</td><td>3.87</td><td>3.88</td><td>3.37</td><td>3.33</td><td>12.91</td></tr><tr><td>JavaScript</td><td>55,895</td><td>2.40</td><td>3.61</td><td>3.90</td><td>3.43</td><td>3.41</td><td>12.54</td></tr></table>"
2127
+ },
2128
+ {
2129
+ "type": "table_footnote",
2130
+ "bbox": [
2131
+ 0.182,
2132
+ 0.767,
2133
+ 0.751,
2134
+ 0.791
2135
+ ],
2136
+ "angle": 0,
2137
+ "content": "* Since the exact training set of Codex is unknown, it might have been trained on these test sets, and Codex's results are over-optimistic."
2138
+ },
2139
+ {
2140
+ "type": "table_caption",
2141
+ "bbox": [
2142
+ 0.172,
2143
+ 0.803,
2144
+ 0.825,
2145
+ 0.818
2146
+ ],
2147
+ "angle": 0,
2148
+ "content": "Table 5: Perplexity of different models for different programming languages on our evaluation dataset."
2149
+ },
2150
+ {
2151
+ "type": "page_number",
2152
+ "bbox": [
2153
+ 0.49,
2154
+ 0.948,
2155
+ 0.508,
2156
+ 0.96
2157
+ ],
2158
+ "angle": 0,
2159
+ "content": "13"
2160
+ }
2161
+ ]
2162
+ ]
2202.13xxx/2202.13169/909a7465-0b93-460b-9a57-ce6ae5e551db_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b0ed283cb5ea7bc799bddee017587d1a97ec7215c2300f8a38ca6b626b6f2c0b
3
+ size 503537
2202.13xxx/2202.13169/full.md ADDED
@@ -0,0 +1,247 @@
1
+ # A SYSTEMATIC EVALUATION OF LARGE LANGUAGE MODELS OF CODE
2
+
3
+ Frank F. Xu, Uri Alon, Graham Neubig, Vincent J. Hellendoorn
4
+
5
+ School of Computer Science
6
+
7
+ Carnegie Mellon University
8
+
9
+ {fangzhex,ualon,gneubig}@cs.cmu.edu,vhellendoorn@cmu.edu
10
+
11
+ # ABSTRACT
12
+
13
+ Large language models (LMs) of code have recently shown tremendous promise in completing code and synthesizing code from natural language descriptions. However, the current state-of-the-art code LMs (e.g., Codex (Chen et al., 2021)) are not publicly available, leaving many questions about their model and data design decisions. We aim to fill in some of these blanks through a systematic evaluation of the largest existing models: Codex, GPT-J, GPT-Neo, GPT-NeoX-20B, and CodeParrot, across various programming languages. Although Codex itself is not open-source, we find that existing open-source models do achieve close results in some programming languages, although targeted mainly for natural language modeling. We further identify an important missing piece in the form of a large open-source model trained exclusively on a multi-lingual corpus of code. We release a new model, PolyCoder, with 2.7B parameters based on the GPT-2 architecture, that was trained on 249GB of code across 12 programming languages on a single machine. In the C programming language, PolyCoder outperforms all models including Codex. Our trained models are open-source and publicly available at https://github.com/VHellendoorn/Code-LMs, which enables future research and application in this area.
14
+
15
+ # 1 INTRODUCTION
16
+
17
+ Language models (LMs) assign probabilities to sequences of tokens, and are widely applied to natural language text (Bengio et al., 2003; Baevski & Auli, 2018; Brown et al., 2020). Recently, LMs have also shown impressive performance in modeling source code, written in programming languages (Hindle et al., 2016; Hellendoorn & Devanbu, 2017; Alon et al., 2020; Karampatsis et al., 2020). These models excel at useful downstream tasks like code completion (Raychev et al., 2014) and synthesizing code from natural language descriptions (Desai et al., 2016). The current state-of-the-art large language models for code, such as Austin et al. (2021), have shown significant progress for AI-based programming assistance. Most notably, one of the largest of these models, Codex (Chen et al., 2021), has been deployed in the real-world production tool GitHub Copilot<sup>1</sup>, as an in-IDE developer assistant that automatically generates code based on the user's context.
18
+
19
+ Despite the great success of large language models of code, the strongest models are not publicly available. This prevents the application of these models outside of well-resourced companies and limits research in this field for low-resourced organizations. For example, Codex provides non-free access to the model's output through black-box API calls,[2] but the model's weights and training data are unavailable. This prevents researchers from fine-tuning and adapting this model to domains and tasks other than code completion. The lack of access to the model's internals also prevents the research community from studying other key aspects of these models, such as interpretability, distillation of the model for more efficient deployment, and incorporating additional components such as retrieval.
20
+
21
+ Several medium to large-sized pre-trained language models are publicly available, such as GPT-Neo (Black et al., 2021), GPT-J (Wang & Komatsuzaki, 2021) and GPT-NeoX (Black et al., 2022).
22
+
23
+ Despite being trained on a mixture of a wide variety of text including news articles, online forums, and just a modest selection of (GitHub) software repositories (Gao et al., 2020), these language models can be used to generate source code with reasonable performance (Chen et al., 2021). In addition, there are a few open-source language models that are trained solely on source code. For example, CodeParrot (Tunstall et al., 2022) was trained on 180 GB of Python code.
24
+
25
+ Given the variety of model sizes and training schemes involved in these models and lack of comparisons between these, the impact of many modeling and training design decisions remains unclear. For instance, we do not know the precise selection of data on which Codex and other private models were trained; however, we do know that some public models (e.g., GPT-J) were trained on a mix of natural language and code in multiple programming languages, while other models (e.g., CodeParrot) were trained solely on code in one particular programming language. Multilingual models potentially provide better generalization, because different programming languages share similar keywords and properties, as shown by the success of multilingual models for natural language (Conneau & Lample, 2019) and for code (Zügner et al., 2021). This may hint that multilingual LMs can generalize across languages, outperform monolingual models and be useful for modeling low-resource programming languages, but this is yet to be verified empirically.
26
+
27
+ In this paper, we present a systematic evaluation of existing models of code – Codex, GPT-J, GPT-Neo, GPT-NeoX, and CodeParrot – across various programming languages. We aim to shed more light on the landscape of code modeling design decisions by comparing and contrasting these models, as well as providing a key missing link: thus far, no large open-source language model was trained exclusively on code from multiple programming languages. We provide three such models, ranging from 160M to 2.7B parameters, which we release under the umbrella name “PolyCoder”. First, we perform an extensive comparison of the training and evaluation settings between PolyCoder, open-source models, and Codex. Second, we evaluate the models on the HumanEval benchmark (Chen et al., 2021) and compare how models of different sizes and numbers of training steps scale, and how different temperatures affect the generation quality. Finally, since HumanEval only evaluates natural language to Python synthesis, we curate an unseen evaluation dataset in each of the 12 languages, to evaluate the perplexity of different models. We find that although Codex is allegedly focused on Python (Chen et al. (2021) §3.1), Codex performs surprisingly well in other programming languages too, and even better than GPT-J and GPT-NeoX that were trained on the Pile (Gao et al., 2020). Nonetheless, in the C programming language, our PolyCoder model achieves a lower perplexity than all these models, including Codex.
28
+
29
+ Although most current models perform worse than Codex, we hope that this systematic study helps future research in this area to design more efficient and effective models. More importantly, through this systematic evaluation of different models, we encourage the community to study and release medium-large scale language models for code, in response to the concerns expressed by Hellendoorn & Sawant (2021):
30
+
31
+ [...] this exploding trend in cost to achieve the state of the art has left the ability to train and test such models limited to a select few large technology companies—and way beyond the resources of virtually all academic labs.
32
+
33
+ We believe that our efforts are a significant step towards democratization of large language models of code.
34
+
35
+ # 2 RELATED WORK
36
+
37
+ At the core of code modeling lies ongoing work on pretraining of language models (LMs). Large-scale pretraining of LMs has had an astounding impact on natural language processing in recent years (Han et al., 2021). Figure 1 provides an overview of how different models compare in size and availability.
38
+
39
+ # 2.1 PRETRAINING METHODS
40
+
41
+ We discuss three popular pretraining methods used in code language modeling. An illustration of these methods is shown in Figure 2.
42
+
43
+ ![](images/8901ec7df14752b423b34e5c99e95a10731491311b938f7f2cf25cb96b17bd25.jpg)
44
+ Figure 1: Existing language models of code, their sizes and availability (open source vs. not open-source).
45
+
46
+ ![](images/fb903304fc761cc4e48a0afedfb8cf30ade78db9160e691caf3bba079f453ebf.jpg)
47
+ Figure 2: Three types of pretrained language models.
48
+
49
+ Left-to-Right Language Models (Figure 2, left) Auto-regressive, left-to-right LMs predict the probability of a token given the previous tokens. In code modeling, CodeGPT (124M) (Lu et al., 2021), CodeParrot (1.5B) (Tunstall et al., 2022), GPT-Neo (2.7B) (Black et al., 2021), GPT-J (6B) (Wang & Komatsuzaki, 2021), Codex (12B) (Chen et al., 2021), GPT-NeoX (20B) (Black et al., 2022), and Google's (137B) (Austin et al., 2021) belong to this category. The left-to-right nature of these models makes them highly useful for program generation tasks, such as code completion. On the other hand, as code is usually not written in a single, left-to-right pass, it is not trivial to leverage context that appears "after" the location of the generation. In this paper, we focus on this family of models and will discuss the existing models in more detail in the following sections.
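+
+ To make the left-to-right objective concrete, the following sketch (our illustration, not taken from any of the cited papers) scores a short code snippet token by token with an off-the-shelf causal LM from the Hugging Face transformers library; the model name and snippet are arbitrary choices.
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ # Any causal (left-to-right) LM works here; "gpt2" is just a small, public example.
+ tok = AutoTokenizer.from_pretrained("gpt2")
+ model = AutoModelForCausalLM.from_pretrained("gpt2")
+ model.eval()
+
+ code = "def add(a, b):\n    return a + b\n"
+ ids = tok(code, return_tensors="pt").input_ids
+
+ with torch.no_grad():
+     logits = model(ids).logits  # shape: (1, sequence_length, vocab_size)
+
+ # log p(token_i | tokens_<i): align each position's logits with the *next* token.
+ log_probs = torch.log_softmax(logits[:, :-1], dim=-1)
+ token_log_probs = log_probs.gather(-1, ids[:, 1:].unsqueeze(-1)).squeeze(-1)
+ print("total log-likelihood:", token_log_probs.sum().item())
+ ```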
50
+
51
+ Masked Language Models (Figure 2, middle) While auto-regressive language models are powerful for modeling the probability of sequences, their unidirectional nature makes them less suitable for producing effective whole-sequence representations for downstream tasks such as classification. One popular bidirectional objective function used widely in representation learning is masked language modeling (Devlin et al., 2018), where the aim is to predict masked text pieces based on surrounding context. CodeBERT (125M) (Feng et al., 2020) and CuBERT (345M) (Kanade et al., 2020) are examples of such models in code. In programming contexts, these methods provide useful representations of a sequence of code for downstream tasks such as code classification, clone detection, and defect detection.
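+
+ As a toy illustration of this objective (ours; CodeBERT and CuBERT train the same kind of objective on code, but the checkpoint below is a generic English one), a fill-mask pipeline predicts the hidden token from bidirectional context:
+
+ ```python
+ from transformers import pipeline
+
+ # "roberta-base" is only a stand-in for a code-specific masked LM such as CodeBERT.
+ fill = pipeline("fill-mask", model="roberta-base")
+ for candidate in fill("for i in <mask>(10): print(i)"):
+     print(candidate["token_str"], round(candidate["score"], 3))
+ ```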
52
+
53
+ Encoder-decoder Models (Figure 2, right) An encoder-decoder model first uses an encoder to encode an input sequence, and then uses a left-to-right LM to decode an output sequence conditioned on the input sequence. Popular pretraining objectives include masked span prediction (Raffel et al., 2019), where the input sequence is randomly masked with multiple masks and the output sequence consists of the masked contents in order, and denoising sequence reconstruction (Lewis et al., 2019), where the input is a corrupted sequence and the output is the original sequence. These pretrained models are useful in many sequence-to-sequence tasks (Raffel et al., 2019). In code, CodeT5 (220M) (Wang et al., 2021) and PLBART (406M) (Ahmad et al., 2021) use these two objectives, respectively, and perform well in conditional generation downstream tasks such as code commenting, or natural language to code generation.
54
+
55
+ # 2.2 PRETRAINING DATA
56
+
57
+ Some models (e.g., CodeParrot and CodeT5) are trained on GitHub code only, with corpora extracted using either Google BigQuery's GitHub dataset $^{4}$ , or CodeSearchNet (Husain et al., 2019). Others (e.g., GPT-Neo and GPT-J) are trained on "the Pile" (Gao et al., 2020), a large corpus containing a blend of natural language texts and code from various domains, including Stack Exchange dumps, software documentations, and popular ( $>100$ stars) GitHub repositories. The datasets on which other proprietary models (Codex, Google's) were trained are unknown. One goal of our study is to try to shed light on what corpora might be the most useful for pretraining models of code.
58
+
59
+ # 3 EVALUATION SETTINGS
60
+
61
+ We evaluate all models using both extrinsic and intrinsic benchmarks, as described below.
62
+
63
+ Extrinsic Evaluation One of the most popular downstream tasks for code modeling is code generation given a natural language description. Following Chen et al. (2021), we evaluate all models on the HumanEval dataset. The dataset contains 164 prompts with descriptions in the form of code comments and function definitions, including argument names and function names, and test cases to judge whether the generated code is correct. To generate code given a prompt, we use the same sampling strategy as Chen et al. (2021), using a softmax with a temperature parameter, $\text{softmax}(x / T)$ . We evaluate using a wide range of temperatures $T = [0.2, 0.4, 0.6, 0.8]$ to control for the confidence of the model's predictions. Similarly to Codex, we use nucleus sampling (Holtzman et al., 2019) with top- $p = 0.95$ . We sample tokens from the model until we encounter one of the following stop sequences that indicate the end of a method: '\nclass', '\ndef', '\nif', or '\nprint'. We randomly sample 100 examples per prompt in the evaluation dataset.
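+
+ A minimal sketch of this sampling procedure (our own illustration, with stop-sequence handling, batching, and the HumanEval harness omitted): a temperature-scaled softmax followed by nucleus (top-p) filtering.
+
+ ```python
+ import torch
+
+ def sample_next_token(logits: torch.Tensor, temperature: float = 0.6, top_p: float = 0.95) -> int:
+     """Temperature-scaled softmax followed by nucleus (top-p) sampling over a 1-D logit vector."""
+     probs = torch.softmax(logits / temperature, dim=-1)
+     sorted_probs, sorted_ids = torch.sort(probs, descending=True)
+     cumulative = torch.cumsum(sorted_probs, dim=-1)
+     # Keep the smallest set of tokens whose cumulative probability reaches top_p.
+     keep = (cumulative - sorted_probs) < top_p
+     kept_probs = sorted_probs * keep
+     kept_probs = kept_probs / kept_probs.sum()
+     choice = torch.multinomial(kept_probs, num_samples=1)
+     return int(sorted_ids[choice])
+ ```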
64
+
65
+ Intrinsic Evaluation To evaluate the intrinsic performance of different models, we compute the perplexity for each language on an unseen set of GitHub repositories. To prevent training-to-test data leakage for models such as GPT-Neo and GPT-J, we remove repositories in our evaluation dataset that appeared in the GitHub portion of the Pile training dataset $^{6}$ . To evaluate Codex, we use OpenAI's API $^{7}$ , choosing the code-davinci-001 engine. We note that the data that this model was trained on is unknown, so we cannot prevent data leakage from the training to the test set for Codex. We sampled 100 random files for each of the 12 programming languages in our evaluation dataset. To make perplexity comparable across the different tokenization methods used in different models, we use Pygments $^{8}$ to obtain a model-independent token count and normalize each model's log-likelihood sum by it when computing perplexity.
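+
+ One way to realize this normalization (our sketch, not necessarily the authors' exact script) is to count tokens with a model-independent Pygments lexer and divide each model's summed log-likelihood by that count before exponentiating:
+
+ ```python
+ import math
+ from pygments.lexers import get_lexer_by_name
+
+ def normalized_perplexity(total_log_likelihood: float, source: str, language: str) -> float:
+     """Perplexity = exp(-sum(log p) / N), where N is a tokenizer-independent token count."""
+     lexer = get_lexer_by_name(language)  # e.g. "python", "c", "go"
+     n_tokens = sum(1 for _kind, text in lexer.get_tokens(source) if text.strip())
+     return math.exp(-total_log_likelihood / n_tokens)
+ ```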
66
+
67
+ # 4 COMPARED MODELS
68
+
69
+ # 4.1 EXISTING MODELS
70
+
71
+ As discussed in Section 2, we mainly focus on auto-regressive left-to-right pretrained language models, most suitable for code completion tasks.
72
+
73
+ We evaluate Codex, as it is currently deployed in a real-world product and has impressive performance in code completion (Chen et al., 2021). Codex uses the GPT-3 language model (Brown et al., 2020) as its underlying model architecture. Codex was trained on a dataset spanning 179GB (after deduplication) covering over 54 million public Python repositories obtained from GitHub in May 2020. As reflected in its impressive results in programming languages other than Python, we suspect that Codex was also trained on large corpora of additional programming languages. The model is available for querying only through a non-free API.
74
+
75
+ As for open-source models, we compare GPT-Neo, GPT-J and GPT-NeoX, the largest variants having 2.7, 6 and 20 billion parameters, respectively. GPT-NeoX is the largest open-source pretrained language model available. These models are trained on the Pile dataset, so they are good representatives of models that were trained on both natural language texts from various domains and source code from GitHub. We also compare CodeParrot, with at most 1.5 billion parameters, a model that was trained only on Python code from GitHub. CodeParrot follows the process used in Chen et al. (2021), obtaining over 20M Python files from the Google BigQuery GitHub database and resulting in a 180GB dataset, which is comparable to Codex's Python training data, but the model itself is much smaller.
76
+
77
+ There was no large open-source language model trained almost exclusively on code from multiple programming languages. To fill this gap, we train a 2.7 billion parameter model, PolyCoder, on a mixture of repositories from GitHub in 12 different programming languages.
78
+
79
+ <table><tr><td>Language</td><td>Repositories</td><td>Files</td><td>Size Before Filtering</td><td>Size After Filtering</td></tr><tr><td>C</td><td>10,749</td><td>3,037,112</td><td>221G</td><td>55G</td></tr><tr><td>C#</td><td>9,511</td><td>2,514,494</td><td>30G</td><td>21G</td></tr><tr><td>C++</td><td>13,726</td><td>4,289,506</td><td>115G</td><td>52G</td></tr><tr><td>Go</td><td>12,371</td><td>1,416,789</td><td>70G</td><td>15G</td></tr><tr><td>Java</td><td>15,044</td><td>5,120,129</td><td>60G</td><td>41G</td></tr><tr><td>JavaScript</td><td>25,144</td><td>1,774,174</td><td>66G</td><td>22G</td></tr><tr><td>PHP</td><td>9,960</td><td>1,714,058</td><td>21G</td><td>13G</td></tr><tr><td>Python</td><td>25,446</td><td>1,550,208</td><td>24G</td><td>16G</td></tr><tr><td>Ruby</td><td>5,826</td><td>674,343</td><td>5.0G</td><td>4.1G</td></tr><tr><td>Rust</td><td>4,991</td><td>304,842</td><td>5.2G</td><td>3.5G</td></tr><tr><td>Scala</td><td>1,497</td><td>245,100</td><td>2.2G</td><td>1.8G</td></tr><tr><td>TypeScript</td><td>12,830</td><td>1,441,926</td><td>12G</td><td>9.2G</td></tr><tr><td>Total</td><td>147,095</td><td>24,082,681</td><td>631.4G</td><td>253.6G</td></tr></table>
80
+
81
+ Table 1: Training corpus statistics.
82
+
83
+ # 4.2 POLYCODER'S DATA
84
+
85
+ Raw Code Corpus Collection GitHub is an excellent source for publicly available source code of various programming languages. We cloned the most popular repositories for 12 popular programming languages with at least 50 stars (stopping at about 25K per language to avoid a too heavy skew towards popular programming languages) from GitHub in October 2021. For each project, each file belonging to the majority-language of that project was extracted, yielding the initial training set. This initial, unfiltered dataset spanned 631GB and 38.9M files.
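+
+ For illustration only, a per-language list of popular repositories could be gathered through the GitHub search API roughly as follows; the actual collection scripts, cutoffs, and pagination used for PolyCoder may differ.
+
+ ```python
+ import requests
+
+ def top_repositories(language: str, min_stars: int = 50, per_page: int = 100) -> list[str]:
+     """Return clone URLs of the most-starred repositories for one language (first page only).
+     The search API returns at most 1000 results, so a real crawl must paginate, e.g. by star ranges."""
+     response = requests.get(
+         "https://api.github.com/search/repositories",
+         params={"q": f"language:{language} stars:>={min_stars}",
+                 "sort": "stars", "order": "desc", "per_page": per_page},
+     )
+     response.raise_for_status()
+     return [item["clone_url"] for item in response.json()["items"]]
+ ```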
86
+
87
+ Data Preprocessing A detailed comparison of our data preprocessing strategy with those of other models is given in Table 2. In general, we tried to follow Codex's design decisions, although there is a fair bit of ambiguity in the description of its data preprocessing.
88
+
89
+ Deduplication and Filtering Similarly to Codex and CodeParrot, very large (>1MB) and very short (<100 tokens) files were filtered out, reducing the size of the dataset by $33\%$ , from 631GB to 424GB. This only reduced the total number of files by $8\%$ , showing that a small number of files were responsible for a large part of the corpus.[10]
90
+
91
+ Allamanis (2019) has shown that the code duplication that commonly manifests in datasets of code adversely affects language modeling of code. Therefore, we deduplicated files based on a hash of their content, which reduced the number of files by nearly $30\%$ , and the dataset size by an additional $29\%$ , leaving 24.1M files and 254GB of data.
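+
+ A simple sketch of this kind of filtering plus exact deduplication (ours; the whitespace-based token count is only a rough stand-in for the real tokenizer):
+
+ ```python
+ import hashlib
+ from pathlib import Path
+
+ def filter_and_deduplicate(files: list[Path], max_bytes: int = 1_000_000, min_tokens: int = 100) -> list[Path]:
+     """Drop very large or very short files, then keep one file per distinct content hash."""
+     seen, kept = set(), []
+     for path in files:
+         data = path.read_bytes()
+         if len(data) > max_bytes or len(data.split()) < min_tokens:
+             continue  # size/length filter
+         digest = hashlib.sha256(data).hexdigest()
+         if digest not in seen:  # exact (content-hash) deduplication
+             seen.add(digest)
+             kept.append(path)
+     return kept
+ ```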
92
+
93
+ Overall, the filtering of very large and very short files plus deduplication reduced the number of files by $38\%$ , and the dataset size by $61\%$ , roughly on par with the $70\%$ dataset size reduction reported by CodeParrot. A key difference that remains is that other approaches use more fine-grained filtering strategies, such as limiting the maximum line length or average line length, filtering of probable auto-generated files, etc. For example, Chen et al. (2021) have filtered only $11\%$ of their training data.
94
+
95
+ <table><tr><td></td><td>PolyCoder</td><td>CodeParrot</td><td>Codex</td></tr><tr><td>Dedup</td><td>Exact</td><td>Exact</td><td>Unclear, mentions “unique”</td></tr><tr><td>Filtering</td><td>Files &gt; 1 MB, &lt; 100 tokens</td><td>Files &gt; 1MB, max line length &gt; 1000, mean line length &gt; 100, fraction of alphanumeric characters &lt; 0.25, containing the word “auto-generated” or similar in the first 5 lines</td><td>Files &gt; 1MB, max line length &gt; 1000, mean line length &gt; 100, auto-generated (details unclear), contained small percentage of alphanumeric characters (details unclear)</td></tr><tr><td>Tokenization</td><td>Trained GPT-2 tokenizer on a random 5% subset (all languages)</td><td>Trained GPT-2 tokenizer on train split</td><td>GPT-3 tokenizer, add multi-whitespace tokens to reduce redundant whitespace tokens</td></tr></table>
96
+
97
+ Table 2: Comparison of data preprocessing strategies of different models.
98
+
99
100
+
101
+ The dataset statistics are shown in Table 1, showcasing data sizes per language before and after filtering. Our dataset contains less Python code (only 16G) than Codex or CodeParrot, and instead covers many different programming languages.
102
+
103
+ Tokenizer We train a GPT-2 tokenizer (using BPE (Sennrich et al., 2015)) on a random $5\%$ subset of all the pretraining data, containing all the languages. Codex uses an existing trained GPT-3 tokenizer, with the addition of multi-whitespace tokens to reduce the sequence length after tokenization, as consecutive whitespaces are more common in code than in text.
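+
+ Training such a tokenizer can be done with the Hugging Face tokenizers library along the following lines; the file list, vocabulary size, and special tokens here are assumptions for illustration, not PolyCoder's exact settings.
+
+ ```python
+ from tokenizers import ByteLevelBPETokenizer
+
+ # Byte-level BPE, as used by GPT-2, trained on a (hypothetical) 5% sample of the corpus.
+ tokenizer = ByteLevelBPETokenizer()
+ tokenizer.train(
+     files=["corpus_sample_all_languages.txt"],
+     vocab_size=50257,
+     min_frequency=2,
+     special_tokens=["<|endoftext|>"],
+ )
+ tokenizer.save_model("polycoder-tokenizer")
+ ```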
104
+
105
+ # 4.3 POLYCODER'S TRAINING
106
+
107
+ Considering our budget, we chose GPT-2 (Radford et al., 2019) as our model architecture. To study the effect of scaling the model size, we train 3 differently sized models, with 2.7 billion, 400 million and 160 million parameters, the largest 2.7B model being on par with GPT-Neo for a fair comparison. The 2.7 billion model is a 32 layer, 2,560 dimensional Transformer model, with a max context window of 2048 tokens, trained with a batch size of 128 sequences (262K tokens). The model is trained for 150K steps. The 400 million model is a 24 layer, 1,024 dimensional variant, and the 160 million model is a 12 layer, 768 dimensional variant, otherwise identical. We use the GPT-NeoX toolkit to train the model efficiently in parallel with 8 Nvidia RTX 8000 GPUs on a single machine. The wall time used to train the largest 2.7B model is about 6 weeks. In its default configuration, this model should train for 320K steps, which was not feasible with our resources. Instead, we adjusted the learning rate decay to half this number and trained for up to 150K steps (near-convergence). The training and validation loss curves for the different sized models are shown in Figure 3. We see that even after training for 150K steps, the validation losses are still decreasing. This, combined with the shorter training schedule and faster learning rate decay, strongly signals that the models are still under-fitting and could benefit from longer training.
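+
+ For reference, the architecture and schedule settings stated above, collected in one place (an illustrative Python summary, not an actual GPT-NeoX configuration file):
+
+ ```python
+ # Settings reported in the text; all three variants share the context window,
+ # batch size (128 sequences x 2048 tokens = 262K tokens), and 150K-step schedule.
+ POLYCODER_VARIANTS = {
+     "2.7B": {"layers": 32, "hidden_dim": 2560, "context_window": 2048,
+              "batch_tokens": 262_144, "train_steps": 150_000},
+     "400M": {"layers": 24, "hidden_dim": 1024, "context_window": 2048,
+              "batch_tokens": 262_144, "train_steps": 150_000},
+     "160M": {"layers": 12, "hidden_dim": 768, "context_window": 2048,
+              "batch_tokens": 262_144, "train_steps": 150_000},
+ }
+ ```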
108
+
109
+ We compare the training setting and hyperparameters with CodeParrot and Codex in Table 3. Due to high computational costs, we were unable to perform hyperparameter search. Most hyperparameters are the same as those used in their respective GPT-2 model training $^{12}$ to provide a good default with regards to the corresponding model size. Some key differences include context window sizes to allow for more tokens as context, batch sizes and tokens trained, as well as model initialization with or without natural language knowledge.
110
+
111
+ <table><tr><td></td><td>PolyCoder (2.7B)</td><td>CodeParrot (1.5B)</td><td>Codex (12B)</td></tr><tr><td>Model Initialization</td><td>From scratch</td><td>From scratch</td><td>Initialized from GPT-3</td></tr><tr><td>NL Knowledge</td><td>Learned from comments in the code</td><td>Learned from comments in the code</td><td>Natural language knowledge from GPT-3</td></tr><tr><td>Learning Rate</td><td>1.6e-4</td><td>2.0e-4</td><td>1e-4</td></tr><tr><td>Optimizer</td><td>AdamW</td><td>AdamW</td><td>AdamW</td></tr><tr><td>Adam betas</td><td>0.9, 0.999</td><td>0.9, 0.999</td><td>0.9, 0.95</td></tr><tr><td>Adam eps</td><td>1e-8</td><td>1e-8</td><td>1e-8</td></tr><tr><td>Weight Decay</td><td>-</td><td>0.1</td><td>0.1</td></tr><tr><td>Warmup Steps</td><td>1600</td><td>750</td><td>175</td></tr><tr><td>Learning Rate Decay</td><td>Cosine</td><td>Cosine</td><td>Cosine</td></tr><tr><td>Batch Size (#tokens)</td><td>262K</td><td>524K</td><td>2M</td></tr><tr><td>Training Steps</td><td>150K steps, 39B tokens</td><td>50K steps, 26B tokens</td><td>100B tokens</td></tr><tr><td>Context Window</td><td>2048</td><td>1024</td><td>4096</td></tr></table>
112
+
113
+ Table 3: Comparison of design decisions and hyper-parameters in training different models of code.
114
+
115
+ ![](images/ce4ec24158b3fc74f61e49ee28e94ba448bbbb9762fb2b6cac7ff6cd5a5f9402.jpg)
116
+ (a) Training
117
+
118
+ ![](images/531b5a4ca06b7a48c158abde843453c21e2ce156e9fae318204a014701bc4f6f.jpg)
119
+ (b) Validation
120
+ Figure 3: Training and validation loss during the 150K step training process.
121
+
122
+ # 5 RESULTS
123
+
124
+ # 5.1 EXTRINSIC EVALUATION
125
+
126
+ The overall results are shown in Table 4. $^{13}$ The numbers are obtained by sampling with different temperatures and picking the best value for each metric. Among existing models, PolyCoder is worse than similarly sized GPT-Neo and the even smaller Codex 300M. Overall, PolyCoder ranks behind Codex and GPT-Neo/J, while performing better than CodeParrot. PolyCoder, which was trained only on code, falls behind a similarly sized model (GPT-Neo 2.7B) trained on the Pile, a blend of natural language texts and code. Looking at the rightmost columns in Table 4 offers a potential explanation: in terms of total Python tokens seen during training, all models substantially exceed ours. This is partly because they use a higher proportion of Python code (we aimed to balance data volume across programming languages), and partly because of resource limitations, which led to PolyCoder not observing its entire training data. In addition, the natural language blend in the training corpus may help code language modeling as well, especially with code-related texts such as Stack Exchange dumps being included.
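+
+ The Pass@k numbers in Table 4 are typically computed with the unbiased estimator introduced by Chen et al. (2021); a small self-contained sketch, assuming n generated samples per problem of which c pass the unit tests:
+
+ ```python
+ import numpy as np
+
+ def pass_at_k(n: int, c: int, k: int) -> float:
+     """Unbiased pass@k estimate for one problem: 1 - C(n-c, k) / C(n, k)."""
+     if n - c < k:
+         return 1.0
+     return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))
+
+ # Example: 100 samples for one problem, 7 of which pass; the benchmark score averages this over problems.
+ print(pass_at_k(n=100, c=7, k=1), pass_at_k(n=100, c=7, k=10))
+ ```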
127
+
128
+ Compared to GPT-Neo (2.7B), PolyCoder has seen fewer Python tokens, but more code tokens in other programming languages, hinting that transfer from other languages to Python helps to achieve a similar performance. This suggests that future research could benefit from blending code in different programming languages, as well as natural language text.
129
+
130
+ <table><tr><td>Model</td><td>Pass@1</td><td>Pass@10</td><td>Pass@100</td><td>Tokens Trained</td><td>Code Tokens</td><td>Python Tokens</td></tr><tr><td>PolyCoder (160M)</td><td>2.13%</td><td>3.35%</td><td>4.88%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>PolyCoder (400M)</td><td>2.96%</td><td>5.29%</td><td>11.59%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>PolyCoder (2.7B)</td><td>5.59%</td><td>9.84%</td><td>17.68%</td><td>39B</td><td>39B</td><td>2.5B</td></tr><tr><td>CodeParrot (110M)</td><td>3.80%</td><td>6.57%</td><td>12.78%</td><td>26B</td><td>26B</td><td>26B</td></tr><tr><td>CodeParrot (1.5B)</td><td>3.58%</td><td>8.03%</td><td>14.96%</td><td>26B</td><td>26B</td><td>26B</td></tr><tr><td>GPT-Neo (125M)</td><td>0.75%</td><td>1.88%</td><td>2.97%</td><td>300B</td><td>22.8B</td><td>3.1B</td></tr><tr><td>GPT-Neo (1.3B)</td><td>4.79%</td><td>7.47%</td><td>16.30%</td><td>380B</td><td>28.8B</td><td>3.9B</td></tr><tr><td>GPT-Neo (2.7B)</td><td>6.41%</td><td>11.27%</td><td>21.37%</td><td>420B</td><td>31.9B</td><td>4.3B</td></tr><tr><td>GPT-J (6B)</td><td>11.62%</td><td>15.74%</td><td>27.74%</td><td>402B</td><td>30.5B</td><td>4.1B</td></tr><tr><td>Codex (300M)</td><td>13.17%</td><td>20.37%</td><td>36.27%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr><tr><td>Codex (2.5B)</td><td>21.36%</td><td>35.42%</td><td>59.50%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr><tr><td>Codex (12B)</td><td>28.81%</td><td>46.81%</td><td>72.31%</td><td>100B*</td><td>100B*</td><td>100B*</td></tr></table>
131
+
132
+ *Codex is initialized with another pretrained model, GPT-3.
133
+
134
+ Table 4: Results of different models on the HumanEval benchmark, and the number of different types of tokens seen during the training process.
135
+
136
+ ![](images/fa7912260a4111029c9185fa123796d75bffb01e3e5c9091c7a753c35a7077f3.jpg)
137
+ (a) Pass@1
138
+
139
+ ![](images/dc20f2aa105d567d4ab5f926de020b293aeb95ee4a41df57fa27cd37db6e0029.jpg)
140
+ (b) Pass@10
141
+
142
+ ![](images/6b6e903178364401458cfb950fe83d4a1a5e44078a0144732feb2fac18362569.jpg)
143
+ (c) Pass@100
144
+ Figure 4: The scaling effect of HumanEval performance on different models.
145
+
146
+ Scaling Effect To further understand the effect of the number of model parameters on HumanEval code completion performance, we show the Pass@1, Pass@10 and Pass@100 percentages with respect to the model size in Figure 4. We can see that the performance of the Codex models is significantly better than that of all the other open-source models across all numbers of parameters. The performance on the HumanEval benchmark increases linearly with the magnitude (log scale) of the number of parameters in the model. Similar scaling effects can be observed for the PolyCoder and GPT-Neo/J models. Interestingly, the CodeParrot models, which are trained only on Python, seem to have reached a saturating performance with respect to the increasing number of parameters, which may be an effect of the training corpus being focused on Python. With a higher number of parameters (2.7B), PolyCoder's performance trends worse than that of GPT-Neo/J. Comparing GPT-Neo/J, which are trained on the Pile dataset containing a blend of text, Stack Exchange dumps and GitHub data, with PolyCoder, which is trained only on GitHub repositories of popular programming languages, we hypothesize that the added text, especially texts in technical and software engineering domains, may be crucial for the larger model to boost its performance. We also compare the performance difference between the model trained for 100K steps and the model trained for 150K steps in Appendix A, and find that training for longer helps the larger model more, as it is still under-fitted.
147
+
148
+ Temperature Effect All the above results are obtained by sampling the language model with different temperatures and picking the best value for each metric. We are also interested in how different choices of temperature affect the final generation quality. We summarize the results in Figure 5. The general trend is that lower temperatures are better for Pass@1, a higher temperature helps for Pass@100, and a temperature in the middle is better suited for Pass@10. We hypothesize that this is because a higher temperature during generation makes the model less confident in its predictions and thus allows for more exploration and more diverse outputs, resulting in better accuracy at Pass@100. Too high a temperature (0.8) is also hurtful if the model is capable enough.
149
+
150
+ ![](images/047da50880c13aa5511b30a163332f7695f6d9ca37e31c854adfedb2395f361f.jpg)
151
+ Figure 6: Perplexity comparison on our evaluation dataset of different models on different programming languages. Note that the y-axis is capped at 4; CodeParrot's entropy on all languages other than Python is much higher than shown here (see Table 5).
152
+
153
+ Conversely, a lower temperature makes the model's output more confident in its predictions and is thus better suited when only a few samples can be drawn, hence the better performance for Pass@1. In Appendix B we repeat these experiments with the smaller models as well. This suggests the importance of temperature and the need to tune it individually for different generation scenarios.
154
+
155
+ ![](images/2b2795d720f91a54fffacee7de8ddb77c13b3ab9633ed1f6894dec0663d39faf.jpg)
156
+ Figure 5: HumanEval performance with different softmax temperatures during generation.
157
+
158
+ # 5.2 INTRINSIC EVALUATION
159
+
160
+ The perplexity results on the evaluation datasets are shown in Figure 6, with detailed numbers in Appendix C. The plot caps the perplexity score at 4, as CodeParrot performs poorly in languages other than Python. It is important to note that although Codex's perplexities are lower than those of other models in most languages, Codex might have been trained on the test sets, and its results are thus over-optimistic.
163
+
164
+ Notably, PolyCoder outperforms Codex and all other models in the C language. Comparing the open-source models only, PolyCoder performs better than the similarly sized GPT-Neo 2.7B in C, JavaScript, Rust, Scala and TypeScript.
165
+
166
+ In the 11 languages other than C, all other open-source models, including ours, are significantly worse (higher perplexity) than Codex. We hypothesize that this is due to the fact that PolyCoder is trained on an imbalanced mixture of different languages, with C and C++ being closely related and the two most dominant in the entire training corpus (Section 4.2). Thus, the larger total volume of C (because of long files) makes C the most "favored" language by PolyCoder. The reason why PolyCoder does not outperform Codex in C++ is possibly the complexity of the C++ language combined with Codex's significantly longer context window size (4096, compared to PolyCoder's 2048), or the fact that Codex is possibly trained on more C++ training data.
167
+
168
+ With the same pretraining corpus, the gain from a 2.7B model (GPT-Neo) to a 6B model (GPT-J) is significant over all languages. However, when increasing the model size further to 20B, the improvement varies across different languages. For example, the performance on Go, Java, Rust, Scala and JavaScript does not increase significantly when the model size increases threefold. This suggests that for some programming languages, and given the amounts of data, the capacity of GPT-J is sufficient. Interestingly, these languages seem to coincide with the languages where PolyCoder outperforms a similarly sized model trained on the Pile. This may hint that for the languages in which larger models do not provide additional gains, training the model only on code may be enough, or slightly more helpful than training on both natural language and code.
169
+
170
+ Comparing the different models, we can see that the perplexity trend for Python correlates well with the HumanEval benchmark performance of the extrinsic evaluation (Section 5.1). This suggests that perplexity is a useful and low-cost metric to estimate other, downstream, metrics.
171
+
172
+ # 6 CONCLUSION
173
+
174
+ In this paper, we perform a systematic evaluation of large language models for code. The performance generally benefits from larger models and longer training time. We also believe that the better results of GPT-Neo over PolyCoder in some languages show that training on natural language text and code can benefit the modeling of code. To help future research in the area, we release PolyCoder, a large open-source language model for code, trained exclusively on code in 12 different programming languages. In the C programming language, PolyCoder achieves lower perplexity than all models including Codex.
175
+
176
+ # REFERENCES
177
+
178
+ Wasi Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. Unified pre-training for program understanding and generation. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2655–2668, Online, June 2021. Association for Computational Linguistics. URL https://www.aclweb.org/anthology/2021.naacl-main.211.
179
+ Miltiadis Allamanis. The adverse effects of code duplication in machine learning models of code. In Proceedings of the 2019 ACM SIGPLAN International Symposium on New Ideas, New Paradigms, and Reflections on Programming and Software, pp. 143-153, 2019.
180
+ Uri Alon, Roy Sadaka, Omer Levy, and Eran Yahav. Structural language models of code. In International Conference on Machine Learning, pp. 245-256. PMLR, 2020.
181
+ Jacob Austin, Augustus Odena, Maxwell Nye, Maarten Bosma, Henryk Michalewski, David Dohan, Ellen Jiang, Carrie Cai, Michael Terry, Quoc Le, et al. Program synthesis with large language models. arXiv preprint arXiv:2108.07732, 2021.
182
+ Alexei Baevski and Michael Auli. Adaptive input representations for neural language modeling. arXiv preprint arXiv:1809.10853, 2018.
183
+ Yoshua Bengio, Réjean Ducharme, Pascal Vincent, and Christian Jauvin. A neural probabilistic language model. Journal of machine learning research, 3(Feb):1137-1155, 2003.
184
+ Sid Black, Leo Gao, Phil Wang, Connor Leahy, and Stella Biderman. GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow, March 2021. URL https://doi.org/10.5281/zenodo.5297715. If you use this software, please cite it using these metadata.
185
+ Sid Black, Stella Biderman, Eric Hallahan, Quentin Anthony, Leo Gao, Laurence Golding, Horace He, Connor Leahy, Kyle McDonell, Jason Phang, Michael Pieler, USVSN Sai Prashanth, Shivanshu Purohit, Laria Reynolds, Jonathan Tow, Ben Wang, and Samuel Weinbach. GPT-NeoX-20B: An open-source autoregressive language model. 2022.
186
+ Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. arXiv preprint arXiv:2005.14165, 2020.
187
+ Mark Chen, Jerry Tworek, Heewoo Jun, Qiming Yuan, Henrique Ponde, Jared Kaplan, Harri Edwards, Yura Burda, Nicholas Joseph, Greg Brockman, et al. Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374, 2021.
188
+ Alexis Conneau and Guillaume Lample. Cross-lingual language model pretraining. Advances in Neural Information Processing Systems, 32:7059-7069, 2019.
189
+ Aditya Desai, Sumit Gulwani, Vineet Hingorani, Nidhi Jain, Amey Karkare, Mark Marron, and Subhajit Roy. Program synthesis using natural language. In Proceedings of the 38th International Conference on Software Engineering, pp. 345-356, 2016.
190
+
191
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018.
192
+ Zhangyin Feng, Daya Guo, Duyu Tang, Nan Duan, Xiaocheng Feng, Ming Gong, Linjun Shou, Bing Qin, Ting Liu, Daxin Jiang, et al. Codebert: A pre-trained model for programming and natural languages. arXiv preprint arXiv:2002.08155, 2020.
193
+ Leo Gao, Stella Biderman, Sid Black, Laurence Golding, Travis Hoppe, Charles Foster, Jason Phang, Horace He, Anish Thite, Noa Nabeshima, et al. The pile: An 800gb dataset of diverse text for language modeling. arXiv preprint arXiv:2101.00027, 2020.
194
+ Xu Han, Zhengyan Zhang, Ning Ding, Yuxian Gu, Xiao Liu, Yuqi Huo, Jiezhong Qiu, Liang Zhang, Wentao Han, Minlie Huang, et al. Pre-trained models: Past, present and future. AI Open, 2021.
195
+ Vincent J Hellendoorn and Premkumar Devanbu. Are deep neural networks the best choice for modeling source code? In Proceedings of the 2017 11th Joint Meeting on Foundations of Software Engineering, pp. 763-773, 2017.
196
+ Vincent J. Hellendoorn and Anand Ashok Sawant. The growing cost of deep learning for source code. Commun. ACM, 65(1):31-33, dec 2021. ISSN 0001-0782. doi: 10.1145/3501261. URL https://doi.org/10.1145/3501261.
197
+ Abram Hindle, Earl T Barr, Mark Gabel, Zhendong Su, and Premkumar Devanbu. On the naturalness of software. Communications of the ACM, 59(5):122-131, 2016.
198
+ Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. The curious case of neural text degeneration. arXiv preprint arXiv:1904.09751, 2019.
199
+ Hamel Husain, Ho-Hsiang Wu, Tiferet Gazit, Miltiadis Allamanis, and Marc Brockschmidt. Code-searchnet challenge: Evaluating the state of semantic code search. arXiv preprint arXiv:1909.09436, 2019.
200
+ Aditya Kanade, Petros Maniatis, Gogul Balakrishnan, and Kensen Shi. Learning and evaluating contextual embedding of source code. In International Conference on Machine Learning, pp. 5110-5121. PMLR, 2020.
201
+ Rafael-Michael Karampatsis, Hlib Babii, Romain Robbes, Charles Sutton, and Andrea Janes. Big code != big vocabulary: Open-vocabulary models for source code. In 2020 IEEE/ACM 42nd International Conference on Software Engineering (ICSE), pp. 1073-1085. IEEE, 2020.
202
+ Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Ves Stoyanov, and Luke Zettlemoyer. Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. arXiv preprint arXiv:1910.13461, 2019.
203
+ Shuai Lu, Daya Guo, Shuo Ren, Junjie Huang, Alexey Svyatkovskiy, Ambrosio Blanco, Colin Clement, Dawn Drain, Daxin Jiang, Duyu Tang, Ge Li, Lidong Zhou, Linjun Shou, Long Zhou, Michele Tufano, MING GONG, Ming Zhou, Nan Duan, Neel Sundaresan, Shao Kun Deng, Shengyu Fu, and Shujie LIU. CodeXGLUE: A machine learning benchmark dataset for code understanding and generation. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. URL https://openreview.net/forum?id=61E4dQXaUcb.
204
+ Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019.
205
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. arXiv preprint arXiv:1910.10683, 2019.
206
+ Veselin Raychev, Martin Vechev, and Eran Yahav. Code completion with statistical language models. In Proceedings of the 35th ACM SIGPLAN Conference on Programming Language Design and Implementation, pp. 419-428, 2014.
207
+
208
+ Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. arXiv preprint arXiv:1508.07909, 2015.
209
+ Lewis Tunstall, Leandro von Werra, and Thomas Wolf. Natural Language Processing with Transformers. O'Reilly Media, Inc., 2022.
210
+ Ben Wang and Aran Komatsuzaki. GPT-J-6B: A 6 Billion Parameter Autoregressive Language Model. https://github.com/kingoflolz/mesh-transformer-jax, May 2021.
211
+ Yue Wang, Weishi Wang, Shafiq Joty, and Steven CH Hoi. Codet5: Identifier-aware unified pre-trained encoder-decoder models for code understanding and generation. arXiv preprint arXiv:2109.00859, 2021.
212
+ Daniel Zügner, Tobias Kirschstein, Michele Catasta, Jure Leskovec, and Stephan Günnemann. Language-agnostic representation learning of source code from structure and context. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=Xh5eMZVONGF.
213
+
214
+ # A SCALING EFFECT: TRAINED LONGER
215
+
216
+ We compare the performance difference between the model trained for 100K steps and the model trained for 150K steps in Figure 7. We can see that for the larger 2.7B model, training longer, to 150K steps, increases performance uniformly, with Pass@100 increasing the most. However, for a smaller model such as the 400M model, training longer to 150K steps yields only subdued improvements, and Pass@100 drops. This suggests that for the larger model, training for longer may provide an additional boost in performance. This echoes the observation from the training curves (Figure 3) as well.
217
+
218
+ ![](images/b92970c300f8517e1232a2d9abf89620fd7c495e523649649f42aec630eb88b1.jpg)
219
+ (a) 2.7B Model
220
+
221
+ ![](images/4999c3b042853defebb733dc6522b2753ab67ad5e6459e5752dc0ca3ca2e2ff9.jpg)
222
+ (b) 400M Model
223
+ Figure 7: HumanEval performance comparison after training the model for longer.
224
+
225
+ # B TEMPERATURE EFFECT: SMALLER MODELS
226
+
227
+ We show how temperature affects HumanEval performance for models of all three sizes in Figure 8. We find that for a larger model, e.g., the 2.7B model, a temperature as high as 0.8 actually hurts the performance for Pass@100, suggesting that if the model is good enough, a very high temperature may cause the outputs to be too diverse, thus hurting their correctness. This suggests the importance of temperature and the need to tune it individually for different model capacities and different generation scenarios.
228
+
229
+ # C DETAILED PERPLEXITY RESULTS
230
+
231
+ We show the detailed perplexity of different models on different languages in Table 5. The number of tokens shown in the table is obtained after tokenizing the code in each language using its respective Pygments lexer. This number of tokens is used to normalize the perplexity scores to make them comparable across models. Note that CodeParrot is only trained on Python data and thus performs poorly in other languages.
232
+
233
+ ![](images/e33241ef2a21c293287f32c85f0c9e2952d80eb80d541fc5c8beb257d965b88b.jpg)
234
+ (a) 2.7B Model
235
+
236
+ ![](images/5b509909fb3b14b2ed776f75a90d38440510d10ceddac64800d30bc9d64f8245.jpg)
237
+ (b) 400M Model
238
+ Figure 8: HumanEval performance using different softmax temperatures during generation.
239
+
240
+ ![](images/010d6157ca0c6fca469da4abe8737e53bc03cdd8cae6e8d9594ef8ab7b5a7a75.jpg)
241
+ (c) 160M Model
242
+
243
+ <table><tr><td>Language</td><td>#tokens</td><td>Codex*</td><td>PolyCoder 2.7B</td><td>GPT-Neo 2.7B</td><td>GPT-J 6B</td><td>GPT-NeoX</td><td>CodeParrot</td></tr><tr><td>C</td><td>55,333</td><td>2.55</td><td>2.33</td><td>3.69</td><td>2.82</td><td>2.37</td><td>19.23</td></tr><tr><td>C#</td><td>67,306</td><td>1.72</td><td>2.58</td><td>2.49</td><td>2.20</td><td>2.12</td><td>7.16</td></tr><tr><td>C++</td><td>69,627</td><td>1.95</td><td>2.99</td><td>2.87</td><td>2.47</td><td>2.32</td><td>8.48</td></tr><tr><td>Go</td><td>79,947</td><td>1.39</td><td>2.57</td><td>2.19</td><td>1.89</td><td>1.85</td><td>10.00</td></tr><tr><td>Java</td><td>65,484</td><td>1.94</td><td>2.92</td><td>2.78</td><td>2.49</td><td>2.47</td><td>6.79</td></tr><tr><td>JavaScript</td><td>54,620</td><td>2.17</td><td>3.06</td><td>3.07</td><td>2.73</td><td>2.62</td><td>9.23</td></tr><tr><td>PHP</td><td>45,682</td><td>1.98</td><td>3.70</td><td>3.61</td><td>2.81</td><td>2.45</td><td>19.91</td></tr><tr><td>Python</td><td>79,653</td><td>1.47</td><td>3.18</td><td>3.00</td><td>2.68</td><td>2.61</td><td>2.95</td></tr><tr><td>Ruby</td><td>46,537</td><td>1.39</td><td>3.96</td><td>3.77</td><td>3.13</td><td>2.89</td><td>14.26</td></tr><tr><td>Rust</td><td>107,717</td><td>1.96</td><td>3.24</td><td>3.30</td><td>2.92</td><td>2.92</td><td>8.68</td></tr><tr><td>Scala</td><td>65,756</td><td>1.75</td><td>3.87</td><td>3.88</td><td>3.37</td><td>3.33</td><td>12.91</td></tr><tr><td>TypeScript</td><td>55,895</td><td>2.40</td><td>3.61</td><td>3.90</td><td>3.43</td><td>3.41</td><td>12.54</td></tr></table>
244
+
245
+ * Since the exact training set of Codex is unknown, it might have been trained on these test sets, and Codex's results are over-optimistic.
246
+
247
+ Table 5: Perplexity of different models for different programming languages on our evaluation dataset.
2202.13xxx/2202.13169/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:281343502202bba39732b8b140a73da8dff25dc4afa5c7def5e0af5a259bf415
3
+ size 584003
2202.13xxx/2202.13169/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13200/ea734711-65b5-4be7-8325-31204388aeab_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13200/ea734711-65b5-4be7-8325-31204388aeab_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13200/ea734711-65b5-4be7-8325-31204388aeab_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33d4e9b52631a820369238154e0b4d3d65555b0e1912f3f8e9a4ec4fecad56b9
3
+ size 3329674
2202.13xxx/2202.13200/full.md ADDED
@@ -0,0 +1,450 @@
 
 
 
 
 
 
 
 
 
 
1
+ # The Dark Side of Perceptual Manipulations in Virtual Reality
2
+
3
+ Wen-Jie Tseng
4
+
5
+ LTCI, Telecom Paris, IP Paris
6
+
7
+ Palaiseau, France
8
+
9
+ wen-jie.tseng@telecom-paris.fr
10
+
11
+ Elise Bonnail
12
+
13
+ LTCI, Telecom Paris, IP Paris
14
+
15
+ Palaiseau, France
16
+
17
+ elise.bonnail@telecom-paris.fr
18
+
19
+ Eric Lecolinet
20
+
21
+ LTCI, Telecom Paris, IP Paris
22
+
23
+ Palaiseau, France
24
+
25
+ eric.lecolinet@telecom-paris.fr
26
+
27
+ Mark McGill
28
+
29
+ University of Glasgow
30
+
31
+ Glasgow, Scotland, UK
32
+
33
+ mark.mcgill@glasgow.ac.uk
34
+
35
+ Samuel Huron
36
+
37
+ CNRS i3 (UMR 9217)
38
+
39
+ Telecom Paris, IP Paris
40
+
41
+ Palaiseau, France
42
+
43
+ samuel.huron@telecom-paris.fr
44
+
45
+ Jan Gugenheimer
46
+
47
+ LTCI, Telecom Paris, IP Paris
48
+
49
+ Palaiseau, France
50
+
51
+ jan.gugenheimer@telecom-paris.fr
52
+
53
+ # ABSTRACT
54
+
55
+ "Virtual-Physical Perceptual Manipulations" (VPPMs) such as redirected walking and haptics expand the user's capacity to interact with Virtual Reality (VR) beyond what would ordinarily physically be possible. VPPMs leverage knowledge of the limits of human perception to effect changes in the user's physical movements, becoming able to (perceptibly and imperceptibly) nudge their physical actions to enhance interactivity in VR. We explore the risks posed by the malicious use of VPPMs. First, we define, conceptualize and demonstrate the existence of VPPMs. Next, using speculative design workshops, we explore and characterize the threats/risks posed, proposing mitigations and preventative recommendations against the malicious use of VPPMs. Finally, we implement two sample applications to demonstrate how existing VPPMs could be trivially subverted to create the potential for physical harm. This paper aims to raise awareness that the current way we apply and publish VPPMs can lead to malicious exploits of our perceptual vulnerabilities.
56
+
57
+ # CCS CONCEPTS
58
+
59
+ - Human-centered computing $\rightarrow$ Virtual reality; Human computer interaction (HCI).
60
+
61
+ # KEYWORDS
62
+
63
+ virtual-physical perceptual manipulation, VPPM, physical harm, VR security
64
+
65
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
66
+
67
+ CHI '22, April 29-May 5, 2022, New Orleans, LA, USA
68
+
69
+ © 2022 Copyright held by the owner/author(s). Publication rights licensed to ACM.
70
+
71
+ ACM ISBN 978-1-4503-9157-3/22/04...$15.00
72
+
73
+ https://doi.org/10.1145/3491102.3517728
74
+
75
+ # ACM Reference Format:
76
+
77
+ Wen-Jie Tseng, Elise Bonnail, Mark McGill, Mohamed Khamis, Eric Lecolinet, Samuel Huron, and Jan Gugenheimer. 2022. The Dark Side of Perceptual Manipulations in Virtual Reality. In CHI Conference on Human Factors in Computing Systems (CHI '22), April 29-May 5, 2022, New Orleans, LA, USA. ACM, New York, NY, USA, 15 pages. https://doi.org/10.1145/3491102.3517728
78
+
79
+ # 1 INTRODUCTION
80
+
81
+ A particular direction of research at the intersection of Human-Computer Interaction (HCI) and Virtual Reality (VR) explores techniques that we define as Virtual-Physical Perceptual Manipulations (VPPMs). VPPM refers to Extended Reality (XR) driven exploits that alter the human multi-sensory perception of our physical actions and reactions to nudge the user's physical movements<sup>1</sup> (e.g., the position of body and hands). These techniques are often grounded in some threshold of the human perception (e.g., visual dominance [43, 63]) and designed to overcome physical limitations of the current VR technology, enabling new types of interaction. Research focuses predominantly on positive intents, either discovering new VPPMs [10, 31, 34] or presenting positive application scenarios for known VPPMs. For example, redirection techniques are used to provide haptic feedback by changing the user's arm movement [3, 23] or to enable a larger play area by steering the VR user's walking direction [44, 60].
82
+
83
+ However, a VPPM technique may vary in terms of prior consent and knowledge, and may also impact the user's ability to discern whether they are being manipulated. The user may be subjected to manipulation knowingly or unknowingly, and the manipulation may or may not be perceptible to the user. Even if a user consents to being manipulated by VPPMs, they might not be aware of the consequences of their physical actions because most VPPMs are designed to be imperceptible to the user (i.e., below the perception threshold). Crucially, regardless of consent or knowledge, the intent behind a VPPM is open to abuse (e.g., disguising an attack as legitimate redirected walking) and may be opaque or covert to the user. This ambiguity, in terms of consent to, awareness of, knowledge of, and intent behind a given VPPM, is what gives rise to the significant potential for harm. Nothing is stopping malicious third parties from pursuing unknown, potentially harmful outcomes to the VR user using the perception thresholds published beforehand. The lack of a common definition has led to a blind spot in research, where VPPMs are proposed and published without due consideration as to their potential for harm.
86
+
87
+ In this paper, we focus on what is arguably the worst-case scenario - imperceptible VPPMs are applied to the user unknowingly, without consent. In particular, we focus on the potential for harm at an individual level, where one VR user's physical actions (i.e., body motions) are manipulated to a physically abusive end. We defined this physically abusive outcome as physical harm - an action that causes hurt or damage relating to the VR user's body. The user is perceptually manipulated into physical action, and they perceive their agency while performing physical actions. Note that we focus on perceptual manipulation as opposed to physical manipulation. This means that approaches that physically manipulate the user through external devices such as Electrical Muscle Stimulation (EMS) [33] and exoskeletons are out of the scope of this paper. We exclude physical manipulations because these systems can physically direct or override the user's physical actions. Thus the user is implicitly aware of, having consented to this possibility through fitting these devices to their body. Whereas with VPPMs, any physical actions are the result of a reaction to the presented perceptual stimuli, introducing the ambiguity around agency, intent, and consent of applying a VPPM. Based on this definition, we explore what potential physical harm could be provoked to the VR user by manipulating their physical actions through VPPMs, and how malicious actors could potentially abuse VPPMs to provoke physical harm.
88
+
89
+ The paper explores the risks posed by VPPMs as follows. First, we demonstrate the potential threat of provoking physical harm using VPPMs by presenting a threat model. A malicious actor wants to inflict physical harm on the VR user, and they can compromise the VR system by tricking the VR user into installing malware or a malicious app. Second, to better understand these types of threats, we conduct a speculative design workshop using focus groups [24, 39]. Because the physical harm exploited by VPPMs is a novel phenomenon, our goal is to broadly explore the space and to promote discussion between participants. Using a design workshop [18, 22, 48] helps us to generate ideas and identify problems around the potential impact of the malicious use of VPPMs. We ran the workshop twice. The process of the workshops was video-recorded, transcribed, and coded using thematic analysis [7], unveiling 1) a classification of two main classes of attacks (puppetry and mismatching) using VPPMs in VR and 2) a characterization of potential physical harm. Based on this classification of attacks, we present key publications in the HCI and VR community employing VPPMs and note the lack of consideration given to malicious, subversive appropriation of this research. Finally, to demonstrate the process of subverting VPPMs from existing publications in the field of HCI, we implement two sample applications (SteppingOn and HittingFace) based on two prior CHI publications, Haptic Retargeting [3] and Breaking the Tracking [45]. We use both applications to demonstrate and reflect on our process, showing how concepts from VPPM research could be trivially subverted to inflict malicious harm. We end this paper by discussing routes towards mitigating against, and preventing, malicious use of VPPMs for practitioners and the research community.
92
+
93
+ This work has three contributions: 1) the definition of Virtual-Physical Perceptual Manipulation (VPPM), classification of attacks, and characterization of physical harm that could be provoked by VPPMs derived by two speculative design workshops $(n = 8)$ ; 2) two applications showing how we can trivially appropriate existing results of VPPM research towards harmful intent; 3) mitigations and preventative recommendations for practitioners and the research community on how to deal with VPPMs in the future.
94
+
95
+ # 2 THREAT MODEL
96
+
97
+ In our threat model, an attacker wants to inflict physical harm on the VR user, and they can compromise the VR system. This can be done, for example, by tricking the VR user into installing malware or a malicious app. Similar to how smartphone spyware can use the affected smartphone sensors (e.g., as done in the Pegasus spyware<sup>2</sup>), the attacker can access information about the real-world environment around the VR user. This information can be extracted from tracking devices like the front-facing headset camera(s) used for inside-out tracking. The attacker can also exploit the sensors inside the VR headset and the controllers to understand the user's movement in real-time, or access all the standard APIs that are normally available to VR applications. A sample scenario is that a user is tricked into installing a malicious VR app that contains VPPMs that do not specify their intent and are disguised as a part of the application. The user is thus presented with a VR setup that manipulates them imperceptibly (e.g., walking, reaching objects). Because the attacker has access to information about the user's motion and the safety boundaries (e.g., Oculus Guardian), the attacker can inflict physical harm on the user through the setup. Examples of harm include tripping, hitting a wall, holding something dangerous, or walking into a dangerous area in the context of accidents [15] and bystander abuse [41]. Such harms may have significant implications on the user including even death [68].
98
+
99
+ # 3 RELATED WORK
100
+
101
+ Our work builds on prior research in presence, perceptual manipulations, ethics and security in VR.
102
+
103
+ # 3.1 VR Technologies and Experiences
104
+
105
+ VR technologies track the user's physical actions in a 3-D space using Head-Mounted Displays (HMDs) and controllers, providing stimuli (e.g., visual, auditory, haptic) to enable embodied interactions. Through these technologies, VR elicits strong immersive experiences that allow the user to have a subjective feeling of being present in a virtual environment and act realistically, despite the VR user consciously knowing that the virtual environment does not physically exist. The sense of being in a virtual environment is called presence [16, 52, 57] in VR. For example, participants tend to take a longer path on the simulated ground rather than walking over a virtual pit [37]. VR users can also feel that the events happening in the virtual environment are real (e.g., plausibility illusion [50, 55]) and that the virtual body parts or even a full-body avatar have become a part of their own (e.g., embodiment illusion [58]). These illusory states in VR are the outcomes of our perception and do not directly affect our higher cognitive functions [17]. Enhancing the immersive experience and presence in VR becomes a common goal for designing new VR interaction or locomotion techniques. The existence of these illusions, and the fact that they work so well, is one of the main reasons why VPPMs can be applied so effortlessly to a variety of application scenarios.
108
+
109
+ # 3.2 Perceptual Manipulations in VR
110
+
111
+ VR is an excellent platform for applying perceptual manipulation. While VPPMs can apply across the reality-virtuality continuum, we focus on VR because of its greater capacity for inducing an illusion of non-mediation. The simulated content occupies the VR user's visual sensory input, and VR HMDs block the user's view of the outside world to enhance immersion. These features allow designers to make use of the visual dominance [43, 63] and the unawareness of sensory discrepancy [64, 66].
112
+
113
+ Research in HCI and VR develops techniques to manipulate the mapping between virtual and physical environments. Most of the time, these manipulations are below the human perception threshold, making them imperceptible. Previous research found that one can induce pseudo-haptic feedback by controlling the visual input [31, 34] and that VR users are less sensitive to the visual-proprioceptive conflict [10]. Although there is a difference between the virtual and physical environment, our perceptual system interprets the sensory information from VR, and the brain-body system reacts immediately to perform the physical actions [17].
114
+
115
+ Practitioners and researchers then started "hacking" human perception to overcome several limitations of current VR systems (e.g., limited tracked space, lack of haptic feedback). A popular example of such a technique is redirected walking [44, 60, 61], which steers the VR user's physical walking path by interactively and imperceptibly rotating the virtual scene. One can use slow-speed translation/rotation gains below the user's perception threshold or manipulate the stereo image in a see-through HMD [21] to achieve the effect. Redirected touching [23] and redirected haptics [3, 13] re-purpose the VR user's hand to a passive haptic prop by manipulating the visual representation of the user's arm or the virtual scene. These manipulations can also be applied to reduce physical movements and fatigue by improving ergonomics in VR [38], changing the VR user's posture unobtrusively [53], and inducing a sensation of weight [45, 49].
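+ To make the mechanics of such gain-based redirection concrete, the following is a minimal sketch of a per-frame rotation gain, assuming a simple yaw-only model; the gain values and function names are illustrative assumptions and are not taken from the cited systems, whose calibrated thresholds differ per study and per user.
+
+ ```python
+ def apply_rotation_gain(physical_yaw_delta_deg: float,
+                         steer_direction: int,
+                         gain_toward: float = 1.1,
+                         gain_away: float = 0.9) -> float:
+     """Map a physical head-yaw change (degrees per frame) to a virtual one.
+
+     steer_direction: +1 to nudge the user's walking path towards the left,
+     -1 towards the right, 0 for no redirection. Turns toward the steering
+     direction are slightly amplified and turns away are attenuated; the user
+     compensates with their body, which bends their real-world path.
+     The gains are hypothetical "imperceptible" values for illustration only.
+     """
+     if steer_direction == 0 or physical_yaw_delta_deg == 0.0:
+         return physical_yaw_delta_deg
+     turning_toward = (physical_yaw_delta_deg > 0) == (steer_direction > 0)
+     gain = gain_toward if turning_toward else gain_away
+     return physical_yaw_delta_deg * gain
+
+
+ def apply_translation_gain(physical_step_m: float, gain: float = 1.05) -> float:
+     """Map a physical forward displacement (metres) to a virtual displacement,
+     letting a small tracked room feel larger than it is."""
+     return physical_step_m * gain
+ ```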
116
+
117
+ While the aforementioned are applications of VPPMs with positive intent, VPPMs can also be exploited maliciously to provoke harm to the VR user. The adversaries in that case can be VR developers who intentionally (or unintentionally) manipulate the user's perception in a way that has harmful consequences. Our aim is to articulate how VPPMs in VR can be, and likely will be, abused in the future.
118
+
119
+ # 3.3 The Potential Harm and Attacks in VR
120
+
121
+ VR induces strong sensory feedback on our perception. Previous work discussed the ethical implications of conducting VR research
122
+
123
+ [5, 35] and of realism in VR and Augmented Reality (AR) [56]. In our work, we focus on uses of VR that are highly persuasive for benefits (e.g., training), but could also be used for malicious purposes. An example would be to incite a VR user to do something they would not normally do, which in turn leads to harming the VR user.
124
+
125
+ Through VPPM techniques, one can change the VR user's perception of their physical actions. Interaction in current VR applications is dominantly achieved through embodied motions that enhance presence [62]. Compared to interaction with desktop or mobile devices, VR involves movement in a larger-scale 3-D space, which means that the VR user is more likely to encounter physical harm caused by their actions. An example is eliciting the user to sit on a virtual chair that does not have a counterpart in the real world. More examples, like colliding with or hitting real-world objects and falling over, have been identified in a recent work on common VR fails that happen to users at home [15].
126
+
127
+ Recently, security researchers started to explore the potential for immersive VR attacks. Casey et al. [11] presented a software vulnerability and were able to manipulate the visuals of the safety guardians of an HTC VIVE. Using this, the authors identified what they called the "Human Joystick Attack", which allows directing an immersed user's physical movement to a location without the user's knowledge. This attack falls under one of the five categories we identify in our classification of malicious VPPM use. Our work extends this previous research by understanding the larger class of attacks that could be possible using VPPMs. While the security community has started to explore potential vulnerabilities in XR technologies, its current main focus is on finding and closing new attack vectors on the software and hardware [1, 40]. In this work, however, we are not focusing on technical weak spots but are exploring human weak spots. We argue that the HCI community is at the perfect intersection of computer science, psychology, cognitive science, and design to combine knowledge from the fields in which most VPPMs are published.
128
+
129
+ # 4 METHOD: SPECULATIVE DESIGN WORKSHOP
130
+
131
+ Because we want to understand what malicious exploits of VPPM might look like in the future, we refer to methods such as speculative design [2] and design fiction [36]. These approaches allow us to both critique current practices and reflect on future technologies and their ethical implications. We broadly explore this space through a speculative design workshop using focus groups [24, 39] with researchers and designers. Participants had to a) brainstorm scenarios in which VPPMs can be used to induce physical harm to the VR user; b) identify one (or more) dimension upon which the scenarios from the brainstorming can be contextualized (e.g., the severity of physical harm); and c) rate the relevance of each dimension for studying and preventing future physical harms caused by VPPMs in VR.
132
+
133
+ # 4.1 Participants
134
+
135
+ We used snowball sampling and reached out to people from the mailing list. Eight participants (age: $M = 28.3$ , $SD = 2.1$ ) were recruited (Table 1). All researchers worked on VR/XR topics, publishing peer-reviewed papers in top-tier conferences like CHI and
136
+
137
+ Table 1: The background of participants. We asked participants to self-describe their profession and VR expertise.
138
+
139
+ <table><tr><td>ID</td><td>Gender</td><td>Profession</td><td>VR Expertise</td></tr><tr><td>W1P1</td><td>F</td><td>HCI researcher</td><td>expert</td></tr><tr><td>W1P2</td><td>M</td><td>HCI researcher</td><td>above average</td></tr><tr><td>W1P3</td><td>M</td><td>HCI researcher</td><td>above average</td></tr><tr><td>W1P4</td><td>M</td><td>XR/HCI designer</td><td>expert</td></tr><tr><td>W2P5</td><td>M</td><td>curator/designer</td><td>below average</td></tr><tr><td>W2P6</td><td>F</td><td>designer researcher</td><td>average</td></tr><tr><td>W2P7</td><td>F</td><td>HCI researcher</td><td>above average</td></tr><tr><td>W2P8</td><td>F</td><td>graphic/interaction designer</td><td>average</td></tr></table>
140
+
141
+ UIST. To get a more diverse group of people, ideas, and perspectives, we additionally recruited participants who identified their work as primarily design rather than research or development. We argue there is a benefit in having a range of expertise, as experts alone may be overly constrained in their thinking by their knowledge of technical constraints or prior research [14]. It was therefore important to have a blend of expert and non-expert (but familiar) participants. We also want to clarify that none of our participants were novices in the field of XR: most rated their VR expertise as average or higher (5-point Likert scale, $M = 3.75$ , $SD = 1.03$ ). We ran the speculative design workshop twice with four participants each time.
142
+
143
+ The goal was to explore scenarios using VPPMs to provoke physical harm. After our introduction in the workshop, participants were able to design malicious scenarios using VPPMs. P1, P2, P3, and P7 had a Computer Science background and worked on HCI and VR/XR research. P4 worked as an XR/HCI designer in industry, developing VR training platforms for surgeries. P5, P6, and P8 had a design background and worked in design research. A designer could think about diverse contexts and consequences of an abusive scenario, and a researcher could dive deep into the technical details where they considered it necessary. Both workshops included researchers and designers; the first one (W1) was more researcher-focused, and the second one (W2) was more designer-focused. This setup allowed each workshop to enable discussions from different perspectives and elicit valid outcomes.
144
+
145
+ # 4.2 Procedure
146
+
147
+ Figure 1 shows the structure of our speculative design workshop. The workshop consisted of four steps: instruction, brainstorming, synthesizing, and voting. In the instruction step, we first introduced the VPPMs in VR by presenting examples in HCI and VR research, such as Haptic Retargeting [3], Body Follows Eye [53], and redirected walking [21, 61]. Next, we presented our goal – speculate on the potentially abusive VPPMs that could manipulate the VR user's body motions to induce physical harm. This part took 15 minutes to complete.
148
+
149
+ In the brainstorming step, we presented the following assumption: "In 10 to 20 years, VR technology has full body tracking and understands the physiological states of the VR user. People can use VR
150
+
151
+ ![](images/509b7d68e5e8b18138444b6d39f90b6fff641a9a18905a35472c812b327d52a5.jpg)
152
+ Figure 1: The steps, tasks and outcomes of the speculative design workshop.
153
+
154
+ in open space, and VR application becomes more than gaming and lab experiments. VPPMs are able to manipulate whole-body motions and are imperceptible to the VR user." Based on this assumption, we introduced the task:
155
+
156
+ Brainstorming Task: Speculate on a scenario manipulating the VR user's body motions to provoke physical harm.
157
+
158
+ Participants had to describe how they would use VPPMs to elicit physical actions that provoke physical harm. One restriction in the brainstorming was that the VR user has to perceive agency over their physical actions. We do not consider body motions created by an external device (e.g., EMS or an exoskeleton) as VPPMs because the VR user knows the motion is produced by the system. Participants had 10 minutes to brainstorm as many scenarios as they could individually. Afterwards, each participant presented their ideas and discussed them with the other participants (15 minutes).
159
+
160
+ After participants presented their scenarios, we continued with the third step:
161
+
162
+ Synthesizing Task: Identify one (or more) specific dimension to position the presented scenarios.
163
+
164
+ The goal of synthesizing was to understand in more detail the potential harm that could happen using VPPMs. We asked participants to find one or more specific dimensions on which all the presented scenarios (including the ones from other participants) could be positioned. The goal was to find terms and variables that are helpful to understand the potential harm. One example could be to use "amount of pain" as the variable and position scenarios that create little pain further to the left than scenarios that create more pain. Participants created dimensions individually for 10 minutes and then took turns presenting their outcomes to the group for 10 minutes.
165
+
166
+ Finally, in the voting step, we asked participants to rate the relevance of each dimension created in the synthesizing step:
167
+
168
+ Voting Task: Please rate the relevance of each dimension for studying and preventing future physical harm caused by VPPMs in VR.
169
+
170
+ The rating was a 5-point Likert scale ranging from strongly irrelevant to strongly relevant. Note that the two workshops had different scenarios and dimensions. The W1 participants rated the dimensions created in W1 and the same for W2. Here we were
171
+
172
+ interested in the consensus of the participants in each workshop. This part took five minutes to complete.
173
+
174
+ All participants engaged in the discussion during both the brainstorming and synthesizing steps. The discussion allowed participants to collaborate in groups on the scenarios and dimensions they created, so the outcomes were created jointly. All of them contributed to the question about the potential malicious use of VPPMs (scenarios in the brainstorming step) and the range of the presented scenarios (dimensions in the synthesizing step). Participants worked remotely on miro<sup>3</sup>, and both workshops lasted two hours each. We recorded the brainstorming and the synthesizing steps.
175
+
176
+ # 5 WORKSHOP RESULTS
177
+
178
+ In this section, we first introduce the analysis of the results from our speculative design workshops. Next, we present the collected data and extracted results (including the classification of attacks and the characterization of physical harm). Finally, we summarize the observations from the workshops.
179
+
180
+ # 5.1 Data Analysis
181
+
182
+ Figure 1 (the right column) shows the outcome of each step of the workshop. Participants from the two workshops created 19 scenarios and 12 dimensions. The video footage of the workshops was transcribed and anonymized. The transcripts and scenarios were then iterated upon and coded by three authors in joint sessions. Participants did not take part in the analysis. We applied thematic analysis [7] to investigate the underlying themes of the transcribed data. The coding was always done together in nine sessions, each of which took on average two hours. Parts of the recordings were re-watched during the coding sessions to arrive at a consistent interpretation consisting of categories and general themes. Conflicts were resolved by discussing each individual coding.
183
+
184
+ # 5.2 Scenarios
185
+
186
+ Figure 2 presents the 19 scenarios: names, descriptions, techniques used to induce them, and the potential physical harm caused by them. Technique Used and Physical Harm are the codes identified in the thematic analysis. Several scenarios apply redirection techniques to affect the user's physical movements or actions and bring about harmful consequences: Magic Maze, Window Game, Bad Surprise, Minecraftish, Danger Food, Getting Robbed, and Catch a Ride. Three scenarios try to break the habituation to and trust in a system to provoke physical harm (Apartment Hack, Falsely Mapped Apartment, Moving Platform). Some scenarios occlude the physical world with virtual content so the VR user is unaware of the physical harm: Getting Robbed, Start a Fight, Safari, Ocean VR. Insult Simulator uses game instructions to make the user perform inappropriate gestures that insult bystanders. The remaining five scenarios are not directly associated with VPPMs: Technical Repair and Warming Down provide false information to induce harm, Spanning the City is a scenario about advertisement in VR, Double Kayaking Simulator does not specify the technique, and Long Lasting Use of VR is about overusing VR. The descriptions of seven selected scenarios, representative of those using similar techniques, are presented in Appendix A.
187
+
188
+ # 5.3 Classification of Attacks
189
+
190
+ To find commonalities and a potential classification of attacks, we applied open and axial coding to the Technique Used presented with each scenario (the label describing how participants wanted to achieve the effect). The identified codes are shown in the Attack(s) column of Figure 2. We identified two main classes of attacks: puppetry attacks and mismatching attacks. Scenarios that did not fit a larger theme were coded as miscellaneous; they provided different insights such as accidents (S05, S13) or social interaction (S11). Two scenarios (S02 and S19) were coded as unclassified because they were too specific and lacked technical detail. In the following, we focus on the definitions of the puppetry and mismatching attacks and how they are integrated into the scenarios.
191
+
192
+ 5.3.1 Puppetry Attacks. These attacks control the physical actions of different body parts of an immersed user. We argue that, as the technology and research progress, VPPMs will allow controlling different body parts precisely. We therefore use the term "puppetry" to capture the potential of this attack to control different levels of body parts in the future.
193
+
194
+ Walking Puppetry Attack. By applying redirected walking VPPMs, the malicious actor can steer the VR user's walking direction (Figure 3a). The walking puppetry attack was mentioned in several scenarios, including Magic Maze (S03), Window Game (S04), Getting Robbed (S08), Catch a Ride (S09), and Bad Surprise (S17). Participants applied this attack to make a VR user go to a location for provoking potential physical harm (e.g., falling, going to a dangerous area).
195
+
196
+ Arm-Movement Puppetry Attack. The arm-movement puppetry attack controls the physical actions of the VR user's arm. Redirected haptic techniques [3, 23] are the underlying VPPMs. By applying this attack, one can direct a VR user's hand to interact with and break the user's property (Minecraftish, S12) or to reach a physical object that could be harmful during interaction (Danger Food, S15).
197
+
198
+ 5.3.2 Mismatching Attacks. Mismatching attacks are manipulations in which the adversary exploits a difference in information between a virtual object and its physical counterpart to elicit a misinterpretation by the VR user. Here, the baseline environment for the VR user is true-positive: each virtual object has a one-to-one representation in the real world.
199
+
200
+ False-Positive Attack. In Figure 3b, the false-positive attack creates virtual content that has no physical counterpart (e.g., a virtual chair) in a true-positive environment. The VR user is habituated to this one-to-one mapping and therefore believes the false-positive chair exists in the room. Interacting with this content can lead to physical harm (e.g., sitting on a virtual chair and falling on the floor). Scenarios using the false-positive attack are Falsely Mapped Apartment (S10) and Apartment Hack (S06). They require a perfectly mapped environment and a certain degree of trust from the user towards the VR environment. This trust, most of the time, builds upon how much a VR user is accustomed to the environment or interaction. In fact, the habituation can be achieved by repeating a single task. Once the user is used to the task and starts performing it without conscious attention, a false-positive attack becomes dangerous and impactful.
201
+
202
+ <table><tr><td>ID</td><td>Scenario</td><td>Description</td><td>Technique Used</td><td>Physical Harm</td><td>Attack(s)</td></tr><tr><td colspan="6">Workshop 1</td></tr><tr><td>S01</td><td>Safari</td><td>VR user explores a mapped safari environment and confronts untracked wild animals.</td><td>accident, mismatch</td><td>be bitten</td><td>miscellaneous</td></tr><tr><td>S02</td><td>Ocean VR</td><td>Exploring the ocean with VR.</td><td>not specified</td><td>drowning, be eaten by a shark</td><td>unclassified</td></tr><tr><td>S03</td><td>Magic Maze</td><td>User explores a VR maze in their apartment and gets redirected to a stairway.</td><td>redirected walking</td><td>fall</td><td>walking puppetry</td></tr><tr><td>S04</td><td>Window Game</td><td>User is redirected to an open window and follows the instruction of the game to sprint and jump outside of it.</td><td>redirected walking</td><td>fall</td><td>walking puppetry, false-negative mismatch</td></tr><tr><td>S05</td><td>Moving Platform</td><td>User interacts with a haptic display. At one point, the display stops, but the user doesn&#x27;t know and keeps moving and falls.</td><td>breaking expectations using a haptic system</td><td>fall</td><td>miscellaneous</td></tr><tr><td>S06</td><td>Apartment Hack</td><td>Map the entire apartment in VR then add a virtual chair.</td><td>mismatch</td><td>fall</td><td>false-positive mismatch</td></tr><tr><td>S07</td><td>Start a Fight</td><td>HMD recognizes if people walk around the user and incorporates them into the game as an enemy.</td><td>swapping</td><td>get punched, punching others</td><td>swapping mismatch</td></tr><tr><td>S08</td><td>Getting Robbed</td><td>Render VR scene over a shady street and redirect the user over there at night.</td><td>redirected walking, swapping</td><td>get robbed, stabbed</td><td>walking puppetry</td></tr><tr><td>S09</td><td>Catch a Ride</td><td>Block the traffic noise outside and redirect the user onto an open road.</td><td>overloading sensory input, redirection techniques</td><td>get driven over</td><td>walking puppetry</td></tr><tr><td>S10</td><td>Falsely Mapped Apartment</td><td>Map the apartment in VR and start to remove or add virtual objects to break the user&#x27;s habituation to the environment.</td><td>removing or adding new virtual objects</td><td>hitting, breaking objects</td><td>false-positive, false-negative mismatch</td></tr><tr><td>S11</td><td>Insult Simulator</td><td>The user plays a game in VR, but from the outside their posture confronts bystanders.</td><td>narrative (game)</td><td>insulting others, get punched</td><td>miscellaneous</td></tr><tr><td>S12</td><td>Minecraftish</td><td>Redirected haptics nudge the user to grab similar objects to throw and break them.</td><td>redirected haptics, swapping</td><td>throw, damage personal object</td><td>arm-movement puppetry</td></tr><tr><td colspan="6">Workshop 2</td></tr><tr><td>S13</td><td>Technical Repair</td><td>Technicians use VR to detect issues on a system like an elevator.</td><td>wrong repair protocol / wrong zone of detection</td><td>injuries, fall into the pit</td><td>miscellaneous</td></tr><tr><td>S14</td><td>Spanning the City</td><td>Displaying advertisements in a VR tourism application.</td><td>native advertising</td><td>accidentally clicking on virtual ads, stressful</td><td>miscellaneous</td></tr><tr><td>S15</td><td>Danger Food</td><td>Redirect the user&#x27;s hand to reach an apple in VR to bite, but they grab a solid object in the real world.</td><td>haptic retargeting, swapping</td><td>broken teeth</td><td>arm-movement puppetry</td></tr><tr><td>S16</td><td>Long Lasting Use of VR</td><td>Long-lasting use of VR burns the user&#x27;s eyes.</td><td>overstimulation of eye muscle</td><td>burn your eyes</td><td>miscellaneous</td></tr><tr><td>S17</td><td>Bad Surprise</td><td>Redirecting the VR user to walk into his supervisor&#x27;s office.</td><td>redirected walking</td><td>get embarrassed</td><td>walking puppetry</td></tr><tr><td>S18</td><td>Warming Down</td><td>VR training provides a wrong warm-up based on the user&#x27;s physiological state.</td><td>misinformation / wrong instruction</td><td>muscle issues, cramp</td><td>miscellaneous</td></tr><tr><td>S19</td><td>Double Kayaking Simulator</td><td>Two partners train on a kayaking simulator and punch each other during paddling training.</td><td>not specified</td><td>get punched, punching others</td><td>unclassified</td></tr></table>
203
+
204
+ Figure 2: An overview of 19 scenarios we collected, including the name, description, technique used, physical harm, and classified attack of each scenario.
205
+
206
+ False-Negative Attack. In this attack, the malicious actor deliberately hides information from the physical environment, so the VR user is unaware of incoming dangers. For example, overriding traffic noise (Catch a Ride, S09) makes the user unaware of approaching vehicles, which in turn makes them vulnerable. In Falsely Mapped Apartment (S10), malicious actors provoke collisions with the environment by removing a virtual object from a fully-mapped apartment. The false-negative attack could happen when using VR in an open space because the system needs to constantly
207
+
208
+ detect the surroundings. If the attack hides or disguises a physical object (e.g., an open window), the attacker could make the VR user fall or even jump out of this window (Window Game, S04), which could even lead to a fatal outcome.
209
+
210
+ Swapping Attack. The swapping attack happens in the True-Positive situation where each virtual object maps to a physical object. However, the application renders a different virtual image that does not represent the identity of the physical object. Therefore,
211
+
212
+ ![](images/fc33bd70141c481f71b0e013dad00e57e04dbe44d48d4cde80e132febf4f0b63.jpg)
213
+ Figure 3: We illustrate the attacks with sketches of three selected scenarios. For the color code, the blue outline represents the physical world, pink stands for the virtual content, and green shows how the attack works. (a) The VR user is in the Magic Maze scenario and thinks they are walking toward the physical door (the purple arrow). Malicious actors apply the walking puppetry attack to steer the VR user's walking direction and make them fall down a stairway. (b) The VR user is in a fully-mapped apartment (Falsely Mapped Apartment). Malicious actors apply the false-positive mismatching attack to introduce a virtual chair. The user assumes the virtual chair is also fully mapped, so they sit on the chair but end up falling on the floor. (c) A VR user is playing a zombie game where they have to fight zombies with their bare hands. Malicious actors use the swapping mismatching attack to render a virtual zombie over a bystander, making the user start a fight.
214
+
215
+ the VR user believes they are interacting with the virtual object but inadvertently causes physical harm to themselves or to others. In Start a Fight (S07), the bystanders were rendered as the enemy avatars in a fighting VR game, which resulted in the VR user attacking bystanders (Figure 3c).
216
+
217
+ 5.3.3 Reflection on VPPM research and Potential Attacks. While some of the presented scenarios may stretch the imagination, we want to emphasize that, for the most part, the VPPMs underlying these scenarios already exist in prior work. To demonstrate this, we selected for every type of attack a few example publications from the field of HCI. We selected publications that were either working towards a VPPM or presented a new application of VPPMs that could be used to reproduce the work. We want to emphasize that this is by no means an exhaustive list but serves only as an example.
218
+
219
+ For puppetry attacks, we selected seven papers [21, 26, 27, 44, 46, 60, 61] in which the walking attack is possible, and four [3, 23, 45, 49] in which the arm-movement attack is possible. These publications mainly investigated redirection techniques and are often published at AR/VR conferences such as ISMAR, IEEE VR and UIST. In these papers, there are few hardware requirements (although some do need eye-tracking) and the implementations are described in detail. The thresholds for applying VPPMs are also provided in these publications. For mismatching attacks, we selected the following publications (false-positive: [12, 32, 70], false-negative: [19, 32], swapping: [20, 51, 54]). Among the selected publications, only Optical Marionette [21] mentioned the safety concern of manipulating the user's walking in the real world. There is a lack of consideration given to malicious, subversive appropriation of VPPM research.
220
+
221
+ # 5.4 Characterizing Physical Harm
222
+
223
+ In the synthesizing step, we asked participants to identify one (or more) specific dimension on which to position the presented scenarios. We report the two dimensions (severity of physical harm and perceived agency) that received the highest score in the voting step of each workshop. Note that each workshop had a different set of scenarios and dimensions; therefore, the consensus of the voting is within each workshop. Finally, we report a characterization of the physical harm we found in the workshops.
224
+
225
+ Severity of the Physical Harm. Overall, 16 instances of physical harm were mentioned, among which falling and punching each appeared four times. The severity of the physical harm is the most frequently reported dimension in the synthesizing step (6 out of 12 dimensions). We interpret severity as how bad the physical harm to a VR user can be and whether the user can recover from it. In Figure 4a, on the left, the physical harm is a low, brief moment of discomfort (e.g., eyestrain, falling, punches). Towards the right, the physical harm becomes increasingly unrecoverable (e.g., broken teeth, being run over), and the extreme form of severity is death.
226
+
227
+ Perceived Agency. The Perceived Agency (D2) is one of the dimensions reported by participants in the synthesizing step (Figure 4b). Perceived agency describes to what degree VR users consider the harm to be caused by themselves. No agency means the VR user interprets the physical harm as caused by the system (or application). For instance, if a user finds out that the system blocks all auditory information from outside, although they never changed this setting, they perceive no agency in this case. On the other hand, full agency means VR users perceive the harmful consequence as caused by themselves. The implication of the perceived agency dimension is whether a VR user falls for the same trick again. Nevertheless,
228
+
229
+ ![](images/0d9220139c8ebc9ef02ccb91c8a0cdec840a0629fa4232ccb5577595e40b5bf7.jpg)
230
+ severity of physical harm
231
+
232
+ ![](images/69cdc8b7c8946c485466f2d8076aa7ac4979baf7f3149e073f014e936ac0a587.jpg)
233
+ perceived agency
234
+
235
+ because the VPPM's manipulation may or may not be perceptible, a malicious exploit of a VPPM can hide the manipulation from the user and make them blame themselves.
236
+
237
+ The Origin of Harm Created. Similar to how we coded the Technique Used to classify types of attacks, we now coded Physical Harm to find a classification of harm.
238
+
239
+ We find in the dimension of severity that physical harm can be caused by the user (e.g., falling down a stairway) or by others (e.g., someone punches the VR user). This was also mentioned in the origin of physical harm done (D6) in the synthesizing step, in which participants described who committed the physical harm in each scenario. We extend this concept in our coding process and present a $2 \times 2$ matrix (Figure 5) that categorizes physical harm by 1) whether the VR user provokes or receives the harm and 2) whether the other party is an organism or a non-organism.
240
+
241
+ Scenarios with the gray background fit into two quadrants at the same time. Because our task focused on inducing physical harm to a single VR user, most scenarios fall into the quadrant of receiving harm from a non-organism (e.g., falling down a stairway, being run over by a car). Although we asked participants to create physical harm related to the VR user's body, damage to non-organisms still came up during the workshops, for example, hitting furniture or breaking personal property by throwing it. We categorize this property damage as the VR user provoking harm to a non-organism. The VR user can also provoke physical harm to an organism (e.g., punching bystanders in Start a Fight, throwing a pet in Minecraftish). Finally, the VR user can also get hurt by an organism, for example, getting stabbed in Getting Robbed or bitten by wild animals in Safari. This matrix categorizing physical harm could be extended to more than one VR user in the future.
242
+
243
+ # 5.5 Observations from the Workshop
244
+
245
+ Among all the scenarios, seven $(7 / 19 = 37\%)$ of them applied puppetry attacks as a part of the technique used to provoke physical harm
246
+
247
+ ![](images/51b94418b9c4b683cc724ba8508ed7426d2badc89682d9e8cc5f0104fb0f9ce2.jpg)
248
+ Figure 4: We report two dimensions selected from the synthesizing step. (a) Severity of the Physical Harm shows how bad physical harm can be and whether a VR user can recover from the given harm. This dimension varies from mild pain and discomfort (e.g., eyestrain, cramp) to extreme cases (e.g., drowning, being run over). (b) The Perceived Agency indicates to what degree a VR user considers that the physical harm (or consequence) is caused by themselves.
249
+ Figure 5: The matrix categorizes the physical harm by 1) whether the VR user provokes or receives the physical harm and 2) whether the other party is an organism or non-organism. Scenarios with the gray background fit into two quadrants.
250
+
251
+ to the VR user. The puppetry attack was several times combined with mismatching attacks (e.g., Catch A Ride: false-negative + walking puppetry) and is comparatively easy to apply and deploy in VR applications. Therefore, puppetry attacks have the potential to become one of the first archetypes of malicious attacks using VPPMs.
252
+
253
+ Game Mechanisms and Narratives. Most scenarios applied some form of narrative and game mechanics to bring the user into the context of VR. Using enriched narratives is associated with increased presence [67]. Current gaming applications in VR already "remote-control" the VR user's physical actions through the game design. For example, VR rhythm games make the user perform dancing poses originating from the song [65], or players have to maintain different poses by putting their head and hands in the right spot, which can be dabbing, lunges, squats, or even choreography (e.g., OhShape [25]). Because the VR user is immersed in the game and unaware of what their physical actions represent in the real world, a malicious actor can make them perform inappropriate postures that confront bystanders, as described in Insult Simulator.
254
+
255
+ Habituation and Trust. In a discussion during W1, P2 mentioned, "because I believe any application we are talking about right now here requires a degree of trust." This trust in a VR application (system) can be built through habituation to the environment or interaction. An example would be Falsely Mapped Apartment, where the VR user is used to a fully mapped place. A malicious attack removes or adds a virtual object at one point to break this habituation and trust in the system. Another example is Moving Platform, where the user interacts with a haptic display and suddenly the system stops (accidentally or deliberately) to provoke physical harm. The VR user gets used to the interaction and is fully committed to the action they are doing. Then comes the moment to break the habituation and provoke physical harm.
256
+
257
+ # 6 DEMONSTRATING THE POTENTIAL FOR VPPM HARM
258
+
259
+ The workshop illustrates the significant scope and scale of physical harm potentially enabled by VPPMs. However, it would be easy to
260
+
261
+ ![](images/99614ddd01414b7a6d1213a05af5dce79cb1e9b29e3f24187c071e4e2794bab7.jpg)
262
+ Figure 6: (a) The SteppingOn setup has one physical and three virtual stairs. The application redirects the VR user to match the stepping feedback on the physical stair while (b) walking back and forth for collecting apples. (c) The application randomly turns off redirection to create a missing step.
263
+
264
+ write off many of these attacks as infeasible or impractical since our main method was grounded in speculative design and workshops.
265
+
266
+ To demonstrate that the potential for physical harm related to VPPMs is both plausible and pressing, we introduce two implementations of VPPM concepts. These implementations are grounded in two recent publications from CHI [3, 45]. We deliberately chose two publications from our community to emphasize the responsibility we carry when creating such techniques. Our two implementations are meant to demonstrate that, with the information from the papers and some basic computer science knowledge, we were able to create two applications that use puppetry and mismatching attacks. These two applications could potentially be uploaded to open stores such as SideQuest and cause a certain amount of harm to the current early-adopter population of VR technology. While they could be counteracted with simple additions to the publication process or platform-level mitigations (see section 7, Mitigations and Countermeasures), these are currently not in place. The existence of these current weak spots should be an additional call to action to platform developers and markets. The two applications (SteppingOn and HittingFace) mainly leverage the predominant form of VPPM (the puppetry attack), exemplifying how VPPMs can be easily subverted to provoke physical harm to the VR user.
267
+
268
+ # 6.1 SteppingOn: Provoking Missing Steps Using Redirected Walking
269
+
270
+ SteppingOn enables the haptic feedback of stepping on a stair to collect virtual items in VR. The setup (Figure 6a) contains one physical stair functioning as a prop in the real world to support the haptic feedback of three virtual stairs in VR. The user has to walk towards the three virtual stairs to pick apples from the trees and return to the original point to put the apple at a certain position
271
+
272
+ ![](images/01974427bf9e77ddf0b0642cba9e2eb8d581cc1cbfdc3433cbd753f2e5a21b11.jpg)
273
+ Figure 7: (a) A VR user tests several baseball caps on his avatar in VR. (b) The concept of HittingFace is to change the offset between the virtual and physical movements while the user moving the controller closer to the HMD. (c) Because of the trajectory of the controller changes during the movement, HittingFace is able to provoke collision.
274
+
275
+ in VR (Figure 6b). SteppingOn always redirects the user toward the same physical stair while giving them the impression of visiting a different virtual stair each time. When the user drops an apple and turns their head to go back to the stairs, we rotate the VR scene. The rotation of the scene is imperceptible. Once the virtual stair aligns with the physical one, we stop rotating to prevent the alignment from being exceeded. Finally, we add two game mechanics (score and time limit) to make the user commit to grabbing the apples and climbing the stairs. The user must collect as many apples as possible, as fast as they can.
276
+
277
+ During the game, the application randomly turns off the redirection so that the user deviates from the targeted physical stair and makes a missing step (Figure 6c). This effect is similar to the moment when climbing stairs where we think there is one more tread, but we are already standing at the landing and therefore make an additional step. The missing-step effect sometimes triggers small stumbles and can easily be intensified using a higher stair. The setup was inspired by Haptic Retargeting [3] and the concept of redirected walking [44].
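+ As a rough illustration of the redirection loop described above, the following sketch (our reading of the SteppingOn description, not the authors' actual code) rotates the scene only while the user is already turning and occasionally disables redirection; all constants are assumed values.
+
+ ```python
+ import random
+
+ class SteppingOnRedirector:
+     # Assumed, illustrative limits; real imperceptibility thresholds vary per user.
+     MAX_SCENE_ROTATION_PER_FRAME_DEG = 0.2
+     HEAD_TURN_FRACTION = 0.3
+
+     def __init__(self) -> None:
+         self.redirection_enabled = True
+
+     def maybe_sabotage(self, p_disable: float = 0.05) -> None:
+         """The 'attack': occasionally switch redirection off so the physical
+         stair is no longer where the user has learned to expect it."""
+         if random.random() < p_disable:
+             self.redirection_enabled = False
+
+     def scene_rotation(self, misalignment_deg: float,
+                        head_yaw_delta_deg: float) -> float:
+         """Degrees to rotate the whole virtual scene this frame.
+
+         misalignment_deg: remaining angle between the targeted virtual stair
+         and the physical stair. head_yaw_delta_deg: the user's head rotation
+         this frame; the scene only rotates while the user is turning, which
+         masks the manipulation.
+         """
+         if not self.redirection_enabled or misalignment_deg == 0.0:
+             return 0.0
+         budget = min(self.MAX_SCENE_ROTATION_PER_FRAME_DEG,
+                      abs(head_yaw_delta_deg) * self.HEAD_TURN_FRACTION)
+         step = min(budget, abs(misalignment_deg))  # never overshoot alignment
+         return step if misalignment_deg > 0 else -step
+ ```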
278
+
279
+ # 6.2 HittingFace: Changing the Trajectory of Controller Movement to Provoke Collision with the HMD
280
+
281
+ HittingFace is a short example application that manipulates the trajectory of the hand by adding an offset between the virtual and physical position of the controller to provoke a collision between the controller and the HMD. Figure 7a shows the scenario of HittingFace, where the VR user puts different baseball caps on their avatar to test their outfit. When the user selects a cap with the controller, the application records the controller position $(P_{c})$ as the starting position $(P_{start})$ . Next, we calculate $\|P_{hmd} - P_{c}\| / \|P_{hmd} - P_{start}\|$ as an indicator of how close the controller is to the headset. When the VR user puts on the baseball cap in VR, the controller is closer to the HMD. We then add an offset in the facing-forward direction. The application increases the offset abruptly, shifting the visual representation of the controller away from the real one. The VR user then moves the controller even closer to the HMD (Figure 7b), provoking the collision shown in Figure 7c. This application was inspired by Breaking
282
+
283
+ the Tracking [45] that simulates the feedback of weight in VR by using perceptible tracking offsets.
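+ The following is a minimal sketch of the offset logic described above, assuming simple 3-D position vectors; the progress cut-off and maximum offset are hypothetical values chosen for illustration, not parameters reported in [45].
+
+ ```python
+ import numpy as np
+
+ def progress_towards_hmd(p_hmd: np.ndarray, p_controller: np.ndarray,
+                          p_start: np.ndarray) -> float:
+     """0.0 at the recorded start position, approaching 1.0 as the physical
+     controller reaches the headset (a rescaling of the indicator above)."""
+     total = np.linalg.norm(p_hmd - p_start)
+     if total == 0.0:
+         return 1.0
+     remaining = np.linalg.norm(p_hmd - p_controller)
+     return 1.0 - remaining / total
+
+ def rendered_controller_position(p_controller: np.ndarray, forward: np.ndarray,
+                                  progress: float, start_shift: float = 0.6,
+                                  max_offset_m: float = 0.08) -> np.ndarray:
+     """Shift the rendered controller forward (away from the face) once the
+     motion is committed, so the user keeps moving the real controller closer
+     to the HMD than they intend to."""
+     if progress < start_shift:
+         return p_controller          # behave normally at first
+     fraction = (progress - start_shift) / (1.0 - start_shift)
+     return p_controller + forward * (max_offset_m * fraction)
+ ```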
284
+
285
+ # 6.3 Reflection
286
+
287
+ We started by defining the physical harm we wanted to provoke (e.g., a fall, a collision with the HMD). Next, we thought about how to incorporate this physical harm into the physical movements and some game mechanics. Inspired by habituation, the applications apply the manipulations only after the user has become familiar with the interaction. At one point, the applications start to nudge the user's physical movement (e.g., walking direction, hand movement trajectory) and provoke the physical harm we chose. Implementing these sample applications (SteppingOn and HittingFace) shows how current concepts from VPPM research can be trivially subverted.
288
+
289
+ We did not evaluate either application due to the high risk of hurting participants. The purpose of presenting both applications is to show how easy it can be to subvert an existing VPPM to provoke physical harm. Both demonstrations may seem easy to counter; an example would be detecting the discrepancy between the virtual and physical movements and using a threshold to stop a VPPM technique. However, the malicious use of VPPMs and its countermeasures are both currently unexplored spaces for researchers and practitioners. The goal of the two applications is to raise awareness and initiate discussions in the HCI and VR communities. We further discuss mitigations and preventative recommendations against the malicious use of VPPMs, from the end-user to the platform level, in section 7.
290
+
291
+ # 7 MITIGATIONS AND COUNTERMEASURES
292
+
293
+ We have discussed the potential attacks, physical harm, and how to provoke them using VPPMs. In this section, we reflect on mitigations and preventative recommendations against the malicious use of VPPMs for practitioners and researchers.
294
+
295
+ Awareness and Consent of VR Users. When applying VPPMs, the user may be subjected to manipulation knowingly or unknowingly, and the manipulation may or may not be perceptible to the user. This is one way in which malicious actors can hide their intention and provoke physical harm to the VR user. Given this, it would be reasonable to suggest that future VR applications using VPPMs should, at a minimum, disclose that such an approach is being used and, in particular, the intent behind its usage.
296
+
297
+ Where a VPPM might be particularly risky or open to abuse, we would suggest it should be described to the user in sufficient detail to seek informed consent for applying such manipulations and perceptual cheats. For example, applications should be transparent about what kinds of actions are manipulated using VPPMs, how these actions are represented, and the possible effects on VR users [8, 9]. At the same time, VR users should be freely able to select different levels of deception provided by VPPMs [56]. This concept originates from reducing the realism of an XR application if a user only wants a small taste of the virtual environment. By providing this option, VR users can voluntarily choose to what degree they want to be manipulated by VPPMs, if they feel comfortable with the manipulation at all. Applications using VPPMs also need to respect the VR user's right to withdraw at any time by providing an opt-out option for stopping the VPPM technique [6, 56].
298
+
299
+ Validation / App Store Protections. App platforms (e.g., Steam, Oculus Store, SideQuest) also need to verify what type of VPPM, and how much of it, is used in an application. In the same way that malicious actors have access to reference implementations and perceptual thresholds, so do the platforms that profit from selling XR applications and experiences. Thus we assert that the responsibility should, in part, fall on their shoulders to seek out ways to detect the presence of such manipulations in the applications that they provide. In the long run, these platforms should build a standardized rating system for induced content [59, 69] and VPPMs as additional information for end-users.
300
+
301
+ Platform-Level Mitigations: Provision and Detection. We anticipate that platform-level APIs (e.g., OpenXR<sup>4</sup>) could provide access to safe, permitted, and validated VPPMs that tie into mechanisms for awareness and consent. An example would be an OpenXR software library of redirection techniques that could prevent malicious implementations.
302
+
303
+ Considering the pipeline of AR/VR technology, a device needs to sense raw data, extract information to recognize high-level semantics, and render on top of the HMD [47]. Platforms could implement low-level protections against the unpermitted usage of VPPMs in the sensing and rendering stages. For example, the discrepancy between virtual and physical movements could be monitored [28, 29]. If the physical movement deviates significantly from the virtual movement, this could reveal some types of VPPM (e.g., the gain-type ones). Similarly, one could imagine the platform detecting dangerous overlaps between virtual content and the physical environment. An example would be a virtual target overlaid on a physical lamp, which poses a risk of non-organism damage. This type of mitigation could be part of reality-aware headsets, where the virtual and physical context needs to be considered to make the experience safer for users.
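+ As a sketch of such a platform-side check (assumed threshold, hypothetical function names, not an existing runtime API), a runtime could compare accumulated physical and virtual head-path lengths and flag gain-type manipulations when they diverge:
+
+ ```python
+ import numpy as np
+
+ PATH_DEVIATION_RATIO = 0.08   # assumed tolerance; a real runtime would calibrate it
+
+ def path_length(points: list[np.ndarray]) -> float:
+     """Accumulated length of a polyline of per-frame head positions."""
+     return float(sum(np.linalg.norm(b - a) for a, b in zip(points, points[1:])))
+
+ def flags_gain_type_vppm(physical_head_positions: list[np.ndarray],
+                          virtual_head_positions: list[np.ndarray]) -> bool:
+     """Compare accumulated path lengths rather than absolute positions so
+     legitimate teleport-style locomotion is not flagged; a persistent gap
+     between the two is a signature of gain-type VPPMs."""
+     phys = path_length(physical_head_positions)
+     virt = path_length(virtual_head_positions)
+     if phys == 0.0:
+         return virt > 0.0
+     return abs(virt - phys) / phys > PATH_DEVIATION_RATIO
+ ```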
304
+
305
+ Lastly, on the device level, one can apply permission-based security with access control lists [4]. A VR system could thereby prevent a malicious third-party application from abusing access to sensory data, for example, blocking access to captured camera images to avoid incorporating bystanders as enemy avatars as in Start A Fight.
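+ A minimal sketch of such a permission gate (hypothetical identifiers and permission names, not an existing runtime API) could look as follows:
+
+ ```python
+ # Hypothetical per-app permission registry on the headset runtime.
+ GRANTED_PERMISSIONS: dict[str, set[str]] = {
+     "com.example.passthrough_utility": {"camera.passthrough"},
+ }
+
+ def can_read_camera(app_id: str) -> bool:
+     """Check the access control list before exposing passthrough imagery."""
+     return "camera.passthrough" in GRANTED_PERMISSIONS.get(app_id, set())
+
+ def deliver_camera_frame(app_id: str, raw_frame: bytes) -> bytes | None:
+     """Only permitted apps receive passthrough frames; an unpermitted game
+     cannot locate bystanders and thus cannot stage a swapping attack such as
+     Start a Fight."""
+     return raw_frame if can_read_camera(app_id) else None
+ ```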
306
+
307
+ Community-led Regulations and Guidelines. In time, we would expect that regulations could be formed around our proposed mitigations and preventative measures. There are a number of routes that could accomplish this. Most immediately, we propose that such regulation could be formulated by not-for-profit organizations in this space (e.g., XRSI<sup>5</sup>), creating voluntary guidelines that could guide the actions both of app platforms and app developers [42]. Eventually, one could imagine firmer legal protections being put in place. An example would be an equivalent of $\mathrm{GDPR}^6$ such as an extended reality protection regulation (XRPR) that would include the right to perceptual integrity. As recent works on the human rights implications of neurotechnology discuss (e.g., [72, 73], and The NeuroRights Foundation<sup>7</sup>), an XRPR would also have to include the right to agency and consent to choose one's own actions while using VPPMs.
308
+
309
+ The Role of the Research Community: Anticipation and Disclosure. VPPMs offer obvious advantages to interaction design and locomotion in particular, having been repeatedly pursued by research. Consequently, the implementation details of VPPMs and the perception thresholds found are open to everyone. However, this information is also available to malicious actors. This early insight gives malicious actors the chance to abusively exploit published results and concepts, for example, using VPPMs to enact harmful consequences on VR users. Fundamentally, the current way we apply and publish VPPMs is hacking human perception. We consider this hack as exposing a weak spot of our perceptual vulnerabilities. One can provide patches to fix the software backdoor, as we have previously discussed, but there is no patch to fix the hack of our perception directly.
310
+
311
+ In our view, these risks necessitate a change of approach regarding how we disseminate novel research related to VPPMs. We suggest that the research community should publish VPPM with the potential threats/risks in mind. The community should consider the perceptibility of a given VPPM instead of only optimizing for presence, immersion, and other usability measurements (e.g., performance). This approach would ensure one could apply VPPM always above the perception threshold during VR interaction, allowing VR users to know they are interacting with a certain degree of manipulation. This idea is already starting to get explored in the field of locomotion. Rietzler et al. [46] proposed using perceptible thresholds to reduce the space requirement for redirected walking, which could also benefit a transparent usage of VPPM. Finally, we suggest if a VPPM publication has the potential to enable abusive outcomes (e.g., if it has the potential to facilitate one of the attacks identified herein), then the author(s) should include discussion regarding the potential threat/risk posed at-a-minimum.
312
+
313
+ # 8 DISCUSSION
314
+
315
+ Our goal with this paper was to start the first exploration into how dangerous current VPPMs could become in the future. While we are able to observe current applications of VPPMs, we needed to apply speculative design methods to try to predict how these current VPPMs could be subverted in the future. Applying this method allowed us to present a definition of VPPMs and a set of speculative scenarios which we used to derive a classification of attacks and gain a better understanding of the characteristics of the potential harm arising from the VPPMs. We identified five potential attacks (puppetry: walking, arm-movement; mismatching: false-positive, false-negative, swapping), a categorization of harm (provoke/receive matrix), and two variables that participants found particularly important when thinking about VPPMs (severity of physical harm and perceived agency).
316
+
317
+ Physical harm is a novel problem that arises at the intersection of HCI, XR, and Security/Safety research. This unique combination calls for using methods from security research, combined with insights from HCI, applied to applications in XR. Additionally, XR may become an "ideal" platform to abuse perceptual vulnerabilities and manipulate the user's motion. The ability to manipulate the VR user's physical movements and actions could have far more impact than merely hitting a piece of furniture.
318
+
319
+ Pursuing positive outcomes (e.g., speed, accuracy, enjoyment) is usually a common goal for HCI research. VPPMs help in overcoming the limitations of VR technologies. They also expose a weak spot of VR users, who are particularly vulnerable because they lose connection with the real world. Our intention is to raise awareness that interaction design in VR using VPPMs could be used with malicious intent as well. Although the examples shown in our applications may seem easy to thwart, such countermeasures are currently not in place because VPPMs are mostly used inside research. Meanwhile, it is necessary to ensure that developers are aware of these potential attacks and that they take measures to prevent or mitigate them. We want to emphasize the importance of the safety and security of the VR user, with a particular focus on physical harm done by human perception hacking. To the best of our knowledge, we are the first to establish the term, organize the knowledge in this domain, and lay out suggestions on how to deal with VPPMs (i.e., section 7).
320
+
321
+ # 8.1 Limitations
322
+
323
+ Our work encounters a methodological situation known as the Collingridge dilemma<sup>8</sup>. The malicious use of VPPMs cannot be easily predicted until the techniques are extensively developed and widely used. However, by the time we can do that, controlling or changing the usage of VPPMs is difficult because the technology has become entrenched. Therefore, we chose speculative design as our approach to both critique current practices and reflect on future technologies and their implications.
324
+
325
+ The resulting scenarios show the possibilities of potential harm exploited by VPPMs. Using a speculative design workshop allows us to broadly explore this space. However, one outcome that we are not able to assess with the current method is the likelihood of malicious attacks using VPPMs and the occurrence of physical harm in the everyday usage of VR. Nonetheless, surveying the in-the-wild VR phenomena (e.g., VR fails [15] or interactions between VR users and bystanders [41]) could provide one route towards early detection of these attacks happening in practice, and such research would be aided by our findings.
326
+
327
+ Our participants were from HCI research and design research backgrounds. The resulting scenarios were therefore more oriented towards interaction design research. We did not interpret the results depending on the participants' expertise because participants collaborated during the workshop to create the outcomes (scenarios, dimensions). VPPMs are currently mainly used inside research; therefore, input from our participants is valid because it reflects how research communities perceive the malicious use of VPPMs and how we can mitigate it in the future. However, we acknowledge that our current results show only one perspective on the malicious use of VPPMs. Future research should consider similar studies and experiments with people from the safety and security area, technical VR/XR, and dark design patterns to provide in-depth technical details in this direction.
328
+
329
+ # 8.2 Future Work
330
+
331
+ Our work is a first exploration into a topic whose risk could potentially grow exponentially the moment we have always-on XR devices. Based on our current findings, we open the door
332
+
333
+ to further research into the malicious potential of perceptually manipulating users in the context of XR.
334
+
335
+ Intent beyond Physical Harm. This paper currently focuses on physical harm, but we want to point out that the malicious use of VPPMs could accomplish more than 'just' provoking physical harm. The realism of VR technology can induce certain behavioral changes (e.g., given the virtual representation in VR, users with taller avatars negotiated more aggressively than users with shorter avatars [71]). Slater and colleagues [56] discussed the psychological realism of AR/VR and its possible impact on the user. In both workshops, participants (P2 and P5) mentioned the possibility of inflicting psychological harm on the user (e.g., a VR application introduces a phobia to the VR user and makes them forever afraid of using an HMD). Unlike perceptual manipulation of physical movements, psychological harm can provoke not only an immediate effect and reaction but also long-term impact (e.g., trauma or phobia).
336
+
337
+ Harm beyond the VR User, and the Here-and-Now. Although we focus on provoking harm to one VR user, malicious attacks could easily go beyond that. We already find some examples in our workshops. For instance, Start a Fight (S07) renders bystanders as enemies in VR and makes the VR user punch them or vice versa. The other example is Minecraftish (S12), in which the VR user throws an object at pedestrians. In the results of the synthesizing step, P3 presented the social involvement dimension (D5) that ranges from "harm yourself" to "harm others". Harming others could be exploited in several ways, such as hitting bystanders (S07), insulting people (S11), or letting others watch the VR user suffering or even dying (S04 and S09). The target of malicious actors varies from a VR user, multiple VR users, and bystanders to objects and organisms in the environment. VPPMs could also be used to create the circumstances for harm in the future, e.g., using the VR user to manipulate elements in the physical environment that might cause harm to bystanders later. We have examined only a narrow scope of the potential harms that could be made possible by VPPMs in the future, and suggest consideration be given to further understanding multi-user VPPMs, harm beyond the VR user, and creating the circumstances for harm beyond the VR session.
338
+
339
+ Challenges of VPPMs in AR and XR. We anticipate that researchers and practitioners will also apply VPPMs to AR and XR in the future. As an example, Optical Marionette [21] applied redirected walking on video see-through HMDs. With video see-through HMDs, malicious actors are still able to apply both puppetry and mismatching attacks since they still have full control over the visuals shown to the user. However, when using optical see-through HMDs (e.g., deceptive holograms [30]), applying puppetry attacks becomes more challenging because the user can observe their physical movements at the same time. Future VR, AR, and XR technologies will allow the user to break free of the static play space and move around freely in the world. The safety risk may be amplified, and mismatching attacks will still be able to trick the user (e.g., substituting virtual and physical content on video/optical see-through HMDs to provoke falling over). Future research could continue to explore novel attacks using VPPMs in this direction, understanding the common attacks shared across XR devices.
340
+
341
+ Broadly, whilst it would be understandable if there were still some scepticism regarding the prescience of the risks posed by VPPMs, it is our view that we have only just begun to understand the extent to which XR users are exposed to risks through these techniques. As XR technology and its requisite sensing grow in capability, so too will a malicious actor's ability to exploit this technology for harmful intent. Consequently, it is paramount that research to this end be considered and acted upon before real harm is inflicted upon real users.
342
+
343
+ # 9 CONCLUSION
344
+
345
+ In this paper, we define VPPMs as XR-driven exploits that alter the human multi-sensory perception of our physical actions and reactions to nudge the user's physical movements. Through speculative design workshops, we collect a set of harmful scenarios using VPPMs, identify two main classes (puppetry and mismatching) of potential attacks, and characterize the physical harm. Two sample applications (SteppingOn and HittingFace) are implemented as a demonstration of how current concepts from VPPM research can be trivially subverted. Finally, we propose platform-level mitigations and preventative recommendations for practitioners and researchers against the malicious use of VPPMs. Our work opens new research directions at the intersection of HCI, XR, and security research. We want to raise awareness that the current way we apply and publish VPPMs can lead to malicious use of our perceptual vulnerabilities. We consider that the current practice provides a dangerous leak of human perceptual weak spots (human perception thresholds that cannot be patched) which can be exploited by future malicious actors. Overall, we argue that VPPMs do have the potential to be misused to provoke physical harm in the future, and HCI as an academic discipline should become more cautious when publishing such work and also reflect on the potential for abuse.
346
+
347
+ # ACKNOWLEDGMENTS
348
+
349
+ This work was partially conducted within the HARMFULVR JCJC project (ANR-21-CE33-0013) funded by the French National Research Agency (ANR). We thank the anonymous reviewers for their advice on improving this paper, and the workshop participants for their contributions and time.
350
+
351
+ # A DESCRIPTIONS OF SELECTED SCENARIOS
352
+
353
+ [Magic Maze (S03), exploits: redirected walking, physical harm: fall]. Magic Maze is an application where the VR user explores a virtual maze in their apartment or a building. The application applies redirected walking to the VR user to control their walking direction in this space. As the application steers the VR user towards a stairway, they are unaware of the height difference and fall.
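To make the exploit in Magic Maze concrete: redirected walking steers users by injecting small scene rotations that stay below perceptual detection thresholds [26, 60]. The snippet below is only a minimal illustrative sketch in plain Python (no VR SDK); the curvature radius, walking speed, and frame rate are placeholder values, not the published thresholds.

```python
import math

# Illustrative curvature gain: the virtual scene is rotated slightly every
# frame while the user walks, so walking "straight" in VR traces a curve in
# the physical room. The constants are placeholders, not perceptual
# detection thresholds from the redirected-walking literature.
CURVATURE_RADIUS_M = 7.5   # hypothetical physical curve radius
WALK_SPEED_M_S = 1.0
FRAME_DT_S = 1.0 / 90.0

def injected_rotation_per_frame() -> float:
    """Rotation (radians) silently added to the virtual camera each frame."""
    distance_per_frame = WALK_SPEED_M_S * FRAME_DT_S
    return distance_per_frame / CURVATURE_RADIUS_M   # arc angle = s / r

def physical_heading_after(seconds: float) -> float:
    """Accumulated real-world heading change while the VR path stays straight."""
    frames = int(seconds / FRAME_DT_S)
    return math.degrees(frames * injected_rotation_per_frame())

if __name__ == "__main__":
    print(f"heading change after 30 s: {physical_heading_after(30):.1f} degrees")
```

Over tens of seconds the physical path bends substantially even though each per-frame rotation is imperceptible, which is exactly the property a malicious application could abuse to steer the user towards a stairway.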
354
+
355
+ [Start a Fight (S07), exploits: swapping, physical harm: punch other, get punched]. In this scenario, a VR user plays a game in a public space where the goal is to fight enemies. The VR application detects bystanders in the real world and maps the enemy's avatar onto a bystander, so that the VR user punches them. This would result in harm to bystanders and potential harm to the user.
356
+
357
+ [Getting Robbed (S08), exploits: redirected walking, physical harm: get robbed, stabbed]. Getting Robbed shows that a VR user is in a
358
+
359
+ game like Pokemon Go and needs to walk around in VR to collect items. The items are located in dangerous places in the real world, and all the physical surroundings are replaced by the virtual game view. This results in a practically blindfolded user walking without knowing where they are headed, which can then be abused once the victim has been lured into a dangerous area (e.g., being robbed or physically attacked in an alley).
360
+
361
+ [Catch a Ride (S09), exploits: redirected walking, overplay audio feedback, physical harm: get driven over]. Catch a Ride is a scenario where a VR user is immersed in a VR game in an open space. The game has loud audio feedback that can drown out sounds from the real world. The game redirects the user onto an open road, where they can be hit by a car because they can neither see the traffic nor hear its noise.
362
+
363
+ [Falsely Mapped Apartment (S10), exploits: remove or add virtual objects in a one-to-one mapped environment, physical harm: fall or collision]. In this scenario, a future VR technology allows users to re-create a fully mapped apartment in VR. The VR user can touch anything and sit anywhere as they do in the real world, believing that this mapping matches their real-world home. The user habituates to this environment as each real-world object is mapped to a VR one. Malicious actors may exploit this by adding or removing virtual objects. Adding VR objects may result in injuring the user by, for example, sitting on a VR chair that has no physical counterpart. Similarly, removing VR objects may result in collisions with real-world objects, such as tables that have no counterpart in VR. The idea in this scenario is to first make the user accustomed to a one-to-one mapping between the virtual and the real world, and trust that it holds, and then introduce or remove objects to unexpectedly break this mapping.
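One way to ground the platform-level mitigations proposed in this paper for this scenario is a runtime consistency check between the headset's room scan and the application's scene graph. The sketch below is purely illustrative and uses hypothetical data structures (`Obj`, `unmatched_objects`); a real runtime would obtain these from its own scene-understanding APIs.

```python
from dataclasses import dataclass

# Hypothetical scene representations: in a real runtime these would come from
# the headset's room scan and from the application's scene graph.
@dataclass(frozen=True)
class Obj:
    label: str
    x: float
    y: float
    z: float

def unmatched_objects(physical, virtual, tol=0.3):
    """Return (removed_from_vr, only_in_vr): physical objects with no nearby
    virtual counterpart, and virtual objects with no physical counterpart."""
    def has_match(o, pool):
        return any(o.label == p.label and
                   (o.x - p.x) ** 2 + (o.y - p.y) ** 2 + (o.z - p.z) ** 2 <= tol ** 2
                   for p in pool)
    removed_from_vr = [o for o in physical if not has_match(o, virtual)]
    only_in_vr = [o for o in virtual if not has_match(o, physical)]
    return removed_from_vr, only_in_vr

if __name__ == "__main__":
    physical = [Obj("chair", 1.0, 0.0, 2.0), Obj("table", 0.0, 0.0, 1.0)]
    virtual = [Obj("chair", 1.0, 0.0, 2.0),       # table removed in VR
               Obj("chair", 3.0, 0.0, 0.5)]       # phantom chair added in VR
    removed, phantom = unmatched_objects(physical, virtual)
    print("removed from VR:", [o.label for o in removed])
    print("only in VR:", [o.label for o in phantom])
```

Flagging "only in VR" and "removed from VR" objects before rendering would let a platform warn the user when a previously one-to-one mapping is silently broken.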
364
+
365
+ [Insult Simulator (S11), exploits: game mechanics, physical harm: insult bystanders, get punched]. In this scenario, the VR user plays a game in an open space with people around, and the game mechanics lead the user to perform physical actions that appear insulting to onlookers who lack context. In an example illustrated by our participants, a user follows the narrative in VR and reaches out with a bare hand, which from the outside may look like a Nazi salute. A bystander who does not know what the VR user is doing may feel insulted or offended as a result.
366
+
367
+ [Minecraftish (S12), exploits: redirected haptics and swapping, physical harm: throwing objects at others]. Minecraftish is another scenario that uses redirected haptics to make the VR user grab a real-world object that they think resembles a counterpart in a Minecraft-style VR game. The application can access information captured by the VR headset, and at some point it redirects the VR user to grab an object (or a pet) in the environment that resembles the virtual content. Because the VR user thinks they are doing the task in VR and does not perceive the difference, they stack up or even throw a potentially harmful object (e.g., a hot drink or a sharp object) out of the window, hitting pedestrians. The physical harm in this scenario affects personal objects or other organisms (e.g., a pet or a bystander) in the environment.
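For readers unfamiliar with the redirected haptics exploited here, the following is a rough, simplified sketch of the body-warping idea behind haptic retargeting [3]: the rendered hand is offset by a growing fraction of the gap between the virtual target and a physical object, so that completing a reach in VR lands the physical hand on a different real object. The blending rule and all names are placeholders, not the published algorithm.

```python
import numpy as np

# Simplified body-warping step in the spirit of haptic retargeting [3].
def rendered_hand(physical_hand, physical_target, virtual_target, start):
    # Reach progress in [0, 1]: 0 at the start pose, 1 when the physical hand
    # arrives at the physical target.
    total = np.linalg.norm(physical_target - start) + 1e-9
    progress = np.clip(1.0 - np.linalg.norm(physical_target - physical_hand) / total, 0.0, 1.0)
    # Offset grows with progress, so the virtual hand reaches the virtual
    # target exactly when the physical hand reaches the physical object.
    offset = virtual_target - physical_target
    return physical_hand + progress * offset

if __name__ == "__main__":
    start = np.array([0.0, 0.0, 0.0])
    physical_target = np.array([0.4, 0.0, 0.3])   # e.g., a mug on the desk
    virtual_target = np.array([0.2, 0.0, 0.5])    # where the object appears in VR
    hand = np.array([0.3, 0.0, 0.22])             # partway through the reach
    print(rendered_hand(hand, physical_target, virtual_target, start))
```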
368
+
369
+ # REFERENCES
370
+
371
+ [1] Nadisha-Marie Aliman and Leon Kester. 2020. Malicious Design in AIVR, Falsehood and Cybersecurity-oriented Immersive Defenses. In 2020 IEEE International
372
+
373
+ Conference on Artificial Intelligence and Virtual Reality (AIVR). IEEE, Utrecht, Netherlands, 130-137. https://doi.org/10.1109/AIVR50618.2020.00031
374
+ [2] James Auger. 2013. Speculative design: crafting the speculation. Digital Creativity 24, 1 (March 2013), 11-35. https://doi.org/10.1080/14626268.2013.767276
375
+ Publisher: Routledge_eprint: https://doi.org/10.1080/14626268.2013.767276.
376
+ [3] Mahdi Azmandian, Mark Hancock, Hrvoje Benko, Eyal Ofek, and Andrew D. Wilson. 2016. Haptic Retargeting: Dynamic Repurposing of Passive Haptics for Enhanced Virtual Reality Experiences. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI '16). Association for Computing Machinery, San Jose, California, USA, 1968-1979. https://doi.org/10.1145/2858036.2858226
377
+ [4] David Barrera, H. Gunes Kayacik, Paul C. van Oorschot, and Anil Somayaji. 2010. A methodology for empirical analysis of permission-based security models and its application to android. In Proceedings of the 17th ACM conference on Computer and communications security (CCS '10). Association for Computing Machinery, New York, NY, USA, 73-84. https://doi.org/10.1145/1866307.1866317
378
+ [5] Katharina-Maria Behr, Andreas Nosper, Christoph Klimmt, and Tilo Hartmann. 2005. Some Practical Considerations of Ethical Issues in VR Research. Presence: Teleoperators and Virtual Environments 14, 6 (Dec. 2005), 668-676. https://doi.org/10.1162/105474605775196535
379
+ [6] Steve Benford, Chris Greenhalgh, Gabriella Giannachi, Brendan Walker, Joe Marshall, and Tom Rodden. 2012. Uncomfortable interactions. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems. Association for Computing Machinery, New York, NY, USA, 2005-2014. https://doi.org/10.1145/2207676.2208347
380
+ [7] Virginia Braun and Victoria Clarke. 2006. Using thematic analysis in psychology. Qualitative Research in Psychology 3, 2 (Jan. 2006), 77-101. https://doi.org/10.1191/1478088706qp063oa Publisher: Routledge _eprint: https://www.tandfonline.com/doi/pdf/10.1191/1478088706qp063oa.
381
+ [8] Philip Brey. 1999. The ethics of representation and action in virtual reality. Ethics and Information Technology 1, 1 (March 1999), 5-14. https://doi.org/10.1023/A:1010069907461 Company: Springer Distributor: Springer Institution: Springer Label: Springer Number: 1 Publisher: Kluwer Academic Publishers.
382
+ [9] Philip Brey. 2014. Virtual Reality and Computer Simulation. In Ethics and Emerging Technologies, Ronald L. Sandler (Ed.). Palgrave Macmillan UK, London, 315-332. https://doi.org/10.1057/9781137349088_21
383
+ [10] E. Burns, S. Razzaque, A. T. Panter, M. C. Whitton, M. R. McCallus, and F. P. Brooks. 2005. The hand is slower than the eye: a quantitative exploration of visual dominance over proprioception. In IEEE Proceedings. VR 2005. Virtual Reality, 2005. IEEE, Bonn, Germany, 3-10. https://doi.org/10.1109/VR.2005.1492747 ISSN: 2375-5334.
384
+ [11] P. Casey, I. Baggili, and A. Yarramreddy. 2019. Immersive Virtual Reality Attacks and the Human Joystick. IEEE Transactions on Dependable and Secure Computing 18, 2 (2019), 1-1. https://doi.org/10.1109/TDSC.2019.2907942 Conference Name: IEEE Transactions on Dependable and Secure Computing.
385
+ [12] L. Cheng, E. Ofek, C. Holz, and A. D. Wilson. 2019. Vroamer: Generating On-TheFly VR Experiences While Walking inside Large, Unknown Real-World Building Environments. In 2019 IEEE Conference on Virtual Reality and 3D User Interfaces (VR). IEEE, Osaka, Japan, 359–366. https://doi.org/10.1109/VR.2019.8798074 ISSN: 2642-5254.
386
+ [13] Lung-Pan Cheng, Eyal Ofek, Christian Holz, Hrvoje Benko, and Andrew D. Wilson. 2017. Sparse Haptic Proxy: Touch Feedback in Virtual Environments Using a General Passive Prop. In Proceedings of the 2017 CHI Conference on Human Factors in Computing Systems (CHI '17). Association for Computing Machinery, Denver, Colorado, USA, 3718-3728. https://doi.org/10.1145/3025453.3025753
387
+ [14] Nigel Cross. 2004. Expertise in design: an overview. Design Studies 25, 5 (Sept. 2004), 427-441. https://doi.org/10.1016/j.destud.2004.06.002
388
+ [15] Emily Dao, Andreea Muresan, Kasper Hornbaek, and Jarrod Knibbe. 2021. Bad Breakdowns, Useful Seam, and Face Slapping: Analysis of VR Fails on YouTube. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems (CHI '21). Association for Computing Machinery, New York, NY, USA, 1-14. https://doi.org/10.1145/3411764.3445435
389
+ [16] John V. Draper, David B. Kaber, and John M. Usher. 1998. Telepresence. Human Factors 40, 3 (Sept. 1998), 354-375. https://doi.org/10.1518/001872098779591386 Publisher: SAGE Publications Inc.
390
+ [17] Mar Gonzalez-Franco and Jaron Lanier. 2017. Model of Illusions and Virtual Reality. Frontiers in Psychology 8 (2017), 1-8. https://doi.org/10.3389/fpsyg.2017.01125
391
+ [18] Kim Halskov and Peter Dalsgard. 2006. Inspiration Card Workshops. In Proceedings of the 6th Conference on Designing Interactive Systems (University Park, PA, USA) (DIS '06). Association for Computing Machinery, New York, NY, USA, 2-11. https://doi.org/10.1145/1142405.1142409
392
+ [19] Jeremy Hartmann, Christian Holz, Eyal Ofek, and Andrew D. Wilson. 2019. RealityCheck: Blending Virtual Environments with Situated Physical Reality. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems. Association for Computing Machinery, New York, NY, USA, 1-12. https://doi.org/10.1145/3290605.3300577
393
+
394
+ [20] Anuruddha Hettiarachchi and Daniel Wigdor. 2016. Annexing Reality: Enabling Opportunistic Use of Everyday Objects as Tangible Proxies in Augmented Reality. In Proceedings of the 2016 CHI Conference on Human Factors in Computing Systems (CHI '16). Association for Computing Machinery, San Jose, California, USA, 1957-1967. https://doi.org/10.1145/2858036.2858134
395
+ [21] Akira Ishii, Ippei Suzuki, Shinji Sakamoto, Keita Kanai, Kazuki Takazawa, Hiraku Doi, and Yoichi Ochiai. 2016. Optical Marionette: Graphical Manipulation of Human's Walking Direction. In Proceedings of the 29th Annual Symposium on User Interface Software and Technology (UIST '16). Association for Computing Machinery, New York, NY, USA, 705-716. https://doi.org/10.1145/2984511.2984545
396
+ [22] Robert Jungk and Norbert Mullert. 1987. Future Workshops: How to create desirable futures. Inst. for Social Inventions, UK.
397
+ [23] L. Kohli. 2010. Redirected touching: Warping space to remap passive haptics. In 2010 IEEE Symposium on 3D User Interfaces (3DUI). IEEE, Waltham, MA, USA, 129-130. https://doi.org/10.1109/3DUI.2010.5444703
398
+ [24] Richard A. Krueger and Mary Anne Casey. 2015. Focus groups: a practical guide for applied research (5th ed. ed.). Sage Publications, Thousand Oaks, CA.
399
+ [25] Odders Lab. 2020. OhShape, a New VR Rhythm Game. https://ohshapevr.com/
400
+ [26] E. Langbehn, P. Lubos, G. Bruder, and F. Steinicke. 2017. Bending the Curve: Sensitivity to Bending of Curved Paths and Application in Room-Scale VR. IEEE Transactions on Visualization and Computer Graphics 23, 4 (April 2017), 1389-1398. https://doi.org/10.1109/TVCG.2017.2657220 Conference Name: IEEE Transactions on Visualization and Computer Graphics.
401
+ [27] Eike Langbehn, Frank Steinicke, Markus Lappe, Gregory F. Welch, and Gerd Bruder. 2018. In the blink of an eye: leveraging blink-induced suppression for imperceptible position and orientation redirection in virtual reality. ACM Transactions on Graphics 37, 4 (July 2018), 66:1-66:11. https://doi.org/10.1145/3197517.3201335
402
+ [28] Kiron Lebeck, Tadayoshi Kohno, and Franziska Roesner. 2016. How to Safely Augment Reality: Challenges and Directions. In Proceedings of the 17th International Workshop on Mobile Computing Systems and Applications (St. Augustine, Florida, USA) (HotMobile '16). Association for Computing Machinery, New York, NY, USA, 45-50. https://doi.org/10.1145/2873587.2873595
403
+ [29] Kiron Lebeck, Kimberly Ruth, Tadayoshi Kohno, and Franziska Roesner. 2017. Securing Augmented Reality Output. In 2017 IEEE Symposium on Security and Privacy (SP). IEEE, San Jose, CA, USA, 320-337. https://doi.org/10.1109/SP.2017.13 ISSN: 2375-1207.
404
+ [30] Kiron Lebeck, Kimberly Ruth, Tadayoshi Kohno, and Franziska Roesner. 2018. Towards Security and Privacy for Multi-user Augmented Reality: Foundations with End Users. In 2018 IEEE Symposium on Security and Privacy (SP). IEEE, San Francisco, CA, USA, 392-408. https://doi.org/10.1109/SP.2018.00051
405
+ [31] A. Lecuyer, S. Coquillart, A. Kheddar, P. Richard, and P. Coiffet. 2000. Pseudohaptic feedback: can isometric input devices simulate force feedback? In Proceedings IEEE Virtual Reality 2000 (Cat. No.00CB37048). IEEE, New Brunswick, NJ, USA, 83-90. https://doi.org/10.1109/VR.2000.840369 ISSN: 1087-8270.
406
+ [32] David Lindlbauer and Andy D. Wilson. 2018. Remixed Reality: Manipulating Space and Time in Augmented Reality. Association for Computing Machinery, New York, NY, USA, 1-13. https://doi.org/10.1145/3173574.3173703
407
+ [33] Pedro Lopes, Alexandra Ion, and Patrick Baudisch. 2015. Impacto: Simulating Physical Impact by Combining Tactile Stimulation with Electrical Muscle Stimulation. In Proceedings of the 28th Annual ACM Symposium on User Interface Software & Technology (UIST '15). Association for Computing Machinery, New York, NY, USA, 11-19. https://doi.org/10.1145/2807442.2807443
408
+ [34] Anatole Lécuyer, Jean-Marie Burkhardt, and Laurent Etienne. 2004. Feeling bumps and holes without a haptic interface: the perception of pseudo-haptic textures. In Proceedings of the SIGCHI Conference on Human Factors in Computing Systems. Association for Computing Machinery, New York, NY, USA, 239-246. https://doi.org/10.1145/985692.985723
409
+ [35] Michael Madary and Thomas K. Metzinger. 2016. Real Virtuality: A Code of Ethical Conduct. Recommendations for Good Scientific Practice and the Consumers of VR-Technology. Frontiers in Robotics and AI 3 (2016), 3. https://doi.org/10.3389/frobt.2016.00003
410
+ [36] Thomas Markussen and Eva Knutz. 2013. The poetics of design fiction. In Proceedings of the 6th International Conference on Designing Pleasurable Products and Interfaces (DPPI '13). Association for Computing Machinery, New York, NY, USA, 231-240. https://doi.org/10.1145/2513506.2513531
411
+ [37] Michael Meehan, Brent Insko, Mary Whitton, and Frederick P. Brooks. 2002. Physiological measures of presence in stressful virtual environments. In Proceedings of the 29th annual conference on Computer graphics and interactive techniques (SIGGRAPH '02). Association for Computing Machinery, New York, NY, USA, 645-652. https://doi.org/10.1145/566570.566630
412
+ [38] Roberto A. Montano Murillo, Sriram Subramanian, and Diego Martinez Plasencia. 2017. Erg-O: Ergonomic Optimization of Immersive Virtual Environments. In Proceedings of the 30th Annual ACM Symposium on User Interface Software and Technology (UIST '17). Association for Computing Machinery, Quebec City, QC, Canada, 759-771. https://doi.org/10.1145/3126594.3126605
413
+ [39] David Morgan. 1996. Focus groups as qualitative research (2nd ed. ed.). Sage Publications, Thousand Oaks, CA.
414
+
415
+ [40] Blessing Odeleye, George Loukas, Ryan Heartfield, and Fotios Spyridonis. 2021. Detecting framerate-oriented cyber attacks on user experience in virtual reality. In 1st International Workshop on Security for XR and XR for Security. VR4Sec, Vancouver, B.C., Canada, 1-5.
416
+ [41] Joseph O'Hagan, Julie R. Williamson, Mark McGill, and Mohamed Khamis. 2021. Safety, Power Imbalances, Ethics and Proxy Sex: Surveying In-The-Wild Interactions Between VR Users and Bystanders. In 2021 IEEE International Symposium on Mixed and Augmented Reality (ISMAR). IEEE, Bari, Italy, 211-220. https://doi.org/10.1109/ISMAR52148.2021.00036
417
+ [42] Kavya Pearlman. 2020. Virtual Reality Brings Real Risks: Are We Ready? https://www.usenix.org/conference/enigma2020/presentation/pearlman Accessed: 2010-12-01.
418
+ [43] Michael I. Posner, Mary J. Nissen, and Raymond M. Klein. 1976. Visual dominance: An information-processing account of its origins and significance. Psychological Review 83, 2 (1976), 157-171. https://doi.org/10.1037/0033-295X.83.2.157 Place: US Publisher: American Psychological Association.
419
+ [44] Sharif Razzaque, David Swapp, Mel Slater, Mary C. Whitton, and Anthony Steed. 2002. Redirected walking in place. In Proceedings of the workshop on Virtual environments 2002 (EGVE '02). Eurographics Association, Goslar, DEU, 123-130.
420
+ [45] Michael Rietzler, Florian Geiselhart, Jan Gugenheimer, and Enrico Rukzio. 2018. Breaking the Tracking: Enabling Weight Perception using Perceivable Tracking Offsets. In Proceedings of the 2018 CHI Conference on Human Factors in Computing Systems (CHI '18). Association for Computing Machinery, Montreal QC, Canada, 1-12. https://doi.org/10.1145/3173574.3173702
421
+ [46] Michael Rietzler, Jan Gugenheimer, Teresa Hirzle, Martin Deubzer, Eike Langbehn, and Enrico Rukzio. 2018. Rethinking Redirected Walking: On the Use of Curvature Gains Beyond Perceptual Limitations and Revisiting Bending Gains. In 2018 IEEE International Symposium on Mixed and Augmented Reality (ISMAR). IEEE, Munich, Germany, 115-122. https://doi.org/10.1109/ISMAR.2018.00041 ISSN: 1554-7868.
422
+ [47] Franziska Roesner, Tadayoshi Kohno, and David Molnar. 2014. Security and privacy for augmented reality systems. Commun. ACM 57, 4 (April 2014), 88-96. https://doi.org/10.1145/2580723.2580730
423
+ [48] Daniela K. Rosner, Saba Kawas, Wenqi Li, Nicole Tilly, and Yi-Chen Sung. 2016. Out of Time, Out of Place: Reflections on Design Workshops as a Research Method. In Proceedings of the 19th ACM Conference on Computer-Supported Cooperative Work & Social Computing (San Francisco, California, USA) (CSCW '16). Association for Computing Machinery, New York, NY, USA, 1131-1141. https://doi.org/10.1145/2818048.2820021
424
+ [49] Majed Samad, Elia Gatti, Anne Hermes, Hrvoje Benko, and Cesare Parise. 2019. Pseudo-Haptic Weight: Changing the Perceived Weight of Virtual Objects By Manipulating Control-Display Ratio. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI '19). Association for Computing Machinery, Glasgow, Scotland Uk, 1–13. https://doi.org/10.1145/3290605.3300550
425
+ [50] Maria V. Sanchez-Vives and Mel Slater. 2005. From presence to consciousness through virtual reality. Nature Reviews Neuroscience 6, 4 (April 2005), 332-339. https://doi.org/10.1038/nrn1651 Number: 4 Publisher: Nature Publishing Group.
426
+ [51] Lior Shapira and Daniel Freedman. 2016. Reality Skins: Creating Immersive and Tactile Virtual Environments. In 2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR). IEEE, Merida, Mexico, 115-124. https://doi.org/10.1109/ISMAR.2016.23
427
+ [52] Thomas B. Sheridan. 1992. Musings on Telepresence and Virtual Presence. Presence: Teleoperators and Virtual Environments 1, 1 (Jan. 1992), 120-126. https://doi.org/10.1162/pres.1992.1.1.120
428
+ [53] Joon Gi Shin, Doheon Kim, Chaehan So, and Daniel Saakes. 2020. Body Follows Eye: Unobtrusive Posture Manipulation Through a Dynamic Content Position in Virtual Reality. In Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems (CHI '20). Association for Computing Machinery, New York, NY, USA, 1-14. https://doi.org/10.1145/3313831.3376794
429
+ [54] Adalberto L. Simeone, Eduardo Velloso, and Hans Gellersen. 2015. Substitutional Reality: Using the Physical Environment to Design Virtual Reality Experiences. In Proceedings of the 33rd Annual ACM Conference on Human Factors in Computing Systems (CHI '15). Association for Computing Machinery, New York, NY, USA, 3307-3316. https://doi.org/10.1145/2702123.2702389
430
+ [55] Mel Slater. 2009. Place illusion and plausibility can lead to realistic behaviour in immersive virtual environments. Philosophical Transactions of the Royal Society B: Biological Sciences 364, 1535 (Dec. 2009), 3549-3557. https://doi.org/10.1098/rstb.2009.0138 Publisher: Royal Society.
431
+ [56] Mel Slater, Cristina Gonzalez-Liencres, Patrick Haggard, Charlotte Vinkers, Rebecca Gregory-Clarke, Steve Jelley, Zillah Watson, Graham Breen, Raz Schwarz, William Steptoe, Dalila Szostak, Shivashankar Halan, Deborah Fox, and Jeremy Silver. 2020. The Ethics of Realism in Virtual and Augmented Reality. Frontiers in Virtual Reality 1 (2020), 1-13. https://doi.org/10.3389/frvir.2020.00001 Publisher: Frontiers.
432
+ [57] Mel Slater and Sylvia Wilbur. 1997. A Framework for Immersive Virtual Environments (FIVE): Speculations on the Role of Presence in Virtual Environments. Presence: Teleoperators and Virtual Environments 6, 6 (Dec. 1997), 603-616. https://doi.org/10.1162/pres.1997.6.6.603
433
+
434
+ [58] Bernhard Spanlang, Jean-Marie Normand, David Borland, Konstantina Kilteni, Elias Giannopoulos, Ausias Pomés, Mar González-Franco, Daniel Perez-Marcos, Jorge Arroyo-Palacios, Xavi Navarro Muncunill, and Mel Slater. 2014. How to Build an Embodiment Lab: Achieving Body Representation Illusions in Virtual Reality. Frontiers in Robotics and AI 1 (2014), 1-22. https://doi.org/10.3389/frobt.2014.00009 Publisher: Frontiers.
435
+ [59] James S. Spiegel. 2018. The Ethics of Virtual Reality Technology: Social Hazards and Public Policy Recommendations. Science and Engineering Ethics 24, 5 (Oct. 2018), 1537-1550. https://doi.org/10.1007/s11948-017-9979-y
436
+ [60] F. Steinicke, G. Bruder, J. Jerald, H. Frenz, and M. Lappe. 2010. Estimation of Detection Thresholds for Redirected Walking Techniques. IEEE Transactions on Visualization and Computer Graphics 16, 1 (Jan. 2010), 17-27. https://doi.org/10.1109/TVCG.2009.62 Conference Name: IEEE Transactions on Visualization and Computer Graphics.
437
+ [61] Qi Sun, Anjul Patney, Li-Yi Wei, Omer Shapira, Jingwan Lu, Paul Asente, Suwen Zhu, Morgan McGuire, David Luebke, and Arie Kaufman. 2018. Towards virtual reality infinite walking: dynamic saccadic redirection. ACM Transactions on Graphics 37, 4 (July 2018), 67:1-67:13. https://doi.org/10.1145/3197517.3201294
438
+ [62] Martin Usoh, Kevin Arthur, Mary C. Whitton, Rui Bastos, Anthony Steed, Mel Slater, and Frederick P. Brooks. 1999. Walking > walking-in-place > flying, in virtual environments. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques (SIGGRAPH '99). ACM Press/Addison-Wesley Publishing Co., USA, 359-364. https://doi.org/10.1145/311535.311589
439
+ [63] R. J. van Beers, A. C. Sittig, and J. J. Gon. 1999. Integration of proprioceptive and visual position-information: An experimentally supported model. Journal of Neurophysiology 81, 3 (March 1999), 1355-1364. https://doi.org/10.1152/jn.1999.81.3.1355
440
+ [64] Robert J van Beers, Daniel M Wolpert, and Patrick Haggard. 2002. When Feeling Is More Important Than Seeing in Sensorimotor Adaptation. Current Biology 12, 10 (May 2002), 834-837. https://doi.org/10.1016/S0960-9822(02)00836-9
441
+ [65] VR Fitness Insider. 2018. Beat Saber - GANGNAM STYLE PERFECT. https://www.youtube.com/watch?v=SijuABl2nsY
442
+
443
+ [66] D. H. Warren and W. T. Cleaves. 1971. Visual-proprioceptive interaction under large amounts of conflict. Journal of Experimental Psychology 90, 2 (Oct. 1971), 206-214. https://doi.org/10.1037/h0031545
444
+ [67] Séamas Weech, Sophie Kenny, Markus Lenizky, and Michael Barnett-Cowan. 2020. Narrative and gaming experience interact to affect presence and cybersickness in virtual reality. International Journal of Human-Computer Studies 138 (June 2020), 102398. https://doi.org/10.1016/j.ijhcs.2020.102398
445
+ [68] Tyler Wilde. 2017. Man dies in VR accident, reports Russian news agency. https://www.pcgamer.com/man-dies-in-vr-accident-acording-torussian-news-agency/ Accessed: 2021-09-01.
446
+ [69] Graham Wilson and Mark McGill. 2018. Violent Video Games in Virtual Reality: Re-Evaluating the Impact and Rating of Interactive Experiences. In Proceedings of the 2018 Annual Symposium on Computer-Human Interaction in Play (CHI PLAY '18). Association for Computing Machinery, New York, NY, USA, 535-548. https://doi.org/10.1145/3242671.3242684
447
+ [70] Jackie (Junrui) Yang, Christian Holz, Eyal Ofek, and Andrew D. Wilson. 2019. DreamWalker: Substituting Real-World Walking Experiences with a Virtual Reality. In Proceedings of the 32nd Annual ACM Symposium on User Interface Software and Technology (UIST '19). Association for Computing Machinery, New Orleans, LA, USA, 1093-1107. https://doi.org/10.1145/3332165.3347875
448
+ [71] Nick Yee, Jeremy N. Bailenson, and Nicolas Ducheneaut. 2009. The Proteus Effect: Implications of Transformed Digital Self-Representation on Online and Offline Behavior. Communication Research 36, 2 (Jan. 2009), 285–312. https://doi.org/10.1177/0093650208330254 Publisher: SAGE PublicationsSage CA: Los Angeles, CA.
449
+ [72] Rafael Yuste, Jared Genser, and Stephanie Herrmann. 2021. It's Time for NeuroRights . 154-165 pages. https://www.cirsd.org/en/horizons/horizons-winter-2021-issue-no-18/its-time-for-neuro--rights Accessed: 2021-12-01.
450
+ [73] Rafael Yuste, Sara Goering, Guoqiang Bi, Jose M Carmena, Adrian Carter, Joseph J Fins, Phoebe Friesen, Jack Gallant, Jane E Huggins, Judy Illes, et al. 2017. Four ethical priorities for neurotechnologies and AI. Nature News 551, 7679 (2017), 159.
2202.13xxx/2202.13200/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c3b4e0639dd45ecf38b8a9ea064d4300b3af252c2938061d80d6a5ff9ff5f5de
3
+ size 506029
2202.13xxx/2202.13200/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13239/57c352a6-ea56-4510-913e-a92f5c7acb7d_content_list.json ADDED
@@ -0,0 +1,1347 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "QOC: Quantum On-Chip Training with Parameter Shift and Gradient Pruning",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 245,
8
+ 85,
9
+ 751,
10
+ 138
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "$^{1}$ Hanrui Wang*, $^{2}$ Zirui Li*, $^{3}$ Jiaqi Gu, $^{4}$ Yongshan Ding, $^{3}$ David Z. Pan, $^{1}$ Song Han $^{1}$ Massachusetts Institute of Technology, $^{2}$ Rutgers University, $^{3}$ University of Taxes at Austin, $^{4}$ Yale University",
17
+ "bbox": [
18
+ 174,
19
+ 148,
20
+ 821,
21
+ 178
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "https://qmlsys.mit.edu",
28
+ "bbox": [
29
+ 436,
30
+ 184,
31
+ 560,
32
+ 198
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "ABSTRACT",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 75,
42
+ 207,
43
+ 174,
44
+ 220
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Parameterized Quantum Circuits (PQC) are drawing increasing research interest thanks to its potential to achieve quantum advantages on near-term Noisy Intermediate Scale Quantum (NISQ) hardware. In order to achieve scalable PQC learning, the training process needs to be offloaded to real quantum machines instead of using exponential-cost classical simulators. One common approach to obtain PQC gradients is parameter shift whose cost scales linearly with the number of qubits. We present QOC, the first experimental demonstration of practical on-chip PQC training with parameter shift. Nevertheless, we find that due to the significant quantum errors (noises) on real machines, gradients obtained from naive parameter shift have low fidelity and thus degrading the training accuracy. To this end, we further propose probabilistic gradient pruning to firstly identify gradients with potentially large errors and then remove them. Specifically, small gradients have larger relative errors than large ones, thus having a higher probability to be pruned. We perform extensive experiments with the Quantum Neural Network (QNN) benchmarks on 5 classification tasks using 5 real quantum machines. The results demonstrate that our on-chip training achieves over $90\\%$ and $60\\%$ accuracy for 2-class and 4-class image classification tasks. The probabilistic gradient pruning brings up to $7\\%$ PQC accuracy improvements over no pruning. Overall, we successfully obtain similar on-chip training accuracy compared with noise-free simulation but have much better training scalability. The QOC code is available in the TorchQuantum library.",
51
+ "bbox": [
52
+ 73,
53
+ 224,
54
+ 483,
55
+ 571
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "1 INTRODUCTION",
62
+ "text_level": 1,
63
+ "bbox": [
64
+ 75,
65
+ 583,
66
+ 246,
67
+ 597
68
+ ],
69
+ "page_idx": 0
70
+ },
71
+ {
72
+ "type": "text",
73
+ "text": "Quantum Computing (QC) has great potential to achieve exponential acceleration over classical computers, which represents a computational paradigm shift in various domains. Parameterized Quantum Circuits (PQC) are circuits containing trainable weights and are promising to achieve quantum advantages in current devices. Among them, Quantum Neural Network (QNN) is one of the popular algorithms for machine learning tasks.",
74
+ "bbox": [
75
+ 73,
76
+ 601,
77
+ 483,
78
+ 698
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "In order to achieve PQC quantum advantage, the number of qubit needs to be large enough, which casts great difficulty in the parameter training process. In existing PQC work [4, 11], the primary focus has been building quantum models that can outperform classical model accuracy. Thus they typically perform training on classical computers through software simulations and then perform inference with simulators as well (Figure 1 top). Although classical simulation is useful in understanding the capabilities of small-size PQC, it is not",
85
+ "bbox": [
86
+ 73,
87
+ 699,
88
+ 480,
89
+ 810
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "image",
95
+ "img_path": "images/8a7f3dfeb3b852bc5b6645c6ada98ad6117055dc5e50b84a9d324201b8979f27.jpg",
96
+ "image_caption": [
97
+ "Figure 1: In QOC, PQC training and inference are both performed on real quantum machines, making the whole pipeline scalable and practical."
98
+ ],
99
+ "image_footnote": [],
100
+ "bbox": [
101
+ 517,
102
+ 205,
103
+ 939,
104
+ 415
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "text",
110
+ "text": "scalable due to the exponentially increased time and memory costs $(O(2^n), n$ is the qubit number). As shown in Figure 2(a), the space (#Regs) and time (#Ops) complexity of classical simulation grow exponentially as the number of qubits increases. To the authors' knowledge, this is the first experimental demonstration of efficient and scalable PQC on-chip training protocol. The optimization of parametrized quantum gates is offloaded to the quantum chips with in-situ gradient computation using parameter shift [14]. We also perform PQC evaluation on real quantum machines, making the results more practical as in Figure 1 bottom.",
111
+ "bbox": [
112
+ 511,
113
+ 493,
114
+ 921,
115
+ 632
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "text",
121
+ "text": "One of the major challenges to enable scalable and efficient PQC on-chip learning is the robustness against quantum noise. In the current Noisy Intermediate Scale Quantum (NISQ) [16] era, the gate error rates on real quantum devices are non-negligible ( $10^{-3}$ to $10^{-2}$ ). In the context of PQC, such errors will lead to noisy gradients which can slow down convergence or even make training unstable. As shown in Figure 2(b), large gaps exist between the quantum on-chip training results and the classical noise-free simulation results.",
122
+ "bbox": [
123
+ 511,
124
+ 632,
125
+ 921,
126
+ 743
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "By carefully investigating the on-chip training process, we observe that small gradients tend to have large relative variations or even wrong directions under quantum noises, as shown in Figure 2(c). Also, not all gradient computations are necessary for the training process, especially for small-magnitude gradients. Those observations provide great opportunities for us to boost the robustness and efficiency of PQC on-chip learning. Inspired by that, we propose a probabilistic gradient pruning method to predict and only compute gradients of high reliability. Hence we can reduce noise impact and also save the required number of circuit runs on real quantum machines. In this paper, we are mainly using QNNs as benchmarks but the techniques can also be applied to other PQCs such as Variational Quantum Eigensolver (VQE). QOC has following contributions:",
133
+ "bbox": [
134
+ 511,
135
+ 743,
136
+ 921,
137
+ 924
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "aside_text",
143
+ "text": "arXiv:2202.13239v3 [quant-ph] 27 Jan 2025",
144
+ "bbox": [
145
+ 22,
146
+ 260,
147
+ 60,
148
+ 726
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "page_footnote",
154
+ "text": "*Equal Contribution. \nPermission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).",
155
+ "bbox": [
156
+ 73,
157
+ 818,
158
+ 482,
159
+ 878
160
+ ],
161
+ "page_idx": 0
162
+ },
163
+ {
164
+ "type": "page_footnote",
165
+ "text": "DAC '22, July 10-14, 2022, San Francisco, CA, USA",
166
+ "bbox": [
167
+ 75,
168
+ 880,
169
+ 308,
170
+ 891
171
+ ],
172
+ "page_idx": 0
173
+ },
174
+ {
175
+ "type": "page_footnote",
176
+ "text": "© 2022 Copyright held by the owner/author(s).",
177
+ "bbox": [
178
+ 75,
179
+ 892,
180
+ 297,
181
+ 902
182
+ ],
183
+ "page_idx": 0
184
+ },
185
+ {
186
+ "type": "page_footnote",
187
+ "text": "ACM ISBN 978-1-4503-9142-9/22/07.",
188
+ "bbox": [
189
+ 75,
190
+ 902,
191
+ 250,
192
+ 911
193
+ ],
194
+ "page_idx": 0
195
+ },
196
+ {
197
+ "type": "page_footnote",
198
+ "text": "https://doi.org/10.1145/3489517.3530495",
199
+ "bbox": [
200
+ 75,
201
+ 912,
202
+ 264,
203
+ 922
204
+ ],
205
+ "page_idx": 0
206
+ },
207
+ {
208
+ "type": "image",
209
+ "img_path": "images/27f297d8c53ba3f6d629f4e10784f4aaa0ef927647ae90f87bae6f823ca935f1.jpg",
210
+ "image_caption": [],
211
+ "image_footnote": [],
212
+ "bbox": [
213
+ 83,
214
+ 99,
215
+ 267,
216
+ 176
217
+ ],
218
+ "page_idx": 1
219
+ },
220
+ {
221
+ "type": "image",
222
+ "img_path": "images/51e9b2d6da5a633a0fafc746e416ab2071b3b0149b393009f8a43b9314337a0b.jpg",
223
+ "image_caption": [],
224
+ "image_footnote": [],
225
+ "bbox": [
226
+ 281,
227
+ 99,
228
+ 470,
229
+ 175
230
+ ],
231
+ "page_idx": 1
232
+ },
233
+ {
234
+ "type": "image",
235
+ "img_path": "images/9f2b1b94dffdd6c4f2c5e2ff0b91e125e407b67384e7f3f53fa39fa0f607a58e.jpg",
236
+ "image_caption": [
237
+ "(b)",
238
+ "Figure 2: (a) Classical simulation has unscalable computational and memory costs. (b) Noises create significant accuracy gaps between PQC (QNN) classical simulation and on-chip training. (c) Small gradients suffer from larger relative errors, thus being less reliable."
239
+ ],
240
+ "image_footnote": [],
241
+ "bbox": [
242
+ 81,
243
+ 181,
244
+ 277,
245
+ 303
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "image",
251
+ "img_path": "images/92dcaf2cf394b7e876363271f72076654b4c2c8aac7d9fe17406d9342c3f93ce.jpg",
252
+ "image_caption": [
253
+ "(c)"
254
+ ],
255
+ "image_footnote": [],
256
+ "bbox": [
257
+ 277,
258
+ 181,
259
+ 470,
260
+ 303
261
+ ],
262
+ "page_idx": 1
263
+ },
264
+ {
265
+ "type": "list",
266
+ "sub_type": "text",
267
+ "list_items": [
268
+ "- We are the first work to demonstrate the practicality of parameter shift on NISQ machines, achieving high PQC learning accuracy.",
269
+ "- A probabilistic gradient pruning method is proposed to improve the noise robustness by $5 - 7\\%$ and reduce the number of inference on real QC by $2\\times$ while maintaining the accuracy.",
270
+ "- Experimental deployment of QNN on 5 real quantum machines demonstrates that the proposed method can achieve over $90\\%$ and $60\\%$ accuracy for 2-class and 4-class image recognition tasks. Our framework enables scalable, robust, and efficient training of PQCs with large number of qubits and parameters.",
271
+ "- We open-source the parameter shift on-chip PQC training and gradient pruning code in the TorchQuantum library."
272
+ ],
273
+ "bbox": [
274
+ 83,
275
+ 392,
276
+ 482,
277
+ 559
278
+ ],
279
+ "page_idx": 1
280
+ },
281
+ {
282
+ "type": "text",
283
+ "text": "2 BACKGROUND",
284
+ "text_level": 1,
285
+ "bbox": [
286
+ 73,
287
+ 573,
288
+ 235,
289
+ 585
290
+ ],
291
+ "page_idx": 1
292
+ },
293
+ {
294
+ "type": "text",
295
+ "text": "Quantum basics. Quantum circuits use quantum bit (called qubit) to store information, which is a linear combination of two basis states: $|\\psi \\rangle = \\alpha |0\\rangle + \\beta |1\\rangle$ , for $\\alpha, \\beta \\in \\mathbb{C}$ , satisfying $|\\alpha|^2 + |\\beta|^2 = 1$ . An $n$ -qubit system can represent a linear combination of $2^n$ basis states. A $2^n$ -length complex state vector of all combination coefficients is used to describe the quantum state. To perform computation on a quantum system, a sequence of parametrized quantum gates are applied to perform unitary transformation on the statevector, i.e., $|\\psi(x, \\theta)\\rangle = \\dots U_2(x, \\theta_2)U_1(x, \\theta_1)|0\\rangle$ , where $x$ is the input data, and $\\theta = (\\theta_1, \\theta_2, \\ldots)$ are trainable parameters in quantum gates. In this way, input data and trainable parameters are embedded in the quantum state $|\\psi(x, \\theta)\\rangle$ . The computation results are obtained by qubit readout which measures the probability of a qubit state $|\\psi\\rangle$ collapsing to either $|0\\rangle$ (i.e., output $y = +1$ ) or $|1\\rangle$ (i.e., output $y = -1$ ) according to $|\\alpha|^2$ and $|\\beta|^2$ . With sufficient samples, we can compute the expectation value: $\\mathbb{E}[y] = (+1)|\\alpha|^2 + (-1)|\\beta|^2$ . A non-linear network can be constructed to perform ML tasks by cascading multiple blocks of quantum gates and measurements.",
296
+ "bbox": [
297
+ 73,
298
+ 590,
299
+ 482,
300
+ 840
301
+ ],
302
+ "page_idx": 1
303
+ },
304
+ {
305
+ "type": "text",
306
+ "text": "Quantum noise. In real quantum computer systems, errors (noises) would occur due to unwanted interactions between qubits, imperfect control signals, or interference from the environment [6]. For example, quantum gates introduce operation errors (e.g., coherent errors and stochastic errors) into the system, and qubits also suffer from decoherence error (spontaneous loss of its stored information)",
307
+ "bbox": [
308
+ 73,
309
+ 840,
310
+ 482,
311
+ 924
312
+ ],
313
+ "page_idx": 1
314
+ },
315
+ {
316
+ "type": "image",
317
+ "img_path": "images/79869f87b82d7f8f775b10185bf62d078028dd461d25105d17dd2d4c3689239b.jpg",
318
+ "image_caption": [
319
+ "Figure 3: Quantum Neural Network (QNN) architecture."
320
+ ],
321
+ "image_footnote": [],
322
+ "bbox": [
323
+ 535,
324
+ 75,
325
+ 915,
326
+ 189
327
+ ],
328
+ "page_idx": 1
329
+ },
330
+ {
331
+ "type": "text",
332
+ "text": "over time. These noisy systems need to be characterized [13] and calibrated [7] frequently to mitigate the noise impact.",
333
+ "bbox": [
334
+ 511,
335
+ 224,
336
+ 919,
337
+ 252
338
+ ],
339
+ "page_idx": 1
340
+ },
341
+ {
342
+ "type": "text",
343
+ "text": "Quantum neural networks. Quantum Machine Learning (QML) [1, 9, 17, 18, 21] aims to leverage QC techniques to solve machine learning tasks and achieve much higher efficiency. The path to quantum advantage on QML is typically provided by the quantum circuit's ability to generate and estimate highly complex kernels [5], which would otherwise be intractable to compute with conventional computers. They have been shown to have potential speed-up over classical counterparts in various tasks, including metric learning [12], data analysis [10]. As shown in Figure 3, the quantum neural network is one type of QML model using variational quantum circuits with trainable parameters to accomplish feature encoding of input data and perform complex-valued linear transformations thereafter. Most of QNN trainings are exploratory and rely on classical simulation of small quantum systems. In our work, on the contrary, we explore the practical setting: the QNN training and inference are both performed on real quantum devices.",
344
+ "bbox": [
345
+ 511,
346
+ 252,
347
+ 921,
348
+ 474
349
+ ],
350
+ "page_idx": 1
351
+ },
352
+ {
353
+ "type": "text",
354
+ "text": "Pruning. Pruning techniques are widely used in the field of DNN [3, 19, 20, 23], performing an important role of the trade-off between accuracy and memory or time cost [15]. Recently, pruning techniques have been used in quantum tasks. Pruning the ansatz can bring time-efficient circuit and even higher performance on real QC [17]. In our work, we apply pruning techniques to prune unreliable gradients in order to mitigate the noise during training.",
355
+ "bbox": [
356
+ 511,
357
+ 474,
358
+ 921,
359
+ 571
360
+ ],
361
+ "page_idx": 1
362
+ },
363
+ {
364
+ "type": "text",
365
+ "text": "3 METHODOLOGY",
366
+ "text_level": 1,
367
+ "bbox": [
368
+ 514,
369
+ 584,
370
+ 686,
371
+ 597
372
+ ],
373
+ "page_idx": 1
374
+ },
375
+ {
376
+ "type": "text",
377
+ "text": "To enable PQC on-chip learning, we first introduce an in-situ quantum gradient computation via parameter shift and its real QC implementation. A probabilistic gradient pruning method is proposed to save the gradient computation cost with enhanced noise-robustness and training efficiency. We study QNN as the benchmark PQC.",
378
+ "bbox": [
379
+ 511,
380
+ 602,
381
+ 921,
382
+ 672
383
+ ],
384
+ "page_idx": 1
385
+ },
386
+ {
387
+ "type": "text",
388
+ "text": "3.1 Parameter Shift Rule for Quantum Gradients",
389
+ "text_level": 1,
390
+ "bbox": [
391
+ 513,
392
+ 684,
393
+ 919,
394
+ 699
395
+ ],
396
+ "page_idx": 1
397
+ },
398
+ {
399
+ "type": "text",
400
+ "text": "Parameter shift rule states that we can calculate the gradient of each parameter in some quantum circuits by simply shifting the parameter twice and calculating the difference between two outputs, without changing the structure of circuits or using any ancilla qubits. Prior works elaborate it based on quantum circuit function [2], however, in the next subsection we will show how parameter shift rules combined with backpropagation can be used in a real PQC task. Suppose an $m$ -qubit quantum circuit is parametrized by $n$ parameters $\\theta = [\\theta_{1},\\dots ,\\theta_{i},\\dots ,\\theta_{n}]$ , the expectation value of measurements of this circuit can be represented by a circuit function,",
401
+ "bbox": [
402
+ 511,
403
+ 702,
404
+ 921,
405
+ 840
406
+ ],
407
+ "page_idx": 1
408
+ },
409
+ {
410
+ "type": "equation",
411
+ "text": "\n$$\nf (\\theta) = \\left\\langle \\psi \\right| U \\left(\\theta_ {i}\\right) ^ {\\dagger} \\widehat {Q} U \\left(\\theta_ {i}\\right) | \\psi \\rangle , \\quad f (\\theta) \\in \\mathbb {R} ^ {m}, \\theta \\in \\mathbb {R} ^ {n}. \\tag {1}\n$$\n",
412
+ "text_format": "latex",
413
+ "bbox": [
414
+ 566,
415
+ 845,
416
+ 921,
417
+ 862
418
+ ],
419
+ "page_idx": 1
420
+ },
421
+ {
422
+ "type": "text",
423
+ "text": "where $\\theta_{i}$ is the scalar parameter whose gradient is to be calculated, and $U(\\theta_{i})$ is the gate where $\\theta_{i}$ lies in. Here, for notation simplicity, we have already absorbed the unitaries before $U(\\theta_{i})$ into $\\langle \\psi |,\\left|\\psi \\right\\rangle$ Unitaries after $U(\\theta_{i})$ and observables are fused into $\\widehat{Q}$ . Usually, the",
424
+ "bbox": [
425
+ 511,
426
+ 866,
427
+ 921,
428
+ 924
429
+ ],
430
+ "page_idx": 1
431
+ },
432
+ {
433
+ "type": "text",
434
+ "text": "gates used in PQC can be written in the form $U(\\theta_i) = e^{-\\frac{i}{2}\\theta_iH}$ . Here $H$ is the Hermitian generator of $U$ with only 2 unique eigenvalues +1 and -1 ( $H$ 's eigenvalues can be $\\pm r$ , but for simplicity we assume it's $\\pm 1$ ). In this way, the gradients of the circuit function $f$ with respect to $\\theta_i$ are,",
435
+ "bbox": [
436
+ 73,
437
+ 90,
438
+ 480,
439
+ 161
440
+ ],
441
+ "page_idx": 2
442
+ },
443
+ {
444
+ "type": "equation",
445
+ "text": "\n$$\n\\frac {\\partial f (\\theta)}{\\partial \\theta_ {i}} = \\frac {1}{2} \\left(f \\left(\\theta_ {+}\\right) - f \\left(\\theta_ {-}\\right)\\right),\n$$\n",
446
+ "text_format": "latex",
447
+ "bbox": [
448
+ 102,
449
+ 164,
450
+ 264,
451
+ 189
452
+ ],
453
+ "page_idx": 2
454
+ },
455
+ {
456
+ "type": "equation",
457
+ "text": "\n$$\n\\theta_ {+} = \\left[ \\theta_ {1}, \\dots , \\theta_ {i} + \\frac {\\pi}{2}, \\dots , \\theta_ {n} \\right], \\theta_ {-} = \\left[ \\theta_ {1}, \\dots , \\theta_ {i} - \\frac {\\pi}{2}, \\dots , \\theta_ {n} \\right], \\tag {2}\n$$\n",
458
+ "text_format": "latex",
459
+ "bbox": [
460
+ 102,
461
+ 189,
462
+ 480,
463
+ 213
464
+ ],
465
+ "page_idx": 2
466
+ },
467
+ {
468
+ "type": "text",
469
+ "text": "where $\\theta_{+}$ and $\\theta_{-}$ are the positive shift and negative shift of $\\theta$ . Note that this parameter shift rule is fundamentally different from any numerical difference methods that only approximate the directional derivatives. Instead, Eq. 2 calculates the exact gradient w.r.t $\\theta_{i}$ without any approximation errors or numerical issues.",
470
+ "bbox": [
471
+ 73,
472
+ 215,
473
+ 480,
474
+ 282
475
+ ],
476
+ "page_idx": 2
477
+ },
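As a concrete check of the shift rule in Eq. (2) above, the short NumPy sketch below (an editorial illustration, not part of the extracted content) simulates f(θ) = ⟨0| RX(θ)† Z RX(θ) |0⟩ = cos θ directly on the statevector and compares the parameter-shift estimate with the analytic derivative −sin θ.

```python
import numpy as np

# Single-qubit check of the parameter shift rule: f(theta) = cos(theta), so
# 0.5 * (f(theta + pi/2) - f(theta - pi/2)) should equal -sin(theta) exactly.
I = np.eye(2, dtype=complex)
X = np.array([[0, 1], [1, 0]], dtype=complex)
Z = np.array([[1, 0], [0, -1]], dtype=complex)

def rx(theta: float) -> np.ndarray:
    # RX(theta) = cos(theta/2) I - i sin(theta/2) X, as in Eq. (4)
    return np.cos(theta / 2) * I - 1j * np.sin(theta / 2) * X

def f(theta: float) -> float:
    psi = rx(theta) @ np.array([1, 0], dtype=complex)   # RX(theta)|0>
    return float(np.real(psi.conj() @ Z @ psi))         # <psi| Z |psi>

theta = 0.81
shifted = 0.5 * (f(theta + np.pi / 2) - f(theta - np.pi / 2))
print(shifted, -np.sin(theta))   # identical up to floating-point error
```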
478
+ {
479
+ "type": "text",
480
+ "text": "We apply softmax on the expectation values of measurements $f(\\theta)$ as the predicted probability for each class. Then we calculate the cross entropy between the predicted probability distribution $p$ and the target distribution $t$ as the classification loss $\\mathcal{L}$ ,",
481
+ "bbox": [
482
+ 73,
483
+ 284,
484
+ 480,
485
+ 339
486
+ ],
487
+ "page_idx": 2
488
+ },
489
+ {
490
+ "type": "equation",
491
+ "text": "\n$$\n\\mathcal {L} (\\theta) = - t ^ {T} \\cdot \\operatorname {s o f t m a x} (f (\\theta)) = - \\sum_ {j = 1} ^ {m} t _ {j} \\log p _ {j}, \\quad p _ {j} = \\frac {e ^ {f _ {j} (\\theta)}}{\\sum_ {j = 1} ^ {m} e ^ {f _ {j} (\\theta)}}. \\tag {3}\n$$\n",
492
+ "text_format": "latex",
493
+ "bbox": [
494
+ 84,
495
+ 343,
496
+ 482,
497
+ 376
498
+ ],
499
+ "page_idx": 2
500
+ },
501
+ {
502
+ "type": "text",
503
+ "text": "Then the gradient of the loss function with respect to $\\theta_{i}$ is $\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial\\theta_i} = \\left(\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)}\\right)^T\\frac{\\partial f(\\theta)}{\\partial\\theta_i}$ .",
504
+ "bbox": [
505
+ 75,
506
+ 380,
507
+ 480,
508
+ 421
509
+ ],
510
+ "page_idx": 2
511
+ },
512
+ {
513
+ "type": "text",
514
+ "text": "Here $\\frac{\\partial f(\\theta)}{\\partial\\theta_i}$ can be calculated on real quantum circuit by the parameter shift rule, and $\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)}$ can be efficiently calculated on classical devices using backpropagation supported by automatic differentiation frameworks, e.g., PyTorch and TensorFlow.",
515
+ "bbox": [
516
+ 73,
517
+ 421,
518
+ 482,
519
+ 486
520
+ ],
521
+ "page_idx": 2
522
+ },
523
+ {
524
+ "type": "text",
525
+ "text": "Now we derive the parameter shift rule used in our PQC models.",
526
+ "bbox": [
527
+ 89,
528
+ 486,
529
+ 480,
530
+ 500
531
+ ],
532
+ "page_idx": 2
533
+ },
534
+ {
535
+ "type": "text",
536
+ "text": "Assume $U(\\theta_i) = R_X(\\theta_i), R_X(\\alpha) = e^{-\\frac{i}{2}\\alpha X}$ , where $X$ is the Pauli-X matrix.",
537
+ "bbox": [
538
+ 73,
539
+ 500,
540
+ 480,
541
+ 529
542
+ ],
543
+ "page_idx": 2
544
+ },
545
+ {
546
+ "type": "text",
547
+ "text": "Firstly, the RX gate is,",
548
+ "bbox": [
549
+ 89,
550
+ 531,
551
+ 227,
552
+ 544
553
+ ],
554
+ "page_idx": 2
555
+ },
556
+ {
557
+ "type": "equation",
558
+ "text": "\n$$\n\\begin{array}{l} R _ {X} (\\alpha) = e ^ {- \\frac {i}{2} \\alpha X} = \\sum_ {k = 0} ^ {\\infty} (- i \\alpha / 2) ^ {k} X ^ {k} / k! \\\\ = \\sum_ {k = 0} ^ {\\infty} (- i \\alpha / 2) ^ {2 k} X ^ {2 k} / (2 k)! + \\sum_ {k = 0} ^ {\\infty} (- i \\alpha / 2) ^ {2 k + 1} X ^ {2 k + 1} / (2 k + 1)! \\\\ = \\sum_ {k = 0} ^ {\\infty} (- 1) ^ {k} (\\alpha / 2) ^ {2 k} I / (2 k)! - i \\sum_ {k = 0} ^ {\\infty} (- 1) ^ {k} (\\alpha / 2) ^ {2 k + 1} X / (2 k + 1)! \\\\ = \\cos (\\alpha / 2) I - i \\sin (\\alpha / 2) X. \\tag {4} \\\\ \\end{array}\n$$\n",
559
+ "text_format": "latex",
560
+ "bbox": [
561
+ 83,
562
+ 547,
563
+ 482,
564
+ 676
565
+ ],
566
+ "page_idx": 2
567
+ },
568
+ {
569
+ "type": "equation",
570
+ "text": "\n$$\n\\mathrm {L e t} \\alpha = \\frac {\\pi}{2}, R _ {X} (\\pm \\frac {\\pi}{2}) = \\frac {1}{\\sqrt {2}} (I \\mp i X).\n$$\n",
571
+ "text_format": "latex",
572
+ "bbox": [
573
+ 73,
574
+ 676,
575
+ 282,
576
+ 695
577
+ ],
578
+ "page_idx": 2
579
+ },
580
+ {
581
+ "type": "text",
582
+ "text": "As $f(\\theta) = \\langle \\psi |R_X(\\theta_i)^\\dagger \\widehat{Q} R_X(\\theta_i)|\\psi \\rangle ,R_X(\\alpha)R_X(\\beta) = R_X(\\alpha +\\beta),$",
583
+ "bbox": [
584
+ 91,
585
+ 695,
586
+ 482,
587
+ 710
588
+ ],
589
+ "page_idx": 2
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "and $\\frac{\\partial}{\\partial\\alpha} R_X(\\alpha) = -\\frac{i}{2} XR_X(\\alpha)$ , we have",
594
+ "bbox": [
595
+ 75,
596
+ 709,
597
+ 303,
598
+ 726
599
+ ],
600
+ "page_idx": 2
601
+ },
602
+ {
603
+ "type": "equation",
604
+ "text": "\n$$\n\\begin{array}{l} \\frac {\\partial f (\\theta)}{\\partial \\theta_ {i}} = \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} (- \\frac {i}{2} X) ^ {\\dagger} \\widehat {Q} R _ {X} (\\theta_ {i}) | \\psi \\rangle + \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} \\widehat {Q} (- \\frac {i}{2} X) R _ {X} (\\theta_ {i}) | \\psi \\rangle \\\\ = \\frac {1}{4} \\left(\\langle \\psi | R _ {X} \\left(\\theta_ {i}\\right) ^ {\\dagger} (I - i X) ^ {\\dagger} \\widehat {Q} (I - i X) R _ {X} \\left(\\theta_ {i}\\right) | \\psi \\rangle \\right. \\\\ - \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} (I + i X) ^ {\\dagger} \\widehat {Q} (I + i X) R _ {X} (\\theta_ {i}) | \\psi \\rangle) \\\\ = \\frac {1}{2} \\left(\\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} R _ {X} (\\frac {\\pi}{2}) ^ {\\dagger} \\widehat {Q} R _ {X} (\\frac {\\pi}{2}) R _ {X} (\\theta_ {i}) | \\psi \\rangle \\right. \\\\ - \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} R _ {X} (- \\frac {\\pi}{2}) ^ {\\dagger} \\widehat {Q} R _ {X} (- \\frac {\\pi}{2}) R _ {X} (\\theta_ {i}) | \\psi \\rangle) \\\\ = \\frac {1}{2} \\left(f \\left(\\theta_ {+}\\right) - f \\left(\\theta_ {-}\\right)\\right). \\tag {5} \\\\ \\end{array}\n$$\n",
605
+ "text_format": "latex",
606
+ "bbox": [
607
+ 76,
608
+ 728,
609
+ 504,
610
+ 878
611
+ ],
612
+ "page_idx": 2
613
+ },
614
+ {
615
+ "type": "text",
616
+ "text": "Without loss of generality, the derivation holds for all unitaries of the form $e^{-\\frac{i}{2}\\alpha H}$ , e.g., RX, RY, RZ, XX, YY, ZZ, where $H$ is a Hermitian matrix with only 2 unique eigenvalues +1 and -1.",
617
+ "bbox": [
618
+ 73,
619
+ 878,
620
+ 482,
621
+ 924
622
+ ],
623
+ "page_idx": 2
624
+ },
625
+ {
626
+ "type": "image",
627
+ "img_path": "images/d920412fd3afaa79a9f44b2c88621943dcff72106784935c224cc16d32ba2fce.jpg",
628
+ "image_caption": [
629
+ "Figure 4: Quantum gradient calculation using the parameter shift rule on real quantum devices."
630
+ ],
631
+ "image_footnote": [],
632
+ "bbox": [
633
+ 517,
634
+ 89,
635
+ 911,
636
+ 305
637
+ ],
638
+ "page_idx": 2
639
+ },
640
+ {
641
+ "type": "text",
642
+ "text": "In our circuit functions, we assume each parameter lies in exactly one gate. However, there are cases that one parameter lies in multiple gates. In that case, we only need to calculate the gradient of the parameter in those gates separately and sum the gradients up to get the gradient of that parameter.",
643
+ "bbox": [
644
+ 511,
645
+ 344,
646
+ 921,
647
+ 414
648
+ ],
649
+ "page_idx": 2
650
+ },
651
+ {
652
+ "type": "text",
653
+ "text": "3.2 In-situ Gradient Computation on Real QC",
654
+ "text_level": 1,
655
+ "bbox": [
656
+ 513,
657
+ 426,
658
+ 900,
659
+ 443
660
+ ],
661
+ "page_idx": 2
662
+ },
663
+ {
664
+ "type": "text",
665
+ "text": "To realize PQC on-chip learning, we implement a TrainingEngine, described in Alg. 1. This TrainingEngine contains three parts.",
666
+ "bbox": [
667
+ 511,
668
+ 445,
669
+ 921,
670
+ 472
671
+ ],
672
+ "page_idx": 2
673
+ },
674
+ {
675
+ "type": "text",
676
+ "text": "Jacobian calculation via parameter shift. In the first part, we sample a mini-batch of training data $\\mathcal{I}$ in Line 6. For each example of the mini-batch, we set up the quantum encoder gates and then iteratively evaluate gradients for all parameters. In each iteration, we shift the parameter $\\theta_{i}$ twice by $+ \\pi /2$ and $-\\pi /2$ respectively. After each shift, we execute the shifted circuit on quantum hardware. The circuit will be created, validated, queued, and finally run on real quantum machines. As soon as we get the returned results of the two shifted circuits, i.e., $f(\\theta_{+})$ and $f(\\theta_{-})$ , we apply Eq. 2 to obtain the upstream gradient $\\frac{\\partial f(\\theta)}{\\partial \\theta_i}$ , illustrated in the left part of Figure 4.",
677
+ "bbox": [
678
+ 511,
679
+ 473,
680
+ 921,
681
+ 616
682
+ ],
683
+ "page_idx": 2
684
+ },
685
+ {
686
+ "type": "text",
687
+ "text": "Finally, we obtain the Jacobian matrix $\\frac{\\partial f(\\theta)}{\\partial\\theta}$ .",
688
+ "bbox": [
689
+ 513,
690
+ 616,
691
+ 787,
692
+ 633
693
+ ],
694
+ "page_idx": 2
695
+ },
696
+ {
697
+ "type": "text",
698
+ "text": "Down-stream gradient backpropagation. In the second part, we run the circuit without shift and get the measurement result $f(\\theta)$ . Then we apply soft max and cross-entropy function to the measured logits. In the end, we get the training loss $\\mathcal{L}(\\theta)$ . Then we run backpropagation only from the loss to the logits to get the down-stream gradients $\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)}$ , shown in the right part of Figure 4.",
699
+ "bbox": [
700
+ 511,
701
+ 633,
702
+ 921,
703
+ 720
704
+ ],
705
+ "page_idx": 2
706
+ },
707
+ {
708
+ "type": "text",
709
+ "text": "Gradient calculation. In the third part, we calculate the dot-product between down-stream gradients and the Jacobian and get the final gradients $\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial\\theta} = \\left(\\frac{\\partial f(\\theta)}{\\partial\\theta}\\right)^T\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)}$",
710
+ "bbox": [
711
+ 511,
712
+ 720,
713
+ 919,
714
+ 768
715
+ ],
716
+ "page_idx": 2
717
+ },
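The three-part flow of Section 3.2 can be summarized with a hedged NumPy sketch (not the authors' TrainingEngine; a toy circuit function of independent RX rotations stands in for the hardware execution): the Jacobian is built column by column via parameter shift, the downstream gradient comes from classical softmax/cross-entropy backpropagation, and the two are combined with a dot product.

```python
# Sketch of: (1) Jacobian via parameter shift, (2) downstream gradient via classical
# backprop through softmax + cross-entropy, (3) dL/dtheta = (df/dtheta)^T dL/df.
import numpy as np

def circuit_expectations(theta):
    # Toy stand-in for running the PQC on hardware: independent RX rotations,
    # each measured in the Z basis, give expectation values cos(theta_i).
    return np.cos(theta)

def parameter_shift_jacobian(theta):
    n = len(theta)
    jac = np.zeros((n, n))                     # rows: measured outputs, cols: parameters
    for i in range(n):
        plus, minus = theta.copy(), theta.copy()
        plus[i] += np.pi / 2
        minus[i] -= np.pi / 2
        jac[:, i] = 0.5 * (circuit_expectations(plus) - circuit_expectations(minus))
    return jac

def downstream_grad(logits, target_idx):
    # Classical part: for cross-entropy on softmax, dL/df = softmax(f) - onehot(target).
    p = np.exp(logits - logits.max()); p /= p.sum()
    dL_df = p.copy(); dL_df[target_idx] -= 1.0
    return dL_df

theta = np.array([0.3, 1.1])
grad = parameter_shift_jacobian(theta).T @ downstream_grad(circuit_expectations(theta), 0)
print(grad)   # gradient dL/dtheta used for the parameter update
```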
718
+ {
719
+ "type": "text",
720
+ "text": "3.3 Probabilistic Quantum Gradient Pruning",
721
+ "text_level": 1,
722
+ "bbox": [
723
+ 514,
724
+ 779,
725
+ 890,
726
+ 796
727
+ ],
728
+ "page_idx": 2
729
+ },
730
+ {
731
+ "type": "text",
732
+ "text": "On quantum chips, there exist various noises and errors that could potentially diminish the fidelity of the computation results. When the gradient magnitude is small, noises could easily overwhelm the signals, such that the gradients calculated on real quantum circuit become unreliable when they have small magnitude. Those unreliable gradients have harmful effects on training convergence. Skipping the evaluation on those unreliable gradients can benefit both training convergence and efficiency. Besides, we observe that for most parameters, if the gradient magnitudes are far from zero for several",
733
+ "bbox": [
734
+ 511,
735
+ 797,
736
+ 921,
737
+ 922
738
+ ],
739
+ "page_idx": 2
740
+ },
741
+ {
742
+ "type": "image",
743
+ "img_path": "images/7b71244f4229b946a384c54fe381ac936e7270ea771daeb758c425a1ff83505b.jpg",
744
+ "image_caption": [
745
+ "Figure 5: Efficient on-chip quantum gradient calculation with probabilistic gradient pruning. Gradient magnitudes are accumulated within the accumulation window and used as the sampling distribution. Based on the distribution, gradients are probabilistically pruned with a ratio $r$ in the pruning window to mitigate noises and stabilize training."
746
+ ],
747
+ "image_footnote": [],
748
+ "bbox": [
749
+ 112,
750
+ 90,
751
+ 893,
752
+ 234
753
+ ],
754
+ "page_idx": 3
755
+ },
756
+ {
757
+ "type": "text",
758
+ "text": "steps, it will likely keep far from zero in the next several steps. Similarly, if the gradient magnitude remains small for some steps, it will likely keep small in the next several steps. This means the gradient reliability is predictable to some extent. Therefore, we propose the gradient pruning method to sample the parameters whose gradients are more reliable. This method helps training converge faster while also saving time by skipping the evaluation of unreliable gradients.",
759
+ "bbox": [
760
+ 73,
761
+ 313,
762
+ 480,
763
+ 410
764
+ ],
765
+ "page_idx": 3
766
+ },
767
+ {
768
+ "type": "text",
769
+ "text": "Alg. 1 describes the PQC on-chip training flow with probabilistic gradient pruning. We divide all the training steps into $S$ stages and perform the pruning method periodically on each stage. For every stage, we split it into two phases, shown in Figure 5. The first phase is called magnitude accumulation with an accumulation window width $w_{a}$ , and the second is called probabilistic gradient pruning (PGP) with a pruning window width $w_{p}$ . We only apply pruning in the second phase, while the parameter subset is sampled from a probability distribution $\\tilde{\\theta} = \\{\\theta_i \\sim P_M(\\theta) | 1 \\leq i \\leq (1 - r)n\\}$ based on the gradient information collected within the accumulation window.",
770
+ "bbox": [
771
+ 73,
772
+ 410,
773
+ 482,
774
+ 549
775
+ ],
776
+ "page_idx": 3
777
+ },
778
+ {
779
+ "type": "text",
780
+ "text": "In Lines 4-9, within the accumulation window, we record the magnitude of gradients of each parameter in each step and accumulate them until the window is over. At the end of the first phase, we can get an accumulator $M$ that records the accumulated gradient magnitude for each parameter. Thus, when the pruning phase starts, we normalize the accumulated gradient magnitude and pass it to our sampler as the sampling distribution. In each pruning step, the sampler samples a subset of parameters $\\tilde{\\theta}$ with a pruning ratio of $r$ , and we only evaluate gradients for them while the rest $\\theta \\backslash \\tilde{\\theta}$ is temporarily frozen.",
781
+ "bbox": [
782
+ 73,
783
+ 550,
784
+ 482,
785
+ 689
786
+ ],
787
+ "page_idx": 3
788
+ },
789
+ {
790
+ "type": "text",
791
+ "text": "There are three important hyper-parameters in our gradient pruning method: 1) accumulation window width $w_{a}$ , 2) pruning ratio $r$ , and 3) pruning window width $w_{p}$ . The accumulation window width and pruning window width decide the reliability of the gradient trend evaluation and our confidence in it, respectively. The pruning ratio can be tuned to balance the gradient variances caused by noise perturbation and pruning. Thus, the percentage of the time saved by our probabilistic gradient pruning method is $r\\frac{w_p}{w_a + w_p}\\times 100\\%$ . In our experiments, we find that the setting $(w_{a} = 1,w_{p} = 2\\sim 3,r = 0.3\\sim 0.5)$ usually works well in all cases.",
792
+ "bbox": [
793
+ 73,
794
+ 690,
795
+ 482,
796
+ 832
797
+ ],
798
+ "page_idx": 3
799
+ },
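As a concrete illustration of the pruning step (a minimal sketch under our own assumptions, not the paper's implementation), the accumulated magnitudes define a sampling distribution from which (1 - r) * n parameter indices are drawn without replacement; the expected saving then follows the formula above.

```python
# Probabilistic gradient pruning sampler: parameters with larger accumulated gradient
# magnitude are more likely to be kept (evaluated); the rest are frozen for that step.
import numpy as np

def sample_active_parameters(M, r, rng=np.random.default_rng(0)):
    n = len(M)
    keep = int(round((1 - r) * n))            # number of parameters still evaluated
    probs = M / M.sum()                       # normalized accumulated magnitudes
    return rng.choice(n, size=keep, replace=False, p=probs)

M = np.array([0.02, 0.4, 0.05, 0.3, 0.01, 0.25])   # toy accumulator after w_a steps
active = sample_active_parameters(M, r=0.5)
print(sorted(active))   # only these indices get parameter-shift evaluations this step

# Expected fraction of gradient evaluations saved per stage: r * w_p / (w_a + w_p)
w_a, w_p, r = 1, 2, 0.5
print(r * w_p / (w_a + w_p))   # 0.333..., i.e., about 33% fewer circuit runs
```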
800
+ {
801
+ "type": "text",
802
+ "text": "4 EXPERIMENTS",
803
+ "text_level": 1,
804
+ "bbox": [
805
+ 73,
806
+ 863,
807
+ 233,
808
+ 877
809
+ ],
810
+ "page_idx": 3
811
+ },
812
+ {
813
+ "type": "text",
814
+ "text": "In this section, we deploy our PQC on-chip learning framework on real QC and evaluate it on 5 QNN tasks for image and vowel recognition. Compared with classical QNN training protocols, we can",
815
+ "bbox": [
816
+ 73,
817
+ 881,
818
+ 482,
819
+ 924
820
+ ],
821
+ "page_idx": 3
822
+ },
823
+ {
824
+ "type": "code",
825
+ "sub_type": "algorithm",
826
+ "code_caption": [
827
+ "Algorithm 1: PQC On-Chip Training with Probabilistic Gradient Pruning"
828
+ ],
829
+ "code_body": "Input: Accumulation window width $w_{a}$ , gradient pruning ratio $r$ , pruning window width $w_{p}$ , training objective $\\mathcal{L}$ , initial parameters $\\theta^0 \\in \\mathbb{R}^n$ , training data $\\mathcal{D}_{trn}$ , initial step size $\\eta^0$ , and total stages $S$ . $\\theta \\gets \\theta^0$ , $\\eta \\gets \\eta^0$ $t \\gets 0$ ; \nfor $s = 1,2,\\dots,S$ do \nInitialize gradient magnitude accumulator $M \\gets 0^n$ ; \nfor $\\tau_{a} = 1,2,\\dots,w_{a}$ do \n $t \\gets t + 1$ ; \nSample a mini-batch $\\mathcal{I} \\sim \\mathcal{D}_{trn}$ ; \nIn-situ gradient evaluation via parameter shift $\\nabla_{\\theta} \\mathcal{L}_{\\mathcal{I}}(\\theta) = \\frac{1}{2} (\\frac{\\partial f(\\theta)}{\\partial \\theta})^T \\frac{\\partial \\mathcal{L}(\\theta)}{f(\\theta)}$ ; \nParameter update: $\\theta \\gets \\theta - \\eta \\nabla_{\\theta} \\mathcal{L}_{\\mathcal{I}}(\\theta)$ ; \nUpdate magnitude accumulator $M \\gets M + |\\nabla_{\\theta} \\mathcal{L}_{\\mathcal{I}}(\\theta)|$ ; \nfor $\\tau_{p} \\gets 1,2,\\dots,w_{p}$ do \n $t \\gets t + 1$ ; \nSample a mini-batch $\\mathcal{I} \\sim \\mathcal{D}_{trn}$ ; \nSample a subset with a ratio $r$ based on accumulated gradient magnitude: \n $\\tilde{\\theta} = \\{\\theta_i \\sim P_M(\\theta) | 1 \\leq i \\leq (1-r)n\\}$ ; \n $\\tilde{\\theta} \\gets \\tilde{\\theta} - \\eta \\nabla_{\\tilde{\\theta}} \\mathcal{L}_{\\mathcal{I}}(\\theta)$ ; \nOutput: Converged parameters $\\theta$",
830
+ "bbox": [
831
+ 522,
832
+ 345,
833
+ 921,
834
+ 683
835
+ ],
836
+ "page_idx": 3
837
+ },
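The stage structure of Alg. 1 (an accumulation window followed by a pruning window) can be sketched as below. This is a hedged illustration, not the released code: evaluate_gradients and sample_subset are hypothetical helpers standing in for the parameter-shift evaluation on hardware and the magnitude-based sampler, and a quadratic surrogate loss replaces the PQC loss so the snippet runs without a quantum backend.

```python
# One Alg. 1 stage: phase 1 accumulates gradient magnitudes, phase 2 prunes.
import numpy as np

def train_stage(theta, eta, w_a, w_p, r, evaluate_gradients, sample_subset, batches):
    M = np.zeros_like(theta)                          # gradient magnitude accumulator
    for _ in range(w_a):                              # phase 1: magnitude accumulation
        g = evaluate_gradients(theta, next(batches))  # gradients for all parameters
        theta = theta - eta * g
        M += np.abs(g)
    for _ in range(w_p):                              # phase 2: probabilistic pruning
        idx = sample_subset(M, r)                     # keep (1 - r) * n parameters
        g = evaluate_gradients(theta, next(batches))  # in practice: evaluated only for idx
        theta[idx] = theta[idx] - eta * g[idx]        # the rest stay frozen this step
    return theta, M

# Toy usage with a quadratic surrogate loss ||theta - target||^2 standing in for L.
target = np.array([0.5, -0.2, 0.8])
evaluate_gradients = lambda theta, batch: 2 * (theta - target)
def sample_subset(M, r, rng=np.random.default_rng(0)):
    keep = int(round((1 - r) * len(M)))
    return rng.choice(len(M), size=keep, replace=False, p=M / M.sum())

theta, M = train_stage(np.zeros(3), 0.1, w_a=1, w_p=2, r=0.3,
                       evaluate_gradients=evaluate_gradients,
                       sample_subset=sample_subset, batches=iter([None, None, None]))
print(theta)   # moves toward target; pruned entries are updated less often
```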
838
+ {
839
+ "type": "text",
840
+ "text": "achieve $2 - 4\\%$ real QC test accuracy improvement with $2\\times$ convergence speedup. We also conduct extensive ablation studies to validate our scalability and the effectiveness of the proposed probabilistic gradient pruning method.",
841
+ "bbox": [
842
+ 513,
843
+ 710,
844
+ 921,
845
+ 768
846
+ ],
847
+ "page_idx": 3
848
+ },
849
+ {
850
+ "type": "text",
851
+ "text": "4.1 Experiment Setups",
852
+ "text_level": 1,
853
+ "bbox": [
854
+ 514,
855
+ 780,
856
+ 714,
857
+ 796
858
+ ],
859
+ "page_idx": 3
860
+ },
861
+ {
862
+ "type": "text",
863
+ "text": "Benchmarks. We conduct our experiments on 5 QML tasks. QML are all classification tasks including MNIST [8] 4-class (0, 1, 2, 3), 2-class (3 and 6); Fashion [22] 4-class (t-shirt/top, trouser, pullover, dress), 2-class (dress and shirt); Vowel 4-class(hid, hId, had, hOd). MNIST and Fashion 2-class use the front 500 images as the training set and randomly sampled 300 images as the validation set. MNIST, Fashion 4-class uses the front 100 images as the training set and also randomly sampled 300 images as the validation set. The input images are all $28 \\times 28$ . We firstly center-crop them to $24 \\times 24$ and",
864
+ "bbox": [
865
+ 511,
866
+ 797,
867
+ 921,
868
+ 924
869
+ ],
870
+ "page_idx": 3
871
+ },
872
+ {
873
+ "type": "table",
874
+ "img_path": "images/8a283280dddb8cf4ef099e56b402058b233c82683b62b845760c4404065d65bc.jpg",
875
+ "table_caption": [
876
+ "Table 1: Accuracy comparison among different settings. \"Simu.\" represents \"simulation\"."
877
+ ],
878
+ "table_footnote": [],
879
+ "table_body": "<table><tr><td>Method</td><td>Acc.</td><td>MNIST-4 Jarkata</td><td>MNIST-2 Jarkata</td><td>Fashion-4 Manila</td><td>Fashion-2 Santiago</td><td>Vowel-4 Lima</td></tr><tr><td>Classical-Train</td><td>Simu.</td><td>0.61</td><td>0.88</td><td>0.73</td><td>0.89</td><td>0.37</td></tr><tr><td>Classical-Train</td><td></td><td>0.59</td><td>0.79</td><td>0.54</td><td>0.89</td><td>0.31</td></tr><tr><td>QC-Train</td><td>QC</td><td>0.59</td><td>0.83</td><td>0.49</td><td>0.84</td><td>0.34</td></tr><tr><td>QC-Train-PGP</td><td></td><td>0.64</td><td>0.86</td><td>0.57</td><td>0.91</td><td>0.36</td></tr></table>",
880
+ "bbox": [
881
+ 75,
882
+ 119,
883
+ 483,
884
+ 205
885
+ ],
886
+ "page_idx": 4
887
+ },
888
+ {
889
+ "type": "image",
890
+ "img_path": "images/6dc9df553ec0022527cd548afb03ea5e91d37327c1c3c07294e670e00977b875.jpg",
891
+ "image_caption": [
892
+ "(a)",
893
+ "Figure 6: Real QC validation accuracy curves on different datasets and different quantum devices."
894
+ ],
895
+ "image_footnote": [],
896
+ "bbox": [
897
+ 78,
898
+ 205,
899
+ 277,
900
+ 328
901
+ ],
902
+ "page_idx": 4
903
+ },
904
+ {
905
+ "type": "image",
906
+ "img_path": "images/9b4e5f35cdae9569e43076fd8d83eca650ba3b00c3048cd2a1df8c99d0c51902.jpg",
907
+ "image_caption": [
908
+ "(b)"
909
+ ],
910
+ "image_footnote": [],
911
+ "bbox": [
912
+ 279,
913
+ 213,
914
+ 480,
915
+ 328
916
+ ],
917
+ "page_idx": 4
918
+ },
919
+ {
920
+ "type": "text",
921
+ "text": "then down-sample them to $4 \\times 4$ for MNIST and Fashion 2 and 4-class tasks. Vowel 4-class uses the front 100 samples as the training set and randomly sampled 300 samples as the validation set. For each sample, we perform principal component analysis (PCA) for the vowel features and take the 10 most significant dimensions.",
922
+ "bbox": [
923
+ 73,
924
+ 383,
925
+ 482,
926
+ 452
927
+ ],
928
+ "page_idx": 4
929
+ },
930
+ {
931
+ "type": "text",
932
+ "text": "All the tasks use four logical qubits. To embed classical image and vowel features to the quantum states, we first flatten them and then encode them with rotation gates. For down-sampled $4 \\times 4$ images, we use 4RY, 4RZ, 4RX, and 4RY gates as the encoder. We put the 16 classical input values to the phases of 16 rotation gates, respectively. Therefore we can encode the classical values to quantum states. For 10 vowel features, we use 4RY, 4RZ, and 2RX gates for encoding.",
933
+ "bbox": [
934
+ 73,
935
+ 452,
936
+ 482,
937
+ 549
938
+ ],
939
+ "page_idx": 4
940
+ },
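A minimal sketch of this rotation-based encoding (our own representation of gate, wire, and phase as plain tuples, not a TorchQuantum or Qiskit API) maps the 16 down-sampled pixel values, in order, onto the phases of the 4 RY, 4 RZ, 4 RX, and 4 RY gates.

```python
# Encoder sketch: 16 classical values -> phases of 16 rotation gates on 4 qubits.
import numpy as np

def build_encoder(pixels):                      # pixels: flattened 4x4 image
    assert len(pixels) == 16
    layers = ["ry", "rz", "rx", "ry"]           # 4 gates of each kind, one per wire
    ops = []
    for layer_idx, gate in enumerate(layers):
        for wire in range(4):
            ops.append((gate, wire, float(pixels[4 * layer_idx + wire])))
    return ops

image = np.linspace(0, np.pi, 16)               # toy 4x4 image, already scaled to phases
for op in build_encoder(image)[:5]:
    print(op)   # e.g. ('ry', 0, 0.0), ('ry', 1, 0.209...), ...
```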
941
+ {
942
+ "type": "text",
943
+ "text": "The encoding gates are our hand-designed circuits. Our circuits are composed of several layers. There are 7 kinds of layers used to construct our circuits. (i) RX layer: Add RX gates to all wires; (ii) RY layer: same structure as in RX layer; (iii) RZ layer: same structure as in RX layer; (iv) RZZ layer: add RZZ gates to all logical adjacent wires and the logical farthest wires to form a ring connection, for example, an RZZ layer in a 4-qubit circuit contains 4 RZZ gates which lie on wires 1 and 2, 2 and 3, 3 and 4, 4 and 1; (v) RXX layer: same structure as in RZZ layer; (vi) RZX layer: same structure as in RZZ layer; (vii) CZ layer: add CZ gates to all logical adjacent wires.",
944
+ "bbox": [
945
+ 73,
946
+ 549,
947
+ 482,
948
+ 686
949
+ ],
950
+ "page_idx": 4
951
+ },
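The ring connection used by the two-qubit layers (RZZ, RXX, RZX) can be written as a simple pair list; this is only an illustration of the wiring pattern, with 0-indexed wires, not code from the paper.

```python
# Ring-connected two-qubit layer: gates on adjacent wires plus one gate closing the ring.
def ring_layer_pairs(n_wires):
    return [(w, (w + 1) % n_wires) for w in range(n_wires)]

print(ring_layer_pairs(4))   # [(0, 1), (1, 2), (2, 3), (3, 0)]
```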
952
+ {
953
+ "type": "text",
954
+ "text": "For MNIST and Fashion 2-class tasks, the circuit contains 1 RZZ layer followed by 1 RY layer. For MNIST 4-class task, the circuit contains 3 RX+RY+RZ+CZ layers (1 RX layer, 1 RY layer, 1 RZ layer, and 1 CZ layer in series). For Fashion 4-class task, the circuit contains 3 RZZ+RY layers (1 RZZ layer followed by 1 RY layer). For Vowel 4-class task, the circuit contains 2 RZZ+RXX layers (1 RZZ layer followed by 1 RXX layer).",
955
+ "bbox": [
956
+ 73,
957
+ 686,
958
+ 482,
959
+ 785
960
+ ],
961
+ "page_idx": 4
962
+ },
963
+ {
964
+ "type": "text",
965
+ "text": "For the output of our quantum circuits, we measure the expectation values on Pauli-Z basis and obtain a value $[-1, 1]$ from each qubit. For 2-class, we sum the qubit 0 and 1, 2, and 3 respectively to get 2 output values. For 4-class, we just use the four expectation values as 4 output values. Then we process the output values by Softmax to get probabilities.",
966
+ "bbox": [
967
+ 73,
968
+ 785,
969
+ 482,
970
+ 867
971
+ ],
972
+ "page_idx": 4
973
+ },
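The post-processing of the measured expectation values can be sketched as follows (our own illustration, assuming the 2-class qubit grouping described above).

```python
# From per-qubit Pauli-Z expectation values to logits and class probabilities.
import numpy as np

def logits_from_expectations(expvals, num_classes):
    expvals = np.asarray(expvals)               # four values in [-1, 1]
    if num_classes == 2:
        return np.array([expvals[0] + expvals[1], expvals[2] + expvals[3]])
    return expvals                               # 4-class: one logit per qubit

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

print(softmax(logits_from_expectations([0.2, -0.5, 0.7, 0.1], num_classes=2)))
```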
974
+ {
975
+ "type": "text",
976
+ "text": "Quantum devices and compiler configurations. We use IBM quantum computers via qiskit API [7] to submit our circuits to real superconducting quantum devices and achieve quantum on-chip training. We set all the circuits to run 1024 shots.",
977
+ "bbox": [
978
+ 73,
979
+ 867,
980
+ 482,
981
+ 922
982
+ ],
983
+ "page_idx": 4
984
+ },
985
+ {
986
+ "type": "image",
987
+ "img_path": "images/11175589a8bbca9222647d57c9d8ce68c9d366f2aa368b71994613d8148dc5e3.jpg",
988
+ "image_caption": [
989
+ "Figure 7: Ablation on pruning ratio, accumulation window width, and pruning window width."
990
+ ],
991
+ "image_footnote": [],
992
+ "bbox": [
993
+ 514,
994
+ 89,
995
+ 647,
996
+ 205
997
+ ],
998
+ "page_idx": 4
999
+ },
1000
+ {
1001
+ "type": "image",
1002
+ "img_path": "images/6ad97fb2fbe9faba587bf682a69612f0e987226a1128335905ee091ab2f1ac51.jpg",
1003
+ "image_caption": [],
1004
+ "image_footnote": [],
1005
+ "bbox": [
1006
+ 650,
1007
+ 89,
1008
+ 782,
1009
+ 205
1010
+ ],
1011
+ "page_idx": 4
1012
+ },
1013
+ {
1014
+ "type": "image",
1015
+ "img_path": "images/0259719eeafb9b7f427410b9a973bfe620bdb12843a2922dae38bddd29810452.jpg",
1016
+ "image_caption": [],
1017
+ "image_footnote": [],
1018
+ "bbox": [
1019
+ 787,
1020
+ 89,
1021
+ 919,
1022
+ 205
1023
+ ],
1024
+ "page_idx": 4
1025
+ },
1026
+ {
1027
+ "type": "text",
1028
+ "text": "Baseline. We have two baselines. (1) QC-Train: We train our model without gradient pruning, i.e., calculating gradients of every parameter in each step. The gradient calculation is deployed on real quantum circuits. (2) Classical-Train: We train our QNN model completely on classical computers. We use a vector to record the amplitudes of the quantum state, utilize complex matrix multiplication to simulate quantum gates, and sample based on the amplitude vector to simulate quantum measurement.",
1029
+ "bbox": [
1030
+ 511,
1031
+ 239,
1032
+ 921,
1033
+ 349
1034
+ ],
1035
+ "page_idx": 4
1036
+ },
1037
+ {
1038
+ "type": "text",
1039
+ "text": "The QC-Train-PGP line shows training on real quantum circuits while applying our probabilistic gradient pruning. In all the cases, we adopt accumulation window size 1, pruning ratio 0.5, and pruning window size 2, except for Fashion-4, we adopt pruning ratio 0.7, and other settings remain the same.",
1040
+ "bbox": [
1041
+ 511,
1042
+ 351,
1043
+ 921,
1044
+ 420
1045
+ ],
1046
+ "page_idx": 4
1047
+ },
1048
+ {
1049
+ "type": "text",
1050
+ "text": "4.2 Main Results",
1051
+ "text_level": 1,
1052
+ "bbox": [
1053
+ 514,
1054
+ 431,
1055
+ 666,
1056
+ 445
1057
+ ],
1058
+ "page_idx": 4
1059
+ },
1060
+ {
1061
+ "type": "text",
1062
+ "text": "QNN results. Table 1 shows the accuracy of comparison on 5 tasks. In each task, we show 4 accuracy values, which are (1) accuracy of Classical-Train tested on classical devices, (2) accuracy of Classical-Train tested on real quantum circuits; (3) accuracy of QC-Train tested on real quantum circuits; (4) accuracy of QC-Train-PGP tested on real quantum circuits. In each task, the accuracy is collected after finishing a certain number of circuit runs. We train and evaluate MNIST-2 and MNIST-2 on ibmq_jakarta, Fashion-4 on ibmq_manila, Fashion-2 on ibmq_santiago, and Vowel-4 on ibmq_lima.",
1063
+ "bbox": [
1064
+ 511,
1065
+ 450,
1066
+ 921,
1067
+ 574
1068
+ ],
1069
+ "page_idx": 4
1070
+ },
1071
+ {
1072
+ "type": "text",
1073
+ "text": "The noise-free accuracy is usually the highest among the other three, because it represents the accuracy without any noise perturbation. The QC-Train-PGP usually takes second place because compared to Classical-Train, it has the advantage of noise awareness, and compared to QC-Train, it suffers less from noise thanks to gradient pruning.",
1074
+ "bbox": [
1075
+ 511,
1076
+ 574,
1077
+ 921,
1078
+ 657
1079
+ ],
1080
+ "page_idx": 4
1081
+ },
1082
+ {
1083
+ "type": "text",
1084
+ "text": "Training curves. Figure 6 shows the real QC validation accuracy curve during training. The X-axis is the number of inferences (how many circuits have been run). The Y-axis is the accuracy of the validation dataset tested on real quantum circuits. MNIST 4-class runs on the ibmq_jakarta machine. We observe that given a fixed inference budget, our QC-Train-PGP achieves the best accuracy of $63.7\\%$ while the Classical-Train only achieves $59.3\\%$ .",
1085
+ "bbox": [
1086
+ 511,
1087
+ 657,
1088
+ 921,
1089
+ 753
1090
+ ],
1091
+ "page_idx": 4
1092
+ },
1093
+ {
1094
+ "type": "text",
1095
+ "text": "We further train Fashion 2-class on ibmq_santiago. QC-Train-PGP only takes $13.9\\mathrm{k}$ inferences to reach the peak accuracy $90.7\\%$ while the best accuracy Classical-Train can achieve is merely $88.7\\%$ at the cost of over $30\\mathrm{k}$ inferences.",
1096
+ "bbox": [
1097
+ 511,
1098
+ 753,
1099
+ 921,
1100
+ 809
1101
+ ],
1102
+ "page_idx": 4
1103
+ },
1104
+ {
1105
+ "type": "text",
1106
+ "text": "4.3 Ablation Studies",
1107
+ "text_level": 1,
1108
+ "bbox": [
1109
+ 514,
1110
+ 821,
1111
+ 692,
1112
+ 835
1113
+ ],
1114
+ "page_idx": 4
1115
+ },
1116
+ {
1117
+ "type": "text",
1118
+ "text": "Ablation on gradient pruning. In Figure 7, we evaluate the training performance with different pruning ratios $r$ , accumulation window size $w_{a}$ , and pruning window size $w_{p}$ on Fashion-4 and MNIST-2 tasks. We find that the $r = 0.5$ is generally a good setting for our tasks. Overly large pruning ratios will induce too many gradient variances that harm the training convergence. For the accumulation window",
1119
+ "bbox": [
1120
+ 511,
1121
+ 840,
1122
+ 921,
1123
+ 922
1124
+ ],
1125
+ "page_idx": 4
1126
+ },
1127
+ {
1128
+ "type": "table",
1129
+ "img_path": "images/16ce4f93a3a82210fc82426be8df037c57f4129a718d328d0a9fa20ee4b71bd4.jpg",
1130
+ "table_caption": [
1131
+ "Table 2: The proposed probabilistic pruning is better than deterministic pruning."
1132
+ ],
1133
+ "table_footnote": [],
1134
+ "table_body": "<table><tr><td>Method</td><td>MNIST-4</td><td>MNIST-2</td><td>Fashion-4</td><td>Fashion-2</td></tr><tr><td>Deterministic</td><td>0.61</td><td>0.82</td><td>0.72</td><td>0.89</td></tr><tr><td>Probabilistic</td><td>0.62</td><td>0.85</td><td>0.79</td><td>0.90</td></tr></table>",
1135
+ "bbox": [
1136
+ 73,
1137
+ 119,
1138
+ 480,
1139
+ 178
1140
+ ],
1141
+ "page_idx": 5
1142
+ },
1143
+ {
1144
+ "type": "table",
1145
+ "img_path": "images/42190921cf545d8f3742341ce397d7cc51ef012d5b2dfd36b01ceb7b6cb658d9.jpg",
1146
+ "table_caption": [
1147
+ "Table 3: Adam optimizer can outperform SGD and Momentum optimizers."
1148
+ ],
1149
+ "table_footnote": [],
1150
+ "table_body": "<table><tr><td>Optimizer</td><td>MNIST-4</td><td>MNIST-2</td><td>Fashion-4</td><td>Fashion-2</td></tr><tr><td>SGD</td><td>0.5</td><td>0.8</td><td>0.45</td><td>76</td></tr><tr><td>Momentum</td><td>0.55</td><td>0.83</td><td>0.66</td><td>0.90</td></tr><tr><td>Adam</td><td>0.61</td><td>0.88</td><td>0.75</td><td>0.91</td></tr></table>",
1151
+ "bbox": [
1152
+ 75,
1153
+ 214,
1154
+ 480,
1155
+ 289
1156
+ ],
1157
+ "page_idx": 5
1158
+ },
1159
+ {
1160
+ "type": "text",
1161
+ "text": "size, $w_{a} = 1$ or 2 are suitable choices. When $w_{a}$ is too large, the accumulated gradient magnitudes are similar among all parameters, leading to a nearly uniform sampling distribution. This will bring undifferentiated pruning, and the accuracy will drop as the Fashion-4 curve shows. The pruning window $w_{p}$ should also not be too large. As $w_{p}$ grows, the accumulated gradient magnitudes used to instruct our pruning become less reliable.",
1162
+ "bbox": [
1163
+ 73,
1164
+ 296,
1165
+ 480,
1166
+ 393
1167
+ ],
1168
+ "page_idx": 5
1169
+ },
1170
+ {
1171
+ "type": "text",
1172
+ "text": "Discussion on scalability. Figure 8 shows the superior scalability of quantum on-chip training. Classical simulation runtime exponentially increases as #qubits scales up, while the runtime on real quantum machines scales nearly linearly to #qubits. The classical curve in Figure 8 represents runtime and memory cost of running 50 circuits of different #qubits with 16 rotation gates and 32 RZZ gates. The curve before 22 qubits is measured on a single NVIDIA RTX 2080 Ti GPU; points after 24 qubits are extrapolated. The quantum curve before 27 qubits is tested on ibmq_toronto; the points after 30 qubits are extrapolated.",
1173
+ "bbox": [
1174
+ 73,
1175
+ 393,
1176
+ 480,
1177
+ 531
1178
+ ],
1179
+ "page_idx": 5
1180
+ },
1181
+ {
1182
+ "type": "text",
1183
+ "text": "We can observe clear quantum advantages on circuits with more than 27 qubits. In terms of memory cost, classical simulation consumes thousands of Gigabits for storage which is intractable. In contrast, on quantum machines, the information is stored in the quantum state of the circuit itself with negligible memory cost.",
1184
+ "bbox": [
1185
+ 73,
1186
+ 532,
1187
+ 480,
1188
+ 601
1189
+ ],
1190
+ "page_idx": 5
1191
+ },
1192
+ {
1193
+ "type": "text",
1194
+ "text": "Probabilistic vs. deterministic gradient pruning. Our pruning is decided by a random sampler based on the accumulated gradient magnitude. We call this probabilistic pruning. If the sampler only samples the parameters with the biggest accumulated gradient magnitude, this is called deterministic pruning. We adopt probabilistic pruning instead of deterministic pruning because deterministic pruning limits the degree of freedom and increases the gradient sampling bias. Table 2 shows that deterministic pruning has $1\\% -7\\%$ accuracy loss compared with probabilistic pruning.",
1195
+ "bbox": [
1196
+ 73,
1197
+ 601,
1198
+ 480,
1199
+ 726
1200
+ ],
1201
+ "page_idx": 5
1202
+ },
1203
+ {
1204
+ "type": "text",
1205
+ "text": "Different optimizers. Table 3 shows the accuracy tested on classical devices trained with different optimizers. The learning rate is controlled by a cosine scheduler from 0.3 in the beginning to 0.03 in the end. We test SGD, SGD with a momentum factor of 0.8, and Adam on MNIST-4, MNIST-2, Fashion-4, and Fashion-2, and found that Adam always performs the best. Hence, all the experiments are done using Adam optimizers by default.",
1206
+ "bbox": [
1207
+ 73,
1208
+ 726,
1209
+ 480,
1210
+ 823
1211
+ ],
1212
+ "page_idx": 5
1213
+ },
1214
+ {
1215
+ "type": "text",
1216
+ "text": "5 CONCLUSION",
1217
+ "text_level": 1,
1218
+ "bbox": [
1219
+ 73,
1220
+ 835,
1221
+ 223,
1222
+ 849
1223
+ ],
1224
+ "page_idx": 5
1225
+ },
1226
+ {
1227
+ "type": "text",
1228
+ "text": "In this work, for the first time, we present an efficient and robust on-chip training framework for PQC and demonstrate its effectiveness on real quantum devices. By leveraging parameter shift, we can calculate the exact quantum gradients directly on quantum machines, thus achieving high scalability. To alleviate the negative impact of",
1229
+ "bbox": [
1230
+ 73,
1231
+ 853,
1232
+ 483,
1233
+ 925
1234
+ ],
1235
+ "page_idx": 5
1236
+ },
1237
+ {
1238
+ "type": "image",
1239
+ "img_path": "images/5de9f81a763849e82fcb43bb1aef0a8a4850d5ba22e54df6c64e9a780ca516e9.jpg",
1240
+ "image_caption": [
1241
+ "Figure 8: Runtime and memory cost comparison between classical simulation and quantum on-chip run."
1242
+ ],
1243
+ "image_footnote": [],
1244
+ "bbox": [
1245
+ 517,
1246
+ 89,
1247
+ 705,
1248
+ 199
1249
+ ],
1250
+ "page_idx": 5
1251
+ },
1252
+ {
1253
+ "type": "image",
1254
+ "img_path": "images/7c89c05dc38e3b64c5ffe7db360e8c124643990ba88e4c349b884f9d4518d173.jpg",
1255
+ "image_caption": [],
1256
+ "image_footnote": [],
1257
+ "bbox": [
1258
+ 725,
1259
+ 89,
1260
+ 919,
1261
+ 198
1262
+ ],
1263
+ "page_idx": 5
1264
+ },
1265
+ {
1266
+ "type": "text",
1267
+ "text": "quantum noises on gradients, we further propose the probabilistic gradient pruning technique to avoid updating parameters with unreliable gradients. Experimental results on 5 classification tasks and 5 machines demonstrate that QOC achieves comparable accuracy with noise-free simulations. We hope QOC can open an avenue towards practical training of large PQC models for quantum advantage.",
1268
+ "bbox": [
1269
+ 511,
1270
+ 232,
1271
+ 921,
1272
+ 316
1273
+ ],
1274
+ "page_idx": 5
1275
+ },
1276
+ {
1277
+ "type": "text",
1278
+ "text": "ACKNOWLEDGMENT",
1279
+ "text_level": 1,
1280
+ "bbox": [
1281
+ 514,
1282
+ 327,
1283
+ 702,
1284
+ 340
1285
+ ],
1286
+ "page_idx": 5
1287
+ },
1288
+ {
1289
+ "type": "text",
1290
+ "text": "We acknowledge NSF CAREER Award #1943349, MIT-IBM Watson AI Lab, Baidu Fellowship, Qualcomm Innovation Fellowship, and IBM Quantum.",
1291
+ "bbox": [
1292
+ 513,
1293
+ 345,
1294
+ 921,
1295
+ 372
1296
+ ],
1297
+ "page_idx": 5
1298
+ },
1299
+ {
1300
+ "type": "text",
1301
+ "text": "REFERENCES",
1302
+ "text_level": 1,
1303
+ "bbox": [
1304
+ 514,
1305
+ 381,
1306
+ 633,
1307
+ 395
1308
+ ],
1309
+ "page_idx": 5
1310
+ },
1311
+ {
1312
+ "type": "list",
1313
+ "sub_type": "ref_text",
1314
+ "list_items": [
1315
+ "[1] Jacob Biamonte, Peter Wittek, Nicola Pancotti, Patrick Rebentrost, Nathan Wiebe, and Seth Lloyd. 2017. Quantum machine learning. Nature 549, 7671 (2017).",
1316
+ "[2] Gavin E Crooks. 2019. Gradients of parameterized quantum gates using the parameter-shift rule and gate decomposition. arXiv:1905.13311 (2019).",
1317
+ "[3] Song Han, Huizi Mao, and William J Dally. 2015. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149 (2015).",
1318
+ "[4] Aram W Harrow, Avinatan Hassidim, and Seth Lloyd. 2009. Quantum algorithm for linear systems of equations. Physical review letters 103, 15 (2009), 150502.",
1319
+ "[5] Vojtěch Havlíček et al. 2019. Supervised learning with quantum-enhanced feature spaces. Nature 567, 7747 (2019), 209-212.",
1320
+ "[6] Cheng-Yun Hsieh, Chen-Hung Wu, Chia-Hsien Huang, His-Sheng Goan, and James Chien Mo Li. 2020. Realistic fault models and fault simulation for quantum dot quantum circuits. In 2020 57th (DAC). IEEE, 1-6.",
1321
+ "[7] Qiskit IBM. [n.d.].",
1322
+ "[8] Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner. 1998. Gradient-based learning applied to document recognition. Proc. IEEE 86, 11 (1998), 2278-2324.",
1323
+ "[9] Zhiding Liang, Zhepeng Wang, Junhuan Yang, Lei Yang, Yiyu Shi, and Weiwen Jiang. 2021. Can Noise on Qubits Be Learned in Quantum Neural Network? A Case Study on QuantumFlow. In ICCAD. IEEE, 1-7.",
1324
+ "[10] Seth Lloyd, Silvano Garnerone, and Paolo Zanardi. 2016. Quantum algorithms for topological and geometric analysis of data. Nature communications 7, 1 (2016).",
1325
+ "[11] Seth Lloyd, Masoud Mohseni, and Patrick Rebentrost. 2013. Quantum algorithms for supervised and unsupervised machine learning. arXiv:1307.0411 (2013).",
1326
+ "[12] Seth Lloyd, Maria Schuld, Aroosa Ijaz, Josh Izaac, and Nathan Killoran. 2020. Quantum embeddings for machine learning. arXiv:2001.03622 (2020).",
1327
+ "[13] Easwar Magesan, Jay M Gambetta, and Joseph Emerson. 2012. Characterizing quantum gates via randomized benchmarking. Physical Review A 85, 4 (2012).",
1328
+ "[14] Kosuke Mitarai, Makoto Negoro, Masahiro Kitagawa, and Keisuke Fujii. 2018. Quantum circuit learning. Physical Review A (2018).",
1329
+ "[15] Le Thanh Nguyen-Meidine et al. 2020. Progressive Gradient Pruning for Classification, Detection and DomainAdaptation. arXiv:1906.08746 [cs.LG]",
1330
+ "[16] John Preskill. 2018. Quantum Computing in the NISQ era and beyond. Quantum 2 (2018), 79.",
1331
+ "[17] Hanrui Wang, Yongshan Ding, Jiaqi Gu, Yujun Lin, David Z Pan, Frederic T Chong, and Song Han. 2022. QuantumNAS: Noise-adaptive search for robust quantum circuits. HPCA (2022).",
1332
+ "[18] Hanrui Wang, Jiaqi Gu, Yongshan Ding, Zirui Li, Frederic T Chong, David Z Pan, and Song Han. 2022. QuantumNAT: Quantum Noise-Aware Training with Noise Injection, Quantization and Normalization. DAC (2022).",
1333
+ "[19] Hanrui Wang, Zhekai Zhang, and Song Han. 2021. SpAtten: Efficient sparse attention architecture with cascade token and head pruning. In HPCA. IEEE.",
1334
+ "[20] Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Hanrui Wang, Yujun Lin, and Song Han. 2020. Apq: Joint search for network architecture, pruning and quantization policy. In CVPR.",
1335
+ "[21] Zhepeng Wang, Zhiding Liang, Shanglin Zhou, et al. 2021. Exploration of Quantum Neural Architecture by Mixing Quantum Neuron Designs. In ICCAD. IEEE.",
1336
+ "[22] Han Xiao, Kashif Rasul, and Roland Vollgraf. 2017. Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv:1708.07747 (2017).",
1337
+ "[23] Zhekai Zhang, Hanrui Wang, Song Han, and William J Dally. 2020. SpArch: Efficient architecture for sparse matrix multiplication. In HPCA. IEEE."
1338
+ ],
1339
+ "bbox": [
1340
+ 516,
1341
+ 398,
1342
+ 921,
1343
+ 912
1344
+ ],
1345
+ "page_idx": 5
1346
+ }
1347
+ ]
2202.13xxx/2202.13239/57c352a6-ea56-4510-913e-a92f5c7acb7d_model.json ADDED
@@ -0,0 +1,1719 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.246,
7
+ 0.087,
8
+ 0.752,
9
+ 0.14
10
+ ],
11
+ "angle": 0,
12
+ "content": "QOC: Quantum On-Chip Training with Parameter Shift and Gradient Pruning"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.176,
18
+ 0.15,
19
+ 0.823,
20
+ 0.179
21
+ ],
22
+ "angle": 0,
23
+ "content": "\\(^{1}\\)Hanrui Wang*, \\(^{2}\\)Zirui Li*, \\(^{3}\\)Jiaqi Gu, \\(^{4}\\)Yongshan Ding, \\(^{3}\\)David Z. Pan, \\(^{1}\\)Song Han \\(^{1}\\)Massachusetts Institute of Technology, \\(^{2}\\)Rutgers University, \\(^{3}\\)University of Taxes at Austin, \\(^{4}\\)Yale University"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.437,
29
+ 0.185,
30
+ 0.562,
31
+ 0.199
32
+ ],
33
+ "angle": 0,
34
+ "content": "https://qmlsys.mit.edu"
35
+ },
36
+ {
37
+ "type": "aside_text",
38
+ "bbox": [
39
+ 0.023,
40
+ 0.261,
41
+ 0.061,
42
+ 0.727
43
+ ],
44
+ "angle": 270,
45
+ "content": "arXiv:2202.13239v3 [quant-ph] 27 Jan 2025"
46
+ },
47
+ {
48
+ "type": "title",
49
+ "bbox": [
50
+ 0.076,
51
+ 0.208,
52
+ 0.176,
53
+ 0.221
54
+ ],
55
+ "angle": 0,
56
+ "content": "ABSTRACT"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.074,
62
+ 0.226,
63
+ 0.485,
64
+ 0.573
65
+ ],
66
+ "angle": 0,
67
+ "content": "Parameterized Quantum Circuits (PQC) are drawing increasing research interest thanks to its potential to achieve quantum advantages on near-term Noisy Intermediate Scale Quantum (NISQ) hardware. In order to achieve scalable PQC learning, the training process needs to be offloaded to real quantum machines instead of using exponential-cost classical simulators. One common approach to obtain PQC gradients is parameter shift whose cost scales linearly with the number of qubits. We present QOC, the first experimental demonstration of practical on-chip PQC training with parameter shift. Nevertheless, we find that due to the significant quantum errors (noises) on real machines, gradients obtained from naive parameter shift have low fidelity and thus degrading the training accuracy. To this end, we further propose probabilistic gradient pruning to firstly identify gradients with potentially large errors and then remove them. Specifically, small gradients have larger relative errors than large ones, thus having a higher probability to be pruned. We perform extensive experiments with the Quantum Neural Network (QNN) benchmarks on 5 classification tasks using 5 real quantum machines. The results demonstrate that our on-chip training achieves over \\(90\\%\\) and \\(60\\%\\) accuracy for 2-class and 4-class image classification tasks. The probabilistic gradient pruning brings up to \\(7\\%\\) PQC accuracy improvements over no pruning. Overall, we successfully obtain similar on-chip training accuracy compared with noise-free simulation but have much better training scalability. The QOC code is available in the TorchQuantum library."
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.076,
73
+ 0.584,
74
+ 0.248,
75
+ 0.598
76
+ ],
77
+ "angle": 0,
78
+ "content": "1 INTRODUCTION"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.074,
84
+ 0.602,
85
+ 0.484,
86
+ 0.699
87
+ ],
88
+ "angle": 0,
89
+ "content": "Quantum Computing (QC) has great potential to achieve exponential acceleration over classical computers, which represents a computational paradigm shift in various domains. Parameterized Quantum Circuits (PQC) are circuits containing trainable weights and are promising to achieve quantum advantages in current devices. Among them, Quantum Neural Network (QNN) is one of the popular algorithms for machine learning tasks."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.074,
95
+ 0.7,
96
+ 0.482,
97
+ 0.811
98
+ ],
99
+ "angle": 0,
100
+ "content": "In order to achieve PQC quantum advantage, the number of qubit needs to be large enough, which casts great difficulty in the parameter training process. In existing PQC work [4, 11], the primary focus has been building quantum models that can outperform classical model accuracy. Thus they typically perform training on classical computers through software simulations and then perform inference with simulators as well (Figure 1 top). Although classical simulation is useful in understanding the capabilities of small-size PQC, it is not"
101
+ },
102
+ {
103
+ "type": "image",
104
+ "bbox": [
105
+ 0.518,
106
+ 0.207,
107
+ 0.941,
108
+ 0.416
109
+ ],
110
+ "angle": 0,
111
+ "content": null
112
+ },
113
+ {
114
+ "type": "image_caption",
115
+ "bbox": [
116
+ 0.514,
117
+ 0.426,
118
+ 0.925,
119
+ 0.469
120
+ ],
121
+ "angle": 0,
122
+ "content": "Figure 1: In QOC, PQC training and inference are both performed on real quantum machines, making the whole pipeline scalable and practical."
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.513,
128
+ 0.494,
129
+ 0.922,
130
+ 0.633
131
+ ],
132
+ "angle": 0,
133
+ "content": "scalable due to the exponentially increased time and memory costs \\((O(2^n), n\\) is the qubit number). As shown in Figure 2(a), the space (#Regs) and time (#Ops) complexity of classical simulation grow exponentially as the number of qubits increases. To the authors' knowledge, this is the first experimental demonstration of efficient and scalable PQC on-chip training protocol. The optimization of parametrized quantum gates is offloaded to the quantum chips with in-situ gradient computation using parameter shift [14]. We also perform PQC evaluation on real quantum machines, making the results more practical as in Figure 1 bottom."
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.513,
139
+ 0.633,
140
+ 0.922,
141
+ 0.744
142
+ ],
143
+ "angle": 0,
144
+ "content": "One of the major challenges to enable scalable and efficient PQC on-chip learning is the robustness against quantum noise. In the current Noisy Intermediate Scale Quantum (NISQ) [16] era, the gate error rates on real quantum devices are non-negligible (\\(10^{-3}\\) to \\(10^{-2}\\)). In the context of PQC, such errors will lead to noisy gradients which can slow down convergence or even make training unstable. As shown in Figure 2(b), large gaps exist between the quantum on-chip training results and the classical noise-free simulation results."
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.513,
150
+ 0.744,
151
+ 0.923,
152
+ 0.925
153
+ ],
154
+ "angle": 0,
155
+ "content": "By carefully investigating the on-chip training process, we observe that small gradients tend to have large relative variations or even wrong directions under quantum noises, as shown in Figure 2(c). Also, not all gradient computations are necessary for the training process, especially for small-magnitude gradients. Those observations provide great opportunities for us to boost the robustness and efficiency of PQC on-chip learning. Inspired by that, we propose a probabilistic gradient pruning method to predict and only compute gradients of high reliability. Hence we can reduce noise impact and also save the required number of circuit runs on real quantum machines. In this paper, we are mainly using QNNs as benchmarks but the techniques can also be applied to other PQCs such as Variational Quantum Eigensolver (VQE). QOC has following contributions:"
156
+ },
157
+ {
158
+ "type": "page_footnote",
159
+ "bbox": [
160
+ 0.074,
161
+ 0.819,
162
+ 0.483,
163
+ 0.88
164
+ ],
165
+ "angle": 0,
166
+ "content": "*Equal Contribution. \nPermission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s)."
167
+ },
168
+ {
169
+ "type": "page_footnote",
170
+ "bbox": [
171
+ 0.076,
172
+ 0.881,
173
+ 0.31,
174
+ 0.892
175
+ ],
176
+ "angle": 0,
177
+ "content": "DAC '22, July 10-14, 2022, San Francisco, CA, USA"
178
+ },
179
+ {
180
+ "type": "page_footnote",
181
+ "bbox": [
182
+ 0.076,
183
+ 0.893,
184
+ 0.298,
185
+ 0.903
186
+ ],
187
+ "angle": 0,
188
+ "content": "© 2022 Copyright held by the owner/author(s)."
189
+ },
190
+ {
191
+ "type": "page_footnote",
192
+ "bbox": [
193
+ 0.076,
194
+ 0.903,
195
+ 0.25,
196
+ 0.912
197
+ ],
198
+ "angle": 0,
199
+ "content": "ACM ISBN 978-1-4503-9142-9/22/07."
200
+ },
201
+ {
202
+ "type": "page_footnote",
203
+ "bbox": [
204
+ 0.076,
205
+ 0.913,
206
+ 0.266,
207
+ 0.923
208
+ ],
209
+ "angle": 0,
210
+ "content": "https://doi.org/10.1145/3489517.3530495"
211
+ },
212
+ {
213
+ "type": "list",
214
+ "bbox": [
215
+ 0.074,
216
+ 0.819,
217
+ 0.483,
218
+ 0.923
219
+ ],
220
+ "angle": 0,
221
+ "content": null
222
+ }
223
+ ],
224
+ [
225
+ {
226
+ "type": "image",
227
+ "bbox": [
228
+ 0.084,
229
+ 0.101,
230
+ 0.268,
231
+ 0.178
232
+ ],
233
+ "angle": 0,
234
+ "content": null
235
+ },
236
+ {
237
+ "type": "image",
238
+ "bbox": [
239
+ 0.282,
240
+ 0.101,
241
+ 0.471,
242
+ 0.176
243
+ ],
244
+ "angle": 0,
245
+ "content": null
246
+ },
247
+ {
248
+ "type": "image",
249
+ "bbox": [
250
+ 0.082,
251
+ 0.182,
252
+ 0.278,
253
+ 0.304
254
+ ],
255
+ "angle": 0,
256
+ "content": null
257
+ },
258
+ {
259
+ "type": "image_caption",
260
+ "bbox": [
261
+ 0.174,
262
+ 0.306,
263
+ 0.191,
264
+ 0.318
265
+ ],
266
+ "angle": 0,
267
+ "content": "(b)"
268
+ },
269
+ {
270
+ "type": "image",
271
+ "bbox": [
272
+ 0.279,
273
+ 0.182,
274
+ 0.472,
275
+ 0.304
276
+ ],
277
+ "angle": 0,
278
+ "content": null
279
+ },
280
+ {
281
+ "type": "image_caption",
282
+ "bbox": [
283
+ 0.369,
284
+ 0.306,
285
+ 0.385,
286
+ 0.318
287
+ ],
288
+ "angle": 0,
289
+ "content": "(c)"
290
+ },
291
+ {
292
+ "type": "image_caption",
293
+ "bbox": [
294
+ 0.074,
295
+ 0.32,
296
+ 0.483,
297
+ 0.388
298
+ ],
299
+ "angle": 0,
300
+ "content": "Figure 2: (a) Classical simulation has unscalable computational and memory costs. (b) Noises create significant accuracy gaps between PQC (QNN) classical simulation and on-chip training. (c) Small gradients suffer from larger relative errors, thus being less reliable."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.084,
306
+ 0.393,
307
+ 0.483,
308
+ 0.42
309
+ ],
310
+ "angle": 0,
311
+ "content": "- We are the first work to demonstrate the practicality of parameter shift on NISQ machines, achieving high PQC learning accuracy."
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.084,
317
+ 0.421,
318
+ 0.482,
319
+ 0.462
320
+ ],
321
+ "angle": 0,
322
+ "content": "- A probabilistic gradient pruning method is proposed to improve the noise robustness by \\(5 - 7\\%\\) and reduce the number of inference on real QC by \\(2\\times\\) while maintaining the accuracy."
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.084,
328
+ 0.462,
329
+ 0.482,
330
+ 0.531
331
+ ],
332
+ "angle": 0,
333
+ "content": "- Experimental deployment of QNN on 5 real quantum machines demonstrates that the proposed method can achieve over \\(90\\%\\) and \\(60\\%\\) accuracy for 2-class and 4-class image recognition tasks. Our framework enables scalable, robust, and efficient training of PQCs with large number of qubits and parameters."
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.084,
339
+ 0.531,
340
+ 0.482,
341
+ 0.56
342
+ ],
343
+ "angle": 0,
344
+ "content": "- We open-source the parameter shift on-chip PQC training and gradient pruning code in the TorchQuantum library."
345
+ },
346
+ {
347
+ "type": "list",
348
+ "bbox": [
349
+ 0.084,
350
+ 0.393,
351
+ 0.483,
352
+ 0.56
353
+ ],
354
+ "angle": 0,
355
+ "content": null
356
+ },
357
+ {
358
+ "type": "title",
359
+ "bbox": [
360
+ 0.075,
361
+ 0.574,
362
+ 0.236,
363
+ 0.587
364
+ ],
365
+ "angle": 0,
366
+ "content": "2 BACKGROUND"
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.074,
372
+ 0.592,
373
+ 0.483,
374
+ 0.841
375
+ ],
376
+ "angle": 0,
377
+ "content": "Quantum basics. Quantum circuits use quantum bit (called qubit) to store information, which is a linear combination of two basis states: \\( |\\psi \\rangle = \\alpha |0\\rangle + \\beta |1\\rangle \\), for \\( \\alpha, \\beta \\in \\mathbb{C} \\), satisfying \\( |\\alpha|^2 + |\\beta|^2 = 1 \\). An \\( n \\)-qubit system can represent a linear combination of \\( 2^n \\) basis states. A \\( 2^n \\)-length complex state vector of all combination coefficients is used to describe the quantum state. To perform computation on a quantum system, a sequence of parametrized quantum gates are applied to perform unitary transformation on the statevector, i.e., \\( |\\psi(x, \\theta)\\rangle = \\dots U_2(x, \\theta_2)U_1(x, \\theta_1)|0\\rangle \\), where \\( x \\) is the input data, and \\( \\theta = (\\theta_1, \\theta_2, \\ldots) \\) are trainable parameters in quantum gates. In this way, input data and trainable parameters are embedded in the quantum state \\( |\\psi(x, \\theta)\\rangle \\). The computation results are obtained by qubit readout which measures the probability of a qubit state \\( |\\psi\\rangle \\) collapsing to either \\( |0\\rangle \\) (i.e., output \\( y = +1 \\)) or \\( |1\\rangle \\) (i.e., output \\( y = -1 \\)) according to \\( |\\alpha|^2 \\) and \\( |\\beta|^2 \\). With sufficient samples, we can compute the expectation value: \\( \\mathbb{E}[y] = (+1)|\\alpha|^2 + (-1)|\\beta|^2 \\). A non-linear network can be constructed to perform ML tasks by cascading multiple blocks of quantum gates and measurements."
378
+ },
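The readout described above can be illustrated with a small NumPy sketch (our own toy example for a single qubit, not code from the paper): the statevector amplitudes give the collapse probabilities, and the expectation value follows from assigning +1 to |0> and -1 to |1>.

```python
# Single-qubit readout: expectation E[y] = (+1)|alpha|^2 + (-1)|beta|^2.
import numpy as np

alpha, beta = 1 / np.sqrt(2), 1j / np.sqrt(2)    # |psi> = alpha|0> + beta|1>
p0, p1 = abs(alpha) ** 2, abs(beta) ** 2         # collapse probabilities
expectation = (+1) * p0 + (-1) * p1
print(p0, p1, expectation)                        # 0.5 0.5 0.0

# With a finite number of shots, the expectation is estimated from samples:
shots = np.random.default_rng(0).choice([+1, -1], size=1024, p=[p0, p1])
print(shots.mean())                               # close to 0.0 for 1024 shots
```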
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.074,
383
+ 0.841,
384
+ 0.483,
385
+ 0.925
386
+ ],
387
+ "angle": 0,
388
+ "content": "Quantum noise. In real quantum computer systems, errors (noises) would occur due to unwanted interactions between qubits, imperfect control signals, or interference from the environment [6]. For example, quantum gates introduce operation errors (e.g., coherent errors and stochastic errors) into the system, and qubits also suffer from decoherence error (spontaneous loss of its stored information)"
389
+ },
390
+ {
391
+ "type": "image",
392
+ "bbox": [
393
+ 0.536,
394
+ 0.076,
395
+ 0.916,
396
+ 0.19
397
+ ],
398
+ "angle": 0,
399
+ "content": null
400
+ },
401
+ {
402
+ "type": "image_caption",
403
+ "bbox": [
404
+ 0.53,
405
+ 0.205,
406
+ 0.905,
407
+ 0.219
408
+ ],
409
+ "angle": 0,
410
+ "content": "Figure 3: Quantum Neural Network (QNN) architecture."
411
+ },
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.513,
416
+ 0.225,
417
+ 0.921,
418
+ 0.253
419
+ ],
420
+ "angle": 0,
421
+ "content": "over time. These noisy systems need to be characterized [13] and calibrated [7] frequently to mitigate the noise impact."
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.513,
427
+ 0.253,
428
+ 0.922,
429
+ 0.475
430
+ ],
431
+ "angle": 0,
432
+ "content": "Quantum neural networks. Quantum Machine Learning (QML) [1, 9, 17, 18, 21] aims to leverage QC techniques to solve machine learning tasks and achieve much higher efficiency. The path to quantum advantage on QML is typically provided by the quantum circuit's ability to generate and estimate highly complex kernels [5], which would otherwise be intractable to compute with conventional computers. They have been shown to have potential speed-up over classical counterparts in various tasks, including metric learning [12], data analysis [10]. As shown in Figure 3, the quantum neural network is one type of QML model using variational quantum circuits with trainable parameters to accomplish feature encoding of input data and perform complex-valued linear transformations thereafter. Most of QNN trainings are exploratory and rely on classical simulation of small quantum systems. In our work, on the contrary, we explore the practical setting: the QNN training and inference are both performed on real quantum devices."
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.513,
438
+ 0.475,
439
+ 0.922,
440
+ 0.572
441
+ ],
442
+ "angle": 0,
443
+ "content": "Pruning. Pruning techniques are widely used in the field of DNN [3, 19, 20, 23], performing an important role of the trade-off between accuracy and memory or time cost [15]. Recently, pruning techniques have been used in quantum tasks. Pruning the ansatz can bring time-efficient circuit and even higher performance on real QC [17]. In our work, we apply pruning techniques to prune unreliable gradients in order to mitigate the noise during training."
444
+ },
445
+ {
446
+ "type": "title",
447
+ "bbox": [
448
+ 0.515,
449
+ 0.585,
450
+ 0.687,
451
+ 0.598
452
+ ],
453
+ "angle": 0,
454
+ "content": "3 METHODOLOGY"
455
+ },
456
+ {
457
+ "type": "text",
458
+ "bbox": [
459
+ 0.513,
460
+ 0.603,
461
+ 0.922,
462
+ 0.673
463
+ ],
464
+ "angle": 0,
465
+ "content": "To enable PQC on-chip learning, we first introduce an in-situ quantum gradient computation via parameter shift and its real QC implementation. A probabilistic gradient pruning method is proposed to save the gradient computation cost with enhanced noise-robustness and training efficiency. We study QNN as the benchmark PQC."
466
+ },
467
+ {
468
+ "type": "title",
469
+ "bbox": [
470
+ 0.514,
471
+ 0.685,
472
+ 0.921,
473
+ 0.7
474
+ ],
475
+ "angle": 0,
476
+ "content": "3.1 Parameter Shift Rule for Quantum Gradients"
477
+ },
478
+ {
479
+ "type": "text",
480
+ "bbox": [
481
+ 0.513,
482
+ 0.703,
483
+ 0.922,
484
+ 0.842
485
+ ],
486
+ "angle": 0,
487
+ "content": "Parameter shift rule states that we can calculate the gradient of each parameter in some quantum circuits by simply shifting the parameter twice and calculating the difference between two outputs, without changing the structure of circuits or using any ancilla qubits. Prior works elaborate it based on quantum circuit function [2], however, in the next subsection we will show how parameter shift rules combined with backpropagation can be used in a real PQC task. Suppose an \\( m \\)-qubit quantum circuit is parametrized by \\( n \\) parameters \\( \\theta = [\\theta_{1},\\dots ,\\theta_{i},\\dots ,\\theta_{n}] \\), the expectation value of measurements of this circuit can be represented by a circuit function,"
488
+ },
489
+ {
490
+ "type": "equation",
491
+ "bbox": [
492
+ 0.568,
493
+ 0.847,
494
+ 0.922,
495
+ 0.863
496
+ ],
497
+ "angle": 0,
498
+ "content": "\\[\nf (\\theta) = \\left\\langle \\psi \\right| U \\left(\\theta_ {i}\\right) ^ {\\dagger} \\widehat {Q} U \\left(\\theta_ {i}\\right) | \\psi \\rangle , \\quad f (\\theta) \\in \\mathbb {R} ^ {m}, \\theta \\in \\mathbb {R} ^ {n}. \\tag {1}\n\\]"
499
+ },
500
+ {
501
+ "type": "text",
502
+ "bbox": [
503
+ 0.513,
504
+ 0.867,
505
+ 0.922,
506
+ 0.925
507
+ ],
508
+ "angle": 0,
509
+ "content": "where \\(\\theta_{i}\\) is the scalar parameter whose gradient is to be calculated, and \\(U(\\theta_{i})\\) is the gate where \\(\\theta_{i}\\) lies in. Here, for notation simplicity, we have already absorbed the unitaries before \\(U(\\theta_{i})\\) into \\(\\langle \\psi |,\\left|\\psi \\right\\rangle\\) Unitaries after \\(U(\\theta_{i})\\) and observables are fused into \\(\\widehat{Q}\\) . Usually, the"
510
+ }
511
+ ],
512
+ [
513
+ {
514
+ "type": "text",
515
+ "bbox": [
516
+ 0.074,
517
+ 0.092,
518
+ 0.482,
519
+ 0.162
520
+ ],
521
+ "angle": 0,
522
+ "content": "gates used in PQC can be written in the form \\( U(\\theta_i) = e^{-\\frac{i}{2}\\theta_iH} \\). Here \\( H \\) is the Hermitian generator of \\( U \\) with only 2 unique eigenvalues +1 and -1 (\\( H \\)'s eigenvalues can be \\( \\pm r \\), but for simplicity we assume it's \\( \\pm 1 \\)). In this way, the gradients of the circuit function \\( f \\) with respect to \\( \\theta_i \\) are,"
523
+ },
524
+ {
525
+ "type": "equation",
526
+ "bbox": [
527
+ 0.104,
528
+ 0.165,
529
+ 0.266,
530
+ 0.19
531
+ ],
532
+ "angle": 0,
533
+ "content": "\\[\n\\frac {\\partial f (\\theta)}{\\partial \\theta_ {i}} = \\frac {1}{2} \\left(f \\left(\\theta_ {+}\\right) - f \\left(\\theta_ {-}\\right)\\right),\n\\]"
534
+ },
535
+ {
536
+ "type": "equation",
537
+ "bbox": [
538
+ 0.104,
539
+ 0.19,
540
+ 0.482,
541
+ 0.214
542
+ ],
543
+ "angle": 0,
544
+ "content": "\\[\n\\theta_ {+} = \\left[ \\theta_ {1}, \\dots , \\theta_ {i} + \\frac {\\pi}{2}, \\dots , \\theta_ {n} \\right], \\theta_ {-} = \\left[ \\theta_ {1}, \\dots , \\theta_ {i} - \\frac {\\pi}{2}, \\dots , \\theta_ {n} \\right], \\tag {2}\n\\]"
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.074,
550
+ 0.216,
551
+ 0.482,
552
+ 0.284
553
+ ],
554
+ "angle": 0,
555
+ "content": "where \\(\\theta_{+}\\) and \\(\\theta_{-}\\) are the positive shift and negative shift of \\(\\theta\\). Note that this parameter shift rule is fundamentally different from any numerical difference methods that only approximate the directional derivatives. Instead, Eq. 2 calculates the exact gradient w.r.t \\(\\theta_{i}\\) without any approximation errors or numerical issues."
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.075,
561
+ 0.285,
562
+ 0.482,
563
+ 0.34
564
+ ],
565
+ "angle": 0,
566
+ "content": "We apply softmax on the expectation values of measurements \\( f(\\theta) \\) as the predicted probability for each class. Then we calculate the cross entropy between the predicted probability distribution \\( p \\) and the target distribution \\( t \\) as the classification loss \\( \\mathcal{L} \\),"
567
+ },
568
+ {
569
+ "type": "equation",
570
+ "bbox": [
571
+ 0.085,
572
+ 0.344,
573
+ 0.483,
574
+ 0.377
575
+ ],
576
+ "angle": 0,
577
+ "content": "\\[\n\\mathcal {L} (\\theta) = - t ^ {T} \\cdot \\operatorname {s o f t m a x} (f (\\theta)) = - \\sum_ {j = 1} ^ {m} t _ {j} \\log p _ {j}, \\quad p _ {j} = \\frac {e ^ {f _ {j} (\\theta)}}{\\sum_ {j = 1} ^ {m} e ^ {f _ {j} (\\theta)}}. \\tag {3}\n\\]"
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.076,
583
+ 0.381,
584
+ 0.482,
585
+ 0.422
586
+ ],
587
+ "angle": 0,
588
+ "content": "Then the gradient of the loss function with respect to \\(\\theta_{i}\\) is \\(\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial\\theta_i} = \\left(\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)}\\right)^T\\frac{\\partial f(\\theta)}{\\partial\\theta_i}\\)."
589
+ },
590
+ {
591
+ "type": "text",
592
+ "bbox": [
593
+ 0.075,
594
+ 0.422,
595
+ 0.483,
596
+ 0.487
597
+ ],
598
+ "angle": 0,
599
+ "content": "Here \\(\\frac{\\partial f(\\theta)}{\\partial\\theta_i}\\) can be calculated on real quantum circuit by the parameter shift rule, and \\(\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)}\\) can be efficiently calculated on classical devices using backpropagation supported by automatic differentiation frameworks, e.g., PyTorch and TensorFlow."
600
+ },
601
+ {
602
+ "type": "text",
603
+ "bbox": [
604
+ 0.09,
605
+ 0.487,
606
+ 0.482,
607
+ 0.501
608
+ ],
609
+ "angle": 0,
610
+ "content": "Now we derive the parameter shift rule used in our PQC models."
611
+ },
612
+ {
613
+ "type": "text",
614
+ "bbox": [
615
+ 0.075,
616
+ 0.501,
617
+ 0.482,
618
+ 0.53
619
+ ],
620
+ "angle": 0,
621
+ "content": "Assume \\( U(\\theta_i) = R_X(\\theta_i), R_X(\\alpha) = e^{-\\frac{i}{2}\\alpha X} \\), where \\( X \\) is the Pauli-X matrix."
622
+ },
623
+ {
624
+ "type": "text",
625
+ "bbox": [
626
+ 0.091,
627
+ 0.532,
628
+ 0.228,
629
+ 0.545
630
+ ],
631
+ "angle": 0,
632
+ "content": "Firstly, the RX gate is,"
633
+ },
634
+ {
635
+ "type": "equation",
636
+ "bbox": [
637
+ 0.084,
638
+ 0.549,
639
+ 0.483,
640
+ 0.677
641
+ ],
642
+ "angle": 0,
643
+ "content": "\\[\n\\begin{array}{l} R _ {X} (\\alpha) = e ^ {- \\frac {i}{2} \\alpha X} = \\sum_ {k = 0} ^ {\\infty} (- i \\alpha / 2) ^ {k} X ^ {k} / k! \\\\ = \\sum_ {k = 0} ^ {\\infty} (- i \\alpha / 2) ^ {2 k} X ^ {2 k} / (2 k)! + \\sum_ {k = 0} ^ {\\infty} (- i \\alpha / 2) ^ {2 k + 1} X ^ {2 k + 1} / (2 k + 1)! \\\\ = \\sum_ {k = 0} ^ {\\infty} (- 1) ^ {k} (\\alpha / 2) ^ {2 k} I / (2 k)! - i \\sum_ {k = 0} ^ {\\infty} (- 1) ^ {k} (\\alpha / 2) ^ {2 k + 1} X / (2 k + 1)! \\\\ = \\cos (\\alpha / 2) I - i \\sin (\\alpha / 2) X. \\tag {4} \\\\ \\end{array}\n\\]"
644
+ },
645
+ {
646
+ "type": "equation",
647
+ "bbox": [
648
+ 0.075,
649
+ 0.677,
650
+ 0.284,
651
+ 0.696
652
+ ],
653
+ "angle": 0,
654
+ "content": "\\[\n\\mathrm {L e t} \\alpha = \\frac {\\pi}{2}, R _ {X} (\\pm \\frac {\\pi}{2}) = \\frac {1}{\\sqrt {2}} (I \\mp i X).\n\\]"
655
+ },
656
+ {
657
+ "type": "text",
658
+ "bbox": [
659
+ 0.093,
660
+ 0.696,
661
+ 0.483,
662
+ 0.711
663
+ ],
664
+ "angle": 0,
665
+ "content": "As \\(f(\\theta) = \\langle \\psi |R_X(\\theta_i)^\\dagger \\widehat{Q} R_X(\\theta_i)|\\psi \\rangle ,R_X(\\alpha)R_X(\\beta) = R_X(\\alpha +\\beta),\\)"
666
+ },
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.076,
671
+ 0.71,
672
+ 0.305,
673
+ 0.727
674
+ ],
675
+ "angle": 0,
676
+ "content": "and \\(\\frac{\\partial}{\\partial\\alpha} R_X(\\alpha) = -\\frac{i}{2} XR_X(\\alpha)\\), we have"
677
+ },
678
+ {
679
+ "type": "equation",
680
+ "bbox": [
681
+ 0.078,
682
+ 0.729,
683
+ 0.505,
684
+ 0.879
685
+ ],
686
+ "angle": 0,
687
+ "content": "\\[\n\\begin{array}{l} \\frac {\\partial f (\\theta)}{\\partial \\theta_ {i}} = \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} (- \\frac {i}{2} X) ^ {\\dagger} \\widehat {Q} R _ {X} (\\theta_ {i}) | \\psi \\rangle + \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} \\widehat {Q} (- \\frac {i}{2} X) R _ {X} (\\theta_ {i}) | \\psi \\rangle \\\\ = \\frac {1}{4} \\left(\\langle \\psi | R _ {X} \\left(\\theta_ {i}\\right) ^ {\\dagger} (I - i X) ^ {\\dagger} \\widehat {Q} (I - i X) R _ {X} \\left(\\theta_ {i}\\right) | \\psi \\rangle \\right. \\\\ - \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} (I + i X) ^ {\\dagger} \\widehat {Q} (I + i X) R _ {X} (\\theta_ {i}) | \\psi \\rangle) \\\\ = \\frac {1}{2} \\left(\\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} R _ {X} (\\frac {\\pi}{2}) ^ {\\dagger} \\widehat {Q} R _ {X} (\\frac {\\pi}{2}) R _ {X} (\\theta_ {i}) | \\psi \\rangle \\right. \\\\ - \\langle \\psi | R _ {X} (\\theta_ {i}) ^ {\\dagger} R _ {X} (- \\frac {\\pi}{2}) ^ {\\dagger} \\widehat {Q} R _ {X} (- \\frac {\\pi}{2}) R _ {X} (\\theta_ {i}) | \\psi \\rangle) \\\\ = \\frac {1}{2} \\left(f \\left(\\theta_ {+}\\right) - f \\left(\\theta_ {-}\\right)\\right). \\tag {5} \\\\ \\end{array}\n\\]"
688
+ },
689
+ {
690
+ "type": "text",
691
+ "bbox": [
692
+ 0.074,
693
+ 0.88,
694
+ 0.483,
695
+ 0.925
696
+ ],
697
+ "angle": 0,
698
+ "content": "Without loss of generality, the derivation holds for all unitaries of the form \\( e^{-\\frac{i}{2}\\alpha H} \\), e.g., RX, RY, RZ, XX, YY, ZZ, where \\( H \\) is a Hermitian matrix with only 2 unique eigenvalues +1 and -1."
699
+ },
700
+ {
701
+ "type": "image",
702
+ "bbox": [
703
+ 0.518,
704
+ 0.09,
705
+ 0.912,
706
+ 0.306
707
+ ],
708
+ "angle": 0,
709
+ "content": null
710
+ },
711
+ {
712
+ "type": "image_caption",
713
+ "bbox": [
714
+ 0.514,
715
+ 0.314,
716
+ 0.921,
717
+ 0.342
718
+ ],
719
+ "angle": 0,
720
+ "content": "Figure 4: Quantum gradient calculation using the parameter shift rule on real quantum devices."
721
+ },
722
+ {
723
+ "type": "text",
724
+ "bbox": [
725
+ 0.513,
726
+ 0.345,
727
+ 0.922,
728
+ 0.415
729
+ ],
730
+ "angle": 0,
731
+ "content": "In our circuit functions, we assume each parameter lies in exactly one gate. However, there are cases that one parameter lies in multiple gates. In that case, we only need to calculate the gradient of the parameter in those gates separately and sum the gradients up to get the gradient of that parameter."
732
+ },
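To make the preceding rule concrete, here is a minimal sketch (our own pseudocode, not the released TorchQuantum API) of how per-gate shift gradients could be summed when one parameter is shared by several gates; `run_circuit` and its `shifted_slot`/`delta` arguments are hypothetical stand-ins for executing the circuit with the shift applied inside a single gate occurrence.

```python
import numpy as np

def grad_shared_param(run_circuit, theta, gate_slots, shift=np.pi / 2):
    """Product rule: sum the parameter-shift gradients obtained by shifting
    the shared parameter inside one gate occurrence at a time."""
    total = 0.0
    for slot in gate_slots:                       # each occurrence of theta_i
        f_plus = run_circuit(theta, shifted_slot=slot, delta=+shift)
        f_minus = run_circuit(theta, shifted_slot=slot, delta=-shift)
        total = total + 0.5 * (f_plus - f_minus)  # Eq. 2 applied per occurrence
    return total
```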
733
+ {
734
+ "type": "title",
735
+ "bbox": [
736
+ 0.514,
737
+ 0.428,
738
+ 0.901,
739
+ 0.444
740
+ ],
741
+ "angle": 0,
742
+ "content": "3.2 In-situ Gradient Computation on Real QC"
743
+ },
744
+ {
745
+ "type": "text",
746
+ "bbox": [
747
+ 0.513,
748
+ 0.446,
749
+ 0.922,
750
+ 0.473
751
+ ],
752
+ "angle": 0,
753
+ "content": "To realize PQC on-chip learning, we implement a TrainingEngine, described in Alg. 1. This TrainingEngine contains three parts."
754
+ },
755
+ {
756
+ "type": "text",
757
+ "bbox": [
758
+ 0.513,
759
+ 0.474,
760
+ 0.922,
761
+ 0.617
762
+ ],
763
+ "angle": 0,
764
+ "content": "Jacobian calculation via parameter shift. In the first part, we sample a mini-batch of training data \\(\\mathcal{I}\\) in Line 6. For each example of the mini-batch, we set up the quantum encoder gates and then iteratively evaluate gradients for all parameters. In each iteration, we shift the parameter \\(\\theta_{i}\\) twice by \\(+ \\pi /2\\) and \\(-\\pi /2\\) respectively. After each shift, we execute the shifted circuit on quantum hardware. The circuit will be created, validated, queued, and finally run on real quantum machines. As soon as we get the returned results of the two shifted circuits, i.e., \\(f(\\theta_{+})\\) and \\(f(\\theta_{-})\\), we apply Eq. 2 to obtain the upstream gradient \\(\\frac{\\partial f(\\theta)}{\\partial \\theta_i}\\), illustrated in the left part of Figure 4."
765
+ },
766
+ {
767
+ "type": "text",
768
+ "bbox": [
769
+ 0.514,
770
+ 0.617,
771
+ 0.789,
772
+ 0.635
773
+ ],
774
+ "angle": 0,
775
+ "content": "Finally, we obtain the Jacobian matrix \\(\\frac{\\partial f(\\theta)}{\\partial\\theta}\\)."
776
+ },
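As a rough illustration of this first part, the following sketch builds the Jacobian column by column with Eq. 2; `run_circuit` is a hypothetical callable that submits the (shifted) circuit to the backend and returns the vector of measured expectation values, so this is not the exact TrainingEngine implementation.

```python
import numpy as np

def jacobian_parameter_shift(run_circuit, theta):
    """Return df/dtheta with shape [m, n], one parameter-shift column per parameter."""
    columns = []
    for i in range(len(theta)):
        theta_plus, theta_minus = theta.copy(), theta.copy()
        theta_plus[i] += np.pi / 2
        theta_minus[i] -= np.pi / 2
        # Two circuit executions per parameter (Eq. 2).
        columns.append(0.5 * (run_circuit(theta_plus) - run_circuit(theta_minus)))
    return np.stack(columns, axis=1)
```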
777
+ {
778
+ "type": "text",
779
+ "bbox": [
780
+ 0.513,
781
+ 0.635,
782
+ 0.922,
783
+ 0.722
784
+ ],
785
+ "angle": 0,
786
+ "content": "Down-stream gradient backpropagation. In the second part, we run the circuit without shift and get the measurement result \\( f(\\theta) \\). Then we apply soft max and cross-entropy function to the measured logits. In the end, we get the training loss \\( \\mathcal{L}(\\theta) \\). Then we run backpropagation only from the loss to the logits to get the down-stream gradients \\( \\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)} \\), shown in the right part of Figure 4."
787
+ },
788
+ {
789
+ "type": "text",
790
+ "bbox": [
791
+ 0.513,
792
+ 0.722,
793
+ 0.921,
794
+ 0.77
795
+ ],
796
+ "angle": 0,
797
+ "content": "Gradient calculation. In the third part, we calculate the dot-product between down-stream gradients and the Jacobian and get the final gradients \\(\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial\\theta} = \\left(\\frac{\\partial f(\\theta)}{\\partial\\theta}\\right)^T\\frac{\\partial\\mathcal{L}(\\theta)}{\\partial f(\\theta)}\\)"
798
+ },
799
+ {
800
+ "type": "title",
801
+ "bbox": [
802
+ 0.515,
803
+ 0.78,
804
+ 0.891,
805
+ 0.797
806
+ ],
807
+ "angle": 0,
808
+ "content": "3.3 Probabilistic Quantum Gradient Pruning"
809
+ },
810
+ {
811
+ "type": "text",
812
+ "bbox": [
813
+ 0.513,
814
+ 0.799,
815
+ 0.922,
816
+ 0.924
817
+ ],
818
+ "angle": 0,
819
+ "content": "On quantum chips, there exist various noises and errors that could potentially diminish the fidelity of the computation results. When the gradient magnitude is small, noises could easily overwhelm the signals, such that the gradients calculated on real quantum circuit become unreliable when they have small magnitude. Those unreliable gradients have harmful effects on training convergence. Skipping the evaluation on those unreliable gradients can benefit both training convergence and efficiency. Besides, we observe that for most parameters, if the gradient magnitudes are far from zero for several"
820
+ }
821
+ ],
822
+ [
823
+ {
824
+ "type": "image",
825
+ "bbox": [
826
+ 0.114,
827
+ 0.092,
828
+ 0.895,
829
+ 0.236
830
+ ],
831
+ "angle": 0,
832
+ "content": null
833
+ },
834
+ {
835
+ "type": "image_caption",
836
+ "bbox": [
837
+ 0.073,
838
+ 0.251,
839
+ 0.923,
840
+ 0.294
841
+ ],
842
+ "angle": 0,
843
+ "content": "Figure 5: Efficient on-chip quantum gradient calculation with probabilistic gradient pruning. Gradient magnitudes are accumulated within the accumulation window and used as the sampling distribution. Based on the distribution, gradients are probabilistically pruned with a ratio \\( r \\) in the pruning window to mitigate noises and stabilize training."
844
+ },
845
+ {
846
+ "type": "text",
847
+ "bbox": [
848
+ 0.074,
849
+ 0.314,
850
+ 0.482,
851
+ 0.411
852
+ ],
853
+ "angle": 0,
854
+ "content": "steps, it will likely keep far from zero in the next several steps. Similarly, if the gradient magnitude remains small for some steps, it will likely keep small in the next several steps. This means the gradient reliability is predictable to some extent. Therefore, we propose the gradient pruning method to sample the parameters whose gradients are more reliable. This method helps training converge faster while also saving time by skipping the evaluation of unreliable gradients."
855
+ },
856
+ {
857
+ "type": "text",
858
+ "bbox": [
859
+ 0.074,
860
+ 0.411,
861
+ 0.483,
862
+ 0.55
863
+ ],
864
+ "angle": 0,
865
+ "content": "Alg. 1 describes the PQC on-chip training flow with probabilistic gradient pruning. We divide all the training steps into \\(S\\) stages and perform the pruning method periodically on each stage. For every stage, we split it into two phases, shown in Figure 5. The first phase is called magnitude accumulation with an accumulation window width \\(w_{a}\\), and the second is called probabilistic gradient pruning (PGP) with a pruning window width \\(w_{p}\\). We only apply pruning in the second phase, while the parameter subset is sampled from a probability distribution \\(\\tilde{\\theta} = \\{\\theta_i \\sim P_M(\\theta) | 1 \\leq i \\leq (1 - r)n\\}\\) based on the gradient information collected within the accumulation window."
866
+ },
867
+ {
868
+ "type": "text",
869
+ "bbox": [
870
+ 0.074,
871
+ 0.551,
872
+ 0.483,
873
+ 0.69
874
+ ],
875
+ "angle": 0,
876
+ "content": "In Lines 4-9, within the accumulation window, we record the magnitude of gradients of each parameter in each step and accumulate them until the window is over. At the end of the first phase, we can get an accumulator \\( M \\) that records the accumulated gradient magnitude for each parameter. Thus, when the pruning phase starts, we normalize the accumulated gradient magnitude and pass it to our sampler as the sampling distribution. In each pruning step, the sampler samples a subset of parameters \\( \\tilde{\\theta} \\) with a pruning ratio of \\( r \\), and we only evaluate gradients for them while the rest \\( \\theta \\backslash \\tilde{\\theta} \\) is temporarily frozen."
877
+ },
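A minimal numpy sketch of this accumulate-then-sample step, under our reading of the text (magnitudes normalized into a distribution and indices drawn without replacement); the function name and the use of `numpy.random.Generator.choice` are our own choices, not the paper's implementation.

```python
import numpy as np

def sample_active_params(magnitude_acc, prune_ratio, rng):
    """magnitude_acc: accumulator M of per-parameter |gradient| sums."""
    n = len(magnitude_acc)
    num_keep = int(round((1.0 - prune_ratio) * n))   # evaluate (1 - r) * n gradients
    probs = magnitude_acc / magnitude_acc.sum()      # sampling distribution P_M
    return rng.choice(n, size=num_keep, replace=False, p=probs)

# Example: with r = 0.5, half of these 8 parameters are frozen for this step.
rng = np.random.default_rng(0)
active = sample_active_params(np.array([0.9, 0.1, 0.4, 0.05, 0.7, 0.2, 0.3, 0.6]), 0.5, rng)
```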
878
+ {
879
+ "type": "text",
880
+ "bbox": [
881
+ 0.074,
882
+ 0.691,
883
+ 0.483,
884
+ 0.833
885
+ ],
886
+ "angle": 0,
887
+ "content": "There are three important hyper-parameters in our gradient pruning method: 1) accumulation window width \\( w_{a} \\), 2) pruning ratio \\( r \\), and 3) pruning window width \\( w_{p} \\). The accumulation window width and pruning window width decide the reliability of the gradient trend evaluation and our confidence in it, respectively. The pruning ratio can be tuned to balance the gradient variances caused by noise perturbation and pruning. Thus, the percentage of the time saved by our probabilistic gradient pruning method is \\( r\\frac{w_p}{w_a + w_p}\\times 100\\% \\). In our experiments, we find that the setting \\( (w_{a} = 1,w_{p} = 2\\sim 3,r = 0.3\\sim 0.5) \\) usually works well in all cases."
888
+ },
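As a concrete reading of this formula: with the typical setting \(w_{a} = 1\), \(w_{p} = 2\), and \(r = 0.5\), the expected saving is \(0.5 \times \frac{2}{1 + 2} \times 100\% \approx 33\%\) of the gradient-evaluation circuit runs.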
889
+ {
890
+ "type": "title",
891
+ "bbox": [
892
+ 0.075,
893
+ 0.864,
894
+ 0.235,
895
+ 0.878
896
+ ],
897
+ "angle": 0,
898
+ "content": "4 EXPERIMENTS"
899
+ },
900
+ {
901
+ "type": "text",
902
+ "bbox": [
903
+ 0.074,
904
+ 0.882,
905
+ 0.483,
906
+ 0.925
907
+ ],
908
+ "angle": 0,
909
+ "content": "In this section, we deploy our PQC on-chip learning framework on real QC and evaluate it on 5 QNN tasks for image and vowel recognition. Compared with classical QNN training protocols, we can"
910
+ },
911
+ {
912
+ "type": "code_caption",
913
+ "bbox": [
914
+ 0.523,
915
+ 0.315,
916
+ 0.886,
917
+ 0.344
918
+ ],
919
+ "angle": 0,
920
+ "content": "Algorithm 1: PQC On-Chip Training with Probabilistic Gradient Pruning"
921
+ },
922
+ {
923
+ "type": "algorithm",
924
+ "bbox": [
925
+ 0.524,
926
+ 0.346,
927
+ 0.922,
928
+ 0.684
929
+ ],
930
+ "angle": 0,
931
+ "content": "Input: Accumulation window width \\(w_{a}\\), gradient pruning ratio \\(r\\), pruning window width \\(w_{p}\\), training objective \\(\\mathcal{L}\\), initial parameters \\(\\theta^0 \\in \\mathbb{R}^n\\), training data \\(\\mathcal{D}_{trn}\\), initial step size \\(\\eta^0\\), and total stages \\(S\\). \\(\\theta \\gets \\theta^0\\), \\(\\eta \\gets \\eta^0\\) \\(t \\gets 0\\); \nfor \\(s = 1,2,\\dots,S\\) do \nInitialize gradient magnitude accumulator \\(M \\gets 0^n\\); \nfor \\(\\tau_{a} = 1,2,\\dots,w_{a}\\) do \n\\(t \\gets t + 1\\); \nSample a mini-batch \\(\\mathcal{I} \\sim \\mathcal{D}_{trn}\\); \nIn-situ gradient evaluation via parameter shift \\(\\nabla_{\\theta} \\mathcal{L}_{\\mathcal{I}}(\\theta) = \\frac{1}{2} (\\frac{\\partial f(\\theta)}{\\partial \\theta})^T \\frac{\\partial \\mathcal{L}(\\theta)}{f(\\theta)}\\); \nParameter update: \\(\\theta \\gets \\theta - \\eta \\nabla_{\\theta} \\mathcal{L}_{\\mathcal{I}}(\\theta)\\); \nUpdate magnitude accumulator \\(M \\gets M + |\\nabla_{\\theta} \\mathcal{L}_{\\mathcal{I}}(\\theta)|\\); \nfor \\(\\tau_{p} \\gets 1,2,\\dots,w_{p}\\) do \n\\(t \\gets t + 1\\); \nSample a mini-batch \\(\\mathcal{I} \\sim \\mathcal{D}_{trn}\\); \nSample a subset with a ratio \\(r\\) based on accumulated gradient magnitude: \n\\(\\tilde{\\theta} = \\{\\theta_i \\sim P_M(\\theta) | 1 \\leq i \\leq (1-r)n\\}\\); \n\\(\\tilde{\\theta} \\gets \\tilde{\\theta} - \\eta \\nabla_{\\tilde{\\theta}} \\mathcal{L}_{\\mathcal{I}}(\\theta)\\); \nOutput: Converged parameters \\(\\theta\\)"
932
+ },
933
+ {
934
+ "type": "text",
935
+ "bbox": [
936
+ 0.514,
937
+ 0.712,
938
+ 0.923,
939
+ 0.769
940
+ ],
941
+ "angle": 0,
942
+ "content": "achieve \\(2 - 4\\%\\) real QC test accuracy improvement with \\(2\\times\\) convergence speedup. We also conduct extensive ablation studies to validate our scalability and the effectiveness of the proposed probabilistic gradient pruning method."
943
+ },
944
+ {
945
+ "type": "title",
946
+ "bbox": [
947
+ 0.515,
948
+ 0.781,
949
+ 0.715,
950
+ 0.797
951
+ ],
952
+ "angle": 0,
953
+ "content": "4.1 Experiment Setups"
954
+ },
955
+ {
956
+ "type": "text",
957
+ "bbox": [
958
+ 0.513,
959
+ 0.799,
960
+ 0.922,
961
+ 0.925
962
+ ],
963
+ "angle": 0,
964
+ "content": "Benchmarks. We conduct our experiments on 5 QML tasks. QML are all classification tasks including MNIST [8] 4-class (0, 1, 2, 3), 2-class (3 and 6); Fashion [22] 4-class (t-shirt/top, trouser, pullover, dress), 2-class (dress and shirt); Vowel 4-class(hid, hId, had, hOd). MNIST and Fashion 2-class use the front 500 images as the training set and randomly sampled 300 images as the validation set. MNIST, Fashion 4-class uses the front 100 images as the training set and also randomly sampled 300 images as the validation set. The input images are all \\(28 \\times 28\\). We firstly center-crop them to \\(24 \\times 24\\) and"
965
+ }
966
+ ],
967
+ [
968
+ {
969
+ "type": "table_caption",
970
+ "bbox": [
971
+ 0.075,
972
+ 0.09,
973
+ 0.483,
974
+ 0.118
975
+ ],
976
+ "angle": 0,
977
+ "content": "Table 1: Accuracy comparison among different settings. \"Simu.\" represents \"simulation\"."
978
+ },
979
+ {
980
+ "type": "table",
981
+ "bbox": [
982
+ 0.076,
983
+ 0.12,
984
+ 0.485,
985
+ 0.206
986
+ ],
987
+ "angle": 0,
988
+ "content": "<table><tr><td>Method</td><td>Acc.</td><td>MNIST-4 Jarkata</td><td>MNIST-2 Jarkata</td><td>Fashion-4 Manila</td><td>Fashion-2 Santiago</td><td>Vowel-4 Lima</td></tr><tr><td>Classical-Train</td><td>Simu.</td><td>0.61</td><td>0.88</td><td>0.73</td><td>0.89</td><td>0.37</td></tr><tr><td>Classical-Train</td><td></td><td>0.59</td><td>0.79</td><td>0.54</td><td>0.89</td><td>0.31</td></tr><tr><td>QC-Train</td><td>QC</td><td>0.59</td><td>0.83</td><td>0.49</td><td>0.84</td><td>0.34</td></tr><tr><td>QC-Train-PGP</td><td></td><td>0.64</td><td>0.86</td><td>0.57</td><td>0.91</td><td>0.36</td></tr></table>"
989
+ },
990
+ {
991
+ "type": "image",
992
+ "bbox": [
993
+ 0.08,
994
+ 0.207,
995
+ 0.278,
996
+ 0.329
997
+ ],
998
+ "angle": 0,
999
+ "content": null
1000
+ },
1001
+ {
1002
+ "type": "image_caption",
1003
+ "bbox": [
1004
+ 0.171,
1005
+ 0.331,
1006
+ 0.187,
1007
+ 0.343
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": "(a)"
1011
+ },
1012
+ {
1013
+ "type": "image",
1014
+ "bbox": [
1015
+ 0.28,
1016
+ 0.214,
1017
+ 0.482,
1018
+ 0.329
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": null
1022
+ },
1023
+ {
1024
+ "type": "image_caption",
1025
+ "bbox": [
1026
+ 0.374,
1027
+ 0.332,
1028
+ 0.389,
1029
+ 0.343
1030
+ ],
1031
+ "angle": 0,
1032
+ "content": "(b)"
1033
+ },
1034
+ {
1035
+ "type": "image_caption",
1036
+ "bbox": [
1037
+ 0.075,
1038
+ 0.345,
1039
+ 0.483,
1040
+ 0.373
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "Figure 6: Real QC validation accuracy curves on different datasets and different quantum devices."
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "bbox": [
1048
+ 0.074,
1049
+ 0.384,
1050
+ 0.483,
1051
+ 0.453
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "then down-sample them to \\(4 \\times 4\\) for MNIST and Fashion 2 and 4-class tasks. Vowel 4-class uses the front 100 samples as the training set and randomly sampled 300 samples as the validation set. For each sample, we perform principal component analysis (PCA) for the vowel features and take the 10 most significant dimensions."
1055
+ },
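A small sketch of this image pipeline as we understand it (the paper does not specify the down-sampling operator, so average pooling here is an assumption, and the helper name is ours):

```python
import torch.nn.functional as F
import torchvision.transforms.functional as TF

def preprocess_mnist(img):                      # img: float tensor of shape [1, 28, 28]
    img = TF.center_crop(img, [24, 24])         # 28x28 -> 24x24
    img = F.avg_pool2d(img.unsqueeze(0), kernel_size=6).squeeze(0)  # 24x24 -> 4x4 (assumed pooling)
    return img.flatten()                        # 16 values for the rotation-gate encoder
```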
1056
+ {
1057
+ "type": "text",
1058
+ "bbox": [
1059
+ 0.074,
1060
+ 0.453,
1061
+ 0.483,
1062
+ 0.55
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "All the tasks use four logical qubits. To embed classical image and vowel features to the quantum states, we first flatten them and then encode them with rotation gates. For down-sampled \\(4 \\times 4\\) images, we use 4RY, 4RZ, 4RX, and 4RY gates as the encoder. We put the 16 classical input values to the phases of 16 rotation gates, respectively. Therefore we can encode the classical values to quantum states. For 10 vowel features, we use 4RY, 4RZ, and 2RX gates for encoding."
1066
+ },
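To make the encoder layout above easier to follow, here is a minimal sketch that lays the 16 down-sampled pixel values onto the 4RY + 4RZ + 4RX + 4RY rotation gates of a 4-qubit circuit; the `(gate, wire, angle)` tuple representation is ours for illustration rather than the TorchQuantum encoder class.

```python
def build_image_encoder_ops(pixels):            # pixels: 16 classical values from the 4x4 image
    layer_gates = ["ry", "rz", "rx", "ry"]      # one group of 4 gates per layer
    ops = []
    for layer, gate in enumerate(layer_gates):
        for wire in range(4):
            angle = pixels[4 * layer + wire]    # pixel value used as the rotation phase
            ops.append((gate, wire, angle))
    return ops                                   # 16 single-qubit rotations in total
```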
1067
+ {
1068
+ "type": "text",
1069
+ "bbox": [
1070
+ 0.074,
1071
+ 0.55,
1072
+ 0.483,
1073
+ 0.688
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "The encoding gates are our hand-designed circuits. Our circuits are composed of several layers. There are 7 kinds of layers used to construct our circuits. (i) RX layer: Add RX gates to all wires; (ii) RY layer: same structure as in RX layer; (iii) RZ layer: same structure as in RX layer; (iv) RZZ layer: add RZZ gates to all logical adjacent wires and the logical farthest wires to form a ring connection, for example, an RZZ layer in a 4-qubit circuit contains 4 RZZ gates which lie on wires 1 and 2, 2 and 3, 3 and 4, 4 and 1; (v) RXX layer: same structure as in RZZ layer; (vi) RZX layer: same structure as in RZZ layer; (vii) CZ layer: add CZ gates to all logical adjacent wires."
1077
+ },
1078
+ {
1079
+ "type": "text",
1080
+ "bbox": [
1081
+ 0.074,
1082
+ 0.688,
1083
+ 0.483,
1084
+ 0.786
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "For MNIST and Fashion 2-class tasks, the circuit contains 1 RZZ layer followed by 1 RY layer. For MNIST 4-class task, the circuit contains 3 RX+RY+RZ+CZ layers (1 RX layer, 1 RY layer, 1 RZ layer, and 1 CZ layer in series). For Fashion 4-class task, the circuit contains 3 RZZ+RY layers (1 RZZ layer followed by 1 RY layer). For Vowel 4-class task, the circuit contains 2 RZZ+RXX layers (1 RZZ layer followed by 1 RXX layer)."
1088
+ },
1089
+ {
1090
+ "type": "text",
1091
+ "bbox": [
1092
+ 0.074,
1093
+ 0.786,
1094
+ 0.483,
1095
+ 0.868
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "For the output of our quantum circuits, we measure the expectation values on Pauli-Z basis and obtain a value \\([-1, 1]\\) from each qubit. For 2-class, we sum the qubit 0 and 1, 2, and 3 respectively to get 2 output values. For 4-class, we just use the four expectation values as 4 output values. Then we process the output values by Softmax to get probabilities."
1099
+ },
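A short sketch of this readout mapping (the pairing of qubits follows the description above, but the code itself and the helper name are ours):

```python
import torch

def class_probs_from_expectations(expvals, num_classes):
    """expvals: tensor of 4 Pauli-Z expectation values, one per qubit, each in [-1, 1]."""
    if num_classes == 2:
        logits = torch.stack([expvals[0] + expvals[1], expvals[2] + expvals[3]])
    else:                                        # 4-class: one expectation value per class
        logits = expvals
    return torch.softmax(logits, dim=0)          # predicted probabilities for each class
```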
1100
+ {
1101
+ "type": "text",
1102
+ "bbox": [
1103
+ 0.074,
1104
+ 0.868,
1105
+ 0.483,
1106
+ 0.924
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "Quantum devices and compiler configurations. We use IBM quantum computers via qiskit API [7] to submit our circuits to real superconducting quantum devices and achieve quantum on-chip training. We set all the circuits to run 1024 shots."
1110
+ },
1111
+ {
1112
+ "type": "image",
1113
+ "bbox": [
1114
+ 0.516,
1115
+ 0.09,
1116
+ 0.648,
1117
+ 0.207
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": null
1121
+ },
1122
+ {
1123
+ "type": "image",
1124
+ "bbox": [
1125
+ 0.651,
1126
+ 0.09,
1127
+ 0.783,
1128
+ 0.206
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": null
1132
+ },
1133
+ {
1134
+ "type": "image",
1135
+ "bbox": [
1136
+ 0.789,
1137
+ 0.09,
1138
+ 0.921,
1139
+ 0.206
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": null
1143
+ },
1144
+ {
1145
+ "type": "image_caption",
1146
+ "bbox": [
1147
+ 0.514,
1148
+ 0.211,
1149
+ 0.921,
1150
+ 0.238
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "Figure 7: Ablation on pruning ratio, accumulation window width, and pruning window width."
1154
+ },
1155
+ {
1156
+ "type": "text",
1157
+ "bbox": [
1158
+ 0.513,
1159
+ 0.24,
1160
+ 0.922,
1161
+ 0.35
1162
+ ],
1163
+ "angle": 0,
1164
+ "content": "Baseline. We have two baselines. (1) QC-Train: We train our model without gradient pruning, i.e., calculating gradients of every parameter in each step. The gradient calculation is deployed on real quantum circuits. (2) Classical-Train: We train our QNN model completely on classical computers. We use a vector to record the amplitudes of the quantum state, utilize complex matrix multiplication to simulate quantum gates, and sample based on the amplitude vector to simulate quantum measurement."
1165
+ },
1166
+ {
1167
+ "type": "text",
1168
+ "bbox": [
1169
+ 0.513,
1170
+ 0.352,
1171
+ 0.922,
1172
+ 0.421
1173
+ ],
1174
+ "angle": 0,
1175
+ "content": "The QC-Train-PGP line shows training on real quantum circuits while applying our probabilistic gradient pruning. In all the cases, we adopt accumulation window size 1, pruning ratio 0.5, and pruning window size 2, except for Fashion-4, we adopt pruning ratio 0.7, and other settings remain the same."
1176
+ },
1177
+ {
1178
+ "type": "title",
1179
+ "bbox": [
1180
+ 0.515,
1181
+ 0.433,
1182
+ 0.667,
1183
+ 0.446
1184
+ ],
1185
+ "angle": 0,
1186
+ "content": "4.2 Main Results"
1187
+ },
1188
+ {
1189
+ "type": "text",
1190
+ "bbox": [
1191
+ 0.513,
1192
+ 0.451,
1193
+ 0.922,
1194
+ 0.575
1195
+ ],
1196
+ "angle": 0,
1197
+ "content": "QNN results. Table 1 shows the accuracy of comparison on 5 tasks. In each task, we show 4 accuracy values, which are (1) accuracy of Classical-Train tested on classical devices, (2) accuracy of Classical-Train tested on real quantum circuits; (3) accuracy of QC-Train tested on real quantum circuits; (4) accuracy of QC-Train-PGP tested on real quantum circuits. In each task, the accuracy is collected after finishing a certain number of circuit runs. We train and evaluate MNIST-2 and MNIST-2 on ibmq_jakarta, Fashion-4 on ibmq_manila, Fashion-2 on ibmq_santiago, and Vowel-4 on ibmq_lima."
1198
+ },
1199
+ {
1200
+ "type": "text",
1201
+ "bbox": [
1202
+ 0.513,
1203
+ 0.575,
1204
+ 0.922,
1205
+ 0.658
1206
+ ],
1207
+ "angle": 0,
1208
+ "content": "The noise-free accuracy is usually the highest among the other three, because it represents the accuracy without any noise perturbation. The QC-Train-PGP usually takes second place because compared to Classical-Train, it has the advantage of noise awareness, and compared to QC-Train, it suffers less from noise thanks to gradient pruning."
1209
+ },
1210
+ {
1211
+ "type": "text",
1212
+ "bbox": [
1213
+ 0.513,
1214
+ 0.658,
1215
+ 0.922,
1216
+ 0.755
1217
+ ],
1218
+ "angle": 0,
1219
+ "content": "Training curves. Figure 6 shows the real QC validation accuracy curve during training. The X-axis is the number of inferences (how many circuits have been run). The Y-axis is the accuracy of the validation dataset tested on real quantum circuits. MNIST 4-class runs on the ibmq_jakarta machine. We observe that given a fixed inference budget, our QC-Train-PGP achieves the best accuracy of \\(63.7\\%\\) while the Classical-Train only achieves \\(59.3\\%\\)."
1220
+ },
1221
+ {
1222
+ "type": "text",
1223
+ "bbox": [
1224
+ 0.513,
1225
+ 0.755,
1226
+ 0.922,
1227
+ 0.81
1228
+ ],
1229
+ "angle": 0,
1230
+ "content": "We further train Fashion 2-class on ibmq_santiago. QC-Train-PGP only takes \\(13.9\\mathrm{k}\\) inferences to reach the peak accuracy \\(90.7\\%\\) while the best accuracy Classical-Train can achieve is merely \\(88.7\\%\\) at the cost of over \\(30\\mathrm{k}\\) inferences."
1231
+ },
1232
+ {
1233
+ "type": "title",
1234
+ "bbox": [
1235
+ 0.515,
1236
+ 0.822,
1237
+ 0.694,
1238
+ 0.836
1239
+ ],
1240
+ "angle": 0,
1241
+ "content": "4.3 Ablation Studies"
1242
+ },
1243
+ {
1244
+ "type": "text",
1245
+ "bbox": [
1246
+ 0.513,
1247
+ 0.841,
1248
+ 0.922,
1249
+ 0.924
1250
+ ],
1251
+ "angle": 0,
1252
+ "content": "Ablation on gradient pruning. In Figure 7, we evaluate the training performance with different pruning ratios \\( r \\), accumulation window size \\( w_{a} \\), and pruning window size \\( w_{p} \\) on Fashion-4 and MNIST-2 tasks. We find that the \\( r = 0.5 \\) is generally a good setting for our tasks. Overly large pruning ratios will induce too many gradient variances that harm the training convergence. For the accumulation window"
1253
+ }
1254
+ ],
1255
+ [
1256
+ {
1257
+ "type": "table_caption",
1258
+ "bbox": [
1259
+ 0.075,
1260
+ 0.09,
1261
+ 0.482,
1262
+ 0.119
1263
+ ],
1264
+ "angle": 0,
1265
+ "content": "Table 2: The proposed probabilistic pruning is better than deterministic pruning."
1266
+ },
1267
+ {
1268
+ "type": "table",
1269
+ "bbox": [
1270
+ 0.075,
1271
+ 0.12,
1272
+ 0.482,
1273
+ 0.179
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": "<table><tr><td>Method</td><td>MNIST-4</td><td>MNIST-2</td><td>Fashion-4</td><td>Fashion-2</td></tr><tr><td>Deterministic</td><td>0.61</td><td>0.82</td><td>0.72</td><td>0.89</td></tr><tr><td>Probabilistic</td><td>0.62</td><td>0.85</td><td>0.79</td><td>0.90</td></tr></table>"
1277
+ },
1278
+ {
1279
+ "type": "table_caption",
1280
+ "bbox": [
1281
+ 0.075,
1282
+ 0.185,
1283
+ 0.482,
1284
+ 0.214
1285
+ ],
1286
+ "angle": 0,
1287
+ "content": "Table 3: Adam optimizer can outperform SGD and Momentum optimizers."
1288
+ },
1289
+ {
1290
+ "type": "table",
1291
+ "bbox": [
1292
+ 0.076,
1293
+ 0.215,
1294
+ 0.482,
1295
+ 0.29
1296
+ ],
1297
+ "angle": 0,
1298
+ "content": "<table><tr><td>Optimizer</td><td>MNIST-4</td><td>MNIST-2</td><td>Fashion-4</td><td>Fashion-2</td></tr><tr><td>SGD</td><td>0.5</td><td>0.8</td><td>0.45</td><td>76</td></tr><tr><td>Momentum</td><td>0.55</td><td>0.83</td><td>0.66</td><td>0.90</td></tr><tr><td>Adam</td><td>0.61</td><td>0.88</td><td>0.75</td><td>0.91</td></tr></table>"
1299
+ },
1300
+ {
1301
+ "type": "text",
1302
+ "bbox": [
1303
+ 0.074,
1304
+ 0.297,
1305
+ 0.482,
1306
+ 0.394
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": "size, \\( w_{a} = 1 \\) or 2 are suitable choices. When \\( w_{a} \\) is too large, the accumulated gradient magnitudes are similar among all parameters, leading to a nearly uniform sampling distribution. This will bring undifferentiated pruning, and the accuracy will drop as the Fashion-4 curve shows. The pruning window \\( w_{p} \\) should also not be too large. As \\( w_{p} \\) grows, the accumulated gradient magnitudes used to instruct our pruning become less reliable."
1310
+ },
1311
+ {
1312
+ "type": "text",
1313
+ "bbox": [
1314
+ 0.074,
1315
+ 0.394,
1316
+ 0.482,
1317
+ 0.532
1318
+ ],
1319
+ "angle": 0,
1320
+ "content": "Discussion on scalability. Figure 8 shows the superior scalability of quantum on-chip training. Classical simulation runtime exponentially increases as #qubits scales up, while the runtime on real quantum machines scales nearly linearly to #qubits. The classical curve in Figure 8 represents runtime and memory cost of running 50 circuits of different #qubits with 16 rotation gates and 32 RZZ gates. The curve before 22 qubits is measured on a single NVIDIA RTX 2080 Ti GPU; points after 24 qubits are extrapolated. The quantum curve before 27 qubits is tested on ibmq_toronto; the points after 30 qubits are extrapolated."
1321
+ },
1322
+ {
1323
+ "type": "text",
1324
+ "bbox": [
1325
+ 0.074,
1326
+ 0.533,
1327
+ 0.482,
1328
+ 0.602
1329
+ ],
1330
+ "angle": 0,
1331
+ "content": "We can observe clear quantum advantages on circuits with more than 27 qubits. In terms of memory cost, classical simulation consumes thousands of Gigabits for storage which is intractable. In contrast, on quantum machines, the information is stored in the quantum state of the circuit itself with negligible memory cost."
1332
+ },
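As a rough check of this claim (assuming one complex128 amplitude per basis state), a statevector of \(n\) qubits needs \(2^{n} \times 16\) bytes: roughly 16 GB at \(n = 30\) and roughly 16 TB, i.e., thousands of Gigabytes, at \(n = 40\).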
1333
+ {
1334
+ "type": "text",
1335
+ "bbox": [
1336
+ 0.074,
1337
+ 0.602,
1338
+ 0.482,
1339
+ 0.727
1340
+ ],
1341
+ "angle": 0,
1342
+ "content": "Probabilistic vs. deterministic gradient pruning. Our pruning is decided by a random sampler based on the accumulated gradient magnitude. We call this probabilistic pruning. If the sampler only samples the parameters with the biggest accumulated gradient magnitude, this is called deterministic pruning. We adopt probabilistic pruning instead of deterministic pruning because deterministic pruning limits the degree of freedom and increases the gradient sampling bias. Table 2 shows that deterministic pruning has \\(1\\% -7\\%\\) accuracy loss compared with probabilistic pruning."
1343
+ },
1344
+ {
1345
+ "type": "text",
1346
+ "bbox": [
1347
+ 0.074,
1348
+ 0.727,
1349
+ 0.482,
1350
+ 0.824
1351
+ ],
1352
+ "angle": 0,
1353
+ "content": "Different optimizers. Table 3 shows the accuracy tested on classical devices trained with different optimizers. The learning rate is controlled by a cosine scheduler from 0.3 in the beginning to 0.03 in the end. We test SGD, SGD with a momentum factor of 0.8, and Adam on MNIST-4, MNIST-2, Fashion-4, and Fashion-2, and found that Adam always performs the best. Hence, all the experiments are done using Adam optimizers by default."
1354
+ },
1355
+ {
1356
+ "type": "title",
1357
+ "bbox": [
1358
+ 0.075,
1359
+ 0.836,
1360
+ 0.225,
1361
+ 0.85
1362
+ ],
1363
+ "angle": 0,
1364
+ "content": "5 CONCLUSION"
1365
+ },
1366
+ {
1367
+ "type": "text",
1368
+ "bbox": [
1369
+ 0.074,
1370
+ 0.854,
1371
+ 0.485,
1372
+ 0.926
1373
+ ],
1374
+ "angle": 0,
1375
+ "content": "In this work, for the first time, we present an efficient and robust on-chip training framework for PQC and demonstrate its effectiveness on real quantum devices. By leveraging parameter shift, we can calculate the exact quantum gradients directly on quantum machines, thus achieving high scalability. To alleviate the negative impact of"
1376
+ },
1377
+ {
1378
+ "type": "image",
1379
+ "bbox": [
1380
+ 0.518,
1381
+ 0.09,
1382
+ 0.707,
1383
+ 0.2
1384
+ ],
1385
+ "angle": 0,
1386
+ "content": null
1387
+ },
1388
+ {
1389
+ "type": "image",
1390
+ "bbox": [
1391
+ 0.727,
1392
+ 0.09,
1393
+ 0.921,
1394
+ 0.199
1395
+ ],
1396
+ "angle": 0,
1397
+ "content": null
1398
+ },
1399
+ {
1400
+ "type": "image_caption",
1401
+ "bbox": [
1402
+ 0.514,
1403
+ 0.203,
1404
+ 0.924,
1405
+ 0.232
1406
+ ],
1407
+ "angle": 0,
1408
+ "content": "Figure 8: Runtime and memory cost comparison between classical simulation and quantum on-chip run."
1409
+ },
1410
+ {
1411
+ "type": "text",
1412
+ "bbox": [
1413
+ 0.513,
1414
+ 0.233,
1415
+ 0.923,
1416
+ 0.318
1417
+ ],
1418
+ "angle": 0,
1419
+ "content": "quantum noises on gradients, we further propose the probabilistic gradient pruning technique to avoid updating parameters with unreliable gradients. Experimental results on 5 classification tasks and 5 machines demonstrate that QOC achieves comparable accuracy with noise-free simulations. We hope QOC can open an avenue towards practical training of large PQC models for quantum advantage."
1420
+ },
1421
+ {
1422
+ "type": "title",
1423
+ "bbox": [
1424
+ 0.515,
1425
+ 0.328,
1426
+ 0.704,
1427
+ 0.342
1428
+ ],
1429
+ "angle": 0,
1430
+ "content": "ACKNOWLEDGMENT"
1431
+ },
1432
+ {
1433
+ "type": "text",
1434
+ "bbox": [
1435
+ 0.514,
1436
+ 0.346,
1437
+ 0.922,
1438
+ 0.373
1439
+ ],
1440
+ "angle": 0,
1441
+ "content": "We acknowledge NSF CAREER Award #1943349, MIT-IBM Watson AI Lab, Baidu Fellowship, Qualcomm Innovation Fellowship, and IBM Quantum."
1442
+ },
1443
+ {
1444
+ "type": "title",
1445
+ "bbox": [
1446
+ 0.516,
1447
+ 0.382,
1448
+ 0.634,
1449
+ 0.396
1450
+ ],
1451
+ "angle": 0,
1452
+ "content": "REFERENCES"
1453
+ },
1454
+ {
1455
+ "type": "ref_text",
1456
+ "bbox": [
1457
+ 0.522,
1458
+ 0.399,
1459
+ 0.922,
1460
+ 0.42
1461
+ ],
1462
+ "angle": 0,
1463
+ "content": "[1] Jacob Biamonte, Peter Wittek, Nicola Pancotti, Patrick Rebentrost, Nathan Wiebe, and Seth Lloyd. 2017. Quantum machine learning. Nature 549, 7671 (2017)."
1464
+ },
1465
+ {
1466
+ "type": "ref_text",
1467
+ "bbox": [
1468
+ 0.522,
1469
+ 0.42,
1470
+ 0.922,
1471
+ 0.44
1472
+ ],
1473
+ "angle": 0,
1474
+ "content": "[2] Gavin E Crooks. 2019. Gradients of parameterized quantum gates using the parameter-shift rule and gate decomposition. arXiv:1905.13311 (2019)."
1475
+ },
1476
+ {
1477
+ "type": "ref_text",
1478
+ "bbox": [
1479
+ 0.522,
1480
+ 0.44,
1481
+ 0.922,
1482
+ 0.47
1483
+ ],
1484
+ "angle": 0,
1485
+ "content": "[3] Song Han, Huizi Mao, and William J Dally. 2015. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149 (2015)."
1486
+ },
1487
+ {
1488
+ "type": "ref_text",
1489
+ "bbox": [
1490
+ 0.522,
1491
+ 0.47,
1492
+ 0.921,
1493
+ 0.49
1494
+ ],
1495
+ "angle": 0,
1496
+ "content": "[4] Aram W Harrow, Avinatan Hassidim, and Seth Lloyd. 2009. Quantum algorithm for linear systems of equations. Physical review letters 103, 15 (2009), 150502."
1497
+ },
1498
+ {
1499
+ "type": "ref_text",
1500
+ "bbox": [
1501
+ 0.522,
1502
+ 0.491,
1503
+ 0.921,
1504
+ 0.51
1505
+ ],
1506
+ "angle": 0,
1507
+ "content": "[5] Vojtěch Havlíček et al. 2019. Supervised learning with quantum-enhanced feature spaces. Nature 567, 7747 (2019), 209-212."
1508
+ },
1509
+ {
1510
+ "type": "ref_text",
1511
+ "bbox": [
1512
+ 0.522,
1513
+ 0.511,
1514
+ 0.921,
1515
+ 0.54
1516
+ ],
1517
+ "angle": 0,
1518
+ "content": "[6] Cheng-Yun Hsieh, Chen-Hung Wu, Chia-Hsien Huang, His-Sheng Goan, and James Chien Mo Li. 2020. Realistic fault models and fault simulation for quantum dot quantum circuits. In 2020 57th (DAC). IEEE, 1-6."
1519
+ },
1520
+ {
1521
+ "type": "ref_text",
1522
+ "bbox": [
1523
+ 0.522,
1524
+ 0.54,
1525
+ 0.631,
1526
+ 0.551
1527
+ ],
1528
+ "angle": 0,
1529
+ "content": "[7] Qiskit IBM. [n.d.]."
1530
+ },
1531
+ {
1532
+ "type": "ref_text",
1533
+ "bbox": [
1534
+ 0.522,
1535
+ 0.551,
1536
+ 0.921,
1537
+ 0.571
1538
+ ],
1539
+ "angle": 0,
1540
+ "content": "[8] Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner. 1998. Gradient-based learning applied to document recognition. Proc. IEEE 86, 11 (1998), 2278-2324."
1541
+ },
1542
+ {
1543
+ "type": "ref_text",
1544
+ "bbox": [
1545
+ 0.522,
1546
+ 0.571,
1547
+ 0.921,
1548
+ 0.601
1549
+ ],
1550
+ "angle": 0,
1551
+ "content": "[9] Zhiding Liang, Zhepeng Wang, Junhuan Yang, Lei Yang, Yiyu Shi, and Weiwen Jiang. 2021. Can Noise on Qubits Be Learned in Quantum Neural Network? A Case Study on QuantumFlow. In ICCAD. IEEE, 1-7."
1552
+ },
1553
+ {
1554
+ "type": "ref_text",
1555
+ "bbox": [
1556
+ 0.517,
1557
+ 0.601,
1558
+ 0.921,
1559
+ 0.621
1560
+ ],
1561
+ "angle": 0,
1562
+ "content": "[10] Seth Lloyd, Silvano Garnerone, and Paolo Zanardi. 2016. Quantum algorithms for topological and geometric analysis of data. Nature communications 7, 1 (2016)."
1563
+ },
1564
+ {
1565
+ "type": "ref_text",
1566
+ "bbox": [
1567
+ 0.517,
1568
+ 0.621,
1569
+ 0.921,
1570
+ 0.641
1571
+ ],
1572
+ "angle": 0,
1573
+ "content": "[11] Seth Lloyd, Masoud Mohseni, and Patrick Rebentrost. 2013. Quantum algorithms for supervised and unsupervised machine learning. arXiv:1307.0411 (2013)."
1574
+ },
1575
+ {
1576
+ "type": "ref_text",
1577
+ "bbox": [
1578
+ 0.518,
1579
+ 0.641,
1580
+ 0.921,
1581
+ 0.661
1582
+ ],
1583
+ "angle": 0,
1584
+ "content": "[12] Seth Lloyd, Maria Schuld, Aroosa Ijaz, Josh Izaac, and Nathan Killoran. 2020. Quantum embeddings for machine learning. arXiv:2001.03622 (2020)."
1585
+ },
1586
+ {
1587
+ "type": "ref_text",
1588
+ "bbox": [
1589
+ 0.518,
1590
+ 0.661,
1591
+ 0.921,
1592
+ 0.682
1593
+ ],
1594
+ "angle": 0,
1595
+ "content": "[13] Easwar Magesan, Jay M Gambetta, and Joseph Emerson. 2012. Characterizing quantum gates via randomized benchmarking. Physical Review A 85, 4 (2012)."
1596
+ },
1597
+ {
1598
+ "type": "ref_text",
1599
+ "bbox": [
1600
+ 0.518,
1601
+ 0.682,
1602
+ 0.921,
1603
+ 0.702
1604
+ ],
1605
+ "angle": 0,
1606
+ "content": "[14] Kosuke Mitarai, Makoto Negoro, Masahiro Kitagawa, and Keisuke Fujii. 2018. Quantum circuit learning. Physical Review A (2018)."
1607
+ },
1608
+ {
1609
+ "type": "ref_text",
1610
+ "bbox": [
1611
+ 0.518,
1612
+ 0.702,
1613
+ 0.921,
1614
+ 0.722
1615
+ ],
1616
+ "angle": 0,
1617
+ "content": "[15] Le Thanh Nguyen-Meidine et al. 2020. Progressive Gradient Pruning for Classification, Detection and DomainAdaptation. arXiv:1906.08746 [cs.LG]"
1618
+ },
1619
+ {
1620
+ "type": "ref_text",
1621
+ "bbox": [
1622
+ 0.518,
1623
+ 0.722,
1624
+ 0.921,
1625
+ 0.742
1626
+ ],
1627
+ "angle": 0,
1628
+ "content": "[16] John Preskill. 2018. Quantum Computing in the NISQ era and beyond. Quantum 2 (2018), 79."
1629
+ },
1630
+ {
1631
+ "type": "ref_text",
1632
+ "bbox": [
1633
+ 0.518,
1634
+ 0.742,
1635
+ 0.921,
1636
+ 0.772
1637
+ ],
1638
+ "angle": 0,
1639
+ "content": "[17] Hanrui Wang, Yongshan Ding, Jiaqi Gu, Yujun Lin, David Z Pan, Frederic T Chong, and Song Han. 2022. QuantumNAS: Noise-adaptive search for robust quantum circuits. HPCA (2022)."
1640
+ },
1641
+ {
1642
+ "type": "ref_text",
1643
+ "bbox": [
1644
+ 0.518,
1645
+ 0.772,
1646
+ 0.921,
1647
+ 0.802
1648
+ ],
1649
+ "angle": 0,
1650
+ "content": "[18] Hanrui Wang, Jiaqi Gu, Yongshan Ding, Zirui Li, Frederic T Chong, David Z Pan, and Song Han. 2022. QuantumNAT: Quantum Noise-Aware Training with Noise Injection, Quantization and Normalization. DAC (2022)."
1651
+ },
1652
+ {
1653
+ "type": "ref_text",
1654
+ "bbox": [
1655
+ 0.518,
1656
+ 0.802,
1657
+ 0.921,
1658
+ 0.823
1659
+ ],
1660
+ "angle": 0,
1661
+ "content": "[19] Hanrui Wang, Zhekai Zhang, and Song Han. 2021. SpAtten: Efficient sparse attention architecture with cascade token and head pruning. In HPCA. IEEE."
1662
+ },
1663
+ {
1664
+ "type": "ref_text",
1665
+ "bbox": [
1666
+ 0.518,
1667
+ 0.823,
1668
+ 0.921,
1669
+ 0.853
1670
+ ],
1671
+ "angle": 0,
1672
+ "content": "[20] Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Hanrui Wang, Yujun Lin, and Song Han. 2020. Apq: Joint search for network architecture, pruning and quantization policy. In CVPR."
1673
+ },
1674
+ {
1675
+ "type": "ref_text",
1676
+ "bbox": [
1677
+ 0.518,
1678
+ 0.853,
1679
+ 0.921,
1680
+ 0.873
1681
+ ],
1682
+ "angle": 0,
1683
+ "content": "[21] Zhepeng Wang, Zhiding Liang, Shanglin Zhou, et al. 2021. Exploration of Quantum Neural Architecture by Mixing Quantum Neuron Designs. In ICCAD. IEEE."
1684
+ },
1685
+ {
1686
+ "type": "ref_text",
1687
+ "bbox": [
1688
+ 0.518,
1689
+ 0.873,
1690
+ 0.921,
1691
+ 0.893
1692
+ ],
1693
+ "angle": 0,
1694
+ "content": "[22] Han Xiao, Kashif Rasul, and Roland Vollgraf. 2017. Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv:1708.07747 (2017)."
1695
+ },
1696
+ {
1697
+ "type": "ref_text",
1698
+ "bbox": [
1699
+ 0.518,
1700
+ 0.893,
1701
+ 0.921,
1702
+ 0.914
1703
+ ],
1704
+ "angle": 0,
1705
+ "content": "[23] Zhekai Zhang, Hanrui Wang, Song Han, and William J Dally. 2020. SpArch: Efficient architecture for sparse matrix multiplication. In HPCA. IEEE."
1706
+ },
1707
+ {
1708
+ "type": "list",
1709
+ "bbox": [
1710
+ 0.517,
1711
+ 0.399,
1712
+ 0.922,
1713
+ 0.914
1714
+ ],
1715
+ "angle": 0,
1716
+ "content": null
1717
+ }
1718
+ ]
1719
+ ]
2202.13xxx/2202.13239/57c352a6-ea56-4510-913e-a92f5c7acb7d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a5e84000f4280374101d5b0fe903380a2d09b1785cc2257d62688713d3bf5f4
3
+ size 1346529
2202.13xxx/2202.13239/full.md ADDED
@@ -0,0 +1,282 @@
1
+ # QOC: Quantum On-Chip Training with Parameter Shift and Gradient Pruning
2
+
3
+ $^{1}$ Hanrui Wang*, $^{2}$ Zirui Li*, $^{3}$ Jiaqi Gu, $^{4}$ Yongshan Ding, $^{3}$ David Z. Pan, $^{1}$ Song Han $^{1}$ Massachusetts Institute of Technology, $^{2}$ Rutgers University, $^{3}$ University of Texas at Austin, $^{4}$ Yale University
4
+
5
+ https://qmlsys.mit.edu
6
+
7
+ # ABSTRACT
8
+
9
+ Parameterized Quantum Circuits (PQC) are drawing increasing research interest thanks to their potential to achieve quantum advantages on near-term Noisy Intermediate Scale Quantum (NISQ) hardware. In order to achieve scalable PQC learning, the training process needs to be offloaded to real quantum machines instead of using exponential-cost classical simulators. One common approach to obtain PQC gradients is parameter shift, whose cost scales linearly with the number of qubits. We present QOC, the first experimental demonstration of practical on-chip PQC training with parameter shift. Nevertheless, we find that due to the significant quantum errors (noises) on real machines, gradients obtained from naive parameter shift have low fidelity and thus degrade the training accuracy. To this end, we further propose probabilistic gradient pruning to first identify gradients with potentially large errors and then remove them. Specifically, small gradients have larger relative errors than large ones, thus having a higher probability to be pruned. We perform extensive experiments with the Quantum Neural Network (QNN) benchmarks on 5 classification tasks using 5 real quantum machines. The results demonstrate that our on-chip training achieves over $90\%$ and $60\%$ accuracy for 2-class and 4-class image classification tasks. The probabilistic gradient pruning brings up to $7\%$ PQC accuracy improvements over no pruning. Overall, we successfully obtain similar on-chip training accuracy compared with noise-free simulation while achieving much better training scalability. The QOC code is available in the TorchQuantum library.
10
+
11
+ # 1 INTRODUCTION
12
+
13
+ Quantum Computing (QC) has great potential to achieve exponential acceleration over classical computers, which represents a computational paradigm shift in various domains. Parameterized Quantum Circuits (PQC) are circuits containing trainable weights and are promising to achieve quantum advantages in current devices. Among them, Quantum Neural Network (QNN) is one of the popular algorithms for machine learning tasks.
14
+
15
+ In order to achieve PQC quantum advantage, the number of qubits needs to be large enough, which makes the parameter training process highly challenging. In existing PQC work [4, 11], the primary focus has been building quantum models that can outperform classical model accuracy. Thus they typically perform training on classical computers through software simulations and then perform inference with simulators as well (Figure 1 top). Although classical simulation is useful in understanding the capabilities of small-size PQC, it is not
16
+
17
+ ![](images/8a7f3dfeb3b852bc5b6645c6ada98ad6117055dc5e50b84a9d324201b8979f27.jpg)
18
+ Figure 1: In QOC, PQC training and inference are both performed on real quantum machines, making the whole pipeline scalable and practical.
19
+
20
+ scalable due to the exponentially increased time and memory costs ($O(2^n)$, where $n$ is the qubit number). As shown in Figure 2(a), the space (#Regs) and time (#Ops) complexity of classical simulation grow exponentially as the number of qubits increases. To the authors' knowledge, this work is the first experimental demonstration of an efficient and scalable PQC on-chip training protocol. The optimization of parametrized quantum gates is offloaded to the quantum chips with in-situ gradient computation using parameter shift [14]. We also perform PQC evaluation on real quantum machines, making the results more practical, as in Figure 1 bottom.
21
+
22
+ One of the major challenges to enable scalable and efficient PQC on-chip learning is the robustness against quantum noise. In the current Noisy Intermediate Scale Quantum (NISQ) [16] era, the gate error rates on real quantum devices are non-negligible ( $10^{-3}$ to $10^{-2}$ ). In the context of PQC, such errors will lead to noisy gradients which can slow down convergence or even make training unstable. As shown in Figure 2(b), large gaps exist between the quantum on-chip training results and the classical noise-free simulation results.
23
+
24
+ By carefully investigating the on-chip training process, we observe that small gradients tend to have large relative variations or even wrong directions under quantum noise, as shown in Figure 2(c). Also, not all gradient computations are necessary for the training process, especially for small-magnitude gradients. Those observations provide great opportunities for us to boost the robustness and efficiency of PQC on-chip learning. Inspired by that, we propose a probabilistic gradient pruning method to predict and only compute gradients of high reliability. Hence, we can reduce the noise impact and also save the required number of circuit runs on real quantum machines. In this paper, we mainly use QNNs as benchmarks, but the techniques can also be applied to other PQCs such as the Variational Quantum Eigensolver (VQE). QOC has the following contributions:
25
+
26
+ ![](images/27f297d8c53ba3f6d629f4e10784f4aaa0ef927647ae90f87bae6f823ca935f1.jpg)
27
+
28
+ ![](images/51e9b2d6da5a633a0fafc746e416ab2071b3b0149b393009f8a43b9314337a0b.jpg)
29
+
30
+ ![](images/9f2b1b94dffdd6c4f2c5e2ff0b91e125e407b67384e7f3f53fa39fa0f607a58e.jpg)
31
+ (b)
32
+ Figure 2: (a) Classical simulation has unscalable computational and memory costs. (b) Noises create significant accuracy gaps between PQC (QNN) classical simulation and on-chip training. (c) Small gradients suffer from larger relative errors, thus being less reliable.
33
+
34
+ ![](images/92dcaf2cf394b7e876363271f72076654b4c2c8aac7d9fe17406d9342c3f93ce.jpg)
35
+ (c)
36
+
37
+ - We are the first work to demonstrate the practicality of parameter shift on NISQ machines, achieving high PQC learning accuracy.
38
+ - A probabilistic gradient pruning method is proposed to improve the noise robustness by $5 - 7\%$ and reduce the number of inferences on real QC by $2\times$ while maintaining the accuracy.
39
+ - Experimental deployment of QNN on 5 real quantum machines demonstrates that the proposed method can achieve over $90\%$ and $60\%$ accuracy for 2-class and 4-class image recognition tasks. Our framework enables scalable, robust, and efficient training of PQCs with large number of qubits and parameters.
40
+ - We open-source the parameter shift on-chip PQC training and gradient pruning code in the TorchQuantum library.
41
+
42
+ # 2 BACKGROUND
43
+
44
+ Quantum basics. Quantum circuits use quantum bits (qubits) to store information; a qubit state is a linear combination of two basis states: $|\psi \rangle = \alpha |0\rangle + \beta |1\rangle$ , for $\alpha, \beta \in \mathbb{C}$ , satisfying $|\alpha|^2 + |\beta|^2 = 1$ . An $n$ -qubit system can represent a linear combination of $2^n$ basis states. A $2^n$ -length complex state vector of all combination coefficients is used to describe the quantum state. To perform computation on a quantum system, a sequence of parametrized quantum gates is applied to perform unitary transformations on the statevector, i.e., $|\psi(x, \theta)\rangle = \dots U_2(x, \theta_2)U_1(x, \theta_1)|0\rangle$ , where $x$ is the input data, and $\theta = (\theta_1, \theta_2, \ldots)$ are trainable parameters in quantum gates. In this way, input data and trainable parameters are embedded in the quantum state $|\psi(x, \theta)\rangle$ . The computation results are obtained by qubit readout, which measures the probability of a qubit state $|\psi\rangle$ collapsing to either $|0\rangle$ (i.e., output $y = +1$ ) or $|1\rangle$ (i.e., output $y = -1$ ) according to $|\alpha|^2$ and $|\beta|^2$ . With sufficient samples, we can compute the expectation value: $\mathbb{E}[y] = (+1)|\alpha|^2 + (-1)|\beta|^2$ . A non-linear network can be constructed to perform ML tasks by cascading multiple blocks of quantum gates and measurements.
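As a minimal illustration of these basics (a toy numpy sketch of our own, not code from the QOC framework), the snippet below prepares a single-qubit state, applies a parametrized RX gate, and evaluates the Pauli-Z expectation exactly as defined above:

```python
import numpy as np

# |psi> = alpha|0> + beta|1>, starting from |0>.
psi = np.array([1.0 + 0j, 0.0 + 0j])

def rx(theta):
    # RX(theta) = cos(theta/2) I - i sin(theta/2) X (see Eq. 4 below).
    c, s = np.cos(theta / 2), np.sin(theta / 2)
    return np.array([[c, -1j * s], [-1j * s, c]])

psi = rx(0.7) @ psi                      # one parametrized gate U(x, theta)
p0, p1 = np.abs(psi) ** 2                # measurement probabilities |alpha|^2, |beta|^2
expectation = (+1) * p0 + (-1) * p1      # E[y] on the Pauli-Z basis; equals cos(0.7) here
```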
45
+
46
+ Quantum noise. In real quantum computer systems, errors (noises) would occur due to unwanted interactions between qubits, imperfect control signals, or interference from the environment [6]. For example, quantum gates introduce operation errors (e.g., coherent errors and stochastic errors) into the system, and qubits also suffer from decoherence error (spontaneous loss of its stored information)
47
+
48
+ ![](images/79869f87b82d7f8f775b10185bf62d078028dd461d25105d17dd2d4c3689239b.jpg)
49
+ Figure 3: Quantum Neural Network (QNN) architecture.
50
+
51
+ over time. These noisy systems need to be characterized [13] and calibrated [7] frequently to mitigate the noise impact.
52
+
53
+ Quantum neural networks. Quantum Machine Learning (QML) [1, 9, 17, 18, 21] aims to leverage QC techniques to solve machine learning tasks and achieve much higher efficiency. The path to quantum advantage on QML is typically provided by the quantum circuit's ability to generate and estimate highly complex kernels [5], which would otherwise be intractable to compute with conventional computers. QML models have been shown to have potential speed-ups over classical counterparts in various tasks, including metric learning [12] and data analysis [10]. As shown in Figure 3, the quantum neural network is one type of QML model using variational quantum circuits with trainable parameters to accomplish feature encoding of input data and perform complex-valued linear transformations thereafter. Most QNN training is exploratory and relies on classical simulation of small quantum systems. In our work, on the contrary, we explore the practical setting: the QNN training and inference are both performed on real quantum devices.
54
+
55
+ Pruning. Pruning techniques are widely used in the field of DNN [3, 19, 20, 23], playing an important role in the trade-off between accuracy and memory or time cost [15]. Recently, pruning techniques have been used in quantum tasks. Pruning the ansatz can yield time-efficient circuits and even higher performance on real QC [17]. In our work, we apply pruning techniques to prune unreliable gradients in order to mitigate noise during training.
56
+
57
+ # 3 METHODOLOGY
58
+
59
+ To enable PQC on-chip learning, we first introduce an in-situ quantum gradient computation via parameter shift and its real QC implementation. A probabilistic gradient pruning method is proposed to save the gradient computation cost with enhanced noise-robustness and training efficiency. We study QNN as the benchmark PQC.
60
+
61
+ # 3.1 Parameter Shift Rule for Quantum Gradients
62
+
63
+ The parameter shift rule states that we can calculate the gradient of each parameter in some quantum circuits by simply shifting the parameter twice and calculating the difference between the two outputs, without changing the structure of the circuits or using any ancilla qubits. Prior works elaborate on it based on the quantum circuit function [2]; in the next subsection, we will show how the parameter shift rule combined with backpropagation can be used in a real PQC task. Suppose an $m$ -qubit quantum circuit is parametrized by $n$ parameters $\theta = [\theta_{1},\dots ,\theta_{i},\dots ,\theta_{n}]$ ; the expectation value of measurements of this circuit can be represented by a circuit function,
64
+
65
+ $$
66
+ f (\theta) = \left\langle \psi \right| U \left(\theta_ {i}\right) ^ {\dagger} \widehat {Q} U \left(\theta_ {i}\right) | \psi \rangle , \quad f (\theta) \in \mathbb {R} ^ {m}, \theta \in \mathbb {R} ^ {n}. \tag {1}
67
+ $$
68
+
69
+ where $\theta_{i}$ is the scalar parameter whose gradient is to be calculated, and $U(\theta_{i})$ is the gate in which $\theta_{i}$ lies. Here, for notation simplicity, we have already absorbed the unitaries before $U(\theta_{i})$ into $\langle \psi |,\left|\psi \right\rangle$. Unitaries after $U(\theta_{i})$ and observables are fused into $\widehat{Q}$. Usually, the
70
+
71
+ gates used in PQC can be written in the form $U(\theta_i) = e^{-\frac{i}{2}\theta_iH}$ . Here $H$ is the Hermitian generator of $U$ with only 2 unique eigenvalues +1 and -1 ( $H$ 's eigenvalues can be $\pm r$ , but for simplicity we assume they are $\pm 1$ ). In this way, the gradients of the circuit function $f$ with respect to $\theta_i$ are,
72
+
73
+ $$
74
+ \frac {\partial f (\theta)}{\partial \theta_ {i}} = \frac {1}{2} \left(f \left(\theta_ {+}\right) - f \left(\theta_ {-}\right)\right),
75
+ $$
76
+
77
+ $$
78
+ \theta_ {+} = \left[ \theta_ {1}, \dots , \theta_ {i} + \frac {\pi}{2}, \dots , \theta_ {n} \right], \theta_ {-} = \left[ \theta_ {1}, \dots , \theta_ {i} - \frac {\pi}{2}, \dots , \theta_ {n} \right], \tag {2}
79
+ $$
80
+
81
+ where $\theta_{+}$ and $\theta_{-}$ are the positive shift and negative shift of $\theta$ . Note that this parameter shift rule is fundamentally different from any numerical difference methods that only approximate the directional derivatives. Instead, Eq. 2 calculates the exact gradient w.r.t $\theta_{i}$ without any approximation errors or numerical issues.
82
+
83
+ We apply softmax on the expectation values of measurements $f(\theta)$ as the predicted probability for each class. Then we calculate the cross entropy between the predicted probability distribution $p$ and the target distribution $t$ as the classification loss $\mathcal{L}$ ,
84
+
85
+ $$
86
+ \mathcal{L}(\theta) = - t^{T} \cdot \log \operatorname{softmax}(f(\theta)) = - \sum_{j=1}^{m} t_{j} \log p_{j}, \quad p_{j} = \frac{e^{f_{j}(\theta)}}{\sum_{k=1}^{m} e^{f_{k}(\theta)}}. \tag{3}
87
+ $$
88
+
89
+ Then the gradient of the loss function with respect to $\theta_{i}$ is $\frac{\partial\mathcal{L}(\theta)}{\partial\theta_i} = \left(\frac{\partial\mathcal{L}(\theta)}{\partial f(\theta)}\right)^T\frac{\partial f(\theta)}{\partial\theta_i}$ .
90
+
91
+ Here $\frac{\partial f(\theta)}{\partial\theta_i}$ can be calculated on a real quantum circuit via the parameter shift rule, while $\frac{\partial\mathcal{L}(\theta)}{\partial f(\theta)}$ can be efficiently calculated on classical devices using backpropagation with automatic differentiation frameworks such as PyTorch and TensorFlow.
92
+
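+ As a minimal sketch of this classical part (assuming PyTorch; the tensors and values are illustrative, not from the released code), the down-stream gradient $\frac{\partial\mathcal{L}(\theta)}{\partial f(\theta)}$ can be obtained by treating the measured expectation values as a leaf tensor:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ # measured expectation values f(theta), one per qubit/class, used as logits
+ f_theta = torch.tensor([0.12, -0.40, 0.05, 0.31], requires_grad=True)
+ target = torch.tensor([2])                              # ground-truth class index
+
+ loss = F.cross_entropy(f_theta.unsqueeze(0), target)    # softmax + cross entropy, Eq. 3
+ loss.backward()                                         # backpropagate from the loss to the logits
+
+ dL_df = f_theta.grad                                    # dL/df, later combined with the Jacobian
+ # sanity check: for cross entropy, dL/df = softmax(f) - one_hot(target)
+ ```
+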
93
+ Now we derive the parameter shift rule used in our PQC models.
94
+
95
+ Assume $U(\theta_i) = R_X(\theta_i), R_X(\alpha) = e^{-\frac{i}{2}\alpha X}$ , where $X$ is the Pauli-X matrix.
96
+
97
+ Firstly, the RX gate is,
98
+
99
+ $$
100
+ \begin{array}{l} R _ {X} (\alpha) = e ^ {- \frac {i}{2} \alpha X} = \sum_ {k = 0} ^ {\infty} (- i \alpha / 2) ^ {k} X ^ {k} / k! \\ = \sum_ {k = 0} ^ {\infty} (- i \alpha / 2) ^ {2 k} X ^ {2 k} / (2 k)! + \sum_ {k = 0} ^ {\infty} (- i \alpha / 2) ^ {2 k + 1} X ^ {2 k + 1} / (2 k + 1)! \\ = \sum_ {k = 0} ^ {\infty} (- 1) ^ {k} (\alpha / 2) ^ {2 k} I / (2 k)! - i \sum_ {k = 0} ^ {\infty} (- 1) ^ {k} (\alpha / 2) ^ {2 k + 1} X / (2 k + 1)! \\ = \cos (\alpha / 2) I - i \sin (\alpha / 2) X. \tag {4} \\ \end{array}
101
+ $$
102
+
103
+ $$
104
+ \text{Let } \alpha = \pm \frac{\pi}{2}: \quad R_{X}\left(\pm \frac{\pi}{2}\right) = \frac{1}{\sqrt{2}} (I \mp iX).
105
+ $$
106
+
107
+ As $f(\theta) = \langle \psi |R_X(\theta_i)^\dagger \widehat{Q} R_X(\theta_i)|\psi \rangle ,R_X(\alpha)R_X(\beta) = R_X(\alpha +\beta),$
108
+
109
+ and $\frac{\partial}{\partial\alpha} R_X(\alpha) = -\frac{i}{2} XR_X(\alpha)$ , we have
110
+
111
+ $$
112
+ \begin{array}{l} \frac {\partial f (\theta)}{\partial \theta_ {i}} = \langle \psi | R _ {X} (\theta_ {i}) ^ {\dagger} (- \frac {i}{2} X) ^ {\dagger} \widehat {Q} R _ {X} (\theta_ {i}) | \psi \rangle + \langle \psi | R _ {X} (\theta_ {i}) ^ {\dagger} \widehat {Q} (- \frac {i}{2} X) R _ {X} (\theta_ {i}) | \psi \rangle \\ = \frac {1}{4} \left(\langle \psi | R _ {X} \left(\theta_ {i}\right) ^ {\dagger} (I - i X) ^ {\dagger} \widehat {Q} (I - i X) R _ {X} \left(\theta_ {i}\right) | \psi \rangle \right. \\ - \langle \psi | R _ {X} (\theta_ {i}) ^ {\dagger} (I + i X) ^ {\dagger} \widehat {Q} (I + i X) R _ {X} (\theta_ {i}) | \psi \rangle) \\ = \frac {1}{2} \left(\langle \psi | R _ {X} (\theta_ {i}) ^ {\dagger} R _ {X} (\frac {\pi}{2}) ^ {\dagger} \widehat {Q} R _ {X} (\frac {\pi}{2}) R _ {X} (\theta_ {i}) | \psi \rangle \right. \\ - \langle \psi | R _ {X} (\theta_ {i}) ^ {\dagger} R _ {X} (- \frac {\pi}{2}) ^ {\dagger} \widehat {Q} R _ {X} (- \frac {\pi}{2}) R _ {X} (\theta_ {i}) | \psi \rangle) \\ = \frac {1}{2} \left(f \left(\theta_ {+}\right) - f \left(\theta_ {-}\right)\right). \tag {5} \\ \end{array}
113
+ $$
114
+
115
+ Without loss of generality, the derivation holds for all unitaries of the form $e^{-\frac{i}{2}\alpha H}$ , e.g., RX, RY, RZ, XX, YY, ZZ, where $H$ is a Hermitian matrix with only 2 unique eigenvalues +1 and -1.
116
+
117
+ ![](images/d920412fd3afaa79a9f44b2c88621943dcff72106784935c224cc16d32ba2fce.jpg)
118
+ Figure 4: Quantum gradient calculation using the parameter shift rule on real quantum devices.
119
+
120
+ In our circuit functions, we assume each parameter lies in exactly one gate. However, there are cases where one parameter appears in multiple gates. In that case, we calculate the gradient of the parameter in each of those gates separately and sum them up to obtain the gradient of that parameter.
121
+
122
+ # 3.2 In-situ Gradient Computation on Real QC
123
+
124
+ To realize PQC on-chip learning, we implement a TrainingEngine, described in Alg. 1. This TrainingEngine contains three parts.
125
+
126
+ Jacobian calculation via parameter shift. In the first part, we sample a mini-batch of training data $\mathcal{I}$ in Line 6. For each example of the mini-batch, we set up the quantum encoder gates and then iteratively evaluate gradients for all parameters. In each iteration, we shift the parameter $\theta_{i}$ twice by $+ \pi /2$ and $-\pi /2$ respectively. After each shift, we execute the shifted circuit on quantum hardware. The circuit will be created, validated, queued, and finally run on real quantum machines. As soon as we get the returned results of the two shifted circuits, i.e., $f(\theta_{+})$ and $f(\theta_{-})$ , we apply Eq. 2 to obtain the upstream gradient $\frac{\partial f(\theta)}{\partial \theta_i}$ , illustrated in the left part of Figure 4.
127
+
128
+ Finally, we obtain the Jacobian matrix $\frac{\partial f(\theta)}{\partial\theta}$ .
129
+
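+ A possible sketch of this Jacobian loop is shown below. Here `run_circuit(params)` is a hypothetical helper (an assumption of this sketch, e.g., wrapping circuit submission through Qiskit) that returns the $m$ measured expectation values for a given parameter vector.
+
+ ```python
+ import numpy as np
+
+ def parameter_shift_jacobian(run_circuit, params):
+     """Build df(theta)/dtheta column by column via the parameter shift rule."""
+     f_ref = run_circuit(params)                  # one reference run to get the m outputs
+     m, n = len(f_ref), len(params)
+     jacobian = np.zeros((m, n))
+     for i in range(n):
+         shifted = params.copy()
+         shifted[i] = params[i] + np.pi / 2       # positive shift
+         f_plus = run_circuit(shifted)
+         shifted[i] = params[i] - np.pi / 2       # negative shift
+         f_minus = run_circuit(shifted)
+         jacobian[:, i] = 0.5 * (f_plus - f_minus)   # Eq. 2, exact gradient of each output
+     return jacobian                              # shape (m, n)
+ ```
+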
130
+ Down-stream gradient backpropagation. In the second part, we run the circuit without shifts and obtain the measurement result $f(\theta)$ . We then apply the softmax and cross-entropy functions to the measured logits to obtain the training loss $\mathcal{L}(\theta)$ . Finally, we run backpropagation only from the loss to the logits to get the down-stream gradients $\frac{\partial\mathcal{L}(\theta)}{\partial f(\theta)}$ , shown in the right part of Figure 4.
131
+
132
+ Gradient calculation. In the third part, we calculate the dot-product between the down-stream gradients and the Jacobian to obtain the final gradients $\frac{\partial\mathcal{L}(\theta)}{\partial\theta} = \left(\frac{\partial f(\theta)}{\partial\theta}\right)^T\frac{\partial\mathcal{L}(\theta)}{\partial f(\theta)}$ .
133
+
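+ A minimal sketch of this combination step (assumed; the Jacobian comes from the parameter-shift loop and the down-stream gradient from the classical backpropagation above):
+
+ ```python
+ import numpy as np
+
+ def full_gradient(jacobian, dL_df):
+     # dL/dtheta = (df/dtheta)^T dL/df, the dot-product described above
+     # jacobian: (m, n) array from the parameter-shift loop;
+     # dL_df: length-m array, e.g. f_theta.grad.numpy() from the classical part
+     return jacobian.T @ dL_df                    # shape (n,)
+
+ # plain gradient-descent update; in practice the gradient is handed to an
+ # optimizer such as Adam (see Section 4.3):
+ # theta = theta - lr * full_gradient(jacobian, dL_df)
+ ```
+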
134
+ # 3.3 Probabilistic Quantum Gradient Pruning
135
+
136
+ On quantum chips, various noises and errors can diminish the fidelity of the computation results. When the gradient magnitude is small, noise can easily overwhelm the signal, so gradients calculated on real quantum circuits become unreliable. Such unreliable gradients harm training convergence, and skipping their evaluation benefits both training convergence and efficiency. Besides, we observe that for most parameters, if the gradient magnitudes are far from zero for several
137
+
138
+ ![](images/7b71244f4229b946a384c54fe381ac936e7270ea771daeb758c425a1ff83505b.jpg)
139
+ Figure 5: Efficient on-chip quantum gradient calculation with probabilistic gradient pruning. Gradient magnitudes are accumulated within the accumulation window and used as the sampling distribution. Based on the distribution, gradients are probabilistically pruned with a ratio $r$ in the pruning window to mitigate noises and stabilize training.
140
+
141
+ steps, they will likely remain far from zero in the next several steps. Similarly, if the gradient magnitude remains small for some steps, it will likely stay small in the next several steps. This means the gradient reliability is predictable to some extent. Therefore, we propose the gradient pruning method, which samples the parameters whose gradients are more reliable. This helps training converge faster while also saving time by skipping the evaluation of unreliable gradients.
142
+
143
+ Alg. 1 describes the PQC on-chip training flow with probabilistic gradient pruning. We divide all the training steps into $S$ stages and perform the pruning method periodically in each stage. Every stage is split into two phases, shown in Figure 5. The first phase is magnitude accumulation with an accumulation window of width $w_{a}$ ; the second is probabilistic gradient pruning (PGP) with a pruning window of width $w_{p}$ . We only apply pruning in the second phase, where the parameter subset $\tilde{\theta} = \{\theta_i \sim P_M(\theta) | 1 \leq i \leq (1 - r)n\}$ is sampled from a probability distribution based on the gradient information collected within the accumulation window.
144
+
145
+ In Lines 4-9, within the accumulation window, we record the gradient magnitude of each parameter at each step and accumulate them until the window ends. At the end of the first phase, we obtain an accumulator $M$ that records the accumulated gradient magnitude of each parameter. When the pruning phase starts, we normalize the accumulated gradient magnitudes and pass them to our sampler as the sampling distribution. In each pruning step, the sampler samples a subset of parameters $\tilde{\theta}$ with a pruning ratio of $r$ , and we only evaluate gradients for this subset while the rest $\theta \backslash \tilde{\theta}$ is temporarily frozen.
146
+
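+ The following is a simplified sketch of one such stage (ours, not the released implementation). `evaluate_gradient(theta, batch, indices)`, which returns the parameter-shift gradients only for the listed parameter indices, and the `batches` iterator are hypothetical helpers assumed for this sketch.
+
+ ```python
+ import numpy as np
+
+ def run_stage(theta, batches, evaluate_gradient, lr=0.05, w_a=1, w_p=2, r=0.5):
+     n = len(theta)
+     magnitude = np.zeros(n)                      # gradient magnitude accumulator M
+
+     # Phase 1: accumulation window -- evaluate all gradients and accumulate magnitudes
+     for _ in range(w_a):
+         batch = next(batches)
+         grad = evaluate_gradient(theta, batch, np.arange(n))
+         theta -= lr * grad
+         magnitude += np.abs(grad)
+
+     # Phase 2: pruning window -- sample (1 - r) * n parameters per step from the
+     # normalized accumulated magnitudes; the rest stay temporarily frozen
+     prob = magnitude / magnitude.sum()
+     keep = int(round((1 - r) * n))
+     for _ in range(w_p):
+         batch = next(batches)
+         kept_idx = np.random.choice(n, size=keep, replace=False, p=prob)
+         grad = evaluate_gradient(theta, batch, kept_idx)   # gradients of kept parameters only
+         theta[kept_idx] -= lr * grad
+     return theta
+ ```
+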
147
+ There are three important hyper-parameters in our gradient pruning method: 1) accumulation window width $w_{a}$ , 2) pruning ratio $r$ , and 3) pruning window width $w_{p}$ . The accumulation window width and pruning window width determine the reliability of the gradient trend estimation and our confidence in it, respectively. The pruning ratio can be tuned to balance the gradient variance caused by noise perturbation against that caused by pruning. The fraction of time saved by our probabilistic gradient pruning method is $r\frac{w_p}{w_a + w_p}\times 100\%$ ; for example, with $w_a = 1$ , $w_p = 2$ , and $r = 0.5$ , about $33\%$ of the gradient evaluations are skipped. In our experiments, we find that the setting $(w_{a} = 1,w_{p} = 2\sim 3,r = 0.3\sim 0.5)$ usually works well in all cases.
148
+
149
+ # 4 EXPERIMENTS
150
+
151
+ In this section, we deploy our PQC on-chip learning framework on real QC and evaluate it on 5 QNN tasks for image and vowel recognition. Compared with classical QNN training protocols, we can
152
+
153
+ Algorithm 1: PQC On-Chip Training with Probabilistic Gradient Pruning
154
+ Input: Accumulation window width $w_{a}$ , gradient pruning ratio $r$ , pruning window width $w_{p}$ , training objective $\mathcal{L}$ , initial parameters $\theta^0 \in \mathbb{R}^n$ , training data $\mathcal{D}_{trn}$ , initial step size $\eta^0$ , and total stages $S$ . Initialize $\theta \gets \theta^0$ , $\eta \gets \eta^0$ , $t \gets 0$ ;
155
+ for $s = 1,2,\dots,S$ do
156
+ Initialize gradient magnitude accumulator $M \gets 0^n$ ;
157
+ for $\tau_{a} = 1,2,\dots,w_{a}$ do
158
+ $t \gets t + 1$ ;
159
+ Sample a mini-batch $\mathcal{I} \sim \mathcal{D}_{trn}$ ;
160
+ In-situ gradient evaluation via parameter shift: $\nabla_{\theta} \mathcal{L}_{\mathcal{I}}(\theta) = \left(\frac{\partial f(\theta)}{\partial \theta}\right)^T \frac{\partial \mathcal{L}(\theta)}{\partial f(\theta)}$ ;
161
+ Parameter update: $\theta \gets \theta - \eta \nabla_{\theta} \mathcal{L}_{\mathcal{I}}(\theta)$ ;
162
+ Update magnitude accumulator $M \gets M + |\nabla_{\theta} \mathcal{L}_{\mathcal{I}}(\theta)|$ ;
163
+ for $\tau_{p} = 1,2,\dots,w_{p}$ do
164
+ $t \gets t + 1$ ;
165
+ Sample a mini-batch $\mathcal{I} \sim \mathcal{D}_{trn}$ ;
166
+ Sample a subset with a ratio $r$ based on accumulated gradient magnitude:
167
+ $\tilde{\theta} = \{\theta_i \sim P_M(\theta) | 1 \leq i \leq (1-r)n\}$ ;
168
+ $\tilde{\theta} \gets \tilde{\theta} - \eta \nabla_{\tilde{\theta}} \mathcal{L}_{\mathcal{I}}(\theta)$ ;
169
+ Output: Converged parameters $\theta$
170
+
171
+ achieve a $2 - 4\%$ real QC test accuracy improvement with a $2\times$ convergence speedup. We also conduct extensive ablation studies to validate the scalability of our framework and the effectiveness of the proposed probabilistic gradient pruning method.
172
+
173
+ # 4.1 Experiment Setups
174
+
175
+ Benchmarks. We conduct our experiments on 5 QML tasks, all of which are classification tasks: MNIST [8] 4-class (0, 1, 2, 3) and 2-class (3 and 6); Fashion [22] 4-class (t-shirt/top, trouser, pullover, dress) and 2-class (dress and shirt); Vowel 4-class (hid, hId, had, hOd). The MNIST and Fashion 2-class tasks use the first 500 images as the training set and 300 randomly sampled images as the validation set. The MNIST and Fashion 4-class tasks use the first 100 images as the training set and also 300 randomly sampled images as the validation set. The input images are all $28 \times 28$ . We first center-crop them to $24 \times 24$ and
176
+
177
+ Table 1: Accuracy comparison among different settings. "Simu." represents "simulation".
178
+
179
+ <table><tr><td>Method</td><td>Acc.</td><td>MNIST-4 Jakarta</td><td>MNIST-2 Jakarta</td><td>Fashion-4 Manila</td><td>Fashion-2 Santiago</td><td>Vowel-4 Lima</td></tr><tr><td>Classical-Train</td><td>Simu.</td><td>0.61</td><td>0.88</td><td>0.73</td><td>0.89</td><td>0.37</td></tr><tr><td>Classical-Train</td><td>QC</td><td>0.59</td><td>0.79</td><td>0.54</td><td>0.89</td><td>0.31</td></tr><tr><td>QC-Train</td><td>QC</td><td>0.59</td><td>0.83</td><td>0.49</td><td>0.84</td><td>0.34</td></tr><tr><td>QC-Train-PGP</td><td>QC</td><td>0.64</td><td>0.86</td><td>0.57</td><td>0.91</td><td>0.36</td></tr></table>
180
+
181
+ ![](images/6dc9df553ec0022527cd548afb03ea5e91d37327c1c3c07294e670e00977b875.jpg)
182
+ (a)
183
+ Figure 6: Real QC validation accuracy curves on different datasets and different quantum devices.
184
+
185
+ ![](images/9b4e5f35cdae9569e43076fd8d83eca650ba3b00c3048cd2a1df8c99d0c51902.jpg)
186
+ (b)
187
+
188
+ then down-sample them to $4 \times 4$ for the MNIST and Fashion 2- and 4-class tasks. The Vowel 4-class task uses the first 100 samples as the training set and 300 randomly sampled samples as the validation set. For each sample, we perform principal component analysis (PCA) on the vowel features and keep the 10 most significant dimensions.
189
+
190
+ All the tasks use four logical qubits. To embed classical image and vowel features into quantum states, we first flatten them and then encode them with rotation gates. For the down-sampled $4 \times 4$ images, we use 4 RY, 4 RZ, 4 RX, and 4 RY gates as the encoder, putting the 16 classical input values into the phases of the 16 rotation gates, respectively; this encodes the classical values into quantum states. For the 10 vowel features, we use 4 RY, 4 RZ, and 2 RX gates for encoding.
191
+
192
+ Following the encoding gates, our trainable circuits are hand-designed and composed of several layers. There are 7 kinds of layers used to construct our circuits: (i) RX layer: add RX gates to all wires; (ii) RY layer: same structure as the RX layer; (iii) RZ layer: same structure as the RX layer; (iv) RZZ layer: add RZZ gates to all logically adjacent wires and to the two logically farthest wires to form a ring connection; for example, an RZZ layer in a 4-qubit circuit contains 4 RZZ gates, on wires 1 and 2, 2 and 3, 3 and 4, and 4 and 1; (v) RXX layer: same structure as the RZZ layer; (vi) RZX layer: same structure as the RZZ layer; (vii) CZ layer: add CZ gates to all logically adjacent wires.
193
+
194
+ For the MNIST and Fashion 2-class tasks, the circuit contains 1 RZZ layer followed by 1 RY layer. For the MNIST 4-class task, the circuit contains 3 RX+RY+RZ+CZ blocks (each with 1 RX layer, 1 RY layer, 1 RZ layer, and 1 CZ layer in series). For the Fashion 4-class task, the circuit contains 3 RZZ+RY blocks (each with 1 RZZ layer followed by 1 RY layer). For the Vowel 4-class task, the circuit contains 2 RZZ+RXX blocks (each with 1 RZZ layer followed by 1 RXX layer).
195
+
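+ As a framework-agnostic illustration of these layer structures (a sketch with hypothetical helper names, not the paper's code), the gate-to-wire assignments can be generated as follows:
+
+ ```python
+ N_WIRES = 4
+
+ def single_qubit_layer(gate):
+     # RX / RY / RZ layer: one rotation gate on every wire
+     return [(gate, (w,)) for w in range(N_WIRES)]
+
+ def ring_two_qubit_layer(gate):
+     # RZZ / RXX / RZX layer: gates on logically adjacent wires plus the farthest
+     # pair, forming a ring: (0,1), (1,2), (2,3), (3,0)
+     return [(gate, (w, (w + 1) % N_WIRES)) for w in range(N_WIRES)]
+
+ # e.g., the MNIST/Fashion 2-class ansatz: 1 RZZ layer followed by 1 RY layer
+ ansatz = ring_two_qubit_layer("rzz") + single_qubit_layer("ry")
+ ```
+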
196
+ For the output of our quantum circuits, we measure the expectation values on the Pauli-Z basis and obtain a value in $[-1, 1]$ from each qubit. For 2-class tasks, we sum the expectation values of qubits 0 and 1, and of qubits 2 and 3, respectively, to get 2 output values. For 4-class tasks, we directly use the four expectation values as the 4 output values. We then apply Softmax to the output values to obtain probabilities.
197
+
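+ A small sketch of this measurement post-processing (assuming PyTorch; the expectation values are made-up numbers for illustration):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ expvals = torch.tensor([0.32, -0.15, 0.07, -0.51])   # <Z> of qubits 0..3, each in [-1, 1]
+
+ # 2-class: sum qubits (0, 1) and qubits (2, 3) into two logits
+ logits_2class = torch.stack([expvals[0] + expvals[1], expvals[2] + expvals[3]])
+ probs_2class = F.softmax(logits_2class, dim=0)
+
+ # 4-class: use the four expectation values directly as logits
+ probs_4class = F.softmax(expvals, dim=0)
+ ```
+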
198
+ Quantum devices and compiler configurations. We use IBM quantum computers via the Qiskit API [7] to submit our circuits to real superconducting quantum devices and achieve quantum on-chip training. All circuits are run with 1024 shots.
199
+
200
+ ![](images/11175589a8bbca9222647d57c9d8ce68c9d366f2aa368b71994613d8148dc5e3.jpg)
201
+ Figure 7: Ablation on pruning ratio, accumulation window width, and pruning window width.
202
+
203
+ ![](images/6ad97fb2fbe9faba587bf682a69612f0e987226a1128335905ee091ab2f1ac51.jpg)
204
+
205
+ ![](images/0259719eeafb9b7f427410b9a973bfe620bdb12843a2922dae38bddd29810452.jpg)
206
+
207
+ Baseline. We have two baselines. (1) QC-Train: We train our model without gradient pruning, i.e., calculating gradients of every parameter in each step. The gradient calculation is deployed on real quantum circuits. (2) Classical-Train: We train our QNN model completely on classical computers. We use a vector to record the amplitudes of the quantum state, utilize complex matrix multiplication to simulate quantum gates, and sample based on the amplitude vector to simulate quantum measurement.
208
+
209
+ The QC-Train-PGP line shows training on real quantum circuits while applying our probabilistic gradient pruning. In all cases, we adopt an accumulation window size of 1, a pruning ratio of 0.5, and a pruning window size of 2, except for Fashion-4, where we adopt a pruning ratio of 0.7 and keep the other settings the same.
210
+
211
+ # 4.2 Main Results
212
+
213
+ QNN results. Table 1 shows the accuracy comparison on the 5 tasks. For each task, we report 4 accuracy values: (1) Classical-Train tested on classical devices; (2) Classical-Train tested on real quantum circuits; (3) QC-Train tested on real quantum circuits; (4) QC-Train-PGP tested on real quantum circuits. In each task, the accuracy is collected after a fixed number of circuit runs. We train and evaluate MNIST-4 and MNIST-2 on ibmq_jakarta, Fashion-4 on ibmq_manila, Fashion-2 on ibmq_santiago, and Vowel-4 on ibmq_lima.
214
+
215
+ The noise-free simulation accuracy is usually the highest of the four because it is free of any noise perturbation. QC-Train-PGP usually takes second place because, compared with Classical-Train, it has the advantage of noise awareness, and compared with QC-Train, it suffers less from noise thanks to gradient pruning.
216
+
217
+ Training curves. Figure 6 shows the real QC validation accuracy curves during training. The X-axis is the number of inferences (how many circuits have been run); the Y-axis is the accuracy on the validation dataset tested on real quantum circuits. MNIST 4-class runs on the ibmq_jakarta machine. Given a fixed inference budget, our QC-Train-PGP achieves the best accuracy of $63.7\%$ , while Classical-Train only achieves $59.3\%$ .
218
+
219
+ We further train Fashion 2-class on ibmq_santiago. QC-Train-PGP takes only $13.9\mathrm{k}$ inferences to reach its peak accuracy of $90.7\%$ , while the best accuracy Classical-Train can achieve is merely $88.7\%$ , at the cost of over $30\mathrm{k}$ inferences.
220
+
221
+ # 4.3 Ablation Studies
222
+
223
+ Ablation on gradient pruning. In Figure 7, we evaluate the training performance with different pruning ratios $r$ , accumulation window sizes $w_{a}$ , and pruning window sizes $w_{p}$ on the Fashion-4 and MNIST-2 tasks. We find that $r = 0.5$ is generally a good setting for our tasks. An overly large pruning ratio induces too much gradient variance, which harms training convergence. For the accumulation window
224
+
225
+ Table 2: The proposed probabilistic pruning is better than deterministic pruning.
226
+
227
+ <table><tr><td>Method</td><td>MNIST-4</td><td>MNIST-2</td><td>Fashion-4</td><td>Fashion-2</td></tr><tr><td>Deterministic</td><td>0.61</td><td>0.82</td><td>0.72</td><td>0.89</td></tr><tr><td>Probabilistic</td><td>0.62</td><td>0.85</td><td>0.79</td><td>0.90</td></tr></table>
228
+
229
+ Table 3: Adam optimizer can outperform SGD and Momentum optimizers.
230
+
231
+ <table><tr><td>Optimizer</td><td>MNIST-4</td><td>MNIST-2</td><td>Fashion-4</td><td>Fashion-2</td></tr><tr><td>SGD</td><td>0.5</td><td>0.8</td><td>0.45</td><td>0.76</td></tr><tr><td>Momentum</td><td>0.55</td><td>0.83</td><td>0.66</td><td>0.90</td></tr><tr><td>Adam</td><td>0.61</td><td>0.88</td><td>0.75</td><td>0.91</td></tr></table>
232
+
233
+ size, $w_{a} = 1$ or 2 is a suitable choice. When $w_{a}$ is too large, the accumulated gradient magnitudes become similar across all parameters, leading to a nearly uniform sampling distribution. This results in undifferentiated pruning, and the accuracy drops, as the Fashion-4 curve shows. The pruning window $w_{p}$ should also not be too large: as $w_{p}$ grows, the accumulated gradient magnitudes used to guide our pruning become less reliable.
234
+
235
+ Discussion on scalability. Figure 8 shows the superior scalability of quantum on-chip training. The classical simulation runtime increases exponentially as #qubits scales up, while the runtime on real quantum machines scales nearly linearly with #qubits. The classical curve in Figure 8 shows the runtime and memory cost of running 50 circuits with 16 rotation gates and 32 RZZ gates at different #qubits. The curve before 22 qubits is measured on a single NVIDIA RTX 2080 Ti GPU; the points after 24 qubits are extrapolated. The quantum curve before 27 qubits is measured on ibmq_toronto; the points after 30 qubits are extrapolated.
236
+
237
+ We can observe clear quantum advantages for circuits with more than 27 qubits. In terms of memory cost, classical simulation consumes thousands of gigabytes of storage, which is intractable. In contrast, on quantum machines, the information is stored in the quantum state of the circuit itself with negligible memory cost.
238
+
239
+ Probabilistic vs. deterministic gradient pruning. Our pruning is decided by a random sampler based on the accumulated gradient magnitudes; we call this probabilistic pruning. If the sampler instead always selects the parameters with the largest accumulated gradient magnitudes, we call it deterministic pruning. We adopt probabilistic pruning because deterministic pruning limits the degrees of freedom and increases the gradient sampling bias. Table 2 shows that deterministic pruning suffers a $1\% -7\%$ accuracy loss compared with probabilistic pruning.
240
+
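+ The difference between the two selection strategies can be sketched in a few lines (assuming PyTorch; the accumulated magnitudes are illustrative):
+
+ ```python
+ import torch
+
+ M = torch.tensor([0.9, 0.05, 0.4, 0.1, 0.6, 0.02, 0.3, 0.25])   # accumulated magnitudes
+ keep = 4                                                         # (1 - r) * n with r = 0.5
+
+ # deterministic pruning: always keep the parameters with the largest magnitudes
+ deterministic_idx = torch.topk(M, keep).indices
+
+ # probabilistic pruning: resample a subset at every pruning step
+ probabilistic_idx = torch.multinomial(M / M.sum(), keep, replacement=False)
+ ```
+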
241
+ Different optimizers. Table 3 shows the accuracy tested on classical devices when training with different optimizers. The learning rate follows a cosine schedule, decaying from 0.3 at the beginning to 0.03 at the end. We test SGD, SGD with a momentum factor of 0.8, and Adam on MNIST-4, MNIST-2, Fashion-4, and Fashion-2, and find that Adam always performs the best. Hence, all experiments use the Adam optimizer by default.
242
+
243
+ # 5 CONCLUSION
244
+
245
+ In this work, for the first time, we present an efficient and robust on-chip training framework for PQC and demonstrate its effectiveness on real quantum devices. By leveraging the parameter shift rule, we calculate exact quantum gradients directly on quantum machines, thus achieving high scalability. To alleviate the negative impact of
246
+
247
+ ![](images/5de9f81a763849e82fcb43bb1aef0a8a4850d5ba22e54df6c64e9a780ca516e9.jpg)
248
+ Figure 8: Runtime and memory cost comparison between classical simulation and quantum on-chip run.
249
+
250
+ ![](images/7c89c05dc38e3b64c5ffe7db360e8c124643990ba88e4c349b884f9d4518d173.jpg)
251
+
252
+ quantum noise on gradients, we further propose the probabilistic gradient pruning technique to avoid updating parameters with unreliable gradients. Experimental results on 5 classification tasks and 5 machines demonstrate that QOC achieves accuracy comparable to noise-free simulations. We hope QOC can open an avenue towards the practical training of large PQC models for quantum advantage.
253
+
254
+ # ACKNOWLEDGMENT
255
+
256
+ We acknowledge NSF CAREER Award #1943349, MIT-IBM Watson AI Lab, Baidu Fellowship, Qualcomm Innovation Fellowship, and IBM Quantum.
257
+
258
+ # REFERENCES
259
+
260
+ [1] Jacob Biamonte, Peter Wittek, Nicola Pancotti, Patrick Rebentrost, Nathan Wiebe, and Seth Lloyd. 2017. Quantum machine learning. Nature 549, 7671 (2017).
261
+ [2] Gavin E Crooks. 2019. Gradients of parameterized quantum gates using the parameter-shift rule and gate decomposition. arXiv:1905.13311 (2019).
262
+ [3] Song Han, Huizi Mao, and William J Dally. 2015. Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding. arXiv preprint arXiv:1510.00149 (2015).
263
+ [4] Aram W Harrow, Avinatan Hassidim, and Seth Lloyd. 2009. Quantum algorithm for linear systems of equations. Physical review letters 103, 15 (2009), 150502.
264
+ [5] Vojtěch Havlíček et al. 2019. Supervised learning with quantum-enhanced feature spaces. Nature 567, 7747 (2019), 209-212.
265
+ [6] Cheng-Yun Hsieh, Chen-Hung Wu, Chia-Hsien Huang, Hsi-Sheng Goan, and James Chien-Mo Li. 2020. Realistic fault models and fault simulation for quantum dot quantum circuits. In 2020 57th ACM/IEEE Design Automation Conference (DAC). IEEE, 1-6.
266
+ [7] Qiskit IBM. [n.d.].
267
+ [8] Y. Lecun, L. Bottou, Y. Bengio, and P. Haffner. 1998. Gradient-based learning applied to document recognition. Proc. IEEE 86, 11 (1998), 2278-2324.
268
+ [9] Zhiding Liang, Zhepeng Wang, Junhuan Yang, Lei Yang, Yiyu Shi, and Weiwen Jiang. 2021. Can Noise on Qubits Be Learned in Quantum Neural Network? A Case Study on QuantumFlow. In ICCAD. IEEE, 1-7.
269
+ [10] Seth Lloyd, Silvano Garnerone, and Paolo Zanardi. 2016. Quantum algorithms for topological and geometric analysis of data. Nature communications 7, 1 (2016).
270
+ [11] Seth Lloyd, Masoud Mohseni, and Patrick Rebentrost. 2013. Quantum algorithms for supervised and unsupervised machine learning. arXiv:1307.0411 (2013).
271
+ [12] Seth Lloyd, Maria Schuld, Aroosa Ijaz, Josh Izaac, and Nathan Killoran. 2020. Quantum embeddings for machine learning. arXiv:2001.03622 (2020).
272
+ [13] Easwar Magesan, Jay M Gambetta, and Joseph Emerson. 2012. Characterizing quantum gates via randomized benchmarking. Physical Review A 85, 4 (2012).
273
+ [14] Kosuke Mitarai, Makoto Negoro, Masahiro Kitagawa, and Keisuke Fujii. 2018. Quantum circuit learning. Physical Review A (2018).
274
+ [15] Le Thanh Nguyen-Meidine et al. 2020. Progressive Gradient Pruning for Classification, Detection and Domain Adaptation. arXiv:1906.08746 [cs.LG]
275
+ [16] John Preskill. 2018. Quantum Computing in the NISQ era and beyond. Quantum 2 (2018), 79.
276
+ [17] Hanrui Wang, Yongshan Ding, Jiaqi Gu, Yujun Lin, David Z Pan, Frederic T Chong, and Song Han. 2022. QuantumNAS: Noise-adaptive search for robust quantum circuits. HPCA (2022).
277
+ [18] Hanrui Wang, Jiaqi Gu, Yongshan Ding, Zirui Li, Frederic T Chong, David Z Pan, and Song Han. 2022. QuantumNAT: Quantum Noise-Aware Training with Noise Injection, Quantization and Normalization. DAC (2022).
278
+ [19] Hanrui Wang, Zhekai Zhang, and Song Han. 2021. SpAtten: Efficient sparse attention architecture with cascade token and head pruning. In HPCA. IEEE.
279
+ [20] Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Hanrui Wang, Yujun Lin, and Song Han. 2020. Apq: Joint search for network architecture, pruning and quantization policy. In CVPR.
280
+ [21] Zhepeng Wang, Zhiding Liang, Shanglin Zhou, et al. 2021. Exploration of Quantum Neural Architecture by Mixing Quantum Neuron Designs. In ICCAD. IEEE.
281
+ [22] Han Xiao, Kashif Rasul, and Roland Vollgraf. 2017. Fashion-mnist: a novel image dataset for benchmarking machine learning algorithms. arXiv:1708.07747 (2017).
282
+ [23] Zhekai Zhang, Hanrui Wang, Song Han, and William J Dally. 2020. SpArch: Efficient architecture for sparse matrix multiplication. In HPCA. IEEE.
2202.13xxx/2202.13239/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a419aa501e6a9f07644163dae6d71a83cdc9e6f54e697c3c5d22e54d10fad466
3
+ size 479397
2202.13xxx/2202.13239/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2202.13xxx/2202.13257/df5b9240-81ca-4fc3-8c6d-62c6bd827cf6_content_list.json ADDED
@@ -0,0 +1,1747 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Controllable Natural Language Generation with Contrastive Prefixes",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 139,
8
+ 89,
9
+ 855,
10
+ 110
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jing Qian $^{1}$ , Li Dong $^{2}$ , Yelong Shen $^{2}$ , Furu Wei $^{2}$ , Weizhu Chen $^{2}$",
17
+ "bbox": [
18
+ 228,
19
+ 129,
20
+ 771,
21
+ 147
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "<sup>1</sup>University of California, Santa Barbara",
28
+ "bbox": [
29
+ 337,
30
+ 148,
31
+ 665,
32
+ 164
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "2Microsoft Corporation",
39
+ "bbox": [
40
+ 403,
41
+ 165,
42
+ 598,
43
+ 181
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "jing_qian@cs.ucsb.edu",
50
+ "bbox": [
51
+ 374,
52
+ 181,
53
+ 628,
54
+ 198
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "{lidong1, yeshe, fuwei, wzchen}@microsoft.com",
61
+ "bbox": [
62
+ 248,
63
+ 199,
64
+ 754,
65
+ 214
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "Abstract",
72
+ "text_level": 1,
73
+ "bbox": [
74
+ 260,
75
+ 252,
76
+ 339,
77
+ 266
78
+ ],
79
+ "page_idx": 0
80
+ },
81
+ {
82
+ "type": "text",
83
+ "text": "To guide the generation of large pretrained language models (LM), previous work has focused on directly fine-tuning the language model or utilizing an attribute discriminator. In this work, we propose a novel lightweight framework for controllable GPT2 (Radford et al., 2019) generation, which utilizes a set of small attribute-specific vectors, called prefixes (Li and Liang, 2021), to steer natural language generation. Different from Li and Liang (2021), where each prefix is trained independently, we take the relationship among prefixes into consideration and train multiple prefixes simultaneously, as illustrated in Figure 1. We propose a novel supervised method and also an unsupervised method to train the prefixes for single-aspect control while the combination of these two methods can achieve multi-aspect control. Experimental results on both single-aspect and multi-aspect control show that our methods can guide generation towards the desired attributes while keeping high linguistic quality.",
84
+ "bbox": [
85
+ 141,
86
+ 279,
87
+ 460,
88
+ 608
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "1 Introduction",
95
+ "text_level": 1,
96
+ "bbox": [
97
+ 114,
98
+ 619,
99
+ 258,
100
+ 634
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "The goal of controllable Natural Language Generation (NLG) is to guide generation towards the desired attributes in the concerned aspects of the text. For example, the aspect can be topic or sentiment, and sentiment may have two attributes: positive and negative. Previous work has focused on directly fine-tuning the existing models (Keskar et al., 2019; Hu et al., 2017; Ficler and Goldberg, 2017) or using a discriminator to guide generation (Dathathri et al., 2020; Krause et al., 2020; Holtzman et al., 2018). CTRL (Keskar et al., 2019) achieves controllability at the expense of training a large conditional LM. GeDi (Krause et al., 2020) also trains conditional LMs but uses them as discriminators to guide generation, introducing additional 345M parameters. Besides, GeDi focuses on single-aspect control, ignoring the need for multi-aspect control.",
107
+ "bbox": [
108
+ 112,
109
+ 645,
110
+ 489,
111
+ 919
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "image",
117
+ "img_path": "images/1be9674df27246bb0309e237899dd796df601288f4b44c3c1729f821f4b61867.jpg",
118
+ "image_caption": [
119
+ "Figure 1: A comparison of prefix-tuning (Li and Liang, 2021) (top) and our framework (bottom) on sentiment control. The solid arrows show the training process, while the dashed ones show the inference (generation) process. In our proposed framework, the training can be supervised, semi-supervised, or unsupervised."
120
+ ],
121
+ "image_footnote": [],
122
+ "bbox": [
123
+ 534,
124
+ 249,
125
+ 860,
126
+ 434
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "text",
132
+ "text": "PPLM (Dathathri et al., 2020) guides generation by iteratively updating the LM's hidden activations. However, this decoding strategy is extremely computationally intensive, resulting in a slow generation speed (Gehman et al., 2020).",
133
+ "bbox": [
134
+ 507,
135
+ 558,
136
+ 884,
137
+ 639
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "text",
143
+ "text": "Prefix-tuning (Li and Liang, 2021) proposes to optimize a prefix, which is a small continuous task-specific vector, as a lightweight alternative to fin-tuning an NLG task, such as table-to-text generation or summarization. Inspired by Li and Liang (2021), we propose to use prefixes, a set of small continuous attribute-specific vectors, to steer NLG. Compared with using an attribute model or a generative discriminator (Dathathri et al., 2020; Krause et al., 2020), using learned prefixes to achieve controllability has the following benefits. First, it introduces fewer additional parameters ( $\\sim 0.2\\% - 2\\%$ of GPT2 parameters in our experiments). Second, using prefixes keeps the inference speed comparable to that of the original GPT2 model.",
144
+ "bbox": [
145
+ 507,
146
+ 642,
147
+ 884,
148
+ 883
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "In a general sense, prefix-tuning (Li and Liang, 2021) can be considered as controlling the genera",
155
+ "bbox": [
156
+ 507,
157
+ 887,
158
+ 882,
159
+ 917
160
+ ],
161
+ "page_idx": 0
162
+ },
163
+ {
164
+ "type": "aside_text",
165
+ "text": "arXiv:2202.13257v1 [cs.CL] 27 Feb 2022",
166
+ "bbox": [
167
+ 21,
168
+ 309,
169
+ 60,
170
+ 724
171
+ ],
172
+ "page_idx": 0
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "tion of language models. Prefix-tuning views each prefix as an independent control task thus trains each prefix separately (top in Figure 1). However, one aspect of controllability in NLG involves multiple attributes, which might have a relationship with each other. For example, the sentiment aspect usually has two attributes: positive and negative, which are in opposition to each other. We think that this opposite relationship can be helpful to improve the controllability of a prefix. Therefore, we propose a novel supervised method and a novel unsupervised one in our framework, which takes the relationship among prefixes into consideration and trains multiple prefixes simultaneously with novel training objectives, as illustrated in Figure 1.",
177
+ "bbox": [
178
+ 115,
179
+ 84,
180
+ 485,
181
+ 324
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "text",
187
+ "text": "Experimental results on the single-aspect control tasks (sentiment control, detoxification, and topic control) show that our proposed methods can guide generation towards the target attribute while keeping high linguistic quality, even when only several dozen labeled examples are available. In addition to single-aspect control, multi-aspect control can be achieved by combining the proposed supervised method with the unsupervised method in our framework. Experimental results on the sentiment and topic control show that the prefixes trained with our method can successfully control these two aspects simultaneously.",
188
+ "bbox": [
189
+ 115,
190
+ 326,
191
+ 485,
192
+ 533
193
+ ],
194
+ "page_idx": 1
195
+ },
196
+ {
197
+ "type": "text",
198
+ "text": "Our main contributions are as follows:",
199
+ "bbox": [
200
+ 134,
201
+ 535,
202
+ 418,
203
+ 549
204
+ ],
205
+ "page_idx": 1
206
+ },
207
+ {
208
+ "type": "list",
209
+ "sub_type": "text",
210
+ "list_items": [
211
+ "- We propose a novel framework that utilizes prefixes with frozen LMs as a lightweight alternative for controllable GPT2 generation.",
212
+ "- We propose a supervised method and an unsupervised method with novel objectives for prefix training, where the relationship among prefixes are considered and multiple prefixes are trained simultaneously.",
213
+ "- This work provides a unified perspective for single-aspect control and multi-aspect control. Experimental results show that our methods can effectively guide generation in both single-aspect control and multi-aspect control."
214
+ ],
215
+ "bbox": [
216
+ 114,
217
+ 558,
218
+ 485,
219
+ 784
220
+ ],
221
+ "page_idx": 1
222
+ },
223
+ {
224
+ "type": "text",
225
+ "text": "2 Related Work",
226
+ "text_level": 1,
227
+ "bbox": [
228
+ 115,
229
+ 797,
230
+ 268,
231
+ 812
232
+ ],
233
+ "page_idx": 1
234
+ },
235
+ {
236
+ "type": "text",
237
+ "text": "Ficler and Goldberg (2017) control the stylistic aspects of the generated text with a conditioned RNN (Recurrent Neural Network) LM. Holtzman et al. (2018) compose a committee of discriminators to guide an RNN generator towards the generations with the desired linguistic quality. Hu et al.",
238
+ "bbox": [
239
+ 115,
240
+ 822,
241
+ 485,
242
+ 917
243
+ ],
244
+ "page_idx": 1
245
+ },
246
+ {
247
+ "type": "text",
248
+ "text": "(2017) aim at controlling the sentiment and tense of the generated text by combining variational autoencoders (VAE) and attribute discriminators.",
249
+ "bbox": [
250
+ 510,
251
+ 84,
252
+ 880,
253
+ 131
254
+ ],
255
+ "page_idx": 1
256
+ },
257
+ {
258
+ "type": "text",
259
+ "text": "More recently, with the advent of Transformers and large pretrained language models, such as GPT2, an extensive body of work has focused on controlling the generation of these Transformer-based models. Keskar et al. (2019) train a 1.63 billion-parameter conditional transformer LM from scratch with 55 attribute control codes to guide generation. However, this method is expensive and lacks flexibility since the control codes are fixed. Dathathri et al. (2020) address these limitations by developing a plug-and-play model which leverages an attribute discriminator to perturb the LM's hidden activations. However, updating gradients at the token level results in slow inference. Instead of updating the hidden activations, Krause et al. (2020); Yang and Klein (2021); Lin and Riedl (2021) introduce generative discriminators to re-weight the next token distributions on the fly during inference, thus improving the inference speed.",
260
+ "bbox": [
261
+ 510,
262
+ 134,
263
+ 880,
264
+ 439
265
+ ],
266
+ "page_idx": 1
267
+ },
268
+ {
269
+ "type": "text",
270
+ "text": "Our work is mostly related to Yu et al. (2021); Li and Liang (2021). Yu et al. (2021) use a pretrained LM followed by an attribute alignment function to encode the tokens of the target attributes and the resulting hidden states are used to control generation. Different from their work, we do not take the tokens of the target attributes as input. Instead, we directly train a set of parameters, which acts as the prepended hidden states of GPT2, to control generation. Avoiding using attribute tokens can circumvent the problems when it is difficult to describe the desired attribute with only one word. Besides, Yu et al. (2021) focus on attributes disentanglement, which is not a focus in our work, so our training methods are different. Prefix-tuning (Li and Liang, 2021) can, in a general sense, be viewed as controlling the generation of LMs, where the LM is controlled to depict a specific NLG task, while in this work, the LM is controlled to carry specific attributes in a generation. Besides, our proposed methods for prefix training are different from Li and Liang (2021), as stated in Section 1.",
271
+ "bbox": [
272
+ 510,
273
+ 441,
274
+ 880,
275
+ 793
276
+ ],
277
+ "page_idx": 1
278
+ },
279
+ {
280
+ "type": "text",
281
+ "text": "3 Method",
282
+ "text_level": 1,
283
+ "bbox": [
284
+ 512,
285
+ 810,
286
+ 611,
287
+ 825
288
+ ],
289
+ "page_idx": 1
290
+ },
291
+ {
292
+ "type": "text",
293
+ "text": "Our method uses prefixes to guide GPT2 generation, where a prefix is a continuous attribute-specific vector prepended to the activations of GPT2. Prefixes are free parameters denoted as $H_{\\theta}$ . Different from Li and Liang (2021), where",
294
+ "bbox": [
295
+ 512,
296
+ 839,
297
+ 880,
298
+ 917
299
+ ],
300
+ "page_idx": 1
301
+ },
302
+ {
303
+ "type": "text",
304
+ "text": "each prefix is trained independently, we consider the relationship among attributes and train multiple prefixes simultaneously, so $H_{\\theta}$ is of dimension $N \\times M \\times D$ , where $N$ is the number of prefixes. In single-aspect control, $N$ equals the number of attributes in the concerned aspect. $M$ is the length of a prefix. $D = 2 \\times L \\times E$ is the dimension of the activation in GPT2, where $L$ is the number of transformer layers, $E$ is the hidden size, and 2 indicates one key vector and one value vector. Following Li and Liang (2021), we reparametrize $H_{\\theta}[i,j,:] = W_iH_{\\theta}'[i,j,:]$ by a smaller parameter $(H_{\\theta}'')$ composed with a large matrix $(W_i)$ . After the training finishes, only $H_{\\theta}$ needs to be saved for generation while $W$ and $H_{\\theta}'$ can be discarded. Since the GPT2 parameters are kept frozen during training, they do not need to be saved either. Figure 2 shows an example of the generation process under the control of a trained prefix. The prefixes can be trained in a supervised, semi-supervised, or unsupervised way. Since the semi-supervised method is a combination of the supervised and the unsupervised method, we introduce the supervised and the unsupervised method in this section. For clarity, we introduce these methods under the single-aspect control setting.",
305
+ "bbox": [
306
+ 112,
307
+ 84,
308
+ 489,
309
+ 502
310
+ ],
311
+ "page_idx": 2
312
+ },
313
+ {
314
+ "type": "text",
315
+ "text": "3.1 Supervised Method",
316
+ "text_level": 1,
317
+ "bbox": [
318
+ 112,
319
+ 514,
320
+ 314,
321
+ 529
322
+ ],
323
+ "page_idx": 2
324
+ },
325
+ {
326
+ "type": "text",
327
+ "text": "Suppose the concerned aspect has the attribute set $Y$ , each training example is a pair of $(x, y)$ where $x$ is the input text and $y \\in Y$ is the attribute label of $x$ . Note that the attribute label also indicates the ground truth index of the prefix in $H_{\\theta}$ , so $y$ also refers to the prefix index in the following description. As mentioned in Section 1, we introduce an additional discriminative loss to train multiple prefixes simultaneously. Therefore, the training loss $\\mathcal{L}_{sup}$ is a weighted sum of the language model loss $\\mathcal{L}_{LM}$ and the discriminative loss $\\mathcal{L}_d$ :",
328
+ "bbox": [
329
+ 112,
330
+ 535,
331
+ 489,
332
+ 712
333
+ ],
334
+ "page_idx": 2
335
+ },
336
+ {
337
+ "type": "equation",
338
+ "text": "\n$$\n\\mathcal {L} _ {s u p} = \\omega_ {1} \\mathcal {L} _ {L M} + \\omega_ {2} \\mathcal {L} _ {d} \\tag {1}\n$$\n",
339
+ "text_format": "latex",
340
+ "bbox": [
341
+ 240,
342
+ 727,
343
+ 485,
344
+ 741
345
+ ],
346
+ "page_idx": 2
347
+ },
348
+ {
349
+ "type": "equation",
350
+ "text": "\n$$\n\\mathcal {L} _ {L M} = - \\sum_ {t = 1} ^ {T} \\log p \\left(x _ {t} \\mid x _ {< t}, y\\right) \\tag {2}\n$$\n",
351
+ "text_format": "latex",
352
+ "bbox": [
353
+ 194,
354
+ 747,
355
+ 485,
356
+ 787
357
+ ],
358
+ "page_idx": 2
359
+ },
360
+ {
361
+ "type": "equation",
362
+ "text": "\n$$\n\\mathcal {L} _ {d} = - \\log \\frac {p (y) p (x | y)}{\\sum_ {y ^ {\\prime} \\in Y} p \\left(y ^ {\\prime}\\right) p \\left(x \\mid y ^ {\\prime}\\right)} \\tag {3}\n$$\n",
363
+ "text_format": "latex",
364
+ "bbox": [
365
+ 179,
366
+ 791,
367
+ 485,
368
+ 826
369
+ ],
370
+ "page_idx": 2
371
+ },
372
+ {
373
+ "type": "text",
374
+ "text": "The computation of $\\log p(x_t|x_{< t},y)$ is parameterized as $\\log p_{\\theta ,\\gamma}(x_t|x_{< t},H_\\theta [y,:,:])$ where $\\gamma$ is the set of fixed GPT2 parameters, and $\\theta$ represents learnable prefix parameters. $\\log p(x|y) = \\sum_t\\log p(x_t|x_{< t},y)$ , so the parameterization of",
375
+ "bbox": [
376
+ 112,
377
+ 839,
378
+ 489,
379
+ 920
380
+ ],
381
+ "page_idx": 2
382
+ },
383
+ {
384
+ "type": "image",
385
+ "img_path": "images/7991b554ca7d0d3cd5d53b0a06511d7003e6d1baf4da5e9af2a2a98a26df6f4c.jpg",
386
+ "image_caption": [
387
+ "Figure 2: An illustration of the GPT2 generation process unfolded through time, controlled by a positive sentiment prefix $H_{1} = H_{\\theta}[1,:,:]$ . \"The book\" is the given prompt. \"is good\" is the generated completion."
388
+ ],
389
+ "image_footnote": [],
390
+ "bbox": [
391
+ 526,
392
+ 80,
393
+ 867,
394
+ 212
395
+ ],
396
+ "page_idx": 2
397
+ },
398
+ {
399
+ "type": "text",
400
+ "text": "$\\log p(x|y)$ is the sum of $\\log p_{\\theta ,\\gamma}(x_t|x_{< t},H_\\theta [y,(:,:])$ over $t$",
401
+ "bbox": [
402
+ 507,
403
+ 303,
404
+ 882,
405
+ 332
406
+ ],
407
+ "page_idx": 2
408
+ },
409
+ {
410
+ "type": "text",
411
+ "text": "Note that each prefix can be trained independently using $\\mathcal{L}_{LM}$ alone, which would be the same as prefix-tuning (Li and Liang, 2021). Intuitively, prefixes trained by $\\mathcal{L}_{LM}$ are infused with the information of what is encouraged to generate. However, we observe that in controllable NLG, it is helpful to also infuse a prefix with the information of what is discouraged to generate. Given a training example $(x,y)$ , the prefix $H_{\\theta}[y,:,:]$ should be optimized towards generating $x$ , while the other prefixes should be discouraged to generate $x$ . To achieve this goal, all the prefixes in $H_{\\theta}$ should be trained simultaneously. Therefore, the discriminative loss $\\mathcal{L}_d$ is introduced. As in equation 3, optimizing $\\mathcal{L}_d$ improves the attribute alignment $p(y|x)$ by increasing $p(x|y)$ and lowering $p(x|\\bar{y})$ , $\\bar{y} \\in Y\\backslash \\{y\\}$ at the same time. We assume uniform prior, so $p(y)$ and $p(y^{\\prime})$ can be canceled out in Equation 3. Figure 3 illustrates the training process with two prefixes.",
412
+ "bbox": [
413
+ 507,
414
+ 336,
415
+ 884,
416
+ 643
417
+ ],
418
+ "page_idx": 2
419
+ },
420
+ {
421
+ "type": "text",
422
+ "text": "3.2 Unsupervised Method",
423
+ "text_level": 1,
424
+ "bbox": [
425
+ 507,
426
+ 655,
427
+ 727,
428
+ 671
429
+ ],
430
+ "page_idx": 2
431
+ },
432
+ {
433
+ "type": "text",
434
+ "text": "In the unsupervised setting, we assume the attribute set $Y$ of the concerned aspect is known. The training example consists of input text $x$ only. The attribute label $y$ is no longer available and thus the index of the prefix associated with $x$ is unknown. In other words, the index of the prefix corresponding to $x$ is a latent variable $z$ , whose posterior distribution follows a categorical distribution. Inspired by VQ-VAE (van den Oord et al., 2017), we consider the prefixes as discrete latent representations. We take the backbone model in the above supervised method as the decoder and introduce an encoder to parameterize the categorical distribution $q(z|x)$ . According to $q(z|x)$ , a prefix index $z$ is selected and the prefix $H_{\\theta}[z,:,:)$ is then fed into the decoder",
435
+ "bbox": [
436
+ 507,
437
+ 677,
438
+ 882,
439
+ 920
440
+ ],
441
+ "page_idx": 2
442
+ },
443
+ {
444
+ "type": "image",
445
+ "img_path": "images/5a6bf752b4a545663c0310d8662e3d6eb0a786a26f114dd0b2f150ceda0b2a00.jpg",
446
+ "image_caption": [
447
+ "Figure 3: An illustration of the supervised training method on sentiment control. $H_0$ is the prefix of negative sentiment. $H_1$ is the prefix of positive sentiment. Note that training without $\\mathcal{L}_d$ is equivalent to Li and Liang (2021), where $H_0$ and $H_1$ are trained separately. The GPT2 is pretrained, and its parameters are frozen."
448
+ ],
449
+ "image_footnote": [],
450
+ "bbox": [
451
+ 174,
452
+ 80,
453
+ 826,
454
+ 284
455
+ ],
456
+ "page_idx": 3
457
+ },
458
+ {
459
+ "type": "image",
460
+ "img_path": "images/4bf8377daa0779c9b138490f8b035cede0127d2926b68c211570feefd60006c2.jpg",
461
+ "image_caption": [
462
+ "Figure 4: An illustration of the unsupervised training method. $H_{\\theta}$ denotes the 2 prefixes. $z$ is the latent variable indicating the index of the prefix corresponding to the input text $x$ . $\\bar{z}$ is the latent variable indicating the index of the opposite prefix. $\\otimes$ is matrix multiplication. $\\mathcal{L}_{KL}$ is not shown in this figure for clarity."
463
+ ],
464
+ "image_footnote": [],
465
+ "bbox": [
466
+ 154,
467
+ 348,
468
+ 845,
469
+ 530
470
+ ],
471
+ "page_idx": 3
472
+ },
473
+ {
474
+ "type": "text",
475
+ "text": "to reconstruct the input text $x$ . Since the selection process of the prefixes is non-differentiable, we use Gumbel-Softmax (GS) relaxation (Jang et al., 2017; Maddison et al., 2017) following Sønderby et al. (2017); Ramesh et al. (2021). Formally, $q(z|x)$ is computed as follows:",
476
+ "bbox": [
477
+ 112,
478
+ 606,
479
+ 487,
480
+ 702
481
+ ],
482
+ "page_idx": 3
483
+ },
484
+ {
485
+ "type": "equation",
486
+ "text": "\n$$\nq (z | x) = G S (- \\| \\operatorname {E n c} (x) - H _ {\\theta} \\| _ {2}, \\tau) \\tag {4}\n$$\n",
487
+ "text_format": "latex",
488
+ "bbox": [
489
+ 157,
490
+ 713,
491
+ 485,
492
+ 730
493
+ ],
494
+ "page_idx": 3
495
+ },
496
+ {
497
+ "type": "text",
498
+ "text": "where $\\tau$ is the temperature of Gumbel-Softmax, and $Enc$ is the encoder function. We use a pretrained GPT-2 model followed by a linear layer as the encoder. To train the prefixes, the loss function is a weighted sum of the three loss terms:",
499
+ "bbox": [
500
+ 112,
501
+ 740,
502
+ 487,
503
+ 820
504
+ ],
505
+ "page_idx": 3
506
+ },
507
+ {
508
+ "type": "equation",
509
+ "text": "\n$$\n\\mathcal {L} _ {u n s} = \\omega_ {1} \\mathcal {L} _ {L M} + \\omega_ {2} \\mathcal {L} _ {K L} + \\omega_ {3} \\mathcal {L} _ {c} \\tag {5}\n$$\n",
510
+ "text_format": "latex",
511
+ "bbox": [
512
+ 166,
513
+ 832,
514
+ 485,
515
+ 848
516
+ ],
517
+ "page_idx": 3
518
+ },
519
+ {
520
+ "type": "equation",
521
+ "text": "\n$$\n\\mathcal {L} _ {L M} = - \\sum_ {t = 1} ^ {T} \\log p \\left(x _ {t} \\mid x _ {< t}, z\\right) \\tag {6}\n$$\n",
522
+ "text_format": "latex",
523
+ "bbox": [
524
+ 200,
525
+ 851,
526
+ 485,
527
+ 894
528
+ ],
529
+ "page_idx": 3
530
+ },
531
+ {
532
+ "type": "equation",
533
+ "text": "\n$$\n\\mathcal {L} _ {K L} = K L [ q (z | x) | | p (z) ] \\tag {7}\n$$\n",
534
+ "text_format": "latex",
535
+ "bbox": [
536
+ 236,
537
+ 897,
538
+ 485,
539
+ 915
540
+ ],
541
+ "page_idx": 3
542
+ },
543
+ {
544
+ "type": "text",
545
+ "text": "where $\\mathcal{L}_{LM}$ is the language model loss. Similar as that in the supervised method, the computation of $\\log p(x_t|x_{< t},z)$ is parameterized as $\\log p_{\\theta ,\\gamma}(x_t|x_{< t},H_\\theta [z,:;:])$ . $\\mathcal{L}_{KL}$ is the KullbackLeibler divergence, where we assume the prior $p(z)$ to be uniform. Note that these two terms constitute the loss function of VAE. Optimizing these two loss terms improves the evidence lower bound of $\\log p(x)$ . Similar to the intuition behind $\\mathcal{L}_d$ in the supervised method, if the ground truth prefix for $x$ is $H_{\\theta}[y,:;]$ , then the other prefixes should be discouraged to generate $x$ . However, $\\mathcal{L}_d$ requires the ground truth label $y$ for computation. Instead, we introduce an unsupervised contrastive loss $\\mathcal{L}_c$",
546
+ "bbox": [
547
+ 507,
548
+ 606,
549
+ 882,
550
+ 831
551
+ ],
552
+ "page_idx": 3
553
+ },
554
+ {
555
+ "type": "equation",
556
+ "text": "\n$$\n\\mathcal {L} _ {c} = \\max (m - \\| p (z | x) - p (\\bar {z} | x) \\| _ {2}, 0) ^ {2} \\tag {8}\n$$\n",
557
+ "text_format": "latex",
558
+ "bbox": [
559
+ 531,
560
+ 848,
561
+ 880,
562
+ 868
563
+ ],
564
+ "page_idx": 3
565
+ },
566
+ {
567
+ "type": "text",
568
+ "text": "where $m$ is a pre-set margin and $\\bar{z}$ is another latent variable indicating the index of the opposite prefix",
569
+ "bbox": [
570
+ 507,
571
+ 887,
572
+ 880,
573
+ 917
574
+ ],
575
+ "page_idx": 3
576
+ },
577
+ {
578
+ "type": "text",
579
+ "text": "of $x$ . $q(\\bar{z} |x)$ is computed as follows:",
580
+ "bbox": [
581
+ 112,
582
+ 84,
583
+ 386,
584
+ 99
585
+ ],
586
+ "page_idx": 4
587
+ },
588
+ {
589
+ "type": "equation",
590
+ "text": "\n$$\nq (\\bar {z} | x) = G S \\left(\\| E n c (x) - H _ {\\theta} \\| _ {2}, \\tau\\right) \\tag {9}\n$$\n",
591
+ "text_format": "latex",
592
+ "bbox": [
593
+ 166,
594
+ 105,
595
+ 485,
596
+ 124
597
+ ],
598
+ "page_idx": 4
599
+ },
600
+ {
601
+ "type": "text",
602
+ "text": "$\\mathcal{L}_c$ is aimed at increasing the attribute alignment by pushing $p(z|x)$ away from $p(\\bar{z} |x)$ by a margin. The computation of $p(z|x)$ is as follows:",
603
+ "bbox": [
604
+ 112,
605
+ 130,
606
+ 487,
607
+ 179
608
+ ],
609
+ "page_idx": 4
610
+ },
611
+ {
612
+ "type": "equation",
613
+ "text": "\n$$\np (z | x) = \\frac {p (z) p (x | z)}{\\sum_ {z ^ {\\prime} \\in Y} p \\left(z ^ {\\prime}\\right) p \\left(x \\mid z ^ {\\prime}\\right)} \\tag {10}\n$$\n",
614
+ "text_format": "latex",
615
+ "bbox": [
616
+ 184,
617
+ 184,
618
+ 485,
619
+ 219
620
+ ],
621
+ "page_idx": 4
622
+ },
623
+ {
624
+ "type": "text",
625
+ "text": "We assume uniform prior, so $p(z)$ and $p(z^{\\prime})$ can be canceled out. Similar as the parameterization of $\\log p(x|y)$ in the supervised method, the parameterization of $\\log p(x|z)$ is the sum of $\\log p_{\\theta ,\\gamma}(x_t|x_{< t},H_\\theta [z,:,:])$ over $t$ . The training process is illustrated in Figure 4.",
626
+ "bbox": [
627
+ 112,
628
+ 223,
629
+ 487,
630
+ 321
631
+ ],
632
+ "page_idx": 4
633
+ },
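For concreteness, the unsupervised objective in Eqs. 6-10 can be assembled roughly as in the following minimal PyTorch-style sketch. This is an illustrative reconstruction rather than the authors' code: the names (`unsupervised_loss`, `prefix_logprobs`, `enc_x`, `H_theta`) are hypothetical, `prefix_logprobs[z]` stands for the sum over $t$ of $\log p_{\theta,\gamma}(x_t|x_{<t},H_\theta[z,:,:])$, and the way $z$ and $\bar{z}$ are picked simply mirrors the distance heuristic of Eq. 9.

```python
import math
import torch
import torch.nn.functional as F

def unsupervised_loss(prefix_logprobs, enc_x, H_theta, m=1.0, tau=0.5):
    """Sketch of Eqs. 6-10 for a single example x.

    prefix_logprobs: [K] tensor, entry z = sum_t log p(x_t | x_<t, H_theta[z,:,:])
    enc_x:           [d] tensor, Enc(x)
    H_theta:         [K, d] tensor, one pooled representation per prefix
    """
    # Distance between Enc(x) and every prefix; Eq. 9 turns this into q(z_bar|x)
    dist = torch.norm(enc_x.unsqueeze(0) - H_theta, dim=-1)   # [K]
    q_z = F.gumbel_softmax(-dist, tau=tau)                     # relaxed q(z|x)
    q_zbar = F.gumbel_softmax(dist, tau=tau)                   # relaxed q(z_bar|x), Eq. 9

    # Eq. 6: language model loss under the (softly) selected prefix
    l_lm = -(q_z * prefix_logprobs).sum()

    # Eq. 7: KL[q(z|x) || p(z)] with a uniform prior p(z) = 1/K
    l_kl = (q_z * (torch.log(q_z + 1e-8) + math.log(q_z.numel()))).sum()

    # Eq. 10: p(z|x) by Bayes rule; the uniform prior cancels out
    p_z_given_x = F.softmax(prefix_logprobs, dim=-1)

    # Eq. 8: push p(z|x) away from p(z_bar|x) by at least the margin m
    z, z_bar = q_z.argmax(), q_zbar.argmax()
    l_c = torch.clamp(m - torch.abs(p_z_given_x[z] - p_z_given_x[z_bar]), min=0.0) ** 2

    return l_lm, l_kl, l_c
```

The three terms would then be combined with the weights $\omega_1$, $\omega_2$, $\omega_3$ listed in appendix A (with $\omega_2$ annealed and the temperature $\tau$ decayed during training).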
634
+ {
635
+ "type": "text",
636
+ "text": "4 Experiments",
637
+ "text_level": 1,
638
+ "bbox": [
639
+ 112,
640
+ 331,
641
+ 260,
642
+ 348
643
+ ],
644
+ "page_idx": 4
645
+ },
646
+ {
647
+ "type": "text",
648
+ "text": "We experiment with three tasks: sentiment control, detoxification, and topic control. We compare our method to GPT2, PPLM, and GeDi. We focus on English text in all the experiments and we experiment with GPT2-medium (345M parameters) for all the methods. We use the original implementation of PPLM and GeDi released by Dathathri et al. (2020) and Krause et al. (2020), and the hyperparameters are set to the reported value in the original paper. The detailed hyperparameters in each task are listed in appendix A. For the GPT2 model, we do experiments under two settings. First, the GPT2 model generates completions of each prompt in the evaluation dataset, which is denoted as GPT2-medium. Second, GPT2-medium + prompt engineering prepends a guiding sentence to each testing prompt and then generates completions of each augmented prompt. We evaluate the linguistic quality and attribute alignment of the generation. The linguistic quality is evaluated using the perplexity calculated by GPT2-large (774M parameters).",
649
+ "bbox": [
650
+ 112,
651
+ 355,
652
+ 489,
653
+ 693
654
+ ],
655
+ "page_idx": 4
656
+ },
657
+ {
658
+ "type": "text",
659
+ "text": "To evaluate the robustness of our supervised method with the size of the training dataset, we experiment with the following three different settings: 1) using the complete training dataset; 2) using 1,000 examples per attribute for training; 3) using 24 examples per attribute for training. We evaluate our unsupervised method on the sentiment control task and the detoxification task, which are binary tasks. Note that different from the supervised method, our unsupervised method does not use any attribute labels, so the order of the attributes in the trained prefixes is undetermined. After the prefixes finish training using the unsupervised method, we manually check the order of the attributes.",
660
+ "bbox": [
661
+ 112,
662
+ 694,
663
+ 489,
664
+ 917
665
+ ],
666
+ "page_idx": 4
667
+ },
668
+ {
669
+ "type": "text",
670
+ "text": "4.1 Single-Aspect Control",
671
+ "text_level": 1,
672
+ "bbox": [
673
+ 507,
674
+ 84,
675
+ 729,
676
+ 99
677
+ ],
678
+ "page_idx": 4
679
+ },
680
+ {
681
+ "type": "text",
682
+ "text": "4.1.1 Tasks",
683
+ "text_level": 1,
684
+ "bbox": [
685
+ 507,
686
+ 105,
687
+ 615,
688
+ 118
689
+ ],
690
+ "page_idx": 4
691
+ },
692
+ {
693
+ "type": "text",
694
+ "text": "Sentiment Control Same as GeDi, we use IMDb movie reviews (Maas et al., 2011) to train our model. The number of prefixes is 2. Note that GeDi only uses $11.25\\mathrm{k}$ examples from the dataset for training. To be a fair comparison, we randomly sample $11.25\\mathrm{k}$ examples from the dataset to train our model. To evaluate the sentiment alignment of the generated text, we finetune a RoBERTa (Liu et al., 2019) classifier using the Yelp Review dataset (Zhang et al., 2015). The prompts used for evaluation are the same as those in the PPLM experiment (Dathathri et al., 2020). For each of the 15 prompts, 45 completions are generated. In the GPT2-medium + prompt engineering setting, we prepend each prompt with the guiding sentence \"This is a negative review:\" for negative sentiment control, and similarly, we prepend each prompt with \"This is a positive review:\" for positive sentiment control.",
695
+ "bbox": [
696
+ 505,
697
+ 124,
698
+ 885,
699
+ 431
700
+ ],
701
+ "page_idx": 4
702
+ },
703
+ {
704
+ "type": "text",
705
+ "text": "Detoxification We use Jigsaw Toxic Comment Classification Challenge Dataset<sup>1</sup> to train our model. The number of prefixes is 2. Google Perspective API<sup>2</sup> is used for toxicity evaluation. The testing prompts are collected from RealToxicityPrompts (Gehman et al., 2020). We use the prompts categorized as \"challenging\" in the dataset. We further filter out the prompts with toxicity larger than 0.5, scored by Perspective. The resulted evaluation dataset consists of 203 prompts. For each of these prompts, 20 completions are generated. In the GPT2-medium + prompt engineering setting, we preprocess each prompt with the guiding sentence \"This is a non-toxic comment:\".",
706
+ "bbox": [
707
+ 505,
708
+ 441,
709
+ 885,
710
+ 667
711
+ ],
712
+ "page_idx": 4
713
+ },
714
+ {
715
+ "type": "text",
716
+ "text": "Topic Control We experiment with the AGNews dataset and DBPedia dataset (Zhang et al., 2015). The number of prefixes is 4 and 14, respectively. The prompts used for evaluation are the same as those in the PPLM experiment (Dathathri et al., 2020). For each of the 20 prompts, 45 completions are generated. Same as that in GeDi, we split each of the original training datasets in half. One half is used to train prefixes, while the other half is used to train a RoBERTa topic classifier for topic relevance evaluation. In the GPT2-medium + prompt engineering setting, the guiding sentence follows",
717
+ "bbox": [
718
+ 507,
719
+ 676,
720
+ 885,
721
+ 870
722
+ ],
723
+ "page_idx": 4
724
+ },
725
+ {
726
+ "type": "page_footnote",
727
+ "text": "<sup>1</sup>https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge/",
728
+ "bbox": [
729
+ 507,
730
+ 879,
731
+ 850,
732
+ 904
733
+ ],
734
+ "page_idx": 4
735
+ },
736
+ {
737
+ "type": "page_footnote",
738
+ "text": "$^{2}$ https://www.perspectiveapi.com",
739
+ "bbox": [
740
+ 532,
741
+ 904,
742
+ 734,
743
+ 917
744
+ ],
745
+ "page_idx": 4
746
+ },
747
+ {
748
+ "type": "text",
749
+ "text": "the template \"The following is about [TOPIC]\". We do not compare with PPLM in the topic control task since PPLM uses a bag-of-words attribute model to do topic control, where the 7 predefined topics are different from the topics in the AGNews dataset or the DBPedia dataset.",
750
+ "bbox": [
751
+ 112,
752
+ 84,
753
+ 487,
754
+ 179
755
+ ],
756
+ "page_idx": 5
757
+ },
758
+ {
759
+ "type": "text",
760
+ "text": "All the experiments are conducted on NVIDIA Tesla V100 GPUs. The detailed hyper-parameters for each experiment are listed in appendix A.",
761
+ "bbox": [
762
+ 112,
763
+ 181,
764
+ 485,
765
+ 229
766
+ ],
767
+ "page_idx": 5
768
+ },
769
+ {
770
+ "type": "text",
771
+ "text": "4.1.2 Results",
772
+ "text_level": 1,
773
+ "bbox": [
774
+ 112,
775
+ 239,
776
+ 231,
777
+ 253
778
+ ],
779
+ "page_idx": 5
780
+ },
781
+ {
782
+ "type": "text",
783
+ "text": "In the unsupervised setting, GPT2-medium + prompt engineering shows controllability on sentiment control (Table 1) and topic control (Table 3). However, this method does not work on the detoxification task (Table 2). Our unsupervised method significantly lowers the toxicity on the detoxification task and the ablation study shows that the contrastive loss $\\mathcal{L}_c$ is crucial. On the sentiment control task, our unsupervised method does not achieve good attribute alignment when the target sentiment is negative, but it performs well when the target sentiment is positive. One possible reason is that compared with the differences between toxic and normal sentences, the difference between positive sentiment and negative sentiment is more subtle, so it is more challenging for the GPT2 encoder in our unsupervised model to accurately separate the unlabeled data into two sentiments. As a result, the encoder's implicit criterion to categorize the input text may not be exactly the sentiment, which is also the reason that after removing the contrastive loss $\\mathcal{L}_c$ in the unsupervised loss function, the attribute relevance on the negative sentiment is higher while that on the positive sentiment is lower.",
784
+ "bbox": [
785
+ 112,
786
+ 258,
787
+ 489,
788
+ 643
789
+ ],
790
+ "page_idx": 5
791
+ },
792
+ {
793
+ "type": "text",
794
+ "text": "In the supervised setting with full data, our supervised method consistently achieves better controllability than PPLM while maintaining the linguistic quality of the generations (Table 1, 2). Although GeDi achieves a high attribute alignment score on the three tasks, it severely sacrifices the linguistic quality, as indicated by the high perplexity. In the few-shot setting, where the number of labeled training examples is reduced to 1000 or 24 examples per attribute, our supervised method can still maintain good controllability on the three tasks, showing the robustness of our method to the size of the training data.",
795
+ "bbox": [
796
+ 112,
797
+ 645,
798
+ 489,
799
+ 851
800
+ ],
801
+ "page_idx": 5
802
+ },
803
+ {
804
+ "type": "text",
805
+ "text": "Ablation study shows the importance of the discriminative loss $\\mathcal{L}_d$ in our supervised method. As mentioned in section 3, training without $\\mathcal{L}_d$ is equivalent to prefix-tuning. Comparing the results",
806
+ "bbox": [
807
+ 112,
808
+ 854,
809
+ 487,
810
+ 919
811
+ ],
812
+ "page_idx": 5
813
+ },
814
+ {
815
+ "type": "text",
816
+ "text": "of $Ours - \\mathcal{L}_d$ and GPT2-medium show that directly using prefix-tuning can achieve controllability on the sentiment or the topic. However, it is less effective on detoxification. The reason is that different from topic control or sentiment control, detoxification requires the model to avoid generating some words or phrases according to the context, which can not be achieved by prefix-tuning. $\\mathcal{L}_d$ fills this gap by increasing $p(x|y)$ and lowering $p(x|\\bar{y})$ at the same time. Therefore, incorporating $\\mathcal{L}_d$ is of critical importance to the detoxification task. In the DBPedia topic control task, adding $\\mathcal{L}_d$ also achieves a large improvement on attribute alignment. The number of attributes in this task is much larger than that in the other tasks, so incorporating $\\mathcal{L}_d$ can effectively push the prefixes to capture the unique features of each topic.",
817
+ "bbox": [
818
+ 507,
819
+ 84,
820
+ 884,
821
+ 357
822
+ ],
823
+ "page_idx": 5
824
+ },
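The exact form of the discriminative loss $\mathcal{L}_d$ is given by Eq. 3 earlier in the paper and is not reproduced here. Purely to illustrate the behaviour described above (raising $p(x|y)$ while lowering $p(x|\bar{y})$), the stand-in below uses hypothetical names: under a uniform prior over prefixes, maximizing $p(y|x)$ reduces to a softmax cross-entropy over the per-prefix log-likelihoods (cf. Eq. 10).

```python
import torch
import torch.nn.functional as F

def discriminative_loss_standin(prefix_logprobs, y):
    """Illustrative stand-in for L_d; the paper's Eq. 3 is authoritative.

    prefix_logprobs: [K] tensor, entry z = log p(x | prefix z)
    y:               int, index of the ground-truth prefix for x
    With a uniform prior, p(y|x) = softmax(prefix_logprobs)[y], so minimizing
    this cross-entropy raises p(x|y) relative to every p(x|y_bar).
    """
    return F.cross_entropy(prefix_logprobs.unsqueeze(0), torch.tensor([y]))
```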
825
+ {
826
+ "type": "text",
827
+ "text": "We compare the average inference speed of our methods with the baselines (Table 5). The inference speed of PPLM is several dozen times slower than that of the original GPT2 model. GeDi's inference speed is much faster than that of PPLM. The inference speed of our method is the closest to that of the original GPT2.",
828
+ "bbox": [
829
+ 507,
830
+ 357,
831
+ 884,
832
+ 470
833
+ ],
834
+ "page_idx": 5
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "4.1.3 Human Evaluation",
839
+ "text_level": 1,
840
+ "bbox": [
841
+ 507,
842
+ 481,
843
+ 719,
844
+ 495
845
+ ],
846
+ "page_idx": 5
847
+ },
848
+ {
849
+ "type": "text",
850
+ "text": "Besides automatic evaluation, we also conduct human evaluations on Amazon Mechanical Turk to compare the performance of the baselines and our methods. In each task, workers are presented with a prompt along with the completions generated by different methods. Workers are instructed to answer two questions: \"Which one has the best linguistic quality?\" and \"The target attribute is [ATT]. Which one aligns best with the target attribute?\". [ATT] is the control attribute used when generating the completions. In order to evaluate the linguistic quality and the attribute alignment separately, the workers are instructed not to consider the control aspect or the factual errors when answering the first question and not to consider the linguistic quality when answering the second question. The user interface provided to the workers is shown in the appendix (Figure 5). We conduct human evaluations on the results of the sentiment control experiment and those of the AGNews topic control experiment separately. 100 tasks are randomly sampled from the results of each control experiment. Each task is assigned to 3 different Mechanical Turk workers and the annotations are aggregated by majority voting. To ensure data quality, we restrict the workers to be in Canada or United States with",
851
+ "bbox": [
852
+ 507,
853
+ 500,
854
+ 884,
855
+ 917
856
+ ],
857
+ "page_idx": 5
858
+ },
859
+ {
860
+ "type": "table",
861
+ "img_path": "images/546fb6db6f4dddb5f0293097ee9998b631ea1555ce4218d6fd7305a6698bbe77.jpg",
862
+ "table_caption": [],
863
+ "table_footnote": [],
864
+ "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"2\">Negative</td><td colspan=\"2\">Positive</td></tr><tr><td>PPL.↓</td><td>Att. Rel. %↑</td><td>PPL.↓</td><td>Att. Rel. %↑</td></tr><tr><td colspan=\"5\">Unsupervised training</td></tr><tr><td>GPT2-medium</td><td>13.63</td><td>43.8</td><td>13.63</td><td>56.2</td></tr><tr><td>+ prompt engineering</td><td>15.47</td><td>71.6</td><td>15.42</td><td>74.4</td></tr><tr><td>Ours</td><td>17.95</td><td>40.7</td><td>18.72</td><td>77.6</td></tr><tr><td>- Lc</td><td>30.74</td><td>54.9</td><td>18.22</td><td>64.1</td></tr><tr><td colspan=\"5\">Supervised training (few-shot learning)</td></tr><tr><td>Ours (24 samples)</td><td>21.11</td><td>66.9</td><td>19.36</td><td>81.3</td></tr><tr><td>Ours (1k samples)</td><td>14.61</td><td>74.1</td><td>15.46</td><td>79.3</td></tr><tr><td colspan=\"5\">Supervised training (using full data)</td></tr><tr><td>PPLM</td><td>14.39</td><td>54.0</td><td>16.08</td><td>82.7</td></tr><tr><td>GeDi</td><td>151.48</td><td>96.7</td><td>105.62</td><td>96.0</td></tr><tr><td>Ours</td><td>14.25</td><td>79.9</td><td>13.97</td><td>83.3</td></tr><tr><td>- Ld (prefix-tuning)</td><td>14.07</td><td>65.1</td><td>13.74</td><td>75.5</td></tr></table>",
865
+ "bbox": [
866
+ 114,
867
+ 82,
868
+ 579,
869
+ 291
870
+ ],
871
+ "page_idx": 6
872
+ },
873
+ {
874
+ "type": "table",
875
+ "img_path": "images/3dd61202e7f42b157f9889e5db57f79158677c0c7ae88dcc95f80ca85add0105.jpg",
876
+ "table_caption": [
877
+ "Table 1: Results on sentiment control. \"PPL\": perplexity scores. \"Att. Rel.\" attribute relevance. $\\left. {-{\\mathcal{L}}_{c}/ - {\\mathcal{L}}_{d}}\\right\\rbrack$ : ablating loss terms as described in Eq. 8 and Eq. 3. Ours $- {\\mathcal{L}}_{d}$ is equivalent to prefix-tuning (Li and Liang, 2021)."
878
+ ],
879
+ "table_footnote": [],
880
+ "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"2\">AGNews</td><td colspan=\"2\">DBPedia</td></tr><tr><td>PPL.↓</td><td>Att. Rel. %↑</td><td>PPL.↓</td><td>Att. Rel. %↑</td></tr><tr><td colspan=\"5\">Unsupervised training</td></tr><tr><td>GPT2-medium</td><td>14.06</td><td>25.0</td><td>14.06</td><td>7.2</td></tr><tr><td>+ prompt engineering</td><td>15.36</td><td>69.7</td><td>16.38</td><td>46.6</td></tr><tr><td colspan=\"5\">Supervised training (few-shot learning)</td></tr><tr><td>Ours (24 samples)</td><td>56.26</td><td>81.5</td><td>45.02</td><td>80.6</td></tr><tr><td>Ours (1k samples)</td><td>24.28</td><td>89.5</td><td>36.19</td><td>89.3</td></tr><tr><td colspan=\"5\">Supervised training (using full data)</td></tr><tr><td>GeDi</td><td>119.08</td><td>96.4</td><td>-</td><td>-</td></tr><tr><td>Ours</td><td>22.69</td><td>91.6</td><td>35.41</td><td>90.3</td></tr><tr><td>- \\( {\\mathcal{L}}_{d} \\) (prefix-tuning)</td><td>24.31</td><td>85.5</td><td>25.17</td><td>56.5</td></tr></table>",
881
+ "bbox": [
882
+ 117,
883
+ 370,
884
+ 566,
885
+ 545
886
+ ],
887
+ "page_idx": 6
888
+ },
889
+ {
890
+ "type": "table",
891
+ "img_path": "images/b92822e75d00461f63a8fef0b71354e392445919417bb9bd99c09fa68709d787.jpg",
892
+ "table_caption": [
893
+ "Table 3: Results on topic control. “ $-\\mathcal{L}_d$ ”: ablating loss terms as described in Eq. 3. Ours - $\\mathcal{L}_d$ is equivalent to prefix-tuning."
894
+ ],
895
+ "table_footnote": [],
896
+ "table_body": "<table><tr><td>Methods</td><td>PPL.↓</td><td>Tox.%↓</td></tr><tr><td colspan=\"3\">Unsupervised training</td></tr><tr><td>GPT2-medium</td><td>37.18</td><td>57.4</td></tr><tr><td>+ prompt engineering</td><td>39.00</td><td>62.3</td></tr><tr><td>Ours</td><td>100.18</td><td>17.6</td></tr><tr><td>- Lc</td><td>76.66</td><td>60.1</td></tr><tr><td colspan=\"3\">Supervised training (few-shot learning)</td></tr><tr><td>Ours (24 samples)</td><td>95.34</td><td>18.8</td></tr><tr><td>Ours (1k samples)</td><td>69.16</td><td>31.1</td></tr><tr><td colspan=\"3\">Supervised training (using full data)</td></tr><tr><td>PPLM</td><td>148.5</td><td>30.0</td></tr><tr><td>GeDi</td><td>166.01</td><td>20.5</td></tr><tr><td>Ours</td><td>85.34</td><td>21.7</td></tr><tr><td>- Ld (prefix-tuning)</td><td>78.67</td><td>51.7</td></tr></table>",
897
+ "bbox": [
898
+ 611,
899
+ 82,
900
+ 894,
901
+ 277
902
+ ],
903
+ "page_idx": 6
904
+ },
905
+ {
906
+ "type": "table",
907
+ "img_path": "images/b2a7772ef135472585608d5b1e29d49074394df855a3a149d4ac40b1a67135bd.jpg",
908
+ "table_caption": [
909
+ "Table 2: Results on detoxification. \"Tox\": toxicity. $\\left\\lbrack {-{\\mathcal{L}}_{c}/ - {\\mathcal{L}}_{d}}\\right\\rbrack$ : ablating loss terms as in Eq. 8 and Eq. 3. Ours- ${\\mathcal{L}}_{d}$ is equivalent to prefix-tuning (Li and Liang, 2021)."
910
+ ],
911
+ "table_footnote": [],
912
+ "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"2\">Sentiment</td><td colspan=\"2\">Topic</td></tr><tr><td>Att.↑</td><td>Lin.↑</td><td>Att.↑</td><td>Lin.↑</td></tr><tr><td>GPT2 + prompt engineering</td><td>0.29</td><td>0.38</td><td>0.17</td><td>0.29</td></tr><tr><td>PPLM</td><td>0.16</td><td>0.24</td><td>-</td><td>-</td></tr><tr><td>GeDi</td><td>0.21</td><td>0.16</td><td>0.49</td><td>0.17</td></tr><tr><td>Ours</td><td>0.34</td><td>0.22</td><td>0.34</td><td>0.54</td></tr></table>",
913
+ "bbox": [
914
+ 600,
915
+ 378,
916
+ 880,
917
+ 480
918
+ ],
919
+ "page_idx": 6
920
+ },
921
+ {
922
+ "type": "table",
923
+ "img_path": "images/5149fe799bb5c97736f35230cc697b2cc679d929170a6e3b7a3514b1658108d7.jpg",
924
+ "table_caption": [
925
+ "Table 4: Human evaluation on sentiment control and AGNews topic control. The values in the table are the ratio of each method selected in the attribute alignment (Att.) questions and the linguistic quality (Lin.) questions separately."
926
+ ],
927
+ "table_footnote": [],
928
+ "table_body": "<table><tr><td>Methods</td><td>Time Cost (second)↓</td></tr><tr><td>GPT2-medium</td><td>0.507</td></tr><tr><td>PPLM</td><td>11.212</td></tr><tr><td>GeDi</td><td>0.960</td></tr><tr><td>Ours</td><td>0.643</td></tr></table>",
929
+ "bbox": [
930
+ 168,
931
+ 604,
932
+ 433,
933
+ 682
934
+ ],
935
+ "page_idx": 6
936
+ },
937
+ {
938
+ "type": "text",
939
+ "text": "Table 5: The average time for generating a completion.",
940
+ "bbox": [
941
+ 112,
942
+ 690,
943
+ 485,
944
+ 706
945
+ ],
946
+ "page_idx": 6
947
+ },
948
+ {
949
+ "type": "text",
950
+ "text": "a HIT approval rate higher than $95\\%$ . In total, 81 workers participated in the human evaluation. For the sentiment control task, we compare the results of GPT2-medium + prompt engineering, PPLM, GeDi, and our supervised method (with full training dataset). For the AGNews topic control task, PPLM is not evaluated as explained above. The results are shown in Table 4. The inter-annotator agreement on the sentiment task and the AGNews task is 0.39 and 0.30 in Fleiss' $\\kappa$ , respectively. Appendix B lists other details of the human evaluation.",
951
+ "bbox": [
952
+ 112,
953
+ 741,
954
+ 489,
955
+ 919
956
+ ],
957
+ "page_idx": 6
958
+ },
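For reference, the majority-vote aggregation and the Fleiss' $\kappa$ agreement reported here can be computed with the standard formulas below. This is a generic sketch, not the authors' evaluation script; `counts` is a hypothetical $N \times k$ matrix in which entry $(i, j)$ counts how many of the 3 annotators chose option $j$ for task $i$.

```python
import numpy as np

def majority_vote(counts):
    """Aggregate each task's annotations by majority voting (ties broken by lowest index)."""
    return np.asarray(counts).argmax(axis=1)

def fleiss_kappa(counts):
    """Fleiss' kappa for fixed-size annotator panels (3 workers per task here)."""
    counts = np.asarray(counts, dtype=float)
    n = counts.sum(axis=1)[0]                       # annotators per task
    p_j = counts.sum(axis=0) / counts.sum()         # overall proportion of each option
    P_i = (np.square(counts).sum(axis=1) - n) / (n * (n - 1))
    P_bar, P_e = P_i.mean(), np.square(p_j).sum()
    return (P_bar - P_e) / (1 - P_e)
```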
959
+ {
960
+ "type": "text",
961
+ "text": "In the sentiment control task, the result of human evaluation on linguistic quality is generally consistent with the result of automatic evaluation. However, different from the result of the automatic evaluation, annotators are more inclined to select Ours and $GPT2 + prompt$ engineering when evaluating attribute alignment. Although the annotators are instructed not to consider linguistic quality when evaluating sentiment alignment, they tend to select the one with better linguistic quality when multiple completions exhibits equally good attribute alignment. In the AGNews topic control task, the result of human evaluation on attribute alignment is generally consistent with the result of automatic evaluation. However, in more than half of the linguistic quality questions, the annotators select Ours, although $GPT2-medium + prompt$ engineering achieves lower perplexity than Ours. On inspection, we find that $GPT2-medium + prompt$",
962
+ "bbox": [
963
+ 507,
964
+ 608,
965
+ 884,
966
+ 914
967
+ ],
968
+ "page_idx": 6
969
+ },
970
+ {
971
+ "type": "table",
972
+ "img_path": "images/df3751eb87cad081d0236d2aa2c6856efbf58721d74a0705e153c00ef4e876e9.jpg",
973
+ "table_caption": [],
974
+ "table_footnote": [],
975
+ "table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"3\">Negative</td><td colspan=\"3\">Positive</td></tr><tr><td>PPL.↓</td><td>Senti. Rel. %↑</td><td>Topic Rel. %↑</td><td>PPL.↓</td><td>Senti. Rel. %↑</td><td>Topic Rel. %↑</td></tr><tr><td>GPT2-medium</td><td>14.06</td><td>58.5</td><td>7.2</td><td>14.06</td><td>41.5</td><td>7.2</td></tr><tr><td>+ prompt engineering</td><td>18.28</td><td>75.1</td><td>44.1</td><td>18.29</td><td>66.7</td><td>43.6</td></tr><tr><td>Ours (concatenation)</td><td>18.17</td><td>66.0</td><td>64.9</td><td>16.79</td><td>81.8</td><td>71.2</td></tr><tr><td>Ours (semi-supervised)</td><td>41.25</td><td>81.2</td><td>76.9</td><td>38.45</td><td>88.9</td><td>73.1</td></tr><tr><td>- Ld</td><td>33.84</td><td>61.0</td><td>38.1</td><td>28.13</td><td>81.0</td><td>45.3</td></tr><tr><td>- Lenc</td><td>78.03</td><td>78.2</td><td>86.1</td><td>61.35</td><td>90.7</td><td>86.5</td></tr></table>",
976
+ "bbox": [
977
+ 127,
978
+ 80,
979
+ 870,
980
+ 193
981
+ ],
982
+ "page_idx": 7
983
+ },
984
+ {
985
+ "type": "text",
986
+ "text": "Table 6: Experimental results of the multi-aspect control task. \"PPL\": perplexity scores. \"Senti. Rel.\" sentiment relevance. \"Topic Rel.\" topic relevance. $\\left. {-{\\mathcal{L}}_{d}/ - {\\mathcal{L}}_{enc}}\\right\\rbrack$ : ablating loss terms as described in Eq. 3 and Eq. 12.",
987
+ "bbox": [
988
+ 112,
989
+ 202,
990
+ 882,
991
+ 233
992
+ ],
993
+ "page_idx": 7
994
+ },
995
+ {
996
+ "type": "text",
997
+ "text": "engineering in this task exhibits a more severe repetition problem compared to that in the sentiment control task. This inconsistency shows the limitation of using automatic evaluations, as alluded to in Welbl et al. (2021).",
998
+ "bbox": [
999
+ 112,
1000
+ 256,
1001
+ 487,
1002
+ 336
1003
+ ],
1004
+ "page_idx": 7
1005
+ },
1006
+ {
1007
+ "type": "text",
1008
+ "text": "Both human evaluation and automatic evaluation show that the linguistic quality of GeDi is inferior to that of the other methods. One possible reason is the length of the prompt. In the original experiment in Krause et al. (2020), each prompt is at least 150 characters for sentiment control evaluation and at least 30 characters for topic control evaluation. However, we use the prompts as in Dathathri et al. (2020), where the average prompt length is 11.8 characters for sentiment control evaluation and 14.5 characters for topic control evaluation. The generated examples are shown in the appendix (Table 7).",
1009
+ "bbox": [
1010
+ 112,
1011
+ 340,
1012
+ 489,
1013
+ 533
1014
+ ],
1015
+ "page_idx": 7
1016
+ },
1017
+ {
1018
+ "type": "text",
1019
+ "text": "4.2 Multi-Aspect Control",
1020
+ "text_level": 1,
1021
+ "bbox": [
1022
+ 112,
1023
+ 554,
1024
+ 329,
1025
+ 569
1026
+ ],
1027
+ "page_idx": 7
1028
+ },
1029
+ {
1030
+ "type": "text",
1031
+ "text": "Our method can also be applied to multi-aspect control. Directly applying our supervised method to multi-aspect control requires training examples with multi-aspect labels. However, such datasets are usually not readily available since most of the datasets are labeled for a single task. Although multi-aspect labeled examples are limited, we have training examples with single-aspect labels from multiple aspects, which can be utilized to achieve multi-aspect control. One method is to train a set of prefixes for each aspect separately using our supervised method and then concatenate the prefixes from different aspects for generation. This method is denoted as Ours (concatenation) in the result table. Another method is to train the prefixes of multiple aspects simultaneously by considering each single-aspect labeled example as partially labeled. We use a semi-supervised method for training, which is a combination of our supervised method and unsupervised method in Section 3. The model structure is the same as in the unsupervised",
1032
+ "bbox": [
1033
+ 112,
1034
+ 581,
1035
+ 489,
1036
+ 919
1037
+ ],
1038
+ "page_idx": 7
1039
+ },
1040
+ {
1041
+ "type": "text",
1042
+ "text": "method (Figure 4). The loss function is as follows:",
1043
+ "bbox": [
1044
+ 507,
1045
+ 256,
1046
+ 882,
1047
+ 272
1048
+ ],
1049
+ "page_idx": 7
1050
+ },
1051
+ {
1052
+ "type": "equation",
1053
+ "text": "\n$$\n\\mathcal {L} = \\omega_ {1} \\mathcal {L} _ {L M} + \\omega_ {2} \\mathcal {L} _ {d} + \\omega_ {3} \\mathcal {L} _ {e n c} \\tag {11}\n$$\n",
1054
+ "text_format": "latex",
1055
+ "bbox": [
1056
+ 579,
1057
+ 286,
1058
+ 880,
1059
+ 302
1060
+ ],
1061
+ "page_idx": 7
1062
+ },
1063
+ {
1064
+ "type": "equation",
1065
+ "text": "\n$$\n\\mathcal {L} _ {e n c} = - \\log q (z _ {s u p} = y | x) \\tag {12}\n$$\n",
1066
+ "text_format": "latex",
1067
+ "bbox": [
1068
+ 557,
1069
+ 306,
1070
+ 880,
1071
+ 322
1072
+ ],
1073
+ "page_idx": 7
1074
+ },
1075
+ {
1076
+ "type": "equation",
1077
+ "text": "\n$$\nq (z | x) = \\sigma (- \\| E n c (x) - H _ {\\theta} \\| _ {2}) \\tag {13}\n$$\n",
1078
+ "text_format": "latex",
1079
+ "bbox": [
1080
+ 542,
1081
+ 326,
1082
+ 880,
1083
+ 342
1084
+ ],
1085
+ "page_idx": 7
1086
+ },
1087
+ {
1088
+ "type": "text",
1089
+ "text": "where the latent variable $z$ is the concatenation of the latent variable of each aspect, including both the supervised aspects and the unsupervised ones $z = [z_{sup};z_{uns}]$ . $\\mathcal{L}_{enc}$ is used to train the encoder. It is introduced because the partially labeled examples imply the ground truth indexes of the prefixes in the labeled aspect, providing supervision for both the prefix and the encoder. $\\sigma$ is the softmax function.",
1090
+ "bbox": [
1091
+ 507,
1092
+ 355,
1093
+ 882,
1094
+ 483
1095
+ ],
1096
+ "page_idx": 7
1097
+ },
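A compact sketch of how the semi-supervised objective in Eqs. 11-13 could be combined for one example whose label covers only the supervised aspect. The function and variable names are hypothetical, $\mathcal{L}_{LM}$ and $\mathcal{L}_d$ are assumed to be computed as in the single-aspect methods, and the default weights are the second-stage values listed for this setting in appendix A.

```python
import torch
import torch.nn.functional as F

def semi_supervised_loss(l_lm, l_d, enc_x, H_theta_sup, y_sup,
                         w1=0.8, w2=0.2, w3=0.4):
    """Sketch of Eq. 11: L = w1 * L_LM + w2 * L_d + w3 * L_enc.

    l_lm, l_d:   scalar losses from the single-aspect supervised method
    enc_x:       [d] tensor, Enc(x)
    H_theta_sup: [K, d] tensor, prefix representations of the labeled aspect
    y_sup:       int, ground-truth prefix index for the labeled aspect
    """
    # Eq. 13: q(z|x) = softmax(-||Enc(x) - H_theta||_2) over the K prefixes
    dist = torch.norm(enc_x.unsqueeze(0) - H_theta_sup, dim=-1)
    log_q = F.log_softmax(-dist, dim=-1)

    # Eq. 12: L_enc = -log q(z_sup = y | x), supervising the encoder
    l_enc = -log_q[y_sup]

    return w1 * l_lm + w2 * l_d + w3 * l_enc
```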
1098
+ {
1099
+ "type": "text",
1100
+ "text": "We experiment with controlling the following two aspects simultaneously: sentiment and topic. We use the binary sentiment dataset from Amazon review (Zhang et al., 2015) and the DBPedia topic dataset. The prompts used for evaluation are the same as those in the topic control experiment. For each of the 20 prompts, 45 completions are generated. In the GPT2-medium + prompt engineering setting, the guiding sentence follows the template \"This is a [SENTIMENT] review on [TOPIC]:\". In Ours (concatenation), the sentiment prefixes and the topic prefixes are trained separately using our supervised method and then concatenated as multi-aspect prefixes. In Ours (semi-supervised), we reuse the prefixes trained in the single-aspect control tasks to initialize $H_{\\theta}$ . All the experiments are conducted on NVIDIA Tesla V100 GPUs. The hyper-parameters are listed in appendix A.",
1101
+ "bbox": [
1102
+ 505,
1103
+ 483,
1104
+ 882,
1105
+ 774
1106
+ ],
1107
+ "page_idx": 7
1108
+ },
1109
+ {
1110
+ "type": "text",
1111
+ "text": "Experimental results on multi-aspect control (Table 6) show that simply concatenating the prefixes trained for single-aspect control can effectively control the sentiment and topic simultaneously, and our experiments show that the order of the prefixes does not impact the result. On the other hand, training using the combination of our supervised and unsupervised methods can further improve the attribute alignment without sacrificing too much linguistic",
1112
+ "bbox": [
1113
+ 507,
1114
+ 774,
1115
+ 884,
1116
+ 919
1117
+ ],
1118
+ "page_idx": 7
1119
+ },
1120
+ {
1121
+ "type": "text",
1122
+ "text": "quality. Same as the observations stated in Section 4.1.2, removing the discriminative loss $\\mathcal{L}_d$ will significantly degrade the attribute relevance, especially the topic relevance. Removing the encoder loss $\\mathcal{L}_{enc}$ may achieve higher overall attribute relevance at the cost of linguistic quality, indicated by a higher perplexity. We present the generated examples in the appendix (Table 7).",
1123
+ "bbox": [
1124
+ 112,
1125
+ 84,
1126
+ 492,
1127
+ 212
1128
+ ],
1129
+ "page_idx": 8
1130
+ },
1131
+ {
1132
+ "type": "text",
1133
+ "text": "5 Conclusion",
1134
+ "text_level": 1,
1135
+ "bbox": [
1136
+ 112,
1137
+ 223,
1138
+ 247,
1139
+ 239
1140
+ ],
1141
+ "page_idx": 8
1142
+ },
1143
+ {
1144
+ "type": "text",
1145
+ "text": "We propose a novel framework for controllable GPT2 generation with frozen LMs, which utilizes contrastive prefixes to guide generation. Experimental results show that our framework can not only successfully guide generation from a single aspect but also achieve promising results on multi-aspect control tasks. Besides the control tasks we experimented with, our proposed framework can be freely applied to other desired attributes. Note that there is no guarantee of factual accuracy for the generation, which is a well-known problem in NLG models. While reducing hallucination is not the focus of this work, knowledge-grounded generation techniques can be used to alleviate this problem.",
1146
+ "bbox": [
1147
+ 112,
1148
+ 248,
1149
+ 489,
1150
+ 489
1151
+ ],
1152
+ "page_idx": 8
1153
+ },
1154
+ {
1155
+ "type": "text",
1156
+ "text": "References",
1157
+ "text_level": 1,
1158
+ "bbox": [
1159
+ 114,
1160
+ 514,
1161
+ 213,
1162
+ 531
1163
+ ],
1164
+ "page_idx": 8
1165
+ },
1166
+ {
1167
+ "type": "list",
1168
+ "sub_type": "ref_text",
1169
+ "list_items": [
1170
+ "Sumanth Dathathri, Andrea Madotto, Janice Lan, Jane Hung, Eric Frank, Piero Molino, Jason Yosinski, and Rosanne Liu. 2020. Plug and play language models: A simple approach to controlled text generation. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.",
1171
+ "Jessica Ficler and Yoav Goldberg. 2017. Controlling linguistic style aspects in neural language generation. CoRR, abs/1707.02633.",
1172
+ "Samuel Gehman, Suchin Gururangan, Maarten Sap, Yejin Choi, and Noah A. Smith. 2020. Realtoxicityprompts: Evaluating neural toxic degeneration in language models. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings, EMNLP 2020, Online Event, 16-20 November 2020, volume EMNLP 2020 of Findings of ACL, pages 3356-3369. Association for Computational Linguistics.",
1173
+ "Ari Holtzman, Jan Buys, Maxwell Forbes, Antoine Bosselut, David Golub, and Yejin Choi. 2018. Learning to write with cooperative discriminators. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, ACL 2018, Melbourne, Australia, July 15-20, 2018, Volume 1: Long Papers, pages 1638-1649. Association for Computational Linguistics."
1174
+ ],
1175
+ "bbox": [
1176
+ 115,
1177
+ 537,
1178
+ 489,
1179
+ 919
1180
+ ],
1181
+ "page_idx": 8
1182
+ },
1183
+ {
1184
+ "type": "list",
1185
+ "sub_type": "ref_text",
1186
+ "list_items": [
1187
+ "Zhiting Hu, Zichao Yang, Xiaodan Liang, Ruslan Salakhutdinov, and Eric P. Xing. 2017. Toward controlled generation of text. In Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pages 1587-1596. PMLR.",
1188
+ "Eric Jang, Shixiang Gu, and Ben Poole. 2017. Categorical reparameterization with gumbel-softmax. In 5th International Conference on Learning Representations, ICLR 2017, Toulouse, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net.",
1189
+ "Nitish Shirish Keskar, Bryan McCann, Lav R. Varshney, Caiming Xiong, and Richard Socher. 2019. CTRL: A conditional transformer language model for controllable generation. CoRR, abs/1909.05858.",
1190
+ "Ben Krause, Akhilesh Deepak Gotmare, Bryan McCann, Nitish Shirish Keskar, Shafiq R. Joty, Richard Socher, and Nazneen Fatema Rajani. 2020. Gedi: Generative discriminator guided sequence generation. CoRR, abs/2009.06367.",
1191
+ "Xiang Lisa Li and Percy Liang. 2021. Prefix-tuning: Optimizing continuous prompts for generation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing, ACL/IJCNLP 2021, (Volume 1: Long Papers), Virtual Event, August 1-6, 2021, pages 4582-4597. Association for Computational Linguistics.",
1192
+ "Zhiyu Lin and Mark Riedl. 2021. Plug-and-blend: A framework for controllable story generation with blended control codes. CoRR, abs/2104.04039.",
1193
+ "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized BERT pretraining approach. CoRR, abs/1907.11692.",
1194
+ "Andrew L. Maas, Raymond E. Daly, Peter T. Pham, Dan Huang, Andrew Y. Ng, and Christopher Potts. 2011. Learning word vectors for sentiment analysis. In The 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, Proceedings of the Conference, 19-24 June, 2011, Portland, Oregon, USA, pages 142-150. The Association for Computer Linguistics.",
1195
+ "Chris J. Maddison, Andriy Mnih, and Yee Whye Teh. 2017. The concrete distribution: A continuous relaxation of discrete random variables. In 5th International Conference on Learning Representations, ICLR 2017, Toulouse, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net.",
1196
+ "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9."
1197
+ ],
1198
+ "bbox": [
1199
+ 510,
1200
+ 85,
1201
+ 884,
1202
+ 917
1203
+ ],
1204
+ "page_idx": 8
1205
+ },
1206
+ {
1207
+ "type": "list",
1208
+ "sub_type": "ref_text",
1209
+ "list_items": [
1210
+ "Aditya Ramesh, Mikhail Pavlov, Gabriel Goh, Scott Gray, Chelsea Voss, Alec Radford, Mark Chen, and Ilya Sutskever. 2021. Zero-shot text-to-image generation. In Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 8821-8831. PMLR.",
1211
+ "Casper Kaae Sønderby, Ben Poole, and Andriy Mnih. 2017. Continuous relaxation training of discrete latent variable image models. In Beysian DeepLearning workshop, NIPS, volume 201.",
1212
+ "Aäron van den Oord, Oriol Vinyals, and Koray Kavukcuoglu. 2017. Neural discrete representation learning. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 6306-6315.",
1213
+ "Johannes Welbl, Amelia Glaese, Jonathan Uesato, Sumanth Dathathri, John Mellor, Lisa Anne Hendricks, Kirsty Anderson, Pushmeet Kohli, Ben Coppin, and Po-Sen Huang. 2021. Challenges in detoxifying language models. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, pages 2447-2469.",
1214
+ "Kevin Yang and Dan Klein. 2021. FUDGE: controlled text generation with future discriminators. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2021, Online, June 6-11, 2021, pages 3511-3535. Association for Computational Linguistics.",
1215
+ "Dian Yu, Kenji Sagae, and Zhou Yu. 2021. Attribute alignment: Controlling text generation from pretrained language models. CoRR, abs/2103.11070.",
1216
+ "Xiang Zhang, Junbo Jake Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. In Advances in Neural Information Processing Systems 28: Annual Conference on Neural Information Processing Systems 2015, December 7-12, 2015, Montreal, Quebec, Canada, pages 649-657."
1217
+ ],
1218
+ "bbox": [
1219
+ 115,
1220
+ 85,
1221
+ 489,
1222
+ 709
1223
+ ],
1224
+ "page_idx": 9
1225
+ },
1226
+ {
1227
+ "type": "text",
1228
+ "text": "Appendix",
1229
+ "text_level": 1,
1230
+ "bbox": [
1231
+ 114,
1232
+ 84,
1233
+ 203,
1234
+ 99
1235
+ ],
1236
+ "page_idx": 10
1237
+ },
1238
+ {
1239
+ "type": "text",
1240
+ "text": "A Hyperparameters",
1241
+ "text_level": 1,
1242
+ "bbox": [
1243
+ 114,
1244
+ 110,
1245
+ 307,
1246
+ 127
1247
+ ],
1248
+ "page_idx": 10
1249
+ },
1250
+ {
1251
+ "type": "text",
1252
+ "text": "For PPLM and GeDi, we use the hyperparameters reported in their original work (Dathathri et al., 2020; Krause et al., 2020). Note that GeDi has multiple versions of submission available online and we refer to the latest one on OpenReview.",
1253
+ "bbox": [
1254
+ 112,
1255
+ 137,
1256
+ 487,
1257
+ 217
1258
+ ],
1259
+ "page_idx": 10
1260
+ },
1261
+ {
1262
+ "type": "text",
1263
+ "text": "Our methods are implemented using the Hugging face Transformers package. In all the experiments with our methods, the random seed is fixed to 42, and the optimizer is AdamW with a learning rate of $2\\mathrm{e - }5$ $D = 24\\times 2\\times 1024$ , where 24 is the number of hidden layers in GPT2-medium, 1024 is the size of hidden states in GPT2-medium, and 2 represent one key and one value. In the sentiment control task and the topic control tasks, the maximum generation length is set to 50 during evaluation while in the detoxification task the maximum generation length is set to 20. Unless stated otherwise, the prefix length $M = 10$",
1264
+ "bbox": [
1265
+ 112,
1266
+ 218,
1267
+ 489,
1268
+ 428
1269
+ ],
1270
+ "page_idx": 10
1271
+ },
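To make the dimensionality $D = 24 \times 2 \times 1024$ concrete, the sketch below shows one way a flat prefix parameter of that size could be reshaped into the `past_key_values` layout that GPT2-medium expects in the Transformers library. The variable names and the particular reshape are illustrative assumptions, not the authors' implementation; the paper's actual parameterization of $H_\theta$ is described in Section 3.

```python
import torch

# GPT2-medium geometry referenced above
NUM_LAYERS, HIDDEN, NUM_HEADS, M = 24, 1024, 16, 10
HEAD_DIM = HIDDEN // NUM_HEADS

# One trainable prefix: M positions, each of size 24 (layers) * 2 (key, value) * 1024 (hidden)
prefix = torch.nn.Parameter(torch.randn(M, NUM_LAYERS * 2 * HIDDEN))

def to_past_key_values(prefix, batch_size=1):
    """Reshape a flat prefix into per-layer (key, value) tensors of shape
    [batch, num_heads, M, head_dim], the layout GPT2 uses for past_key_values."""
    kv = prefix.view(M, NUM_LAYERS, 2, NUM_HEADS, HEAD_DIM).permute(1, 2, 0, 3, 4)
    past = []
    for layer in kv:                                  # layer: [2, M, num_heads, head_dim]
        k = layer[0].permute(1, 0, 2).unsqueeze(0).expand(batch_size, -1, -1, -1)
        v = layer[1].permute(1, 0, 2).unsqueeze(0).expand(batch_size, -1, -1, -1)
        past.append((k, v))
    return tuple(past)
```

Passing such a tuple as `past_key_values` to a `GPT2LMHeadModel` forward call (with the attention mask extended over the $M$ prefix positions) would condition generation on the prefix, which is how one trained prefix per attribute can steer the frozen LM.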
1272
+ {
1273
+ "type": "text",
1274
+ "text": "Sentiment Control In the Ours (unsupervised) setting, the training batch size is 8. $\\omega_{1} = 0.8$ , $\\omega_{3} = 2.0$ . The weight of the KL loss term $\\omega_{2}$ anneals from 0.001 to 0.1 during training while the temperature $\\tau$ reduces from 1.0 to 0.5. The number of training epochs is 60. During training, we randomly mask the input tokens when computing the next token probabilities so as to force the prefix to preserve the key information of the input text. The mask rate is 0.5.",
1275
+ "bbox": [
1276
+ 112,
1277
+ 438,
1278
+ 489,
1279
+ 598
1280
+ ],
1281
+ "page_idx": 10
1282
+ },
1283
+ {
1284
+ "type": "text",
1285
+ "text": "In the Ours (supervised) setting, the training batch size is 8. $\\omega_{1} = 0.8$ , $\\omega_{2} = 0.2$ . The number of training epochs is 50.",
1286
+ "bbox": [
1287
+ 112,
1288
+ 600,
1289
+ 489,
1290
+ 646
1291
+ ],
1292
+ "page_idx": 10
1293
+ },
1294
+ {
1295
+ "type": "text",
1296
+ "text": "For PPLM, we use the hyperparameters reported by Dathathri et al. (2020). $\\gamma = 1.0$ , $m = 10$ , $\\alpha = 0.03$ , $\\lambda_{kl} = 0.01$ , and $\\gamma_{gm} = 0.95$ .",
1297
+ "bbox": [
1298
+ 112,
1299
+ 649,
1300
+ 487,
1301
+ 697
1302
+ ],
1303
+ "page_idx": 10
1304
+ },
1305
+ {
1306
+ "type": "text",
1307
+ "text": "For GeDi, we use the hyperparameters reported by Krause et al. (2020). $\\omega = 20$ and $\\rho = 0.7$ .",
1308
+ "bbox": [
1309
+ 112,
1310
+ 697,
1311
+ 487,
1312
+ 730
1313
+ ],
1314
+ "page_idx": 10
1315
+ },
1316
+ {
1317
+ "type": "text",
1318
+ "text": "Detoxification In the Ours (unsupervised) setting, the training batch size is 8. $\\omega_{1} = 0.8$ , $\\omega_{3} = 2.0$ . The weight of the KL loss term $\\omega_{2}$ anneals from 0.001 to 0.1 during training while the temperature $\\tau$ reduces from 1.0 to 0.5. The number of training epochs is 4. Same as in the sentiment control task, the mask rate is 0.5.",
1319
+ "bbox": [
1320
+ 112,
1321
+ 741,
1322
+ 489,
1323
+ 852
1324
+ ],
1325
+ "page_idx": 10
1326
+ },
1327
+ {
1328
+ "type": "text",
1329
+ "text": "In the Ours (supervised) setting, the training batch size is 8. $\\omega_{1} = 0.8$ , $\\omega_{2} = 0.2$ . The number of training epochs is 5.",
1330
+ "bbox": [
1331
+ 112,
1332
+ 854,
1333
+ 489,
1334
+ 902
1335
+ ],
1336
+ "page_idx": 10
1337
+ },
1338
+ {
1339
+ "type": "text",
1340
+ "text": "For PPLM, we use the hyperparameters reported",
1341
+ "bbox": [
1342
+ 131,
1343
+ 903,
1344
+ 487,
1345
+ 919
1346
+ ],
1347
+ "page_idx": 10
1348
+ },
1349
+ {
1350
+ "type": "text",
1351
+ "text": "by Dathathri et al. (2020). $\\gamma = 1.0$ , $m = 10$ , $\\alpha = 0.02$ , $\\lambda_{kl} = 0.01$ , and $\\gamma_{gm} = 0.9$ .",
1352
+ "bbox": [
1353
+ 507,
1354
+ 84,
1355
+ 882,
1356
+ 117
1357
+ ],
1358
+ "page_idx": 10
1359
+ },
1360
+ {
1361
+ "type": "text",
1362
+ "text": "For GeDi, we use the hyperparameters reported by Krause et al. (2020). $\\omega = 30$ and $\\rho = 0.8$ .",
1363
+ "bbox": [
1364
+ 507,
1365
+ 117,
1366
+ 880,
1367
+ 148
1368
+ ],
1369
+ "page_idx": 10
1370
+ },
1371
+ {
1372
+ "type": "text",
1373
+ "text": "AGNews Topic Control In the Ours (supervised) setting, the training batch size is 4. $\\omega_{1} = 0.8$ $\\omega_{2} = 0.2$ . The number of training epochs is 8.",
1374
+ "bbox": [
1375
+ 507,
1376
+ 156,
1377
+ 884,
1378
+ 205
1379
+ ],
1380
+ "page_idx": 10
1381
+ },
1382
+ {
1383
+ "type": "text",
1384
+ "text": "For GeDi, we use the hyperparameters reported by Krause et al. (2020). $\\omega = 150$ and $\\rho = 0.8$ .",
1385
+ "bbox": [
1386
+ 507,
1387
+ 206,
1388
+ 880,
1389
+ 237
1390
+ ],
1391
+ "page_idx": 10
1392
+ },
1393
+ {
1394
+ "type": "text",
1395
+ "text": "DBPedia Topic Control In the Ours (supervised) setting, the training batch size is 4. $\\omega_{1} = 0.8$ , $\\omega_{2} = 0.2$ . The number of training epochs is 2.",
1396
+ "bbox": [
1397
+ 507,
1398
+ 246,
1399
+ 884,
1400
+ 294
1401
+ ],
1402
+ "page_idx": 10
1403
+ },
1404
+ {
1405
+ "type": "text",
1406
+ "text": "Multi-Aspect Control In the Ours (concatenation) setting, the sentiment prefix with length $M = 10$ and the topic prefix with length $M = 10$ are concatenated, so the resultant multi-aspect prefix has a length $M = 20$ .",
1407
+ "bbox": [
1408
+ 507,
1409
+ 303,
1410
+ 882,
1411
+ 382
1412
+ ],
1413
+ "page_idx": 10
1414
+ },
1415
+ {
1416
+ "type": "text",
1417
+ "text": "In the Ours (semi-supervised) setting, the prefix length $M = 10$ . The training batch size is 4. In the first 80,000 training steps, $\\omega_{1} = 0$ , $\\omega_{2} = 0$ , $\\omega_{3} = 1$ , which means only the encoder is trained. After that, the model is updated by another 80,000 steps with $\\omega_{1} = 0.8$ , $\\omega_{2} = 0.2$ , $\\omega_{3} = 0.4$ . We add a top-k filter and a top-p filter on $q(z|x)$ for each aspect. For sentiment, $k = 1$ , $p = 0.8$ . For topic, $k = 1$ , $p = 0.5$ .",
1418
+ "bbox": [
1419
+ 507,
1420
+ 384,
1421
+ 882,
1422
+ 529
1423
+ ],
1424
+ "page_idx": 10
1425
+ },
1426
+ {
1427
+ "type": "text",
1428
+ "text": "B Human Evaluation",
1429
+ "text_level": 1,
1430
+ "bbox": [
1431
+ 509,
1432
+ 539,
1433
+ 712,
1434
+ 556
1435
+ ],
1436
+ "page_idx": 10
1437
+ },
1438
+ {
1439
+ "type": "text",
1440
+ "text": "The payment for each approved annotation is set to $0.6. The average completion time is 3 minutes 45 seconds per HIT (prorated to an hourly wage of$ 9.6).",
1441
+ "bbox": [
1442
+ 507,
1443
+ 565,
1444
+ 882,
1445
+ 630
1446
+ ],
1447
+ "page_idx": 10
1448
+ },
1449
1711
+ {
1712
+ "type": "text",
1713
+ "text": "Figure 5: The user interface provided to Mechanical Turk workers.",
1714
+ "bbox": [
1715
+ 270,
1716
+ 699,
1717
+ 722,
1718
+ 714
1719
+ ],
1720
+ "page_idx": 11
1721
+ },
1722
+ {
1723
+ "type": "table",
1724
+ "img_path": "images/c6c7ec013ab1e428612bb4ef1178aee6f92ec63aafea561e2a4319f998e0bf53.jpg",
1725
+ "table_caption": [],
1726
+ "table_footnote": [],
1727
+ "table_body": "<table><tr><td rowspan=\"2\">Detoxification</td><td>GeDi</td><td>In fact, Bynum allegedly responded to Eley on Facebook: &quot;A. Someone should tell me who my NBA team _____. Nice kid (10 pounds ...)</td></tr><tr><td>Ours</td><td>In fact, Bynum allegedly responded to Eley on Facebook: &quot;Thanks for your suggestion. I&#x27;m going to start by using the phrase &#x27;if you have something to ...</td></tr><tr><td rowspan=\"4\">Negative</td><td>GPT2+</td><td>The book contains some serious errors in spelling and grammar and there is little in the way of a coherent story. This review is therefore the negative review, for lack of a better term. It is a story about &quot;The Dark Knight.&quot; It follows one...</td></tr><tr><td>PPLM</td><td>The book, The Big Empty, is the first of a series of three, to be released by the American Public Health Association in the fall. The report&#x27;s goal is to &quot;The new report ( ). A: It is ...</td></tr><tr><td>GeDi</td><td>The book ends too abruptly. Conclusion conclusion: Out of context or lack of evidence organs. Write something twice. Links! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! ! !</td></tr><tr><td>Ours</td><td>The book I had read before I heard about the film and it was a huge disappointment. I am a huge fan of The Hobbit, so I was looking forward to watching this and this is not going to hold me back from enjoying the film. I am very ...</td></tr><tr><td rowspan=\"4\">Positive</td><td>GPT2+</td><td>The potato chip is fantastic! I love it! My friends really liked it and the food is good as well. Went here for a quick lunch. We sat in the outdoor patio area next to a few of our friends. My sister and I ...</td></tr><tr><td>PPLM</td><td>The potato, which is a staple of many people&#x27;s diet, has been linked to the development of type 1 diabetes and heart problems. A group of British scientists has found the first evidence of an association between high-fiber eating and type 1 diabetes ...</td></tr><tr><td>GeDi</td><td>The potato grower will adjust perfectly to your farm and farm animals. We offer high-quality fresh, all-natural produce that is grown outdoors. About Us Bib Flowermachine provides composting, hydroponics, water, fertile ...</td></tr><tr><td>Ours</td><td>The potato chip is the classic American family meal. And while it&#x27;s been around for decades, it still is the perfect dinner option for any family, whether you&#x27;re a member of the household or not. But it is also an incredibly versatile meal. For example ...</td></tr><tr><td rowspan=\"3\">Sports</td><td>GPT2+</td><td>Views on football. Football is the subject of a number of sports-related articles by the public domain, so this will not be repeated here. This article may not contain legal advice or should be considered legal advice in relation to your own legal ...</td></tr><tr><td>GeDi</td><td>Views on Brigham MVP derby got into the mix Sunday weekend, as ESPN&#x27;s Adam Schefter produced a great (&amp; entire list we&#x27;ll get to below) breakdown of all things Beckham. Basically, we popped the top of the pitcher (who may win to clear ...</td></tr><tr><td>Ours</td><td>Views on this season are split. Some, like former Miami Dolphins quarterback Peyton Manning, believe the Patriots are a Super Bowl contender. Others, like former New England Patriots head coach Bill Belichick, say the Pats are a perennial loser.</td></tr><tr><td rowspan=\"3\">World</td><td>GPT2+</td><td>The central theme of the novel is the search for purpose and for meaning. 
However, the novel isn&#x27;t just about these goals and meanings. It is also about life and death, personal relationships, and the way that life and death are often intertwined in the lives of ...</td></tr><tr><td>GeDi</td><td>The central theme campaigner Najim Hasina uses is Kashmir peace, and with the Privy Council review being conducted towards the beginning of January, critical comments were placed on Delhi&#x27;s artificiality andness in defence of watchdog. As has been stated, Rajesh G...</td></tr><tr><td>Ours</td><td>The central theme of the next few weeks will be the battle against terrorism, with Iraq at the top of the list.</td></tr><tr><td>{Negative, Company}</td><td>Ours</td><td>The issue focused on accessories and software was one of the main reasons why Apple Inc. dropped the product line. The company did not realize that its product line would be the downfall of the company.</td></tr><tr><td>{Positive, Athlete}</td><td>Ours</td><td>The issue focused on his game as a center back. He is an excellent athlete who has a strong work ethic. He is a good defensive midfielder who can make plays and get his team points. He plays a natural position as a right midfielder.</td></tr></table>",
1728
+ "bbox": [
1729
+ 124,
1730
+ 156,
1731
+ 873,
1732
+ 785
1733
+ ],
1734
+ "page_idx": 12
1735
+ },
1736
+ {
1737
+ "type": "text",
1738
+ "text": "Table 7: Examples of the generation. In the first column are control codes. \"Negative\": Negative Sentiment. \"Positive\": Positive Sentiment. The second column lists the methods. \"GPT2+\": GPT2-medium + prompt engineering. The given prompts are in bold. The guiding sentences of GPT2+ are omitted for brevity.",
1739
+ "bbox": [
1740
+ 112,
1741
+ 796,
1742
+ 884,
1743
+ 839
1744
+ ],
1745
+ "page_idx": 12
1746
+ }
1747
+ ]