SlowGuess committed on
Commit 682361b · verified · 1 Parent(s): 8d4ec4d

Add Batch 2bcd6ff0-7964-42cc-8382-eeb8b30be63b

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +64 -0
  2. 2401.10xxx/2401.10184/49e82a1c-bde2-43c8-b11c-01d323e6ea7e_content_list.json +0 -0
  3. 2401.10xxx/2401.10184/49e82a1c-bde2-43c8-b11c-01d323e6ea7e_model.json +0 -0
  4. 2401.10xxx/2401.10184/49e82a1c-bde2-43c8-b11c-01d323e6ea7e_origin.pdf +3 -0
  5. 2401.10xxx/2401.10184/full.md +407 -0
  6. 2401.10xxx/2401.10184/images.zip +3 -0
  7. 2401.10xxx/2401.10184/layout.json +0 -0
  8. 2401.10xxx/2401.10185/57db14d9-d6c0-49a9-b07e-a576a72a4ba2_content_list.json +0 -0
  9. 2401.10xxx/2401.10185/57db14d9-d6c0-49a9-b07e-a576a72a4ba2_model.json +0 -0
  10. 2401.10xxx/2401.10185/57db14d9-d6c0-49a9-b07e-a576a72a4ba2_origin.pdf +3 -0
  11. 2401.10xxx/2401.10185/full.md +0 -0
  12. 2401.10xxx/2401.10185/images.zip +3 -0
  13. 2401.10xxx/2401.10185/layout.json +0 -0
  14. 2401.10xxx/2401.10190/2c0034b6-01c2-475a-8cea-68f1fc9d78ab_content_list.json +0 -0
  15. 2401.10xxx/2401.10190/2c0034b6-01c2-475a-8cea-68f1fc9d78ab_model.json +0 -0
  16. 2401.10xxx/2401.10190/2c0034b6-01c2-475a-8cea-68f1fc9d78ab_origin.pdf +3 -0
  17. 2401.10xxx/2401.10190/full.md +690 -0
  18. 2401.10xxx/2401.10190/images.zip +3 -0
  19. 2401.10xxx/2401.10190/layout.json +0 -0
  20. 2401.10xxx/2401.10191/6ac68c8c-17b4-426f-b714-6afc129ebb6b_content_list.json +0 -0
  21. 2401.10xxx/2401.10191/6ac68c8c-17b4-426f-b714-6afc129ebb6b_model.json +0 -0
  22. 2401.10xxx/2401.10191/6ac68c8c-17b4-426f-b714-6afc129ebb6b_origin.pdf +3 -0
  23. 2401.10xxx/2401.10191/full.md +332 -0
  24. 2401.10xxx/2401.10191/images.zip +3 -0
  25. 2401.10xxx/2401.10191/layout.json +0 -0
  26. 2401.10xxx/2401.10208/b4ec0b04-6063-4b2e-8a46-5bc043f27b6d_content_list.json +0 -0
  27. 2401.10xxx/2401.10208/b4ec0b04-6063-4b2e-8a46-5bc043f27b6d_model.json +0 -0
  28. 2401.10xxx/2401.10208/b4ec0b04-6063-4b2e-8a46-5bc043f27b6d_origin.pdf +3 -0
  29. 2401.10xxx/2401.10208/full.md +0 -0
  30. 2401.10xxx/2401.10208/images.zip +3 -0
  31. 2401.10xxx/2401.10208/layout.json +0 -0
  32. 2401.10xxx/2401.10215/95513a42-9487-4a29-97c5-eb93bcc107a7_content_list.json +2058 -0
  33. 2401.10xxx/2401.10215/95513a42-9487-4a29-97c5-eb93bcc107a7_model.json +0 -0
  34. 2401.10xxx/2401.10215/95513a42-9487-4a29-97c5-eb93bcc107a7_origin.pdf +3 -0
  35. 2401.10xxx/2401.10215/full.md +335 -0
  36. 2401.10xxx/2401.10215/images.zip +3 -0
  37. 2401.10xxx/2401.10215/layout.json +0 -0
  38. 2401.10xxx/2401.10216/ea06c2af-58d9-41ff-9a19-b0f5df0c6d96_content_list.json +0 -0
  39. 2401.10xxx/2401.10216/ea06c2af-58d9-41ff-9a19-b0f5df0c6d96_model.json +0 -0
  40. 2401.10xxx/2401.10216/ea06c2af-58d9-41ff-9a19-b0f5df0c6d96_origin.pdf +3 -0
  41. 2401.10xxx/2401.10216/full.md +0 -0
  42. 2401.10xxx/2401.10216/images.zip +3 -0
  43. 2401.10xxx/2401.10216/layout.json +0 -0
  44. 2401.10xxx/2401.10225/ba2787e2-76e7-4f16-aa8b-fe98ad04fa0e_content_list.json +0 -0
  45. 2401.10xxx/2401.10225/ba2787e2-76e7-4f16-aa8b-fe98ad04fa0e_model.json +0 -0
  46. 2401.10xxx/2401.10225/ba2787e2-76e7-4f16-aa8b-fe98ad04fa0e_origin.pdf +3 -0
  47. 2401.10xxx/2401.10225/full.md +0 -0
  48. 2401.10xxx/2401.10225/images.zip +3 -0
  49. 2401.10xxx/2401.10225/layout.json +0 -0
  50. 2401.10xxx/2401.10226/ecea0930-2696-45e5-b53e-a244117d8d6b_content_list.json +0 -0
.gitattributes CHANGED
@@ -10390,3 +10390,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
10390
  2402.00xxx/2402.00045/a6df3759-de08-4123-890f-a2a5dff4acc7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10391
  2402.01xxx/2402.01680/8a5b4e6f-32d3-4798-bb99-93a7ab04b840_origin.pdf filter=lfs diff=lfs merge=lfs -text
10392
  2402.10xxx/2402.10067/d29e2f1c-4848-4288-82e4-88467dac452e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10393
+ 2401.10xxx/2401.10184/49e82a1c-bde2-43c8-b11c-01d323e6ea7e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10394
+ 2401.10xxx/2401.10185/57db14d9-d6c0-49a9-b07e-a576a72a4ba2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10395
+ 2401.10xxx/2401.10190/2c0034b6-01c2-475a-8cea-68f1fc9d78ab_origin.pdf filter=lfs diff=lfs merge=lfs -text
10396
+ 2401.10xxx/2401.10191/6ac68c8c-17b4-426f-b714-6afc129ebb6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10397
+ 2401.10xxx/2401.10208/b4ec0b04-6063-4b2e-8a46-5bc043f27b6d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10398
+ 2401.10xxx/2401.10215/95513a42-9487-4a29-97c5-eb93bcc107a7_origin.pdf filter=lfs diff=lfs merge=lfs -text
10399
+ 2401.10xxx/2401.10216/ea06c2af-58d9-41ff-9a19-b0f5df0c6d96_origin.pdf filter=lfs diff=lfs merge=lfs -text
10400
+ 2401.10xxx/2401.10225/ba2787e2-76e7-4f16-aa8b-fe98ad04fa0e_origin.pdf filter=lfs diff=lfs merge=lfs -text
10401
+ 2401.10xxx/2401.10226/ecea0930-2696-45e5-b53e-a244117d8d6b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10402
+ 2401.10xxx/2401.10229/7721d464-64a8-4067-af4a-a0b47287947f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10403
+ 2401.10xxx/2401.10232/3313835c-9694-4851-aca6-0db1f19be34d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10404
+ 2401.10xxx/2401.10359/db4caa8b-5296-4f04-8e14-86a9b62b6d00_origin.pdf filter=lfs diff=lfs merge=lfs -text
10405
+ 2401.10xxx/2401.10369/35b1fa23-8131-454d-8b74-bce162737982_origin.pdf filter=lfs diff=lfs merge=lfs -text
10406
+ 2401.10xxx/2401.10371/8124e26f-ba7f-4a5d-afd3-95859cac91fb_origin.pdf filter=lfs diff=lfs merge=lfs -text
10407
+ 2401.10xxx/2401.10417/ba401e25-bf63-485c-b6d0-ab8e334d1746_origin.pdf filter=lfs diff=lfs merge=lfs -text
10408
+ 2401.10xxx/2401.10440/c41be6bf-6c39-4be5-a38c-64627af35511_origin.pdf filter=lfs diff=lfs merge=lfs -text
10409
+ 2401.10xxx/2401.10446/79a335ad-d875-4785-95c0-5c5560ea4202_origin.pdf filter=lfs diff=lfs merge=lfs -text
10410
+ 2401.10xxx/2401.10471/24bb8982-4c1a-4d8c-b85a-6f184c133bb2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10411
+ 2401.10xxx/2401.10480/a07e995f-e32d-4bda-af4e-b4b5bd0af992_origin.pdf filter=lfs diff=lfs merge=lfs -text
10412
+ 2401.10xxx/2401.10491/2246cab1-ee9c-4a92-8a88-baf649566623_origin.pdf filter=lfs diff=lfs merge=lfs -text
10413
+ 2401.10xxx/2401.10506/6dddeff9-a845-482c-bcdb-6a9ec73c052b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10414
+ 2401.10xxx/2401.10525/4edfb614-6a89-4fbe-9501-0a3f3c1164be_origin.pdf filter=lfs diff=lfs merge=lfs -text
10415
+ 2401.10xxx/2401.10529/e44110b3-6602-4010-88a4-f2aa31d21765_origin.pdf filter=lfs diff=lfs merge=lfs -text
10416
+ 2401.10xxx/2401.10530/53c91f40-6ef0-4419-908d-d62774bfaa67_origin.pdf filter=lfs diff=lfs merge=lfs -text
10417
+ 2401.10xxx/2401.10545/bedf404b-0cdb-4888-87ea-dfb1a489219d_origin.pdf filter=lfs diff=lfs merge=lfs -text
10418
+ 2401.10xxx/2401.10568/988fd62e-c7ea-40af-9c68-cd9cc381fe51_origin.pdf filter=lfs diff=lfs merge=lfs -text
10419
+ 2401.10xxx/2401.10588/338e280c-b2fb-404b-8a1a-12e6588a606b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10420
+ 2401.10xxx/2401.10700/259a9280-26a1-48bb-a51f-9401830b58e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
10421
+ 2401.10xxx/2401.10727/49fe0026-6c66-4101-9c0c-63276c4ceab5_origin.pdf filter=lfs diff=lfs merge=lfs -text
10422
+ 2401.10xxx/2401.10774/8a3c0b00-9108-417d-9ac2-77aec3c8ed75_origin.pdf filter=lfs diff=lfs merge=lfs -text
10423
+ 2401.10xxx/2401.10815/7a12aaa9-591c-459e-a2e7-c3433601f52b_origin.pdf filter=lfs diff=lfs merge=lfs -text
10424
+ 2401.10xxx/2401.10820/9cfec181-866a-4d16-836e-b5d7e3b31462_origin.pdf filter=lfs diff=lfs merge=lfs -text
10425
+ 2401.10xxx/2401.10825/aff30a2e-69a9-4d04-8cbb-e3c57de63fef_origin.pdf filter=lfs diff=lfs merge=lfs -text
10426
+ 2401.10xxx/2401.10838/106ccbb4-8126-490a-9e41-41c00691d49f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10427
+ 2401.10xxx/2401.10862/1293a494-2943-4ef2-b5f3-fc0ab4a55685_origin.pdf filter=lfs diff=lfs merge=lfs -text
10428
+ 2401.10xxx/2401.10873/a3c1e738-5549-49a0-90b9-37d18c983c12_origin.pdf filter=lfs diff=lfs merge=lfs -text
10429
+ 2401.10xxx/2401.10880/f4aca984-2a79-4165-8a46-d7334f5440d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10430
+ 2401.10xxx/2401.10891/fd35490b-1e2d-4e06-a58c-8b80422cc27f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10431
+ 2401.11xxx/2401.11037/5b2aea64-91ec-4246-bb27-dff5c64c8fc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
10432
+ 2401.11xxx/2401.11048/06c873d2-01e9-4eef-a8a9-bea54788e435_origin.pdf filter=lfs diff=lfs merge=lfs -text
10433
+ 2401.11xxx/2401.11067/95a25677-2be3-4f0a-b122-9b4b060dbb99_origin.pdf filter=lfs diff=lfs merge=lfs -text
10434
+ 2401.11xxx/2401.11094/eaf668ac-3ca8-4776-88d1-05b7be808554_origin.pdf filter=lfs diff=lfs merge=lfs -text
10435
+ 2401.11xxx/2401.11122/d555d24f-0a63-4d38-a7fb-927910b3b914_origin.pdf filter=lfs diff=lfs merge=lfs -text
10436
+ 2401.11xxx/2401.11141/c4391a81-3989-455a-be5e-9d4800ea0c89_origin.pdf filter=lfs diff=lfs merge=lfs -text
10437
+ 2401.11xxx/2401.11161/77313f68-834c-47d1-9d07-c146a6f9d921_origin.pdf filter=lfs diff=lfs merge=lfs -text
10438
+ 2401.11xxx/2401.11166/1b96baf6-1db2-4588-9fe0-141fcd8683ab_origin.pdf filter=lfs diff=lfs merge=lfs -text
10439
+ 2401.11xxx/2401.11170/f8e3cf0d-dceb-4a0c-9f15-f38278848365_origin.pdf filter=lfs diff=lfs merge=lfs -text
10440
+ 2401.11xxx/2401.11174/e33b3450-34b2-49ec-b4d1-a9360b0e2ca1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10441
+ 2401.11xxx/2401.11181/5fe00ab8-e598-4535-85be-16ba0a57d94c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10442
+ 2401.11xxx/2401.11206/f086dea2-62c2-4138-ae23-3b0aa1bf1029_origin.pdf filter=lfs diff=lfs merge=lfs -text
10443
+ 2401.11xxx/2401.11228/834e7d2e-71f6-4b4e-9098-2f1919e161f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10444
+ 2401.11xxx/2401.11237/2a6efdd1-3681-4b68-b0a8-5432f893dbab_origin.pdf filter=lfs diff=lfs merge=lfs -text
10445
+ 2401.11xxx/2401.11249/d40cb701-9b19-406a-b60b-cc257ae9f231_origin.pdf filter=lfs diff=lfs merge=lfs -text
10446
+ 2401.11xxx/2401.11255/c70def4a-f9ce-46df-9c68-58b60f3e4e0c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10447
+ 2401.11xxx/2401.11314/e573288f-5e72-4bad-b547-56400e1df321_origin.pdf filter=lfs diff=lfs merge=lfs -text
10448
+ 2401.12xxx/2401.12238/78821370-ae16-4fae-98dd-71ee71db2a4c_origin.pdf filter=lfs diff=lfs merge=lfs -text
10449
+ 2401.12xxx/2401.12242/90a09b3a-5e34-42c3-9968-adfbad0003fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
10450
+ 2401.12xxx/2401.12244/e773b451-0949-45d6-9efd-93f3af2d8f8f_origin.pdf filter=lfs diff=lfs merge=lfs -text
10451
+ 2401.12xxx/2401.12247/a8e02182-f47b-44ee-b870-b15544a09684_origin.pdf filter=lfs diff=lfs merge=lfs -text
10452
+ 2401.12xxx/2401.12249/ee5a6abb-cef8-4854-8e37-c3eabb4258d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
10453
+ 2402.01xxx/2402.01674/add0f441-43e5-4017-b5f2-fe6c3c54a4b1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10454
+ 2402.01xxx/2402.01676/605c8411-572a-4b87-8e70-904869813fe1_origin.pdf filter=lfs diff=lfs merge=lfs -text
10455
+ 2402.01xxx/2402.01679/a5b91cd8-1323-40f1-abe6-627adbcf7dc6_origin.pdf filter=lfs diff=lfs merge=lfs -text
10456
+ 2402.06xxx/2402.06633/79ec142b-879c-4885-ac82-43931c2d1658_origin.pdf filter=lfs diff=lfs merge=lfs -text
2401.10xxx/2401.10184/49e82a1c-bde2-43c8-b11c-01d323e6ea7e_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10184/49e82a1c-bde2-43c8-b11c-01d323e6ea7e_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10184/49e82a1c-bde2-43c8-b11c-01d323e6ea7e_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:496708ac03cd7a987a0a60051e95e7ac5f23fbe388dd4605783ae3b6b17a9bc6
3
+ size 5550084
2401.10xxx/2401.10184/full.md ADDED
@@ -0,0 +1,407 @@
1
+ # Comparing Traditional and LLM-based Search for Image Geolocation
2
+
3
+ Albatool Wazzan
4
+
5
+ Dept of Computer & Info Sciences
6
+
7
+ Temple University
8
+
9
+ Philadelphia, USA
10
+
11
+ albatool.wazzan@temple.edu
12
+
13
+ Stephen MacNeil
14
+
15
+ Dept of Computer & Info Sciences
16
+
17
+ Temple University
18
+
19
+ Philadelphia, USA
20
+
21
+ stephen.macneil@temple.edu
22
+
23
+ Richard Souvenir
24
+
25
+ Dept of Computer & Info Sciences
26
+
27
+ Temple University
28
+
29
+ Philadelphia, USA
30
+
31
+ souvenir@temple.edu
32
+
33
+ # ABSTRACT
34
+
35
+ Web search engines have long served as indispensable tools for information retrieval; user behavior and query formulation strategies have been well studied. The introduction of search engines powered by large language models (LLMs) suggested more conversational search and new types of query strategies. In this paper, we compare traditional and LLM-based search for the task of image geolocation, i.e., determining the location where an image was captured. Our work examines user interactions, with a particular focus on query formulation strategies. In our study, 60 participants were assigned either traditional or LLM-based search engines as assistants for geolocation. Participants using traditional search more accurately predicted the location of the image compared to those using the LLM-based search. Distinct strategies emerged between users depending on the type of assistant. Participants using the LLM-based search issued longer, more natural language queries, but had shorter search sessions. When reformulating their search queries, traditional search participants tended to add more terms to their initial queries, whereas participants using the LLM-based search consistently rephrased their initial queries.
36
+
37
+ # ACM Reference Format:
38
+
39
+ Albatool Wazzan, Stephen MacNeil, and Richard Souvenir. 2024. Comparing Traditional and LLM-based Search for Image Geolocation. In Proceedings of the 2024 ACM SIGIR Conference on Human Information Interaction and Retrieval (CHIIR '24), March 10-14, 2024, Sheffield, United Kingdom. ACM, New York, NY, USA, 12 pages. https://doi.org/10.1145/3627508.3638305
40
+
41
+ # 1 INTRODUCTION
42
+
43
+ For decades, web search engines have served as the de facto reference tool for a wide range of tasks. In fact, it has been demonstrated that humans have been trained to optimize keyword-based searching using query formulations not typically used in natural language [24]. Advancements in artificial intelligence (AI) have driven the emergence of large language models (LLMs), such as BERT [9], GPT-3 [5], and their successors. These models have served as the foundation for numerous applications, ranging from text generation and translation to question answering, multi-step reasoning, and
44
+
45
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
46
+
47
+ ACM SIGIR, March 10-14, 2024, Sheffield, UK
48
+
49
+ © 2024 Copyright held by the owner/author(s). Publication rights licensed to ACM.
50
+
51
+ ACM ISBN 979-8-4007-0434-5/24/03...$15.00
52
+
53
+ https://doi.org/10.1145/3627508.3638305
54
+
55
+ complex problem solving [43]. Recently, these tools have been combined with web search to enable a new mode of LLM-powered conversational search. Unlike keyword-based search, this integration allows users to engage in a natural, interactive conversation with the LLM-powered search engine, as if they were interacting with a knowledgeable assistant. This conversational mode has the potential to improve user experiences across various domains. While previous work has explored how people engage in sense-making and constructing mental models of traditional search engines [40], the adaptation of these models to LLM-based search remains open to inquiry.
56
+
57
+ To compare traditional and LLM-based search, we consider the task of image geolocation - identifying the location in which an image was captured, an important task with applications in forensics, law enforcement, and journalism. This task has historically been performed by expert image analysts, using increasingly sophisticated reference tools as they became available. Fully automated computer vision approaches [12, 26, 45] have been developed; these approaches typically rely on the visual similarity between the query image and a previously-processed training image and tend to work best when landmarks or other unique features are visible. In the general case, accurately localizing images can be challenging. Even with the assistance of a search engine, users not only need to identify visual clues, but understand them well enough to translate into a search query. Because geolocation is a task that requires investigation, in that analysts must collect sometimes disparate clues to uncover the origin of the image, it can be expected that users will formulate multiple queries as they seek to retrieve information about these clues. This task takes advantage of both the lookup abilities of a search engine and the contextual knowledge from humans, making it a compelling task to evaluate how users adapt their query formulation strategies.
58
+
59
+ We conducted a between-subjects study with 60 participants randomly assigned to use either traditional or LLM-based search to aid in image geolocation in order to address the following research questions:
60
+
61
+ RQ1 How does the use of an LLM-based search tool versus a traditional search tool impact participants' performance in geolocation tasks?
62
+ RQ2 How do participants adapt their query formulation strategies when using LLM-based search compared to traditional search for image geolocation?
63
+ RQ3 What are the key challenges encountered by participants when using LLM-based search for image geolocation?
64
+
65
+ Our results indicate that participants using traditional search outperformed those using LLM-based search in terms of accurate image
66
+
67
+ geolocation. This outcome can be explained by our qualitative findings, where participants reported challenges formulating queries when interacting with the LLM-based search engine. LLM-based search users issued longer, more conversational queries within shorter search sessions. Participants using the traditional search engine tended to extend their initial queries with additional terms when reformulating, while those utilizing the LLM-based search consistently rephrased their initial queries.
68
+
69
+ # 2 RELATED WORK
70
+
71
+ Search engines have evolved into indispensable tools that influence how information is accessed and problems are solved [11]. However, effectively communicating the user's search intent has been a persistent challenge. Much work has been dedicated toward understanding web search query formulation patterns [3, 18, 30] and investigating how users adapt their queries and reformulation strategies in efforts to uncover search intent [6, 20, 42]. These strategies can be domain-specific. For instance, for health-related information, Zuccon showed that search results were less helpful when users issued complex queries describing their symptoms rather than using medical terminology [48]. In the educational setting, students heavily rely on search engines for academic purposes [17, 36]. However, it has been shown that a substantial portion of academic search sessions result in null queries, when individuals use vague or complex terms resulting in empty search results and obstructing users from achieving their intended search objectives [25]. Recognizing and understanding these challenges related to user behavior and query formulation strategies can enhance the overall search and retrieval experience. Our work builds upon existing research in web search and query formulation and extends the analysis to LLM-based search for the task of image geolocation.
72
+
73
+ # 2.1 LLM-based Search Analysis
74
+
75
+ LLMs are trained on large text corpora, and their effectiveness in various applications hinges on the ability to query them effectively [1]. In efforts to optimize LLMs for retrieval tasks, several works have investigated the process of querying LLMs for specific information. Jiang [21] highlighted the consequences of poorly written prompts, which yield failed retrieval results, and proposed a method that used multiple automated paraphrases of the query and an aggregation scheme, mirroring how humans often rephrase their queries and provide additional context to make them more informative. Similarly, the work of Petroni [32] examined enhancing LLM retrieval by augmenting queries with relevant context and demonstrated improved performance of various LLMs on factuality tests.
76
+
77
+ When humans seek information, they often clarify their queries with examples to obtain better results. To mimic this behavior, Brown employed few-shot learning, which involves conditioning the LLM on the task description and just a few examples, and found that this "in-context learning" works best with larger language models [5]. A more recent effort argued that language models do not learn tasks during runtime from few-shot examples, but locate tasks within the model's preexisting knowledge; this paper proposes 0-shot prompts, which uses an alternative query with different phrasings to provide additional task descriptions [35]. Wei
78
+
79
+ introduces chain-of-thought, which aims to replicate the human thought process when addressing complex problems [44]. These efforts aimed at enhancing the LLM retrieval, but do not address the challenges faced by non-expert users when querying LLMs. Recent work [47] involves non-experts issuing prompts for LLM-based chatbots and found that struggles in formulating effective prompts resemble issues observed in end-user programming and interactive machine learning systems. Their work emphasizes the need for further research in LLMs and prompt-literacy, specifically for non-expert users. Our work focuses on this challenge for the multifaceted task of image geolocation.
80
+
81
+ # 2.2 Image Geolocation
82
+
83
+ Image geolocation is a widely-studied task. One effort used a carefully constructed dataset to investigate the types of clues and strategies users employ for image geolocation [29]. Several efforts have addressed the labor-intensive nature of image geolocation by incorporating crowdsourcing to improve location identification. One study introduced a diagramming technique involving visual representations from a bird's-eye or satellite perspective, which allowed novice crowd workers to collaborate with experts [23]. In a follow-up study, the authors introduced GroundTruth, a system that enhanced image geolocation accuracy through shared representations for crowd-augmented expert work [41].
84
+
85
+ Other studies have explored how to improve the accuracy of non-expert workers in image geolocation tasks. One project explicitly instructed novice users to follow a three-step workflow inferred from expert strategies: collecting image-related clues, deriving potential coordinates based on these clues, and identifying the image location on a map [22]. Another method [34] introduced a crowdsourcing platform that leverages existing data mining methods to estimate photo and video locations from social media, then used crowdsourcing for verification.
86
+
87
+ In our approach, we focus on how participants articulate visual clues into search queries, and whether those query strategies differ depending on the type of search tool available.
88
+
89
+ # 3 METHODS
90
+
91
+ We conducted a between-subjects user study involving 60 participants. In this section, we describe the experimental platform, recruitment of participants, task design, and measures.
92
+
93
+ # 3.1 Experimental Platform
94
+
95
+ Image geolocation has been well-studied due, in part, to the popularity of gamified versions of the task. The most well-known version is GeoGuessr; others include GeoGuess, Geotastic, and City Guesser. The objective of these games is to predict the correct location on a map given an image, video, or other information, and points are accumulated based on speed and/or accuracy. These games can serve as useful platforms for evaluating a wide variety of cognitive tasks. In this study, we use GeoGuess [4], an open-source image geolocation game. Users are presented with a Google Maps StreetView image and have two minutes to guess the location by dropping a pin on a world map. Users can navigate using the StreetView interface to virtually zoom and move through the scene. Up to 5,000 points can be earned based on proximity of the prediction to the actual
96
+
97
+ ![](images/1ddbd90e3779edc725e2b46eb3804afa8eac659a3cf4deb1db70600194f1a18c.jpg)
+ ![](images/ccd980c0c74b97a5184175480f4909872605dce4d36204124499f3f97ea9c16f.jpg)
+ ![](images/a0c09fcd28deacde6eddecf54096702a987bca47afae02ffb06f16e1a4b86005.jpg)
+ ![](images/a9399bbf520de4ee08d5a3a323e26289581c60d6985ba8555ce2327b4f67721e.jpg)
+ ![](images/e6c6ee5e48ed5df6e141125e2779e098331566aaca95888e499319796753d4db.jpg)
+ ![](images/86da1608de3e505c7a82cbd07fa680282b0b642904641f4421d8802c74261193.jpg)
+ Locations shown: Paris, France; Tokyo, Japan; Sydney, Australia; Barcelona, Spain; Chicago, USA; Ushuaia, Argentina
+ Figure 1: Initial viewpoints (with the location indicated) of the six rounds in the experiment.
+
116
+ location. In the instructions for the game, users are encouraged to seek out clues that may be useful for localization.
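The paper reports only that up to 5,000 points are awarded based on proximity; the exact scoring curve is not given. Below is a minimal sketch, assuming a GeoGuess-style score that decays exponentially with the great-circle distance between the guess and the true location (the decay constant `scale_km` is illustrative only).

```python
import math

def haversine_km(lat1, lon1, lat2, lon2):
    # Great-circle distance between two (lat, lon) points in kilometers.
    r = 6371.0
    p1, p2 = math.radians(lat1), math.radians(lat2)
    dphi = math.radians(lat2 - lat1)
    dlmb = math.radians(lon2 - lon1)
    a = math.sin(dphi / 2) ** 2 + math.cos(p1) * math.cos(p2) * math.sin(dlmb / 2) ** 2
    return 2 * r * math.asin(math.sqrt(a))

def score(guess, actual, max_points=5000, scale_km=2000.0):
    # Hypothetical scoring: full points for a near-exact guess,
    # decaying exponentially with distance (scale_km is an assumed constant).
    d = haversine_km(*guess, *actual)
    return round(max_points * math.exp(-d / scale_km))

# Example: guessing Lyon for an image actually taken in Paris (~390 km away).
print(score((45.76, 4.83), (48.86, 2.35)))
```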
117
+
118
+ # 3.2 Participants
119
+
120
+ Participants, 18 years of age or older who could read and understand English, were recruited on a university campus. Our sample consisted of 60 participants with a mean age of $25\ (SD = 4.95)$ and a diverse range of academic backgrounds, with majors in accounting, biology, computer science, systems engineering, physics, chemistry, global studies, and marketing. The IRB-approved study was carried out by two members of the research team over the span of three weeks. All participants received a $5 gift card to a coffee shop and, before participating, were informed of the task description, duration, compensation, and their right to withdraw at any time.
121
+
122
+ # 3.3 Task Design
123
+
124
+ Users were provided a dual-monitor setup, with the geolocation task on one screen and the search engine on the other. Microsoft Bing served as the traditional search engine and Microsoft Bing Chat, which is powered by ChatGPT, served as the LLM-based search engine.
125
+
126
+ We followed a between-subjects study design, where each participant was randomly assigned to either the (Traditional) Search or the LLM condition. The experiment consisted of six geolocation tasks (shown in Figure 1), which were intended to vary in difficulty. Participants were instructed to use only the provided search engine and not to perform image-based searches. Participants confirmed their understanding of the instructions by clicking an "I understand" button. After the instructions, participants watched a short instructional video on how to use the geolocation interface.
127
+
128
+ For each round, the participant had two minutes to provide a guess. They could consult the search tool as often as they needed,
129
+
130
+ Table 1: Geolocation performance on the six-round experiment.
131
+
132
+ <table><tr><td></td><td>Estimate</td><td>Std. Error</td><td>t-value</td><td>p-value</td></tr><tr><td>(Intercept)</td><td>2678.5</td><td>167.4</td><td>15.999</td><td>&lt;2e-16 ***</td></tr><tr><td>Condition(Search)</td><td>501.3</td><td>243.5</td><td>2.059</td><td>0.0414*</td></tr></table>
133
+
134
+ given the time constraint. Upon completion of the six rounds, participants were invited to fill out a post-study survey with questions about familiarity with image geolocation, traditional or LLM-based search, attitudes toward artificial intelligence, and a set of open-ended questions for additional feedback. The entire experiment was designed to be completed in approximately 15 minutes per participant.
135
+
136
+ # 3.4 Measures
137
+
138
+ The primary dependent variable in this study is performance, measured by the points earned by each participant per round. The score ranged from 0 to 5000; the maximum score was obtained when the prediction was within a few kilometers of the actual location. The primary independent variable, type of search, was modeled as a fixed effect in our linear mixed-effects model. Each participant played the six rounds in the same order. The round number, which correlated with difficulty, was modeled as a random effect. For each participant, we maintained an event log of timestamped actions that included switching between web search and geolocation. Additionally, we recorded the search queries.
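The paper does not name the software used to fit this model. Below is a minimal sketch of the specification described here (points as the outcome, condition as a fixed effect, round as a random grouping factor), using statsmodels on a hypothetical long-format log with one row per participant-round; all values are toy data.

```python
import pandas as pd
import statsmodels.formula.api as smf

# Hypothetical long-format log: one row per participant per round (toy values).
df = pd.DataFrame({
    "participant": ["p01", "p01", "p01", "p02", "p02", "p02",
                    "p03", "p03", "p03", "p04", "p04", "p04"],
    "condition":   ["Search"] * 6 + ["LLM"] * 6,
    "round":       [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3],
    "points":      [4712, 3100, 2400, 4950, 2800, 3900,
                    3637, 1200, 2100, 2500, 900, 3100],
})

# Fixed effect: condition; random intercept for round (as described in Sec. 3.4).
model = smf.mixedlm("points ~ condition", df, groups=df["round"])
result = model.fit()
print(result.summary())
```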
139
+
140
+ # 4 ANALYSIS & RESULTS
141
+
142
+ We excluded the five participants who did not engage with the search engine for any of the rounds, which left 29 in the Search condition and 26 in the LLM condition.
143
+
144
+ ![](images/bd55e9a336818451e27cd5712af423c181f1f8af7a846dfbbcefa958656c25fd.jpg)
145
+ Figure 2: Performance distribution (points) comparison between Search and LLM conditions on average (left) and per round.
146
+
147
+ ![](images/24b2699284b0c3a8b6188dc44992801344e1e5fe0eb93658574dc856261db0d8.jpg)
148
+ Figure 3: Percentage of multi-query rounds for Search and LLM conditions
149
+
150
+ For the Search condition, the mean performance score was 3189, with a median of 4712 and an IQR of 4262.25. For the LLM condition, the mean performance score was 2725, with a median of 3637.5 and an IQR of 4952. Figure 2 shows the distribution of scores across the two conditions by average and across the six rounds.
151
+
152
+ Overall, participants in the Search condition outperformed those in the LLM condition. For rounds 1-4, the performance was similar for both conditions, with both groups finding round 2 challenging. In the final two rounds, participants across both conditions performed poorly, with those in the LLM group performing notably worse. We used a linear mixed-effects model (LMM) to evaluate the difference in performance between the groups. The results are shown in Table 1. There was a significant difference in performance between the two conditions $(p = 0.0414)$.
153
+
154
+ # 4.1 Query Formulation Patterns
155
+
156
+ Query formulation is a fundamental tool in the analysis of search behavior. Here, we investigate four key query formulation metrics, comparing their differences across the two conditions.
157
+
158
+ 4.1.1 Number of Queries. We examined the number of queries per round. Participants in the Search condition issued an average of 1.98 queries, whereas those in the LLM condition issued an average of 1.04 queries. A Chi-Square test showed this association to be significant $(\chi^2(6) = 19.71, p = 0.003)$ ; users in the Search condition issued more queries.
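A sketch of how such an association test could be run with SciPy on a contingency table of rounds cross-tabulated by condition and number of queries issued; the counts below are invented for illustration and are not the study's data.

```python
import numpy as np
from scipy.stats import chi2_contingency

# Hypothetical contingency table (counts of rounds): rows are conditions,
# columns are the number of queries issued in a round (0, 1, 2, 3, 4, 5, 6+).
table = np.array([
    [10, 55, 60, 30, 12, 5, 2],   # Search
    [25, 90, 30,  8,  2, 1, 0],   # LLM
])

chi2, p, dof, expected = chi2_contingency(table)
print(f"chi2({dof}) = {chi2:.2f}, p = {p:.4f}")
```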
159
+
160
+ We computed the percentage of rounds in which participants issued more than one query. As shown in Figure 3, participants in both conditions issued more queries as the task increased in
161
+
162
+ ![](images/ec312cd8a9dc7a803da3bc3c42c4ef9f3ec1c79245374f628948074e145ed1e8.jpg)
163
+ Figure 4: Comparison of part-of-speech tag counts between Search and LLM conditions
164
+
165
+ difficulty. Participants in the Search condition favored issuing more queries, starting at around $55\%$ and reaching $70\%$ by the end of the task. In the LLM condition, participants issued more queries at a lower rate starting at $20\%$ , but reaching $55\%$ by the last round.
166
+
167
+ 4.1.2 Query Length. Query length, the average number of terms in each query, can provide valuable insights into query formulation patterns [38]. On average, participants in the Search condition formulated queries comprising 4.19 terms. In contrast, participants in the LLM condition issued queries with an average of 6.06 terms. A Chi-Square test revealed a highly significant association between the conditions and the differences in query length patterns $(\chi^2(28) = 61.78, p < 0.001)$.
168
+
169
+ 4.1.3 Part-of-Speech Tagging. To explore potential differences in linguistic characteristics, we performed part-of-speech tagging. Figure 4 shows the distribution of tags in the queries across conditions. After adjusting the alpha value using Bonferroni correction, in the LLM condition, several tags, including adverbs (ADV), adpositions (ADP), determiners (DET), auxiliary verbs (AUX), and verbs (VERB), exhibited significantly higher frequencies ($p = 0.051$, $p = 0.046$, $p < 0.001$, $p < 0.001$, $p = 0.019$) than in the Search condition. The increased use of adverbs and auxiliary verbs in the LLM queries suggested a more natural language style, potentially influenced by the conversational nature of interactions with LLMs [31]. On the other hand, in the Search condition, usage of proper nouns (PROPN) was significantly higher ($p = 0.034$) than in the LLM condition. This
170
+
171
+ ![](images/5ce70c858f279c5cadc54222183233ff1cd426567df921a9259d785312619821.jpg)
172
+ Figure 5: Average number of query terms for successive queries in a round
173
+
174
+ indicates a greater tendency to perform a keyword-based search using specific entities or locations when interacting with a traditional search engine.
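The paper does not state which tagger was used for this analysis. Below is a minimal sketch with spaCy's Universal POS tags, which include the tag names reported above (PROPN, VERB, AUX, DET, ADP, ADV); the model name and example queries are assumptions.

```python
from collections import Counter
import spacy

# en_core_web_sm is an assumed model choice; the paper does not name its tagger.
nlp = spacy.load("en_core_web_sm")

def pos_counts(queries):
    # Count Universal POS tags (e.g., PROPN, VERB, AUX, DET, ADP, ADV) across queries.
    counts = Counter()
    for q in queries:
        counts.update(token.pos_ for token in nlp(q))
    return counts

# Hypothetical example queries in the style of each condition.
search_queries = ["rue crevaux paris", "stockland building sydney"]
llm_queries = ["what language is the word sauf in?",
               "where is the stockland building located?"]
print(pos_counts(search_queries))
print(pos_counts(llm_queries))
```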
175
+
176
+ 4.1.4 Questions. We explore the categorization of question and non-question queries. Following Pang and Kumar [30], we defined question queries based on the following criteria:
177
+
178
+ - Interrogative start: Queries that start with how, what, which, why, where, when, who, whose.
179
+ - Modal verb start: Queries that start with do, does, did, can, could, has, have, is, was, are, were, should. However, an exception is made for queries where the second word is "not".
180
+ - Queries that end with a question mark (?).
181
+
182
+ Queries not meeting the criteria were classified as non-question.
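Below is a minimal sketch of a classifier implementing the three criteria above; the word lists come from the bullet points, and the handling of the "not" exception reflects our reading of the rule.

```python
INTERROGATIVES = {"how", "what", "which", "why", "where", "when", "who", "whose"}
MODALS = {"do", "does", "did", "can", "could", "has", "have",
          "is", "was", "are", "were", "should"}

def is_question(query: str) -> bool:
    q = query.strip().lower()
    # Criterion 3: ends with a question mark.
    if q.endswith("?"):
        return True
    words = q.split()
    if not words:
        return False
    # Criterion 1: starts with an interrogative word.
    if words[0] in INTERROGATIVES:
        return True
    # Criterion 2: starts with a modal verb, unless the second word is "not"
    # (e.g., "do not ..." is treated as a non-question).
    if words[0] in MODALS and (len(words) < 2 or words[1] != "not"):
        return True
    return False

print(is_question("what language is sauf"))          # True
print(is_question("rue crevaux street map"))         # False
print(is_question("is this street in barcelona?"))   # True
```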
183
+
184
+ For the Search condition, only $17\%$ were question queries. Conversely, for the LLM condition, $73\%$ were question queries. A Chi-Square analysis showed a statistically significant association between condition and query type $(\chi^2(1) = 6.37, p = 0.012)$. These findings suggest that participants in the LLM condition applied a more conversational style.
185
+
186
+ # 4.2 Query Reformulation Strategies
187
+
188
+ We explore how users reformulate and refine their queries during a given round, focusing on two primary aspects: changes in query length and term repeats, which allows us to understand how participants progressively adapt their queries.
189
+
190
+ 4.2.1 Number of Terms. We investigate how the length of queries changes within a round. For each round, we computed the average number of terms in each query, in order. Figure 5 shows the average number of terms by query order. Participants in the LLM condition issue an initial query of $\sim 6$ terms and maintain this length for subsequent queries. Meanwhile, participants in the Search condition tend to start with shorter ($\sim 3$ term) queries and gradually increase their length. The longer queries favored by LLM users may indicate a tendency toward conversational interaction, whereas the shorter initial queries in the Search condition may reflect an initial focus on keyword-driven retrieval, followed by query expansion.
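A sketch of the per-position aggregation described above, using pandas and a hypothetical query log with one row per issued query and a column giving the query's position within its round; the rows are invented for illustration.

```python
import pandas as pd

# Hypothetical query log: one row per issued query.
log = pd.DataFrame({
    "condition": ["Search", "Search", "Search", "LLM", "LLM"],
    "round":     [1, 1, 1, 1, 1],
    "order":     [1, 2, 3, 1, 2],   # position of the query within the round
    "query":     ["sauf street", "sauf street sign", "sauf street sign red x",
                  "what language is the word sauf in?",
                  "where is rue crevaux located?"],
})
log["n_terms"] = log["query"].str.split().str.len()

# Average query length by condition and query position (cf. Figure 5).
print(log.groupby(["condition", "order"])["n_terms"].mean())
```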
191
+
192
+ ![](images/f134b24bf8352907d89be9adcb22a2daf54905aea7a3545b5644a18f906c965c.jpg)
193
+ Figure 6: Percentages of term repeats for successive queries in a round
194
+
195
+ ![](images/70c4f526bbda19a199c64fdf26f27ae165af7110ddb1659089a780b6c089076b.jpg)
196
+ Figure 7: Distribution of syntactic-level types across ordered queries for Search (top), and LLM (bottom) conditions
197
+
198
+ 4.2.2 Term Repeats. We examine term repeats within a round to understand how often users refined their initial queries. We computed the Jaccard similarity percentages [27] of consecutive queries in a round. Figure 6 shows percentages of queries that share identical terms with the previous query in a round. Initially, participants in the LLM condition had generally lower term reuse of around $20\%$ , suggesting a moderate level of query refinement. In the Search condition, participants began with a higher term reuse rate of $30\%$ with a gradual decline as the round progressed, which suggests that participants initially focused on refining their queries, then shifted to queries formulated differently or focused on new clues.
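Below is a minimal sketch of the Jaccard similarity between consecutive queries in a round; whitespace tokenization and lower-casing are our assumptions, not choices stated in the paper.

```python
def jaccard(q1: str, q2: str) -> float:
    # Jaccard similarity of the term sets of two queries.
    a, b = set(q1.lower().split()), set(q2.lower().split())
    if not a and not b:
        return 0.0
    return len(a & b) / len(a | b)

def consecutive_similarities(queries):
    # Similarity between each query and the one issued before it within a round.
    return [jaccard(prev, curr) for prev, curr in zip(queries, queries[1:])]

round_queries = ["sauf street sign",
                 "sauf street sign handicap red x",
                 "rue crevaux location"]
print(consecutive_similarities(round_queries))  # e.g., [0.5, 0.0]
```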
199
+
200
+ # 4.3 Query Reformulation Types
201
+
202
+ Analyzing query reformulation types (QRTs) allows us to infer the user intent in query reformulation. We adopt the QRT taxonomy
203
+
204
+ <table><tr><td>Intent Category</td><td>Definition</td><td>Example Query</td></tr><tr><td>Specification (Spec)</td><td>Query becomes more specific, narrowing down the search intent</td><td>sauf street sign → sauf street sign handicap red x</td></tr><tr><td>Generalization (Gen)</td><td>Query becomes more general, broadening the search intent</td><td>saint james peter adam Hamilton → saint james</td></tr><tr><td>Synonym (Syn)</td><td>Substitution of a term with its synonym while maintaining the overall meaning</td><td>RUE CREVAUX street MAP → RUE CREVAUX street location</td></tr><tr><td>Somewhat Relevant (SR)</td><td>Intent shifts slightly while remaining somewhat tied to the original query</td><td>what language is SAUF in? → how about rue crevaux?</td></tr><tr><td>New Topic (New)</td><td>Intent shifts significantly to a different subject</td><td>what countries have placentro masisa? → where is 17 de mayo in Chile?</td></tr><tr><td>Others (Oth)</td><td>Queries that do not fit any category</td><td>federal street boston → federal street in (Boston)</td></tr></table>
205
+
206
+ Table 2: Intent-level query reformulation types
207
+
208
+ proposed by Chen et al. [6], which characterizes QRTs at both the syntactic and intent level.
209
+
210
+ Syntactic changes in consecutive queries, which involve alterations in the structure and composition, are categorized into five types:
211
+
212
+ - Add: New terms are introduced into the query, resulting in an expansion of its content.
213
+ - Delete: Terms present in the previous query are removed in the current query.
214
+ - Change: Modifications involve replacing some terms while keeping others unchanged.
215
+ - Repeat: A query remains identical to the preceding one.
216
+ - Others: A combination of different changes within a query or the introduction of an entirely new query.
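A minimal rule-based sketch of how a pair of consecutive queries might be mapped to these syntactic types from their term sets follows; the exact rules of the taxonomy in [6] may differ, so the mapping below is an assumption.

```python
def syntactic_qrt(prev: str, curr: str) -> str:
    # Classify the change from `prev` to `curr` using term-set differences.
    a, b = prev.lower().split(), curr.lower().split()
    sa, sb = set(a), set(b)
    if a == b:
        return "Repeat"
    added, removed = sb - sa, sa - sb
    if added and not removed:
        return "Add"        # only new terms introduced
    if removed and not added:
        return "Delete"     # only existing terms removed
    if added and removed and (sa & sb):
        return "Change"     # some terms replaced, some kept
    return "Others"         # entirely new query or other mixed edits

print(syntactic_qrt("sauf street sign", "sauf street sign handicap red x"))    # Add
print(syntactic_qrt("saint james peter adam hamilton", "saint james"))         # Delete
print(syntactic_qrt("rue crevaux street map", "rue crevaux street location"))  # Change
```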
217
+
218
+ We compute the percent of each syntactic category type for ordered queries in a round. Figure 7 shows the distribution of syntactic-level QRTs for the first, second, etc. query issued in each round. For the Search participants (Figure 7 (top)), we observe that the predominant QRTs were "Add", "Others", and "Change." The high initial rate for "Others" suggests an exploratory intent at the onset. As the task progressed, there was a noticeable rise in the "Add" type, indicating that participants added details to their queries as they solved the task, suggesting more exploitative behavior.
219
+
220
+ For the LLM condition (Figure 7 (bottom)), the most frequent QRT was "Change", showing a steady increase as participants progressed. Compared to the Search condition, the "Add" type was less common. This lower occurrence of "Add" suggests that participants in the LLM condition were less inclined to augment their queries with additional terms.
221
+
222
+ Beyond the syntactic level, we examined the intent level QRT. Rather than measuring how users modify queries, the aim is to measure why the changes were made to uncover the underlying motivations, evolving information needs, and user goals. Table 2 introduces the six intent categories along with example queries from our dataset.
223
+
224
+ Two members of the research team performed intent-level categorization. Both researchers individually categorized the queries, then met to reach a consensus on any discrepancies. As shown in Figure 8 (top), for participants in the Search condition, "Specification" and "New Topic" were the predominant QRTs. Comparing
225
+
226
+ ![](images/2965acdd58520b90076ce62bbdab0d2594060033c4c0573e114febfb4e52fe5f.jpg)
227
+ Figure 8: Distribution of intent-level types across ordered queries for Search (top), and LLM (bottom) conditions
228
+
229
+ these findings with the syntactic changes observed in (Figure 7 (top)), we notice a similar pattern between "Specification" and "Add". This pattern suggests that participants were narrowing down their search intent by adding more details to their query. A parallel trend is observed between "Others" in the syntactic types and "New Topic" as their percentages initially fluctuate but eventually follow a similar pattern.
230
+
231
+ For the LLM participants, the distribution of intent-level QRTs is shown in Figure 8 (bottom). "Specification" was the dominant category. Comparing these with the syntactic changes shown in Figure 7 (bottom), there was a steady increase in both "Specification" and "Add"; however, "Specification" was much more frequent than "Add." Interestingly, a parallel trend can be observed between "Specification" and "Change." This suggests that while participants using the LLM primarily focused on narrowing down their search intent, they did so without necessarily adding terms to their queries. This behavior differs from the Search condition trend, where both "Specification" and "Add" showed increasing percentages and similar frequencies. This contrast showcases the distinctive user interactions between the two assistants. While "Specification" in the Search condition often involves query expansion, in the LLM condition, "Specification" primarily manifests as query rephrasing.
232
+
233
+ # 5 QUALITATIVE FINDINGS
234
+
235
+ The qualitative findings derive mainly from the responses to the post-study survey and a comparison of the search results returned by each search engine for similar queries.
236
+
237
+ ![](images/1747036fb1093e026b4657c8d45d40defdfaa577fb9a78e879283ee38b07ba55.jpg)
238
+ Figure 9: Top clues by participants across conditions
239
+
240
+ # 5.1 Open-ended Survey Questions
241
+
242
+ We conducted a post-study survey with open-ended questions to better understand how participants translated clues into search queries and the challenges they faced.
243
+
244
+ 5.1.1 Clues Identified by Participants. We coded and categorized responses to the question: What types of clues did you identify that helped you with the task?. As illustrated in Figure 9, the predominant clue category was street names, followed closely by business names, including store names. Language was also helpful to our participants, particularly to identify non-English speaking countries. Some participants also referred to geographical features like mountains and large bodies of water. The identified clues were consistent across all participants, regardless of the assigned search tool; this aligns with prior work that explored image geolocation [29].
245
+
246
+ 5.1.2 Translating Clues into Search Queries. The post-study survey asked: How did you translate the clues into search queries?. Using an inductive, open coding approach [39], we coded and categorized these responses into distinct strategies:
247
+
248
+ Language Identification. 10 participants from each condition focused on identifying the languages present on signs, buildings, and stores. P13 stated, "I would type in the words I saw, and ask the helper, what language is this in?." Similarly, P18 explained, "I translated some of the clues I saw to English, this way it shows me what the origin of the language."
249
+
250
+ Locating Street Signs. 10 participants from the Search and 7 from the LLM condition utilized this strategy. They focused on finding street signs in corners and intersections to get closer to the location. P31 explained, "I was searching for street names, trying to identify which neighborhoods the locations were in, for larger cities." P36 also said, "I used the road signs to get a general idea of city and direction of city."
251
+
252
+ Locating Businesses/Stores. Nine participants from each condition focused on locating businesses, stores, and shops. P37 mentioned, "my strategy was typing company names into the helper."
253
+
254
+ Describing Geographic Features. A few participants, mostly from the LLM condition, described the geographic features of the location. This included providing details about trees, architecture, and mountains. P13 explained, "I tried describing the environment I was in to the chatbot, but the results were often not good."
255
+
256
+ Locating Landmarks. In a similar approach to locating businesses, a small number of participants actively searched for large buildings
257
+
258
+ ![](images/8d28d56f8ae9b4226b1a76165b75edbf60ec6b02985e15db1f7a41e70b22f699.jpg)
259
+ Figure 10: Comparison of challenges faced by our participants in both the Search and LLM conditions
260
+
261
+ and landmarks. P51 noted, "I searched for a landmark around the area."
262
+
263
+ Although these strategies did not differ significantly between the two conditions, analyzing how participants translated visual clues into search queries is important for gaining insights into their approach to image geolocation.
264
+
265
+ 5.1.3 Challenges Identified by Participants. We asked the participants to describe the challenges they faced during the experiment. Only a few participants did not answer or stated they faced no challenges. The responses were coded into six qualitative classes. Each code, with an example participant response, is given in Table 3.
266
+
267
+ The distribution of challenges across the experimental conditions is shown in Figure 10. There is a notable discrepancy for Efficient Query Formulation. While a few participants from the Search condition did mention this challenge, it emerged as the primary obstacle for over half of the participants in the LLM condition. P5 explained, "My biggest challenge was getting the chatbot to understand exactly what I wanted." Similarly, P17 said, "Trying to be concise and precise with my searches using the chatbot was challenging." These participants encountered difficulties in effectively communicating their intent to the LLM-based search engine. Others took a more strategic approach to address this challenge. P46 explained, "I realized I needed to ask less specific questions and go more broadly to get answers." This adaptive strategy reflects participants' attempts to optimize their interactions with the LLM. Participant P55 mentioned, "The challenging part was figuring out what I was looking at and translating it to a question that would narrow down answers coming from the chatbot."
268
+
269
+ Another challenge worth highlighting was the language barrier. Despite the advanced language capabilities of modern LLMs, the participants had difficulties when formulating language-related questions. P49 stated, "I got a response in Spanish when I typed a Spanish building name but the chatbot didn't answer the question I was asking." Similarly, P28 describes, "I had to rephrase a couple of times, especially If I was asking about phrases in different language than English the chatbot would shoot back a failed search."
270
+
271
+ # 5.2 Search Results
272
+
273
+ We examined the search queries and the results. For instance, as shown in Figure 11a, when participants searched for "Stockland
274
+
275
+ <table><tr><td>Qualitative Code</td><td>Description</td><td>Example Response</td></tr><tr><td>Location Specificity</td><td>Precisely specifying and differentiating streets within a city</td><td>P1(Search): I did have a hard time. There were many First and Second streets I was on, and it is difficult to distinguish those between the first and second streets of other cities</td></tr><tr><td>Language Barriers</td><td>Identifying Foreign Words with Non-English alphabets</td><td>P13(LLM): My strategy of finding word clues failed if the words I saw were in a language that does not use the English alphabet</td></tr><tr><td>Interpreting Visual Clues</td><td>Translating visual clues into effective questions or searches</td><td>P58(Search): It is hard to try to search the architecture of a building without using an image search</td></tr><tr><td>Efficient Query Formulation</td><td>Crafting efficient search queries that would yield precise results</td><td>P38(LLM): It was challenging to find the correct wording to get the desired result</td></tr><tr><td>Lack of Textual Clues</td><td>Locations with limited textual clues, for example remote locations</td><td>P53(LLM): Sometimes it&#x27;s very hard to find street names or shop names from the image, especially if the images are from remote locations</td></tr><tr><td>Effective Clue Selection</td><td>Finding clues that can be described or will generate effective results when searched</td><td>P50(LLM): Figuring out what clue to look up, for example, local places were useful, meanwhile large chains are not as useful</td></tr></table>
276
+
277
+ Table 3: Qualitative codes resulting from the coding of challenges described by participants, and example quotes
278
+
279
+ ![](images/69fe3fb2437e62019baa2950e8769597c4627c0d99e57482870f7835b91ec861.jpg)
+ ![](images/8059c60cdec921ce0f16163053ef29a1240771664f3b8379e7baaf045a7a1c53.jpg)
+ ![](images/50dff0fa51e66f436d8767a1ba82a2d73f3570d8f61dcdaadb275d874e0e2391.jpg)
+ ![](images/7cf1f5fe3ab824046b28d33d1464815b0e0d72a4d31311f6c1db48eb4c8992c2.jpg)
+ ![](images/e0d559fafe41af6c0e32cecb3642543f91e862e6cebf84c4706bfd6ec677ba75.jpg)
+ ![](images/6cb80491367f1bd2d5b25b3d506c85ae8a3bde44ffd702d360b7c22b8e7efd65.jpg)
+ (a) Round 2 clue: Stockland Building. (b) Round 5 clue: tritecnics. (c) Round 6 clue: Camuzzi gas del sur
+ Figure 11: Comparison of results obtained from asking about the same clue by participants using different search assistants
295
+ building", LLM search did not return any results and requested clarification, while the search engine quickly located the building in Sydney, Australia, in the first few results.
296
+
297
+ Similar disparities were observed when searching in languages other than English. For instance, when attempting to locate the "tritecnics" company, as shown in Figure 11b, the LLM-based search struggled to translate or make sense of the word, whereas the search engine produced map results for Barcelona, Spain. In Figure 11c, while the LLM-based search successfully replied that "Camuzzi gas del sur" was a gas distribution company in Argentina, the search engine identified the location as the remote city of Ushuaia and presented a map, effectively solving the task for those participants.
298
+
299
+ # 6 DISCUSSION
300
+
301
+ The results demonstrate three key findings: (1) participants using traditional search for assistance outperformed those using the LLM-based search, (2) distinct strategies emerged due to significant variations in the query formulation strategies between the two conditions, and (3) the qualitative findings revealed that participants using the LLM search struggled to effectively formulate their queries.
302
+
303
+ # 6.1 Differences in Performance
304
+
305
+ In response to (RQ1), the results showed that participants using the traditional search outperformed those using the LLM search. We believe that a substantial portion of the performance difference can be explained by the difference in results for similar queries, as shown in Section 5.2. Specifically, as shown in Figure 11b and Figure 11c, the search engine provided a map, effectively directing our participants to the exact location. LLM-based search should provide not only creative answers but also the same features as a traditional search engine, including maps. As evident from the examples in our study, LLM search did not always return results that should have been within its capabilities. Perhaps the participants needed to explicitly request maps or specific directions to effectively utilize these features; however, this was not necessary for traditional search. This highlights the challenge of crafting effective prompts,
306
+
307
+ <table><tr><td>Query</td><td>LLM Response</td><td>Score</td></tr><tr><td>countries in southern hemisphere with mountains</td><td>The Southern Hemisphere is home to many mountain ranges, including the Andes, the Sierra Nevada de Santa Marta, and the Cordillera Oriental and Cordillera Occidental in South America. The highest peak in the Americas, Aconcagua, is also located in the Andes. In Africa, Mount Kilimanjaro is the tallest mountain and one of the world&#x27;s largest volcanoes. The Rwenzori range of mountains in Africa were created during the formation of the Great Rift Valley. New Zealand&#x27;s South Island has more than 24 mountains with elevations of 9,000 feet or more. I hope this helps!</td><td>605</td></tr><tr><td>patas para arriba location</td><td>(Original Spanish) La expresión “patas para arriba” se utiliza para describir una situación en la que todo está desordenado o confuso. ¿Hay algo más en lo que pueda ayudarte? (Translated) The expression “upside down” is used to describe a situation in which everything is disordered or confusing. Is there anything else I can help you with?</td><td>1240</td></tr><tr><td>24seventaxi location</td><td>I&#x27;m sorry but I couldn&#x27;t find any information about 24seventaxi location. Could you please provide me with more details about what you are looking for?</td><td>1773</td></tr><tr><td>I need to know where the street H. de Magallanes is located in the world</td><td>I&#x27;m sorry but I couldn&#x27;t find any information about H. de Magallanes street. Could you please provide more details about the location or any other information that might help me find it?</td><td>18</td></tr></table>
308
+
309
+ Table 4: Examples where participants developed inaccurate mental models, resulting in the formulation of single, poor-quality queries, which led to low performance
310
+
311
+ which, as research has shown [47], significantly influences the output of LLM-based methods. While "Learn more" links were included with each answer, we found that none of the participants took advantage of this feature. This raises questions about the perceived affordances of LLMs compared to traditional search engines, as the integration of similar features in LLMs may not be as intuitive, as our study suggests. This underutilization of help links in LLM responses highlights the challenges of transitioning from traditional search engines to LLM-based search and aligns with Gibson's theory [10] emphasizing the interaction between users and their technological environment, which shapes the possibilities and constraints for action.
312
+
313
+ In response to (RQ3), our qualitative analysis provided valuable insights into the performance disparities. Over half of the participants using the LLM-based search expressed difficulties in formulating their queries. Some expressed hesitation, while others found it challenging to form queries that effectively communicated their information needs. Participants also struggled to formulate queries in different languages. Despite the stated support of LLM-based search for many languages, there were reported instances of failed results. This emphasizes the challenge of effectively prompting the LLM to comprehend and respond to queries in diverse languages. These challenges were less prevalent when using the search engine. These observations align with our query formulation analysis. In Section 4.1.1, we noted that participants in the LLM condition issued, on average, roughly a single query per round. Figure 3 showed that LLM participants were less inclined to reformulate or issue more queries throughout the task compared to Search participants.
314
+
315
+ While it has been shown that individuals can quickly build mental models when interacting with LLM chatbots [16], the quality of these models remains uncertain, especially considering the relatively new nature of LLM technology. Participants may not have
316
+
317
+ ![](images/7dda494f03816038d30adf943e939280d2a8cad2495008f58153e3c6ae55f71a.jpg)
318
+ Figure 12: Summary findings of query formulation and reformulation analysis.
319
+
320
+ had the experience to develop accurate mental models of LLM capabilities. In the absence of well-defined mental models, users struggle to predict outcomes or make sense of their interactions with LLMs, leading to instances where users pose vague and poorly defined queries while expecting the LLM to respond appropriately [37, 47]. Table 4 demonstrates a few examples of this behavior. It is plausible that these mental models influenced the participants to attempt only a single query, potentially resulting in worse performance.
321
+
322
+ # 6.2 Differences in Query Formulation & Reformulation Strategies
323
+
324
+ Figure 12 presents a summary of our findings on query formulation and reformulation strategies, which is directly related to (RQ2). As described in Section 4.1, our study showed significant differences across all four formulation metrics. Participants in the Search condition issued shorter queries and made significantly greater use of proper nouns, including places, store names, and streets. These findings align with prior research [18, 24], indicating that individuals are accustomed to keyword-based search from a lifetime of experience using traditional search engines. In contrast, participants using the LLM-based search were inclined to issue longer and more natural language queries. These findings suggest that they adhered to the perceived norms of a conversational user interface [8].
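
To make these two formulation metrics concrete, a toy sketch follows. It is purely illustrative and not the analysis code used in the study; the capitalization heuristic is a crude stand-in for a proper part-of-speech tagger, and the example queries are hypothetical.

```python
# Illustrative only: query length and a crude proper-noun count for two
# hypothetical queries, one keyword-style and one conversational.
queries = [
    "H. de Magallanes street location",                                        # keyword-style
    "Can you tell me which country the street H. de Magallanes is located in?" # conversational
]

def query_length(query):
    """Number of whitespace-separated tokens in the query."""
    return len(query.split())

def proper_noun_count(query):
    """Naive heuristic: count capitalized tokens after the first word."""
    tokens = query.split()
    return sum(1 for t in tokens[1:] if t[:1].isupper())

for q in queries:
    print(query_length(q), proper_noun_count(q), q)
```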
325
+
326
+ Several research efforts have explored the topic of query reformulation for traditional web search [2, 13, 20]. In our study, we adopted an existing taxonomy for categorizing both the syntactic and intent-based types of query reformulation, detailed in Section 4.2. Our investigation revealed a notable trend. Although "Specification", or the act of narrowing down the scope and intent of a search, was the dominant category across both conditions, it aligned with different syntactic categories. In the Search condition, participants often expanded their queries by adding terms, a common behavior observed in prior research [6, 7]. However, participants using the LLM search frequently narrowed down their search intents by paraphrasing their initial queries. This observation prompts a critical question of whether the development of new query reformulation taxonomies specific to LLMs could provide a better framework for understanding and characterizing user behavior.
329
+
330
+ # 6.3 Geolocation Sensemaking Strategies
331
+
332
+ As described in Section 5.1.2, participants employed diverse strategies when searching. In geolocation tasks, participants make sense of the clues within the images by using internal or external knowledge representations [29]. Our focus was on external knowledge representations cultivated through their searching. Therefore, we did not ask about internal knowledge, such as participants' cultural backgrounds or travel history.
333
+
334
+ In geolocation tasks, participants engage in sensemaking to interpret visual cues within images. Participants demonstrated adaptability in their approach, consistent with Pirolli and Card's sensemaking model [33], which encompasses both top-down and bottom-up approaches. The top-down approach involves initiating the process with a theory or a broader concept and then seeking data to substantiate it. In this context, an example of the top-down approach is the strategy of language identification: participants employed this strategy by querying the assistant about the language's origin, which narrowed the search scope to specific global regions. Similarly, some participants employed sensory sensemaking as a top-down approach by describing sensory aspects of the location to the assistant. However, the limited effectiveness of this strategy suggests that relying solely on sensory cues may not be sufficient for precise geolocation. The bottom-up approach, on the other hand, entails gathering data first and progressively forming a theory based on the available information. Examples of this approach included using street signs, buildings, and landmarks as reference points, facilitating the identification of cities and then neighborhoods. The strategies identified in this study provide insights into user behavior and align with the cognitive processes driving geolocation sensemaking described by prior work [41]. Understanding how participants construct mental models based on image clues and apply sensemaking processes enhances our ability to provide effective support and guidance in geolocation tasks.
335
+
336
+ # 6.4 Limitations
337
+
338
+ Amongst the study limitations was the latency in LLM-based search. Although the latency was only a few seconds, it could have disrupted the conversational flow, potentially affecting satisfaction and engagement during the task. Our study included participants with diverse backgrounds, education levels, ages, and degrees of technical literacy. This inherent diversity may have influenced how participants interacted with both the image geolocation task and the search engines. Furthermore, it is essential to acknowledge that image geolocation tasks have historically been conducted by expert image analysts. In our study, we did not explicitly categorize participants based on their levels of expertise in geolocation. Lastly, our evaluation did not include the assessment of specific metrics such as search engine result pages (SERPs), clicks, or other performance indicators that could offer a more comprehensive view of the effectiveness of LLM-based search in image geolocation tasks.
341
+
342
+ # 7 CONCLUSION AND FUTURE WORK
343
+
344
+ This study offered valuable insights into differences in strategies and user behaviors when using traditional search compared to LLM-based search for image geolocation. We examined the differences in performance, query formulation, and the sensemaking strategies employed by participants in these two conditions. Despite the growing capabilities of LLMs, the results reveal that participants using traditional search engines outperformed those relying on LLM-based search. An in-depth exploration of the distinct query formulation strategies utilized by participants largely explained this performance difference; our qualitative findings identified query formulation as the most challenging aspect of the experiment. Additionally, we observed a tendency among participants using LLMs to engage in fewer multi-query search sessions, possibly reflecting uncertainties surrounding LLM capabilities and the perceived affordances associated with the LLM interface.
345
+
346
+ Our findings can extend beyond the geolocation domain, providing initial insights into user interactions with LLMs in real-world applications and prompting more research on human-centered design of LLM interfaces, with a focus on understanding how users form mental models of LLMs. To achieve more useful LLM interfaces, it is necessary to first develop a better understanding of query formulation strategies and behavior. Extensive prior research about traditional search provides a solid foundation for exploring query formulation strategies. Our work presented in this paper begins to extend this research based on the novel capabilities and the conversational nature of LLM-based search; however, more research in this area is needed. The second component of making LLM interfaces more usable is to teach novices how to prompt effectively. Emerging systems like AI Chains [46], MemorySandbox [14], Feedback Buffet [28], and PromptMaker [19] are at the forefront, making LLMs more comprehensible and user-friendly through the use of templates [19, 28] and procedural guidance [14, 15, 46]. These tools are designed to assist novice users in prompt creation by integrating visual problem representation, incorporating partial prompts, and providing user-friendly interfaces that facilitate easy iteration based on the LLM output. These advancements represent a leap towards a future in which user interactions with language models become more intuitive and efficient.
347
+
348
+ # ACKNOWLEDGMENTS
349
+
350
+ Thanks to Andrea Brandt for assisting with the user study. This research was sponsored by the DEVCOM Analysis Center and was accomplished under Cooperative Agreement Number W911NF-22-2-0001. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the Army Research Office or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notation herein.
351
+
352
+ # REFERENCES
353
+
354
+ [1] Leonard Adolphs, Shehzaad Dhuliawala, and Thomas Hofmann. 2021. How to Query Language Models? arXiv:2108.01928 [cs.CL]
355
+ [2] Anne Aula. 2003. Query Formulation in Web Information Search.. In ICWI. International Conference WWW/Internet, Algarve, Portugal, 403-410.
356
+ [3] Cory Barr, Rosie Jones, and Moira Regelson. 2008. The Linguistic Structure of English Web-Search Queries. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (Honolulu, Hawaii) (EMNLP '08). Association for Computational Linguistics, USA, 1021-1030.
357
+ [4] Simon Bilel Jegham, dim fort. 2021. GeoGuess. MIT Licensed. https://github.com/GeoGuess/GeoGuess Accessed: 2023.
358
+ [5] Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language Models are Few-Shot Learners. arXiv:2005.14165 [cs.CL]
359
+ [6] Jia Chen, Jiaxin Mao, Yiqun Liu, Fan Zhang, Min Zhang, and Shaoping Ma. 2021. Towards a Better Understanding of Query Reformulation Behavior in Web Search. In Proceedings of the Web Conference 2021 (Ljubljana, Slovenia) (WWW'21). Association for Computing Machinery, New York, NY, USA, 743-755. https://doi.org/10.1145/3442381.3450127
360
+ [7] Jia Chen, Jiaxin Mao, Yiqun Liu, Min Zhang, and Shaoping Ma. 2019. Investigating query reformulation behavior of search users. In China Conference on Information Retrieval. Springer, China, 39-51.
361
+ [8] Leigh Clark, Nadia Pantidi, Orla Cooney, Philip Doyle, Diego Garaialde, Justin Edwards, Brendan Spillane, Emer Gilmartin, Christine Murad, Cosmin Munteanu, Vincent Wade, and Benjamin R. Cowan. 2019. What Makes a Good Conversation? Challenges in Designing Truly Conversational Agents. In Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (Glasgow, Scotland Uk) (CHI '19). Association for Computing Machinery, New York, NY, USA, 1-12. https://doi.org/10.1145/3290605.3300705
362
+ [9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2019, Minneapolis, MN, USA, June 2-7, 2019, Volume 1 (Long and Short Papers), Jill Burstein, Christy Doran, and Thamar Solorio (Eds.). Association for Computational Linguistics, Minneapolis, USA, 4171-4186. https://doi.org/10.18653/v1/n19-1423
363
+ [10] James J Gibson. 2014. The ecological approach to visual perception: classic edition. Psychology press, Online.
364
+ [11] Jutta Haider and Olof Sundin. 2019. Invisible search and online search engines: The ubiquity of search in everyday life. Taylor & Francis, London.
365
+ [12] James Hays and Alexei A Efros. 2008. Im2gps: estimating geographic information from a single image. In 2008 IEEE conference on computer vision and pattern recognition. IEEE, CVPR, Alaska, USA, 1-8.
366
+ [13] Jeff Huang and Efthimis N. Efthimiadis. 2009. Analyzing and Evaluating Query Reformulation Strategies in Web Search Logs. In Proceedings of the 18th ACM Conference on Information and Knowledge Management (Hong Kong, China) (CIKM '09). Association for Computing Machinery, New York, NY, USA, 77-86. https://doi.org/10.1145/1645953.1645966
367
+ [14] Ziheng Huang, Sebastian Gutierrez, Hemanth Kamana, and Stephen MacNeil. 2023. Memory Sandbox: Transparent and Interactive Memory Management for Conversational Agents. In Adjunct Proceedings of the 36th Annual ACM Symposium on User Interface Software and Technology (UIST '23 Adjunct). Association for Computing Machinery, New York, NY, USA, Article 97, 3 pages. https://doi.org/10.1145/3586182.3615796
368
+ [15] Ziheng Huang, Kexin Quan, Joel Chan, and Stephen MacNeil. 2023. CausalMapper: Challenging Designers to Think in Systems with Causal Maps and Large Language Model. In Proceedings of the 15th Conference on Creativity and Cognition (Virtual Event, USA) (C&C '23). Association for Computing Machinery, New York, NY, USA, 325-329. https://doi.org/10.1145/3591196.3596818
369
+ [16] Angel Hsing-Chi Hwang and Andrea Stevenson Won. 2021. IdeaBot: investigating social facilitation in human-machine team creativity. In Proceedings of the 2021 CHI Conference on Human Factors in Computing Systems. ACM, Yokohama, Japan, 1-16.
370
+ [17] Rahul J Jadhav, Om Prakash Gupta, and Usharani T Pawar. 2011. Significant role of search engine in higher education. International Journal of Scientific & Engineering Research 2, 4 (2011), 1-5.
371
+ [18] Bernard J Jansen and Amanda Spink. 2006. How are we searching the World Wide Web? A comparison of nine search engine transaction logs. Information processing & management 42, 1 (2006), 248-263.
372
+ [19] Ellen Jiang, Kristen Olson, Edwin Toh, Alejandra Molina, Aaron Donsbach, Michael Terry, and Carrie J Cai. 2022. PromptMaker: Prompt-Based Prototyping with Large Language Models. In Extended Abstracts of the 2022 CHI Conference on Human Factors in Computing Systems (New Orleans, LA, USA) (CHI EA '22). Association for Computing Machinery, New York, NY, USA, Article 35, 8 pages. https://doi.org/10.1145/3491101.3503564
375
+ [20] Jyun-Yu Jiang, Yen-Yu Ke, Pao-Yu Chien, and Pu-Jen Cheng. 2014. Learning User Reformulation Behavior for Query Auto-Completion. In Proceedings of the 37th International ACM SIGIR Conference on Research & Development in Information Retrieval (Gold Coast, Queensland, Australia) (SIGIR '14). Association for Computing Machinery, New York, NY, USA, 445-454. https://doi.org/10.1145/2600428.2609614
376
+ [21] Zhengbao Jiang, Frank F. Xu, Jun Araki, and Graham Neubig. 2020. How Can We Know What Language Models Know? arXiv:1911.12543 [cs.CL]
377
+ [22] Seungun Kim, Masaki Matsubara, and Atsuyuki Morishima. 2022. Image Geolocation by Non-Expert Crowd Workers with an Expert Strategy. In 2022 IEEE International Conference on Big Data. IEEE xplore, Osaka, Japan, 4009-4013. https://doi.org/10.1109/BigData55660.2022.10020932
378
+ [23] Rachel Kohler, John Purviance, and Kurt Luther. 2017. Supporting Image Geolocation with Diagramming and Crowdsourcing. Proceedings of the AAAI Conference on Human Computation and Crowdsourcing 5, 1 (Sep. 2017), 98-107. https://doi.org/10.1609/hcomp.v5i1.13296
379
+ [24] Dirk Lewandowski. 2008. Search engine user behaviour: How can users be guided to quality content? Information Services & Use 28, 3-4 (2008), 261-268.
380
+ [25] Xinyi Li, Bob J.A. Schijvenaars, and Maarten de Rijke. 2017. Investigating queries and search failures in academic search. Information Processing & Management 53, 3 (2017), 666-683. https://doi.org/10.1016/j.ipm.2017.01.005
381
+ [26] Tsung-Yi Lin, Yin Cui, Serge Belongie, and James Hays. 2015. Learning deep representations for ground-to-aerial geolocation. In 2015 IEEE Conference on Computer Vision and Pattern Recognition. IEEE xplore, Boston, MA, USA, 5007-5015. https://doi.org/10.1109/CVPR.2015.7299135
382
+ [27] Chang Liu, Xiangmin Zhang, and Wei Huang. 2016. The exploration of objective task difficulty and domain knowledge effects on users' query formulation. Proceedings of the Association for Information Science and Technology 53 (12 2016), 1-9. https://doi.org/10.1002/pra2.2016.14505301063
383
+ [28] Stephen MacNeil, Andrew Tran, Joanne Kim, Ziheng Huang, Seth Bernstein, and Dan Mogil. 2023. Prompt Middleware: Mapping Prompts for Large Language Models to UI Affordances. arXiv:2307.01142 [cs.HC]
384
+ [29] Sneha Mehta, Chris North, and Kurt Luther. 2016. An exploratory study of human performance in image geolocation tasks. In GroupSight Workshop on Human Computation for Image and Video Analysis, Vol. 308. HCOMP 2016, Austin, TX (USA), 3-4.
385
+ [30] Bo Pang and Ravi Kumar. 2011. Search in the lost sense of “query”: Question formulation in Web search queries and its temporal changes. In Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies. Association for Computational Linguistics, USA, 135–140.
386
+ [31] Andrea Papenmeier, Dagmar Kern, Daniel Hienert, Alfred Sliwa, Ahmet Aker, and Norbert Fuhr. 2021. Starting Conversations with Search Engines - Interfaces That Elicit Natural Language Queries. In Proceedings of the 2021 Conference on Human Information Interaction and Retrieval (Canberra ACT, Australia) (CHIIR '21). Association for Computing Machinery, New York, NY, USA, 261-265. https://doi.org/10.1145/3406522.3446035
387
+ [32] Fabio Petroni, Patrick Lewis, Aleksandra Piktus, Tim Rocktäschel, Yuxiang Wu, Alexander H. Miller, and Sebastian Riedel. 2020. How Context Affects Language Models' Factual Predictions. arXiv:2005.04611 [cs.CL]
388
+ [33] Peter Pirolli and Stuart Card. 2005. The sensemaking process and leverage points for analyst technology as identified through cognitive task analysis. In Proceedings of international conference on intelligence analysis, Vol. 5. McLean, VA, USA, 2-4.
389
+ [34] Amudha Ravi Shankar, Jose Fernandez-Marquez, Gabriele Scalia, Maria Rosa Mondardini, and Giovanna Di Marzo Serugendo. 2019. Crowd4EMS: A Crowdsourcing Platform for Gathering and Geolocating Social Media Content in Disaster Response. ISPRS - International Archives of the Photogrammetry, Remote Sensing and Spatial Information Sciences XLII-3/W8 (08 2019), 331-340. https://doi.org/10.5194/isprs-archives-XLII-3-W8-331-2019
390
+ [35] Laria Reynolds and Kyle McDonell. 2021. Prompt Programming for Large Language Models: Beyond the Few-Shot Paradigm. In Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems (Yokohama, Japan) (CHI EA '21). Association for Computing Machinery, New York, NY, USA, Article 314, 7 pages. https://doi.org/10.1145/3411763.3451760
391
+ [36] S Salehi, J Tina-Du, and H Ashman. 2018. Use of Web search engines and personalisation in information searching for educational purposes. Information Research.
392
+ [37] Hariharan Subramonyam, Christopher Lawrence Pondoc, Colleen Seifert, Maneesh Agrawala, and Roy Pea. 2023. Bridging the Gulf of Envisioning: Cognitive Design Challenges in LLM Interfaces. arXiv preprint arXiv:2309.14459 (2023), 10 pages.
393
+ [38] Jaime Teevan, Daniel Ramage, and Meredith Ringel Morris. 2011. TwitterSearch: A Comparison of Microblog Search and Web Search. In Proceedings of the Fourth ACM International Conference on Web Search and Data Mining (Hong Kong, China) (WSDM '11). Association for Computing Machinery, New York, NY, USA, 35-44. https://doi.org/10.1145/1935826.1935842
396
+ [39] David R Thomas. 2006. A general inductive approach for analyzing qualitative evaluation data. American journal of evaluation 27, 2 (2006), 237-246.
397
+ [40] Paul Thomas, Bodo Billerbeck, Nick Craswell, and Ryen W White. 2019. Investigating searchers' mental models to inform search explanations. ACM Transactions on Information Systems (TOIS) 38, 1 (2019), 1-25.
398
+ [41] Sukrit Venkatagiri, Jacob Thebault-Spieker, Rachel Kohler, John Purviance, Rifat Sabbir Mansur, and Kurt Luther. 2019. GroundTruth: Augmenting expert image geolocation with crowdsourcing and shared representations. Proceedings of the ACM on Human-Computer Interaction 3, CSCW (2019), 1–30.
399
+ [42] Yiwei Wang, Jiqun Liu, Soumik Mandal, and Chirag Shah. 2017. Search successes and failures in query segments and search tasks: A field study. Proceedings of the Association for Information Science and Technology 54, 1 (2017), 436-445.
400
+ [43] Jason Wei, Yi Tay, Rishi Bommasani, Colin Raffel, Barret Zoph, Sebastian Borgeaud, Dani Yogatama, Maarten Bosma, Denny Zhou, Donald Metzler, Ed H. Chi, Tatsunori Hashimoto, Oriol Vinyals, Percy Liang, Jeff Dean, and William Fedus. 2022. Emergent Abilities of Large Language Models. arXiv:2206.07682 [cs.CL]
401
+ [44] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Brian Ichter, Fei Xia, Ed Chi, Quoc V Le, and Denny Zhou. 2022. Chain-of-Thought Prompting Elicits Reasoning in Large Language Models. In Advances in Neural Information Processing Systems, S. Koyejo, S. Mohamed, A. Agarwal, D. Belgrave, K. Cho, and A. Oh (Eds.), Vol. 35. Curran Associates, Inc., New Orleans, USA, 24824-24837. https://proceedings.neurips.cc/paper_files/paper/2022/file/9d5609613524ecf4f15af0f7b31abca4-Paper-Conference.pdf
404
+ [45] Tobias Weyand, Ilya Kostrikov, and James Philbin. 2016. PlaNet - Photo Geolocation with Convolutional Neural Networks. In Computer Vision - ECCV 2016. Springer International Publishing, Amsterdam, The Netherlands, 37-55. https://doi.org/10.1007/978-3-319-46484-8_3
405
+ [46] Tongshuang Wu, Michael Terry, and Carrie Jun Cai. 2022. AI Chains: Transparent and Controllable Human-AI Interaction by Chaining Large Language Model Prompts. In Proceedings of the 2022 CHI Conference on Human Factors in Computing Systems (New Orleans, LA, USA) (CHI '22). Association for Computing Machinery, New York, NY, USA, Article 385, 22 pages. https://doi.org/10.1145/3491102.3517582
406
+ [47] J.D. Zamfirescu-Pereira, Richmond Y. Wong, Bjoern Hartmann, and Qian Yang. 2023. Why Johnny Can't Prompt: How Non-AI Experts Try (and Fail) to Design LLM Prompts. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems (Hamburg, Germany) (CHI '23). Association for Computing Machinery, New York, NY, USA, Article 437, 21 pages. https://doi.org/10.1145/3544548.3581388
407
+ [48] Guido Zuccon, Bevan Koopman, and Joao Palotti. 2015. Diagnose this if you can: On the effectiveness of search engines in finding medical self-diagnosis information. In Advances in Information Retrieval: 37th European Conference on IR Research, ECIR 2015, March 29-April 2, 2015. Proceedings 37. Springer, Vienna, Austria, 562-567.
2401.10xxx/2401.10184/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f46614d9742e10b67b9017e11e69b3fa1e5dc95a8ca37049113a42d20d412e82
3
+ size 755062
2401.10xxx/2401.10184/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10185/57db14d9-d6c0-49a9-b07e-a576a72a4ba2_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10185/57db14d9-d6c0-49a9-b07e-a576a72a4ba2_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10185/57db14d9-d6c0-49a9-b07e-a576a72a4ba2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0580b120c766a67d12b36b99cdd8f5ded49556fd97e4ff60efeeaceb3881284c
3
+ size 2557448
2401.10xxx/2401.10185/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10185/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06ad28d823c7103c3a36941751390953cad09da884d04bda9d529003087e7497
3
+ size 1058483
2401.10xxx/2401.10185/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10190/2c0034b6-01c2-475a-8cea-68f1fc9d78ab_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10190/2c0034b6-01c2-475a-8cea-68f1fc9d78ab_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10190/2c0034b6-01c2-475a-8cea-68f1fc9d78ab_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cb345bcc9b5e59d39d1aee7cbdca50a32a48bf28be6876a110290ea449e7bfb
3
+ size 4920449
2401.10xxx/2401.10190/full.md ADDED
@@ -0,0 +1,690 @@
1
+ # A Kaczmarz-inspired approach to accelerate the optimization of neural network wavefunctions
2
+
3
+ Gil Goldshlager<sup>a</sup>, Nilin Abrahamsen<sup>b</sup>, Lin Lin<sup>a,c</sup>
4
+
5
+ $^{a}$ Department of Mathematics, University of California, Berkeley, CA 94720, USA
6
+
7
+ <sup>b</sup> The Simons Institute for the Theory of Computing, Berkeley, CA 94720, USA
8
+
9
+ $^{c}$ Applied Mathematics and Computational Research Division, Lawrence Berkeley National Laboratory, Berkeley, CA 94720, USA
12
+
13
+ # Abstract
14
+
15
+ Neural network wavefunctions optimized using the variational Monte Carlo method have been shown to produce highly accurate results for the electronic structure of atoms and small molecules, but the high cost of optimizing such wavefunctions prevents their application to larger systems. We propose the Subsampled Projected-Increment Natural Gradient Descent (SPRING) optimizer to reduce this bottleneck. SPRING combines ideas from the recently introduced minimum-step stochastic reconfiguration optimizer (MinSR) and the classical randomized Kaczmarz method for solving linear least-squares problems. We demonstrate that SPRING outperforms both MinSR and the popular Kronecker-Factored Approximate Curvature method (KFAC) across a number of small atoms and molecules, given that the learning rates of all methods are optimally tuned. For example, on the oxygen atom, SPRING attains chemical accuracy after forty thousand training iterations, whereas both MinSR and KFAC fail to do so even after one hundred thousand iterations.
16
+
17
+ # 1. Introduction
18
+
19
+ Predicting the properties of molecules and materials from first principles has numerous applications. For many chemical properties, it suffices to work within the Born-Oppenheimer approximation, in which the nuclei are viewed as classical point charges and only the electrons exhibit quantum-mechanical behavior. The study of chemistry through this lens is known as electronic structure theory.
20
+
21
+ Within electronic structure theory, methods to model the many-body electron wavefunction include Hartree-Fock theory, configuration interaction methods, and coupled cluster theory. A typical ansatz for such methods is a sum of Slater determinants which represent antisymmetrized products of single-particle states. The benefit of such an ansatz is that the energy and other properties of the wavefunction can be evaluated analytically from pre-computed few-particle integrals.
22
+
23
+ Another approach to the electronic structure problem is the variational Monte Carlo method (VMC) [1, 2]. In VMC, the properties of the wavefunction are calculated using Monte Carlo sampling rather than direct numerical integration, and the energy is variationally minimized through a stochastic optimization procedure. This increases the cost of the calculations, especially when high accuracy is required, but it enables the use of much more general ansatzes. Traditionally, these ansatzes included Slater-Jastrow wavefunctions and Slater-Jastrow-backflow wavefunctions [1].
24
+
25
+ In recent decades the modeling of very high-dimensional data and functions has seen impressive progress through the use of neural networks. For the quantum chemistry problem, the high-dimensional quantum wavefunction can be modeled by combining several neural network layers with a determinantal layer that enforces the Fermionic antisymmetry [3, 4]. Such neural network wavefunctions, optimized using the variational Monte Carlo method, have enabled near-exact computation of the ground-state energy for small molecules [5, 6, 7] and certain condensed matter systems [8, 9, 10, 11, 12].
26
+
27
+ Due to the large number of parameters and the highly nonlinear nature of neural networks, the optimization of neural network wavefunctions poses a significant challenge. Prior to neural network wavefunctions, VMC simulations typically relied on powerful domain-specific optimizers such as stochastic reconfiguration (SR) [13, 14, 15] and the linear method [16, 17, 18, 19, 20]. These optimizers are able to converge in only tens or hundreds of iterations using highly accurate gradient estimates based on millions or more Monte Carlo samples. However, when applied to a wavefunction with $N_{p}$ parameters, they require solving either a linear system or a generalized eigenvalue problem involving a dense $N_{p} \times N_{p}$ matrix known as the $S$ matrix, with a cost that is $O(N_{p}^{3})$ in general. The recently proposed Rayleigh-Gauss-Newton optimizer [21] presents a potentially favorable intermediate point between stochastic reconfiguration and the linear method, but still incurs a similar computational cost.
28
+
29
+ Since high-accuracy neural network architectures for molecules typically involve at least hundreds of thousands of parameters, it is not possible to directly apply SR or the linear method in this context. Instead, molecular neural network wavefunctions are typically optimized using methods from the machine learning community. These methods have a low per-iteration cost and require only a small number of Monte Carlo samples per iteration (or in machine learning parlance, a small minibatch size). However, they can require hundreds of thousands of optimization steps to converge fully.
30
+
31
+ For the FermiNet and related architectures [3, 6, 12, 5], the most popular optimizer is the Kronecker-Factored Approximate Curvature method (KFAC) [22]. KFAC was originally designed as a tractable approximation to natural gradient descent (NGD) [23] for machine learning models with millions of parameters. In the quantum setting, natural gradient descent is equivalent to stochastic reconfiguration since the $S$ matrix can be viewed as the Fisher information matrix of the normalized probability distribution defined by the wavefunction [3]. Taking the view of stochastic reconfiguration, the key ingredient of KFAC is an approximate factorization of the $S$ matrix. This factorization enables the efficient inversion of the approximate $S$ matrix and also makes it possible to approximate $S$ based on multiple recent minibatches. This can significantly improve performance in highly stochastic settings where a single minibatch can provide only a very noisy approximation to the true $S$ matrix. It is worth noting that the interpretation of KFAC as an approximate natural gradient method has recently been called into question due to some experiments which show that KFAC performs better than exact natural gradient updates [24]. Regardless of the underlying mechanism, KFAC represents the state-of-the-art in optimizing neural network wavefunctions for molecules and solids.
32
+
33
+ An alternative to KFAC is to take advantage of the fact that the estimate of the $S$ matrix is always low-rank when it is based on only a small set of Monte Carlo samples. This idea was first introduced in the machine learning community in the form of efficient subsampled natural gradient descent [25]. In the physics community, several recent works have proposed similar methods for the VMC setting [26, 27]. These methods make fewer heuristic assumptions than KFAC and are much simpler to describe and implement. However, unlike KFAC, they are limited to estimating the $S$ matrix based on only a single minibatch at a time.
36
+
37
+ In this work, we improve upon these existing optimizers with a new method that we call Subsampled Projected-Increment Natural Gradient Descent (SPRING). Our method is most directly inspired by the minimum-step stochastic reconfiguration (MinSR) approach of Chen and Heyl [26]. MinSR is based on the observation that the SR parameter update can be formulated as the solution to an overdetermined linear least-squares problem. At each iteration, the Monte Carlo samples provide access to a small set of rows from this least-squares problem, yielding an underdetermined subproblem which has many possible solutions. To make the parameter update unique, Chen and Heyl propose to choose the minimal-norm solution to the sampled subproblem.
38
+
39
+ We improve upon this scheme by taking inspiration from the randomized Kaczmarz method for solving overdetermined linear least-squares problems [28]. The original randomized Kaczmarz method solves the problem by sampling a single row at a time. The subsequently developed block version of the method (see, e.g., [29]) instead uses a small set of rows at each iteration. In either case, the key idea of the Kaczmarz method is to iteratively project the solution vector onto the hyperplane of solutions to each sampled subproblem. When the system is consistent and the rows are sampled from an appropriate probability distribution, this approach provably converges to the true solution with an expected error that decays by a constant factor at each iteration.
40
+
41
+ Recall that the SR parameter update can be formulated as the solution to an overdetermined linear least-squares problem, and that each minibatch of Monte Carlo samples provides access to a randomly sampled underdetermined subproblem. In direct analogy to the Kaczmarz method, the parameter update in SPRING is calculated by projecting the previous parameter update onto the hyperplane of solutions to the newly sampled subproblem. By leveraging the previous parameter update as the starting point for the projection, SPRING is able to use data from previous minibatches to obtain a more accurate approximation to the true SR update direction. Furthermore, this improvement is obtained at essentially no extra cost relative to MinSR.
42
+
43
+ To demonstrate the effectiveness of SPRING, we apply it to optimize a FermiNet wavefunction for several small atoms and molecules, namely the carbon, nitrogen, and oxygen atoms, two configurations of the N2 molecule, and the CO molecule at equilibrium. We use the VMCNet code [30] for all numerical experiments. We find that SPRING consistently outperforms MinSR and KFAC when all three methods have their learning rates tuned for maximal performance. Remarkably, on the oxygen atom, SPRING is able to attain chemical accuracy after forty thousand training iterations, whereas both MinSR and KFAC fail to do so even after one hundred thousand iterations.
44
+
45
+ # 2. Background
46
+
47
+ # 2.1. Variational Monte Carlo
48
+
49
+ In this work we focus on the application of the variational Monte Carlo method to find ground-state wavefunctions for molecular systems. A molecule is defined by the positions $\mathsf{R}_I$ and charges $Z_{I}$ of $M$ atomic nuclei. Using the Born-Oppenheimer approximation [31], we model the nuclei as fixed point charges. The quantum system of interest then consists of $N$ electrons interacting under the influence of these charges. The Hamiltonian for the system is given in atomic units by
50
+
51
+ $$
52
+ H = - \frac {1}{2} \sum_ {i = 1} ^ {N} \Delta_ {r _ {i}} - \sum_ {i = 1} ^ {N} \sum_ {I = 1} ^ {M} \frac {Z _ {I}}{\left| r _ {i} - \mathsf {R} _ {I} \right|} + \sum_ {i < j} ^ {N} \frac {1}{\left| r _ {i} - r _ {j} \right|} + \sum_ {I < J} ^ {M} \frac {Z _ {I} Z _ {J}}{\left| \mathsf {R} _ {I} - \mathsf {R} _ {J} \right|}, \tag {1}
53
+ $$
54
+
55
+ where $r_i \in \mathbb{R}^3$ represents the position of electron $i$ and $\Delta_{r_i}$ represents the corresponding Laplacian operator. Because electrons are fermions, the wavefunction must be antisymmetric with respect to particle exchange. Within the space of antisymmetric wavefunctions, the ground-state energy $E_0$ and wavefunction $|\psi_0\rangle$ correspond to the smallest eigenvalue of $H$ and its corresponding eigenfunction.
56
+
57
+ We make several simplifications to the space of wavefunctions before searching for the ground-state. First, since the Hamiltonian is Hermitian, its ground-state wavefunction can be chosen to be purely real. We thus limit our search to real normalizable many-body wavefunctions of the form $\psi : \mathbb{R}^{3N} \to \mathbb{R}$ , usually written as $\psi(R) = \psi(r_1, \ldots, r_N)$ with $R \in \mathbb{R}^{3N}$ . Next, we fix the numbers $N_{\uparrow}, N_{\downarrow}$ of up- and down-spin electrons a priori for each calculation. Given this assumption, together with the antisymmetry of the wavefunction and the spin-independence of the Hamiltonian, it is possible to assume without loss of generality that the first $N_{\uparrow}$ electrons are always spin-up and the last $N_{\downarrow}$ electrons are always spin-down [1].
58
+
59
+ The ground-state wavefunction can be found by minimizing the expectation value of the energy:
60
+
61
+ $$
62
+ | \psi_ {0} \rangle = \underset {\psi} {\operatorname {a r g m i n}} \frac {\langle \psi | H | \psi \rangle}{\langle \psi | \psi \rangle}. \tag {2}
63
+ $$
64
+
65
+ Given a variational ansatz $|\psi_{\theta}\rangle$ , we can then define a loss function
66
+
67
+ $$
68
+ L (\theta) = \frac {\left\langle \psi_ {\theta} \right| H \left| \psi_ {\theta} \right\rangle}{\left\langle \psi_ {\theta} \mid \psi_ {\theta} \right\rangle} \tag {3}
69
+ $$
70
+
71
+ and represent the ground-state approximately via
72
+
73
+ $$
74
+ \theta^ {*} = \underset {\theta} {\operatorname {a r g m i n}} L (\theta), | \psi_ {0} \rangle \approx | \psi_ {\theta^ {*}} \rangle , E _ {0} \approx L (\theta^ {*}). \tag {4}
75
+ $$
76
+
77
+ In some electronic structure methods, such as the Hartree-Fock method and configuration interaction methods, the ansatz is chosen to enable the direct calculation of $L(\theta)$ based on precomputed one- and two-electron integrals. In variational Monte Carlo, the ansatz is chosen in a more flexible way, such as a Slater-Jastrow ansatz, a Slater-Jastrow-backflow ansatz, or a neural network wavefunction. For such ansatzes it is not possible to directly evaluate $L(\theta)$ , so it is necessary to instead approximate it via Monte Carlo integration. To this end, $L(\theta)$ can be reformulated stochastically as
78
+
79
+ $$
80
+ L (\theta) = \mathbb {E} _ {R \sim p} \left[ E _ {L} (\theta) \right], \tag {5}
81
+ $$
82
+
83
+ where $p(R) = \psi_{\theta}(R)^{2} / \langle \psi_{\theta}|\psi_{\theta}\rangle$ and $E_{L}(R) = H\psi_{\theta}(R) / \psi_{\theta}(R)$ (see, e.g., [1], Section III.C). This last term, $E_{L}(R)$ , is known as the local energy of the wavefunction $|\psi_{\theta}\rangle$ at the position $R$ .
84
+
85
+ To optimize a variational wavefunction using this formula, it is usually necessary to calculate the gradient $\nabla_{\theta}L(\theta)$ . A convenient formula for this gradient can be derived by taking advantage of the fact that $H$ is Hermitian (see, e.g., [32], Appendix E):
86
+
87
+ $$
88
+ g = \nabla_ {\theta} L (\theta) = 2 \mathbb {E} _ {R \sim p} \left[ \nabla_ {\theta} \log | \psi_ {\theta} (R) | \left(E _ {L} (R) - L (\theta)\right) \right]. \tag {6}
89
+ $$
90
+
91
+ In practice, both $L(\theta)$ and $g$ are estimated stochastically using Markov-Chain Monte Carlo sampling to generate samples from $p(R)$ . The parameters can then be updated by a variety of optimization schemes, several of which we discuss in the following sections.
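
To make Eqs. (5)-(6) concrete, the following sketch (not part of the original text) estimates the energy and its parameter gradient for a hypothetical one-parameter hydrogen-like ansatz $\psi_{\theta}(r) = e^{-\theta |r|}$, for which the local energy has a closed form and $p(R)$ can be sampled exactly rather than by Markov-Chain Monte Carlo; all variable names and numerical settings are illustrative.

```python
# Illustrative sketch only: Monte Carlo estimates of Eqs. (5)-(6) for the toy
# ansatz psi_theta(r) = exp(-theta*|r|) on a hydrogen-like atom of charge Z.
# For this ansatz, E_L(r) = -0.5*(theta**2 - 2*theta/r) - Z/r and
# d/dtheta log|psi_theta(r)| = -r, both analytic.
import numpy as np

rng = np.random.default_rng(0)
Z, theta, n_samples = 1.0, 0.8, 200_000

# p(R) ~ psi^2 gives a radial density proportional to r^2 exp(-2*theta*r),
# i.e. a Gamma(3, 1/(2*theta)) distribution, so we can sample it exactly.
r = rng.gamma(shape=3.0, scale=1.0 / (2.0 * theta), size=n_samples)

e_loc = -0.5 * (theta**2 - 2.0 * theta / r) - Z / r    # local energies E_L(R)
dlogpsi = -r                                           # d/dtheta log|psi_theta(R)|

energy = e_loc.mean()                                  # Eq. (5)
grad = 2.0 * np.mean(dlogpsi * (e_loc - energy))       # Eq. (6)

# Analytic values for comparison: L(theta) = theta**2/2 - Z*theta = -0.48
# and dL/dtheta = theta - Z = -0.2 at theta = 0.8, Z = 1.
print(f"energy = {energy:.4f}, gradient = {grad:.4f}")
```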
92
+
93
+ # 2.2. Stochastic Reconfiguration
94
+
95
+ One popular approach for optimizing variational wavefunctions is known as stochastic reconfiguration (SR) [13, 14, 15]. In recent years, SR has been widely adopted for optimizing neural quantum states in second quantization [33, 34]. It also serves as the starting point for the MinSR method to be introduced later.
96
+
97
+ SR is based on the idea of imaginary time evolution, which states that the ground-state $|\psi_0\rangle$ can be found via the formula
98
+
99
+ $$
100
+ \left| \psi_ {0} \right\rangle = \lim _ {\tau \rightarrow \infty} e ^ {- \tau H} | \psi \rangle \tag {7}
101
+ $$
102
+
103
+ for any wavefunction $|\psi \rangle$ such that $\langle \psi_0|\psi \rangle \neq 0$ . In SR, we optimize a variational ansatz $|\psi_{\theta}\rangle$ by finding parameter updates that approximate small imaginary time steps. In particular, we choose a step-size $\delta \tau$ and use this to define the desired updated wavefunction $|\psi^{\prime}\rangle = e^{-\delta \tau H}|\psi_{\theta}\rangle$ . Due to the constraints of the ansatz, we cannot update our wavefunction to $|\psi^{\prime}\rangle$ directly. Instead we seek a parameter update $d\theta$ such that $|\psi_{\theta +d\theta}\rangle$ is close to $|\psi^{\prime}\rangle$ . To measure the closeness, we rely on the Fubini-Study distance defined by
104
+
105
+ $$
106
+ D (| \psi \rangle , | \phi \rangle) = \arccos \frac {| \langle \psi | \phi \rangle |}{\| \psi \| \| \phi \|}. \tag {8}
107
+ $$
108
+
109
+ In practice, we make a first order approximation to both $|\psi^{\prime}\rangle$ and $|\psi_{\theta +d\theta}\rangle$ and then minimize a second-order approximation to $D^{2}(|\psi^{\prime}\rangle ,|\psi_{\theta +d\theta}\rangle)$ . See [35] for a more thorough discussion of the geometric underpinnings of stochastic reconfiguration.
110
+
111
+ To present the formula for the update, we first define the functions $\bar{O}(R)$ and $\bar{\epsilon}(R)$ . The first quantity, $\bar{O}(R)$ , is the transposed gradient of the logarithm of the normalized wavefunction, evaluated at the point $R$ . The second quantity, $\bar{\epsilon}(R)$ , is the first-order change in the logarithm of the normalized wavefunction induced by the imaginary time-step $\delta \tau$ , evaluated at the point $R$ . These quantities are given by
112
+
113
+ $$
114
+ \bar {O} (R) = \left(\nabla_ {\theta} \log | \psi_ {\theta} (R) | - \mathbb {E} _ {R \sim p} [ \nabla_ {\theta} \log | \psi_ {\theta} (R) | ]\right) ^ {T}, \tag {9}
115
+ $$
116
+
117
+ $$
118
+ \bar {\epsilon} (R) = - \delta \tau \left(E _ {L} (R) - \mathbb {E} _ {R \sim p} \left[ E _ {L} (R) \right]\right). \tag {10}
119
+ $$
120
+
121
+ It can be shown that, to second order, the square of the Fubini-Study distance is given by
122
+
123
+ $$
124
+ D ^ {2} \left(\left| \psi^ {\prime} \right\rangle , \left| \psi_ {\theta + d \theta} \right\rangle\right) = \mathbb {E} _ {R \sim p} \left[ \left| \bar {O} (R) d \theta - \bar {\epsilon} (R) \right| ^ {2} \right]. \tag {11}
125
+ $$
126
+
127
+ This leads us to define the SR parameter update by
128
+
129
+ $$
130
+ d \theta = \underset {d \theta^ {\prime}} {\operatorname {a r g m i n}} \mathbb {E} _ {R \sim p} \left[ \left| \bar {O} (R) d \theta^ {\prime} - \bar {\epsilon} (R) \right| ^ {2} \right]. \tag {12}
131
+ $$
132
+
133
+ A detailed derivation of this formulation of stochastic reconfiguration can be found in [26].
134
+
135
+ In practice, (11) must always be approximated using a finite collection of $N_{s}$ samples $R_{1},\ldots ,R_{N_{s}}$ . To estimate $\bar{O} (R)$ and $\bar{\epsilon} (R)$ at the sampled points, we must empirically estimate the expectation values that appear in their definitions. We first define
136
+
137
+ $$
138
+ O = \frac {1}{\sqrt {N _ {s}}} \left[ \begin{array}{c} \nabla_ {\theta} \log | \psi_ {\theta} (R _ {1}) | \\ \vdots \\ \nabla_ {\theta} \log | \psi_ {\theta} (R _ {N _ {s}}) | \end{array} \right], \epsilon = - \frac {\delta \tau}{\sqrt {N _ {s}}} \left[ \begin{array}{c} E _ {L} (R _ {1}) \\ \vdots \\ E _ {L} (R _ {N _ {s}}) \end{array} \right]. \tag {13}
139
+ $$
140
+
141
+ Next, let $\mathbf{1}$ be the column vector of length $N_{s}$ whose entries are all equal to 1 and let $P = \frac{1}{N_s}\mathbf{1}\mathbf{1}^T$ represent the orthogonal projector onto the span of $\mathbf{1}$ . When acting on the left, the operator $P$ replaces each row of the matrix or vector on its right with the average of all of its rows. Our empirical estimates of $\bar{O}(R)$ and $\bar{\epsilon}(R)$ can thus be collected as
142
+
143
+ $$
144
+ \bar {O} = (I - P) O, \bar {\epsilon} = (I - P) \epsilon . \tag {14}
145
+ $$
146
+
147
+ The estimate of the Fubini-Study distance of the update $d\theta$ can then be written as
148
+
149
+ $$
150
+ D ^ {2} \left(\left| \psi^ {\prime} \right\rangle , \left| \psi_ {\theta + d \theta} \right\rangle\right) \approx \left\| \bar {O} d \theta - \bar {\epsilon} \right\| ^ {2}, \tag {15}
151
+ $$
152
+
153
+ and the update takes the form
154
+
155
+ $$
156
+ d \theta = \underset {d \theta^ {\prime}} {\operatorname {a r g m i n}} \left\| \bar {O} d \theta^ {\prime} - \bar {\epsilon} \right\| ^ {2}. \tag {16}
157
+ $$
158
+
159
+ In traditional SR, many more samples are taken than there are parameters in the ansatz, meaning that the least-squares problem is overdetermined and should have a unique solution. To protect against the case when $\bar{O}$ is ill-conditioned or singular, a Tikhonov regularization is generally added to yield the regularized problem
160
+
161
+ $$
162
+ d \theta = \underset {d \theta^ {\prime}} {\operatorname {a r g m i n}} \frac {1}{\lambda} \left\| \bar {O} d \theta^ {\prime} - \bar {\epsilon} \right\| ^ {2} + \| d \theta^ {\prime} \| ^ {2}. \tag {17}
163
+ $$
164
+
165
+ In the case of real parameters and a real wavefunction, the solution to (17) is given by
166
+
167
+ $$
168
+ d \theta = \left(\bar {O} ^ {T} \bar {O} + \lambda I\right) ^ {- 1} \bar {O} ^ {T} \bar {\epsilon}. \tag {18}
169
+ $$
170
+
171
+ This is more traditionally written as
172
+
173
+ $$
174
+ d \theta = - \frac {\delta \tau}{2} (S + \lambda I) ^ {- 1} g, \tag {19}
175
+ $$
176
+
177
+ where $S = \bar{O}^T\bar{O}$ , $g = \nabla_{\theta}L(\theta) = -2\bar{O}^T\bar{\epsilon} /\delta \tau$ , and the imaginary time step has been halved to cancel the factor of two in the gradient.
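
As a minimal illustration of Eqs. (13)-(19) (assuming NumPy; not taken from any particular VMC code), the regularized SR update can be assembled from per-sample log-derivatives and local energies as follows. Shapes and names are illustrative, and the dense $N_p \times N_p$ solve shown here is only practical when $N_p$ is small.

```python
# Sketch of the regularized SR update, Eqs. (13)-(19).
# grad_log_psi has shape (N_s, N_p); e_loc has shape (N_s,).
import numpy as np

def sr_update(grad_log_psi, e_loc, dtau=0.05, lam=1e-3):
    n_s = grad_log_psi.shape[0]
    O = grad_log_psi / np.sqrt(n_s)                   # Eq. (13)
    eps = -dtau * e_loc / np.sqrt(n_s)
    O_bar = O - O.mean(axis=0, keepdims=True)         # (I - P) O, Eq. (14)
    eps_bar = eps - eps.mean()                        # (I - P) eps
    S = O_bar.T @ O_bar + lam * np.eye(O_bar.shape[1])
    return np.linalg.solve(S, O_bar.T @ eps_bar)      # Eq. (18)
```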
178
+
179
+ # 2.3. Minimum-Step Stochastic Reconfiguration
180
+
181
+ For a problem with $N_{p}$ variational parameters, the traditional SR update scales as $O(N_{p}^{3})$ due to the need to invert the $N_{p} \times N_{p}$ matrix $S$. The insight of MinSR is that the parameter update can be calculated much more efficiently when $N_{s} \ll N_{p}$. In fact, the linear system is highly underdetermined in this setting. The MinSR method addresses this issue by choosing the solution with minimal norm. For regularization, MinSR utilizes a pseudoinverse with a cutoff for small eigenvalues. Subsequently, Rende et al. [27] suggested using a Tikhonov regularization instead. This approach amounts to solving the same regularized equation as in traditional SR, namely (17). In the underdetermined setting, the application of the Sherman-Morrison-Woodbury formula to (18) yields the solution
182
+
183
+ $$
184
+ d \theta = \bar {O} ^ {T} \left(\bar {O} \bar {O} ^ {T} + \lambda I\right) ^ {- 1} \bar {\epsilon}. \tag {20}
185
+ $$
186
+
187
+ This is the version of MinSR that we use as the basis for SPRING. The key point is that the update can now be calculated efficiently since $\bar{O}\bar{O}^T$ is only $N_{s} \times N_{s}$ rather than $N_{p} \times N_{p}$ .
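
A corresponding sketch of the Tikhonov-regularized MinSR update of Eq. (20) is shown below (again assuming NumPy, with illustrative names and sizes). The assertion checks numerically that it coincides with the $N_p \times N_p$ form of Eq. (18), as the Sherman-Morrison-Woodbury identity guarantees for any $\lambda > 0$.

```python
# Sketch of the regularized MinSR update, Eq. (20): only an N_s x N_s solve.
import numpy as np

def minsr_update(O_bar, eps_bar, lam=1e-3):
    n_s = O_bar.shape[0]
    T = O_bar @ O_bar.T + lam * np.eye(n_s)     # N_s x N_s matrix
    return O_bar.T @ np.linalg.solve(T, eps_bar)

# Consistency check against the N_p x N_p formula of Eq. (18) on random data.
rng = np.random.default_rng(1)
O_bar = rng.normal(size=(16, 200))              # N_s = 16 samples, N_p = 200 parameters
eps_bar = rng.normal(size=16)
reference = np.linalg.solve(O_bar.T @ O_bar + 1e-3 * np.eye(200), O_bar.T @ eps_bar)
assert np.allclose(minsr_update(O_bar, eps_bar), reference)
```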
188
+
189
+ It is worth noting that methods very similar to MinSR were introduced independently in the machine learning community as early as 2019. For example, Zhang, Martens, and Grosse described a pseudoinverse-based approach to natural gradient descent for the overparameterized case when the number of parameters is greater than the total number of data-points available [36]. Subsequently, Ren and Goldfarb proposed an efficient subsampled natural gradient method [25] for the case when the minibatch size is smaller than the number of parameters. In fact, the Tikhonov-regularized form of MinSR can be viewed as a streamlined implementation of the method of Ren and Goldfarb which applies whenever the gradient of the loss function is a linear combination of the model gradients at the sampled points. We elaborate on this connection in Appendix A.
192
+
193
+ # 2.4. Kaczmarz method for solving linear systems
194
+
195
+ MinSR enables the efficient solution of an underdetermined subsample from the SR equation, but the solution of such an equation is not necessarily a good approximation to the solution of the original SR equation. Our goal is to develop an algorithm that can converge to the solution of the original SR equation using only a series of underdetermined subsamples. This is the motivation behind the Kaczmarz algorithm, and in particular its blocked variants.
196
+
197
+ To understand the Kaczmarz algorithm, consider an overdetermined system of linear equations $Ax = b$ , where $A$ is $m \times n$ and
198
+
199
+ $$
200
+ A = \left[ \begin{array}{c} a _ {1} ^ {T} \\ \vdots \\ a _ {m} ^ {T} \end{array} \right], b = \left[ \begin{array}{c} b _ {1} \\ \vdots \\ b _ {m} \end{array} \right]. \tag {21}
201
+ $$
202
+
203
+ Assume that $A$ has full column rank and that the system is consistent, meaning that there exists a unique $x^{*}$ such that $Ax^{*} = b$ . The earliest form of the Kaczmarz algorithm, which dates back to the first half of the twentieth century [37], solves this problem iteratively. A starting guess $x_{0}$ is required for initialization and is then honed by iterations of the form
204
+
205
+ $$
206
+ x _ {k} = x _ {k - 1} + \frac {a _ {i}}{\left\| a _ {i} \right\| ^ {2}} \left(b _ {i} - a _ {i} ^ {T} x _ {k - 1}\right), \tag {22}
207
+ $$
208
+
209
+ where $i$ is chosen to cycle through all the rows of $A$ one at a time. The interpretation is that $x_{k}$ is attained by projecting $x_{k - 1}$ onto the solution space of the sampled equation $a_{i}^{T}x = b_{i}$ .
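
As a concrete example of one such projection (with numbers chosen purely for illustration), taking $a_i = (3, 4)^T$, $b_i = 10$, and $x_{k-1} = (0, 0)^T$ gives

$$
x _ {k} = x _ {k - 1} + \frac {a _ {i}}{\left\| a _ {i} \right\| ^ {2}} \left(b _ {i} - a _ {i} ^ {T} x _ {k - 1}\right) = \frac {10}{25} \left[ \begin{array}{c} 3 \\ 4 \end{array} \right] = \left[ \begin{array}{c} 1.2 \\ 1.6 \end{array} \right],
$$

which is indeed the point on the hyperplane $3x_1 + 4x_2 = 10$ closest to $x_{k-1}$.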
210
+
211
+ The seminal work of Strohmer and Vershynin [28] proposed a randomized variant of the algorithm which samples row $i$ with a probability proportional to $\| a_i\|^2$ . With this randomized sampling strategy, they showed that the algorithm converges as
212
+
213
+ $$
214
+ \mathbb {E} \left[ \| x _ {k} - x ^ {*} \| ^ {2} \right] \leq \left(1 - \kappa_ {D} (A) ^ {- 2}\right) ^ {k} \| x _ {0} - x ^ {*} \| ^ {2}, \tag {23}
215
+ $$
216
+
217
+ where $\kappa_{D}(A) = \| A\|_{F}\| A^{+}\|_{2}$ is the Demmel condition number of $A$ and $A^{+}$ is the Moore-Penrose pseudoinverse of $A$ .
218
+
219
+ Since single-row updates are very inefficient on modern hardware, blocked versions of the randomized Kaczmarz algorithm were subsequently proposed and analyzed, e.g. [29]. Such randomized block Kaczmarz algorithms take the form
220
+
221
+ $$
222
+ x _ {k} = x _ {k - 1} + A _ {\sigma} ^ {+} \left(b _ {\sigma} - A _ {\sigma} x _ {k - 1}\right), \tag {24}
223
+ $$
224
+
225
+ where $\sigma$ represents a randomly selected subset of the rows of $A$ and $b$, and $A_{\sigma}^{+}$ represents the pseudoinverse of $A_{\sigma}$. The interpretation is similar to before: $x_{k}$ represents the projection of $x_{k-1}$ onto the solution space of the sampled equation $A_{\sigma}x = b_{\sigma}$. For consistent equations the randomized block Kaczmarz algorithm converges linearly to the true solution $x^{*}$, though the convergence rate is more complicated than in the single row case and has a more nuanced dependence on the sampling procedure.
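
The following sketch (assuming NumPy; the problem sizes, block size, and uniform sampling of row blocks are all illustrative choices) runs the iteration of Eq. (24) on a small consistent system and shows the error decaying towards zero.

```python
# Sketch of randomized block Kaczmarz, Eq. (24), on a consistent system A x = b.
import numpy as np

rng = np.random.default_rng(0)
m, n, block = 400, 20, 10
A = rng.normal(size=(m, n))
x_star = rng.normal(size=n)
b = A @ x_star                                          # consistent by construction

x = np.zeros(n)
for k in range(100):
    sigma = rng.choice(m, size=block, replace=False)    # sample a block of rows
    A_s, b_s = A[sigma], b[sigma]
    # project x onto the solution space of the sampled equations A_s x = b_s
    x = x + np.linalg.pinv(A_s) @ (b_s - A_s @ x)
    if k % 20 == 0:
        print(k, np.linalg.norm(x - x_star))            # error shrinks geometrically
```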
226
+
227
+ # 3. Methods
228
+
229
+ We now present our main contribution, the Subsampled Projected-Increment Natural Gradient Descent (SPRING) algorithm for optimizing neural network wavefunctions. To start, recall the formula for the SR parameter update:
230
+
231
+ $$
232
+ d \theta = \underset {d \theta^ {\prime}} {\operatorname {a r g m i n}} \mathbb {E} _ {R \sim p} \left[ \left| \bar {O} (R) d \theta^ {\prime} - \bar {\epsilon} (R) \right| ^ {2} \right]. \tag {25}
233
+ $$
234
+
235
+ Since $R$ is drawn from a continuous and high-dimensional space this equation can be seen as a highly overdetermined linear least-squares problem, with each configuration $R$ corresponding to a single row. Now, let $\bar{O}_k$ and $\bar{\epsilon}_k$ denote the subsamples from $\bar{O}(R)$ and $\bar{\epsilon}(R)$ that are available at VMC training iteration $k$ . Our approach hinges on the assumption that the imaginary time-step is small, meaning the parameter vector $\theta$ changes only slightly at each iteration. Thus, although the sampled subproblems $(\bar{O}_{k-1}, \bar{\epsilon}_{k-1})$ , $(\bar{O}_k, \bar{\epsilon}_k)$ can be completely different, the underlying SR equations are nearly identical. This inspires us to proceed as follows:
236
+
237
+ 1. Make the approximation that the recent iterates $(\bar{O}_k,\bar{\epsilon}_k),(\bar{O}_{k - 1},\bar{\epsilon}_{k - 1}),\ldots$ are all random block samples from the current SR equation
238
+
239
+ $$
240
+ \underset {d \theta^ {\prime}} {\operatorname {a r g m i n}} \mathbb {E} _ {R \sim p} \left[ \left| \bar {O} (R) d \theta^ {\prime} - \bar {\epsilon} (R) \right| ^ {2} \right]. \tag {26}
241
+ $$
242
+
243
+ 2. Apply the randomized block Kaczmarz method to these iterates to yield an approximate solution $\phi_{k}$ to the current SR equation.
244
+ 3. Apply some scaling and clipping procedures, to be described later, to $\phi_{k}$ to calculate the parameter update $d\theta_{k}$ .
245
+
246
+ In this way SPRING takes advantage of the entire optimization history to inform each parameter update, distinguishing it from MinSR which leverages only a single minibatch of data at a time.
247
+
248
+ Following the structure of a single step of the randomized block Kaczmarz method [29], we calculate $\phi_{k}$ by projecting $\phi_{k - 1}$ onto the solution space of the underdetermined equation $\bar{O}_k\phi = \bar{\epsilon}_k$. Recall that the basic form for the Kaczmarz projection is
249
+
250
+ $$
251
+ \phi_ {k} = \phi_ {k - 1} + \bar {O} _ {k} ^ {+} (\bar {\epsilon} _ {k} - \bar {O} _ {k} \phi_ {k - 1}). \tag {27}
252
+ $$
253
+
254
+ Equivalently we can say that $\phi_k$ satisfies
255
+
256
+ $$
257
+ \phi_ {k} = \underset {\phi} {\arg \min } \left\| \phi - \phi_ {k - 1} \right\| ^ {2} \quad \text {s.t.} \quad \bar {O} _ {k} \phi = \bar {\epsilon} _ {k}. \tag {28}
258
+ $$
259
+
260
+ Thus, at a conceptual level, our algorithm differs from MinSR only in how we break the indeterminacy of the sampled subproblem: MinSR chooses the solution of minimal norm, whereas SPRING chooses the solution that is nearest to the previous approximate solution.
261
+
262
+ In practice $\bar{O}_k$ can be singular or very ill-conditioned, so it is beneficial to add some form of regularization to this projection. We choose to regularize the projection by incorporating the linear equations using a penalty term rather than a hard constraint. To make this modification, we introduce a regularization parameter $\lambda$ and use the formula
263
+
264
+ $$
265
+ \phi_ {k} = \underset {\phi} {\operatorname {a r g m i n}} \frac {1}{\lambda} \left\| \bar {O} _ {k} \phi - \bar {\epsilon} _ {k} \right\| ^ {2} + \| \phi - \phi_ {k - 1} \| ^ {2}. \tag {29}
266
+ $$
267
+
268
+ Even with this regularization, we find that directly using (29) can result in unstable optimization trajectories (see Figure 10b). We do not yet have an explanation for this phenomenon. To stabilize the method, we decay the previous gradient by a small amount before projecting it. Formally, this stabilization comes in the form of a new regularization parameter $\mu$ and a modified update formula
269
+
270
+ $$
271
+ \phi_ {k} = \underset {\phi} {\operatorname {a r g m i n}} \frac {1}{\lambda} \left\| \bar {O} _ {k} \phi - \bar {\epsilon} _ {k} \right\| ^ {2} + \| \phi - \mu \phi_ {k - 1} \| ^ {2}. \tag {30}
272
+ $$
273
+
274
+ An explicit formula for $\phi_{k}$ can now be derived. We first define $\pi_k = \phi_k - \mu \phi_{k - 1}$ , $\pi = \phi -\mu \phi_{k - 1}$ and $\bar{\zeta}_k = \bar{\epsilon}_k - \mu \bar{O}_k\phi_{k - 1}$ and note that (30) can be recast as
275
+
276
+ $$
277
+ \pi_ {k} = \underset {\pi} {\operatorname {a r g m i n}} \frac {1}{\lambda} \left\| \bar {O} _ {k} \pi - \bar {\zeta} _ {k} \right\| ^ {2} + \| \pi \| ^ {2}. \tag {31}
278
+ $$
279
+
280
+ This formula now has the same form as MinSR with Tikhonov regularization; by the push-through identity $(\bar{O}_k^T \bar{O}_k + \lambda I)^{-1} \bar{O}_k^T = \bar{O}_k^T (\bar{O}_k \bar{O}_k^T + \lambda I)^{-1}$ , its solution is given by
281
+
282
+ $$
283
+ \pi_ {k} = \bar {O} _ {k} ^ {T} \left(\bar {O} _ {k} \bar {O} _ {k} ^ {T} + \lambda I\right) ^ {- 1} \bar {\zeta} _ {k}. \tag {32}
284
+ $$
285
+
286
+ Finally, we can translate back to $\phi_k$ yielding
287
+
288
+ $$
289
+ \phi_ {k} = \bar {O} _ {k} ^ {T} (\bar {O} _ {k} \bar {O} _ {k} ^ {T} + \lambda I) ^ {- 1} \bar {\zeta} _ {k} + \mu \phi_ {k - 1}. \tag {33}
290
+ $$
291
+
292
+ To understand this formula, it is helpful to explicitly write out the three terms:
293
+
294
+ $$
295
+ \phi_ {k} = \bar {O} _ {k} ^ {T} (\bar {O} _ {k} \bar {O} _ {k} ^ {T} + \lambda I) ^ {- 1} \bar {\epsilon} _ {k} + \mu \phi_ {k - 1} - \mu \bar {O} _ {k} ^ {T} (\bar {O} _ {k} \bar {O} _ {k} ^ {T} + \lambda I) ^ {- 1} \bar {O} _ {k} \phi_ {k - 1}. \tag {34}
296
+ $$
297
+
298
+ The first term alone corresponds to MinSR, while the first two terms together correspond to a scaled version of MinSR with momentum (to be presented in Section 3.4). It is the addition of the third term that concretely distinguishes SPRING from these other methods.
299
+
300
+ Equation (33) is almost the formula that we use in practice. The only missing piece is an extra stabilization procedure that we introduce in Section 3.2, which is not essential but should be included for best performance. Finally, to calculate $d\theta_{k}$ , we also include a learning rate schedule $\eta_{k}$ and a norm constraint $C$ which ensures that the parameters are not changed too much at each iteration (see Section 3.1). To ensure that the Kaczmarz algorithm is implemented consistently, regardless of the learning rate schedule, we always calculate $\bar{\epsilon}_{k}$ using $\delta \tau = 1$ and then apply the learning rate $\eta_{k}$ only when we calculate $d\theta_{k}$ . For convenience we present the full procedure for SPRING including these extra details in Algorithm 1.
301
+
302
+ It is worth noting that due to several technical details, we cannot directly transfer the convergence results for the classical Kaczmarz method to the VMC setting. First of all, the inclusion of the parameter $\mu$ in SPRING distinguishes it substantially from the Kaczmarz method. We can understand this distinction as arising from the fact that in SPRING, the underlying least-squares system changes a small amount after each iteration. Thus, it makes sense to progressively forget the information from previous iterations, which is what we accomplish by setting $\mu < 1$ . Additionally, the least-squares problem arising in VMC need not be consistent, and in the inconsistent setting the Kaczmarz method only converges to within a ball around the true solution, with the radius of the ball depending on how inconsistent the system is [38]. For these reasons, we cannot derive a rigorous convergence guarantee for SPRING with regard to either $d\theta$ or $\theta$ . Still, the connection to the Kaczmarz method serves to motivate the algorithm and explain its superior performance relative to MinSR.
303
+
304
+ # Algorithm 1 SPRING
305
+
306
+ Require: Hamiltonian $H$ , ansatz $\psi_{\theta}$
307
+
308
+ Require: Initialization $\theta_0$ , iteration count $K$ , batch size $N_s$ , learning rate schedule $\eta_k$
309
+
310
+ Require: Tikhonov damping $\lambda$ , decay factor $\mu$ , norm constraint $C$
311
+
312
+ 1: $\theta \gets \theta_0$
313
+ 2: $\phi \gets 0$
314
+
315
+ 3: for k in 1:K do
316
+
317
+ 4: Sample $R_{1},\ldots ,R_{N_{s}}$ from $p(R) = \psi_{\theta}(R)^{2} / \langle \psi_{\theta}|\psi_{\theta}\rangle$ ▷ i.e. using MCMC
318
+ 5: Calculate $\bar{O}$, $\bar{\epsilon}$ using $\delta \tau = 1$ via (13), (14)
319
+ 6: $\bar{\zeta} \gets \bar{\epsilon} - \mu \bar{O}\phi$
320
+ 7: $\phi \gets \bar{O}^T (\bar{O}\bar{O}^T +\lambda I + \frac{1}{N_s}\mathbf{1}\mathbf{1}^T)^{-1}\bar{\zeta} +\mu \phi$ $\triangleright$ following (39) and using Cholesky
321
+ 8: $d\theta \gets \phi \cdot \min (\eta_k,\sqrt{C} /\| \phi \|)$ $\triangleright$ See Section 3.1
322
+ 9: $\theta \gets \theta + d\theta$
323
+
324
+ 10: end for
325
+
326
+ 11: return $\theta$
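+
+ For concreteness, the core update in lines 6–9 of Algorithm 1 can be written as the following minimal NumPy sketch. It assumes the centered matrix `O_bar`, the residual vector `eps_bar` (computed with $\delta \tau = 1$ ), and the previous iterate `phi_prev` are already available; all variable names and default values are illustrative only.
+
+ ```python
+ import numpy as np
+
+ def spring_update(O_bar, eps_bar, phi_prev, lam=1e-3, mu=0.99, lr=0.02, C=1e-3):
+     """One SPRING parameter update, following lines 6-9 of Algorithm 1.
+
+     O_bar:    (N_s, N_p) centered log-derivative matrix
+     eps_bar:  (N_s,)     centered residual, computed with dtau = 1
+     phi_prev: (N_p,)     previous Kaczmarz iterate
+     """
+     n_s = O_bar.shape[0]
+     zeta = eps_bar - mu * (O_bar @ phi_prev)                      # line 6
+     T = O_bar @ O_bar.T                                           # small (N_s x N_s) matrix
+     P = np.ones((n_s, n_s)) / n_s                                 # projector onto span{1}, i.e. omega = 1
+     rhs = np.linalg.solve(T + lam * np.eye(n_s) + P, zeta)       # regularized solve, Eq. (39)
+     phi = O_bar.T @ rhs + mu * phi_prev                           # line 7
+     dtheta = phi * min(lr, np.sqrt(C) / np.linalg.norm(phi))      # line 8, norm constraint of Section 3.1
+     return phi, dtheta
+ ```
+
+ A single optimization step would then read `phi, dtheta = spring_update(O_bar, eps_bar, phi); theta = theta + dtheta`, with `phi` initialized to zero before the first iteration.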
327
+
328
+ # 3.1. Norm Constraint
329
+
330
+ As an additional stabilization procedure, we include a constraint on the norm of the MinSR, $\mathrm{MinSR + M}$ , and SPRING updates inspired by the norm constraint used by KFAC. We have found that including this norm constraint helps to stabilize the optimization procedure and enable larger learning rates to be applied, and it also improves the ability to transfer learning rates from one system to another. For reference we briefly describe how the norm constraint used by KFAC works before introducing the form of the norm constraint that we use for MinSR and SPRING.
331
+
332
+ The norm constraint in KFAC is "natural" in the sense that it applies to the norm induced by the Fisher information matrix as opposed to the Euclidean norm. To define this scheme let $g$ be the estimated energy gradient, $\eta$ be the learning rate, $S$ be KFAC's approximation to the Fisher information matrix, and $\phi = S^{-1}g$ be the unscaled and unconstrained KFAC parameter update. The final update will be $d\theta = c\phi$ for some scalar $c$ . The norm constraint relies upon the fact that $\| d\theta \|_S^2 = c^2\phi^T S\phi = c^2\phi^T g$ and as a result it enforces the constraint
333
+
334
+ $$
335
+ c ^ {2} \phi^ {T} g \leq C. \tag {35}
336
+ $$
337
+
338
+ Concretely this is achieved by choosing
339
+
340
+ $$
341
+ d \theta = \phi \cdot \min (\eta , \sqrt {C} / \sqrt {\phi^ {T} g}). \tag {36}
342
+ $$
343
+
344
+ In SPRING the update $d\theta$ and the gradient $g$ are not related by a positive definite matrix $S$ ; hence the above scheme is not justified. Instead we implement a simple Euclidean norm constraint given by
345
+
346
+ $$
347
+ \left\| d \theta \right\| ^ {2} \leq C. \tag {37}
348
+ $$
349
+
350
+ Just like in KFAC, if the unconstrained update would violate this constraint, we scale the update down to ensure that the constraint is satisfied. Concretely, we calculate the update $d\theta_{k}$ from the Kaczmarz iterate $\phi_{k}$ and the learning rate $\eta_{k}$ using the formula
351
+
352
+ $$
353
+ d \theta_ {k} = \phi_ {k} \cdot \min \left(\eta_ {k}, \sqrt {C} / \| \phi_ {k} \|\right).
354
+ $$
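+
+ As a minimal illustration (function and variable names are placeholders, not from any particular code base), the two rescaling rules can be written as:
+
+ ```python
+ import numpy as np
+
+ def kfac_rescale(phi, g, eta, C):
+     # Natural norm constraint, Eq. (36): enforces c^2 * phi^T g <= C
+     return phi * min(eta, np.sqrt(C) / np.sqrt(phi @ g))
+
+ def euclidean_rescale(phi, eta, C):
+     # Euclidean norm constraint used for MinSR and SPRING, Eq. (37)
+     return phi * min(eta, np.sqrt(C) / np.linalg.norm(phi))
+ ```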
355
+
356
+ # 3.2. Numerically Stable Inversion
357
+
358
+ In exact arithmetic, $T = \bar{O}\bar{O}^T$ is positive semidefinite, $T + \lambda I$ is positive definite, and $(T + \lambda I)^{-1}\bar{\zeta}$ can be computed efficiently using the Cholesky decomposition. However, in single precision we have found that the $T$ matrix can become indefinite. As a result, the Cholesky decomposition can fail if the regularization is chosen to be too small, and we have even observed this to occur when using our default value of $\lambda = 0.001$ .
359
+
360
+ We now note that $T$ is always singular due to the construction of $\bar{O}$ . Recall that $\mathbf{1}$ represents the column vector whose entries are all equal to 1 and $P = \frac{1}{N_s}\mathbf{1}\mathbf{1}^T$ represents the orthogonal projector onto the span of $\mathbf{1}$ . Since $\bar{O} = (I - P)O$ , we have that
361
+
362
+ $$
363
+ T \mathbf {1} = \bar {O} \bar {O} ^ {T} \mathbf {1} = \bar {O} O ^ {T} (I - P) \mathbf {1} = \bar {O} O ^ {T} (\mathbf {1} - \mathbf {1}) = 0. \tag {38}
364
+ $$
365
+
366
+ This means that any numerical perturbation to $T$ can make it indefinite. If the numerical perturbation is large enough, the resulting $T$ matrix can have an eigenvalue smaller than $-\lambda$ , in which case the Cholesky factorization fails. This can happen even when all of the other eigenvalues of $T$ are well-separated from 0.
367
+
368
+ We alleviate this issue by replacing the matrix $T + \lambda I$ with the further regularized matrix $T + \lambda I + \omega P$ for some positive real $\omega$ . We thus calculate the Kaczmarz iterate for iteration $k$ as
369
+
370
+ $$
371
+ \phi_ {k} = \bar {O} _ {k} ^ {T} \left(T _ {k} + \lambda I + \omega P\right) ^ {- 1} \bar {\zeta} _ {k} + \mu \phi_ {k - 1}. \tag {39}
372
+ $$
373
+
374
+ This does not affect the value of $\phi_{k}$ in exact arithmetic. To see this, note that $\mathbf{1}$ is an eigenvector of $(T_{k} + \lambda I)$ with eigenvalue $\lambda$ , and is thus also an eigenvector of $(T_{k} + \lambda I + \omega P)$ with eigenvalue $\omega + \lambda$ . As a result, we have
375
+
376
+ $$
377
+ \left(T _ {k} + \lambda I + \omega P\right) ^ {- 1} = \left(T _ {k} + \lambda I\right) ^ {- 1} + \left(\frac {1}{\omega + \lambda} - \frac {1}{\lambda}\right) P. \tag {40}
378
+ $$
379
+
380
+ Acting on the left with $\bar{O}_k^T$ then annihilates the extra term, since $\bar{O}_k^T \mathbf{1} = O_k^T (I - P)\mathbf{1} = 0$ and hence $\bar{O}_k^T P = 0$ :
381
+
382
+ $$
383
+ \begin{array}{l} \bar {O} _ {k} ^ {T} (T _ {k} + \lambda I + \omega P) ^ {- 1} = \bar {O} _ {k} ^ {T} (T _ {k} + \lambda I) ^ {- 1} + \left(\frac {1}{\omega + \lambda} - \frac {1}{\lambda}\right) \bar {O} _ {k} ^ {T} P \\ = \bar {O} _ {k} ^ {T} (T _ {k} + \lambda I) ^ {- 1}. \\ \end{array}
384
+ $$
385
+
386
+ Thus, using (39) reduces the likelihood that numerical errors cause the Cholesky solver to fail without otherwise affecting the computation. In our experiments we have used the value $\omega = 1$ in all cases. However, it is theoretically possible for the eigenvalues of $T$ to all be smaller than 1, in which case setting $\omega = 1$ would increase the condition number of the regularized $T$ matrix. In such a case $\omega$ could instead be chosen for example to be the mean of the eigenvalues of the original $T$ matrix.
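+
+ Both facts used in this section are easy to check numerically. The sketch below (arbitrary illustrative sizes and data) builds a mean-centered $\bar{O}$ , verifies Eq. (38), and confirms that the extra $\omega P$ term leaves the product $\bar{O}^T(T + \lambda I + \omega P)^{-1}\bar{\zeta}$ unchanged up to round-off.
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(1)
+ n_s, n_p = 8, 30
+ ones = np.ones(n_s)
+ P = np.outer(ones, ones) / n_s                             # orthogonal projector onto span{1}
+ O_bar = (np.eye(n_s) - P) @ rng.normal(size=(n_s, n_p))    # O_bar = (I - P) O: each column has zero mean
+ zeta = rng.normal(size=n_s)
+
+ T = O_bar @ O_bar.T
+ lam, omega = 1e-3, 1.0
+
+ # Eq. (38): the all-ones vector lies in the null space of T
+ print(np.allclose(T @ ones, 0.0))
+
+ # Adding omega * P changes the inverse, but O_bar^T P = 0 annihilates the difference
+ x_plain = O_bar.T @ np.linalg.solve(T + lam * np.eye(n_s), zeta)
+ x_reg = O_bar.T @ np.linalg.solve(T + lam * np.eye(n_s) + omega * P, zeta)
+ print(np.allclose(x_plain, x_reg))
+ ```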
387
+
388
+ # 3.3. MinSR implementation details
389
+
390
+ We now briefly discuss the version of MinSR that we use for our numerical experiments, which is very similar but not identical to the basic version presented in Section 2.3. Functionally, the only differences are the inclusion of a norm constraint and the extra regularization of the matrix $\bar{O}_k\bar{O}_k^T$ as described in Section 3.2. Additionally, for consistency with SPRING we set $\delta \tau = 1$ when
391
+
392
+ calculating $\bar{\epsilon}_k$ and we instead control the step size with a learning rate parameter $\eta_k$ . With these modifications the MinSR algorithm is described by the following formulas:
393
+
394
+ $$
395
+ \phi_ {k} = \bar {O} _ {k} ^ {T} (\bar {O} _ {k} \bar {O} _ {k} ^ {T} + \lambda I + P) ^ {- 1} \bar {\epsilon} _ {k}, \tag {41}
396
+ $$
397
+
398
+ $$
399
+ d \theta_ {k} = \phi_ {k} \cdot \min \left(\eta_ {k}, \sqrt {C} / \| \phi_ {k} \|\right). \tag {42}
400
+ $$
401
+
402
+ # 3.4. MinSR with Momentum
403
+
404
+ We motivate SPRING primarily via its relation to the Kaczmarz method. An alternative viewpoint is to see SPRING as a modification of a momentum method with an added projection step to remove the component of the momentum that conflicts with the current subproblem $\bar{O}_k d\theta = \bar{\epsilon}_k$ . While we do not endorse this perspective, it is natural to wonder how SPRING would compare to MinSR with naïve momentum, which we refer to as MinSR+M.
405
+
406
+ Concretely, MinSR+M iteratively updates a momentum direction $\phi_{k}$ by taking a convex combination of the previous value $\phi_{k - 1}$ and the current MinSR solution. Just like in SPRING, we assume an imaginary time step of $\delta \tau = 1$ when we calculate $\bar{\epsilon}_k$ and instead control the step size using a learning rate parameter $\eta_{k}$ and a norm constraint. The method is thus described by the formulas:
407
+
408
+ $$
409
+ \phi_ {k} = (1 - \mu) \bar {O} _ {k} ^ {T} (\bar {O} _ {k} \bar {O} _ {k} ^ {T} + \lambda I + P) ^ {- 1} \bar {\epsilon} _ {k} + \mu \phi_ {k - 1}, \tag {43}
410
+ $$
411
+
412
+ $$
413
+ d \theta_ {k} = \phi_ {k} \cdot \min \left(\eta_ {k}, \sqrt {C} / \| \phi_ {k} \|\right). \tag {44}
414
+ $$
415
+
416
+ As in SPRING, the $\mu$ parameter acts as a decay factor on the previous parameter update. However, MinSR+M differs significantly from SPRING in that no regularized projector is applied to the previous update. It also differs in that the MinSR solution is scaled by a factor of $(1 - \mu)$ to achieve a convex combination, which means $\mu$ must be chosen to be strictly less than one. See Section 4.3 and Figure 10a for further discussion of the choice of $\mu$ for MinSR+M. We demonstrate in our numerical experiments that SPRING outperforms MinSR+M substantially. This supports the viewpoint that SPRING is not simply a momentum method, but is rather a way of improving the quality of the parameter updates as approximate solutions to the SR equation.
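+
+ In the same illustrative NumPy style as the SPRING sketch above (names and default values are assumptions for illustration only), the MinSR+M update of Eqs. (43)-(44) amounts to:
+
+ ```python
+ import numpy as np
+
+ def minsr_m_update(O_bar, eps_bar, phi_prev, lam=1e-3, mu=0.9, lr=0.2, C=1e-3):
+     """MinSR with naive momentum (MinSR+M), Eqs. (43)-(44)."""
+     n_s = O_bar.shape[0]
+     P = np.ones((n_s, n_s)) / n_s
+     minsr_sol = O_bar.T @ np.linalg.solve(O_bar @ O_bar.T + lam * np.eye(n_s) + P, eps_bar)
+     phi = (1.0 - mu) * minsr_sol + mu * phi_prev     # convex combination; phi_prev is not projected
+     dtheta = phi * min(lr, np.sqrt(C) / np.linalg.norm(phi))
+     return phi, dtheta
+ ```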
417
+
418
+ # 4. Results
419
+
420
+ We now turn to our numerical results. Our main finding is that SPRING outperforms KFAC, MinSR, and MinSR+M for several small atoms and molecules. We present results for the carbon, nitrogen, and oxygen atoms in Section 4.1 and for the $N_{2}$ and CO molecules in Section 4.2. We summarize the final energies attained in our main experiments in Table 1. We use VMCNet [30], a neural network VMC framework based on JAX [39], to run all our experiments. For all of our experiments, we use a standard FermiNet architecture with 16 dense determinants; see Table B.2 for details.
421
+
422
+ For our training phase we use one hundred thousand optimization iterations with one thousand MCMC samples per iteration. For our MCMC proposals we use Gaussian all-electron moves with a step-size that is tuned dynamically to maintain an acceptance ratio of approximately $50\%$ . We take ten MCMC steps between optimization iterations to reduce correlations between the samples, and we use mean-centered local energy clipping to improve the stability of the optimization. We use a decaying learning rate with a decay rate of $r = 10^{-4}$ and learning rate schedule $\eta_{k} = \frac{\eta}{1 + rk}$ . For MinSR, MinSR+M, and SPRING, we assume $\delta \tau = 1$ and apply the learning rate $\eta_{k}$ as described
423
+
424
+ <table><tr><td>System</td><td>KFAC</td><td>MinSR</td><td>MinSR+M</td><td>SPRING</td><td>Benchmark</td><td>Benchmark source</td></tr><tr><td>C</td><td>-37.8445</td><td>-37.8445</td><td>-37.8448</td><td>-37.8449</td><td>-37.8450</td><td>[40], Table XI</td></tr><tr><td>N</td><td>-54.5877</td><td>-54.5885</td><td>-54.5889</td><td>-54.5890</td><td>-54.5892</td><td>[40], Table XI</td></tr><tr><td>O</td><td>-75.0643</td><td>-75.0652</td><td>-75.0659</td><td>-75.0668</td><td>-75.0673</td><td>[40], Table XI</td></tr><tr><td>N2, equilibrium</td><td>-109.5301</td><td>-109.5268</td><td>-108.5294</td><td>-109.5322</td><td>-109.5423</td><td>[41], Table II</td></tr><tr><td>N2, 4.0 Bohr</td><td>-109.1862</td><td>-109.1794</td><td>-109.1829</td><td>-109.1906</td><td>-109.2021</td><td>[42], MLR4(6,8)</td></tr><tr><td>CO</td><td>-113.3140</td><td>-113.3092</td><td>-113.3117</td><td>-113.3169</td><td>-113.3255</td><td>[3], Table II</td></tr></table>
425
+
426
+ Table 1: Summary of final energies attained with tuned learning rates for all systems studied. Energies are reported in Hartrees, to four decimal places.
427
+
428
+ in Section 3. For more details on our VMC settings, see Table B.3. To obtain final energies we run a separate inference phase with the parameters from the last iteration of each optimization run; see Table B.4 for details.
429
+
430
+ To compare the methods fairly, we do not start them from fully random initializations. Rather, for each system in question, we first run a short preliminary optimization and save the result to a checkpoint. The rest of our experiments then load the starting parameters from the end of the preliminary optimization phase, switch out the optimizer and hyperparameters to the desired settings, and run one hundred thousand further optimization iterations. This procedure removes the chaotic early optimization stage as a factor from the comparison between the different methods, ensuring that no method is unfairly advantaged by randomly having a favorable start. Our preliminary optimization is different from the Hartree-Fock pretraining procedure (see [3, 6] for example) because we optimize the variational energy from the beginning. However, our results should translate well to a setting with pretraining as a result of this preliminary optimization. See Table B.5 for more details on the preliminary optimization phase.
431
+
432
+ For all of our optimizers we use a Tikhonov regularization parameter of $\lambda = 0.001$ and a norm constraint of $C = 0.001$ unless indicated otherwise. For $\mathrm{MinSR + M}$ we use a momentum parameter of $\mu = 0.9$ , and for SPRING we use the regularization parameter $\mu = 0.99$ . In Section 4.3 we present several hyperparameter studies which demonstrate that these values provide a fair basis for comparing the methods. We also investigate the effect of the preliminary optimization phase and demonstrate that SPRING outperforms KFAC significantly without preliminary optimization.
433
+
434
+ In all of our numerical results we report energy errors relative to the benchmark energies listed in Table 1. For the energy and variance trajectories, we smooth out the curves by reporting averages over a sliding window of ten thousand iterations. We note that we are using a somewhat smaller network, fewer MCMC samples, and fewer optimization iterations than were used in most state-of-the-art calculations with KFAC; hence it should not be surprising that some of our results do not reach the same level of accuracy as those presented in the literature for comparable systems. Our results can be viewed as a proof-of-principle for the value of SPRING on small calculations, which will need to be extended to larger calculations by future works.
435
+
436
+ # 4.1. Small atoms
437
+
438
+ We first present results for KFAC, MinSR, MinSR+M, and SPRING on the carbon, nitrogen, and oxygen atoms. We find that it is critical to tune the learning rate of each method in order to achieve the best performance and obtain a fair comparison. Thus, for each method, we test a number of learning rates on the carbon atom, with results presented in Figure 1. Our experiments lead us to choose a learning rate of $\eta = 0.02$ for KFAC, $\eta = 0.1$ for MinSR, $\eta = 0.2$ for MinSR+M, and $\eta = 0.02$ for SPRING.
439
+
440
+ ![](images/9c801e54bfaef14b0a35ce030f4c11eda04551624bdeaba7bb96b77a2b8980d3.jpg)
441
+ (a) KFAC
442
+
443
+ ![](images/c67f8eef2d2aa4cee8adce93e23aaa49c3bc5e889cc9758bbd6b52bd0df35d3d.jpg)
444
+ (b) MinSR
445
+
446
+ ![](images/ed2a39f39ea461ce6161261440ab3a93b3e0c5d690c004a89b3043ba758dc719.jpg)
447
+ (c) MinSR+M
448
+
449
+ ![](images/52a0c5c56d0f303bf668d99b6ec69d139de09acf1bd830f014d7ccc40007c87d.jpg)
450
+ (d) SPRING
451
+ Figure 1: Learning rate sweeps on the carbon atom with four different optimizers.
452
+
453
+ After tuning the learning rates on the carbon atom, we use the selected learning rates to compare the methods on carbon, nitrogen, and oxygen. The results with optimized learning rates are shown in Figure 2. On the carbon atom, we find that SPRING reaches chemical accuracy significantly faster than the other methods. Relative to KFAC and MinSR, its final energy error is about a factor of four lower, and its final local energy variance is about an order of magnitude lower. On nitrogen and oxygen, SPRING continues to outperform the other methods. In the case of the oxygen atom, SPRING reaches chemical accuracy after forty thousand iterations, whereas MinSR+M takes eighty thousand iterations and the other methods never reach chemical accuracy.
454
+
455
+ # 4.2. Molecules
456
+
457
+ We next test KFAC, MinSR, MinSR+M, and SPRING on two somewhat larger systems: the $\mathrm{N}_2$ and CO molecules. We begin by tuning the learning rates for all four methods on the $\mathrm{N}_2$ molecule at an equilibrium bond distance of 2.016 Bohr. We present the results of the learning rate sweeps in Figure 3. Based on these results, we pick learning rates of $\eta = 0.05$ for KFAC, $\eta = 0.02$ for MinSR, $\eta = 0.02$ for MinSR+M, and $\eta = 0.002$ for SPRING. These learning rates are not always chosen strictly for the best final energy since the final energy can vary due to statistical fluctuations in the parameters and statistical errors in the energy estimation. We instead look at the optimization trajectories and the final energies and pick the learning rate that appears to be optimal based on the combination of the two. It is worth noting that the optimal learning rates for MinSR+M and SPRING change by a factor of 10 between the carbon atom and the $\mathrm{N}_2$ molecule, whereas the optimal learning rate for MinSR changes by only a factor of 5, and the optimal learning rate for KFAC changes by only a factor of 2. The optimal learning rate may thus be more system-dependent for MinSR+M and SPRING compared to the other methods.
458
+
459
+ Using these optimized learning rates, we then compare the four methods on the $\mathrm{N}_2$ molecule both at equilibrium and at a stretched bond distance of 4.0 Bohr, and on the CO molecule at an
460
+
461
+ ![](images/b0c9c2f1661ef23f51345290b30b9b085849b3d1238e84ccd564247bb1ea43b2.jpg)
462
+
463
+ ![](images/8fa50ff87081d97596356da62d16272b9c7934e8d193c5f7897bb0b361705e18.jpg)
464
+ (a) Carbon atom.
465
+
466
+ ![](images/8bc7c87bfb39c0a1486e0e5bfa12637e49247e37dbbe60d5388bf78bb79b17fe.jpg)
467
+
468
+ ![](images/5f8f664e4cc2667e07e7700409069b399762e148af83fe9c28410f06273f38e8.jpg)
469
+
470
+ ![](images/64f6f0be01bb9ab153b5fb7a6b5dbddbeb2ffe925fdfead260239bfe7e96644f.jpg)
471
+ (b) Nitrogen atom.
472
+
473
+ ![](images/040f770b931e2fec5fe5061a687d2c2328c47d40ac795c52f07a3e53d3632ebe.jpg)
474
+
475
+ ![](images/61d7eaa3fde4846baff2eff52788afdc7cb623ae70371a307c7e6c118bdc0494.jpg)
476
+ Figure 2: Comparison of methods on three small atoms, with learning rates tuned on the carbon atom.
477
+
478
+ ![](images/0ae236a2f7c98c6825769ef54aaf8e5cffebc0f13f0f62a78e6c8af224d6264c.jpg)
479
+ (c) Oxygen atom
480
+
481
+ ![](images/cd6720ba11d5c86c580e4504a7a3baae4a5982d4339f5994996ef904cc623b62.jpg)
482
+
483
+ ![](images/eda2d4c1f057280029fb28cb54b6ea4ba8fa61ec7399eabee8a50f17f56f804e.jpg)
484
+ (a) KFAC
485
+
486
+ ![](images/51950ccca1b1c266a746dbe31ab7aa08b2b53aba0c7826f037e4861baaf87b33.jpg)
487
+ (b) MinSR
488
+
489
+ ![](images/3ed8b9c7c1424bf24d6febb4f15d04770337346fc79b34479215e88fc782247a.jpg)
490
+ (c) MinSR+M
491
+
492
+ ![](images/a465857740c0eb875e180208c85b2b6e5b4edec4ee1a6b7cb0dc585f8074a9d3.jpg)
493
+ (d) SPRING
494
+ Figure 3: Learning rate sweeps on the nitrogen molecule at equilibrium bond distance with four different optimizers.
495
+
496
+ equilibrium bond distance of 2.173 Bohr. The results of these comparisons are presented in Figure 4 and Figure 5. For these systems none of the optimizers are able to reach chemical accuracy, which is expected due to the relatively small network, few MCMC samples, and few optimization iterations that we are using. Nonetheless, SPRING still displays a marked advantage over the other methods, converging much faster and to a lower energy and energy variance in all three cases. The MinSR+M optimizer only improves upon MinSR marginally for these systems, and it is outperformed by KFAC. This provides evidence that the SPRING algorithm is distinctly better than simply adding momentum on top of MinSR.
497
+
498
+ # 4.3. Hyperparameter studies
499
+
500
+ In this section, we provide several experiments that demonstrate that our main results are robust to the choices of $C$ and $\mu$ . We also provide evidence that the results are not significantly skewed by the preliminary optimization phase. For simplicity we focus on the carbon atom for the purposes of this section.
501
+
502
+ We first show in Figure 6 how the results of the learning rate experiments differ if we do not apply a norm constraint to any of the methods. In all four cases, the performance is much more sensitive to the learning rate when the constraint is turned off, and we are prevented from displaying results with larger learning rates because they produce completely unstable optimization trajectories. The combination of these factors means that without the norm constraints, it is much more difficult to find an effective learning rate, and our ability to transfer the learning rate from system to system is hindered. Nonetheless, even with the norm constraint off, the tuned version of SPRING significantly outperforms the tuned versions of the other methods, as shown in Figure 7. In Figure 8, we show that neither increasing nor decreasing the norm constraint used for KFAC enables it to compete with the performance of SPRING on the carbon atom. This provides further evidence that the advantage of SPRING is not related to the choice or the form of the norm constraint.
503
+
504
+ ![](images/b6b612db85be2457cd1bd52da66f7032d2da959012e39647fe50e6bd4d126820.jpg)
505
+ (a) Equilibrium configuration, bond distance 2.016 Bohr.
506
+
507
+ ![](images/2a2440d94c73b86862b82d20b715e91a3c729dc9d88583e14daebe4b0bf640b0.jpg)
508
+
509
+ ![](images/ce6f39d371da8edbc01b44fec79cef7d3c807146e95233c76db23dcef96f8ccc.jpg)
510
+
511
+ ![](images/6ccb2e6ad127dc5f5df9914fec6aeb33857355eff093a5a8a5ec77649906b652.jpg)
512
+ (b) Stretched configuration, bond distance 4.0 Bohr.
513
+
514
+ ![](images/884d65a13ff2924413e6ad692b122cf0616a7c5e6557d2a5736554a2900e86a6.jpg)
515
+ Figure 4: Comparison of methods on $\mathbf{N}_2$ molecule at two bond distances, with learning rates tuned at equilibrium.
516
+
517
+ ![](images/ce655ae7d12262532713416fb6bbfb2cd551a170362d2c3c50372ad2e2d7c983.jpg)
518
+
519
+ ![](images/96f80da36997e5976e4fb14df35e3b303653fa8beafc5a167271f50f1e57cec6.jpg)
520
+ Figure 5: Comparison of methods on CO molecule, with learning rates tuned on $\mathrm{N}_2$ .
521
+
522
+ ![](images/d6bf65d7a812f9e9b41cd11b49d4a5f750dc9216b4ab24dc47b77baf0528fdc7.jpg)
523
+
524
+ ![](images/a98761e3756b54106f5cece340e57126bc778a74f8aa745e77a0d5a0fd4d79d0.jpg)
525
+
526
+ ![](images/b343acc941ab8fc8f8910afa09745662ca3a08420c5f10ade7283ced443c6b48.jpg)
527
+ (a) KFAC
528
+
529
+ ![](images/61dccd10cb78284b68afe04a50599f1284df38b97d8f3623ee7c722e24d5ea35.jpg)
530
+ (b) MinSR
531
+
532
+ ![](images/9af997dd622d4fd0e77adf1a7b7a16abbfedc8e4e0b36915f999586b6e8c5c41.jpg)
533
+ (c) MinSR+M
534
+
535
+ ![](images/0d19eb0b3bd8f97e69ce2493cb559618d861245676a794d652d3a27ce8770206.jpg)
536
+ (d) SPRING
537
+ Figure 6: Learning rate sweeps on the carbon atom without norm constraints with four different optimizers.
538
+
539
+ To better understand the impact of the norm constraint on SPRING, we return to the learning rate sweep for SPRING on the carbon atom and provide some additional data from these five optimization runs. In particular, we plot the scale factor $q_{k} = \max (1,\| \eta_{k}\phi_{k}\| /\sqrt{C})$ . For example, if $q_{k} = 5$ then this means that at iteration $k$ the norm constraint resulted in a scaling down of the parameter update by a factor of 5, and if $q_{k} = 1$ then the norm constraint at iteration $k$ had no effect. We see in Figure 9 that $q_{k}$ tends to grow over the course of the first one hundred optimization iterations and then decay to 1 as the optimization progresses. We can understand this in the following way: $q_{k}$ first rises as more optimization history is incorporated into SPRING; it then decays due to both the learning rate decay and the decay of the energy gradient. As should be expected, with a larger learning rate, $q_{k}$ reaches a larger peak and takes longer to decay to 1.
540
+
541
+ Next, we test the MinSR+M scheme with several different values of the momentum parameter
542
+
543
+ ![](images/3db5c11c9d9bd8039bad114e17a936d76f4eddb226a2bdd4ddefe4c677accbbf.jpg)
544
+ Figure 7: Comparison of methods on carbon atom, with tuned learning rates and no norm constraint.
545
+
546
+ ![](images/42fd337672756a804a17b7b0f8a912446d92c7a737daa71ba3e38174cebd3210.jpg)
547
+
548
+ ![](images/fc6fc42c0d1b14e5c7ac7b839a07f6eba38cc2f4cdd3071ab86c23da40c8b741.jpg)
549
+
550
+ ![](images/1dab07a24af6e54bc6eec122a7e8fe85ba27ff6d4bc1da19dc4d8c82e36530e7.jpg)
551
+ Figure 8: Testing KFAC on the carbon atom with different values of the norm constraint $C$ . No value performs better than the default value of $C = 0.001$ and all settings are outperformed by SPRING with $C = 0.001$ . All runs use the optimized learning rate of $\eta = 0.02$ .
552
+
553
+ ![](images/59692420a3ebeb50991d49cd8d121ad6359f834f07bd2fdab5d0c0cf976638cd.jpg)
554
+ Figure 9: Effect of the norm constraint on the SPRING parameter update during the learning rate sweep on the carbon atom. The quantity $q_{k} = \max (1, \| \eta_{k}\phi_{k}\| /\sqrt{C})$ represents the extent to which the parameter update is scaled down as a result of the norm constraint.
555
+
556
+ ![](images/7a130cd22d525f5952357a122a28f23b1fe01aec7cbfaca5c7e817b740f328e9.jpg)
557
+ (a)
558
+
559
+ ![](images/4b051e1ccd0dba3cb02d24f751d3ea02831c35591af077eba6063c02056991c0.jpg)
560
+ (b)
561
+ Figure 10: Hyperparameter studies for the parameter $\mu$ for both MinSR+M and SPRING. (a) MinSR+M with several values of the momentum parameter $\mu$ , on the carbon atom. The learning rate is held fixed at the previously tuned value of $\eta = 0.2$ . (b) SPRING on the carbon atom with several values of the regularization parameter $\mu$ . All values converge well except for the unregularized case $\mu = 1.0$ , shown in black, for which the optimization is unstable and encounters NaNs after approximately twenty thousand epochs. The learning rate is held fixed at the previously tuned value of $\eta = 0.02$
562
+
563
+ $\mu$ , using the previously optimized learning rate of $\eta = 0.2$ . For values larger than $\mu = 0.95$ we find that the optimization is unstable and we do not report numerical results. We find that choosing $\mu$ anywhere between 0.8 and 0.95 produces approximately optimal results, which justifies our use of $\mu = 0.9$ for our main experiments. Results are shown in Figure 10a. Similarly, we test SPRING with several values of its regularization parameter $\mu$ , again using the previously optimized learning rate of $\eta = 0.02$ . Results are shown in Figure 10b. We find that choosing $\mu$ anywhere between 0.98 and 0.999 produces approximately optimal results, which justifies our choice of $\mu = 0.99$ for our main experiments. Interestingly, if $\mu = 1.0$ then the method becomes unstable. We do not yet have an explanation for the source of this instability.
564
+
565
+ Finally, we consider the effects of the preliminary optimization phase. Since we have used KFAC for the preliminary optimization in all cases, it is unclear thus far whether SPRING can perform well when run from a random initialization. To ameliorate this concern, we compare the performance of SPRING against KFAC without any preliminary optimization, with results in Figure 11. We find that KFAC outperforms SPRING in the very early stages of the optimization. However, SPRING overtakes KFAC after about seven thousand training iterations and reaches chemical accuracy in fewer than half as many iterations as KFAC. We conclude that SPRING is also effective starting from a random initialization.
566
+
567
+ # 4.4. Computational Cost
568
+
569
+ The computational cost of SPRING is essentially identical to that of MinSR, as the extra computations for SPRING are negligible in cost. The same applies to MinSR+M. For all three of these methods, the computational bottleneck lies in computing the matrix $T = \bar{O}\bar{O}^T$ , which has asymptotic complexity $O(N_p\cdot N_s^2)$ . Since the number of parameters $N_p$ is much larger than the number of samples $N_s$ in our setting, this cost dominates the $O(N_{s}^{3})$ cost of the Cholesky factorization.
570
+
571
+ Relative to KFAC, we expect the cost of SPRING to be highly dependent on the system size, the number of MCMC samples per iteration, and the number of GPUs used to parallelize the calculation, and we do not attempt to perform a systematic study along these lines. In our experiments, we use
572
+
573
+ ![](images/1a483758febcd5eb354840f3dc032c26a5f9811e5c00d9a32b4ec87cf7248a6e.jpg)
574
+ Figure 11: Comparison of SPRING versus KFAC on the carbon atom without preliminary optimization. We use the previously tuned learning rate of $\eta = 0.02$ for both KFAC and SPRING.
575
+
576
+ a single GeForce RTX 2080Ti GPU with 1000 MCMC walkers. With these particular settings, we find that SPRING results in VMC iterations that are about $50\%$ slower than KFAC for our smaller systems such as the carbon atom. As system size grows, the difference is reduced, and for the $N_{2}$ molecule the VMC iterations with SPRING are only about $5\%$ slower than with KFAC.
577
+
578
+ # 5. Discussion
579
+
580
+ Neural network wavefunctions represent a promising avenue towards highly accurate simulations of small but challenging molecular systems. The major bottleneck of applying such wavefunctions is the high cost associated with their optimization. In this work we introduce a new optimizer called SPRING to alleviate this bottleneck. SPRING combines ideas from the recently proposed MinSR optimizer [26] with ideas from the randomized Kaczmarz method for solving overdetermined least-squares problems [28]. By doing so, SPRING is able to utilize optimization history to improve upon MinSR in a principled way at essentially no extra cost. We test SPRING on several small atoms and molecules, comparing it against MinSR, MinSR with momentum, and KFAC. We find that SPRING consistently outperforms the alternatives across all tested systems. We hope that these findings will be extended to larger systems by future works.
581
+
582
+ Due to several discrepancies between the VMC setting and the traditional Kaczmarz setting, we are not yet able to furnish a rigorous proof of convergence for the SPRING optimizer. One interesting direction for future research is to modify SPRING in a way that makes it possible to prove its convergence. For example, the exponentially convergent randomized Kaczmarz method of Strohmer and Vershynin requires that the rows are sampled with probability proportional to the square of their Euclidean norm [28]. Furthermore, in the randomized block Kaczmarz method, Needell and Tropp find that for optimal performance it is critical to partition the constraints of the least-squares problem into well-conditioned blocks [29]. Such sampling schemes would represent a significant departure from traditional VMC methods that sample from the probability density of the wavefunction, but they could lead to rigorous convergence guarantees, better performance, or both. There are also variants of the Kaczmarz method such as the randomized extended Kaczmarz algorithm of Zouzias and Freris [43] that can converge to the solution of inconsistent least-squares problems. This technique cannot be directly applied to the VMC setting since it requires sampling
583
+
584
+ the columns as well as the rows of the system. However, it could serve as inspiration for further development of the algorithm.
585
+
586
+ We also draw a connection between the MinSR and SPRING methods for optimizing neural network wavefunctions and the efficient subsampled natural gradient descent method of Ren and Goldfarb [25]. In particular, we show in Appendix A that MinSR can be viewed as a simplified implementation of the method of Ren and Goldfarb which applies as long as the gradient of the loss function is a linear combination of the model gradients at the sampled points. This is quite a common scenario in machine learning, occurring for example in supervised learning with a mean-squared error loss function. In such a setting, SPRING can then be viewed as a potential improvement to existing subsampled natural gradient descent methods. We leave it to future works to determine whether SPRING can yield performance improvements for applications outside of VMC.
587
+
588
+ # Acknowledgements
589
+
590
+ This research used the Savio computational cluster resource provided by the Berkeley Research Computing program at the University of California, Berkeley (supported by the UC Berkeley Chancellor, Vice Chancellor for Research, and Chief Information Officer). This material is based upon work supported by the U.S. Department of Energy, Office of Science, Office of Advanced Scientific Computing Research, Department of Energy Computational Science Graduate Fellowship under Award Number(s) DE-SC0023112 (G.G.). This effort was supported by the SciAI Center, and funded by the Office of Naval Research (ONR), under Grant Number N00014-23-1-2729 (N.A.). LL is a Simons Investigator in Mathematics. We thank Yixiao Chen, Zhiyan Ding, Yuehaw Khoo, Michael Lindsey, Eric Neuscamman, and Zaiwen Wen for their helpful discussions, and the anonymous reviewers for their valuable comments.
591
+
592
+ # Disclaimer
593
+
594
+ This report was prepared as an account of work sponsored by an agency of the United States Government. Neither the United States Government nor any agency thereof, nor any of their employees, makes any warranty, express or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. Reference herein to any specific commercial product, process, or service by trade name, trademark, manufacturer, or otherwise does not necessarily constitute or imply its endorsement, recommendation, or favoring by the United States Government or any agency thereof. The views and opinions of authors expressed herein do not necessarily state or reflect those of the United States Government or any agency thereof.
595
+
596
+ # References
597
+
598
+ [1] WMC Foulkes, Lubos Mitas, RJ Needs, and Guna Rajagopal. Quantum monte carlo simulations of solids. Reviews of Modern Physics, 73(1):33, 2001.
599
+ [2] Federico Becca and Sandro Sorella. Quantum Monte Carlo approaches for correlated systems. Cambridge University Press, 2017.
600
+
601
+ [3] David Pfau, James S Spencer, Alexander GDG Matthews, and W Matthew C Foulkes. Ab initio solution of the many-electron schrödinger equation with deep neural networks. Physical Review Research, 2(3):033429, 2020.
602
+ [4] Jan Hermann, Zeno Schatzle, and Frank Noé. Deep-neural-network solution of the electronic schrödinger equation. Nature Chemistry, 12(10):891-897, 2020.
603
+ [5] Zeno Schätzle, PB Szabó, Matej Mezera, Jan Hermann, and Frank Noé. Deepqmc: An open-source software suite for variational optimization of deep-learning molecular wave functions. The Journal of Chemical Physics, 159(9), 2023.
604
+ [6] Ingrid von Glehn, James S Spencer, and David Pfau. A self-attention ansatz for ab-initio quantum chemistry. arXiv preprint arXiv:2211.13672, 2022.
605
+ [7] Leon Gerard, Michael Scherbela, Philipp Marquetand, and Philipp Grohs. Gold-standard solutions to the schrödinger equation using deep learning: How much physics do we need? Advances in Neural Information Processing Systems, 35:10282-10294, 2022.
606
+ [8] Gino Cassella, Halvard Sutterud, Sam Azadi, N. D. Drummond, David Pfau, James S. Spencer, and W. M. C. Foulkes. Discovering quantum phase transitions with fermionic neural networks. Phys. Rev. Lett., 130:036401, Jan 2023.
607
+ [9] Jane Kim, Gabriel Pescia, Bryce Fore, Jannes Nys, Giuseppe Carleo, Stefano Gandolfi, Morten Hjorth-Jensen, and Alessandro Lovato. Neural-network quantum states for ultra-cold fermi gases, 2023.
608
+ [10] Wan Tong Lou, Halvard Sutterud, Gino Cassella, W. M. C. Foulkes, Johannes Knolle, David Pfau, and James S. Spencer. Neural wave functions for superfluids, 2023.
609
+ [11] Gabriel Pescia, Jannes Nys, Jane Kim, Alessandro Lovato, and Giuseppe Carleo. Message-passing neural quantum states for the homogeneous electron gas, 2023.
610
+ [12] Xiang Li, Zhe Li, and Ji Chen. Ab initio calculation of real solids via neural network ansatz. Nature Communications, 13(1):7895, 2022.
611
+ [13] Sandro Sorella. Generalized lanczos algorithm for variational quantum monte carlo. Phys. Rev. B, 64:024512, Jun 2001.
612
+ [14] MP Nightingale and Vilen Melik-Alaverdian. Optimization of ground-and excited-state wave functions and van der waals clusters. Physical review letters, 87(4):043401, 2001.
613
+ [15] Sandro Sorella, Michele Casula, and Dario Rocca. Weak binding between two aromatic rings: Feeling the van der waals attraction by quantum monte carlo methods. The Journal of chemical physics, 127(1), 2007.
614
+ [16] C. J. Umrigar, Julien Toulouse, Claudia Filippi, S. Sorella, and R. G. Hennig. Alleviation of the fermion-sign problem by optimization of many-body wave functions. Phys. Rev. Lett., 98:110201, Mar 2007.
615
+ [17] Julien Toulouse and Cyrus J Umrigar. Optimization of quantum monte carlo wave functions by energy minimization. The Journal of chemical physics, 126(8), 2007.
616
+
617
+ [18] Eric Neuscamman, CJ Umrigar, and Garnet Kin-Lic Chan. Optimizing large parameter sets in variational quantum monte carlo. Physical Review B, 85(4):045103, 2012.
618
+ [19] Luning Zhao and Eric Neuscamman. A blocked linear method for optimizing large parameter sets in variational monte carlo. Journal of chemical theory and computation, 13(6):2604-2611, 2017.
619
+ [20] Iliya Sabzevari, Ankit Mahajan, and Sandeep Sharma. An accelerated linear method for optimizing non-linear wavefunctions in variational monte carlo. The Journal of chemical physics, 152(2), 2020.
620
+ [21] Robert J Webber and Michael Lindsey. Rayleigh-gauss-newton optimization with enhanced sampling for variational monte carlo. Physical Review Research, 4(3):033099, 2022.
621
+ [22] James Martens and Roger Grosse. Optimizing neural networks with kronecker-factored approximate curvature. In International conference on machine learning, pages 2408-2417. PMLR, 2015.
622
+ [23] Shun-Ichi Amari. Natural gradient works efficiently in learning. Neural computation, 10(2):251-276, 1998.
623
+ [24] Frederik Benzing. Gradient descent on neurons and its link to approximate second-order optimization. In International Conference on Machine Learning, pages 1817-1853. PMLR, 2022.
624
+ [25] Yi Ren and Donald Goldfarb. Efficient subsampled gauss-newton and natural gradient methods for training neural networks. arXiv preprint arXiv:1906.02353, 2019.
625
+ [26] Ao Chen and Markus Heyl. Efficient optimization of deep neural quantum states toward machine precision, 2023.
626
+ [27] Riccardo Rende, Luciano Loris Viteritti, Lorenzo Bardone, Federico Becca, and Sebastian Goldt. A simple linear algebra identity to optimize large-scale neural network quantum states. arXiv preprint arXiv:2310.05715, 2023.
627
+ [28] Thomas Strohmer and Roman Vershynin. A randomized kaczmarz algorithm with exponential convergence. Journal of Fourier Analysis and Applications, 15(2):262-278, 2009.
628
+ [29] Deanna Needell and Joel A Tropp. Paved with good intentions: analysis of a randomized block kaczmarz method. Linear Algebra and its Applications, 441:199-221, 2014.
629
+ [30] Jeffmin Lin, Gil Goldshlager, and Lin Lin. VMCNet: Flexible, general-purpose VMC framework, built on JAX. http://github.com/jeffminlin/vmcnet, 2021.
630
+ [31] M. Born and R. Oppenheimer. Zur Quantentheorie der Molekeln. Ann. der Phys. (4), 84:457-484, 1927.
631
+ [32] Jeffmin Lin, Gil Goldshlager, and Lin Lin. Explicitly antisymmetrized neural network layers for variational monte carlo simulation. Journal of Computational Physics, 474:111765, 2023.
632
+ [33] Giuseppe Carleo and Matthias Troyer. Solving the quantum many-body problem with artificial neural networks. Science, 355(6325):602-606, 2017.
633
+
634
+ [34] Giuseppe Carleo, Kenny Choo, Damian Hofmann, James ET Smith, Tom Westerhout, Fabien Alet, Emily J Davis, Stavros Efthymiou, Ivan Glasser, Sheng-Hsuan Lin, et al. Netket: A machine learning toolkit for many-body quantum systems. SoftwareX, 10:100311, 2019.
635
+ [35] Chae-Yeun Park and Michael J Kastoryano. Geometry of learning neural quantum states. Physical Review Research, 2(2):023232, 2020.
636
+ [36] Guodong Zhang, James Martens, and Roger B Grosse. Fast convergence of natural gradient descent for over-parameterized neural networks. Advances in Neural Information Processing Systems, 32, 2019.
637
+ [37] Stefan Karczmarz. Angenäherte Auflösung von Systemen linearer Gleichungen. Bull. Int. Acad. Pol. Sci. Lett., Cl. Sci. Math. Nat., pages 355-357, 1937.
638
+ [38] Deanna Needell. Randomized kaczmarz solver for noisy linear systems. BIT Numerical Mathematics, 50:395-403, 2010.
639
+ [39] James Bradbury, Roy Frostig, Peter Hawkins, Matthew James Johnson, Chris Leary, Dougal Maclaurin, George Necula, Adam Paszke, Jake VanderPlas, Skye Wanderman-Milne, and Qiao Zhang. JAX: composable transformations of Python+NumPy programs, 2018.
640
+ [40] Subhas J Chakravorty, Steven R Gwaltney, Ernest R Davidson, Farid A Parpia, and Charlotte Froese Fischer. Ground-state correlation energies for atomic ions with 3 to 18 electrons. Physical Review A, 47(5):3649, 1993.
641
+ [41] Claudia Filippi and Cyrus J Umrigar. Multiconfiguration wave functions for quantum monte carlo calculations of first-row diatomic molecules. The Journal of Chemical Physics, 105(1):213-226, 1996.
642
+ [42] Robert J Le Roy, Yiye Huang, and Calvin Jary. An accurate analytic potential function for ground-state n2 from a direct-potential-fit analysis of spectroscopic data. The Journal of chemical physics, 125(16), 2006.
643
+ [43] Anastasios Zouzias and Nikolaos M Freris. Randomized extended kaczmarz for solving least squares. SIAM Journal on Matrix Analysis and Applications, 34(2):773-793, 2013.
644
+ [44] Minghan Yang, Dong Xu, Zaiwen Wen, Mengyun Chen, and Pengxiang Xu. Sketch-based empirical natural gradient methods for deep learning. Journal of Scientific Computing, 92(3):94, 2022.
645
+
646
+ # Appendix A. Connection to Efficient Subsampled Natural Gradient Descent
647
+
648
+ Recall that MinSR with Tikhonov regularization uses the update formula
649
+
650
+ $$
651
+ d \theta = \bar {O} ^ {T} (\lambda I + \bar {O} \bar {O} ^ {T}) ^ {- 1} \bar {\epsilon}, \tag {A.1}
652
+ $$
653
+
654
+ where $\lambda$ is the damping parameter. This is the same formula that arises from the "simple linear algebra trick" of Rende et al. [27]. We now introduce the efficient subsampled natural gradient method of Ren and Goldfarb [25] and show how it is equivalent to MinSR in the VMC setting. The key idea of Ren and Goldfarb is that a damped Fisher matrix arising from a small minibatch can
655
+
656
+ be viewed as a low-rank perturbation of the identity and is thus amenable to inversion using the Sherman-Morrison-Woodbury formula. Using the notation of the later work by Yang et al [44], the resulting update is
657
+
658
+ $$
659
+ d \theta = \frac {1}{\lambda} \left(- I + U \left(\lambda I + U ^ {T} U\right) ^ {- 1} U ^ {T}\right) g,
660
+ $$
661
+
662
+ where $g$ is the gradient estimator and the columns of $U$ contain the model gradients divided by the square-root of the number of samples used.
663
+
664
+ Now suppose, as is often the case, that the gradient of the loss function is simply a linear combination of the gradients at each input, with some input-dependent scalar weights. Then we can write the gradient estimator as $g = Uv$ for some column vector $v$ that depends on the sampled inputs. This allows us to simplify the NGD-SMW formula as follows:
665
+
666
+ $$
667
+ \begin{array}{l} d \theta = \frac {1}{\lambda} \left(- I + U \left(\lambda I + U ^ {T} U\right) ^ {- 1} U ^ {T}\right) U v \\ = \frac {1}{\lambda} \left(- U + U \left(\lambda I + U ^ {T} U\right) ^ {- 1} U ^ {T} U\right) v \\ = \frac {1}{\lambda} \left(- U + U (\lambda I + U ^ {T} U) ^ {- 1} (\lambda I + U ^ {T} U) - U (\lambda I + U ^ {T} U) ^ {- 1} (\lambda I)\right) v \\ = - U \left(\lambda I + U ^ {T} U\right) ^ {- 1} v. \\ \end{array}
668
+ $$
669
+
670
+ Comparing with (A.1), we see that this is equivalent to the MinSR formula when we identify $\bar{O}^T$ with $U$ and $\bar{\epsilon}$ with $v$ . Furthermore, this identification is the natural one, since the columns of $\bar{O}^T$ represent the gradients of the logarithm of the normalized wavefunction and the formula $g = \bar{O}^T\bar{\epsilon}$ holds in the VMC setting up to constant factors. Thus, the MinSR method can be viewed as a simplified way of implementing the efficient subsampled natural gradient method in the context of VMC. Furthermore, SPRING can be viewed as a potential improvement to subsampled natural gradient descent and may have applications outside the VMC setting.
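+
+ This equivalence is also easy to confirm numerically. The sketch below (arbitrary illustrative sizes; sign conventions follow the displayed NGD-SMW derivation rather than (A.1)) checks that the SMW-based update equals the simplified form $-U(\lambda I + U^T U)^{-1}v$ whenever $g = Uv$ .
+
+ ```python
+ import numpy as np
+
+ rng = np.random.default_rng(2)
+ n_params, n_samples = 40, 6
+ lam = 1e-3
+ U = rng.normal(size=(n_params, n_samples))   # columns: per-sample model gradients / sqrt(N_s)
+ v = rng.normal(size=n_samples)               # weights such that g = U v
+ g = U @ v
+
+ # NGD-SMW update (notation of Yang et al. [44])
+ inner = np.linalg.solve(lam * np.eye(n_samples) + U.T @ U, U.T @ g)
+ d_theta_smw = (-g + U @ inner) / lam
+
+ # Simplified MinSR-style form derived above
+ d_theta_simple = -U @ np.linalg.solve(lam * np.eye(n_samples) + U.T @ U, v)
+
+ print(np.allclose(d_theta_smw, d_theta_simple))
+ ```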
671
+
672
+ # Appendix B. Network Architecture and Hyperparameters
673
+
674
+ We list in Table B.2 the hyperparameters of the FermiNet architecture that we use in all our experiments. We list in Table B.3 the other settings used for our VMC training phase, in Table B.4 the settings used for our inference phase, and in Table B.5 the settings used for our preliminary optimization.
675
+
676
+ <table><tr><td>Hyperparameter</td><td>Value</td></tr><tr><td>One-electron stream width</td><td>256</td></tr><tr><td>Two-electron stream width</td><td>16</td></tr><tr><td>Number of equivariant layers</td><td>4</td></tr><tr><td>Backflow activation function</td><td>tanh</td></tr><tr><td>Number of determinants</td><td>16</td></tr><tr><td>Exponential envelope structure</td><td>Isotropic</td></tr><tr><td>Determinant type</td><td>Dense</td></tr></table>
677
+
678
+ Table B.2: List of FermiNet architecture hyperparameters used for all experiments.
679
+
680
+ <table><tr><td>Setting</td><td>Value</td></tr><tr><td>Standard deviations for local energy clipping</td><td>5</td></tr><tr><td>Number of walkers</td><td>1000</td></tr><tr><td>MCMC burn-in steps</td><td>5000</td></tr><tr><td>Training iterations</td><td>1e5</td></tr><tr><td>MCMC steps between updates</td><td>10</td></tr></table>
681
+
682
+ Table B.3: List of settings for the training phase.
683
+
684
+ <table><tr><td>Setting</td><td>Value</td></tr><tr><td>Number of walkers</td><td>2000</td></tr><tr><td>MCMC burn-in steps for C,N,O</td><td>5000</td></tr><tr><td>MCMC burn-in steps for N2</td><td>1e4</td></tr><tr><td>Inference iterations</td><td>2e4</td></tr><tr><td>MCMC steps between local energy measurements</td><td>10</td></tr></table>
685
+
686
+ Table B.4: List of settings for the inference phase.
687
+
688
+ <table><tr><td>Setting</td><td>Value</td></tr><tr><td>Optimizer</td><td>KFAC</td></tr><tr><td>Learning rate</td><td>0.05</td></tr><tr><td>Kernel initializers</td><td>Orthogonal</td></tr><tr><td>Bias initializers</td><td>Random normal</td></tr><tr><td>Standard deviations for local energy clipping</td><td>5</td></tr><tr><td>Number of walkers</td><td>1000</td></tr><tr><td>MCMC burn-in steps</td><td>5000</td></tr><tr><td>Training iterations for C,N,O</td><td>1000</td></tr><tr><td>Training iterations for N2</td><td>5000</td></tr><tr><td>MCMC steps between updates</td><td>10</td></tr></table>
689
+
690
+ Table B.5: List of settings for the preliminary optimization phase.
2401.10xxx/2401.10190/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ceb2ca12a7b59abb966c47f61c43b456b1ad04afc72bf5522a1ed40266b7c55
3
+ size 1155661
2401.10xxx/2401.10190/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10191/6ac68c8c-17b4-426f-b714-6afc129ebb6b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10191/6ac68c8c-17b4-426f-b714-6afc129ebb6b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10191/6ac68c8c-17b4-426f-b714-6afc129ebb6b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:47a466469a422cc8731815649caaff6882e945cf41eabcf2387ff641126afb0f
3
+ size 5821118
2401.10xxx/2401.10191/full.md ADDED
@@ -0,0 +1,332 @@
 
 
 
 
1
+ # DIVIDE AND NOT FORGET: ENSEMBLE OF SELECTIVELY TRAINED EXPERTS IN CONTINUAL LEARNING
2
+
3
+ Grzegorz Rypeść<sup>1,2*</sup>, Sebastian Cygert<sup>1,3</sup>, Valeriya Khan<sup>1,2</sup>, Tomasz Trzciński<sup>1,2,4</sup>, Bartosz Zieliński<sup>1,5</sup> & Bartłomiej Twardowski<sup>1,6,7</sup>
4
+
5
+ $^{1}$ IDEAS-NCBR, $^{2}$ Warsaw University of Technology, $^{3}$ Gdańsk University of Technology,
6
+ <sup>4</sup> Tooploox, <sup>5</sup> Jagiellonian University, <sup>6</sup> Computer Vision Center, Barcelona
7
+ <sup>7</sup> Department of Computer Science, Universitat Autònoma de Barcelona, {grzegorz.rypesc, sebastian.cygert, valeriya.khan, tomasz.trzcinski, bartosz.zielinski, bartlomiej.twardowski}@ideas-ncbr.pl
8
+
9
+ # ABSTRACT
10
+
11
+ Class-incremental learning is becoming more popular as it helps models widen their applicability while not forgetting what they already know. A trend in this area is to use a mixture-of-expert technique, where different models work together to solve the task. However, the experts are usually trained all at once using whole task data, which makes them all prone to forgetting and increasing computational burden. To address this limitation, we introduce a novel approach named SEED. SEED selects only one, the most optimal expert for a considered task, and uses data from this task to fine-tune only this expert. For this purpose, each expert represents each class with a Gaussian distribution, and the optimal expert is selected based on the similarity of those distributions. Consequently, SEED increases diversity and heterogeneity within the experts while maintaining the high stability of this ensemble method. The extensive experiments demonstrate that SEED achieves state-of-the-art performance in exemplar-free settings across various scenarios, showing the potential of expert diversification through data in continual learning.
12
+
13
+ # 1 INTRODUCTION
14
+
15
+ In Continual Learning (CL), tasks are presented to the learner sequentially as a stream of non-i.i.d data. The model has only access to the data in the current task. Therefore, it is prone to catastrophic forgetting of previously acquired knowledge (French, 1999; McCloskey & Cohen, 1989). This effect has been extensively studied in Class Incremental Learning (CIL), where the goal is to train the classifier incrementally and achieve the best accuracy for all classes seen so far. One of the most straightforward solutions to alleviate forgetting is to store exemplars of each class. However, its application is limited, e.g., due to privacy concerns or in memory-constrained devices (Ravaglia et al., 2021). That is why more challenging, exemplar-free CIL solutions attract a lot of attention.
16
+
17
+ Many recent CIL methods that do not store exemplars rely on having a strong feature extractor right from the beginning of incremental learning steps. This extractor is trained on the larger first task, which provides a substantial amount of data (i.e., $50\%$ of all available classes) (Hou et al., 2019; Zhu et al., 2022; Petit et al., 2023), or it starts from a large pre-trained model that remains unchanged (Hayes & Kanan, 2020a; Wang et al., 2022c), which eliminates the problem of representational drift (Yu et al., 2020). However, these methods perform poorly when little training data is available upfront. In Fig. 1, we illustrate both CIL setups, with and without the more significant first task. The trend is evident when we have a lot of data in the first task: results steadily improve over time. However, the
18
+
19
+ ![](images/7fdafa760f59359bc535b603f493b5be00b8772247d1c2e72c01453af414ccee.jpg)
20
+ Figure 1: Exemplar-free Class Incremental Learning methods evaluated on CIFAR100 divided into eleven tasks for two different data distributions.
21
+
22
+ progress is not evident for the setup with equal splits, where a frozen (or nearly frozen by high regularization) feature extractor does not yield good results. This setup is more challenging as it requires the whole network to continually learn new features (plasticity) and face the problem of catastrophic forgetting of already learned ones (stability).
23
+
24
+ One solution to this problem is architecture-based CIL methods, notably those expanding the network structure beyond a single model. Expert Gate (Aljundi et al., 2017) creates a new expert, defined as a neural network, for each task to mitigate forgetting. However, it can potentially result in unlimited growth in the number of parameters. Therefore, more advanced ensembling solutions, like CoSCL (Wang et al., 2022b), limit the computational budget using a fixed number of experts trained in parallel to generate a feature ensemble. In order to prevent forgetting, regularization is applied to all experts while training a new task, limiting their plasticity. Doan et al. (2022) propose ensembling multiple models for continual learning with exemplars for experience replay. To perform efficient ensembling and control the number of the model's parameters, they enforce the models' connectivity and keep several ensembles fixed. However, exemplars are still necessary, and as in CoSCL, the task-id is required during inference.
25
+
26
+ As a remedy for the above issues, we introduce a novel ensembling method for exemplar-free CIL called SEED: Selection of Experts for Ensemble Diversification. Similarly to CoSCL and (Doan et al., 2022), SEED uses a fixed number of experts in the ensemble. However, only a single expert is updated while learning a new task. That, in turn, mitigates forgetting and encourages diversification between the experts. While only one expert is being trained, the others still participate in predictions. In SEED, the training does not require more computation than single-model solutions. The right expert for the update is selected based on the current ensemble state and new task data. The selection aims to limit representation drift for the classifier. The ensemble classifier uses multivariate Gaussian distribution representation associated with each expert (see Fig. 2). At the inference time, Bayes classification from all the experts is used for a final prediction. As a result, SEED achieves state-of-the-art accuracy for task-aware and task-agnostic scenarios while maintaining the high plasticity of the resulting model under different data distribution shifts within tasks.
27
+
28
+ In conclusion, the main contributions of our paper are as follows:
29
+
30
+ - We introduce SEED, a new method that leverages an ensemble of experts where a new task is selectively trained with only a single expert, which mitigates forgetting, encourages diversification between experts and causes no computational overhead during the training.
31
+ - We introduce a unique method for selecting an expert based on multivariate Gaussian distributions of each class in the ensemble that limits representational drift for the selected expert. At inference time, SEED uses the same rich class representation to perform Bayes classification and make predictions in a task-agnostic way.
32
+ - With the series of experiments, we show that existing methods that start CIL from a strong feature extractor later during the training mainly focus on stability. In contrast, SEED also holds high plasticity and outperforms other methods without any assumption of the class distribution during incremental learning sessions.
33
+
34
+ # 2 RELATED WORK
35
+
36
+ Class-Incremental Learning (CIL) represents the most challenging and prevalent scenario in the field of Continual Learning research (Van de Ven & Tolias, 2019; Masana et al., 2022), where during the evaluation the task-id is unknown, and the classifier has to predict all classes seen so far. The simplest solution to fight catastrophic forgetting in CIL is to store exemplars, e.g. LUCIR (Hou et al., 2019), BiC (Wu et al., 2019), Foster (Wang et al., 2022a), WA (Zhao et al., 2020). Having exemplars greatly simplifies learning cross-task features. However, storing exemplars is not always an option due to privacy issues or other limitations. Then, the hardest scenario, exemplar-free CIL, is considered, for which a number of methods exist: LwF (Li & Hoiem, 2016), SDC (Yu et al., 2020), ABD (Smith et al., 2021), PASS (Zhu et al., 2021b), IL2A (Zhu et al., 2021a), SSRE (Zhu et al., 2022), FeTrIL (Petit et al., 2023). Most of them favor stability and alleviate forgetting through various forms of regularization applied to an already well-performing feature extractor. Some approaches even concentrate solely on the incremental learning of the classifier while keeping the backbone network frozen (Petit et al., 2023). However, freezing the backbone can limit the plasticity and not be sufficient for more complex
37
+
38
+ ![](images/e6219e0bdccd90020a8e256602a0a3121765b30cf3b92272afcd8dd50f3743b1.jpg)
39
+ Figure 2: SEED comprises $K$ deep network experts $g_{k} \circ f$ (here $K = 2$ ), sharing the initial layers $f$ for higher computational performance. $f$ are frozen after the first task. Each expert contains one Gaussian distribution per class $c \in C$ in its unique latent space. In this example, we consider four classes, classes 1 and 2 from task 1 and classes 3 and 4 from task 2. During inference, we generate latent representations of input $x$ for each expert and calculate its log-likelihoods for distributions of all classes (for each expert separately). Then, we softmax those log-likelihoods and compute their average over all experts. The class with the highest average softmax is considered the prediction.
40
+
41
+ settings, e.g., when tasks are unrelated, like in CTrL (Veniat et al., 2020). This work specifically aims at exemplar-free CIL, where the model's plasticity in learning new features for improved classification is still considered an essential factor.
42
+
43
+ Growing architectures and ensemble. Architecture-based methods for CIL can dynamically adjust some networks' parameters while learning new tasks, e.g. DER (Yan et al., 2021) or Progressive Neural Networks (Rusu et al., 2016), or use masking techniques, e.g. HAT (Serrà et al., 2018). In an extreme case, each task can have a dedicated expert network (Aljundi et al., 2017) or a single network per class (van de Ven et al., 2021). That greatly improves plasticity but also requires increasing resources as the number of parameters grows. Additionally, while the issue of forgetting is addressed, transferring knowledge between tasks becomes a new challenge. A recent method, CoSCL (Wang et al., 2022b), addresses this by performing an ensemble of a limited number of experts, which are diversified using a cooperation loss. However, this method is limited to task-aware settings. Doan et al. (2022) diversify the ensemble by training tasks on different subspaces of models and then merging them. In contrast to our approach, the method requires exemplars to do so.
44
+
45
+ Gaussian Models in CL. Exemplar-free CIL methods based on cross-entropy classifiers suffer from a recency bias towards the newly trained task (Wu et al., 2019; Masana et al., 2022). Therefore, some methods employ nearest-mean classifiers with stored class centroids (Rebuffi et al., 2017; Yu et al., 2020). SLDA (Hayes & Kanan, 2020b) assigns labels to inputs based on the closest Gaussian, computed using the running class means and covariance matrix from the stream of tasks. In the context of continual unsupervised learning (Rao et al., 2019), Gaussian Mixture Models were used to describe new emerging classes during the CL session. Recently, in (Yang et al., 2021), a fixed, pre-trained feature extractor and Gaussian distributions with diagonal covariance matrices were used to solve the CIL problem. However, we argue that such an approach has low plasticity and limited applicability. Therefore, we propose an improved method based on multivariate Gaussian distributions and multiple experts that can learn new knowledge efficiently.
46
+
47
+ # 3 METHOD
48
+
49
+ The core idea of our approach is to directly diversify experts by training them on different tasks and combining their knowledge during the inference. Each expert contains two components: a feature extractor that generates a unique latent space and a set of Gaussian distributions (one per class). The overlap of class distributions varies across different experts due to disparities in expert embeddings. SEED takes advantage of this diversity, considering it both during training and inference.
50
+
51
+ Architecture. Our approach, presented in Fig. 2, consists of $K$ deep network experts $g_{k} \circ f$ for $k = 1, \dots, K$ , sharing the initial layers $f$ for improving computational performance. $f$ are frozen after the first task. We consider the number of shared layers a hyperparameter (see Appendix A.3).
52
+
53
+ Moreover, each expert $k$ contains one Gaussian distribution $G_{k}^{c} = (\mu_{k}^{c},\Sigma_{k}^{c})$ per class $c$ for its unique latent space.
54
+
55
+ Algorithm. During inference, we perform an ensemble of Bayes classifiers. The procedure is presented in Fig. 2. Firstly, we generate representations of input $x$ for each expert $k$ as $r_k = g_k \circ f(x)$ . Secondly, we calculate log-likelihoods of $r_k$ for all distributions $G_k^c$ associated with this expert
56
+
57
+ $$
58
+ l_{k}^{c}(x) = -\frac{1}{2}\left[\ln\left(\left|\Sigma_{k}^{c}\right|\right) + S\ln(2\pi) + \left(r_{k} - \mu_{k}^{c}\right)^{T}\left(\Sigma_{k}^{c}\right)^{-1}\left(r_{k} - \mu_{k}^{c}\right)\right], \tag{1}
59
+ $$
60
+
61
+ where $S$ is the latent space dimension. Then, we softmax those values, $\widehat{l_k^1},\ldots,\widehat{l_k^{|C|}} = \mathrm{softmax}(l_k^1,\dots,l_k^{|C|};\tau)$, for each expert, where $C$ is the set of classes and $\tau$ is a temperature. The class $c$ with the highest average value after softmax over all experts (highest $\mathbb{E}_k\widehat{l}_k^c$) is returned as the prediction in the task-agnostic setup. For task-aware inference, we limit this procedure to the classes of the considered task.
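For illustration, a minimal PyTorch sketch of this inference procedure is given below. It assumes `experts` is a list of the $g_k \circ f$ feature extractors and `dists[k][c]` holds a `torch.distributions.MultivariateNormal` fitted for class $c$ in expert $k$; the names and structure are ours, not the authors' released code.

```python
import torch

@torch.no_grad()
def seed_predict(x, experts, dists, tau=3.0):
    """Task-agnostic SEED inference sketch: softmax the per-expert Gaussian
    log-likelihoods (Eq. 1) with temperature tau, average over experts,
    and return the class with the highest mean score."""
    expert_probs = []
    for k, expert in enumerate(experts):
        r_k = expert(x)  # latent representation of the batch, shape (B, S)
        classes = sorted(dists[k])
        log_lik = torch.stack([dists[k][c].log_prob(r_k) for c in classes], dim=1)  # (B, |C|)
        expert_probs.append(torch.softmax(log_lik / tau, dim=1))
    mean_probs = torch.stack(expert_probs).mean(dim=0)  # average softmax over experts
    return mean_probs.argmax(dim=1)                     # predicted class indices
```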
62
+
63
+ Our training assumes $T$ tasks, each corresponding to the non-overlapping set of classes $C_1 \cup C_2 \cup \dots \cup C_T = C$ such that $C_t \cap C_s = \emptyset$ for $t \neq s$ . Moreover, task $t$ is a training step with only access to data $D_t = \{(x,y) | y \in C_t\}$ , and the objective is to train a model performing well both for classes of a new task and classes of previously learned tasks ( $< t$ ).
64
+
65
+ The main idea of training SEED, as presented in Fig. 3, is to choose and finetune one expert for each task, where the chosen expert should correspond to latent space where distributions of new classes overlap the least. Intuitively, this strategy causes latent space to change as little as possible, improving stability.
66
+
67
+ ![](images/ed522bfe67e2703b29498e411d378dfff42faa8e7e9322650082a6b79f84d9f5.jpg)
68
+ Figure 3: SEED training process for $K = 2$ experts, $T = 3$ tasks, and $|C_t| = 2$ classes per task. When the third task appears with novel classes $C_3$ , we analyze distributions of $C_3$ classes (here represented as purple distributions) in latent spaces of all experts. We choose the expert where those distributions overlap least (here, expert 2). We finetune this expert to increase the separability of new classes further and move to the next task.
69
+
70
+ To formally describe our training, let us assume that we are in the moment of training when we have access to data $D_{t} = \{(x,y)|y\in C_{t}\}$ of task $t$ for which we want to finetune the model. There are two steps to take, selecting the optimal expert for task $t$ and finetuning this expert.
71
+
72
+ Expert selection starts with determining the distribution for each class $c \in C_t$ in each expert $k$ . For this purpose, we pass all $x$ from $D_t$ with $y = c$ through deep network $g_k \circ f$ . This results in a set of vectors in latent space for which we approximate a multivariate Gaussian distribution $q_{c,k}$ . In consequence, each expert is associated with a set $Q_k = \{q_{1,k}, q_{2,k}, \dots, q_{|C_t|,k}\}$ of $|C_t|$ distributions. We select expert $\bar{k}$ for which those distributions overlap least using symmetrized Kullback-Leibler divergence $d_{KL}$ :
73
+
74
+ $$
75
+ \bar{k} = \underset{k}{\operatorname{argmax}} \sum_{q_{i,k}, q_{j,k} \in Q_{k}} d_{KL}\left(q_{i,k}, q_{j,k}\right). \tag{2}
76
+ $$
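In code, this selection step could look roughly as follows (a sketch under our own assumptions: per-class latent features of the new task have already been extracted with every expert, and the symmetrized KL between two multivariate Gaussians is written out in closed form; variable names are illustrative):

```python
import itertools
import torch

def gaussian_kl(mu0, cov0, mu1, cov1):
    """Closed-form KL(N(mu0, cov0) || N(mu1, cov1)) for multivariate Gaussians."""
    S = mu0.numel()
    cov1_inv = torch.linalg.inv(cov1)
    diff = (mu1 - mu0).unsqueeze(1)  # (S, 1)
    return 0.5 * (torch.trace(cov1_inv @ cov0)
                  + (diff.T @ cov1_inv @ diff).squeeze()
                  - S + torch.logdet(cov1) - torch.logdet(cov0))

def select_expert(features_per_expert):
    """features_per_expert[k][c] is an (N_c, S) tensor of class-c features in expert k's
    latent space. Returns the expert whose new-class Gaussians overlap the least (Eq. 2)."""
    scores = []
    for per_class in features_per_expert:
        gauss = {c: (f.mean(dim=0), torch.cov(f.T)) for c, f in per_class.items()}
        score = sum(float(gaussian_kl(*gauss[i], *gauss[j]) + gaussian_kl(*gauss[j], *gauss[i]))
                    for i, j in itertools.combinations(gauss, 2))
        scores.append(score)
    return scores.index(max(scores))  # argmax over experts
```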
77
+
78
+ To finetune the selected expert $\bar{k}$, we add a linear head to its deep network and train $g_{\bar{k}}$ using the $D_{t}$ set. As a loss function, we use cross-entropy combined with feature regularization based on knowledge distillation (Li & Hoiem, 2016) weighted with $\alpha$: $L = (1 - \alpha)L_{CE} + \alpha L_{KD}$, where $L_{KD} = \frac{1}{|B|}\sum_{i\in B}||g_{\bar{k}}\circ f(x_i) - g_{\bar{k}}^{old}\circ f(x_i)||$, $B$ is a batch and $g_{\bar{k}}^{old}$ is a frozen copy of $g_{\bar{k}}$.
79
+
80
+ While we use CE for its simplicity and effective clustering (Horiguchi et al., 2019), it can be replaced with other training objectives, such as self-supervision. Then, we remove the linear head, update distributions of $Q_{\bar{k}}$ , and move to the next task.
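A minimal sketch of one finetuning step with this loss is shown below (our own illustrative code: `head` is the temporary linear classifier mentioned above and `old_expert` a frozen copy of $g_{\bar{k}} \circ f$ taken before the task starts):

```python
import torch
import torch.nn.functional as F

def finetune_loss(expert, head, old_expert, x, y, alpha=0.99):
    """Cross-entropy on the current task plus L2 feature distillation against
    the frozen copy of the selected expert: L = (1 - alpha) * L_CE + alpha * L_KD."""
    feats = expert(x)                                  # g_k∘f(x), shape (B, S)
    loss_ce = F.cross_entropy(head(feats), y)          # temporary linear head
    with torch.no_grad():
        old_feats = old_expert(x)                      # features before this task
    loss_kd = (feats - old_feats).norm(dim=1).mean()   # feature distillation term
    return (1 - alpha) * loss_ce + alpha * loss_kd
```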
81
+
82
+ Due to the random expert initializations, we skip the selection procedure for the $K$ initial tasks and omit $L_{KD}$. Instead, we select the expert whose index equals the task number ($k = t$) and use $L = L_{CE}$. For the same reason, we calculate distributions of new tasks only for the experts trained so far ($k \leq t$). Finally, we fix $f$ after the first task so that finetuning one expert does not affect the others.
83
+
84
+ # 4 EXPERIMENTS
85
+
86
+ In order to evaluate the performance of SEED and fairly compare it with other models, we utilize three commonly used benchmark datasets in the field of Continual Learning (CL): CIFAR-100 (Krizhevsky, 2009) (100 classes), ImageNet-Subset (Deng et al., 2009) (100 classes) and DomainNet (Peng et al., 2019) (345 classes, from 6 domains). DomainNet contains objects in very different domains, allowing us to measure models' adaptability to new data distributions. We create each task with a subset of classes from a single domain, so the domain changes between tasks (more extensive data drift). We always set $K = 5$ for SEED, so it consists of 5 experts. We evaluate all Continual Learning approaches in three different task distribution scenarios and train all methods from scratch in all scenarios. Detailed information regarding the experiments and the code is in the Appendix. We compare all methods with standard CIL evaluations using the classification accuracies after each task and the average incremental accuracy, which is the average of those accuracies (Rebuffi et al., 2017).
87
+
88
+ The first scenario is the CIL equal split setting, where each task has the same number of classes. This weakens the feature extractor trained on the first task, as there is little data. Therefore, this scenario better exposes the methods' plasticity. We reproduce results using the FACIL (Masana et al., 2022) and PyCIL (Zhou et al., 2021) benchmarks for this setting. We train all methods using random crops, horizontal flips, cutouts, and AugMix (Hendrycks et al., 2019) data augmentations.
89
+
90
+ The second scenario is similar to the one used in (Hou et al., 2019), where the first task is larger than the subsequent tasks. This equips CIL methods with a more robust feature extractor than the equal split scenario. Precisely, the first task consists of either $50\%$ or $40\%$ of all classes. This setting allows methods that freeze the feature extractor (low plasticity) to achieve good results. We take baseline results for this setting from (Petit et al., 2023).
91
+
92
+ The third scenario is task incremental on equal split tasks (where the task id is known during inference). Here, the baseline results and numbers of models' parameters are taken from (Wang et al., 2022b). We perform the same data augmentations as in this work.
93
+
94
+ # 4.1 RESULTS
95
+
96
+ Tab. 1 presents the comparison of SEED and state-of-the-art exemplar-free CIL methods for CIFAR-100, DomainNet, and ImageNet-Subset in the equal split scenario. We report average incremental accuracies for various split conditions and domain shift scenarios (DomainNet). We present joint training as an upper bound for the CL training.
97
+
98
+ SEED outperforms other methods by a large margin in each setting. For CIFAR-100, SEED is better than the second-best method by 14.7, 17.5, and 15.6 percentage points for $T = 10, 20, 50$, respectively. The difference in results increases as there are more tasks in the setting. More precisely, for $T = 10$, SEED has 14.7 percentage points better accuracy than the second-best method (LwF*, which is the LwF implementation with PyCIL (Zhou et al., 2021) data augmentations and learning rate schedule). At the same time, for $T = 50$, SEED is better by 15.6 percentage points. The results are consistent for the other datasets, proving that SEED achieves state-of-the-art results in the equal split scenario. Moreover, based on the DomainNet results, we conclude that SEED is also better in scenarios with a significant distributional shift. Detailed results for CIFAR-100 T=50 and DomainNet T=36 are presented in
99
+
100
+ ![](images/5be65c374814a76b373cca0350bad982f2c5a6f7fc6cae3bff2f7403724f78a4.jpg)
101
+ Figure 4: Class incremental accuracy achieved after each task for equal splits on CIFAR100 and DomainNet. SEED significantly outperforms other methods in equal split scenarios for many tasks (left) and more considerable data shifts (right).
102
+
103
+ ![](images/99aa46a374a3b8a3d54fe0c1ef39e767baacc299fc350e35c98d0a4ad50172c6.jpg)
104
+ Fig. 4. In this extreme setting, where each task consists of just little data, SEED results in significantly higher accuracies for the last tasks than other methods.
105
+
106
+ Table 1: Task-agnostic avg. inc. accuracy $(\%)$ for equally split tasks on CIFAR-100, DomainNet and ImageNet-Subset. The best results are in bold. SEED achieves superior results compared to other methods and outperforms the second best method (FeTrIL) by a large margin.
107
+
108
+ <table><tr><td rowspan="2">CIL Method</td><td colspan="3">CIFAR-100 (ResNet32)</td><td colspan="3">DomainNet</td><td>ImageNet-Subset</td></tr><tr><td>T=10</td><td>T=20</td><td>T=50</td><td>T=12</td><td>T=24</td><td>T=36</td><td>T=10</td></tr><tr><td>Finetune</td><td>26.4±0.1</td><td>17.1±0.1</td><td>9.4±0.1</td><td>17.9±0.3</td><td>14.8±0.1</td><td>10.9±0.2</td><td>27.4±0.4</td></tr><tr><td>EWC (Kirkpatrick et al., 2017) (PNAS&#x27;17)</td><td>37.8±0.8</td><td>21.0±0.1</td><td>9.2±0.5</td><td>19.2±0.2</td><td>15.7±0.1</td><td>11.1±0.3</td><td>29.8±0.3</td></tr><tr><td>LwF* (Rebuffi et al., 2017) (CVPR&#x27;17)</td><td>47.0±0.2</td><td>38.5±0.2</td><td>18.9±1.2</td><td>20.9±0.2</td><td>15.1±0.6</td><td>10.3±0.7</td><td>32.3±0.4</td></tr><tr><td>PASS (Zhu et al., 2021b) (CVPR&#x27;21)</td><td>37.8±1.1</td><td>24.5±1.0</td><td>19.3±1.7</td><td>25.9±0.5</td><td>23.1±0.5</td><td>9.8±0.3</td><td>-</td></tr><tr><td>IL2A (Zhu et al., 2021a) (NeurIPS&#x27;21)</td><td>43.5±0.3</td><td>28.3±1.7</td><td>16.4±0.9</td><td>20.7±0.5</td><td>18.2±0.4</td><td>16.2±0.4</td><td>-</td></tr><tr><td>SSRE (Zhu et al., 2022) (CVPR&#x27;22)</td><td>44.2±0.6</td><td>32.1±0.9</td><td>21.5±1.8</td><td>33.2±0.7</td><td>24.0±1.0</td><td>22.1±0.7</td><td>45.0±0.5</td></tr><tr><td>FeTrIL (Petit et al., 2023) (WACV&#x27;23)</td><td>46.3±0.3</td><td>38.7±0.3</td><td>27.0±1.2</td><td>33.5±0.6</td><td>33.9±0.5</td><td>27.5±0.7</td><td>58.7±0.2</td></tr><tr><td>SEED</td><td>61.7±0.4</td><td>56.2±0.3</td><td>42.6±1.4</td><td>45.0±0.2</td><td>44.9±0.2</td><td>39.2±0.3</td><td>67.8±0.3</td></tr><tr><td>Joint</td><td></td><td>71.4±0.3</td><td></td><td>63.7±0.5</td><td>69.3±0.4</td><td>69.1±0.1</td><td>81.5±0.5</td></tr></table>
109
+
110
+ Large first task class incremental scenarios. We present results for this setting in Tab. 2. For CIFAR-100, SEED is better than the best method (FeTrIL) by 4.6, 4.1, and 1.4 percentage points for $T = 6$ , 11, 21, respectively. For $T = 6$ on ImageNet-Subset, SEED is better by 3.3 percentage points than the best method. However, with more tasks, $T = 11$ or $T = 21$ , FeTrIL with a frozen feature extractor presents better average incremental accuracy.
111
+
112
+ We can notice that simple regularization-based methods such as EWC and LwF* are far behind more recent ones: FeTrIL, SSRE, and PASS, which achieve high levels of overall average incremental accuracy. However, these methods benefit from a larger initial task, where a robust feature extractor can be trained before the incremental steps. In SEED, each expert can still specialize for a different set of tasks and continually learn more diversified features even when using regularization like LwF. The difference between SEED and other methods is noticeably smaller in this scenario than in the equal split one. This confirms that SEED works best in scenarios where a strong feature extractor must be trained from scratch or where there is a domain shift between tasks.
113
+
114
+ Task incremental with limited number of parameters. We investigate the performance of SEED in task incremental scenarios. We compare it against another state-of-the-art task incremental ensemble method - CoSCL (Wang et al., 2022b) and follow the proposed limited number of models' parameters setup. We compare SEED to: HAT (Serrà et al., 2018), MARK (Hurtado et al., 2021), and BNS (Qin et al., 2021). Tab. 3 presents the results with the number of utilized parameters. Our method requires significantly fewer parameters than other methods and achieves better average incremental accuracy. Despite being designed to solve the exemplar-free CIL problem, SEED outperforms other task-incremental learning methods. Additionally, we check how the number of shared layers ( $f$ function) affects SEED's performance. Increasing the number of shared layers decreases required parameters but negatively impacts task-aware accuracy. As such, the number of shared layers in SEED is a
115
+
116
+ Table 2: Comparison of CIL methods on ResNet18 and CIFAR-100 or ImageNet-Subset under larger first task conditions. We report task-agnostic avg. inc. accuracy from multiple runs. The best result is in bold. The discrepancy in results between SEED and other methods decreases compared to the equal split scenario.
117
+
118
+ <table><tr><td rowspan="2">CIL Method</td><td colspan="3">CIFAR-100</td><td colspan="3">ImageNet-Subset</td></tr><tr><td>T=6 |C1|=50</td><td>T=11 |C1|=50</td><td>T=21 |C1|=40</td><td>T=6 |C1|=50</td><td>T=11 |C1|=50</td><td>T=21 |C1|=40</td></tr><tr><td>EWC* (Kirkpatrick et al., 2017) (PNAS&#x27;17)</td><td>24.5</td><td>21.2</td><td>15.9</td><td>26.2</td><td>20.4</td><td>19.3</td></tr><tr><td>LwF* (Rebuffi et al., 2017) (CVPR&#x27;17)</td><td>45.9</td><td>27.4</td><td>20.1</td><td>46.0</td><td>31.2</td><td>42.9</td></tr><tr><td>DeeSIL (Belouadah &amp; Popescu, 2018) (ECCVW&#x27;18)</td><td>60.0</td><td>50.6</td><td>38.1</td><td>67.9</td><td>60.1</td><td>50.5</td></tr><tr><td>MUC* (Liu et al., 2020) (ECCV&#x27;20)</td><td>49.4</td><td>30.2</td><td>21.3</td><td>-</td><td>35.1</td><td>-</td></tr><tr><td>SDC* (Yu et al., 2020) (CVPR&#x27;20)</td><td>56.8</td><td>57.0</td><td>58.9</td><td>-</td><td>61.2</td><td>-</td></tr><tr><td>ABD* (Smith et al., 2021) (ICCV&#x27;21)</td><td>63.8</td><td>62.5</td><td>57.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>PASS* (Zhu et al., 2021b) (CVPR&#x27;21)</td><td>63.5</td><td>61.8</td><td>58.1</td><td>64.4</td><td>61.8</td><td>51.3</td></tr><tr><td>IL2A* (Zhu et al., 2021a) (NeurIPS&#x27;21)</td><td>66.0</td><td>60.3</td><td>57.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SSRE* (Zhu et al., 2022) (CVPR&#x27;22)</td><td>65.9</td><td>65.0</td><td>61.7</td><td>-</td><td>67.7</td><td>-</td></tr><tr><td>FeTrIL* (Petit et al., 2023) (WACV&#x27;23)</td><td>66.3</td><td>65.2</td><td>61.5</td><td>72.2</td><td>71.2</td><td>67.1</td></tr><tr><td>SEED</td><td>70.9±0.3</td><td>69.3±0.5</td><td>62.9±0.9</td><td>75.5±0.4</td><td>70.9±0.5</td><td>63.0±0.8</td></tr><tr><td>Joint</td><td></td><td>80.4</td><td></td><td></td><td>81.5</td><td></td></tr></table>
119
+
120
+ hyperparameter that allows for a trade-off between achieved results and the number of parameters required for training.
121
+
122
+ Table 3: Limited parameters setting on CIFAR-100 with random class order. The reported metric is average task aware accuracy (\%). Results for SEED are presented for various numbers of shared layers. Although we designed SEED for the task agnostic setting, it achieves superior results to exemplar-free, architecture-based methods using fewer parameters.
123
+
124
+ <table><tr><td>Approach</td><td>#Params.</td><td>20-split</td><td>50-split</td></tr><tr><td>HAT</td><td>6.8M</td><td>77.0</td><td>80.5</td></tr><tr><td>MARK</td><td>4.7M</td><td>78.3</td><td>-</td></tr><tr><td>BNS</td><td>6.7M</td><td>-</td><td>82.4</td></tr><tr><td>CoSCL(EWC+LWF)</td><td>4.6M</td><td>79.4±1.0</td><td>87.9±1.1</td></tr><tr><td>SEED</td><td>3.2M</td><td>86.8±0.3</td><td>91.2±0.4</td></tr><tr><td>SEED(1 shared)</td><td>3.2M</td><td>86.7±0.6</td><td>91.2±0.5</td></tr><tr><td>SEED(11 shared)</td><td>3.1M</td><td>85.6±0.3</td><td>89.6±0.2</td></tr><tr><td>SEED(21 shared)</td><td>2.7M</td><td>82.4±0.4</td><td>88.1±0.5</td></tr></table>
125
+
126
+ Table 4: Ablation study of SEED for CIL setting with $\mathrm{T} = {10}$ on ResNet32 and CIFAR-100. Avg. inc. acc. is reported. Multiple components of SEED were ablated. SEED as-designed presents the best performance.
127
+
128
+ <table><tr><td>Approach</td><td>Acc.(%)</td></tr><tr><td>SEED(5 experts)</td><td>61.7 ±0.4</td></tr><tr><td>standard ensemble</td><td>56.9±0.4</td></tr><tr><td>weighted ensemble</td><td>57.0±0.5</td></tr><tr><td>CoSCL ensemble</td><td>57.3±0.4</td></tr><tr><td>w/o multivariate Gauss.</td><td>53.5±0.5</td></tr><tr><td>w/o covariance</td><td>54.1±0.3</td></tr><tr><td>w/o temp. in softmax</td><td>59.2±0.5</td></tr><tr><td>w/ReLU</td><td>57.8±0.6</td></tr></table>
129
+
130
+ # 4.2 DISCUSSION
131
+
132
+ Is SEED better than other ensemble methods? We want to verify that the improved performance of our method comes from more than just forming an ensemble of classifiers. Hence, we compare SEED with a vanilla ensemble approach to continual learning, where all experts are initialized with random weights, trained on the first task, and sequentially fine-tuned on incremental tasks. The final decision is obtained by averaging the predictions of ensemble members. We present results in Tab. 4. Using the standard ensemble decreases the accuracy by 4.8 percentage points. We also experiment with approaches where the predictions are weighted during inference by the confidence of the ensemble members (using prediction entropy, as in (Ruan et al., 2023)) and where the experts are trained with an additional ensemble cooperation loss from (Wang et al., 2022b). However, both yielded results similar to uniform weighting.
133
+
134
+ Diversity of experts. Fig. 5 and Fig. 11 (Appendix) depict the quality of each expert on various tasks and their respective contributions to the ensemble. It can be observed that experts specialize in tasks on which they were fine-tuned. For each task, there is always an expert that exhibits over 2.5 percentage points better accuracy than the average of all experts. This demonstrates that experts specialize in different tasks. Additionally, the ensemble consistently achieves higher accuracy (ranging from 6 to 10 percentage points) than the average of all experts on all tasks. Furthermore, the ensemble consistently outperforms the best individual expert, indicating that each expert contributes uniquely to the ensemble. See the
135
+
136
+ details in Fig. 10 (Appendix) for more analysis of the overlap strategy from Eq. 2, which also shows how experts are diversified between tasks.
137
+
138
+ ![](images/ff089d1b8341a1cd73c12449a44d289e6c2481ae56b02d9725c3d17335d5b018.jpg)
139
+ Figure 5: Diversity of experts on CIFAR-100 dataset with $T = 20$ split. The presented metric is relative accuracy (\%) calculated by subtracting the accuracy of each expert from the averaged accuracy of all experts. Black squares represent experts selected to be finetuned on a given task. Although we do not impose any cost function associated with experts' diversity, they tend to specialize in different tasks by the design of our method. Moreover, our ensemble (bottom row) always performs better than the best expert, proving that each expert contributes uniquely to the ensemble in SEED.
140
+
141
+ Expert selection strategy. In order to demonstrate that our minimum overlap selection strategy (KL-max) improves the performance of the SEED architecture, we compare it to three other selection strategies. The first is a random selection strategy, where each expert has an equal probability of being chosen for finetuning. The second is a round-robin selection strategy, where for a task $t$, the expert with index $1 + ((t - 1) \bmod K)$ is chosen for finetuning. The third one is the maximum overlap strategy (KL-min), in which we choose the expert for which the overlap between latent distributions of new classes is the highest. We conduct ten runs on CIFAR-100 with a ResNet32 architecture, three experts, and a random class order and report the average incremental accuracy in Fig. 6. Our minimum overlap selection strategy shows a higher mean and median than the other methods.
142
+
143
+ Ablation study. In Tab. 4, we present the ablation study for SEED. We report task-agnostic avg. inc. accuracy for five experts on CIFAR-100 and ResNet32, where results are averaged over three runs. Firstly, we remove or replace particular SEED components. We start by replacing the multivariate Gaussian distribution with its diagonal form. This reduces accuracy to $53.5\%$. Then, we remove Gaussian distributions and represent each class as a mean prototype in the latent space, using a Nearest Mean Classifier (NMC) to make predictions. This also reduces accuracy, which shows that using the full multivariate distribution is important for SEED's accuracy. Secondly, we check the importance of using temperature in the softmax function during inference. SEED without temperature ($\tau = 1$) achieves worse results than with temperature ($\tau = 3$), which allows more experts to contribute to the ensemble with more fuzzy likelihoods. Lastly, we analyze various SEED modifications, i.e., adding ReLU activations (like in the original ResNet) at the last layer, which decreased the accuracy by 3.9 percentage points. This is because it is easier for a neural network trained with cross-entropy loss to represent features as Gaussian distributions when the nonlinear activation is removed. See Tab. 7 for an additional ablation study.
144
+
145
+ ![](images/325aac768bcfeb242ed39ebb1c0bdbb4f73da9f20d6eb317b18e9c3805ffed89.jpg)
146
+ Figure 6: Avg. inc. accuracy of 10 runs with different class orderings for CIFAR-100 and different fine-tuning expert selection strategies for $T = 20,50$ and three experts. Our KL-max expert selection strategy yields better results than random, round-robin, and KL-min.
147
+
148
+ Plasticity vs. stability trade-off. SEED uses feature distillation in the trained expert to alleviate forgetting. To assess the influence of the regularization on overall method performance, we use the forgetting and intransigence measures defined in (Chaudhry et al., 2018). Fig. 7 (left) shows the relationship between forgetting and intransigence for four different methods: SEED, EWC as a parameter regularization-based method, LwF as regularization of the network's output with distillation,
149
+
150
+ ![](images/32dbd3d1b7318a9cb979ef77b53d39a47f820913f2023069cd4750871b702715.jpg)
151
+ Figure 7: CIFAR-100. (Left) Forgetting and intransigence for different methods when manipulating the stability-plasticity parameters for $T = 10$ . SEED with 5 experts achieves the best forgetting-intransigence trade-off. (Right) SEED accuracy as a function of a number of experts for $T = 20$ with 5 or 50 classes in the first task. Bars reflect standard dev. out of three runs.
152
+
153
+ ![](images/094d0b5a99ded4aace2fcd7a2f1147cf01cbc301b5e54fca39791d5ed3325a68.jpg)
154
+
155
+ and the recent FeTrIL method (Petit et al., 2023). For SEED, we adapt plasticity using the $\alpha$ and $K$ parameters, and for both EWC and LwF, we change the $\lambda$ parameter. The FeTrIL method has no such parameter, as it uses a frozen backbone. The trade-off between stability and plasticity is evident. The FeTrIL model is very intransigent, with low plasticity and low forgetting. Plasticity is crucial in the CIL setting with ten or more tasks with an equal number of classes. Thus, EWC and LwF need to be less rigid and exhibit more forgetting. The SEED model, for $K = 3$ and $K = 5$, achieves much better results than FeTrIL while remaining less intransigent and more stable than LwF and EWC. By adjusting the $\alpha$ trade-off parameter of SEED, its stability can be controlled for any number of experts.
156
+
157
+ Number of experts. In Fig. 7 (right), we analyze how the number of experts influences the avg. incremental accuracy achieved by SEED. Changing the number of experts from 1 to 5 increases task-agnostic and task-aware accuracy by $\approx 15\%$ for $|C_1| = 5$. However, for $|C_1| = 50$, the increase is less significant (2% and 5% for the task-aware and task-agnostic settings, respectively). These results suggest that scenarios with a significantly bigger first task are simpler than equal split ones. Moreover, going beyond five experts does not improve the final CIL performance much further.
158
+
159
+ # 5 CONCLUSIONS
160
+
161
+ In this paper, we introduce an exemplar-free CIL method called SEED. It consists of a limited number of experts trained from scratch that all cooperate during inference, but for each task, only one is selected for finetuning. Firstly, this decreases forgetting, as only a single expert model's parameters are updated without changing the learned representations of the others. Secondly, it encourages diversified class representations between the experts. The selection is based on the overlap of the distributions of classes encountered in a task. That allows us to find a trade-off between model plasticity and stability. Our experimental study shows that the SEED method achieves state-of-the-art performance across several exemplar-free class-incremental learning scenarios, including different task splits, significant shifts in data distribution between tasks, and task-incremental settings. In the ablation study, we showed that each SEED component is necessary to obtain the best results.
162
+
163
+ Reproducibility and limitations of SEED. We enclose the code in the supplementary material, and results can be reproduced by following the README file. Our method has three limitations. Firstly, SEED may not be feasible for scenarios where tasks are completely unrelated and the number of parameters is limited, as in that case sharing initial parameters between experts may lead to poor performance. Secondly, SEED requires the maximum number of experts to be given upfront, which can be a limitation of our method in new settings. Thirdly, calculating a distribution for a class may not be possible if the class's covariance matrix is singular. We address the last problem by decreasing the latent space size. We elaborate more on this in Appendix A.2.
164
+
165
+ # ACKNOWLEDGMENTS
166
+
167
+ This research was supported by National Science Centre, Poland grant no 2020/39/B/ST6/01511, grant no 2022/45/B/ST6/02817, grant no 2022/47/B/ST6/03397, PL-Grid Infrastructure grant nr PLG/2022/016058, and the Excellence Initiative at the Jagiellonian University. This work was partially funded by the European Union under the Horizon Europe grant OMINO (grant number 101086321) and Horizon Europe Program (HORIZON-CL4-2022-HUMAN-02) under the project "ELIAS: European Lighthouse of AI for Sustainability", GA no. 101120237, it was also co-financed with funds from the Polish Ministry of Education and Science under the program entitled International Co-Financed Projects. Bartlomiej Twardowski acknowledges the grant RYC2021-032765-I. Views and opinions expressed are however those of the author(s) only and do not necessarily reflect those of the European Union or the European Research Executive Agency. Neither the European Union nor European Research Executive Agency can be held responsible for them.
168
+
169
+ # REFERENCES
170
+
171
+ Rahaf Aljundi, Punarjay Chakravarty, and Tinne Tuytelaars. Expert gate: Lifelong learning with a network of experts. In Conference on Computer Vision and Pattern Recognition, CVPR, 2017.
172
+ Eden Belouadah and Adrian Popescu. Deesil: Deep-shallow incremental learning. TaskCV Workshop @ ECCV 2018., 2018.
173
+ Arslan Chaudhry, Puneet Kumar Dokania, Thalaiyasingam Ajanthan, and Philip H. S. Torr. Riemannian walk for incremental learning: Understanding forgetting and intransigence. In Computer Vision - ECCV 2018 - 15th European Conference, Munich, Germany, September 8-14, 2018, Proceedings, Part XI, Lecture Notes in Computer Science, pp. 556-572, 2018.
174
+ Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Fei-Fei Li. Imagenet: A large-scale hierarchical image database. In 2009 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR 2009), 20-25 June 2009, Miami, Florida, USA, pp. 248-255, 2009.
175
+ Thang Doan, Seyed Iman Mirzadeh, and Mehrdad Farajtabar. Continual learning beyond a single model. arXiv preprint arXiv:2202.09826, 2022.
176
+ Robert M French. Catastrophic forgetting in connectionist networks. Trends in cognitive sciences, 3 (4):128-135, 1999.
177
+ Tyler L. Hayes and Christopher Kanan. Lifelong machine learning with deep streaming linear discriminant analysis. In The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, June 2020a.
178
+ Tyler L Hayes and Christopher Kanan. Lifelong machine learning with deep streaming linear discriminant analysis. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition workshops, pp. 220-221, 2020b.
179
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Conference on Computer Vision and Pattern Recognition, CVPR, 2016.
180
+ Dan Hendrycks, Norman Mu, Ekin Dogus Cubuk, Barret Zoph, Justin Gilmer, and Balaji Lakshminarayanan. Augmix: A simple data processing method to improve robustness and uncertainty. In International Conference on Learning Representations, 2019.
181
+ Shota Horiguchi, Daiki Ikami, and Kiyoharu Aizawa. Significance of softmax-based features in comparison to distance metric learning-based features. IEEE transactions on pattern analysis and machine intelligence, 42(5):1279-1285, 2019.
182
+ Saihui Hou, Xinyu Pan, Chen Change Loy, Zilei Wang, and Dahua Lin. Learning a unified classifier incrementally via rebalancing. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 831-839, 2019.
183
+ Julio Hurtado, Alain Raymond, and Alvaro Soto. Optimizing reusable knowledge for continual learning via metalearning. Advances in Neural Information Processing Systems, 34:14150-14162, 2021.
184
+
185
+ James Kirkpatrick, Razvan Pascanu, Neil Rabinowitz, Joel Veness, Guillaume Desjardins, Andrei A Rusu, Kieran Milan, John Quan, Tiago Ramalho, Agnieszka Grabska-Barwinska, et al. Overcoming catastrophic forgetting in neural networks. Proceedings of the national academy of sciences, 114 (13):3521-3526, 2017.
186
+ Alex Krizhevsky. Learning multiple layers of features from tiny images. Technical report, University of Toronto, 2009.
187
+ Zhizhong Li and Derek Hoiem. Learning without forgetting. In Computer Vision - ECCV 2016 - 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV, volume 9908 of Lecture Notes in Computer Science, pp. 614-629, 2016.
188
+ Yu Liu, Sarah Parisot, Gregory Slabaugh, Xu Jia, Ales Leonardis, and Tinne Tuytelaars. More classifiers, less forgetting: A generic multi-classifier paradigm for incremental learning. In European Conference on Computer Vision, pp. 699-716. Springer, 2020.
189
+ Marc Masana, Xialei Liu, Bartlomiej Twardowski, Mikel Menta, Andrew D Bagdanov, and Joost van de Weijer. Class-incremental learning: Survey and performance evaluation on image classification. IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1-20, 2022.
190
+ Michael McCloskey and Neal J Cohen. Catastrophic interference in connectionist networks: The sequential learning problem. In *Psychology of learning and motivation*, volume 24, pp. 109-165. Elsevier, 1989.
191
+ Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. Advances in neural information processing systems, 32, 2019.
192
+ Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In Proceedings of the IEEE International Conference on Computer Vision, pp. 1406-1415, 2019.
193
+ Grégoire Petit, Adrian Popescu, Hugo Schindler, David Picard, and Bertrand Delezoide. FeTrIL: Feature translation for exemplar-free class-incremental learning. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pp. 3911-3920, 2023.
194
+ Qi Qin, Wenpeng Hu, Han Peng, Dongyan Zhao, and Bing Liu. Bns: Building network structures dynamically for continual learning. Advances in Neural Information Processing Systems, 34: 20608-20620, 2021.
195
+ Dushyant Rao, Francesco Visin, Andrei A. Rusu, Razvan Pascanu, Yee Whye Teh, and Raia Hadsell. Continual unsupervised representation learning. In Advances in Neural Information Processing Systems 32: Annual Conference on Neural Information Processing Systems 2019, NeurIPS 2019, December 8-14, 2019, Vancouver, BC, Canada, pp. 7645-7655, 2019.
196
+ Leonardo Ravaglia, Manuele Rusci, Davide Nadalini, Alessandro Capotondi, Francesco Conti, and Luca Benini. A tinyml platform for on-device continual learning with quantized latent replays. IEEE Journal on Emerging and Selected Topics in Circuits and Systems, 11(4):789-802, 2021.
197
+ Sylvestre-Alvise Rebuffi, Alexander Kolesnikov, Georg Sperl, and Christoph H. Lampert. icarl: Incremental classifier and representation learning. In 2017 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2017, Honolulu, HI, USA, July 21-26, 2017, pp. 5533-5542, 2017.
198
+ Yangjun Ruan, Saurabh Singh, Warren R. Morningstar, Alexander A. Alemi, Sergey Ioffe, Ian Fischer, and Joshua V. Dillon. Weighted ensemble self-supervised learning. In 11th International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, Conference Track Proceedings, 2023.
199
+ Andrei A. Rusu, Neil C. Rabinowitz, Guillaume Desjardins, Hubert Soyer, James Kirkpatrick, Koray Kavukcuoglu, Razvan Pascanu, and Raia Hadsell. Progressive neural networks. CoRR, abs/1606.04671, 2016.
200
+
201
+ Joan Serrà, Didac Suris, Marius Miron, and Alexandros Karatzoglou. Overcoming catastrophic forgetting with hard attention to the task. In Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, pp. 4555-4564, 2018.
202
+ James Seale Smith, Yen-Chang Hsu, Jonathan Balloch, Yilin Shen, Hongxia Jin, and Zsolt Kira. Always be dreaming: A new approach for data-free class-incremental learning. In IEEE/CVF International Conference on Computer Vision, ICCV 2021, pp. 9354-9364. IEEE, 2021.
203
+ Gido M Van de Ven and Andreas S Tolias. Three scenarios for continual learning. arXiv preprint arXiv:1904.07734, 2019.
204
+ Gido M. van de Ven, Zhe Li, and Andreas S. Tolias. Class-incremental learning with generative classifiers. In IEEE Conference on Computer Vision and Pattern Recognition Workshops, CVPR Workshops 2021, virtual, June 19-25, 2021, pp. 3611-3620, 2021.
205
+ Tom Veniat, Ludovic Denoyer, and Marc'Aurelio Ranzato. Efficient continual learning with modular networks and task-driven priors. arXiv preprint arXiv:2012.12631, 2020.
206
+ Fu-Yun Wang, Da-Wei Zhou, Han-Jia Ye, and De-Chuan Zhan. FOSTER: feature boosting and compression for class-incremental learning. In Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXV, pp. 398-414, 2022a.
207
+ Liyuan Wang, Xingxing Zhang, Qian Li, Jun Zhu, and Yi Zhong. Coscl: Cooperation of small continual learners is stronger than a big one. In Computer Vision - ECCV 2022 - 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXVI, pp. 254-271, 2022b.
208
+ Zifeng Wang, Zizhao Zhang, Chen-Yu Lee, Han Zhang, Ruoxi Sun, Xiaoqi Ren, Guolong Su, Vincent Perot, Jennifer Dy, and Tomas Pfister. Learning to prompt for continual learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 139-149, 2022c.
209
+ Yue Wu, Yinpeng Chen, Lijuan Wang, Yuancheng Ye, Zicheng Liu, Yandong Guo, and Yun Fu. Large scale incremental learning. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2019, Long Beach, CA, USA, June 16-20, 2019, pp. 374-382, 2019.
210
+ Shipeng Yan, Jiangwei Xie, and Xuming He. DER: dynamically expandable representation for class incremental learning. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2021, virtual, June 19-25, 2021, pp. 3014-3023, 2021.
211
+ Yang Yang, Zhiying Cui, Junjie Xu, Changhong Zhong, Ruixuan Wang, and Wei-Shi Zheng. Continual learning with bayesian model based on a fixed pre-trained feature extractor. In Medical Image Computing and Computer Assisted Intervention - MICCAI 2021 Strasbourg, France, September, 27 - October 1, 2021, Proceedings, Part V, pp. 397-406, 2021.
212
+ Lu Yu, Bartlomiej Twardowski, Xialei Liu, Luis Herranz, Kai Wang, Yongmei Cheng, Shangling Jui, and Joost van de Weijer. Semantic drift compensation for class-incremental learning. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 6980-6989. IEEE, 2020.
213
+ Bowen Zhao, Xi Xiao, Guojun Gan, Bin Zhang, and Shu-Tao Xia. Maintaining discrimination and fairness in class incremental learning. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition, CVPR 2020, Seattle, WA, USA, June 13-19, 2020, pp. 13205-13214. IEEE, 2020.
214
+ Da-Wei Zhou, Fu-Yun Wang, Han-Jia Ye, and De-Chuan Zhan. Pycil: A python toolbox for class incremental learning, 2021.
215
+ Fei Zhu, Zhen Cheng, Xu-yao Zhang, and Cheng-lin Liu. Class-incremental learning via dual augmentation. Advances in Neural Information Processing Systems, 34, 2021a.
216
+ Fei Zhu, Xu-Yao Zhang, Chuang Wang, Fei Yin, and Cheng-Lin Liu. Prototype augmentation and self-supervision for incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5871-5880, 2021b.
217
+ Kai Zhu, Wei Zhai, Yang Cao, Jiebo Luo, and Zheng-Jun Zha. Self-sustaining representation expansion for non-exemplar class-incremental learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9296-9305, 2022.
218
+
219
+ # A APPENDICES
220
+
221
+ # A.1 IMPLEMENTATION DETAILS
222
+
223
+ All experiments are averaged over three runs, and all methods are trained from scratch as in their original papers. We implemented SEED in the FACIL (Masana et al., 2022) framework using the Python 3 programming language and the PyTorch (Paszke et al., 2019) machine learning library. We utilized a computer equipped with an AMD EPYC 7742 CPU and an NVIDIA A100 GPU to perform the experiments. On this machine, SEED takes around 1 hour to train on CIFAR-100 for $T = 10$.
224
+
225
+ For all experiments, SEED is trained using the Stochastic Gradient Descent (SGD) optimizer for 200 epochs per task, with a momentum of 0.9, a weight decay factor equal to 0.0005, $\alpha$ set to 0.99, $\tau$ set to 3, and an initial learning rate of 0.05. The learning rate decreases ten times after 60, 120, and 160 epochs. As the knowledge distillation loss, we employ the L2 distance calculated for embeddings in the latent space. We set the default number of experts to 5 and the class representation dimensionality $S$ to 64. In order to find the best hyperparameters for SEED, we perform a manual hyperparameter search on a validation dataset.
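The stated schedule corresponds to roughly the following PyTorch setup (a sketch only; `expert_params` stands for the trainable parameters of the selected expert and its temporary head, which is an assumption on our side):

```python
import torch

def make_optimization(expert_params):
    """SGD matching the reported hyperparameters: lr 0.05, momentum 0.9, weight decay 5e-4,
    with the learning rate divided by 10 after epochs 60, 120 and 160 (200 epochs per task)."""
    optimizer = torch.optim.SGD(expert_params, lr=0.05, momentum=0.9, weight_decay=5e-4)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=0.1)
    return optimizer, scheduler
```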
226
+
227
+ Tab. 1, Fig. 4 and Fig. 9. We perform experiments for all methods using implementations provided in the FACIL and PyCIL (Zhou et al., 2021) frameworks. We use ResNet32 as a feature extractor for CIFAR-100 and ResNet18 for DomainNet and ImageNet-Subset. For DomainNet $T = 12$, we use 25 classes per task; for $T = 24$, we use 10; for $T = 36$, we use 5.
228
+
229
+ All methods were trained using the same data augmentations: random crops, horizontal flips, cutouts, and AugMix (Hendrycks et al., 2019). For baseline methods, we set default hyperparameters provided in benchmarks. However, for LwF, we use $\lambda = 10$ as we observed that this significantly improved its performance.
230
+
231
+ Tab. 2. For baseline results, we provide results reported in (Petit et al., 2023). All CIL methods use the same data augmentations: random resized crops, horizontal flips, cutouts, and AugMix (Hendrycks et al., 2019).
232
+
233
+ Tab. 3. The setting and baseline results are identical to (Wang et al., 2022b). We train SEED with the same data augmentation methods as other methods: horizontal flips and random crops. Here, we use five experts consisting of the Resnet32 network.
234
+
235
+ Fig. 5 and Fig. 6. We calculate relative accuracy by subtracting each expert's accuracy from the average accuracy of all experts, as in (Wang et al., 2022b). We perform 10 runs with random seeds.
236
+
237
+ Fig. 7. Below we report the range of used parameters for plotting the forgetting-intransigence curves (Fig. 7 - left).
238
+
239
+ - LwF: $\lambda \in \{1,2,3,5,7,10,15,20,25,50,100\}$
240
+ - EWC: $\lambda \in \{100, 500, 1000, 2500, 5000, 7500, 10000\}$
241
+ - SEED $K = 1$ : $\alpha \in \{0.0, 0.25, 0.5, 0.9, 0.95, 0.97, 0.99, 0.999\}$
242
+ - SEED $K = 3$ : $\alpha \in \{0.9, 0.95, 0.96, 0.97, 0.98, 0.99, 0.999\}$
243
+ - SEED $K = 5$ : $\alpha \in \{0.5, 0.9, 0.97, 0.999\}$
244
+
245
+ # A.2 RESNET ARCHITECTURE MODIFICATION
246
+
247
+ The backbone for the SEED method can be any popular neural network with its head removed. This study focuses on a family of modified ResNet (He et al., 2016) architectures.
248
+
249
+ ResNet architecture is a typical neural architecture used for the continual learning setting. In this work, we follow this standard. However, there are two minor changes to ResNet in our algorithm.
250
+
251
+ Due to ReLU activation functions placed at the end of each ResNet block, latent feature vectors of ResNet models consist of non-negative elements. That implies that every continuous random variable representing a class is defined on $[0; \infty)^S$ , where $S$ is the size of a latent vector. However, Gaussian distributions are defined for random variables of real values, which, in our case, reduces the ability to represent classes as multivariate Gaussian distributions. In order to alleviate this problem, we remove the last ReLU activation function from every block in the last layer of ResNet architecture.
252
+
253
+ Secondly, the size $S$ of the latent sample representation should be adjustable. There are two reasons for that. Firstly, if $S$ is too large and the number of class samples is low, $\Sigma$ can be a singular matrix. This implies that the likelihood function might not be well-defined. Secondly, adjusting $S$ allows us to reduce the number of parameters the algorithm requires. We overcome this issue by adding a 1x1 convolution layer with $S$ kernels after the last block of the architecture. For example, this allows us to represent feature vectors of ResNet18 with 64 elements instead of 512.
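A sketch of a feature extractor with these two modifications, written for a torchvision-style ResNet-18 (the exact layer surgery is ours and only illustrative; removing the in-block ReLUs of the last layer would additionally require editing the block definitions themselves):

```python
import torch.nn as nn
from torchvision.models import resnet18

class SEEDBackbone(nn.Module):
    """Illustrative SEED feature extractor: a 1x1 convolution shrinks the 512-dim
    ResNet-18 features to S dimensions, and no ReLU is applied on top of the final
    block output, so latent features may take negative values."""
    def __init__(self, S=64):
        super().__init__()
        net = resnet18()
        self.stem = nn.Sequential(net.conv1, net.bn1, net.relu, net.maxpool,
                                  net.layer1, net.layer2, net.layer3, net.layer4)
        self.bottleneck = nn.Conv2d(512, S, kernel_size=1)  # reduces latent size to S
        self.pool = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        return self.pool(self.bottleneck(self.stem(x))).flatten(1)  # (B, S)
```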
254
+
255
+ # A.3 MEMORY FOOTPRINT
256
+
257
+ SEED requires:
258
+
259
+ $$
260
+ \left|\theta_{f}\right| + K\left|\theta_{g}\right| + \sum_{i=1}^{K}\sum_{j=i}^{T}\left|C_{j}\right|\left(S + \frac{S(S+1)}{2}\right) \tag{3}
261
+ $$
262
+
263
+ parameters to run, where $|\theta_f|$ and $|\theta_g|$ represent the number of parameters of the $f$ and $g$ functions, respectively, $S$ is the dimensionality of the embedding space, $K$ is the number of experts, $T$ is the number of tasks, and $|C_j|$ is the number of classes in the $j$-th task.
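As a rough helper, Eq. 3 can be evaluated as follows (a sketch; the backbone parameter counts passed in are placeholders, not measured values):

```python
def seed_param_count(theta_f, theta_g, K, S, classes_per_task):
    """Total parameter count from Eq. 3: shared layers, K expert-specific networks,
    and per-class Gaussians (S mean values plus S(S+1)/2 covariance entries),
    where expert i only stores distributions for tasks j >= i."""
    T = len(classes_per_task)
    gauss_per_class = S + S * (S + 1) // 2
    stored_classes = sum(sum(classes_per_task[j] for j in range(i, T)) for i in range(K))
    return theta_f + K * theta_g + stored_classes * gauss_per_class

# Example: 5 experts, 10 equal tasks of 10 classes, S = 64, placeholder backbone sizes.
# seed_param_count(theta_f=10_000, theta_g=450_000, K=5, S=64, classes_per_task=[10] * 10)
```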
264
+
265
+ This total number of parameters used by SEED can be limited in several ways:
266
+
267
+ - Decreasing S by adding a convolutional $1 \times 1$ bottleneck layer at the network's end.
268
+ - Pruning parameters.
269
+ - Performing weight quantization.
270
+ - Using a simpler feature extractor.
271
+ - Increasing the number of shared layers (moving parameters from $g$ into the $f$ function).
272
+ - Simplifying the multivariate Gaussian distributions to diagonal covariance matrices or prototypes.
273
+
274
+ # A.4 NUMBER OF PARAMETERS VS ACCURACY TRADE-OFF
275
+
276
+ To investigate the trade-off between the number of SEED's parameters and the overall average incremental accuracy of the method, we conducted several experiments with different numbers of experts and shared layers (in the previous experiment in Tab. 3, we only adjusted the number of shared layers). We see that these two factors indeed control and decrease the number of required parameters, e.g., sharing the first 25 layers in ResNet32 decreases the memory footprint by 0.8 million parameters when we use five experts. However, it slightly hurts the performance of SEED, as the overall average incremental accuracy drops by $4.4\%$. These results, combined with the expected forgetting/intransigence, can guide an application of SEED to a particular problem.
277
+
278
+ We additionally compare SEED to the best competitor, FeTrIL (with ResNet32), in a low-parameter regime. FeTrIL stores the feature extractor without the linear head and a prototype of each class. For SEED, we utilize a ResNet20 network with the number of kernels in the third block changed from 64 to 48. We use 5 experts which share the first 17 layers. We present results in Tab. 5. We report the number of network weights and the total number of parameters, which for SEED also includes the multivariate Gaussian distributions. SEED has 13K fewer parameters than FeTrIL but achieves better accuracy in all 3 settings.
279
+
280
+ Table 5: SEED outperforms competitors in terms of performance and number of parameters on equally split CIFAR-100; however, it requires decreasing the size of the feature extractor network and sharing the first 17 layers.
281
+
282
+ <table><tr><td rowspan="2">CIL Method</td><td rowspan="2">Network</td><td rowspan="2">Network weights</td><td rowspan="2">Total params.</td><td colspan="3">CIFAR-100</td></tr><tr><td>T=10</td><td>T=20</td><td>T=50</td></tr><tr><td>EWC* (Kirkpatrick et al., 2017) (PNAS&#x27;17)</td><td>ResNet32</td><td>473K</td><td>473K</td><td>24.5</td><td>21.2</td><td>15.9</td></tr><tr><td>LwF* (Rebuffi et al., 2017) (CVPR&#x27;17)</td><td>ResNet32</td><td>473K</td><td>473K</td><td>45.9</td><td>27.4</td><td>20.1</td></tr><tr><td>FeTrIL (Petit et al., 2023) (WACV&#x27;23)</td><td>ResNet32</td><td>473K</td><td>473K</td><td>46.3±0.3</td><td>38.7±0.3</td><td>27.0±1.2</td></tr><tr><td>SEED</td><td>ResNet20*</td><td>339K</td><td>460K</td><td>54.7±0.2</td><td>48.6±0.3</td><td>33.1±1.1</td></tr></table>
283
+
284
+ # A.5 PRETRAINING SEED
285
+
286
+ We study the impact of using a pretrained network with SEED on its performance. For this purpose we utilize ResNet-18 with weights pretrained on ImageNet-1K as a feature extractor for every expert.
287
+
288
+ ![](images/ef1c9b0d767187e8db00fc555924b60b9b805ebb155fa370207ab2ba24e5113a.jpg)
289
+
290
+ ![](images/4d57e61b05e2eef80f4307d8e6b2993d7ec89e9e76ecb9375f2f144220355eba.jpg)
291
+
292
+ ![](images/d4e83dec6aaae769a6893ce30a2eb245635bc21e58516e406d5068945a0496d1.jpg)
293
+ Figure 8: Impact of the number of experts and the number of shared layers on the accuracy and the number of parameters of SEED. We utilize CIFAR-100 with $T = 10$. We can observe that accuracy drops when decreasing the number of experts and increasing the number of shared layers.
294
+
295
+ ![](images/23fbb2d4f79916963a28fe34104ccde80d9031211221bc891bf126609f2bd25d.jpg)
296
+
297
+ <table><tr><td rowspan="2">DomainNet |T|</td><td colspan="2">Avg. Inc. Accuracy (%)</td><td colspan="2">Forgetting (%)</td></tr><tr><td>From scratch</td><td>Pretrained</td><td>From scratch</td><td>Pretrained</td></tr><tr><td>12</td><td>45.0</td><td>53.1</td><td>12.1</td><td>12.8</td></tr><tr><td>24</td><td>44.9</td><td>54.2</td><td>11.2</td><td>12.1</td></tr><tr><td>36</td><td>39.2</td><td>53.6</td><td>13.7</td><td>15.6</td></tr></table>
298
+
299
+ Table 6: Pretraining experts in SEED increases its accuracy while slightly increasing forgetting. We compare a ResNet-18 with weights pretrained on ImageNet-1K to a randomly initialized ResNet-18 as the feature extractor of each expert.
300
+
301
+ We compare it to SEED initialized with random weights (training from scratch) in Tab. 6. Pretrained SEED achieves better average incremental accuracy by $8.1\%$, $9.3\%$, and $14.4\%$ on DomainNet split into 12, 24, and 36 tasks, respectively.
302
+
303
+ # A.6 ADDITIONAL RESULTS
304
+
305
+ In this section, we provide more experimental results. Fig. 10 gives insight into the diversity of experts on CIFAR-100 for $T = 50$ and 5 experts. We measure the overlap value per expert in each task, given by Eq. 2. The average value of this function differs between tasks, e.g., for task 49 it equals ≈ 3.5, while for task 33 it equals ≈ 18.0. This is due to the semantic similarity between classes in a given task: classes in task 49 (cups, bowls) are semantically more similar than classes in task 33 (bed, dolphin). However, in each task we can observe significant variance between the values for different experts. This shows that classes overlap differently in different experts, so the experts are diversified, which allows SEED to achieve strong results.
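Eq. 2 itself is defined in the main text and not reproduced in this excerpt. As an illustrative stand-in for measuring how strongly two class distributions overlap in one expert's latent space, the sketch below uses a symmetrized KL divergence between diagonal Gaussians (low divergence means high overlap); the actual overlap function in Eq. 2 may differ.

```python
import numpy as np

def sym_kl_diag(mu1, var1, mu2, var2):
    """Symmetrized KL divergence between two diagonal Gaussians; a small value
    means the two class distributions overlap strongly in the latent space."""
    kl12 = 0.5 * np.sum(np.log(var2 / var1) + (var1 + (mu1 - mu2) ** 2) / var2 - 1.0)
    kl21 = 0.5 * np.sum(np.log(var1 / var2) + (var2 + (mu1 - mu2) ** 2) / var1 - 1.0)
    return kl12 + kl21

# Two hypothetical classes as seen by one expert in a 4-dimensional latent space.
mu_a, var_a = np.zeros(4), np.ones(4)
mu_b, var_b = np.full(4, 0.5), np.full(4, 1.5)
print(sym_kl_diag(mu_a, var_a, mu_b, var_b))
```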
306
+
307
+ In Fig. 9, we present the accuracy obtained after each task for the equal-split settings of Table 1. We report results for the CIFAR-100 and DomainNet datasets. We can observe that, on DomainNet, SEED achieves $15\%$ better accuracy after the last incremental step than FeTrIL. On CIFAR-100, SEED achieves $20\%$ and $16\%$ better accuracy than the best competing method. This shows that SEED achieves superior results to state-of-the-art methods in equal-split settings and under large domain shifts.
308
+
309
+ Table 7 presents an additional ablation study for SEED. We test various ways to approximate class distributions in the latent space on the CIFAR-100 dataset with $T = 10$. First, we replace the multivariate Gaussian distribution with a Gaussian Mixture Model (GMM) consisting of 2 multivariate Gaussian
310
+
311
+ ![](images/a865518f59eefbd49b600ab1c97259ed71a67c989769b792268dadf48229c2a4.jpg)
312
+
313
+ ![](images/3e0d3b08f1b9b85e69048846ab1d6db01ebbc97a254cef181af04ebf9e48eeb0.jpg)
314
+
315
+ ![](images/93767763b87797b9789657c1b2fd16e298b5626389d5241e371ad0b738e26cd2.jpg)
316
+ Figure 9: Accuracy after each task for equal splits on CIFAR-100 and DomainNet. SEED significantly outperforms other methods in equal-split scenarios with many tasks (top) and with larger data shifts (bottom).
317
+
318
+ ![](images/84476f7a37a73d91b714cbf3d168645889d9df1a27d818eabb7468a62b5853e4.jpg)
319
+
320
+ distributions. This slightly reduces the accuracy (by $1.2\%$). Then, we abandon multivariate Gaussians and approximate classes using 2 and 3 Gaussian distributions with diagonal covariance matrices, which decreases the accuracy by a large margin. We also approximate classes using their prototypes (centroids) in the latent space, which likewise reduces the performance of SEED.
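To make the ablated variants concrete, the sketch below builds the three class representations compared in Table 7 from the same set of latent features and scores a query vector with each; the scoring rules (log-density and negative distance to the centroid) are standard choices, not necessarily the exact ones used by SEED.

```python
import numpy as np
from scipy.stats import multivariate_normal

rng = np.random.default_rng(0)
feats = rng.normal(size=(200, 8))   # latent features of one class (hypothetical 8-d space)
z = rng.normal(size=8)              # a query latent vector to be scored

# 1) Full multivariate Gaussian: mean + full covariance, scored by log-density.
mu = feats.mean(axis=0)
full_score = multivariate_normal(mu, np.cov(feats, rowvar=False)).logpdf(z)

# 2) Diagonal Gaussian: keep only per-dimension variances.
diag_score = multivariate_normal(mu, np.diag(feats.var(axis=0))).logpdf(z)

# 3) Prototype (centroid): score by negative Euclidean distance to the class mean.
proto_score = -np.linalg.norm(z - mu)

print(full_score, diag_score, proto_score)
```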
321
+
322
+ Table 7: Ablation study of SEED for the CIL setting with $T = 10$ on ResNet32 and CIFAR-100. Avg. inc. accuracy is reported. We test different variants of the class representation (Gaussian Mixture Model, diagonal covariance matrices, or prototypes). SEED performs best when used as designed.
323
+
324
+ <table><tr><td>Approach</td><td>Acc. (%)</td></tr><tr><td>SEED (5 experts)</td><td>61.7±0.5</td></tr><tr><td>w/ 2× multivariate</td><td>60.5±0.7</td></tr><tr><td>w/ 2× diagonal</td><td>53.8±0.1</td></tr><tr><td>w/ 3× diagonal</td><td>53.8±0.3</td></tr><tr><td>w/ prototypes</td><td>54.1±0.3</td></tr></table>
325
+
326
+ ![](images/cd863cbca9eca4894cfd593232b0f057d731682b5076e031c68dbd3cb89d056f.jpg)
327
+ Figure 10: Overlap of class distributions in each task per expert on the CIFAR-100 dataset with a $T = 50$ split and random class order. Diversification by data yields high variation in overlap values within each task, which shows that experts are diversified and learn different features. Average overlap values differ between tasks, as they depend on the semantic similarity of classes. Classes in task 49 are semantically similar (cups, bowls), while classes in task 33 are different (beds, dolphins).
328
+
329
+ ![](images/c556e964764cc24faa819cba7f953f735f8212674eb52a20b65a41d9b879ec38.jpg)
330
+
331
+ ![](images/8f7308559a8aaa844bbf3eb8e8c74b5cc261d6679f252315631e4c15bf282417.jpg)
332
+ Figure 11: Diversity of experts on the CIFAR-100 dataset with a $T = 20$ split, different seeds, and random class order. The presented metric is relative accuracy (%). Black squares represent experts selected to be fine-tuned on a given task. We can observe that experts specialize in different tasks.
2401.10xxx/2401.10191/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89b6ac32a47215bed38e6b1cf5c44aea9140f141c5fd51e561c2e5aa38a973df
3
+ size 985615
2401.10xxx/2401.10191/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10208/b4ec0b04-6063-4b2e-8a46-5bc043f27b6d_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10208/b4ec0b04-6063-4b2e-8a46-5bc043f27b6d_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10208/b4ec0b04-6063-4b2e-8a46-5bc043f27b6d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99a36d4c6705e6f7ca37dd342da1f65bbd1d839884cf8be277f15a2354ea27ae
3
+ size 5520799
2401.10xxx/2401.10208/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10208/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b45b75002189cc5e91b6ba1dbcb8726717d7801ca2a7660439c361733b8986d
3
+ size 1646850
2401.10xxx/2401.10208/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10215/95513a42-9487-4a29-97c5-eb93bcc107a7_content_list.json ADDED
@@ -0,0 +1,2058 @@
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "GPAVATAR: GENERALIZABLE AND PRECISE HEAD AVATAR FROM IMAGE(S)",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 171,
8
+ 99,
9
+ 823,
10
+ 148
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Xuangeng Chu $^{1,2*}$ Yu Li $^{2\\dagger}$ Ailing Zeng $^{2}$ Tianyu Yang $^{2}$ Lijian Lin $^{2}$ Yunfei Liu $^{2}$ Tatsuya Harada $^{1,3\\dagger}$",
17
+ "bbox": [
18
+ 181,
19
+ 171,
20
+ 812,
21
+ 202
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ The University of Tokyo $^{2}$ International Digital Economy Academy (IDEA) $^{3}$ RIKEN AIP {xuangeng.chu, harada}@mi.t.u-tokyo.ac.jp {liyu, zengailing, yangtianyu, linlijian, liuyunfei}@idea.edu.cn",
28
+ "bbox": [
29
+ 181,
30
+ 203,
31
+ 808,
32
+ 247
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "image",
38
+ "img_path": "images/f958e67fff484195e3c75244490227c67ce82c24cb25b8352d81171ca40bf9f9.jpg",
39
+ "image_caption": [
40
+ "Figure 1: Our GPAvatar is able to reconstruct 3D head avatars from even a single input (i.e., one-shot), with strong generalization and precise expression control. The leftmost images are the inputs, and the subsequent images depict reenactment results. Inset images display the corresponding driving faces. Additionally, the first row shows three novel view results."
41
+ ],
42
+ "image_footnote": [],
43
+ "bbox": [
44
+ 173,
45
+ 268,
46
+ 823,
47
+ 420
48
+ ],
49
+ "page_idx": 0
50
+ },
51
+ {
52
+ "type": "text",
53
+ "text": "ABSTRACT",
54
+ "text_level": 1,
55
+ "bbox": [
56
+ 450,
57
+ 500,
58
+ 545,
59
+ 513
60
+ ],
61
+ "page_idx": 0
62
+ },
63
+ {
64
+ "type": "text",
65
+ "text": "Head avatar reconstruction, crucial for applications in virtual reality, online meetings, gaming, and film industries, has garnered substantial attention within the computer vision community. The fundamental objective of this field is to faithfully recreate the head avatar and precisely control expressions and postures. Existing methods, categorized into 2D-based warping, mesh-based, and neural rendering approaches, present challenges in maintaining multi-view consistency, incorporating non-facial information, and generalizing to new identities. In this paper, we propose a framework named GPAvatar that reconstructs 3D head avatars from one or several images in a single forward pass. The key idea of this work is to introduce a dynamic point-based expression field driven by a point cloud to precisely and effectively capture expressions. Furthermore, we use a Multi Tri-planes Attention (MTA) fusion module in the tri-planes canonical field to leverage information from multiple input images. The proposed method achieves faithful identity reconstruction, precise expression control, and multi-view consistency, demonstrating promising results for free-viewpoint rendering and novel view synthesis. Code is available at https://github.com/xg-chu/GPAvatar.",
66
+ "bbox": [
67
+ 228,
68
+ 527,
69
+ 767,
70
+ 752
71
+ ],
72
+ "page_idx": 0
73
+ },
74
+ {
75
+ "type": "text",
76
+ "text": "1 INTRODUCTION",
77
+ "text_level": 1,
78
+ "bbox": [
79
+ 173,
80
+ 775,
81
+ 336,
82
+ 789
83
+ ],
84
+ "page_idx": 0
85
+ },
86
+ {
87
+ "type": "text",
88
+ "text": "Head avatar reconstruction holds immense potential in various applications, including virtual reality, online meetings, gaming, and the film industry. In recent years, this field has garnered significant attention within the computer vision community. The primary objective of head avatar reconstruction is to faithfully recreate the source head while enabling precise control over expressions and posture. This capability will facilitate the generation of desired new expressions and poses for the source portrait (Li et al., 2023a; Yu et al., 2023b; Li et al., 2023b).",
89
+ "bbox": [
90
+ 169,
91
+ 805,
92
+ 826,
93
+ 890
94
+ ],
95
+ "page_idx": 0
96
+ },
97
+ {
98
+ "type": "header",
99
+ "text": "Published as a conference paper at ICLR 2024",
100
+ "bbox": [
101
+ 171,
102
+ 32,
103
+ 478,
104
+ 47
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "page_footnote",
110
+ "text": "*This work was partially done during an internship at IDEA.",
111
+ "bbox": [
112
+ 189,
113
+ 896,
114
+ 550,
115
+ 909
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "page_footnote",
121
+ "text": "† Corresponding Author.",
122
+ "bbox": [
123
+ 192,
124
+ 910,
125
+ 338,
126
+ 922
127
+ ],
128
+ "page_idx": 0
129
+ },
130
+ {
131
+ "type": "aside_text",
132
+ "text": "arXiv:2401.10215v1 [cs.CV] 18 Jan 2024",
133
+ "bbox": [
134
+ 22,
135
+ 267,
136
+ 58,
137
+ 707
138
+ ],
139
+ "page_idx": 0
140
+ },
141
+ {
142
+ "type": "page_number",
143
+ "text": "1",
144
+ "bbox": [
145
+ 493,
146
+ 948,
147
+ 503,
148
+ 959
149
+ ],
150
+ "page_idx": 0
151
+ },
152
+ {
153
+ "type": "text",
154
+ "text": "Some exploratory methods have partially achieved this goal and can be roughly categorized into three types: 2D-based warping methods (Yin et al., 2022), mesh-based methods (Khakhulin et al., 2022), and neural rendering methods (Sun et al., 2023; Ma et al., 2023; Li et al., 2023a; Yu et al., 2023b; Li et al., 2023b). Among these, 2D-based methods warp the original image to new expressions with a warping field estimated from sparse landmarks, and then synthesize the appearance through an encoder and decoder. However, these methods struggle to maintain multi-view consistency when there are significant changes in head pose due to their lack of necessary 3D constraints. Furthermore, these methods are unable to effectively decouple expressions and identity from the source portrait, leading to unfaithful driving results. Mesh-based methods explicitly model the source portrait with a 3D Morphable Model (3DMM) (Blanz & Vetter, 1999; Paysan et al., 2009; Li et al., 2017; Gerig et al., 2018). By incorporating 3D information, these methods effectively address the issue of multi-view consistency. However, due to the limitations in the modeling and expressive capacity of 3DMM, the reconstructed head often lacks non-facial information such as hair, and the expressions are often unnatural. With the outstanding performance of NeRF (neural radiance field) in multi-view image synthesis, the latest methods have started to leverage NeRF for head avatar reconstruction(Xu et al., 2023; Zielonka et al., 2023; Zheng et al., 2022; Sun et al., 2023; Ma et al., 2023). Compared to 2D and mesh-based methods, NeRF-based methods have shown the ability to synthesize results that are 3D-consistent and include non-facial information. However, these methods can't generalize well to new identities. Some of these methods require a large amount of portrait data for reconstruction, and some involve time-consuming optimization processes during inference.",
155
+ "bbox": [
156
+ 169,
157
+ 103,
158
+ 826,
159
+ 382
160
+ ],
161
+ "page_idx": 1
162
+ },
163
+ {
164
+ "type": "text",
165
+ "text": "In this paper, we present a framework for reconstructing the source portrait in a single forward pass. Given one or several unseen images, our method reconstructs an animatable implicit head avatar representation. Some examples are shown in Fig. 1. The core challenge lies in faithfully reconstructing the head avatar from a single image and achieving precise control over expressions. To address this issue, we introduce a point cloud-driven dynamic expression field to precisely capture expressions and use a Multi Tri-planes Attention (MTA) module in the tri-planes canonical field to leverage information from multiple input images. The 3DMM point cloud-driven field provides natural and precise expression control and facilitates identity-expression decoupling. The merged tri-planes encapsulate a feature space that includes faithful identity information from the source portrait while modeling parts not covered by the 3DMM, such as shoulders and hair. The experiment verifies that our method generalizes well to unseen identities and enables precise expression control without test-time optimization, thereby enabling tasks such as free novel view synthesis and reenactment.",
166
+ "bbox": [
167
+ 169,
168
+ 388,
169
+ 823,
170
+ 555
171
+ ],
172
+ "page_idx": 1
173
+ },
174
+ {
175
+ "type": "text",
176
+ "text": "The major contributions of our work are as follows:",
177
+ "bbox": [
178
+ 171,
179
+ 561,
180
+ 513,
181
+ 575
182
+ ],
183
+ "page_idx": 1
184
+ },
185
+ {
186
+ "type": "list",
187
+ "sub_type": "text",
188
+ "list_items": [
189
+ "- We introduce a 3D head avatar reconstruction framework that achieves faithful reconstruction in a single forward pass and generalizes well to in-the-wild images.",
190
+ "- We propose a dynamic Point-based Expression Field (PEF) that allows for precise and natural cross-identity expression control.",
191
+ "- We propose a Multi Tri-planes Attention (MTA) fusion module to accept an arbitrary number of input images. It enables the incorporation of more information during inference, particularly beneficial for extreme inputs like closed eyes and occlusions."
192
+ ],
193
+ "bbox": [
194
+ 215,
195
+ 587,
196
+ 821,
197
+ 691
198
+ ],
199
+ "page_idx": 1
200
+ },
201
+ {
202
+ "type": "text",
203
+ "text": "2 RELATED WORK",
204
+ "text_level": 1,
205
+ "bbox": [
206
+ 171,
207
+ 713,
208
+ 344,
209
+ 729
210
+ ],
211
+ "page_idx": 1
212
+ },
213
+ {
214
+ "type": "text",
215
+ "text": "2.1 TALKING HEAD SYNTHESIS",
216
+ "text_level": 1,
217
+ "bbox": [
218
+ 171,
219
+ 744,
220
+ 408,
221
+ 758
222
+ ],
223
+ "page_idx": 1
224
+ },
225
+ {
226
+ "type": "text",
227
+ "text": "Previous methods for head synthesis can be categorized into deformation-based, mesh-based, and NeRF-based methods. Warping-based methods (Siarohin et al., 2019; Zakharov et al., 2020; Wang et al., 2021a; Yin et al., 2022; Drobyshev et al., 2022; Zhang et al., 2023) are popular among 2D generative methods. Usually, these methods apply deformation operations to the source image to drive the motion in the target image. Due to a lack of clear understanding and modeling of the 3D geometry of the head avatar, these methods often produce unrealistic distortions when the poses and expressions change a lot. Many subsequent works (Ren et al., 2021; Yin et al., 2022; Zhang et al., 2023) alleviated this problem by introducing 3DMM (Blanz & Vetter, 1999; Paysan et al., 2009; Li et al., 2017; Geric et al., 2018), but this problem still exists and limits the performance of 2D methods. To completely address this problem, many 3DMM-based works (Feng et al., 2021; Danecek et al., 2022; Khakhulin et al., 2022) reconstruct animatable avatars by estimating 3DMM",
228
+ "bbox": [
229
+ 169,
230
+ 771,
231
+ 823,
232
+ 924
233
+ ],
234
+ "page_idx": 1
235
+ },
236
+ {
237
+ "type": "header",
238
+ "text": "Published as a conference paper at ICLR 2024",
239
+ "bbox": [
240
+ 171,
241
+ 32,
242
+ 478,
243
+ 47
244
+ ],
245
+ "page_idx": 1
246
+ },
247
+ {
248
+ "type": "page_number",
249
+ "text": "2",
250
+ "bbox": [
251
+ 493,
252
+ 948,
253
+ 503,
254
+ 959
255
+ ],
256
+ "page_idx": 1
257
+ },
258
+ {
259
+ "type": "text",
260
+ "text": "parameters from portrait images. Among them, ROME(Khakhulin et al., 2022) estimates the 3DMM parameters, the offset of mesh vertex and the texture to render the results. However, although 3DMM provides strong priors for understanding the face, it focuses only on facial regions and cannot capture other detailed features such as hairstyles and accessories, and the fidelity is limited by the resolution of meshes, resulting in unnatural appearances in the reenactment images.",
261
+ "bbox": [
262
+ 169,
263
+ 103,
264
+ 823,
265
+ 174
266
+ ],
267
+ "page_idx": 2
268
+ },
269
+ {
270
+ "type": "text",
271
+ "text": "NeRF(Mildenhall et al., 2020) is a type of implicit 3D scene representation method known for its excellent performance in static scene reconstruction. Many works(Park et al., 2021a;b; Tretschk et al., 2021) try to extend it from static scenes to dynamic scenes, and there are also many works(Gafni et al., 2021; Zheng et al., 2022; Xu et al., 2023; Zielonka et al., 2023; Athar et al., 2023) that apply NeRF to human portrait reconstruction and animation. One of the research directions is to generate controllable 3D head avatars from random noise (Sun et al., 2023; Ma et al., 2023). While these methods can produce realistic and controllable results, achieving reconstruction requires GAN inversion, which is impractical in real-time scenarios. Another research direction is to utilize data from specific individuals for reconstruction (Gafni et al., 2021; Athar et al., 2022; Zheng et al., 2022; Xu et al., 2023; Bai et al., 2023; Zielonka et al., 2023). While the results are impressive, they cannot learn networks for different identities and require thousands of frames of personal image data, raising privacy concerns. At the same time, there are also some methods to use audio drivers to control avatar (Tang et al., 2022; Guo et al., 2021; Yu et al., 2023a), providing users with a more flexible and easy-to-use driver method.",
272
+ "bbox": [
273
+ 169,
274
+ 180,
275
+ 826,
276
+ 377
277
+ ],
278
+ "page_idx": 2
279
+ },
280
+ {
281
+ "type": "text",
282
+ "text": "2.2 ONE-SHOT HEAD AVATARS",
283
+ "text_level": 1,
284
+ "bbox": [
285
+ 171,
286
+ 406,
287
+ 403,
288
+ 420
289
+ ],
290
+ "page_idx": 2
291
+ },
292
+ {
293
+ "type": "image",
294
+ "img_path": "images/ae4540aa4d5db500531cb973bd6fc867f5541d507b9f91ddd448a30a439f7eb4.jpg",
295
+ "image_caption": [
296
+ "Figure 2: Differences from existing state-of-the-art methods. Existing methods may over-process expression information or use expression features, leading to expression detail loss. Our approach avoids this loss with a point-based expression field, and our method flexibly accepts single or multiple images as input, enhancing information gathering through our multi-tri-planes attention module."
297
+ ],
298
+ "image_footnote": [],
299
+ "bbox": [
300
+ 204,
301
+ 435,
302
+ 346,
303
+ 546
304
+ ],
305
+ "page_idx": 2
306
+ },
307
+ {
308
+ "type": "image",
309
+ "img_path": "images/0a23934c66b87596ad4022c8888ef68ef306a3eeaa9fe5a1c978a5d6b963cd36.jpg",
310
+ "image_caption": [],
311
+ "image_footnote": [],
312
+ "bbox": [
313
+ 354,
314
+ 435,
315
+ 485,
316
+ 546
317
+ ],
318
+ "page_idx": 2
319
+ },
320
+ {
321
+ "type": "image",
322
+ "img_path": "images/f2c6b7833df13908d5af57ff49935d0a0a196e042342b6dc2019a0c38e1cd648.jpg",
323
+ "image_caption": [],
324
+ "image_footnote": [],
325
+ "bbox": [
326
+ 495,
327
+ 435,
328
+ 638,
329
+ 546
330
+ ],
331
+ "page_idx": 2
332
+ },
333
+ {
334
+ "type": "image",
335
+ "img_path": "images/35074ae691639facc7e80419d3bbc482a2b20c4b3bdaf6298428cebdc1f72e8f.jpg",
336
+ "image_caption": [],
337
+ "image_footnote": [],
338
+ "bbox": [
339
+ 647,
340
+ 435,
341
+ 790,
342
+ 546
343
+ ],
344
+ "page_idx": 2
345
+ },
346
+ {
347
+ "type": "text",
348
+ "text": "To address these issues, some works (Trevithick et al., 2023; Hong et al., 2022; Li et al., 2023a; b; Yu et al., 2023b) have focused on reconstructing 3D avatars from arbitrary input images. Some methods can achieve static reconstruction (Trevithick et al., 2023; Hong et al., 2022), but they are unable to reanimate these digital avatars. There are also methods that utilize NeRF to achieve animatable one-shot forward reconstruction of target avatars, such as GOAvatar(Li et al., 2023b), NOFA(Yu et al., 2023b), and HideNeRF(Li et al., 2023a). GOAvatar (Li et al., 2023b) utilizes three sets of tri-planes to respectively represent the standard pose, image details, and expression. It also employs a finetuned GFPGAN (Wang et al., 2021b) network to enhance the details of the results. NOFA (Yu et al., 2023b) utilizes the rich 3D-consistent generative prior of 3D GAN to synthesize neural volumes of different faces and employs deformation fields to model facial dynamics. HideNeRF (Li et al., 2023a) utilizes a multi-resolution tri-planes representation and a 3DMM-based deformation field to generate reenactment images while enhancing identity consistency during the generation process. While these methods produce impressive results, they still have some limitations in expression-driven tasks. Some of these methods either rely on the rendering results of 3DMM as input to control deformation fields or directly use 3DMM parameters for expression-driven tasks. In such encoding and decoding processes, subtle facial expression information may inevitably be lost.",
349
+ "bbox": [
350
+ 169,
351
+ 638,
352
+ 826,
353
+ 863
354
+ ],
355
+ "page_idx": 2
356
+ },
357
+ {
358
+ "type": "text",
359
+ "text": "In this paper, we utilize the FLAME (Li et al., 2017) point cloud as prior and propose a novel 3D head neural avatar framework. It not only generalizes to unseen identities but also offers precise control over expression details during reenactment and surpasses all previous works in reenactment image quality. Fig. 2 illustrates the differences between our method and existing approaches.",
360
+ "bbox": [
361
+ 169,
362
+ 867,
363
+ 823,
364
+ 925
365
+ ],
366
+ "page_idx": 2
367
+ },
368
+ {
369
+ "type": "header",
370
+ "text": "Published as a conference paper at ICLR 2024",
371
+ "bbox": [
372
+ 171,
373
+ 32,
374
+ 478,
375
+ 47
376
+ ],
377
+ "page_idx": 2
378
+ },
379
+ {
380
+ "type": "page_number",
381
+ "text": "3",
382
+ "bbox": [
383
+ 493,
384
+ 948,
385
+ 503,
386
+ 959
387
+ ],
388
+ "page_idx": 2
389
+ },
390
+ {
391
+ "type": "image",
392
+ "img_path": "images/b9ce6cf0cdbdbf0fdaff2b3cf3582febe07fc64e0a494871b21f5543ffaed9d1.jpg",
393
+ "image_caption": [
394
+ "Figure 3: Overview: Our method mainly comprises two branches: one that captures fine-grained expressions with PEF (Sec. 3.2) and another that integrates information from multiple inputs through MTA (Sec. 3.1, 3.3). Finally, there is the rendering and super-resolution component (Sec. 3.4)."
395
+ ],
396
+ "image_footnote": [],
397
+ "bbox": [
398
+ 212,
399
+ 90,
400
+ 795,
401
+ 247
402
+ ],
403
+ "page_idx": 3
404
+ },
405
+ {
406
+ "type": "text",
407
+ "text": "3 METHOD",
408
+ "text_level": 1,
409
+ "bbox": [
410
+ 171,
411
+ 321,
412
+ 282,
413
+ 335
414
+ ],
415
+ "page_idx": 3
416
+ },
417
+ {
418
+ "type": "text",
419
+ "text": "In this section, we will describe our method. Our approach has the capability to faithfully reconstruct head avatars from any number of inputs and achieve precise reenactment. The overall process can be summarized as follows:",
420
+ "bbox": [
421
+ 169,
422
+ 352,
423
+ 823,
424
+ 393
425
+ ],
426
+ "page_idx": 3
427
+ },
428
+ {
429
+ "type": "equation",
430
+ "text": "\n$$\nI_{t} = R(\\mathrm{MTA}(E(I_{i})), \\mathrm{PEF}(\\mathrm{FLAME}(s_{i}, e_{t}, p_{t}), \\theta), p_{cam}),\n$$\n",
431
+ "text_format": "latex",
432
+ "bbox": [
433
+ 295,
434
+ 400,
435
+ 699,
436
+ 417
437
+ ],
438
+ "page_idx": 3
439
+ },
440
+ {
441
+ "type": "text",
442
+ "text": "where $I_{i}$ represents the input image(s), $s_i$ is the shape parameter of the source image $I_{i}$ , $e_t$ and $p_t$ is the desired expression and pose parameter. The canonical feature space is constructed by $E(I_i)$ , and our Point-based Expression Field (PEF) is built with point cloud FLAME $(s_i, e_t, p_t)$ and point feature $\\theta$ . If there are multiple input images, their canonical feature space will be merged by the Multi-Tri-planes Attention module (MTA). Finally, $R$ is the volume rendering function that renders the reenactment image $I_t$ based on camera pose $p_{cam}$ . The overall process is illustrated in Fig. 3. In the following, we will describe the canonical encoder in Sec. 3.1, explain how PEF controls expressions in Sec. 3.2, introduce how we fuse multiple inputs through MTA in Sec 3.3, discuss the rendering and super-resolution process in Sec. 3.4, and finally, we describe the training targets in Sec. 3.5.",
443
+ "bbox": [
444
+ 169,
445
+ 421,
446
+ 826,
447
+ 561
448
+ ],
449
+ "page_idx": 3
450
+ },
451
+ {
452
+ "type": "text",
453
+ "text": "3.1 CANONICAL FEATURE ENCODER",
454
+ "text_level": 1,
455
+ "bbox": [
456
+ 171,
457
+ 577,
458
+ 442,
459
+ 590
460
+ ],
461
+ "page_idx": 3
462
+ },
463
+ {
464
+ "type": "text",
465
+ "text": "Due to the fact that the tri-planes representation has strong 3D geometric priors and strikes a good balance between synthesis quality and speed, we employ the tri-planes representation as our standard feature space. Specifically, inspired by GFPGAN (Wang et al., 2021b), our encoder follows a UNet structure, and during its up-sampling process, we use a StyleGAN structure. We generally keep the same setting as GFPGAN, except that our encoder maps the original image from $3 \\times 512 \\times 512$ to $3 \\times 32 \\times 256 \\times 256$ to build a tri-planes feature space. We only modified the input and output layers to achieve this. In the experiment, we observed that this structure can effectively integrate global information from the input image during the down-sampling process, and then generate mutually correlated planes during the up-sampling process. In order to enhance the robustness of the encoder and adapt to arbitrary real-world inputs, we applied affine transformations to align the input 2D images using estimated head poses. Since we utilize a separate expression feature field encoded with PEF, the canonical feature space here lacks complete semantics on its own. Therefore, while many works based on tri-planes restrict the canonical feature space to have neutral expressions, here we do not impose any restrictions and train the encoder from scratch in an end-to-end manner.",
466
+ "bbox": [
467
+ 169,
468
+ 603,
469
+ 826,
470
+ 799
471
+ ],
472
+ "page_idx": 3
473
+ },
474
+ {
475
+ "type": "text",
476
+ "text": "3.2 POINT-BASED EXPRESSION FIELD",
477
+ "text_level": 1,
478
+ "bbox": [
479
+ 171,
480
+ 814,
481
+ 450,
482
+ 828
483
+ ],
484
+ "page_idx": 3
485
+ },
486
+ {
487
+ "type": "text",
488
+ "text": "In this section, we will introduce how to build a controllable expression field based on point clouds. Many methods for head avatars rely on 3DMM parameters or rendering images to generate expressions. However, they either have limited expressive capabilities when directly using 3DMM parameters or lose details due to excessive encoding and decoding processes. Inspired by Point-NeRF (Xu et al., 2022), we directly use the point cloud from 3DMM to construct a point-based expression field, thereby avoiding over-processing and retaining expression details as much as possible.",
489
+ "bbox": [
490
+ 169,
491
+ 839,
492
+ 823,
493
+ 925
494
+ ],
495
+ "page_idx": 3
496
+ },
497
+ {
498
+ "type": "header",
499
+ "text": "Published as a conference paper at ICLR 2024",
500
+ "bbox": [
501
+ 171,
502
+ 32,
503
+ 478,
504
+ 47
505
+ ],
506
+ "page_idx": 3
507
+ },
508
+ {
509
+ "type": "page_number",
510
+ "text": "4",
511
+ "bbox": [
512
+ 493,
513
+ 948,
514
+ 503,
515
+ 959
516
+ ],
517
+ "page_idx": 3
518
+ },
519
+ {
520
+ "type": "text",
521
+ "text": "Unlike attempts to reconstruct static scenes, our point-based expression field (PEF) aims to model dynamic expressions. To achieve this goal, we bind learnable weights to each FLAME vertex in the PEF. Due to the stable semantics and geometric topology of FLAME vertices, such as points representing the eyes and mouth not undergoing semantic changes across various expressions and identities, each neural point in the PEF also holds stable semantics and can be shared across different identities. During sampling features from PEF, we sample several nearest points to calculate the final feature for the sample position. If we sample the nearest points from the local region following (Xu et al., 2022), we may encounter limitations in representation capabilities, such that non-FLAME areas are modeled only in canonical feature space and parts related to expressions such as hair that are not included in FLAME may become fully rigid, making certain expressions unnatural. Therefore, we instead search for neighboring points in the entire space and use relative position encoding to provide the model with direction and distance information. Our approach liberates the representation capabilities of point features, and experiments have also confirmed that our method performs better. To achieve the synergy between the canonical feature space and PEF and harness the prior capabilities of point clouds, we remove the global pose from the FLAME pose and instead model it using the camera pose. This ensures that the point cloud is always in a canonical position near the origin. Since we sample features from both feature spaces, the semantic information and 3D priors from the PEF can also undergo collaborative learning with the canonical feature space.",
522
+ "bbox": [
523
+ 169,
524
+ 103,
525
+ 826,
526
+ 354
527
+ ],
528
+ "page_idx": 4
529
+ },
530
+ {
531
+ "type": "text",
532
+ "text": "The overall process of our PEF is as follows: for any given query 3D position $x$ during the NeRF sampling process, we retrieve its nearest $K$ points and obtain their corresponding features $f_{i}$ and positions $p_i$ . Then, we employ linear layers to regress the features for each point, and finally combine these features based on positional weights, as shown in Eq. 1:",
533
+ "bbox": [
534
+ 169,
535
+ 359,
536
+ 823,
537
+ 416
538
+ ],
539
+ "page_idx": 4
540
+ },
541
+ {
542
+ "type": "equation",
543
+ "text": "\n$$\nf_{exp, x} = \\sum_{i}^{K} \\frac{w_{i}}{\\sum_{j}^{K} w_{j}} L_{p}\\left(f_{i}, F_{pos}\\left(p_{i} - x\\right)\\right), \\text{ where } w_{i} = \\frac{1}{\\left\\| p_{i} - x \\right\\|}, \\tag{1}\n$$\n",
544
+ "text_format": "latex",
545
+ "bbox": [
546
+ 282,
547
+ 419,
548
+ 823,
549
+ 460
550
+ ],
551
+ "page_idx": 4
552
+ },
553
+ {
554
+ "type": "text",
555
+ "text": "where $L_{p}$ is the linear layers and $F_{pos}$ is the frequency positional encoding function. During this process, the position of point $p_i$ changes as the FLAME expression parameters change, creating a dynamic expression feature field. This allows the FLAME to directly contribute to the NeRF feature space, avoiding the loss of information introduced by excessive processing. Due to the decoupling of the canonical tri-planes and PEF, we only create the canonical tri-planes once during inference, and the speed of PEF will affect the inference speed. Thanks to the efficient parallel nearest neighbor query, the PEF process can be completed quickly, greatly improving the speed of inference.",
556
+ "bbox": [
557
+ 169,
558
+ 460,
559
+ 823,
560
+ 559
561
+ ],
562
+ "page_idx": 4
563
+ },
564
+ {
565
+ "type": "text",
566
+ "text": "3.3 MULTI TRI-PLANES ATTENTION",
567
+ "text_level": 1,
568
+ "bbox": [
569
+ 171,
570
+ 574,
571
+ 437,
572
+ 588
573
+ ],
574
+ "page_idx": 4
575
+ },
576
+ {
577
+ "type": "text",
578
+ "text": "Based on the aforementioned modules, we can obtain animatable high-fidelity results. However, since the source image can be arbitrary, this introduces some challenging scenarios. For example, there may be occlusions in the source image, or the eyes in the source image may be closed while the desired expression requires open eyes. In this situation, the model may generate illusions based on statistically average eye and facial features, but these illusions may be incorrect. Although this image cannot produce the truth about missing parts, we may have other images that supplement the missing parts. To achieve this goal, we have implemented an attention-based module to fuse the tri-planes features of multiple images, which is called Multi Tri-planes Attention (MTA).",
579
+ "bbox": [
580
+ 169,
581
+ 599,
582
+ 823,
583
+ 712
584
+ ],
585
+ "page_idx": 4
586
+ },
587
+ {
588
+ "type": "text",
589
+ "text": "Our MTA uses a learnable tri-plane to query multiple tri-planes from different images, generating weights for feature fusion, as shown in Eq. 2:",
590
+ "bbox": [
591
+ 169,
592
+ 718,
593
+ 823,
594
+ 747
595
+ ],
596
+ "page_idx": 4
597
+ },
598
+ {
599
+ "type": "equation",
600
+ "text": "\n$$\nP = \\sum_{i}^{N} \\frac{w_{i}}{\\sum_{j}^{N} w_{j}} E\\left(I_{i}\\right), \\text{ where } w_{i} = L_{q}(Q) L_{k}\\left(E\\left(I_{i}\\right)\\right), \\tag{2}\n$$\n",
601
+ "text_format": "latex",
602
+ "bbox": [
603
+ 313,
604
+ 750,
605
+ 823,
606
+ 790
607
+ ],
608
+ "page_idx": 4
609
+ },
610
+ {
611
+ "type": "text",
612
+ "text": "where $I_{i}$ is the input image, $N$ is the number of input images, $E$ is the canonical encoder, $L_{q}$ and $L_{k}$ are the linear layers to generate queries and keys, and $Q$ is the learnable query tri-planes.",
613
+ "bbox": [
614
+ 171,
615
+ 791,
616
+ 823,
617
+ 821
618
+ ],
619
+ "page_idx": 4
620
+ },
621
+ {
622
+ "type": "text",
623
+ "text": "Through our experiments, we have demonstrated that our MTA effectively enhances performance and completes missing information in one-shot inputs, such as pupil information and the other half of the face in extreme pose variations. During training, we use two random frames as input and one frame as the target, while during inference, our MTA can accept any number of images as input. Furthermore, experiments show that our MTA can consistently fuse multiple tri-planes features from images of the same person captured at different times. Even when dealing with images of different individuals and styles, MTA can still produce reasonable results, showcasing its strong robustness.",
624
+ "bbox": [
625
+ 169,
626
+ 825,
627
+ 826,
628
+ 925
629
+ ],
630
+ "page_idx": 4
631
+ },
632
+ {
633
+ "type": "header",
634
+ "text": "Published as a conference paper at ICLR 2024",
635
+ "bbox": [
636
+ 171,
637
+ 32,
638
+ 478,
639
+ 47
640
+ ],
641
+ "page_idx": 4
642
+ },
643
+ {
644
+ "type": "page_number",
645
+ "text": "5",
646
+ "bbox": [
647
+ 493,
648
+ 948,
649
+ 503,
650
+ 959
651
+ ],
652
+ "page_idx": 4
653
+ },
654
+ {
655
+ "type": "text",
656
+ "text": "3.4 VOLUME RENDERING AND SUPER RESOLUTION",
657
+ "text_level": 1,
658
+ "bbox": [
659
+ 171,
660
+ 103,
661
+ 547,
662
+ 118
663
+ ],
664
+ "page_idx": 5
665
+ },
666
+ {
667
+ "type": "text",
668
+ "text": "Given the camera's intrinsic and extrinsic parameters, we sample the rays and perform two-pass hierarchical sampling along these rays, followed by volume rendering to obtain 2D results. Due to the extensive computational resources required for high-resolution volume rendering, training and testing on high resolution become time-consuming and costly. A popular solution to this problem is the lightweight super-resolution module. In our work, we render low-resolution images at $128 \\times 128$ resolution, and these low-resolution images consist of a 32-dimensional feature map with the first three dimensions corresponding to RGB pixel values. The super-resolution module we use is similar to our canonical feature space encoder, and like the encoder, we train this super-resolution module from scratch in an end-to-end manner.",
669
+ "bbox": [
670
+ 169,
671
+ 128,
672
+ 826,
673
+ 255
674
+ ],
675
+ "page_idx": 5
676
+ },
677
+ {
678
+ "type": "text",
679
+ "text": "3.5 TRAINING STRATEGY AND LOSS FUNCTIONS",
680
+ "text_level": 1,
681
+ "bbox": [
682
+ 171,
683
+ 271,
684
+ 529,
685
+ 285
686
+ ],
687
+ "page_idx": 5
688
+ },
689
+ {
690
+ "type": "text",
691
+ "text": "We train our model from scratch using an end-to-end training approach. By sampling original and target images from the same video, we construct pairs of images with the same identity but different expressions and poses. During the training process, our primary objective is to make the reenactment images consistent with the target images. We use $L_{1}$ and perceptual loss (Johnson et al., 2016; Zhang et al., 2018) on both low-resolution and high-resolution reenactment images to achieve this objective, as shown in the Eq. 3:",
692
+ "bbox": [
693
+ 169,
694
+ 297,
695
+ 823,
696
+ 381
697
+ ],
698
+ "page_idx": 5
699
+ },
700
+ {
701
+ "type": "equation",
702
+ "text": "\n$$\n\\mathcal{L}_{rec} = \\left\\| I_{lr} - I_{t} \\right\\| + \\left\\| I_{hr} - I_{t} \\right\\| + \\lambda_{p} \\left( \\left\\| \\varphi(I_{lr}) - \\varphi(I_{t}) \\right\\| + \\left\\| \\varphi(I_{hr}) - \\varphi(I_{t}) \\right\\| \\right), \\tag{3}\n$$\n",
703
+ "text_format": "latex",
704
+ "bbox": [
705
+ 232,
706
+ 386,
707
+ 823,
708
+ 405
709
+ ],
710
+ "page_idx": 5
711
+ },
712
+ {
713
+ "type": "text",
714
+ "text": "where $I_{t}$ is the reenactment target image, $I_{lr}$ and $I_{hr}$ are the low-resolution and high-resolution reenactment results, $\\varphi$ is the AlexNet (Krizhevsky et al., 2012) used in the perceptual loss, and $\\lambda_{p}$ is the weight for the perceptual loss.",
715
+ "bbox": [
716
+ 169,
717
+ 409,
718
+ 823,
719
+ 452
720
+ ],
721
+ "page_idx": 5
722
+ },
723
+ {
724
+ "type": "text",
725
+ "text": "Additionally, we add a density-based norm loss as shown in the Eq. 4:",
726
+ "bbox": [
727
+ 171,
728
+ 458,
729
+ 635,
730
+ 473
731
+ ],
732
+ "page_idx": 5
733
+ },
734
+ {
735
+ "type": "equation",
736
+ "text": "\n$$\n\\mathcal{L}_{\\text{norm}} = \\left\\| d_{n} \\right\\|_{2}, \\tag{4}\n$$\n",
737
+ "text_format": "latex",
738
+ "bbox": [
739
+ 437,
740
+ 478,
741
+ 823,
742
+ 494
743
+ ],
744
+ "page_idx": 5
745
+ },
746
+ {
747
+ "type": "text",
748
+ "text": "where $d_{n}$ is the density used in volume rendering (Mildenhall et al., 2020). This loss encourages the total density of NeRF to be as low as possible, thereby encouraging reconstructions that closely adhere to the actual 3D shape and avoid the appearance of artifacts. The overall training objective is as:",
749
+ "bbox": [
750
+ 169,
751
+ 501,
752
+ 823,
753
+ 556
754
+ ],
755
+ "page_idx": 5
756
+ },
757
+ {
758
+ "type": "equation",
759
+ "text": "\n$$\n\\mathcal{L}_{\\text{overall}} = \\lambda_{r} \\mathcal{L}_{\\text{rec}} + \\lambda_{n} \\mathcal{L}_{\\text{norm}}, \\tag{5}\n$$\n",
760
+ "text_format": "latex",
761
+ "bbox": [
762
+ 388,
763
+ 563,
764
+ 823,
765
+ 580
766
+ ],
767
+ "page_idx": 5
768
+ },
769
+ {
770
+ "type": "text",
771
+ "text": "where $\\lambda_r$ and $\\lambda_n$ are the weights that balance the loss.",
772
+ "bbox": [
773
+ 171,
774
+ 585,
775
+ 529,
776
+ 599
777
+ ],
778
+ "page_idx": 5
779
+ },
780
+ {
781
+ "type": "text",
782
+ "text": "4 EXPERIMENTS",
783
+ "text_level": 1,
784
+ "bbox": [
785
+ 171,
786
+ 619,
787
+ 328,
788
+ 635
789
+ ],
790
+ "page_idx": 5
791
+ },
792
+ {
793
+ "type": "text",
794
+ "text": "In this section, we will first introduce the dataset we use, the implementation details of our method, and the baselines of our work. We will then compare our method with existing approaches using a variety of metrics.",
795
+ "bbox": [
796
+ 169,
797
+ 650,
798
+ 823,
799
+ 694
800
+ ],
801
+ "page_idx": 5
802
+ },
803
+ {
804
+ "type": "text",
805
+ "text": "4.1 EXPERIMENT SETTING",
806
+ "text_level": 1,
807
+ "bbox": [
808
+ 171,
809
+ 709,
810
+ 375,
811
+ 723
812
+ ],
813
+ "page_idx": 5
814
+ },
815
+ {
816
+ "type": "text",
817
+ "text": "Datasets. We use the VFHQ (Xie et al., 2022) dataset to train our model. This dataset comprises clips from various interview scenarios, and we utilized a subset consisting of 8,013 video clips. From these videos, we extracted 240,390 frames to create our training dataset. During the training process, we randomly sampled frames from the same video to create pairs of images with the same identity but different expressions. One frame was used as the target for reenactment, while the others served as source images. Given that our method can accept any number of inputs, in each iteration, we sampled two inputs with a $70\\%$ probability and one input with a $30\\%$ probability. Regarding evaluation, we assessed our method on the VFHQ dataset (Xie et al., 2022) and the HDTF dataset (Zhang et al., 2021). It's important to note that our model was not fine-tuned on the HDTF dataset. In the evaluation process, we used the first frame of each video as the source image, with the remaining frames as targets for reenactment.",
818
+ "bbox": [
819
+ 169,
820
+ 734,
821
+ 826,
822
+ 888
823
+ ],
824
+ "page_idx": 5
825
+ },
826
+ {
827
+ "type": "text",
828
+ "text": "Evaluation Metrics. We evaluated all methods on both same-identity and cross-identity reenactment tasks. For the cross-identity reenactment task, due to the lack of ground truth, we evaluated the",
829
+ "bbox": [
830
+ 169,
831
+ 895,
832
+ 823,
833
+ 925
834
+ ],
835
+ "page_idx": 5
836
+ },
837
+ {
838
+ "type": "header",
839
+ "text": "Published as a conference paper at ICLR 2024",
840
+ "bbox": [
841
+ 171,
842
+ 32,
843
+ 478,
844
+ 47
845
+ ],
846
+ "page_idx": 5
847
+ },
848
+ {
849
+ "type": "page_number",
850
+ "text": "6",
851
+ "bbox": [
852
+ 493,
853
+ 948,
854
+ 504,
855
+ 959
856
+ ],
857
+ "page_idx": 5
858
+ },
859
+ {
860
+ "type": "image",
861
+ "img_path": "images/f30a8dd3a2d8da75a40981502527afee7ef1d4ffe40752dbb6610fd8c7c75902.jpg",
862
+ "image_caption": [
863
+ "Figure 4: Qualitative results on VFHQ (Xie et al., 2022) and HDTF (Zhang et al., 2021) datasets. The first two rows are from VFHQ and the third row is from HDTF."
864
+ ],
865
+ "image_footnote": [],
866
+ "bbox": [
867
+ 214,
868
+ 102,
869
+ 782,
870
+ 318
871
+ ],
872
+ "page_idx": 6
873
+ },
874
+ {
875
+ "type": "image",
876
+ "img_path": "images/1d30ddc6965ae0404053cabb031c312d57938896bd34295a3dcf25e8a9ac9238.jpg",
877
+ "image_caption": [
878
+ "Figure 5: Qualitative results on VFHQ (Xie et al., 2022) and HDTF (Zhang et al., 2021) datasets. The first four rows are from VFHQ and the last row is from HDTF."
879
+ ],
880
+ "image_footnote": [],
881
+ "bbox": [
882
+ 212,
883
+ 375,
884
+ 782,
885
+ 734
886
+ ],
887
+ "page_idx": 6
888
+ },
889
+ {
890
+ "type": "text",
891
+ "text": "cosine similarity of identity embeddings (CSIM) based on ArcFace (Deng et al., 2019) between the reenacted frames and source images to assess identity consistency during the reenactment task. We also used the Average Expression Distance (AED) and Average Pose Distance (APD) metrics based on (Danecek et al., 2022) to assess the accuracy of expression and pose driving. In the same-identity reenactment task, where ground truth frames are available, in addition to the aforementioned metrics, we evaluate PSNR, SSIM, L1, and LPIPS metrics between the reenacted frames and ground truth frames. We also calculated the Average Key-point Distance (AKD) based on (Bulat & Tzimiropoulos, 2017) as another reference for expression reenactment accuracy.",
892
+ "bbox": [
893
+ 169,
894
+ 777,
895
+ 826,
896
+ 888
897
+ ],
898
+ "page_idx": 6
899
+ },
900
+ {
901
+ "type": "text",
902
+ "text": "Implementation Details. Our framework is built upon the PyTorch framework (Paszke et al., 2017), and during the training process, we employ the ADAM (Kingma & Ba, 2014) optimizer with a learn",
903
+ "bbox": [
904
+ 169,
905
+ 895,
906
+ 823,
907
+ 925
908
+ ],
909
+ "page_idx": 6
910
+ },
911
+ {
912
+ "type": "header",
913
+ "text": "Published as a conference paper at ICLR 2024",
914
+ "bbox": [
915
+ 173,
916
+ 32,
917
+ 478,
918
+ 47
919
+ ],
920
+ "page_idx": 6
921
+ },
922
+ {
923
+ "type": "page_number",
924
+ "text": "7",
925
+ "bbox": [
926
+ 493,
927
+ 948,
928
+ 504,
929
+ 959
930
+ ],
931
+ "page_idx": 6
932
+ },
933
+ {
934
+ "type": "table",
935
+ "img_path": "images/96dd7619f4af193fdc19cfb1f90a82dca614c668e110269ba9e71059bf416adf.jpg",
936
+ "table_caption": [
937
+ "Table 1: Quantitative results on the VFHQ (Xie et al., 2022) dataset. For a fair comparison, we compare one-shot results using the first frame input. Ours Two-in uses both the first and last frames. Entries in green are the best ones in a one-shot setting."
938
+ ],
939
+ "table_footnote": [],
940
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"8\">Self Reenactment</td><td colspan=\"3\">Cross-Id Reenactment</td></tr><tr><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>CSIM↑</td><td>L1↓</td><td>AED↓</td><td>APD↓</td><td>AKD↓</td><td>CSIM↑</td><td>AED↓</td><td>APD↓</td></tr><tr><td>ROME (Khakhulin et al., 2022)</td><td>19.88</td><td>0.735</td><td>0.237</td><td>0.679</td><td>0.060</td><td>0.497</td><td>0.017</td><td>4.53</td><td>0.531</td><td>0.936</td><td>0.026</td></tr><tr><td>StyleHeat (Yin et al., 2022)</td><td>19.95</td><td>0.738</td><td>0.251</td><td>0.603</td><td>0.065</td><td>0.593</td><td>0.024</td><td>5.30</td><td>0.506</td><td>0.961</td><td>0.038</td></tr><tr><td>OTAvatar (Ma et al., 2023)</td><td>18.10</td><td>0.600</td><td>0.346</td><td>0.660</td><td>0.092</td><td>0.734</td><td>0.035</td><td>6.05</td><td>0.514</td><td>0.962</td><td>0.059</td></tr><tr><td>Next3D (Sun et al., 2023)</td><td>19.95</td><td>0.656</td><td>0.281</td><td>0.631</td><td>0.066</td><td>0.727</td><td>0.026</td><td>5.17</td><td>0.482</td><td>0.996</td><td>0.036</td></tr><tr><td>HideNeRF (Li et al., 2023a)</td><td>20.07</td><td>0.745</td><td>0.204</td><td>0.794</td><td>0.056</td><td>0.521</td><td>0.031</td><td>5.33</td><td>0.558</td><td>1.024</td><td>0.044</td></tr><tr><td>Ours One-in</td><td>22.08</td><td>0.765</td><td>0.177</td><td>0.789</td><td>0.039</td><td>0.434</td><td>0.017</td><td>3.53</td><td>0.558</td><td>0.910</td><td>0.034</td></tr><tr><td>Ours Two-in</td><td>22.86</td><td>0.779</td><td>0.169</td><td>0.771</td><td>0.035</td><td>0.411</td><td>0.017</td><td>3.44</td><td>0.551</td><td>0.907</td><td>0.034</td></tr></table>",
941
+ "bbox": [
942
+ 174,
943
+ 143,
944
+ 823,
945
+ 241
946
+ ],
947
+ "page_idx": 7
948
+ },
949
+ {
950
+ "type": "table",
951
+ "img_path": "images/61a76416f9dcc7eececa585dea810cd5684cc3646b19b8e327ac90fe55e7f6ab.jpg",
952
+ "table_caption": [
953
+ "Table 2: Quantitative results on the HDTF (Zhang et al., 2021) dataset. For a fair comparison, we compare one-shot results using the first frame input. Ours Two-in uses both the first and last frames. Entries in green are the best ones in a one-shot setting."
954
+ ],
955
+ "table_footnote": [],
956
+ "table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"8\">Self Reenactment</td><td colspan=\"3\">Cross-Id Reenactment</td></tr><tr><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>CSIM↑</td><td>L1↓</td><td>AED↓</td><td>APD↓</td><td>AKD↓</td><td>CSIM↑</td><td>AED↓</td><td>APD↓</td></tr><tr><td>ROME (Khakhulin et al., 2022)</td><td>20.84</td><td>0.722</td><td>0.176</td><td>0.781</td><td>0.044</td><td>0.540</td><td>0.012</td><td>3.93</td><td>0.721</td><td>0.929</td><td>0.017</td></tr><tr><td>StyleHeat (Yin et al., 2022)</td><td>21.91</td><td>0.772</td><td>0.210</td><td>0.705</td><td>0.045</td><td>0.527</td><td>0.015</td><td>3.69</td><td>0.666</td><td>0.902</td><td>0.027</td></tr><tr><td>OTAvatar (Ma et al., 2023)</td><td>20.50</td><td>0.695</td><td>0.241</td><td>0.765</td><td>0.064</td><td>0.681</td><td>0.020</td><td>5.15</td><td>0.699</td><td>1.047</td><td>0.034</td></tr><tr><td>Next3D (Sun et al., 2023)</td><td>20.35</td><td>0.723</td><td>0.217</td><td>0.730</td><td>0.048</td><td>0.644</td><td>0.022</td><td>4.19</td><td>0.622</td><td>1.014</td><td>0.026</td></tr><tr><td>HideNeRF (Li et al., 2023a)</td><td>21.38</td><td>0.803</td><td>0.147</td><td>0.907</td><td>0.038</td><td>0.499</td><td>0.027</td><td>4.33</td><td>0.803</td><td>1.031</td><td>0.032</td></tr><tr><td>Ours One-in</td><td>24.21</td><td>0.834</td><td>0.131</td><td>0.871</td><td>0.029</td><td>0.427</td><td>0.012</td><td>3.06</td><td>0.790</td><td>0.869</td><td>0.020</td></tr><tr><td>Ours Two-in</td><td>25.36</td><td>0.849</td><td>0.122</td><td>0.851</td><td>0.026</td><td>0.406</td><td>0.012</td><td>3.01</td><td>0.769</td><td>0.837</td><td>0.021</td></tr></table>",
957
+ "bbox": [
958
+ 174,
959
+ 306,
960
+ 823,
961
+ 404
962
+ ],
963
+ "page_idx": 7
964
+ },
965
+ {
966
+ "type": "text",
967
+ "text": "ing rate of 1.0e-4. We conducted training on 2 NVIDIA Tesla A100 GPUs, with a total batch size of 8. During the training process, our PEF searches for the nearest $K = 8$ points, while MTA selects two frames as source images. Our approach employs an end-to-end training methodology. The training process consists of 150,000 iterations and the full training process consumes approximately 50 GPU hours, showing its resource utilization efficiency. During the inference time, our method achieves 15 FPS when running on an A100 GPU. More details can be found in the supplementary materials.",
968
+ "bbox": [
969
+ 169,
970
+ 421,
971
+ 826,
972
+ 506
973
+ ],
974
+ "page_idx": 7
975
+ },
976
+ {
977
+ "type": "text",
978
+ "text": "4.2 MAIN RESULTS",
979
+ "text_level": 1,
980
+ "bbox": [
981
+ 171,
982
+ 526,
983
+ 323,
984
+ 540
985
+ ],
986
+ "page_idx": 7
987
+ },
988
+ {
989
+ "type": "text",
990
+ "text": "**Baseline Methods.** We compared our method with five state-of-the-art existing methods, including StyleHeat (Yin et al., 2022) (2D-based warping), ROME (Khakhulin et al., 2022) (mesh-based), OTAvatar (Ma et al., 2023), Next3D (Sun et al., 2023; Roich et al., 2021) (based on NeRF and 3D generative models), and HideNeRF (Li et al., 2023a), which is most similar to our setup. All results were evaluated using official code implementations.",
991
+ "bbox": [
992
+ 169,
993
+ 554,
994
+ 823,
995
+ 626
996
+ ],
997
+ "page_idx": 7
998
+ },
999
+ {
1000
+ "type": "text",
1001
+ "text": "Self-Reenactment Results. We begin by evaluating the synthesis performance when the source and driving image are the same person. Tab. 1 and Tab. 2 show the quantitative results on VFHQ and HDTF, respectively. Notably, our approach exhibits a significant advantage over other state-of-the-art methods in terms of both synthesis quality metrics (PSNR, SSIM, LPIPS, and L1) and expression control quality metrics (AED and AKD). Qualitative results on VFHQ and HDTF are visually demonstrated in Fig. 4. These results showcase that our method not only excels in synthesis quality but also captures subtle expressions, as exemplified by the surprised expression in the first row and the angry expression in the second row. Importantly, our model achieved these results without any training or fine-tuning on the HDTF dataset, thus demonstrating the robust generalization capability of our approach.",
1002
+ "bbox": [
1003
+ 169,
1004
+ 631,
1005
+ 826,
1006
+ 772
1007
+ ],
1008
+ "page_idx": 7
1009
+ },
1010
+ {
1011
+ "type": "text",
1012
+ "text": "Cross-Identity Reenactment Results. We also evaluate the synthesis performance when the source and the driving images contain different persons. Tab. 1 and Tab. 2 show quantitative results, and Fig. 5 showcases qualitative results. Due to the absence of ground truth data, a quantitative evaluation of synthesis performance is not feasible, but the qualitative results evident that our method excels in expression control. These results show the efficacy of our approach in scenarios where the source and driving images are from different individuals.",
1013
+ "bbox": [
1014
+ 169,
1015
+ 777,
1016
+ 826,
1017
+ 863
1018
+ ],
1019
+ "page_idx": 7
1020
+ },
1021
+ {
1022
+ "type": "text",
1023
+ "text": "Multiple images input. In addition to quantitative results, we further illustrate the advantages of multi-input methods in challenging scenarios, as shown in Fig. 6, such as closed eyes and significant pose variations. The results demonstrate that employing multiple inputs can further enhance synthesis quality while maintaining precise expression control.",
1024
+ "bbox": [
1025
+ 169,
1026
+ 867,
1027
+ 826,
1028
+ 926
1029
+ ],
1030
+ "page_idx": 7
1031
+ },
1032
+ {
1033
+ "type": "header",
1034
+ "text": "Published as a conference paper at ICLR 2024",
1035
+ "bbox": [
1036
+ 171,
1037
+ 32,
1038
+ 478,
1039
+ 47
1040
+ ],
1041
+ "page_idx": 7
1042
+ },
1043
+ {
1044
+ "type": "page_number",
1045
+ "text": "8",
1046
+ "bbox": [
1047
+ 493,
1048
+ 948,
1049
+ 504,
1050
+ 959
1051
+ ],
1052
+ "page_idx": 7
1053
+ },
1054
+ {
1055
+ "type": "image",
1056
+ "img_path": "images/5344428fd695f2e97b47f5c69589190150e820fa8f86339f5e25cf7a8df437ec.jpg",
1057
+ "image_caption": [],
1058
+ "image_footnote": [],
1059
+ "bbox": [
1060
+ 235,
1061
+ 99,
1062
+ 382,
1063
+ 159
1064
+ ],
1065
+ "page_idx": 8
1066
+ },
1067
+ {
1068
+ "type": "image",
1069
+ "img_path": "images/9f3ca9e2971f066c62b82e73159920d3cb95968303f3f18993785ecb11772cdd.jpg",
1070
+ "image_caption": [],
1071
+ "image_footnote": [],
1072
+ "bbox": [
1073
+ 383,
1074
+ 99,
1075
+ 495,
1076
+ 159
1077
+ ],
1078
+ "page_idx": 8
1079
+ },
1080
+ {
1081
+ "type": "image",
1082
+ "img_path": "images/58139ba39315280ce0f1d55331c7b36c0c6a1254c7a662326cf669bd24e87ca7.jpg",
1083
+ "image_caption": [],
1084
+ "image_footnote": [],
1085
+ "bbox": [
1086
+ 500,
1087
+ 99,
1088
+ 642,
1089
+ 159
1090
+ ],
1091
+ "page_idx": 8
1092
+ },
1093
+ {
1094
+ "type": "image",
1095
+ "img_path": "images/76b07b2ba5b3ae99922105445ef7236128b15e49703ac6a341e4db14f5ea696b.jpg",
1096
+ "image_caption": [],
1097
+ "image_footnote": [],
1098
+ "bbox": [
1099
+ 643,
1100
+ 101,
1101
+ 756,
1102
+ 159
1103
+ ],
1104
+ "page_idx": 8
1105
+ },
1106
+ {
1107
+ "type": "image",
1108
+ "img_path": "images/c78fafb43a83b689957d0956cef203332d351d97860bf19c93dbfcbc949f5159.jpg",
1109
+ "image_caption": [
1110
+ "Figure 6: Qualitative results of multi-inputs. In each image, the left side shows input frames, while the right side displays reenactment frames and driving frames. It can be observed that using multiple inputs enhances the performance, especially in cases of closed eyes and occlusions."
1111
+ ],
1112
+ "image_footnote": [],
1113
+ "bbox": [
1114
+ 235,
1115
+ 161,
1116
+ 419,
1117
+ 232
1118
+ ],
1119
+ "page_idx": 8
1120
+ },
1121
+ {
1122
+ "type": "image",
1123
+ "img_path": "images/66821664911fdfab7231970c490121bac03889a9cc9d11c4acdb4ac6c736725f.jpg",
1124
+ "image_caption": [],
1125
+ "image_footnote": [],
1126
+ "bbox": [
1127
+ 423,
1128
+ 161,
1129
+ 570,
1130
+ 232
1131
+ ],
1132
+ "page_idx": 8
1133
+ },
1134
+ {
1135
+ "type": "image",
1136
+ "img_path": "images/5d453aa0f858404dc86f69614aa99c36cd651dd0ac0f948f49b58c8cb764797e.jpg",
1137
+ "image_caption": [],
1138
+ "image_footnote": [],
1139
+ "bbox": [
1140
+ 571,
1141
+ 161,
1142
+ 756,
1143
+ 232
1144
+ ],
1145
+ "page_idx": 8
1146
+ },
1147
+ {
1148
+ "type": "table",
1149
+ "img_path": "images/530a7c0a947cb7eb2956368c83fb880b909e777668e4556e0b4be0a0269edca6.jpg",
1150
+ "table_caption": [
1151
+ "Table 3: Ablation results on the VFHQ dataset. Entries in green are the best ones."
1152
+ ],
1153
+ "table_footnote": [],
1154
+ "table_body": "<table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>CSIM↑</td><td>L1↓</td><td>AED↓</td><td>APD↓</td><td>AKD↓</td></tr><tr><td>w/o PEF</td><td>22.01</td><td>0.762</td><td>0.186</td><td>0.766</td><td>0.040</td><td>0.576</td><td>0.020</td><td>4.30</td></tr><tr><td>w/o global sample</td><td>21.58</td><td>0.760</td><td>0.194</td><td>0.765</td><td>0.039</td><td>0.518</td><td>0.019</td><td>3.96</td></tr><tr><td>point cloud 2000</td><td>21.95</td><td>0.761</td><td>0.193</td><td>0.750</td><td>0.040</td><td>0.497</td><td>0.020</td><td>3.86</td></tr><tr><td>query 4 points</td><td>22.04</td><td>0.762</td><td>0.192</td><td>0.751</td><td>0.039</td><td>0.514</td><td>0.020</td><td>3.90</td></tr><tr><td>Ours One-in</td><td>22.08</td><td>0.765</td><td>0.177</td><td>0.789</td><td>0.039</td><td>0.434</td><td>0.017</td><td>3.53</td></tr><tr><td>mean Two-in</td><td>22.75</td><td>0.776</td><td>0.190</td><td>0.726</td><td>0.036</td><td>0.452</td><td>0.019</td><td>3.68</td></tr><tr><td>mean Three-in</td><td>23.03</td><td>0.780</td><td>0.191</td><td>0.724</td><td>0.035</td><td>0.455</td><td>0.019</td><td>3.65</td></tr><tr><td>mean Four-in</td><td>23.16</td><td>0.783</td><td>0.194</td><td>0.716</td><td>0.035</td><td>0.449</td><td>0.018</td><td>3.64</td></tr><tr><td>Ours Two-in</td><td>22.86</td><td>0.779</td><td>0.169</td><td>0.771</td><td>0.035</td><td>0.411</td><td>0.017</td><td>3.44</td></tr><tr><td>Ours Three-in</td><td>23.27</td><td>0.788</td><td>0.165</td><td>0.772</td><td>0.033</td><td>0.403</td><td>0.016</td><td>3.41</td></tr><tr><td>Ours Four-in</td><td>23.49</td><td>0.792</td><td>0.164</td><td>0.773</td><td>0.032</td><td>0.400</td><td>0.016</td><td>3.41</td></tr></table>",
1155
+ "bbox": [
1156
+ 254,
1157
+ 311,
1158
+ 738,
1159
+ 439
1160
+ ],
1161
+ "page_idx": 8
1162
+ },
1163
+ {
1164
+ "type": "text",
1165
+ "text": "4.3 ABLATION STUDIES",
1166
+ "text_level": 1,
1167
+ "bbox": [
1168
+ 171,
1169
+ 455,
1170
+ 354,
1171
+ 469
1172
+ ],
1173
+ "page_idx": 8
1174
+ },
1175
+ {
1176
+ "type": "text",
1177
+ "text": "Effectiveness of Point-based Expression Field. To validate the effectiveness of our proposed PEF, we provide FLAME expression parameters directly as a baseline for comparison (w/o PEF in Tab. 3). This method was applied in NeRFace and demonstrated expression control capability. The improvements in AED and AKD clearly indicate that our PEF significantly enhances expression control. We also tried sampling points in a local area with a maximum distance of 1/128 instead of global sampling. The results are shown as w/o global sample in Tab. 3. The results show that our global sampling enhances the details in expressions and the quality of synthesis.",
1178
+ "bbox": [
1179
+ 169,
1180
+ 482,
1181
+ 823,
1182
+ 580
1183
+ ],
1184
+ "page_idx": 8
1185
+ },
1186
+ {
1187
+ "type": "text",
1188
+ "text": "Effectiveness of Multi Tri-planes Attention. To validate the effectiveness of our proposed MTA, we established a naive mean-based baseline that averages the tri-planes of multiple images to obtain a merged plane. Table 3 shows the results. We observe that our MTA exhibits better synthesis performance, which we attribute to MTA's ability to avoid feature blurring caused by average fusion.",
1189
+ "bbox": [
1190
+ 169,
1191
+ 585,
1192
+ 823,
1193
+ 643
1194
+ ],
1195
+ "page_idx": 8
1196
+ },
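To make the comparison concrete, the sketch below contrasts the naive mean baseline with an attention-style fusion over per-image tri-plane features. It is a minimal illustration with assumed tensor shapes and a shared query, not the paper's exact MTA implementation.

```python
import torch

def fuse_mean(planes):
    # planes: (N_inputs, C, H, W) tri-plane features from N source images.
    # Naive baseline: average the planes, which tends to blur details.
    return planes.mean(dim=0)

def fuse_attention(planes, w_q, w_k):
    # Attention-style fusion: each spatial location softly selects which
    # input image to draw features from, instead of averaging them.
    n, c, h, w = planes.shape
    feats = planes.permute(2, 3, 0, 1)             # (H, W, N, C)
    q = feats.mean(dim=2, keepdim=True) @ w_q      # (H, W, 1, C) shared query (assumed)
    k = feats @ w_k                                # (H, W, N, C)
    attn = torch.softmax((q * k).sum(-1, keepdim=True) / c ** 0.5, dim=2)
    return (attn * feats).sum(dim=2).permute(2, 0, 1)   # (C, H, W)

planes = torch.randn(2, 32, 256, 256)              # two source images
w_q, w_k = torch.randn(32, 32), torch.randn(32, 32)
fused = fuse_attention(planes, w_q, w_k)            # (32, 256, 256)
```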
1197
+ {
1198
+ "type": "text",
1199
+ "text": "Ablation on Hyper-parameters. We conducted experiments on the selection of hyper-parameters. We randomly selected 2,000 points from 5,023 points of FLAME, and the results in Tab. 3 show that our method can also work on sparse point clouds. This may be attributed to our PEF finding neighboring points from the entire space, which prevents sparse sampling issues. We also reduced the number of query neighbors $K$ from 8 to 4, and the results indicate that our method has some robustness to the number of neighboring points.",
1200
+ "bbox": [
1201
+ 169,
1202
+ 648,
1203
+ 823,
1204
+ 734
1205
+ ],
1206
+ "page_idx": 8
1207
+ },
1208
+ {
1209
+ "type": "text",
1210
+ "text": "5 CONCLUSION",
1211
+ "text_level": 1,
1212
+ "bbox": [
1213
+ 171,
1214
+ 753,
1215
+ 318,
1216
+ 768
1217
+ ],
1218
+ "page_idx": 8
1219
+ },
1220
+ {
1221
+ "type": "text",
1222
+ "text": "In this paper, we have introduced a novel framework for generalizable and precise reconstruction of animatable 3D head avatars. Our approach reconstructs the neural radiance field using only one or a few input images and leverages a point-based expression field to control the expression of synthesized images. Additionally, we have introduced an attention-based fusion module to utilize information from multiple input images. Ablation studies suggest that the proposed Point-based Expression Field (PEF) and Multi Tri-planes Attention (MTA) can enhance synthesis quality and expression control. Our experimental results also demonstrate that our method achieves the most precise expression control and state-of-the-art synthesis quality on multiple benchmark datasets. We believe that our method has a wide range of potential applications due to its strong generalization and precise expression control capabilities.",
1223
+ "bbox": [
1224
+ 169,
1225
+ 784,
1226
+ 826,
1227
+ 924
1228
+ ],
1229
+ "page_idx": 8
1230
+ },
1231
+ {
1232
+ "type": "header",
1233
+ "text": "Published as a conference paper at ICLR 2024",
1234
+ "bbox": [
1235
+ 171,
1236
+ 32,
1237
+ 478,
1238
+ 47
1239
+ ],
1240
+ "page_idx": 8
1241
+ },
1242
+ {
1243
+ "type": "page_number",
1244
+ "text": "9",
1245
+ "bbox": [
1246
+ 493,
1247
+ 948,
1248
+ 504,
1249
+ 959
1250
+ ],
1251
+ "page_idx": 8
1252
+ },
1253
+ {
1254
+ "type": "text",
1255
+ "text": "6 ETHICS STATEMENT",
1256
+ "text_level": 1,
1257
+ "bbox": [
1258
+ 171,
1259
+ 102,
1260
+ 374,
1261
+ 118
1262
+ ],
1263
+ "page_idx": 9
1264
+ },
1265
+ {
1266
+ "type": "text",
1267
+ "text": "Since our framework allows for the reconstruction and reenactment of head avatars, it has a wide range of applications but also carries the potential risk of misuse, such as using it to create fake videos of others, violating privacy, and spreading false information. We are aware of the potential for misuse of our method and strongly discourage such practices. To this end, we have proposed several plans to prevent this technical risk:",
1268
+ "bbox": [
1269
+ 169,
1270
+ 133,
1271
+ 823,
1272
+ 204
1273
+ ],
1274
+ "page_idx": 9
1275
+ },
1276
+ {
1277
+ "type": "list",
1278
+ "sub_type": "text",
1279
+ "list_items": [
1280
+ "- We will add a conspicuous watermark to the synthesized video so that viewers can easily identify whether the video was synthesized by the model. This will significantly reduce the cost for viewers to identify the video and reduce the risk of abuse.",
1281
+ "- We limit the identity of the target speaker to virtual identities such as virtual idols, and prohibit the synthesis of real people without formal consent. Furthermore, synthetic videos may only be used for educational or other legitimate purposes (such as online courses) and any misuse will be subject to liability via the tracking methods we present in the next point.",
1282
+ "- We will also inject invisible watermarks into the synthesized video to store the IP of the video producer, so that the video producer must consider the potential risks brought by the synthesized video. This will encourage video producers to proactively think about whether their videos will create ethical risks and reduce the possibility of creating abusive videos."
1283
+ ],
1284
+ "bbox": [
1285
+ 215,
1286
+ 215,
1287
+ 823,
1288
+ 378
1289
+ ],
1290
+ "page_idx": 9
1291
+ },
1292
+ {
1293
+ "type": "text",
1294
+ "text": "To summarize, as a technology designer, we come up with strict licenses and technologies to prevent abuse of our GPAvatar, a talking face reconstruction system. We think more efforts from governments, society, technology designers, and users are needed to eliminate the abuse of deepfake. Besides, we hope the video maker is aware of the potential risks and responsibilities when using the talking face generation techniques. We believe that, with proper application, our method has the potential to demonstrate significant utility in various real-world scenarios.",
1295
+ "bbox": [
1296
+ 169,
1297
+ 390,
1298
+ 823,
1299
+ 474
1300
+ ],
1301
+ "page_idx": 9
1302
+ },
1303
+ {
1304
+ "type": "text",
1305
+ "text": "7 REPRODUCIBILITY STATEMENT",
1306
+ "text_level": 1,
1307
+ "bbox": [
1308
+ 171,
1309
+ 494,
1310
+ 470,
1311
+ 510
1312
+ ],
1313
+ "page_idx": 9
1314
+ },
1315
+ {
1316
+ "type": "text",
1317
+ "text": "Here we summarize the efforts made to ensure the reproducibility of this work. The model architectures and training details are introduced in Appendix A.1, and we also release the code for the model at https://github.com/xg-chu/GPAvatar. The data processing and evaluation details are introduced in Appendix A.2 and Appendix A.3.",
1318
+ "bbox": [
1319
+ 169,
1320
+ 526,
1321
+ 823,
1322
+ 583
1323
+ ],
1324
+ "page_idx": 9
1325
+ },
1326
+ {
1327
+ "type": "text",
1328
+ "text": "ACKNOWLEDGMENTS",
1329
+ "text_level": 1,
1330
+ "bbox": [
1331
+ 171,
1332
+ 598,
1333
+ 328,
1334
+ 611
1335
+ ],
1336
+ "page_idx": 9
1337
+ },
1338
+ {
1339
+ "type": "text",
1340
+ "text": "This work was partially supported by JST Moonshot R&D Grant Number JPMJPS2011, CREST Grant Number JPMJCR2015 and Basic Research Grant (Super AI) of Institute for AI and Beyond of the University of Tokyo. This work was also partially supported by JST, the establishment of university fellowships towards the creation of science technology innovation, Grant Number JP-MJFS2108.",
1341
+ "bbox": [
1342
+ 169,
1343
+ 621,
1344
+ 826,
1345
+ 691
1346
+ ],
1347
+ "page_idx": 9
1348
+ },
1349
+ {
1350
+ "type": "header",
1351
+ "text": "Published as a conference paper at ICLR 2024",
1352
+ "bbox": [
1353
+ 171,
1354
+ 32,
1355
+ 478,
1356
+ 47
1357
+ ],
1358
+ "page_idx": 9
1359
+ },
1360
+ {
1361
+ "type": "page_number",
1362
+ "text": "10",
1363
+ "bbox": [
1364
+ 490,
1365
+ 946,
1366
+ 509,
1367
+ 960
1368
+ ],
1369
+ "page_idx": 9
1370
+ },
1371
+ {
1372
+ "type": "text",
1373
+ "text": "REFERENCES",
1374
+ "text_level": 1,
1375
+ "bbox": [
1376
+ 174,
1377
+ 102,
1378
+ 287,
1379
+ 117
1380
+ ],
1381
+ "page_idx": 10
1382
+ },
1383
+ {
1384
+ "type": "list",
1385
+ "sub_type": "ref_text",
1386
+ "list_items": [
1387
+ "ShahRukh Athar, Zexiang Xu, Kalyan Sunkavalli, Eli Shechtman, and Zhixin Shu. Rignerf: Fully controllable neural 3d portraits. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pp. 20364-20373, 2022.",
1388
+ "ShahRukh Athar, Zhixin Shu, and Dimitris Samaras. Flame-in-nerf: Neural control of radiance fields for free view face animation. In 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1-8. IEEE, 2023.",
1389
+ "Ziqian Bai, Feitong Tan, Zeng Huang, Kripasindhu Sarkar, Danhang Tang, Di Qiu, Abhinitra Meka, Ruofei Du, Mingsong Dou, Sergio Orts-Escalano, et al. Learning personalized high quality volumetric head avatars from monocular rgb videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16890-16900, 2023.",
1390
+ "Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, 1999.",
1391
+ "Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In IEEE International Conference on Computer Vision (ICCV), 2017.",
1392
+ "Radek Danecek, Michael J. Black, and Timo Bolkart. EMOCA: Emotion driven monocular face capture and animation. In Conference on Computer Vision and Pattern Recognition (CVPR), pp. 20311-20322, 2022.",
1393
+ "Jiankang Deng, Jia Guo, Xue Niannan, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019.",
1394
+ "Nikita Drobyshev, Jenya Chelishev, Taras Khakhulin, Aleksei Ivakhnenko, Victor Lempitsky, and Egor Zakharov. Megaportraits: One-shot megapixel neural head avatars. Proceedings of the 30th ACM International Conference on Multimedia, 2022.",
1395
+ "Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. ACM Transactions on Graphics (ToG), Proc. SIGGRAPH, pp. 88:1-88:13, 2021.",
1396
+ "Guy Gafni, Justus Thies, Michael Zollhöfer, and Matthias Nießner. Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021.",
1397
+ "Thomas Gereg, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Luthi, Sandro Schonborn, and Thomas Vetter. Morphable face models—an open framework. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 75–82, 2018.",
1398
+ "Yudong Guo, Keyu Chen, Sen Liang, Yong-Jin Liu, Hujun Bao, and Juyong Zhang. Ad-nerf: Audio driven neural radiance fields for talking head synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5784-5794, 2021.",
1399
+ "Yang Hong, Bo Peng, Haiyao Xiao, Ligang Liu, and Juyong Zhang. Headnerf: A real-time nef-based parametric head model. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022.",
1400
+ "Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 694-711. Springer, 2016.",
1401
+ "Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 4401-4410, 2019.",
1402
+ "Taras Khakhulin, Vanessa Sklyarova, Victor Lempitsky, and Egor Zakharov. Realistic one-shot mesh-based head avatars. In European Conference on Computer Vision (ECCV), 2022."
1403
+ ],
1404
+ "bbox": [
1405
+ 171,
1406
+ 125,
1407
+ 825,
1408
+ 924
1409
+ ],
1410
+ "page_idx": 10
1411
+ },
1412
+ {
1413
+ "type": "header",
1414
+ "text": "Published as a conference paper at ICLR 2024",
1415
+ "bbox": [
1416
+ 171,
1417
+ 32,
1418
+ 478,
1419
+ 47
1420
+ ],
1421
+ "page_idx": 10
1422
+ },
1423
+ {
1424
+ "type": "page_number",
1425
+ "text": "11",
1426
+ "bbox": [
1427
+ 490,
1428
+ 948,
1429
+ 506,
1430
+ 959
1431
+ ],
1432
+ "page_idx": 10
1433
+ },
1434
+ {
1435
+ "type": "list",
1436
+ "sub_type": "ref_text",
1437
+ "list_items": [
1438
+ "Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.",
1439
+ "Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 2012.",
1440
+ "Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), pp. 194:1-194:17, 2017.",
1441
+ "Weichuang Li, Longhao Zhang, Dong Wang, Bin Zhao, Zhigang Wang, Mulin Chen, Bang Zhang, Zhongjian Wang, Liefeng Bo, and Xuelong Li. One-shot high-fidelity talking-head synthesis with deformable neural radiance field. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023a.",
1442
+ "Xueting Li, Shalini De Mello, Sifei Liu, Koki Nagano, Umar Iqbal, and Jan Kautz. Generalizable one-shot neural head avatar. Arxiv, 2023b.",
1443
+ "Zhiyuan Ma, Xiangyu Zhu, Guojun Qi, Zhen Lei, and Lei Zhang. Otavatar: One-shot talking face avatar with controllable tri-plane rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023.",
1444
+ "Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020.",
1445
+ "Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. IEEE International Conference on Computer Vision (ICCV), 2021a.",
1446
+ "Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo Martin-Brualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. Graph., 2021b.",
1447
+ "Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017.",
1448
+ "Pascal Paysan, Reinhard Knothe, Brian Amberg, Sami Romdhani, and Thomas Vetter. A 3d face model for pose and illumination invariant face recognition. In 2009 sixth IEEE international conference on advanced video and signal based surveillance, pp. 296-301, 2009.",
1449
+ "Yurui Ren, Ge Li, Yuanqi Chen, Thomas H Li, and Shan Liu. Pirenderer: Controllable portrait image generation via semantic neural rendering. In IEEE International Conference on Computer Vision (ICCV), 2021.",
1450
+ "Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Trans. Graph., 2021.",
1451
+ "Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. First order motion model for image animation. Advances in Neural Information Processing Systems (NeurIPS), 2019.",
1452
+ "Jingxiang Sun, Xuan Wang, Lizhen Wang, Xiaoyu Li, Yong Zhang, Hongwen Zhang, and Yebin Liu. Next3d: Generative neural texture rasterization for 3d-aware head avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 20991-21002, 2023.",
1453
+ "Jiaxiang Tang, Kaisiyuan Wang, Hang Zhou, Xiaokang Chen, Dongliang He, Tianshu Hu, Jingtuo Liu, Gang Zeng, and Jingdong Wang. Real-time neural radiance talking portrait synthesis via audio-spatial decomposition. arXiv preprint arXiv:2211.12368, 2022.",
1454
+ "Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In IEEE International Conference on Computer Vision (ICCV), 2021."
1455
+ ],
1456
+ "bbox": [
1457
+ 171,
1458
+ 103,
1459
+ 825,
1460
+ 922
1461
+ ],
1462
+ "page_idx": 11
1463
+ },
1464
+ {
1465
+ "type": "header",
1466
+ "text": "Published as a conference paper at ICLR 2024",
1467
+ "bbox": [
1468
+ 171,
1469
+ 32,
1470
+ 478,
1471
+ 47
1472
+ ],
1473
+ "page_idx": 11
1474
+ },
1475
+ {
1476
+ "type": "page_number",
1477
+ "text": "12",
1478
+ "bbox": [
1479
+ 490,
1480
+ 946,
1481
+ 508,
1482
+ 959
1483
+ ],
1484
+ "page_idx": 11
1485
+ },
1486
+ {
1487
+ "type": "list",
1488
+ "sub_type": "ref_text",
1489
+ "list_items": [
1490
+ "Alex Trevithick, Matthew Chan, Michael Stengel, Eric R. Chan, Chao Liu, Zhiding Yu, Sameh Khamis, Manmohan Chandraker, Ravi Ramamoorthi, and Koki Nagano. Real-time radiance fields for single-image portrait view synthesis. In ACM Transactions on Graphics (SIGGRAPH), 2023.",
1491
+ "Ting-Chun Wang, Arun Mallya, and Ming-Yu Liu. One-shot free-view neural talking-head synthesis for video conferencing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021a.",
1492
+ "Xintao Wang, Yu Li, Honglun Zhang, and Ying Shan. Towards real-world blind face restoration with generative facial prior. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021b.",
1493
+ "Liangbin Xie, Xintao Wang, Honglun Zhang, Chao Dong, and Ying Shan. Vfhq: A high-quality dataset and benchmark for video face super-resolution. In The IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), 2022.",
1494
+ "Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5438-5448, 2022.",
1495
+ "Yuelang Xu, Hongwen Zhang, Lizhen Wang, Xiaochen Zhao, Han Huang, Guojun Qi, and Yebin Liu. Latentavator: Learning latent expression code for expressive neural head avatar. arXiv preprint arXiv:2305.01190, 2023.",
1496
+ "Fei Yin, Yong Zhang, Xiaodong Cun, Mingdeng Cao, Yanbo Fan, Xuan Wang, Qingyan Bai, Baoyuan Wu, Jue Wang, and Yujiu Yang. Styleheat: One-shot high-resolution editable talking face generation via pre-trained stylegan. In European Conference on Computer Vision (ECCV), 2022.",
1497
+ "Heng Yu, Koichiro Niinuma, and László A Jeni. Confies: Controllable neural face avatars. In 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1-8. IEEE, 2023a.",
1498
+ "Wangbo Yu, Yanbo Fan, Yong Zhang, Xuan Wang, Fei Yin, Yunpeng Bai, Yan-Pei Cao, Ying Shan, Yang Wu, Zhongqian Sun, et al. Nofa: Nerf-based one-shot facial avatar reconstruction. In ACM SIGGRAPH 2023 Conference Proceedings, pp. 1-12, 2023b.",
1499
+ "Egor Zakharov, Aleksei Ivakhnenko, Aliaksandra Shysheya, and Victor Lempitsky. Fast bi-layer neural synthesis of one-shot realistic head avatars. In European Conference on Computer Vision (ECCV), 2020.",
1500
+ "Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 586-595, 2018.",
1501
+ "Wenxuan Zhang, Xiaodong Cun, Xuan Wang, Yong Zhang, Xi Shen, Yu Guo, Ying Shan, and Fei Wang. Sadtalker: Learning realistic 3d motion coefficients for stylized audio-driven single image talking face animation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8652-8661, 2023.",
1502
+ "Zhimeng Zhang, Lincheng Li, Yu Ding, and Changjie Fan. Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021.",
1503
+ "Yufeng Zheng, Victoria Fernández Abrevaya, Marcel C Bühler, Xu Chen, Michael J Black, and Otmar Hilliges. Im avatar: Implicit morphable head avatars from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13545-13555, 2022.",
1504
+ "Wojciech Zielonka, Timo Bolkart, and Justus Thies. Instant volumetric head avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4574-4584, 2023."
1505
+ ],
1506
+ "bbox": [
1507
+ 171,
1508
+ 102,
1509
+ 825,
1510
+ 883
1511
+ ],
1512
+ "page_idx": 12
1513
+ },
1514
+ {
1515
+ "type": "header",
1516
+ "text": "Published as a conference paper at ICLR 2024",
1517
+ "bbox": [
1518
+ 171,
1519
+ 32,
1520
+ 478,
1521
+ 47
1522
+ ],
1523
+ "page_idx": 12
1524
+ },
1525
+ {
1526
+ "type": "page_number",
1527
+ "text": "13",
1528
+ "bbox": [
1529
+ 490,
1530
+ 946,
1531
+ 508,
1532
+ 959
1533
+ ],
1534
+ "page_idx": 12
1535
+ },
1536
+ {
1537
+ "type": "text",
1538
+ "text": "A REPRODUCIBILITY",
1539
+ "text_level": 1,
1540
+ "bbox": [
1541
+ 171,
1542
+ 102,
1543
+ 367,
1544
+ 118
1545
+ ],
1546
+ "page_idx": 13
1547
+ },
1548
+ {
1549
+ "type": "text",
1550
+ "text": "A.1 MORE IMPLEMENTATION DETAILS",
1551
+ "text_level": 1,
1552
+ "bbox": [
1553
+ 171,
1554
+ 133,
1555
+ 457,
1556
+ 148
1557
+ ],
1558
+ "page_idx": 13
1559
+ },
1560
+ {
1561
+ "type": "text",
1562
+ "text": "Specifically, our canonical feature encoder takes the original image of $3 \\times 512 \\times 512$ as input. We obtain the style code through 4 groups of ResBlock down-sampling, use 3 groups of ResBlock upsampling to obtain the conditions, and then use StyleGAN (Karras et al., 2019) to output the $3 \\times 32 \\times 256 \\times 256$ tri-planes based on style code and conditions. In the point-based expression field, we assign a 32-dim feature to each point. Since FLAME (Li et al., 2017) contains 5023 points, the total point features size is $5023 \\times 32$ . During sampling in PEF, we select the nearest 8 points, compute a 39-dim relative position code for each point, and use two fully connected layers to map the 71-dim features to 32-dim. In the multi tri-planes attention module, we employ three linear layers to map the query and key for attention calculation, and the input, output, and hidden layer dimensions are all set to 32. After obtaining 32-dim features from the canonical feature space and expression field, we use three dense layers to map the feature from 64-dim to 128-dim. Subsequently, a single linear layer is employed to predict density, and two linear layers are used to predict RGB values. Finally, we employ a super-resolution network, similar to the encoder, to map the image from $32 \\times 128 \\times 128$ to $3 \\times 512 \\times 512$ dimensions. We also provide code for the model in the supplementary materials for reference.",
1563
+ "bbox": [
1564
+ 169,
1565
+ 159,
1566
+ 826,
1567
+ 367
1568
+ ],
1569
+ "page_idx": 13
1570
+ },
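A rough sketch of the point-based expression field described above: for every 3D query sample, look up the K nearest FLAME vertices, encode their relative positions (39 dimensions), concatenate the learned 32-dim per-point features, and map the 71-dim result back to 32 dimensions. The distance-based weighting used to aggregate the K neighbours is an assumption, and shapes and module names are illustrative rather than the released code.

```python
import torch
import torch.nn as nn

class PointExpressionFieldSketch(nn.Module):
    """Simplified PEF: per-point features gathered by nearest-neighbour lookup."""

    def __init__(self, n_points=5023, feat_dim=32, pos_dim=39, k=8):
        super().__init__()
        self.k = k
        self.point_feats = nn.Parameter(torch.randn(n_points, feat_dim))
        # Two fully connected layers mapping (point feature + relative position code) -> feat_dim.
        self.mlp = nn.Sequential(
            nn.Linear(feat_dim + pos_dim, feat_dim), nn.ReLU(),
            nn.Linear(feat_dim, feat_dim),
        )

    def positional_encode(self, rel):
        # rel: (..., 3) relative offsets -> (..., 39) code: raw offset + 6 sin/cos octaves.
        freqs = 2.0 ** torch.arange(6, device=rel.device)
        enc = [rel] + [f(rel * f_) for f_ in freqs for f in (torch.sin, torch.cos)]
        return torch.cat(enc, dim=-1)            # 3 + 3 * 2 * 6 = 39 dims

    def forward(self, queries, flame_points):
        # queries: (Q, 3) ray samples; flame_points: (P, 3) posed FLAME vertices.
        dists = torch.cdist(queries, flame_points)           # (Q, P)
        d, idx = dists.topk(self.k, dim=-1, largest=False)    # (Q, K) nearest points
        rel = queries[:, None, :] - flame_points[idx]          # (Q, K, 3)
        feats = torch.cat([self.point_feats[idx],
                           self.positional_encode(rel)], dim=-1)   # (Q, K, 71)
        w = torch.softmax(-d, dim=-1)[..., None]               # distance weights (one simple choice)
        return (w * self.mlp(feats)).sum(dim=1)                # (Q, 32)
```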
1571
+ {
1572
+ "type": "text",
1573
+ "text": "A.2 MORE DATA PROCESSING DETAILS",
1574
+ "text_level": 1,
1575
+ "bbox": [
1576
+ 171,
1577
+ 383,
1578
+ 464,
1579
+ 398
1580
+ ],
1581
+ "page_idx": 13
1582
+ },
1583
+ {
1584
+ "type": "text",
1585
+ "text": "We use 8,013 video clips from the VFHQ dataset (Xie et al., 2022) for training, and we uniformly sampled 30 frames from each video clip to ensure that the expressions and poses in each frame were as diverse as possible, resulting in a total of 240,390 frames. We cropped the heads and shoulders from the videos, extracted the 3DMM parameters for each frame (including identity, expression, and camera pose) with (Danecek et al., 2022) and further refined the pose with (Bulat & Tzimiropoulos, 2017). Finally, we resized all these images to $512 \\times 512$ pixels.",
1586
+ "bbox": [
1587
+ 169,
1588
+ 410,
1589
+ 823,
1590
+ 494
1591
+ ],
1592
+ "page_idx": 13
1593
+ },
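The uniform temporal sampling mentioned above amounts to picking evenly spaced frame indices; a tiny helper of the following kind (hypothetical name) is all that is needed.

```python
def uniform_indices(n_frames: int, n_samples: int = 30) -> list:
    """Evenly spaced frame indices, e.g. to pick 30 diverse frames per clip."""
    if n_samples >= n_frames:
        return list(range(n_frames))
    step = n_frames / n_samples
    return [min(int(round(i * step)), n_frames - 1) for i in range(n_samples)]
```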
1594
+ {
1595
+ "type": "text",
1596
+ "text": "For the HDTF (Zhang et al., 2021) dataset, we followed the training-testing division in OTAvatar. We conducted a uniform time-based sampling, selecting 100 frames from each of the 19 videos, thereby creating a test split encompassing 1900 frames. As for the VFHQ dataset, we employed a similar approach, uniformly sampling 60 frames from each of the 30 videos. This method ensured that all parts of each test video were sampled as thoroughly as possible.",
1597
+ "bbox": [
1598
+ 169,
1599
+ 501,
1600
+ 823,
1601
+ 571
1602
+ ],
1603
+ "page_idx": 13
1604
+ },
1605
+ {
1606
+ "type": "text",
1607
+ "text": "A.3 MORE EVALUATION DETAILS",
1608
+ "text_level": 1,
1609
+ "bbox": [
1610
+ 171,
1611
+ 588,
1612
+ 423,
1613
+ 602
1614
+ ],
1615
+ "page_idx": 13
1616
+ },
1617
+ {
1618
+ "type": "text",
1619
+ "text": "We conducted comparisons with ROME (Khakhulin et al., 2022), StyleHeat (Yin et al., 2022), OTAvatar (Ma et al., 2023), Next3D (Sun et al., 2023), and HideNeRF (Li et al., 2023a) using their official implementations. Since NOFA (Yu et al., 2023b) does not currently provide an official implementation, and GOAvatar (Li et al., 2023b) is a parallel work, it is challenging to make a correct and fair comparison between these two. Additionally, as Next3D (Sun et al., 2023) has not yet provided a formal inversion implementation, we integrated PTI (Roich et al., 2021) for reconstruction within it.",
1620
+ "bbox": [
1621
+ 169,
1622
+ 613,
1623
+ 823,
1624
+ 699
1625
+ ],
1626
+ "page_idx": 13
1627
+ },
1628
+ {
1629
+ "type": "text",
1630
+ "text": "For each method, we utilized the official data pre-processing scripts to obtain their respective input frames, driving frames, and result frames. For all methods, we aligned the facial regions to a uniform size and then resized them to $512 \\times 512$ . It's important to note that the same alignment parameters were applied to both the driving frames and result frames to ensure their correspondence. Subsequently, we computed all metrics on the aligned frames to ensure a fair comparison. Furthermore, as most methods primarily focus on the facial area, our approach actually encompasses a larger region, including parts of the shoulders. During alignment, we used a region closer to the face to make as few modifications as possible to the baseline method's results, which may result in our approach not performing optimally in some metrics.",
1631
+ "bbox": [
1632
+ 169,
1633
+ 704,
1634
+ 826,
1635
+ 830
1636
+ ],
1637
+ "page_idx": 13
1638
+ },
1639
+ {
1640
+ "type": "text",
1641
+ "text": "B LIMITATIONS",
1642
+ "text_level": 1,
1643
+ "bbox": [
1644
+ 171,
1645
+ 849,
1646
+ 320,
1647
+ 864
1648
+ ],
1649
+ "page_idx": 13
1650
+ },
1651
+ {
1652
+ "type": "text",
1653
+ "text": "Our method has some limitations. Specifically, our current FLAME-based model lacks a module to control the shoulders and body, resulting in limited control below the neck. Currently, the position of the shoulders in the images generated by our model is generally consistent with the input image.",
1654
+ "bbox": [
1655
+ 169,
1656
+ 881,
1657
+ 823,
1658
+ 925
1659
+ ],
1660
+ "page_idx": 13
1661
+ },
1662
+ {
1663
+ "type": "header",
1664
+ "text": "Published as a conference paper at ICLR 2024",
1665
+ "bbox": [
1666
+ 171,
1667
+ 32,
1668
+ 478,
1669
+ 47
1670
+ ],
1671
+ "page_idx": 13
1672
+ },
1673
+ {
1674
+ "type": "page_number",
1675
+ "text": "14",
1676
+ "bbox": [
1677
+ 490,
1678
+ 946,
1679
+ 508,
1680
+ 959
1681
+ ],
1682
+ "page_idx": 13
1683
+ },
1684
+ {
1685
+ "type": "text",
1686
+ "text": "Additionally, for other areas not modeled by FLAME such as hair and tongue, explicit control is also not feasible. Furthermore, while our aspiration is to achieve real-time reenactment of more than 30 fps, our current performance is pre-real-time for now (approximately 15 fps on the A100 GPU). We leave addressing these limitations for future work.",
1687
+ "bbox": [
1688
+ 169,
1689
+ 103,
1690
+ 823,
1691
+ 160
1692
+ ],
1693
+ "page_idx": 14
1694
+ },
1695
+ {
1696
+ "type": "text",
1697
+ "text": "C MORE ABLATION STUDIES",
1698
+ "text_level": 1,
1699
+ "bbox": [
1700
+ 171,
1701
+ 190,
1702
+ 433,
1703
+ 207
1704
+ ],
1705
+ "page_idx": 14
1706
+ },
1707
+ {
1708
+ "type": "image",
1709
+ "img_path": "images/2f3b323d97c417686731c36a6e782ab51127bd4c1b80dba2b99e376d8969b316.jpg",
1710
+ "image_caption": [
1711
+ "Figure 7: Qualitative results on VFHQ (Xie et al., 2022) datasets. Compared to the mean baseline, our MTA preserves more details. The smaller of the two inputs is provided to Ours Two-in."
1712
+ ],
1713
+ "image_footnote": [],
1714
+ "bbox": [
1715
+ 173,
1716
+ 241,
1717
+ 823,
1718
+ 382
1719
+ ],
1720
+ "page_idx": 14
1721
+ },
1722
+ {
1723
+ "type": "image",
1724
+ "img_path": "images/097613aa51cbb5333fe0773135bd143fb1d6cb26f49fa31aa4e64894e46ed016.jpg",
1725
+ "image_caption": [
1726
+ "Figure 8: Qualitative results on VFHQ (Xie et al., 2022) datasets. With four inputs, our method produces sharp and detailed results."
1727
+ ],
1728
+ "image_footnote": [],
1729
+ "bbox": [
1730
+ 173,
1731
+ 467,
1732
+ 823,
1733
+ 648
1734
+ ],
1735
+ "page_idx": 14
1736
+ },
1737
+ {
1738
+ "type": "text",
1739
+ "text": "To thoroughly explore design choices for the model, we conducted additional ablation experiments. More Ablation with MTA. Fig. 7 presents the qualitative results compared to the naive mean baseline. From the qualitative results, we can observe that the mean baseline excessively smooths the eyes and facial features, leading to a decrease in performance, which aligns with the observations from the quantitative results. Fig. 8 displays the qualitative results of our method when using four images as input. It can be observed that using four images as input does not result in detail smoothing or loss and has a quite good performance.",
1740
+ "bbox": [
1741
+ 169,
1742
+ 720,
1743
+ 823,
1744
+ 820
1745
+ ],
1746
+ "page_idx": 14
1747
+ },
1748
+ {
1749
+ "type": "text",
1750
+ "text": "Visualization of attention map in MTA. In order to better evaluate the performance of MTA, we visualized the attention map. As shown in Fig.9, we can see that the model can pay attention to different parts of the face very well. When the input includes left and right faces, the model can provide attention to both sides to achieve a more complete modeling of the whole face. At the same time, the model also pays more attention to the open-eyes input to ensure that the eye area is reconstructed correctly. These visualization results are consistent with our expectations and show the advantages of MTA in fusing multiple image inputs.",
1751
+ "bbox": [
1752
+ 169,
1753
+ 825,
1754
+ 826,
1755
+ 925
1756
+ ],
1757
+ "page_idx": 14
1758
+ },
1759
+ {
1760
+ "type": "header",
1761
+ "text": "Published as a conference paper at ICLR 2024",
1762
+ "bbox": [
1763
+ 171,
1764
+ 32,
1765
+ 478,
1766
+ 47
1767
+ ],
1768
+ "page_idx": 14
1769
+ },
1770
+ {
1771
+ "type": "page_number",
1772
+ "text": "15",
1773
+ "bbox": [
1774
+ 490,
1775
+ 946,
1776
+ 508,
1777
+ 959
1778
+ ],
1779
+ "page_idx": 14
1780
+ },
1781
+ {
1782
+ "type": "image",
1783
+ "img_path": "images/ff2c1840da988453f4a4499bd34934f156be54798cec476a29acd27c59507be0.jpg",
1784
+ "image_caption": [
1785
+ "Figure 9: Visualization of attention map in our MTA module. The image is normalized by affine transformation. Red represents higher attention and black represents lower attention. MTA can pay attention to the left and right faces in different inputs, and also pay more attention to the eyes-open photos."
1786
+ ],
1787
+ "image_footnote": [],
1788
+ "bbox": [
1789
+ 174,
1790
+ 101,
1791
+ 823,
1792
+ 301
1793
+ ],
1794
+ "page_idx": 15
1795
+ },
1796
+ {
1797
+ "type": "text",
1798
+ "text": "D PRELIMINARIES OF FLAME",
1799
+ "text_level": 1,
1800
+ "bbox": [
1801
+ 171,
1802
+ 400,
1803
+ 447,
1804
+ 416
1805
+ ],
1806
+ "page_idx": 15
1807
+ },
1808
+ {
1809
+ "type": "text",
1810
+ "text": "We use the geometry prior from the FLAME (Li et al., 2017) model, a 3D morphable model known for its geometric accuracy and versatility. It extends beyond static facial models by incorporating expressions, offers precise control over facial features, and is represented parametrically. FLAME finds applications in facial animation, avatar creation, and facial recognition due to its realistic rendering capabilities and flexibility. The FLAME model represents the head shape in the following way:",
1811
+ "bbox": [
1812
+ 169,
1813
+ 436,
1814
+ 823,
1815
+ 521
1816
+ ],
1817
+ "page_idx": 15
1818
+ },
1819
+ {
1820
+ "type": "equation",
1821
+ "text": "\n$$\nT P (\\hat {\\beta}, \\hat {\\theta}, \\hat {\\psi}) = \\bar {T} + B S (\\hat {\\beta}; S) + B P (\\hat {\\theta}; P) + B E (\\hat {\\psi}; E), \\tag {6}\n$$\n",
1822
+ "text_format": "latex",
1823
+ "bbox": [
1824
+ 303,
1825
+ 535,
1826
+ 823,
1827
+ 556
1828
+ ],
1829
+ "page_idx": 15
1830
+ },
1831
+ {
1832
+ "type": "text",
1833
+ "text": "where $\\bar{T}$ is a template mesh, $BS(\\hat{\\beta};S)$ is a shape blend-shape function to account for identity-related shape variation, $BP(\\hat{\\theta};P)$ is a corrective pose blend-shape to correct pose deformations that cannot be explained solely by linear blend skinning, and expression blend-shapes $BE(\\hat{\\psi};E)$ is used to capture facial expressions.",
1834
+ "bbox": [
1835
+ 169,
1836
+ 569,
1837
+ 823,
1838
+ 633
1839
+ ],
1840
+ "page_idx": 15
1841
+ },
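As a worked illustration of Eq. (6), the template vertices and the three blend-shape terms combine additively. The basis sizes below are illustrative placeholders, and the snippet is not tied to the official FLAME code.

```python
import numpy as np

def flame_template_shape(T_bar, S, P, E, beta, theta_corr, psi):
    # Eq. (6): template vertices plus identity, pose-corrective and
    # expression blend-shape offsets (before linear blend skinning).
    # T_bar: (V, 3); S: (V, 3, |beta|); P: (V, 3, |theta|); E: (V, 3, |psi|).
    return (T_bar
            + np.einsum('vdk,k->vd', S, beta)        # BS(beta; S)
            + np.einsum('vdk,k->vd', P, theta_corr)  # BP(theta; P)
            + np.einsum('vdk,k->vd', E, psi))        # BE(psi; E)

V = 5023                                             # FLAME vertex count
T_bar = np.zeros((V, 3))
S, P, E = (np.random.randn(V, 3, n) for n in (300, 36, 100))  # illustrative basis sizes
verts = flame_template_shape(T_bar, S, P, E,
                             np.random.randn(300), np.random.randn(36), np.random.randn(100))
```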
1842
+ {
1843
+ "type": "text",
1844
+ "text": "E BLENDING MULTIPLE IDENTITIES",
1845
+ "text_level": 1,
1846
+ "bbox": [
1847
+ 171,
1848
+ 660,
1849
+ 493,
1850
+ 676
1851
+ ],
1852
+ "page_idx": 15
1853
+ },
1854
+ {
1855
+ "type": "text",
1856
+ "text": "We also attempted to synthesize results using different individuals and styles as inputs. As shown in Fig. 10, even in the case of such diverse inputs, our method demonstrated robustness, producing reasonable results while combining features from different persons in the images.",
1857
+ "bbox": [
1858
+ 169,
1859
+ 696,
1860
+ 823,
1861
+ 739
1862
+ ],
1863
+ "page_idx": 15
1864
+ },
1865
+ {
1866
+ "type": "image",
1867
+ "img_path": "images/df704dadef583c6137fc095ca54a59c61904a50815d251c9ac43c86e8b19aa0f.jpg",
1868
+ "image_caption": [
1869
+ "Figure 10: Blend results from in-the-wild images. The smaller images are the driving frames."
1870
+ ],
1871
+ "image_footnote": [],
1872
+ "bbox": [
1873
+ 173,
1874
+ 758,
1875
+ 823,
1876
+ 859
1877
+ ],
1878
+ "page_idx": 15
1879
+ },
1880
+ {
1881
+ "type": "header",
1882
+ "text": "Published as a conference paper at ICLR 2024",
1883
+ "bbox": [
1884
+ 171,
1885
+ 32,
1886
+ 478,
1887
+ 47
1888
+ ],
1889
+ "page_idx": 15
1890
+ },
1891
+ {
1892
+ "type": "page_number",
1893
+ "text": "16",
1894
+ "bbox": [
1895
+ 490,
1896
+ 948,
1897
+ 508,
1898
+ 959
1899
+ ],
1900
+ "page_idx": 15
1901
+ },
1902
+ {
1903
+ "type": "text",
1904
+ "text": "F MORE QUALITATIVE RESULTS",
1905
+ "text_level": 1,
1906
+ "bbox": [
1907
+ 171,
1908
+ 102,
1909
+ 460,
1910
+ 119
1911
+ ],
1912
+ "page_idx": 16
1913
+ },
1914
+ {
1915
+ "type": "text",
1916
+ "text": "In this section, we showcase more visual results of our method for cross-identity reenactment. Fig. 11 showcases additional results of our method. It's worth noting that, for a fair comparison with other methods, we standardized the evaluation details across all methods, as stated in the evaluation specifics in Appendix A.3. Our method can also encompass more non-facial regions, as illustrated in Fig. 11. Fig. 12 displays the results on VFHQ, and Fig. 13 presents results on HDTF, respectively. We also provide a supplementary video to show more dynamic results.",
1917
+ "bbox": [
1918
+ 169,
1919
+ 133,
1920
+ 826,
1921
+ 219
1922
+ ],
1923
+ "page_idx": 16
1924
+ },
1925
+ {
1926
+ "type": "header",
1927
+ "text": "Published as a conference paper at ICLR 2024",
1928
+ "bbox": [
1929
+ 171,
1930
+ 32,
1931
+ 478,
1932
+ 47
1933
+ ],
1934
+ "page_idx": 16
1935
+ },
1936
+ {
1937
+ "type": "page_number",
1938
+ "text": "17",
1939
+ "bbox": [
1940
+ 490,
1941
+ 946,
1942
+ 508,
1943
+ 959
1944
+ ],
1945
+ "page_idx": 16
1946
+ },
1947
+ {
1948
+ "type": "image",
1949
+ "img_path": "images/ac91e08d42280982beccccbf013ee92182f45f8e65e33c2f1ce6311cbd776129.jpg",
1950
+ "image_caption": [
1951
+ "Figure 11: Qualitative results on VFHQ (Xie et al., 2022) datasets. The smaller of the two source inputs is provided to Ours Two-in."
1952
+ ],
1953
+ "image_footnote": [],
1954
+ "bbox": [
1955
+ 174,
1956
+ 101,
1957
+ 823,
1958
+ 875
1959
+ ],
1960
+ "page_idx": 17
1961
+ },
1962
+ {
1963
+ "type": "header",
1964
+ "text": "Published as a conference paper at ICLR 2024",
1965
+ "bbox": [
1966
+ 173,
1967
+ 32,
1968
+ 478,
1969
+ 47
1970
+ ],
1971
+ "page_idx": 17
1972
+ },
1973
+ {
1974
+ "type": "page_number",
1975
+ "text": "18",
1976
+ "bbox": [
1977
+ 490,
1978
+ 946,
1979
+ 508,
1980
+ 959
1981
+ ],
1982
+ "page_idx": 17
1983
+ },
1984
+ {
1985
+ "type": "image",
1986
+ "img_path": "images/f2918319c319ac5b512f08fb2c9d613f41e701865d2ec6d87fb76cf26d5e3382.jpg",
1987
+ "image_caption": [
1988
+ "Figure 12: Qualitative results on VFHQ (Xie et al., 2022) datasets. The smaller of the two source inputs is provided to Ours Two-in."
1989
+ ],
1990
+ "image_footnote": [],
1991
+ "bbox": [
1992
+ 192,
1993
+ 101,
1994
+ 808,
1995
+ 881
1996
+ ],
1997
+ "page_idx": 18
1998
+ },
1999
+ {
2000
+ "type": "header",
2001
+ "text": "Published as a conference paper at ICLR 2024",
2002
+ "bbox": [
2003
+ 173,
2004
+ 32,
2005
+ 478,
2006
+ 47
2007
+ ],
2008
+ "page_idx": 18
2009
+ },
2010
+ {
2011
+ "type": "page_number",
2012
+ "text": "19",
2013
+ "bbox": [
2014
+ 490,
2015
+ 946,
2016
+ 508,
2017
+ 959
2018
+ ],
2019
+ "page_idx": 18
2020
+ },
2021
+ {
2022
+ "type": "image",
2023
+ "img_path": "images/777affae18edc672e03667f9ff9396e92b1d7ca25d5109d4c9b80b6be9a83e68.jpg",
2024
+ "image_caption": [
2025
+ "Figure 13: Qualitative results on HDTF (Zhang et al., 2021) datasets. The smaller of the two source inputs is provided to Ours Two-in."
2026
+ ],
2027
+ "image_footnote": [],
2028
+ "bbox": [
2029
+ 181,
2030
+ 108,
2031
+ 823,
2032
+ 851
2033
+ ],
2034
+ "page_idx": 19
2035
+ },
2036
+ {
2037
+ "type": "header",
2038
+ "text": "Published as a conference paper at ICLR 2024",
2039
+ "bbox": [
2040
+ 173,
2041
+ 32,
2042
+ 478,
2043
+ 47
2044
+ ],
2045
+ "page_idx": 19
2046
+ },
2047
+ {
2048
+ "type": "page_number",
2049
+ "text": "20",
2050
+ "bbox": [
2051
+ 488,
2052
+ 946,
2053
+ 509,
2054
+ 959
2055
+ ],
2056
+ "page_idx": 19
2057
+ }
2058
+ ]
2401.10xxx/2401.10215/95513a42-9487-4a29-97c5-eb93bcc107a7_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10215/95513a42-9487-4a29-97c5-eb93bcc107a7_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9a2f709e833700252578a0d192d3f477eb0e4604f0a22fa09e99d9f03b84d81b
3
+ size 5986989
2401.10xxx/2401.10215/full.md ADDED
@@ -0,0 +1,335 @@
1
+ # GPAVATAR: GENERALIZABLE AND PRECISE HEAD AVATAR FROM IMAGE(S)
2
+
3
+ Xuangeng Chu $^{1,2*}$ Yu Li $^{2\dagger}$ Ailing Zeng $^{2}$ Tianyu Yang $^{2}$ Lijian Lin $^{2}$ Yunfei Liu $^{2}$ Tatsuya Harada $^{1,3\dagger}$
4
+
5
+ $^{1}$ The University of Tokyo $^{2}$ International Digital Economy Academy (IDEA) $^{3}$ RIKEN AIP {xuangeng.chu, harada}@mi.t.u-tokyo.ac.jp {liyu, zengailing, yangtianyu, linlijian, liuyunfei}@idea.edu.cn
6
+
7
+ ![](images/f958e67fff484195e3c75244490227c67ce82c24cb25b8352d81171ca40bf9f9.jpg)
8
+ Figure 1: Our GPAvatar is able to reconstruct 3D head avatars from even a single input (i.e., one-shot), with strong generalization and precise expression control. The leftmost images are the inputs, and the subsequent images depict reenactment results. Inset images display the corresponding driving faces. Additionally, the first row shows three novel view results.
9
+
10
+ # ABSTRACT
11
+
12
+ Head avatar reconstruction, crucial for applications in virtual reality, online meetings, gaming, and film industries, has garnered substantial attention within the computer vision community. The fundamental objective of this field is to faithfully recreate the head avatar and precisely control expressions and postures. Existing methods, categorized into 2D-based warping, mesh-based, and neural rendering approaches, present challenges in maintaining multi-view consistency, incorporating non-facial information, and generalizing to new identities. In this paper, we propose a framework named GPAvatar that reconstructs 3D head avatars from one or several images in a single forward pass. The key idea of this work is to introduce a dynamic point-based expression field driven by a point cloud to precisely and effectively capture expressions. Furthermore, we use a Multi Tri-planes Attention (MTA) fusion module in the tri-planes canonical field to leverage information from multiple input images. The proposed method achieves faithful identity reconstruction, precise expression control, and multi-view consistency, demonstrating promising results for free-viewpoint rendering and novel view synthesis. Code is available at https://github.com/xg-chu/GPAvatar.
13
+
14
+ # 1 INTRODUCTION
15
+
16
+ Head avatar reconstruction holds immense potential in various applications, including virtual reality, online meetings, gaming, and the film industry. In recent years, this field has garnered significant attention within the computer vision community. The primary objective of head avatar reconstruction is to faithfully recreate the source head while enabling precise control over expressions and posture. This capability will facilitate the generation of desired new expressions and poses for the source portrait (Li et al., 2023a; Yu et al., 2023b; Li et al., 2023b).
17
+
18
+ Some exploratory methods have partially achieved this goal and can be roughly categorized into three types: 2D-based warping methods (Yin et al., 2022), mesh-based methods (Khakhulin et al., 2022), and neural rendering methods (Sun et al., 2023; Ma et al., 2023; Li et al., 2023a; Yu et al., 2023b; Li et al., 2023b). Among these, 2D-based methods warp the original image to new expressions with a warping field estimated from sparse landmarks, and then synthesize the appearance through an encoder and decoder. However, these methods struggle to maintain multi-view consistency when there are significant changes in head pose due to their lack of necessary 3D constraints. Furthermore, these methods are unable to effectively decouple expressions and identity from the source portrait, leading to unfaithful driving results. Mesh-based methods explicitly model the source portrait with a 3D Morphable Model (3DMM) (Blanz & Vetter, 1999; Paysan et al., 2009; Li et al., 2017; Gerig et al., 2018). By incorporating 3D information, these methods effectively address the issue of multi-view consistency. However, due to the limitations in the modeling and expressive capacity of 3DMM, the reconstructed head often lacks non-facial information such as hair, and the expressions are often unnatural. With the outstanding performance of NeRF (neural radiance field) in multi-view image synthesis, the latest methods have started to leverage NeRF for head avatar reconstruction(Xu et al., 2023; Zielonka et al., 2023; Zheng et al., 2022; Sun et al., 2023; Ma et al., 2023). Compared to 2D and mesh-based methods, NeRF-based methods have shown the ability to synthesize results that are 3D-consistent and include non-facial information. However, these methods can't generalize well to new identities. Some of these methods require a large amount of portrait data for reconstruction, and some involve time-consuming optimization processes during inference.
19
+
20
+ In this paper, we present a framework for reconstructing the source portrait in a single forward pass. Given one or several unseen images, our method reconstructs an animatable implicit head avatar representation. Some examples are shown in Fig. 1. The core challenge lies in faithfully reconstructing the head avatar from a single image and achieving precise control over expressions. To address this issue, we introduce a point cloud-driven dynamic expression field to precisely capture expressions and use a Multi Tri-planes Attention (MTA) module in the tri-planes canonical field to leverage information from multiple input images. The 3DMM point cloud-driven field provides natural and precise expression control and facilitates identity-expression decoupling. The merged tri-planes encapsulate a feature space that includes faithful identity information from the source portrait while modeling parts not covered by the 3DMM, such as shoulders and hair. The experiment verifies that our method generalizes well to unseen identities and enables precise expression control without test-time optimization, thereby enabling tasks such as free novel view synthesis and reenactment.
21
+
22
+ The major contributions of our work are as follows:
23
+
24
+ - We introduce a 3D head avatar reconstruction framework that achieves faithful reconstruction in a single forward pass and generalizes well to in-the-wild images.
25
+ - We propose a dynamic Point-based Expression Field (PEF) that allows for precise and natural cross-identity expression control.
26
+ - We propose a Multi Tri-planes Attention (MTA) fusion module to accept an arbitrary number of input images. It enables the incorporation of more information during inference, particularly beneficial for extreme inputs like closed eyes and occlusions.
27
+
28
+ # 2 RELATED WORK
29
+
30
+ # 2.1 TALKING HEAD SYNTHESIS
31
+
32
+ Previous methods for head synthesis can be categorized into deformation-based, mesh-based, and NeRF-based methods. Warping-based methods (Siarohin et al., 2019; Zakharov et al., 2020; Wang et al., 2021a; Yin et al., 2022; Drobyshev et al., 2022; Zhang et al., 2023) are popular among 2D generative methods. Usually, these methods apply deformation operations to the source image to drive the motion in the target image. Due to a lack of clear understanding and modeling of the 3D geometry of the head avatar, these methods often produce unrealistic distortions when the poses and expressions change a lot. Many subsequent works (Ren et al., 2021; Yin et al., 2022; Zhang et al., 2023) alleviated this problem by introducing 3DMM (Blanz & Vetter, 1999; Paysan et al., 2009; Li et al., 2017; Gerig et al., 2018), but this problem still exists and limits the performance of 2D methods. To completely address this problem, many 3DMM-based works (Feng et al., 2021; Danecek et al., 2022; Khakhulin et al., 2022) reconstruct animatable avatars by estimating 3DMM
33
+
34
+ parameters from portrait images. Among them, ROME(Khakhulin et al., 2022) estimates the 3DMM parameters, the offset of mesh vertex and the texture to render the results. However, although 3DMM provides strong priors for understanding the face, it focuses only on facial regions and cannot capture other detailed features such as hairstyles and accessories, and the fidelity is limited by the resolution of meshes, resulting in unnatural appearances in the reenactment images.
35
+
36
+ NeRF (Mildenhall et al., 2020) is a type of implicit 3D scene representation method known for its excellent performance in static scene reconstruction. Many works (Park et al., 2021a;b; Tretschk et al., 2021) try to extend it from static scenes to dynamic scenes, and there are also many works (Gafni et al., 2021; Zheng et al., 2022; Xu et al., 2023; Zielonka et al., 2023; Athar et al., 2023) that apply NeRF to human portrait reconstruction and animation. One of the research directions is to generate controllable 3D head avatars from random noise (Sun et al., 2023; Ma et al., 2023). While these methods can produce realistic and controllable results, achieving reconstruction requires GAN inversion, which is impractical in real-time scenarios. Another research direction is to utilize data from specific individuals for reconstruction (Gafni et al., 2021; Athar et al., 2022; Zheng et al., 2022; Xu et al., 2023; Bai et al., 2023; Zielonka et al., 2023). While the results are impressive, they cannot learn networks for different identities and require thousands of frames of personal image data, raising privacy concerns. At the same time, there are also some methods that use audio to drive the avatar (Tang et al., 2022; Guo et al., 2021; Yu et al., 2023a), providing users with a more flexible and easier-to-use driving approach.
37
+
38
+ # 2.2 ONE-SHOT HEAD AVATARS
39
+
40
+ ![](images/ae4540aa4d5db500531cb973bd6fc867f5541d507b9f91ddd448a30a439f7eb4.jpg)
41
+ Figure 2: Differences from existing state-of-the-art methods. Existing methods may over-process expression information or use expression features, leading to expression detail loss. Our approach avoids this loss with a point-based expression field, and our method flexibly accepts single or multiple images as input, enhancing information gathering through our multi-tri-planes attention module.
42
+
43
+ ![](images/0a23934c66b87596ad4022c8888ef68ef306a3eeaa9fe5a1c978a5d6b963cd36.jpg)
44
+
45
+ ![](images/f2c6b7833df13908d5af57ff49935d0a0a196e042342b6dc2019a0c38e1cd648.jpg)
46
+
47
+ ![](images/35074ae691639facc7e80419d3bbc482a2b20c4b3bdaf6298428cebdc1f72e8f.jpg)
48
+
49
+ To address these issues, some works (Trevithick et al., 2023; Hong et al., 2022; Li et al., 2023a;b; Yu et al., 2023b) have focused on reconstructing 3D avatars from arbitrary input images. Some methods achieve static reconstruction (Trevithick et al., 2023; Hong et al., 2022) but are unable to reanimate these digital avatars. Other methods utilize NeRF to achieve animatable one-shot feed-forward reconstruction of target avatars, such as GOAvatar (Li et al., 2023b), NOFA (Yu et al., 2023b), and HideNeRF (Li et al., 2023a). GOAvatar (Li et al., 2023b) utilizes three sets of tri-planes to represent the canonical pose, image details, and expression, respectively, and employs a fine-tuned GFPGAN (Wang et al., 2021b) network to enhance the details of the results. NOFA (Yu et al., 2023b) utilizes the rich 3D-consistent generative prior of a 3D GAN to synthesize neural volumes of different faces and employs deformation fields to model facial dynamics. HideNeRF (Li et al., 2023a) utilizes a multi-resolution tri-planes representation and a 3DMM-based deformation field to generate reenactment images while enhancing identity consistency during generation. While these methods produce impressive results, they still have limitations in expression-driven tasks: they either rely on rendered 3DMM images as input to control deformation fields or directly use 3DMM parameters for expression driving, and in such encoding and decoding processes, subtle facial expression information is inevitably lost.
50
+
51
+ In this paper, we utilize the FLAME (Li et al., 2017) point cloud as prior and propose a novel 3D head neural avatar framework. It not only generalizes to unseen identities but also offers precise control over expression details during reenactment and surpasses all previous works in reenactment image quality. Fig. 2 illustrates the differences between our method and existing approaches.
52
+
53
+ ![](images/b9ce6cf0cdbdbf0fdaff2b3cf3582febe07fc64e0a494871b21f5543ffaed9d1.jpg)
54
+ Figure 3: Overview: Our method mainly comprises two branches: one that captures fine-grained expressions with PEF (Sec. 3.2) and another that integrates information from multiple inputs through MTA (Sec. 3.1, 3.3). Finally, there is the rendering and super-resolution component (Sec. 3.4).
55
+
56
+ # 3 METHOD
57
+
58
+ In this section, we describe our method. Our approach can faithfully reconstruct head avatars from any number of input images and achieve precise reenactment. The overall process can be summarized as follows:
59
+
60
+ $$
+ I_t = R\big(MTA(E(I_i)),\ PEF(\mathrm{FLAME}(s_i, e_t, p_t), \theta),\ p_{cam}\big),
+ $$
63
+
64
+ where $I_{i}$ represents the input image(s), $s_i$ is the shape parameter of the source image $I_{i}$, and $e_t$ and $p_t$ are the desired expression and pose parameters. The canonical feature space is constructed by $E(I_i)$, and our Point-based Expression Field (PEF) is built from the point cloud $\mathrm{FLAME}(s_i, e_t, p_t)$ and the point features $\theta$. If there are multiple input images, their canonical feature spaces are merged by the Multi Tri-planes Attention module (MTA). Finally, $R$ is the volume rendering function that renders the reenactment image $I_t$ under the camera pose $p_{cam}$. The overall process is illustrated in Fig. 3. In the following, we describe the canonical encoder in Sec. 3.1, explain how PEF controls expressions in Sec. 3.2, introduce how we fuse multiple inputs through MTA in Sec. 3.3, discuss the rendering and super-resolution process in Sec. 3.4, and finally describe the training targets in Sec. 3.5.
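+ To make the data flow concrete, the following is a minimal, hypothetical sketch of this pipeline in Python. Every callable is passed in as an argument because the function and module names here are illustrative assumptions, not the released implementation.
+
+ ```python
+ def reenact(source_images, s_i, e_t, p_t, p_cam,
+             canonical_encoder, mta_fuse, flame, build_pef, theta, render):
+     """Hypothetical outline of the overall equation above."""
+     tri_planes = [canonical_encoder(img) for img in source_images]  # E(I_i) per source image
+     canonical = mta_fuse(tri_planes)                                # MTA: fuse multiple tri-planes
+     points = flame(s_i, e_t, p_t)                                   # FLAME point cloud for the target expression/pose
+     expression_field = build_pef(points, theta)                     # PEF with learnable point features theta
+     return render(canonical, expression_field, p_cam)               # R: volume rendering + super-resolution
+ ```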
65
+
66
+ # 3.1 CANONICAL FEATURE ENCODER
67
+
68
+ Because the tri-planes representation carries strong 3D geometric priors and strikes a good balance between synthesis quality and speed, we employ it as our canonical feature space. Specifically, inspired by GFPGAN (Wang et al., 2021b), our encoder follows a UNet structure and uses a StyleGAN-style generator during up-sampling. We generally keep the same settings as GFPGAN, modifying only the input and output layers so that the encoder maps the original $3 \times 512 \times 512$ image to a $3 \times 32 \times 256 \times 256$ tri-planes feature space. In our experiments, we observed that this structure effectively integrates global information from the input image during down-sampling and then generates mutually correlated planes during up-sampling. To enhance the robustness of the encoder and adapt to arbitrary real-world inputs, we apply affine transformations to align the input 2D images using estimated head poses. Since we utilize a separate expression feature field encoded with PEF, the canonical feature space on its own does not carry complete semantics. Therefore, while many tri-planes-based works restrict the canonical feature space to a neutral expression, we impose no such restriction and train the encoder from scratch in an end-to-end manner.
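+ For readers unfamiliar with tri-planes, the sketch below shows the standard way a 3D sample position is mapped to a feature: project the point onto the three axis-aligned planes, bilinearly sample each plane, and aggregate. The plane ordering, the sum aggregation, and the tensor shapes are assumptions for illustration, not our exact implementation.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def sample_triplane(planes, xyz):
+     """planes: (3, C, H, W) features for the XY, XZ, YZ planes; xyz: (M, 3) in [-1, 1]."""
+     coords = torch.stack([xyz[:, [0, 1]],   # project onto the XY plane
+                           xyz[:, [0, 2]],   # project onto the XZ plane
+                           xyz[:, [1, 2]]])  # project onto the YZ plane
+     grid = coords.unsqueeze(2)                              # (3, M, 1, 2) sampling grid
+     feats = F.grid_sample(planes, grid, mode='bilinear',
+                           align_corners=False)              # (3, C, M, 1)
+     return feats.squeeze(-1).sum(dim=0).transpose(0, 1)     # (M, C), aggregated over planes
+ ```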
69
+
70
+ # 3.2 POINT-BASED EXPRESSION FIELD
71
+
72
+ In this section, we introduce how we build a controllable expression field based on point clouds. Many head avatar methods rely on 3DMM parameters or rendered 3DMM images to generate expressions; they either have limited expressive capability when using the parameters directly or lose detail through excessive encoding and decoding. Inspired by Point-NeRF (Xu et al., 2022), we directly use the point cloud from the 3DMM to construct a point-based expression field, thereby avoiding over-processing and retaining expression details as much as possible.
73
+
74
+ Unlike methods that reconstruct static scenes, our point-based expression field (PEF) aims to model dynamic expressions. To achieve this, we bind learnable features to each FLAME vertex in the PEF. Because FLAME vertices have stable semantics and geometric topology (for example, points on the eyes and mouth keep their semantics across expressions and identities), each neural point in the PEF also carries stable semantics and can be shared across identities. When sampling features from the PEF, we query several nearest points and combine them into the feature for the sample position. If we restricted this query to a local region following (Xu et al., 2022), the representation capability would be limited: non-FLAME areas would be modeled only by the canonical feature space, and expression-related parts not covered by FLAME, such as hair, would become fully rigid, making certain expressions unnatural. Therefore, we instead search for neighboring points in the entire space and use relative position encoding to provide the model with direction and distance information. This liberates the representational capability of the point features, and our experiments confirm that it performs better. To achieve synergy between the canonical feature space and the PEF and to harness the prior provided by the point cloud, we remove the global pose from the FLAME pose and model it with the camera pose instead, so the point cloud always stays in a canonical position near the origin. Since we sample features from both feature spaces, the semantic information and 3D priors from the PEF are learned jointly with the canonical feature space.
75
+
76
+ The overall process of our PEF is as follows: for any query 3D position $x$ during NeRF sampling, we retrieve its nearest $K$ points and obtain their corresponding features $f_{i}$ and positions $p_i$. We then employ linear layers to regress a feature for each point and finally combine these features with position-based weights, as shown in Eq. 1:
77
+
78
+ $$
+ f_{exp, x} = \sum_{i}^{K} \frac{w_{i}}{\sum_{j}^{K} w_{j}}\, L_{p}\left(f_{i}, F_{pos}\left(p_{i} - x\right)\right), \quad \text{where } w_{i} = \frac{1}{\left\lVert p_{i} - x \right\rVert}, \tag{1}
+ $$
81
+
82
+ where $L_{p}$ denotes the linear layers and $F_{pos}$ is the frequency positional encoding function. During this process, the position of point $p_i$ changes as the FLAME expression parameters change, creating a dynamic expression feature field. This lets FLAME contribute directly to the NeRF feature space and avoids the information loss introduced by excessive processing. Because the canonical tri-planes and the PEF are decoupled, we build the canonical tri-planes only once during inference, so per-frame inference speed is dominated by the PEF. Thanks to efficient parallel nearest-neighbor queries, the PEF step completes quickly, which greatly improves inference speed.
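+ The following is a minimal sketch of this sampling step under assumed tensor shapes; the MLP, the positional encoding, and the brute-force nearest-neighbor search are placeholders rather than our exact implementation.
+
+ ```python
+ import torch
+
+ def pef_features(query_xyz, point_xyz, point_feats, mlp, pos_enc, K=8, eps=1e-8):
+     """query_xyz: (M, 3) NeRF sample positions; point_xyz: (P, 3) FLAME vertices;
+     point_feats: (P, C) learnable per-vertex features; mlp and pos_enc are assumed callables."""
+     dist = torch.cdist(query_xyz, point_xyz)                   # (M, P), global search over all vertices
+     knn_dist, knn_idx = dist.topk(K, dim=-1, largest=False)    # nearest K points per query
+     rel = point_xyz[knn_idx] - query_xyz.unsqueeze(1)          # (M, K, 3) relative offsets p_i - x
+     per_point = mlp(torch.cat([point_feats[knn_idx],
+                                pos_enc(rel)], dim=-1))         # L_p(f_i, F_pos(p_i - x))
+     w = 1.0 / (knn_dist + eps)                                 # inverse-distance weights
+     w = w / w.sum(dim=-1, keepdim=True)
+     return (w.unsqueeze(-1) * per_point).sum(dim=1)            # (M, C') expression feature f_{exp,x}
+ ```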
83
+
84
+ # 3.3 MULTI TRI-PLANES ATTENTION
85
+
86
+ With the modules above, we can already obtain animatable high-fidelity results. However, since the source image can be arbitrary, some scenarios remain challenging: the source image may contain occlusions, or the eyes may be closed while the desired expression requires them open. In such cases, the model may hallucinate the missing eye and facial features from statistical averages, and these hallucinations can be incorrect. Although a single image cannot recover the ground truth for the missing parts, other images of the same person may supply that information. To exploit this, we implement an attention-based module that fuses the tri-planes features of multiple images, which we call Multi Tri-planes Attention (MTA).
87
+
88
+ Our MTA uses a learnable tri-plane to query multiple tri-planes from different images, generating weights for feature fusion, as shown in Eq. 2:
89
+
90
+ $$
+ P = \sum_{i}^{N} \frac{w_{i}}{\sum_{j}^{N} w_{j}}\, E\left(I_{i}\right), \quad \text{where } w_{i} = L_{q}(Q)\, L_{k}\left(E\left(I_{i}\right)\right), \tag{2}
+ $$
93
+
94
+ where $I_{i}$ is the $i$-th input image, $N$ is the number of input images, $E$ is the canonical encoder, $L_{q}$ and $L_{k}$ are the linear layers that generate queries and keys, and $Q$ is the learnable query tri-planes.
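+ A minimal sketch of this fusion is shown below. The layer types (1x1 convolutions acting on the channel dimension) and tensor shapes are assumptions, and we use a softmax over the inputs as a numerically stable stand-in for the normalized weights in Eq. 2.
+
+ ```python
+ import torch
+
+ def mta_fuse(tri_planes, query_planes, l_q, l_k):
+     """tri_planes: list of N tensors (3, C, H, W) from E(I_i);
+     query_planes: learnable (3, C, H, W); l_q, l_k: assumed 1x1 conv layers."""
+     planes = torch.stack(tri_planes)                          # (N, 3, C, H, W)
+     n, p, c, h, w = planes.shape
+     q = l_q(query_planes)                                     # (3, C, H, W) queries from Q
+     k = l_k(planes.reshape(n * p, c, h, w)).reshape(n, p, c, h, w)
+     logits = (q.unsqueeze(0) * k).sum(dim=2, keepdim=True)    # (N, 3, 1, H, W) per-position scores
+     weights = torch.softmax(logits, dim=0)                    # normalize over the N inputs
+     return (weights * planes).sum(dim=0)                      # fused tri-planes P, shape (3, C, H, W)
+ ```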
95
+
96
+ Our experiments demonstrate that MTA effectively enhances performance and completes information that is missing from a single input, such as pupil appearance or the occluded half of the face under extreme pose variations. During training we use two random frames as input and one frame as the target, while at inference MTA can accept any number of images. Furthermore, experiments show that MTA consistently fuses tri-planes features from images of the same person captured at different times, and even when given images of different individuals and styles it still produces reasonable results, showcasing its robustness.
97
+
98
+ # 3.4 VOLUME RENDERING AND SUPER RESOLUTION
99
+
100
+ Given the camera's intrinsic and extrinsic parameters, we sample rays, perform two-pass hierarchical sampling along them, and apply volume rendering to obtain 2D results. Because high-resolution volume rendering requires extensive computation, training and testing at high resolution would be time-consuming and costly. A popular solution is a lightweight super-resolution module. In our work, we render low-resolution images at $128 \times 128$ resolution as 32-dimensional feature maps whose first three channels correspond to RGB pixel values. The super-resolution module is similar in structure to our canonical feature encoder and, like the encoder, is trained from scratch in an end-to-end manner.
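+ For reference, the sketch below shows the standard alpha-compositing step of volume rendering (Mildenhall et al., 2020) that turns per-sample densities and features into the low-resolution output; the hierarchical sampling and the network calls themselves are omitted, and shapes are assumptions.
+
+ ```python
+ import torch
+
+ def composite_ray(densities, features, deltas):
+     """densities: (S,) per-sample density along one ray; features: (S, C) per-sample
+     features (first 3 channels are RGB); deltas: (S,) distances between samples."""
+     alpha = 1.0 - torch.exp(-densities * deltas)             # opacity of each segment
+     trans = torch.cumprod(1.0 - alpha + 1e-10, dim=0)
+     trans = torch.cat([torch.ones_like(trans[:1]), trans[:-1]])  # transmittance before each sample
+     weights = alpha * trans                                  # contribution of each sample
+     return (weights.unsqueeze(-1) * features).sum(dim=0)     # (C,) rendered feature for this pixel
+ ```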
101
+
102
+ # 3.5 TRAINING STRATEGY AND LOSS FUNCTIONS
103
+
104
+ We train our model from scratch in an end-to-end manner. By sampling source and target images from the same video, we construct pairs of images with the same identity but different expressions and poses. During training, our primary objective is to make the reenactment images consistent with the target images. We use an $L_{1}$ loss and a perceptual loss (Johnson et al., 2016; Zhang et al., 2018) on both the low-resolution and high-resolution reenactment images, as shown in Eq. 3:
105
+
106
+ $$
+ \mathcal{L}_{rec} = \left\lVert I_{lr} - I_{t} \right\rVert + \left\lVert I_{hr} - I_{t} \right\rVert + \lambda_{p} \left( \left\lVert \varphi(I_{lr}) - \varphi(I_{t}) \right\rVert + \left\lVert \varphi(I_{hr}) - \varphi(I_{t}) \right\rVert \right), \tag{3}
+ $$
109
+
110
+ where $I_{t}$ is the reenactment target image, $I_{lr}$ and $I_{hr}$ are the low-resolution and high-resolution reenactment results, $\varphi$ is the AlexNet (Krizhevsky et al., 2012) used in the perceptual loss, and $\lambda_{p}$ is the weight for the perceptual loss.
111
+
112
+ Additionally, we add a density-based norm loss, as shown in Eq. 4:
113
+
114
+ $$
+ \mathcal{L}_{norm} = \left\lVert d_{n} \right\rVert_{2}, \tag{4}
+ $$
117
+
118
+ where $d_{n}$ is the density used in volume rendering (Mildenhall et al., 2020). This loss encourages the total density of the NeRF to be as low as possible, pushing reconstructions to adhere closely to the actual 3D shape and avoiding artifacts. The overall training objective is:
119
+
120
+ $$
+ \mathcal{L}_{overall} = \lambda_{r} \mathcal{L}_{rec} + \lambda_{n} \mathcal{L}_{norm}, \tag{5}
+ $$
123
+
124
+ where $\lambda_r$ and $\lambda_n$ are the weights that balance the loss.
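+ A compact sketch of this objective is shown below; `perceptual` stands in for the AlexNet-based perceptual distance, the low-resolution target is assumed to be a downsampled copy of $I_t$, and the weight values are illustrative placeholders rather than our actual settings.
+
+ ```python
+ import torch
+
+ def training_loss(I_lr, I_hr, I_t_lr, I_t, density, perceptual,
+                   lambda_p=1.0, lambda_r=1.0, lambda_n=0.01):
+     """I_lr / I_t_lr: low-res prediction / target; I_hr / I_t: high-res prediction / target.
+     `perceptual` is an assumed callable returning a scalar feature distance."""
+     l_rec = ((I_lr - I_t_lr).abs().mean() + (I_hr - I_t).abs().mean()        # L1 terms of Eq. 3
+              + lambda_p * (perceptual(I_lr, I_t_lr) + perceptual(I_hr, I_t)))
+     l_norm = density.norm(p=2)                                               # Eq. 4
+     return lambda_r * l_rec + lambda_n * l_norm                              # Eq. 5
+ ```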
125
+
126
+ # 4 EXPERIMENTS
127
+
128
+ In this section, we will first introduce the dataset we use, the implementation details of our method, and the baselines of our work. We will then compare our method with existing approaches using a variety of metrics.
129
+
130
+ # 4.1 EXPERIMENT SETTING
131
+
132
+ Datasets. We use the VFHQ (Xie et al., 2022) dataset to train our model. This dataset comprises clips from various interview scenarios, and we utilized a subset consisting of 8,013 video clips. From these videos, we extracted 240,390 frames to create our training dataset. During the training process, we randomly sampled frames from the same video to create pairs of images with the same identity but different expressions. One frame was used as the target for reenactment, while the others served as source images. Given that our method can accept any number of inputs, in each iteration, we sampled two inputs with a $70\%$ probability and one input with a $30\%$ probability. Regarding evaluation, we assessed our method on the VFHQ dataset (Xie et al., 2022) and the HDTF dataset (Zhang et al., 2021). It's important to note that our model was not fine-tuned on the HDTF dataset. In the evaluation process, we used the first frame of each video as the source image, with the remaining frames as targets for reenactment.
133
+
134
+ Evaluation Metrics. We evaluated all methods on both same-identity and cross-identity reenactment tasks. For the cross-identity reenactment task, where no ground truth is available, we evaluated the cosine similarity of identity embeddings (CSIM) based on ArcFace (Deng et al., 2019) between the reenacted frames and the source images to assess identity consistency.
135
+
136
+ ![](images/f30a8dd3a2d8da75a40981502527afee7ef1d4ffe40752dbb6610fd8c7c75902.jpg)
137
+ Figure 4: Qualitative results on VFHQ (Xie et al., 2022) and HDTF (Zhang et al., 2021) datasets. The first two rows are from VFHQ and the third row is from HDTF.
138
+
139
+ ![](images/1d30ddc6965ae0404053cabb031c312d57938896bd34295a3dcf25e8a9ac9238.jpg)
140
+ Figure 5: Qualitative results on VFHQ (Xie et al., 2022) and HDTF (Zhang et al., 2021) datasets. The first four rows are from VFHQ and the last row is from HDTF.
141
+
142
+ We also used the Average Expression Distance (AED) and Average Pose Distance (APD) metrics based on (Danecek et al., 2022) to assess the accuracy of expression and pose driving. In the same-identity reenactment task, where ground-truth frames are available, we additionally evaluate PSNR, SSIM, L1, and LPIPS between the reenacted and ground-truth frames, and compute the Average Key-point Distance (AKD) based on (Bulat & Tzimiropoulos, 2017) as another reference for expression reenactment accuracy.
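+ For reference, CSIM is simply the cosine similarity between face-recognition embeddings; a minimal sketch, assuming a pretrained `arcface` embedding network is available as a callable:
+
+ ```python
+ import torch.nn.functional as F
+
+ def csim(arcface, source_img, reenacted_img):
+     """`arcface` is any pretrained face-embedding network (an assumption here),
+     returning one identity embedding vector per image."""
+     e_src = F.normalize(arcface(source_img), dim=-1)
+     e_out = F.normalize(arcface(reenacted_img), dim=-1)
+     return (e_src * e_out).sum(dim=-1).mean()
+ ```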
143
+
144
+ Implementation Details. Our framework is built upon PyTorch (Paszke et al., 2017), and during training we employ the ADAM (Kingma & Ba, 2014) optimizer with a learning rate of 1.0e-4.
145
+
146
+ Table 1: Quantitative results on the VFHQ (Xie et al., 2022) dataset. For a fair comparison, we compare one-shot results using the first frame input. Ours Two-in uses both the first and last frames. Entries in green are the best ones in a one-shot setting.
147
+
148
+ <table><tr><td rowspan="2">Method</td><td colspan="8">Self Reenactment</td><td colspan="3">Cross-Id Reenactment</td></tr><tr><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>CSIM↑</td><td>L1↓</td><td>AED↓</td><td>APD↓</td><td>AKD↓</td><td>CSIM↑</td><td>AED↓</td><td>APD↓</td></tr><tr><td>ROME (Khakhulin et al., 2022)</td><td>19.88</td><td>0.735</td><td>0.237</td><td>0.679</td><td>0.060</td><td>0.497</td><td>0.017</td><td>4.53</td><td>0.531</td><td>0.936</td><td>0.026</td></tr><tr><td>StyleHeat (Yin et al., 2022)</td><td>19.95</td><td>0.738</td><td>0.251</td><td>0.603</td><td>0.065</td><td>0.593</td><td>0.024</td><td>5.30</td><td>0.506</td><td>0.961</td><td>0.038</td></tr><tr><td>OTAvatar (Ma et al., 2023)</td><td>18.10</td><td>0.600</td><td>0.346</td><td>0.660</td><td>0.092</td><td>0.734</td><td>0.035</td><td>6.05</td><td>0.514</td><td>0.962</td><td>0.059</td></tr><tr><td>Next3D (Sun et al., 2023)</td><td>19.95</td><td>0.656</td><td>0.281</td><td>0.631</td><td>0.066</td><td>0.727</td><td>0.026</td><td>5.17</td><td>0.482</td><td>0.996</td><td>0.036</td></tr><tr><td>HideNeRF (Li et al., 2023a)</td><td>20.07</td><td>0.745</td><td>0.204</td><td>0.794</td><td>0.056</td><td>0.521</td><td>0.031</td><td>5.33</td><td>0.558</td><td>1.024</td><td>0.044</td></tr><tr><td>Ours One-in</td><td>22.08</td><td>0.765</td><td>0.177</td><td>0.789</td><td>0.039</td><td>0.434</td><td>0.017</td><td>3.53</td><td>0.558</td><td>0.910</td><td>0.034</td></tr><tr><td>Ours Two-in</td><td>22.86</td><td>0.779</td><td>0.169</td><td>0.771</td><td>0.035</td><td>0.411</td><td>0.017</td><td>3.44</td><td>0.551</td><td>0.907</td><td>0.034</td></tr></table>
149
+
150
+ Table 2: Quantitative results on the HDTF (Zhang et al., 2021) dataset. For a fair comparison, we compare one-shot results using the first frame input. Ours Two-in uses both the first and last frames. Entries in green are the best ones in a one-shot setting.
151
+
152
+ <table><tr><td rowspan="2">Method</td><td colspan="8">Self Reenactment</td><td colspan="3">Cross-Id Reenactment</td></tr><tr><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>CSIM↑</td><td>L1↓</td><td>AED↓</td><td>APD↓</td><td>AKD↓</td><td>CSIM↑</td><td>AED↓</td><td>APD↓</td></tr><tr><td>ROME (Khakhulin et al., 2022)</td><td>20.84</td><td>0.722</td><td>0.176</td><td>0.781</td><td>0.044</td><td>0.540</td><td>0.012</td><td>3.93</td><td>0.721</td><td>0.929</td><td>0.017</td></tr><tr><td>StyleHeat (Yin et al., 2022)</td><td>21.91</td><td>0.772</td><td>0.210</td><td>0.705</td><td>0.045</td><td>0.527</td><td>0.015</td><td>3.69</td><td>0.666</td><td>0.902</td><td>0.027</td></tr><tr><td>OTAvatar (Ma et al., 2023)</td><td>20.50</td><td>0.695</td><td>0.241</td><td>0.765</td><td>0.064</td><td>0.681</td><td>0.020</td><td>5.15</td><td>0.699</td><td>1.047</td><td>0.034</td></tr><tr><td>Next3D (Sun et al., 2023)</td><td>20.35</td><td>0.723</td><td>0.217</td><td>0.730</td><td>0.048</td><td>0.644</td><td>0.022</td><td>4.19</td><td>0.622</td><td>1.014</td><td>0.026</td></tr><tr><td>HideNeRF (Li et al., 2023a)</td><td>21.38</td><td>0.803</td><td>0.147</td><td>0.907</td><td>0.038</td><td>0.499</td><td>0.027</td><td>4.33</td><td>0.803</td><td>1.031</td><td>0.032</td></tr><tr><td>Ours One-in</td><td>24.21</td><td>0.834</td><td>0.131</td><td>0.871</td><td>0.029</td><td>0.427</td><td>0.012</td><td>3.06</td><td>0.790</td><td>0.869</td><td>0.020</td></tr><tr><td>Ours Two-in</td><td>25.36</td><td>0.849</td><td>0.122</td><td>0.851</td><td>0.026</td><td>0.406</td><td>0.012</td><td>3.01</td><td>0.769</td><td>0.837</td><td>0.021</td></tr></table>
153
+
154
+ We conducted training on 2 NVIDIA Tesla A100 GPUs with a total batch size of 8. During training, our PEF searches for the nearest $K = 8$ points, and MTA takes two frames as source images. Training is end-to-end, runs for 150,000 iterations, and consumes approximately 50 GPU hours, demonstrating efficient resource utilization. At inference time, our method achieves 15 FPS on an A100 GPU. More details can be found in the supplementary materials.
155
+
156
+ # 4.2 MAIN RESULTS
157
+
158
+ **Baseline Methods.** We compared our method with five state-of-the-art existing methods, including StyleHeat (Yin et al., 2022) (2D-based warping), ROME (Khakhulin et al., 2022) (mesh-based), OTAvatar (Ma et al., 2023), Next3D (Sun et al., 2023; Roich et al., 2021) (based on NeRF and 3D generative models), and HideNeRF (Li et al., 2023a), which is most similar to our setup. All results were evaluated using official code implementations.
159
+
160
+ Self-Reenactment Results. We begin by evaluating the synthesis performance when the source and driving image are the same person. Tab. 1 and Tab. 2 show the quantitative results on VFHQ and HDTF, respectively. Notably, our approach exhibits a significant advantage over other state-of-the-art methods in terms of both synthesis quality metrics (PSNR, SSIM, LPIPS, and L1) and expression control quality metrics (AED and AKD). Qualitative results on VFHQ and HDTF are visually demonstrated in Fig. 4. These results showcase that our method not only excels in synthesis quality but also captures subtle expressions, as exemplified by the surprised expression in the first row and the angry expression in the second row. Importantly, our model achieved these results without any training or fine-tuning on the HDTF dataset, thus demonstrating the robust generalization capability of our approach.
161
+
162
+ Cross-Identity Reenactment Results. We also evaluate synthesis performance when the source and driving images contain different persons. Tab. 1 and Tab. 2 show quantitative results, and Fig. 5 shows qualitative results. In the absence of ground truth, a quantitative evaluation of synthesis quality is not feasible, but the qualitative results make it evident that our method excels in expression control. These results demonstrate the efficacy of our approach when the source and driving images come from different individuals.
163
+
164
+ Multiple-image input. Beyond the quantitative results, we further illustrate the advantages of multi-image input in challenging scenarios such as closed eyes and significant pose variations, as shown in Fig. 6. The results demonstrate that employing multiple inputs can further enhance synthesis quality while maintaining precise expression control.
165
+
166
+ ![](images/5344428fd695f2e97b47f5c69589190150e820fa8f86339f5e25cf7a8df437ec.jpg)
167
+
168
+ ![](images/9f3ca9e2971f066c62b82e73159920d3cb95968303f3f18993785ecb11772cdd.jpg)
169
+
170
+ ![](images/58139ba39315280ce0f1d55331c7b36c0c6a1254c7a662326cf669bd24e87ca7.jpg)
171
+
172
+ ![](images/76b07b2ba5b3ae99922105445ef7236128b15e49703ac6a341e4db14f5ea696b.jpg)
173
+
174
+ ![](images/c78fafb43a83b689957d0956cef203332d351d97860bf19c93dbfcbc949f5159.jpg)
175
+ Figure 6: Qualitative results of multi-inputs. In each image, the left side shows input frames, while the right side displays reenactment frames and driving frames. It can be observed that using multiple inputs enhances the performance, especially in cases of closed eyes and occlusions.
176
+
177
+ ![](images/66821664911fdfab7231970c490121bac03889a9cc9d11c4acdb4ac6c736725f.jpg)
178
+
179
+ ![](images/5d453aa0f858404dc86f69614aa99c36cd651dd0ac0f948f49b58c8cb764797e.jpg)
180
+
181
+ Table 3: Ablation results on the VFHQ dataset. Entries in green are the best ones.
182
+
183
+ <table><tr><td>Method</td><td>PSNR↑</td><td>SSIM↑</td><td>LPIPS↓</td><td>CSIM↑</td><td>L1↓</td><td>AED↓</td><td>APD↓</td><td>AKD↓</td></tr><tr><td>w/o PEF</td><td>22.01</td><td>0.762</td><td>0.186</td><td>0.766</td><td>0.040</td><td>0.576</td><td>0.020</td><td>4.30</td></tr><tr><td>w/o global sample</td><td>21.58</td><td>0.760</td><td>0.194</td><td>0.765</td><td>0.039</td><td>0.518</td><td>0.019</td><td>3.96</td></tr><tr><td>point cloud 2000</td><td>21.95</td><td>0.761</td><td>0.193</td><td>0.750</td><td>0.040</td><td>0.497</td><td>0.020</td><td>3.86</td></tr><tr><td>query 4 points</td><td>22.04</td><td>0.762</td><td>0.192</td><td>0.751</td><td>0.039</td><td>0.514</td><td>0.020</td><td>3.90</td></tr><tr><td>Ours One-in</td><td>22.08</td><td>0.765</td><td>0.177</td><td>0.789</td><td>0.039</td><td>0.434</td><td>0.017</td><td>3.53</td></tr><tr><td>mean Two-in</td><td>22.75</td><td>0.776</td><td>0.190</td><td>0.726</td><td>0.036</td><td>0.452</td><td>0.019</td><td>3.68</td></tr><tr><td>mean Three-in</td><td>23.03</td><td>0.780</td><td>0.191</td><td>0.724</td><td>0.035</td><td>0.455</td><td>0.019</td><td>3.65</td></tr><tr><td>mean Four-in</td><td>23.16</td><td>0.783</td><td>0.194</td><td>0.716</td><td>0.035</td><td>0.449</td><td>0.018</td><td>3.64</td></tr><tr><td>Ours Two-in</td><td>22.86</td><td>0.779</td><td>0.169</td><td>0.771</td><td>0.035</td><td>0.411</td><td>0.017</td><td>3.44</td></tr><tr><td>Ours Three-in</td><td>23.27</td><td>0.788</td><td>0.165</td><td>0.772</td><td>0.033</td><td>0.403</td><td>0.016</td><td>3.41</td></tr><tr><td>Ours Four-in</td><td>23.49</td><td>0.792</td><td>0.164</td><td>0.773</td><td>0.032</td><td>0.400</td><td>0.016</td><td>3.41</td></tr></table>
184
+
185
+ # 4.3 ABLATION STUDIES
186
+
187
+ Effectiveness of Point-based Expression Field. To validate the effectiveness of our proposed PEF, we use a baseline that feeds FLAME expression parameters directly to the network (w/o PEF in Tab. 3); this strategy was used in NeRFace (Gafni et al., 2021) and demonstrated expression control capability. The improvements in AED and AKD clearly indicate that our PEF significantly enhances expression control. We also tried sampling points within a local region with a maximum distance of 1/128 instead of global sampling (w/o global sample in Tab. 3). The results show that our global sampling improves both expression detail and synthesis quality.
188
+
189
+ Effectiveness of Multi Tri-planes Attention. To validate the effectiveness of our proposed MTA, we established a naive mean-based baseline that averages the tri-planes of multiple images to obtain a merged plane. Table 3 shows the results. We observe that our MTA exhibits better synthesis performance, which we attribute to MTA's ability to avoid feature blurring caused by average fusion.
190
+
191
+ Ablation on Hyper-parameters. We conducted experiments on the selection of hyper-parameters. We randomly selected 2,000 points from 5,023 points of FLAME, and the results in Tab. 3 show that our method can also work on sparse point clouds. This may be attributed to our PEF finding neighboring points from the entire space, which prevents sparse sampling issues. We also reduced the number of query neighbors $K$ from 8 to 4, and the results indicate that our method has some robustness to the number of neighboring points.
192
+
193
+ # 5 CONCLUSION
194
+
195
+ In this paper, we have introduced a novel framework for generalizable and precise reconstruction of animatable 3D head avatars. Our approach reconstructs the neural radiance field using only one or a few input images and leverages a point-based expression field to control the expression of synthesized images. Additionally, we have introduced an attention-based fusion module to utilize information from multiple input images. Ablation studies suggest that the proposed Point-based Expression Field (PEF) and Multi Tri-planes Attention (MTA) can enhance synthesis quality and expression control. Our experimental results also demonstrate that our method achieves the most precise expression control and state-of-the-art synthesis quality on multiple benchmark datasets. We believe that our method has a wide range of potential applications due to its strong generalization and precise expression control capabilities.
196
+
197
+ # 6 ETHICS STATEMENT
198
+
199
+ Since our framework allows for the reconstruction and reenactment of head avatars, it has a wide range of applications but also carries the potential risk of misuse, such as creating fake videos of others, violating privacy, and spreading false information. We are aware of this potential for misuse and strongly discourage such practices. To this end, we propose several measures to mitigate this risk:
200
+
201
+ - We will add a conspicuous watermark to the synthesized video so that viewers can easily identify whether the video was synthesized by the model. This will significantly reduce the cost for viewers to identify the video and reduce the risk of abuse.
202
+ - We limit the identity of the target speaker to virtual identities such as virtual idols, and prohibit the synthesis of real people without formal consent. Furthermore, synthetic videos may only be used for educational or other legitimate purposes (such as online courses) and any misuse will be subject to liability via the tracking methods we present in the next point.
203
+ - We will also inject invisible watermarks into the synthesized video to store the IP of the video producer, so that the video producer must consider the potential risks brought by the synthesized video. This will encourage video producers to proactively think about whether their videos will create ethical risks and reduce the possibility of creating abusive videos.
204
+
205
+ To summarize, as technology designers, we propose strict licenses and technical safeguards to prevent abuse of our GPAvatar, a talking face reconstruction system. We believe further efforts from governments, society, technology designers, and users are needed to eliminate the abuse of deepfakes, and we hope that video creators are aware of the potential risks and responsibilities when using talking face generation techniques. We believe that, with proper application, our method has the potential to demonstrate significant utility in various real-world scenarios.
206
+
207
+ # 7 REPRODUCIBILITY STATEMENT
208
+
209
+ Here we summarize the efforts made to ensure the reproducibility of this work. The model architectures and training details are introduced in Appendix A.1, and we also release the code for the model at https://github.com/xg-chu/GPAvatar. The data processing and evaluation details are introduced in Appendix A.2 and Appendix A.3.
210
+
211
+ # ACKNOWLEDGMENTS
212
+
213
+ This work was partially supported by JST Moonshot R&D Grant Number JPMJPS2011, CREST Grant Number JPMJCR2015 and Basic Research Grant (Super AI) of Institute for AI and Beyond of the University of Tokyo. This work was also partially supported by JST, the establishment of university fellowships towards the creation of science technology innovation, Grant Number JP-MJFS2108.
214
+
215
+ # REFERENCES
216
+
217
+ ShahRukh Athar, Zexiang Xu, Kalyan Sunkavalli, Eli Shechtman, and Zhixin Shu. Rignerf: Fully controllable neural 3d portraits. In Proceedings of the IEEE/CVF conference on Computer Vision and Pattern Recognition, pp. 20364-20373, 2022.
218
+ ShahRukh Athar, Zhixin Shu, and Dimitris Samaras. Flame-in-nerf: Neural control of radiance fields for free view face animation. In 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1-8. IEEE, 2023.
219
+ Ziqian Bai, Feitong Tan, Zeng Huang, Kripasindhu Sarkar, Danhang Tang, Di Qiu, Abhimitra Meka, Ruofei Du, Mingsong Dou, Sergio Orts-Escolano, et al. Learning personalized high quality volumetric head avatars from monocular rgb videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 16890-16900, 2023.
220
+ Volker Blanz and Thomas Vetter. A morphable model for the synthesis of 3d faces. In Proceedings of the 26th annual conference on Computer graphics and interactive techniques, 1999.
221
+ Adrian Bulat and Georgios Tzimiropoulos. How far are we from solving the 2d & 3d face alignment problem? (and a dataset of 230,000 3d facial landmarks). In IEEE International Conference on Computer Vision (ICCV), 2017.
222
+ Radek Danecek, Michael J. Black, and Timo Bolkart. EMOCA: Emotion driven monocular face capture and animation. In Conference on Computer Vision and Pattern Recognition (CVPR), pp. 20311-20322, 2022.
223
+ Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou. Arcface: Additive angular margin loss for deep face recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019.
224
+ Nikita Drobyshev, Jenya Chelishev, Taras Khakhulin, Aleksei Ivakhnenko, Victor Lempitsky, and Egor Zakharov. Megaportraits: One-shot megapixel neural head avatars. Proceedings of the 30th ACM International Conference on Multimedia, 2022.
225
+ Yao Feng, Haiwen Feng, Michael J. Black, and Timo Bolkart. Learning an animatable detailed 3D face model from in-the-wild images. ACM Transactions on Graphics (ToG), Proc. SIGGRAPH, pp. 88:1-88:13, 2021.
226
+ Guy Gafni, Justus Thies, Michael Zollhöfer, and Matthias Nießner. Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021.
227
+ Thomas Gerig, Andreas Morel-Forster, Clemens Blumer, Bernhard Egger, Marcel Lüthi, Sandro Schönborn, and Thomas Vetter. Morphable face models—an open framework. In 2018 13th IEEE International Conference on Automatic Face & Gesture Recognition (FG 2018), pp. 75–82, 2018.
228
+ Yudong Guo, Keyu Chen, Sen Liang, Yong-Jin Liu, Hujun Bao, and Juyong Zhang. Ad-nerf: Audio driven neural radiance fields for talking head synthesis. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 5784-5794, 2021.
229
+ Yang Hong, Bo Peng, Haiyao Xiao, Ligang Liu, and Juyong Zhang. Headnerf: A real-time nerf-based parametric head model. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022.
230
+ Justin Johnson, Alexandre Alahi, and Li Fei-Fei. Perceptual losses for real-time style transfer and super-resolution. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part II 14, pp. 694-711. Springer, 2016.
231
+ Tero Karras, Samuli Laine, and Timo Aila. A style-based generator architecture for generative adversarial networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 4401-4410, 2019.
232
+ Taras Khakhulin, Vanessa Sklyarova, Victor Lempitsky, and Egor Zakharov. Realistic one-shot mesh-based head avatars. In European Conference on Computer Vision (ECCV), 2022.
233
+
234
+ Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014.
235
+ Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 2012.
236
+ Tianye Li, Timo Bolkart, Michael. J. Black, Hao Li, and Javier Romero. Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia), pp. 194:1-194:17, 2017.
237
+ Weichuang Li, Longhao Zhang, Dong Wang, Bin Zhao, Zhigang Wang, Mulin Chen, Bang Zhang, Zhongjian Wang, Liefeng Bo, and Xuelong Li. One-shot high-fidelity talking-head synthesis with deformable neural radiance field. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023a.
238
+ Xueting Li, Shalini De Mello, Sifei Liu, Koki Nagano, Umar Iqbal, and Jan Kautz. Generalizable one-shot neural head avatar. Arxiv, 2023b.
239
+ Zhiyuan Ma, Xiangyu Zhu, Guojun Qi, Zhen Lei, and Lei Zhang. Otavatar: One-shot talking face avatar with controllable tri-plane rendering. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2023.
240
+ Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), 2020.
241
+ Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. IEEE International Conference on Computer Vision (ICCV), 2021a.
242
+ Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo Martin-Brualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. Graph., 2021b.
243
+ Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in pytorch. 2017.
244
+ Pascal Paysan, Reinhard Knothe, Brian Amberg, Sami Romdhani, and Thomas Vetter. A 3d face model for pose and illumination invariant face recognition. In 2009 sixth IEEE international conference on advanced video and signal based surveillance, pp. 296-301, 2009.
245
+ Yurui Ren, Ge Li, Yuanqi Chen, Thomas H Li, and Shan Liu. Pirenderer: Controllable portrait image generation via semantic neural rendering. In IEEE International Conference on Computer Vision (ICCV), 2021.
246
+ Daniel Roich, Ron Mokady, Amit H Bermano, and Daniel Cohen-Or. Pivotal tuning for latent-based editing of real images. ACM Trans. Graph., 2021.
247
+ Aliaksandr Siarohin, Stéphane Lathuilière, Sergey Tulyakov, Elisa Ricci, and Nicu Sebe. First order motion model for image animation. Advances in Neural Information Processing Systems (NeurIPS), 2019.
248
+ Jingxiang Sun, Xuan Wang, Lizhen Wang, Xiaoyu Li, Yong Zhang, Hongwen Zhang, and Yebin Liu. Next3d: Generative neural texture rasterization for 3d-aware head avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 20991-21002, 2023.
249
+ Jiaxiang Tang, Kaisiyuan Wang, Hang Zhou, Xiaokang Chen, Dongliang He, Tianshu Hu, Jingtuo Liu, Gang Zeng, and Jingdong Wang. Real-time neural radiance talking portrait synthesis via audio-spatial decomposition. arXiv preprint arXiv:2211.12368, 2022.
250
+ Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In IEEE International Conference on Computer Vision (ICCV), 2021.
251
+
252
+ Alex Trevithick, Matthew Chan, Michael Stengel, Eric R. Chan, Chao Liu, Zhiding Yu, Sameh Khamis, Manmohan Chandraker, Ravi Ramamoorthi, and Koki Nagano. Real-time radiance fields for single-image portrait view synthesis. In ACM Transactions on Graphics (SIGGRAPH), 2023.
253
+ Ting-Chun Wang, Arun Mallya, and Ming-Yu Liu. One-shot free-view neural talking-head synthesis for video conferencing. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021a.
254
+ Xintao Wang, Yu Li, Honglun Zhang, and Ying Shan. Towards real-world blind face restoration with generative facial prior. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021b.
255
+ Liangbin Xie, Xintao Wang, Honglun Zhang, Chao Dong, and Ying Shan. Vfhq: A high-quality dataset and benchmark for video face super-resolution. In The IEEE Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), 2022.
256
+ Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5438-5448, 2022.
257
+ Yuelang Xu, Hongwen Zhang, Lizhen Wang, Xiaochen Zhao, Han Huang, Guojun Qi, and Yebin Liu. Latentavatar: Learning latent expression code for expressive neural head avatar. arXiv preprint arXiv:2305.01190, 2023.
258
+ Fei Yin, Yong Zhang, Xiaodong Cun, Mingdeng Cao, Yanbo Fan, Xuan Wang, Qingyan Bai, Baoyuan Wu, Jue Wang, and Yujiu Yang. Styleheat: One-shot high-resolution editable talking face generation via pre-trained stylegan. In European Conference on Computer Vision (ECCV), 2022.
259
+ Heng Yu, Koichiro Niinuma, and László A Jeni. Confies: Controllable neural face avatars. In 2023 IEEE 17th International Conference on Automatic Face and Gesture Recognition (FG), pp. 1-8. IEEE, 2023a.
260
+ Wangbo Yu, Yanbo Fan, Yong Zhang, Xuan Wang, Fei Yin, Yunpeng Bai, Yan-Pei Cao, Ying Shan, Yang Wu, Zhongqian Sun, et al. Nofa: Nerf-based one-shot facial avatar reconstruction. In ACM SIGGRAPH 2023 Conference Proceedings, pp. 1-12, 2023b.
261
+ Egor Zakharov, Aleksei Ivakhnenko, Aliaksandra Shysheya, and Victor Lempitsky. Fast bi-layer neural synthesis of one-shot realistic head avatars. In European Conference on Computer Vision (ECCV), 2020.
262
+ Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 586-595, 2018.
263
+ Wenxuan Zhang, Xiaodong Cun, Xuan Wang, Yong Zhang, Xi Shen, Yu Guo, Ying Shan, and Fei Wang. Sadtalker: Learning realistic 3d motion coefficients for stylized audio-driven single image talking face animation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 8652-8661, 2023.
264
+ Zhimeng Zhang, Lincheng Li, Yu Ding, and Changjie Fan. Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021.
265
+ Yufeng Zheng, Victoria Fernández Abrevaya, Marcel C Bühler, Xu Chen, Michael J Black, and Otmar Hilliges. Im avatar: Implicit morphable head avatars from videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 13545-13555, 2022.
266
+ Wojciech Zielonka, Timo Bolkart, and Justus Thies. Instant volumetric head avatars. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 4574-4584, 2023.
267
+
268
+ # A REPRODUCIBILITY
269
+
270
+ # A.1 MORE IMPLEMENTATION DETAILS
271
+
272
+ Specifically, our canonical feature encoder takes the original image of $3 \times 512 \times 512$ as input. We obtain the style code through 4 groups of ResBlock down-sampling, use 3 groups of ResBlock upsampling to obtain the conditions, and then use StyleGAN (Karras et al., 2019) to output the $3 \times 32 \times 256 \times 256$ tri-planes based on style code and conditions. In the point-based expression field, we assign a 32-dim feature to each point. Since FLAME (Li et al., 2017) contains 5023 points, the total point features size is $5023 \times 32$ . During sampling in PEF, we select the nearest 8 points, compute a 39-dim relative position code for each point, and use two fully connected layers to map the 71-dim features to 32-dim. In the multi tri-planes attention module, we employ three linear layers to map the query and key for attention calculation, and the input, output, and hidden layer dimensions are all set to 32. After obtaining 32-dim features from the canonical feature space and expression field, we use three dense layers to map the feature from 64-dim to 128-dim. Subsequently, a single linear layer is employed to predict density, and two linear layers are used to predict RGB values. Finally, we employ a super-resolution network, similar to the encoder, to map the image from $32 \times 128 \times 128$ to $3 \times 512 \times 512$ dimensions. We also provide code for the model in the supplementary materials for reference.
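+ A minimal sketch of this decoding head is given below; the layer widths follow the description above, but the activation functions and the exact output dimensionality (here the 32-dim feature whose first three channels are RGB) are assumptions.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class NeRFHead(nn.Module):
+     """Sketch of the decoding MLP described in A.1; not the exact released architecture."""
+     def __init__(self, feat_dim=32, hidden=128, out_dim=32):
+         super().__init__()
+         # three dense layers map the concatenated 64-dim feature to 128-dim
+         self.trunk = nn.Sequential(
+             nn.Linear(2 * feat_dim, hidden), nn.ReLU(),
+             nn.Linear(hidden, hidden), nn.ReLU(),
+             nn.Linear(hidden, hidden), nn.ReLU(),
+         )
+         self.density = nn.Linear(hidden, 1)                       # one linear layer predicts density
+         self.color = nn.Sequential(nn.Linear(hidden, hidden),     # two linear layers predict the
+                                    nn.ReLU(),                     # color feature (RGB in the first
+                                    nn.Linear(hidden, out_dim))    # three channels, assumed)
+
+     def forward(self, canonical_feat, expr_feat):
+         h = self.trunk(torch.cat([canonical_feat, expr_feat], dim=-1))
+         return self.density(h), self.color(h)
+ ```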
273
+
274
+ # A.2 MORE DATA PROCESSING DETAILS
275
+
276
+ We use 8,013 video clips from the VFHQ dataset (Xie et al., 2022) for training, and we uniformly sampled 30 frames from each video clip to ensure that the expressions and poses in each frame were as diverse as possible, resulting in a total of 240,390 frames. We cropped the heads and shoulders from the videos, extracted the 3DMM parameters for each frame (including identity, expression, and camera pose) with (Danecek et al., 2022) and further refined the pose with (Bulat & Tzimiropoulos, 2017). Finally, we resized all these images to $512 \times 512$ pixels.
277
+
278
+ For the HDTF (Zhang et al., 2021) dataset, we followed the training-testing division in OTAvatar. We conducted a uniform time-based sampling, selecting 100 frames from each of the 19 videos, thereby creating a test split encompassing 1900 frames. As for the VFHQ dataset, we employed a similar approach, uniformly sampling 60 frames from each of the 30 videos. This method ensured that all parts of each test video were sampled as thoroughly as possible.
279
+
280
+ # A.3 MORE EVALUATION DETAILS
281
+
282
+ We conducted comparisons with ROME (Khakhulin et al., 2022), StyleHeat (Yin et al., 2022), OTAvatar (Ma et al., 2023), Next3D (Sun et al., 2023), and HideNeRF (Li et al., 2023a) using their official implementations. Since NOFA (Yu et al., 2023b) does not currently provide an official implementation, and GOAvatar (Li et al., 2023b) is a parallel work, it is challenging to make a correct and fair comparison between these two. Additionally, as Next3D (Sun et al., 2023) has not yet provided a formal inversion implementation, we integrated PTI (Roich et al., 2021) for reconstruction within it.
283
+
284
+ For each method, we utilized the official data pre-processing scripts to obtain their respective input frames, driving frames, and result frames. For all methods, we aligned the facial regions to a uniform size and then resized them to $512 \times 512$ . It's important to note that the same alignment parameters were applied to both the driving frames and result frames to ensure their correspondence. Subsequently, we computed all metrics on the aligned frames to ensure a fair comparison. Furthermore, as most methods primarily focus on the facial area, our approach actually encompasses a larger region, including parts of the shoulders. During alignment, we used a region closer to the face to make as few modifications as possible to the baseline method's results, which may result in our approach not performing optimally in some metrics.
285
+
286
+ # B LIMITATIONS
287
+
288
+ Our method has some limitations. Specifically, our current FLAME-based model lacks a module to control the shoulders and body, resulting in limited control below the neck. Currently, the position of the shoulders in the images generated by our model is generally consistent with the input image.
289
+
290
+ Additionally, explicit control is not feasible for areas not modeled by FLAME, such as hair and the tongue. Furthermore, while we aspire to real-time reenactment at more than 30 FPS, our current performance falls short of real time (approximately 15 FPS on an A100 GPU). We leave addressing these limitations to future work.
291
+
292
+ # C MORE ABLATION STUDIES
293
+
294
+ ![](images/2f3b323d97c417686731c36a6e782ab51127bd4c1b80dba2b99e376d8969b316.jpg)
295
+ Figure 7: Qualitative results on VFHQ (Xie et al., 2022) datasets. Compared to the mean baseline, our MTA preserves more details. The smaller of the two inputs is provided to Ours Two-in.
296
+
297
+ ![](images/097613aa51cbb5333fe0773135bd143fb1d6cb26f49fa31aa4e64894e46ed016.jpg)
298
+ Figure 8: Qualitative results on VFHQ (Xie et al., 2022) datasets. With four inputs, our method produces sharp and detailed results.
299
+
300
+ To thoroughly explore the design choices of the model, we conducted additional ablation experiments.
+
+ More Ablation with MTA. Fig. 7 presents qualitative results compared to the naive mean baseline. We observe that the mean baseline excessively smooths the eyes and facial features, leading to a decrease in performance, which aligns with the quantitative results. Fig. 8 displays qualitative results of our method when using four images as input: using four inputs does not cause detail smoothing or loss, and the method performs quite well.
301
+
302
+ Visualization of attention map in MTA. To better assess MTA, we visualized its attention maps. As shown in Fig. 9, the model attends well to different parts of the face. When the inputs include the left and right sides of the face, the model distributes attention across both to model the whole face more completely, and it also pays more attention to the open-eye input to ensure the eye region is reconstructed correctly. These visualizations are consistent with our expectations and show the advantage of MTA in fusing multiple image inputs.
303
+
304
+ ![](images/ff2c1840da988453f4a4499bd34934f156be54798cec476a29acd27c59507be0.jpg)
305
+ Figure 9: Visualization of attention map in our MTA module. The image is normalized by affine transformation. Red represents higher attention and black represents lower attention. MTA can pay attention to the left and right faces in different inputs, and also pay more attention to the eyes-open photos.
306
+
307
+ # D PRELIMINARIES OF FLAME
308
+
309
+ We use the geometry prior from the FLAME (Li et al., 2017) model, a 3D morphable model known for its geometric accuracy and versatility. It extends beyond static facial models by incorporating expressions, offers precise control over facial features, and is represented parametrically. FLAME finds applications in facial animation, avatar creation, and facial recognition due to its realistic rendering capabilities and flexibility. The FLAME model represents the head shape in the following way:
310
+
311
+ $$
+ T_{P}(\hat{\beta}, \hat{\theta}, \hat{\psi}) = \bar{T} + B_{S}(\hat{\beta}; \mathcal{S}) + B_{P}(\hat{\theta}; \mathcal{P}) + B_{E}(\hat{\psi}; \mathcal{E}), \tag{6}
+ $$
314
+
315
+ where $\bar{T}$ is a template mesh, $B_{S}(\hat{\beta}; \mathcal{S})$ is a shape blend-shape function accounting for identity-related shape variation, $B_{P}(\hat{\theta}; \mathcal{P})$ is a corrective pose blend-shape that corrects pose-dependent deformations not explained by linear blend skinning alone, and the expression blend-shapes $B_{E}(\hat{\psi}; \mathcal{E})$ capture facial expressions.
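+ Concretely, each term in Eq. 6 is a linear combination of basis offsets added to the template mesh. The sketch below illustrates this under assumed tensor shapes; it covers only the blend-shape sum, not the subsequent linear blend skinning.
+
+ ```python
+ import torch
+
+ def flame_template(T_bar, S, P, E, beta, pose_feat, psi):
+     """T_bar: (V, 3) template vertices; S: (V, 3, n_shape); P: (V, 3, n_pose);
+     E: (V, 3, n_expr) blend-shape bases; beta, pose_feat, psi: coefficient vectors."""
+     b_shape = torch.einsum('vdk,k->vd', S, beta)       # identity blend-shapes B_S
+     b_pose  = torch.einsum('vdk,k->vd', P, pose_feat)  # pose-corrective blend-shapes B_P
+     b_expr  = torch.einsum('vdk,k->vd', E, psi)        # expression blend-shapes B_E
+     return T_bar + b_shape + b_pose + b_expr           # vertices before linear blend skinning
+ ```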
316
+
317
+ # E BLENDING MULTIPLE IDENTITIES
318
+
319
+ We also attempted to synthesize results using different individuals and styles as inputs. As shown in Fig. 10, even in the case of such diverse inputs, our method demonstrated robustness, producing reasonable results while combining features from different persons in the images.
320
+
321
+ ![](images/df704dadef583c6137fc095ca54a59c61904a50815d251c9ac43c86e8b19aa0f.jpg)
322
+ Figure 10: Blend results from in-the-wild images. The smaller images are the driving frames.
323
+
324
+ # F MORE QUALITATIVE RESULTS
325
+
326
+ In this section, we showcase more visual results of our method for cross-identity reenactment. Fig. 11 showcases additional results of our method. It's worth noting that, for a fair comparison with other methods, we standardized the evaluation details across all methods, as stated in the evaluation specifics in Appendix A.3. Our method can also encompass more non-facial regions, as illustrated in Fig. 11. Fig. 12 displays the results on VFHQ, and Fig. 13 presents results on HDTF, respectively. We also provide a supplementary video to show more dynamic results.
327
+
328
+ ![](images/ac91e08d42280982beccccbf013ee92182f45f8e65e33c2f1ce6311cbd776129.jpg)
329
+ Figure 11: Qualitative results on VFHQ (Xie et al., 2022) datasets. The smaller of the two source inputs is provided to Ours Two-in.
330
+
331
+ ![](images/f2918319c319ac5b512f08fb2c9d613f41e701865d2ec6d87fb76cf26d5e3382.jpg)
332
+ Figure 12: Qualitative results on VFHQ (Xie et al., 2022) datasets. The smaller of the two source inputs is provided to Ours Two-in.
333
+
334
+ ![](images/777affae18edc672e03667f9ff9396e92b1d7ca25d5109d4c9b80b6be9a83e68.jpg)
335
+ Figure 13: Qualitative results on HDTF (Zhang et al., 2021) datasets. The smaller of the two source inputs is provided to Ours Two-in.
2401.10xxx/2401.10215/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6e45576587e6997cf1ec2953251e81e743b16ce5c8db200a5dd7f8f73067c86
3
+ size 1542970
2401.10xxx/2401.10215/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10216/ea06c2af-58d9-41ff-9a19-b0f5df0c6d96_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10216/ea06c2af-58d9-41ff-9a19-b0f5df0c6d96_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10216/ea06c2af-58d9-41ff-9a19-b0f5df0c6d96_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e9a75ceda854c9e9b45b023e7cac0a0e1b24a840fcd3a8bde86ad06099fc7b2
3
+ size 827774
2401.10xxx/2401.10216/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10216/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e6f64d3b37255eb87595674b2bf366cce6db7cf5cfef3a323a35d96e17a2885
3
+ size 1067869
2401.10xxx/2401.10216/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10225/ba2787e2-76e7-4f16-aa8b-fe98ad04fa0e_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10225/ba2787e2-76e7-4f16-aa8b-fe98ad04fa0e_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10225/ba2787e2-76e7-4f16-aa8b-fe98ad04fa0e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:561098c144e09776177174349813aa1563b8bfdd6ec1383db0968731a6465ed1
3
+ size 968851
2401.10xxx/2401.10225/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10225/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf108f32b82854f8fd4a14d7ab8e193b3956e5fe31896ffc81cc75e075ad7659
3
+ size 1445936
2401.10xxx/2401.10225/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2401.10xxx/2401.10226/ecea0930-2696-45e5-b53e-a244117d8d6b_content_list.json ADDED
The diff for this file is too large to render. See raw diff