SlowGuess committed on
Commit e09228f · verified · 1 Parent(s): d5ca320

Add Batch f70b05fb-e61c-4bbc-8d49-8e63d95fd036

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +15 -0
  2. 2301.00xxx/2301.00023/4635d162-e5d4-4ff9-83b6-a0e11e214a21_content_list.json +1634 -0
  3. 2301.00xxx/2301.00023/4635d162-e5d4-4ff9-83b6-a0e11e214a21_model.json +2239 -0
  4. 2301.00xxx/2301.00023/4635d162-e5d4-4ff9-83b6-a0e11e214a21_origin.pdf +3 -0
  5. 2301.00xxx/2301.00023/full.md +326 -0
  6. 2301.00xxx/2301.00023/images.zip +3 -0
  7. 2301.00xxx/2301.00023/layout.json +0 -0
  8. 2301.00xxx/2301.00050/e9e4f2ea-c256-4f2d-8617-7d1874a9b913_content_list.json +0 -0
  9. 2301.00xxx/2301.00050/e9e4f2ea-c256-4f2d-8617-7d1874a9b913_model.json +0 -0
  10. 2301.00xxx/2301.00050/e9e4f2ea-c256-4f2d-8617-7d1874a9b913_origin.pdf +3 -0
  11. 2301.00xxx/2301.00050/full.md +454 -0
  12. 2301.00xxx/2301.00050/images.zip +3 -0
  13. 2301.00xxx/2301.00050/layout.json +0 -0
  14. 2301.00xxx/2301.00073/75ea4a89-7dbe-4180-bf18-4a5f3633018a_content_list.json +0 -0
  15. 2301.00xxx/2301.00073/75ea4a89-7dbe-4180-bf18-4a5f3633018a_model.json +0 -0
  16. 2301.00xxx/2301.00073/75ea4a89-7dbe-4180-bf18-4a5f3633018a_origin.pdf +3 -0
  17. 2301.00xxx/2301.00073/full.md +550 -0
  18. 2301.00xxx/2301.00073/images.zip +3 -0
  19. 2301.00xxx/2301.00073/layout.json +0 -0
  20. 2301.00xxx/2301.00127/97724fca-330b-4a45-9d7d-ecac8fcb1f6d_content_list.json +1346 -0
  21. 2301.00xxx/2301.00127/97724fca-330b-4a45-9d7d-ecac8fcb1f6d_model.json +1846 -0
  22. 2301.00xxx/2301.00127/97724fca-330b-4a45-9d7d-ecac8fcb1f6d_origin.pdf +3 -0
  23. 2301.00xxx/2301.00127/full.md +272 -0
  24. 2301.00xxx/2301.00127/images.zip +3 -0
  25. 2301.00xxx/2301.00127/layout.json +0 -0
  26. 2301.00xxx/2301.00130/01c945fb-cab9-4f7c-a745-0c790477032b_content_list.json +0 -0
  27. 2301.00xxx/2301.00130/01c945fb-cab9-4f7c-a745-0c790477032b_model.json +0 -0
  28. 2301.00xxx/2301.00130/01c945fb-cab9-4f7c-a745-0c790477032b_origin.pdf +3 -0
  29. 2301.00xxx/2301.00130/full.md +486 -0
  30. 2301.00xxx/2301.00130/images.zip +3 -0
  31. 2301.00xxx/2301.00130/layout.json +0 -0
  32. 2301.00xxx/2301.00157/128c83c4-e54e-4950-a434-c5755fa770fc_content_list.json +0 -0
  33. 2301.00xxx/2301.00157/128c83c4-e54e-4950-a434-c5755fa770fc_model.json +0 -0
  34. 2301.00xxx/2301.00157/128c83c4-e54e-4950-a434-c5755fa770fc_origin.pdf +3 -0
  35. 2301.00xxx/2301.00157/full.md +705 -0
  36. 2301.00xxx/2301.00157/images.zip +3 -0
  37. 2301.00xxx/2301.00157/layout.json +0 -0
  38. 2301.00xxx/2301.00174/bdcc3ba4-ac51-4fe2-85bf-aa12aea87957_content_list.json +0 -0
  39. 2301.00xxx/2301.00174/bdcc3ba4-ac51-4fe2-85bf-aa12aea87957_model.json +0 -0
  40. 2301.00xxx/2301.00174/bdcc3ba4-ac51-4fe2-85bf-aa12aea87957_origin.pdf +3 -0
  41. 2301.00xxx/2301.00174/full.md +0 -0
  42. 2301.00xxx/2301.00174/images.zip +3 -0
  43. 2301.00xxx/2301.00174/layout.json +0 -0
  44. 2301.00xxx/2301.00182/e088d165-4952-42cf-a3d7-7785f6182d67_content_list.json +0 -0
  45. 2301.00xxx/2301.00182/e088d165-4952-42cf-a3d7-7785f6182d67_model.json +0 -0
  46. 2301.00xxx/2301.00182/e088d165-4952-42cf-a3d7-7785f6182d67_origin.pdf +3 -0
  47. 2301.00xxx/2301.00182/full.md +554 -0
  48. 2301.00xxx/2301.00182/images.zip +3 -0
  49. 2301.00xxx/2301.00182/layout.json +0 -0
  50. 2301.00xxx/2301.00184/8445fc7b-3803-4777-88c1-9a5a57def64f_content_list.json +0 -0
.gitattributes CHANGED
@@ -11859,3 +11859,18 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
11859
  2301.04xxx/2301.04012/165aec82-a14a-4f10-be45-0c8c7c714f3e_origin.pdf filter=lfs diff=lfs merge=lfs -text
11860
  2301.07xxx/2301.07173/40b223d4-c09f-4971-a396-e18004b7876f_origin.pdf filter=lfs diff=lfs merge=lfs -text
11861
  2301.07xxx/2301.07519/f3b606e2-8c82-4e84-8e53-cc79163e69aa_origin.pdf filter=lfs diff=lfs merge=lfs -text
11862
+ 2301.00xxx/2301.00023/4635d162-e5d4-4ff9-83b6-a0e11e214a21_origin.pdf filter=lfs diff=lfs merge=lfs -text
11863
+ 2301.00xxx/2301.00050/e9e4f2ea-c256-4f2d-8617-7d1874a9b913_origin.pdf filter=lfs diff=lfs merge=lfs -text
11864
+ 2301.00xxx/2301.00073/75ea4a89-7dbe-4180-bf18-4a5f3633018a_origin.pdf filter=lfs diff=lfs merge=lfs -text
11865
+ 2301.00xxx/2301.00127/97724fca-330b-4a45-9d7d-ecac8fcb1f6d_origin.pdf filter=lfs diff=lfs merge=lfs -text
11866
+ 2301.00xxx/2301.00130/01c945fb-cab9-4f7c-a745-0c790477032b_origin.pdf filter=lfs diff=lfs merge=lfs -text
11867
+ 2301.00xxx/2301.00157/128c83c4-e54e-4950-a434-c5755fa770fc_origin.pdf filter=lfs diff=lfs merge=lfs -text
11868
+ 2301.00xxx/2301.00174/bdcc3ba4-ac51-4fe2-85bf-aa12aea87957_origin.pdf filter=lfs diff=lfs merge=lfs -text
11869
+ 2301.00xxx/2301.00182/e088d165-4952-42cf-a3d7-7785f6182d67_origin.pdf filter=lfs diff=lfs merge=lfs -text
11870
+ 2301.00xxx/2301.00184/8445fc7b-3803-4777-88c1-9a5a57def64f_origin.pdf filter=lfs diff=lfs merge=lfs -text
11871
+ 2301.00xxx/2301.00234/ee19e89e-4815-471f-9bcf-7803fc2a1883_origin.pdf filter=lfs diff=lfs merge=lfs -text
11872
+ 2301.00xxx/2301.00265/3307789b-e9e7-40a5-aee1-627293e6d991_origin.pdf filter=lfs diff=lfs merge=lfs -text
11873
+ 2301.00xxx/2301.00641/7387351c-4b25-4cb9-9c6b-a2231bf996b2_origin.pdf filter=lfs diff=lfs merge=lfs -text
11874
+ 2301.00xxx/2301.00767/548b5d26-5960-49e1-8ec5-42b644679e95_origin.pdf filter=lfs diff=lfs merge=lfs -text
11875
+ 2301.01xxx/2301.01602/d2632745-dc24-4801-b3da-573e500d78ce_origin.pdf filter=lfs diff=lfs merge=lfs -text
11876
+ 2301.10xxx/2301.10028/6fed2f27-8962-4b7b-8349-ee9d90b3c90c_origin.pdf filter=lfs diff=lfs merge=lfs -text
2301.00xxx/2301.00023/4635d162-e5d4-4ff9-83b6-a0e11e214a21_content_list.json ADDED
@@ -0,0 +1,1634 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Imitator: Personalized Speech-driven 3D Facial Animation",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 187,
8
+ 130,
9
+ 781,
10
+ 151
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Balamurugan Thambiraja<sup>1</sup> \nDarren Cosker<sup>3</sup>",
17
+ "bbox": [
18
+ 183,
19
+ 179,
20
+ 393,
21
+ 215
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Ikhsanul Habibie<sup>2</sup> \nChristian Theobalt<sup>2</sup>",
28
+ "bbox": [
29
+ 426,
30
+ 180,
31
+ 581,
32
+ 214
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Sadegh Aliakbarian<sup>3</sup> \nJustus Thies<sup>1</sup>",
39
+ "bbox": [
40
+ 602,
41
+ 180,
42
+ 784,
43
+ 215
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "<sup>1</sup> Max Planck Institute for Intelligent Systems, Tübingen, Germany",
50
+ "bbox": [
51
+ 220,
52
+ 232,
53
+ 750,
54
+ 251
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "list",
60
+ "sub_type": "text",
61
+ "list_items": [
62
+ "2 Max Planck Institute for Informatics, Saarland, Germany",
63
+ "<sup>3</sup> Microsoft Mixed Reality & AI Lab, Cambridge, UK"
64
+ ],
65
+ "bbox": [
66
+ 254,
67
+ 252,
68
+ 715,
69
+ 287
70
+ ],
71
+ "page_idx": 0
72
+ },
73
+ {
74
+ "type": "image",
75
+ "img_path": "images/f15d604f435fa17dc624988058978f9c514d0ff5dfd8ecdf7d5611070bf61685.jpg",
76
+ "image_caption": [
77
+ "Figure 1. Imitator is a novel method for personalized speech-driven 3D facial animation. Given an audio sequence and a personalized style-embedding as input, we generate person-specific motion sequences with accurate lip closures for bilabial consonants ('m', 'b', 'p'). The style-embedding of a subject can be computed by a short reference video (e.g., 5s)."
78
+ ],
79
+ "image_footnote": [],
80
+ "bbox": [
81
+ 93,
82
+ 305,
83
+ 875,
84
+ 539
85
+ ],
86
+ "page_idx": 0
87
+ },
88
+ {
89
+ "type": "text",
90
+ "text": "Abstract",
91
+ "text_level": 1,
92
+ "bbox": [
93
+ 233,
94
+ 609,
95
+ 313,
96
+ 625
97
+ ],
98
+ "page_idx": 0
99
+ },
100
+ {
101
+ "type": "text",
102
+ "text": "Speech-driven 3D facial animation has been widely explored, with applications in gaming, character animation, virtual reality, and telepresence systems. State-of-the-art methods deform the face topology of the target actor to sync the input audio without considering the identity-specific speaking style and facial idiosyncrasies of the target actor; thus, resulting in unrealistic and inaccurate lip movements. To address this, we present Imitator, a speech-driven facial expression synthesis method, which learns identity-specific details from a short input video and produces novel facial expressions matching the identity-specific speaking style and facial idiosyncrasies of the target actor. Specifically, we train a style-agnostic transformer on a large facial expression dataset which we use as a prior for audiodriven facial expressions. Based on this prior, we optimize for identity-specific speaking style based on a short reference video. To train the prior, we introduce a novel loss",
103
+ "bbox": [
104
+ 73,
105
+ 643,
106
+ 472,
107
+ 900
108
+ ],
109
+ "page_idx": 0
110
+ },
111
+ {
112
+ "type": "text",
113
+ "text": "function based on detected bilabial consonants to ensure plausible lip closures and consequently improve the realism of the generated expressions. Through detailed experiments and a user study, we show that our approach produces temporally coherent facial expressions from input audio while preserving the speaking style of the target actors. Please check out the project page for the supplemental video and more results.",
114
+ "bbox": [
115
+ 496,
116
+ 609,
117
+ 893,
118
+ 731
119
+ ],
120
+ "page_idx": 0
121
+ },
122
+ {
123
+ "type": "text",
124
+ "text": "1. Introduction",
125
+ "text_level": 1,
126
+ "bbox": [
127
+ 500,
128
+ 767,
129
+ 632,
130
+ 782
131
+ ],
132
+ "page_idx": 0
133
+ },
134
+ {
135
+ "type": "text",
136
+ "text": "3D digital humans raised a lot of attention in the past few years as they aim to replicate the appearance and motion of real humans for immersive applications, like telepresence in AR or VR, character animation and creation for entertainment (movies and games), and virtual mirrors for e-commerce. Especially, with the introduction of neural rendering [27, 28], we see immense progress in the photo-",
137
+ "bbox": [
138
+ 496,
139
+ 794,
140
+ 893,
141
+ 902
142
+ ],
143
+ "page_idx": 0
144
+ },
145
+ {
146
+ "type": "aside_text",
147
+ "text": "arXiv:2301.00023v1 [cs.CV] 30 Dec 2022",
148
+ "bbox": [
149
+ 22,
150
+ 260,
151
+ 57,
152
+ 705
153
+ ],
154
+ "page_idx": 0
155
+ },
156
+ {
157
+ "type": "page_number",
158
+ "text": "1",
159
+ "bbox": [
160
+ 480,
161
+ 924,
162
+ 488,
163
+ 936
164
+ ],
165
+ "page_idx": 0
166
+ },
167
+ {
168
+ "type": "text",
169
+ "text": "realistic synthesis of such digital doubles [11,20,38]. These avatars can be controlled via visual tracking to mirror the facial expressions of a real human. However, we need to control the facial avatars with text or audio inputs for a series of applications. For example, AI-driven digital assistants rely on motion synthesis instead of motion cloning. Even telepresence applications might need to work with audio inputs only, when the face of the person is occluded or cannot be tracked, since a face capture device is not available. To this end, we analyze motion synthesis for facial animations from audio inputs; note that text-to-speech approaches can be used to generate such audio. Humans are generally sensitive towards faces, especially facial motions, as they are crucial for communication (e.g., micro-expressions). Without full expressiveness and proper lip closures, the generated animation will be perceived as unnatural and implausible. Especially if the person is known, the facial animations must match the subject's idiosyncrasies.",
170
+ "bbox": [
171
+ 76,
172
+ 90,
173
+ 472,
174
+ 363
175
+ ],
176
+ "page_idx": 1
177
+ },
178
+ {
179
+ "type": "text",
180
+ "text": "Recent methods for speech-driven 3D facial animation [5, 10, 16, 21] are data-driven. They are trained on high-quality motion capture data and leverage pretrained speech models [13, 23] to extract an intermediate audio representation. We can classify these data-driven methods into two categories, generalized [5, 10, 21] and personalized animation generation methods [16]. In contrast to those approaches, we aim at a personalized 3D facial animation synthesis that can adapt to a new user while only relying on input RGB videos captured with commodity cameras. Specifically, we propose a transformer-based auto-regressive motion synthesis method that predicts a generalized motion representation. This intermediate representation is decoded by a motion decoder which is adaptable to new users. A speaker embedding is adjusted for a new user, and a new motion basis for the motion decoder is computed. Our method is trained on the VOCA dataset [5] and can be applied to new subjects captured in a short monocular RGB video. As lip closures are of paramount importance for bilabial consonants ('m', 'b', 'p'), we introduce a novel loss based on the detection of bilabials to ensure that the lips are closed properly. We take inspiration from the locomotion synthesis field [14, 18], where similar losses are used to enforce foot contact with the ground and transfer it to our scenario of physically plausible lip motions.",
181
+ "bbox": [
182
+ 76,
183
+ 363,
184
+ 472,
185
+ 739
186
+ ],
187
+ "page_idx": 1
188
+ },
189
+ {
190
+ "type": "text",
191
+ "text": "In a series of experiments and ablation studies, we demonstrate that our method is able to synthesize facial expressions that match the target subject's motions in terms of style and expressiveness. Our method outperforms state-of-the-art methods in our metrical evaluation and user study. Please refer to our supplemental video for a detailed qualitative comparison. In a user study, we confirm that personalized facial expressions are important for the perceived realism.",
192
+ "bbox": [
193
+ 76,
194
+ 741,
195
+ 472,
196
+ 878
197
+ ],
198
+ "page_idx": 1
199
+ },
200
+ {
201
+ "type": "text",
202
+ "text": "The contributions of our work Imitator are as follows:",
203
+ "bbox": [
204
+ 76,
205
+ 885,
206
+ 434,
207
+ 900
208
+ ],
209
+ "page_idx": 1
210
+ },
211
+ {
212
+ "type": "list",
213
+ "sub_type": "text",
214
+ "list_items": [
215
+ "- a novel auto-regressive motion synthesis architecture that allows for adaption to new users by disentangling generalized viseme generation and person-specific motion decoding,",
216
+ "- and a lip contact loss formulation for improved lip closures based on physiological cues of bilabial consonants ('m', 'b', 'p')."
217
+ ],
218
+ "bbox": [
219
+ 517,
220
+ 90,
221
+ 890,
222
+ 205
223
+ ],
224
+ "page_idx": 1
225
+ },
226
+ {
227
+ "type": "text",
228
+ "text": "2. Related Work",
229
+ "text_level": 1,
230
+ "bbox": [
231
+ 500,
232
+ 226,
233
+ 640,
234
+ 242
235
+ ],
236
+ "page_idx": 1
237
+ },
238
+ {
239
+ "type": "text",
240
+ "text": "Our work focuses on speech-driven 3D facial animation related to talking head methods that create photo-realistic video sequences from audio inputs.",
241
+ "bbox": [
242
+ 498,
243
+ 251,
244
+ 890,
245
+ 296
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "text",
251
+ "text": "Talking Head Videos: Several prior works on speech-driven generation focus on the synthesis of 2D talking head videos. Suwajanakorn et al. [25] train an LSTM network on 19h video material of Obama to predict his person-specific 2D lip landmarks from speech inputs, which is then used for image generation. Vougioukas et al. [33] propose a method to generate facial animation from a single RGB image by leveraging a temporal generative adversarial network. Chung et al. [4] introduce a real-time approach to generate an RGB video of a talking face by directly mapping the audio input to the video output space. This method can redub a new target identity not seen during training. Instead of performing direct mapping, Zhou et al. [39] disentangles the speech information in terms of speaker identity and content, allowing speech-driven generation that can be applied to various types of realistic and hand-drawn head portraits. A series of work [24, 29, 36, 37] uses an intermediate 3D Morphable Model (3DMM) [2, 8] to guide the 2D neural rendering of talking heads from audio. Wang et al. [34] extend this work also to model the head movements of the speaker. Lipsync3d [17] proposes data-efficient learning of personalized talking heads focusing on pose and lighting normalization. Based on dynamic neural radiance fields [11], Ad-nerf [12] and DFA-NeRF [35] learn personalized talking head models that can be rendered under novel views, while being controlled by audio inputs. In contrast to these methods, our work focuses on predicting 3D facial animations from speech that can be used to drive 3D digital avatars without requiring retraining of the entire model to capture the person-specific motion style.",
252
+ "bbox": [
253
+ 498,
254
+ 296,
255
+ 892,
256
+ 750
257
+ ],
258
+ "page_idx": 1
259
+ },
260
+ {
261
+ "type": "text",
262
+ "text": "Speech-Driven 3D Facial Animation: Speech-driven 3d facial animation is a vivid field of research. Earlier methods [6, 7, 9, 15, 32] focus on animating a predefined facial rig using procedural rules. HMM-based models generate visemes from input text or audio, and the facial animations are generated using viseme-dependent co-articulation models [6, 7] or by blending facial templates [15]. With recent advances in machine learning, data-driven methods [3, 5, 10, 16, 21, 26, 29] have demonstrated their capability to learn viseme patterns from data. These methods",
263
+ "bbox": [
264
+ 498,
265
+ 750,
266
+ 893,
267
+ 900
268
+ ],
269
+ "page_idx": 1
270
+ },
271
+ {
272
+ "type": "page_number",
273
+ "text": "2",
274
+ "bbox": [
275
+ 478,
276
+ 924,
277
+ 491,
278
+ 936
279
+ ],
280
+ "page_idx": 1
281
+ },
282
+ {
283
+ "type": "image",
284
+ "img_path": "images/9c464816c5e274a631a88b5e27f85e0c0048d9d44e42ea9a7fdb85fa62570a0e.jpg",
285
+ "image_caption": [
286
+ "Figure 2. Our architecture takes audio as input which is encoded by a pre-trained Wav2Vec2.0 model [1]. This audio embedding $\\hat{a}_{1:T}$ is interpreted by an auto-regressive viseme decoder which generates a generalized motion feature $\\hat{v}_{1:T}$ . A style-adaptable motion decoder maps these motion features to person-specific facial expressions $\\hat{y}_{1:T}$ in terms of vertex displacements on top of a template mesh."
287
+ ],
288
+ "image_footnote": [],
289
+ "bbox": [
290
+ 83,
291
+ 80,
292
+ 872,
293
+ 292
294
+ ],
295
+ "page_idx": 2
296
+ },
297
+ {
298
+ "type": "text",
299
+ "text": "are based on pretrained speech models [1, 13, 23] to generate an abstract and generalized representation of the input audio, which is then interpreted by a CNN or autoregressive model to map to either a 3DMM space or directly to 3D meshes. Karras et al. [16] learn a 3D facial animation model from 3-5 minutes of high-quality actor specific 3D data. VOCA [5] is trained on 3D data of multiple subjects and can animate the corresponding set of identities from input audio by providing a one-hot encoding during inference that indicates the subject. MeshTalk [21] is a generalized method that learns a categorical representation for facial expressions and auto-regressively samples from this categorical space to animate a given 3D facial template mesh of a subject from audio inputs. FaceFormer [10] uses a pretrained Wav2Vec [1] audio representation and applies a transformer-based decoder to regress displacements on top of a template mesh. Like VOCA, FaceFormer provides a speaker identification code to the decoder, allowing one to choose from the training set talking styles. In contrast, we aim at a method that can adapt to new users, capturing their talking style and expressiveness.",
300
+ "bbox": [
301
+ 75,
302
+ 358,
303
+ 472,
304
+ 676
305
+ ],
306
+ "page_idx": 2
307
+ },
308
+ {
309
+ "type": "text",
310
+ "text": "3. Method",
311
+ "text_level": 1,
312
+ "bbox": [
313
+ 76,
314
+ 691,
315
+ 169,
316
+ 709
317
+ ],
318
+ "page_idx": 2
319
+ },
320
+ {
321
+ "type": "text",
322
+ "text": "Our goal is to model person-specific speaking style and the facial idiosyncrasies of an actor, to generate 3D facial animations of the subject from novel audio inputs. As input, we assume a short video sequence of the subject which we leverage to compute the identity-specific speaking style. To enable fast adaptation to novel users without significant training sequences, we learn a generalized style-agnostic transformer on VOCaset [5]. This transformer provides generic motion features from audio inputs that are interpretable by a person-specific motion decoder. The motion decoder is pre-trained and adaptable to new users via speaking style optimization and refinement of the motion",
323
+ "bbox": [
324
+ 75,
325
+ 719,
326
+ 472,
327
+ 902
328
+ ],
329
+ "page_idx": 2
330
+ },
331
+ {
332
+ "type": "text",
333
+ "text": "basis. To further improve synthesis results, we introduce a novel lip contact loss based on physiological cues of the bilabial consonants [7]. In the following, we will detail our model architecture and the training objectives and describe the style adaptation.",
334
+ "bbox": [
335
+ 496,
336
+ 358,
337
+ 893,
338
+ 436
339
+ ],
340
+ "page_idx": 2
341
+ },
342
+ {
343
+ "type": "text",
344
+ "text": "3.1. Model Architecture",
345
+ "text_level": 1,
346
+ "bbox": [
347
+ 498,
348
+ 445,
349
+ 687,
350
+ 460
351
+ ],
352
+ "page_idx": 2
353
+ },
354
+ {
355
+ "type": "text",
356
+ "text": "Our architecture consists of three main components (see Figure 2): an audio encoder, a generalized auto-regressive viseme decoder, and an adaptable motion decoder.",
357
+ "bbox": [
358
+ 496,
359
+ 469,
360
+ 892,
361
+ 513
362
+ ],
363
+ "page_idx": 2
364
+ },
365
+ {
366
+ "type": "text",
367
+ "text": "Audio Encoder: Following state-of-the-art motion synthesis models [5, 10], we use a generalized speech model to encode the audio inputs $A$ . Specifically, we leverage the Wav2Vec 2.0 model [1]. The original Wav2Vec is based on a CNN architecture designed to produce a meaningful latent representation of human speech. To this end, the model is trained in a self-supervised and semi-supervised manner to predict the immediate future values of the current input speech by using a contrastive loss, allowing the model to learn from a large amount of unlabeled data. Wav2Vec 2.0 extends this idea by quantizing the latent representation and incorporating a Transformer-based architecture [31]. We resample the Wav2Vec 2.0 output with a linear interpolation layer to match the sampling frequency of the motion (30fps for the VOCAsset, with 16kHz audio), resulting in a contextual representation $\\{\\hat{a}\\}_{t=1}^{T}$ of the audio sequence for $T$ motion frames.",
368
+ "bbox": [
369
+ 496,
370
+ 518,
371
+ 893,
372
+ 775
373
+ ],
374
+ "page_idx": 2
375
+ },
376
+ {
377
+ "type": "text",
378
+ "text": "Auto-regressive Viseme Decoder: The decoder $F_{v}$ takes the contextual representation of the audio sequence as input and produces style agnostic viseme features $\\hat{v}_{t}$ in an auto-regressive manner. These viseme features describe how the lip should deform given the context audio and the previous viseme features. In contrast to Faceformer [10], we propose to use of a classical transformer architecture [31] as viseme decoder, which learns the mapping from audio-",
379
+ "bbox": [
380
+ 496,
381
+ 779,
382
+ 893,
383
+ 902
384
+ ],
385
+ "page_idx": 2
386
+ },
387
+ {
388
+ "type": "page_number",
389
+ "text": "3",
390
+ "bbox": [
391
+ 478,
392
+ 924,
393
+ 491,
394
+ 936
395
+ ],
396
+ "page_idx": 2
397
+ },
398
+ {
399
+ "type": "text",
400
+ "text": "features $\\{\\hat{a}\\}_{t = 1}^{T}$ to identity agnostic viseme features $\\{\\hat{v}\\}_{t = 1}^{T}$ . The autoregressive viseme decoder is defined as:",
401
+ "bbox": [
402
+ 76,
403
+ 89,
404
+ 467,
405
+ 121
406
+ ],
407
+ "page_idx": 3
408
+ },
409
+ {
410
+ "type": "equation",
411
+ "text": "\n$$\n\\hat {v} _ {t} = F _ {v} \\left(\\theta_ {v}; \\hat {v} _ {1: t - 1}, \\hat {a} _ {1: T}\\right), \\tag {1}\n$$\n",
412
+ "text_format": "latex",
413
+ "bbox": [
414
+ 184,
415
+ 133,
416
+ 467,
417
+ 148
418
+ ],
419
+ "page_idx": 3
420
+ },
421
+ {
422
+ "type": "text",
423
+ "text": "where $\\theta_v$ are the learnable parameters of the transformer.",
424
+ "bbox": [
425
+ 76,
426
+ 161,
427
+ 452,
428
+ 175
429
+ ],
430
+ "page_idx": 3
431
+ },
432
+ {
433
+ "type": "text",
434
+ "text": "In contrast to the traditional neural machine translation (NMT) architectures that produce discrete text, our output representation is a continuous vector. NMT models use a start and end token to indicate the beginning and end of the sequence. During inference, the NMT model autoregressively generates tokens until the end token is generated. Similarly, we use a start token to indicate the beginning of the sequences. However, since the sequence length $T$ is given by the length of the audio input, we do not use an end token. We inject temporal information into the sequences by adding encoded time to the viseme feature in the sequence. We formulate the positionally encoded intermediate representations $\\hat{h}_t$ as:",
435
+ "bbox": [
436
+ 75,
437
+ 176,
438
+ 467,
439
+ 372
440
+ ],
441
+ "page_idx": 3
442
+ },
443
+ {
444
+ "type": "equation",
445
+ "text": "\n$$\n\\hat {h} _ {t} = \\hat {v} _ {t} + P E (t), \\tag {2}\n$$\n",
446
+ "text_format": "latex",
447
+ "bbox": [
448
+ 210,
449
+ 382,
450
+ 467,
451
+ 400
452
+ ],
453
+ "page_idx": 3
454
+ },
455
+ {
456
+ "type": "text",
457
+ "text": "where $PE(t)$ is a sinusoidal encoding function [31]. Given the sequence of positional encoded inputs $\\hat{h}_t$ , we use multi-head self-attention which generates the context representation of the inputs by weighting the inputs based on their relevance. These context representations are used as input to a cross-modal multi-head attention block which also takes the audio features $\\hat{a}_{1:T}$ from the audio encoder as input. A final feed-forward layer maps the output of this audio-motion attention layer to the viseme embedding $\\hat{v}_t$ . In contrast to Faceformer [10], which feeds encoded face motions $\\hat{y}_t$ to the transformer, we work with identity-agnostic viseme features which are independently decoded by the motion decoder. We found that feeding face motions $\\hat{y}_t$ via an input embedding layer to the transformer contains identity-specific information, which we try to avoid since we aim for a generalized viseme decoder that is disentangled from person-specific motion. In addition, using a general start token instead of the identity code [10] as the start token reduces the identity bias further. Note that disentangling the identity-specific information from the viseme decoder improves the motion optimization in the style adaption stage of the pipeline (see Section 3.3), as gradients do not need to be propagated through the auto-regressive transformer.",
458
+ "bbox": [
459
+ 75,
460
+ 412,
461
+ 467,
462
+ 760
463
+ ],
464
+ "page_idx": 3
465
+ },
466
+ {
467
+ "type": "text",
468
+ "text": "Motion Decoder: The motion decoder aims to generate 3D facial animation $\\hat{y}_{1:T}$ from the style-agnostic viseme features $\\hat{v}_{1:T}$ and a style embedding $\\hat{S}_i$ . Specifically, our motion decoder consists of two components, a style embedding layer and a motion synthesis block. For the training of the style-agnostic transformer and for pre-training the motion decoder, we assume to have a one-hot encoding of the identities of the training set. The style embedding layer takes this identity information as input and produces the style",
469
+ "bbox": [
470
+ 75,
471
+ 763,
472
+ 467,
473
+ 900
474
+ ],
475
+ "page_idx": 3
476
+ },
477
+ {
478
+ "type": "text",
479
+ "text": "embedding $\\hat{S}_i$ , which encodes the identity-specific motion. The style embedding is concatenated with the viseme features $\\hat{v}_{1:T}$ and fed into the motion synthesis block. The motion synthesis block consists of non-linear layers which map the style-aware viseme features to the motion space defined by a linear deformation basis. During training, the deformation basis is learned across all identities in the dataset. The deformation basis is fine-tuned for style adaptation to out-of-training identities (see Section 3.3). The final mesh outputs $\\hat{y}_{1:T}$ are computed by adding the estimated per-vertex deformation to the template mesh of the subject.",
480
+ "bbox": [
481
+ 496,
482
+ 90,
483
+ 890,
484
+ 257
485
+ ],
486
+ "page_idx": 3
487
+ },
488
+ {
489
+ "type": "text",
490
+ "text": "3.2. Training",
491
+ "text_level": 1,
492
+ "bbox": [
493
+ 500,
494
+ 266,
495
+ 604,
496
+ 282
497
+ ],
498
+ "page_idx": 3
499
+ },
500
+ {
501
+ "type": "text",
502
+ "text": "Similar to Faceformer [10], we use an autoregressive training scheme instead of teacher-forcing to train our model on the VOCAset [5]. Given that VOCAset provides ground truth 3D facial animations, we define the following loss:",
503
+ "bbox": [
504
+ 496,
505
+ 289,
506
+ 890,
507
+ 363
508
+ ],
509
+ "page_idx": 3
510
+ },
511
+ {
512
+ "type": "equation",
513
+ "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\lambda_ {M S E} \\cdot \\mathcal {L} _ {M S E} + \\lambda_ {v e l} \\cdot \\mathcal {L} _ {v e l} + \\lambda_ {l i p} \\cdot \\mathcal {L} _ {l i p}, \\tag {3}\n$$\n",
514
+ "text_format": "latex",
515
+ "bbox": [
516
+ 514,
517
+ 377,
518
+ 890,
519
+ 393
520
+ ],
521
+ "page_idx": 3
522
+ },
523
+ {
524
+ "type": "text",
525
+ "text": "where $\\mathcal{L}_{MSE}$ defines a reconstruction loss of the vertices, $\\mathcal{L}_{vel}$ defines a velocity loss, and $\\mathcal{L}_{lip}$ measures lip contact. The weights are $\\lambda_{MSE} = 1.0$ , $\\lambda_{vel} = 10.0$ , and $\\lambda_{lip} = 5.0$ .",
526
+ "bbox": [
527
+ 498,
528
+ 405,
529
+ 890,
530
+ 450
531
+ ],
532
+ "page_idx": 3
533
+ },
534
+ {
535
+ "type": "text",
536
+ "text": "Reconstruction Loss: The reconstruction loss $\\mathcal{L}_{MSE}$ is:",
537
+ "bbox": [
538
+ 500,
539
+ 453,
540
+ 877,
541
+ 468
542
+ ],
543
+ "page_idx": 3
544
+ },
545
+ {
546
+ "type": "equation",
547
+ "text": "\n$$\n\\mathcal {L} _ {M S E} = \\sum_ {v = 1} ^ {V} \\sum_ {t = 1} ^ {T _ {v}} \\left| \\left| y _ {t, v} - \\hat {y} _ {t, v} \\right| \\right| ^ {2}, \\tag {4}\n$$\n",
548
+ "text_format": "latex",
549
+ "bbox": [
550
+ 583,
551
+ 478,
552
+ 890,
553
+ 520
554
+ ],
555
+ "page_idx": 3
556
+ },
557
+ {
558
+ "type": "text",
559
+ "text": "where $y_{t,v}$ is the ground truth mesh at time $t$ in sequence $v$ (of $V$ total sequences) and $\\hat{y}_{t,v}$ is the prediction.",
560
+ "bbox": [
561
+ 496,
562
+ 530,
563
+ 890,
564
+ 560
565
+ ],
566
+ "page_idx": 3
567
+ },
568
+ {
569
+ "type": "text",
570
+ "text": "Velocity Loss: Our motion decoder takes independent viseme features as input to produce facial expressions. To improve temporal consistency in the prediction, we introduce a velocity loss $\\mathcal{L}_{vel}$ similar to [5]:",
571
+ "bbox": [
572
+ 496,
573
+ 564,
574
+ 890,
575
+ 625
576
+ ],
577
+ "page_idx": 3
578
+ },
579
+ {
580
+ "type": "equation",
581
+ "text": "\n$$\n\\mathcal {L} _ {v e l} = \\sum_ {v = 1} ^ {V} \\sum_ {t = 2} ^ {T _ {v}} | | (y _ {t, v} - y _ {t - 1, v}) - (\\hat {y} _ {t, v} - \\hat {y} _ {t - 1, v}) | | ^ {2}. \\tag {5}\n$$\n",
582
+ "text_format": "latex",
583
+ "bbox": [
584
+ 508,
585
+ 635,
586
+ 890,
587
+ 676
588
+ ],
589
+ "page_idx": 3
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "Lip Contact Loss: Training with $L_{MSE}$ guides the model to learn an averaged facial expression, thus resulting in improper lip closures. To this end, we introduce a novel lip contact loss for bilabial consonants ('m', 'b', 'p') to improve lip closures. Specifically, we automatically annotate the VOCAset to extract the occurrences of these consonants; see Section 4. Using this data, we define the following lip loss:",
594
+ "bbox": [
595
+ 496,
596
+ 689,
597
+ 890,
598
+ 806
599
+ ],
600
+ "page_idx": 3
601
+ },
602
+ {
603
+ "type": "equation",
604
+ "text": "\n$$\n\\mathcal {L} _ {l i p} = \\sum_ {t = 1} ^ {T} \\sum_ {j = 1} ^ {N} w _ {t} \\left\\| y _ {t, v} - \\hat {y} _ {t, v} \\right\\| ^ {2}, \\tag {6}\n$$\n",
605
+ "text_format": "latex",
606
+ "bbox": [
607
+ 583,
608
+ 808,
609
+ 890,
610
+ 849
611
+ ],
612
+ "page_idx": 3
613
+ },
614
+ {
615
+ "type": "text",
616
+ "text": "where $w_{t,v}$ weights the prediction of frame $t$ according to the annotation of the bilabial consonants. Specifically, $w_{t,v}$ is one for frames with such consonants and zero otherwise.",
617
+ "bbox": [
618
+ 496,
619
+ 854,
620
+ 890,
621
+ 898
622
+ ],
623
+ "page_idx": 3
624
+ },
625
+ {
626
+ "type": "page_number",
627
+ "text": "4",
628
+ "bbox": [
629
+ 478,
630
+ 924,
631
+ 490,
632
+ 935
633
+ ],
634
+ "page_idx": 3
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "Note that for such consonant frames, the target $y_{t,v}$ represents a face with a closed mouth; thus, this loss improves lip closures at 'm', 'b' and 'p's (see Section 5).",
639
+ "bbox": [
640
+ 76,
641
+ 90,
642
+ 468,
643
+ 137
644
+ ],
645
+ "page_idx": 4
646
+ },
647
+ {
648
+ "type": "text",
649
+ "text": "3.3. Style Adaptation",
650
+ "text_level": 1,
651
+ "bbox": [
652
+ 76,
653
+ 147,
654
+ 243,
655
+ 165
656
+ ],
657
+ "page_idx": 4
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "Given a video of a new subject, we reconstruct and track the face $\\tilde{y}_{1:T}$ (see Section 4). Based on this reference data, we first optimize for the speaker style-embedding $\\hat{S}$ and then jointly refine the linear deformation basis using the $\\mathcal{L}_{MSE}$ and $\\mathcal{L}_{vel}$ loss. In our experiments, we found that this two-stage adaptation is essential for generalization to new audio inputs as it reuses the pretrained information of the motion decoder. As an initialization of the style embedding, we use a speaking style of the training set. We precompute all viseme features $\\hat{v}_{1:T}$ once, and optimize the speaking style to reproduce the tracked faces $\\tilde{y}_{1:T}$ . We then refine the linear motion basis of the decoder to match the person-specific deformations (e.g., asymmetric lip motions).",
662
+ "bbox": [
663
+ 76,
664
+ 171,
665
+ 470,
666
+ 367
667
+ ],
668
+ "page_idx": 4
669
+ },
670
+ {
671
+ "type": "text",
672
+ "text": "4. Dataset",
673
+ "text_level": 1,
674
+ "bbox": [
675
+ 76,
676
+ 382,
677
+ 165,
678
+ 398
679
+ ],
680
+ "page_idx": 4
681
+ },
682
+ {
683
+ "type": "text",
684
+ "text": "We train our method based on the VOCAset [5], which consists of 12 actors (6 female and 6 male) with 40 sequences each with a length of $3 - 5$ seconds. The dataset comes with a train/test set split which we use in our experiments. The test set contains 2 actors. The dataset offers audio and high-quality 3D face reconstructions per frame (60fps). For our experiment, we sample the 3D face reconstructions at 30fps. We train the auto-regressive transformer on this data using the loss from Equation (3). For the lip contact loss $L_{lip}$ , we automatically compute the labels as described below.",
685
+ "bbox": [
686
+ 75,
687
+ 407,
688
+ 468,
689
+ 571
690
+ ],
691
+ "page_idx": 4
692
+ },
693
+ {
694
+ "type": "text",
695
+ "text": "To adapt the motion decoder to a new subject, we require a short video clip of the person. Using this sequence, we run a 3DMM-based face tracker to get the per-frame 3D shape of the person. Based on this data, we adapt the motion decoder as detailed in Section 3.3.",
696
+ "bbox": [
697
+ 76,
698
+ 575,
699
+ 468,
700
+ 650
701
+ ],
702
+ "page_idx": 4
703
+ },
704
+ {
705
+ "type": "text",
706
+ "text": "Automatic Lip Closure Labeling: For the VOCAsset, the transcript is available. Based on Wav2Vec features, we align the transcript with the audio track. As the lip closure is formed before we hear the bilabial consonants, we search for the lip closure in the tracked face geometry before the time-stamp of the occurrence of the consonants in the script. We show this process for a single sequence in Figure 3. The lip closure is detected by lip distance, i.e., the frame with minimal lip distance in a short time window before the consonant is assumed to be the lip closure.",
707
+ "bbox": [
708
+ 76,
709
+ 655,
710
+ 468,
711
+ 805
712
+ ],
713
+ "page_idx": 4
714
+ },
715
+ {
716
+ "type": "text",
717
+ "text": "External Sequence Processing: We assume to have a monocular RGB video of about 2 minutes in length as input which we divide into train/validation/test sequences. Based on MICA [40], we estimate the 3D shape of the subject using the first frame of the video. Using this shape estimate, we run an analysis-by-synthesis approach [30] to",
718
+ "bbox": [
719
+ 76,
720
+ 810,
721
+ 470,
722
+ 901
723
+ ],
724
+ "page_idx": 4
725
+ },
726
+ {
727
+ "type": "image",
728
+ "img_path": "images/cd14d1d2a9c991ad49beebb0a3f0d86a490ecdc436d92083af1148885bb60d8b.jpg",
729
+ "image_caption": [
730
+ "Words Spoken: BAGPIPE AND BONGOS",
731
+ "Lip loss Weight"
732
+ ],
733
+ "image_footnote": [
734
+ "$\\times$ - Lip closure computed"
735
+ ],
736
+ "bbox": [
737
+ 506,
738
+ 101,
739
+ 885,
740
+ 243
741
+ ],
742
+ "page_idx": 4
743
+ },
744
+ {
745
+ "type": "image",
746
+ "img_path": "images/214c984ebec9ca3f3348373f02d2e6575626efe7a6fdfe822c75f4b9a73674bc.jpg",
747
+ "image_caption": [
748
+ "Audio",
749
+ "GT Lip distance curve",
750
+ "Figure 3. Automatic labeling of the bilabial consonants ('m', 'b' and 'p') and their corresponding lip closures in a sequence of VOCAset [5]. We align the transcript with the audio track using Wav2vec [1] features and extract the time stamps for the bilabial consonants. To detect the lip closures for the bilabial consonants, we search for local-minima on the Lip distance curves (red). The lip loss weights $w_{t,v}$ in a window around the detected lip closure are set to fixed values of a Gaussian function. We show an example of detected lip closures in the figure (in the blue bounding box)."
751
+ ],
752
+ "image_footnote": [
753
+ "Local Minimum search",
754
+ "$\\times$ - Detected consonants"
755
+ ],
756
+ "bbox": [
757
+ 506,
758
+ 244,
759
+ 573,
760
+ 320
761
+ ],
762
+ "page_idx": 4
763
+ },
764
+ {
765
+ "type": "text",
766
+ "text": "estimate per-frame blendshape parameters of the FLAME 3DMM [19]. Given these blendshape coefficients, we can compute the 3D vertices of the per-frame face meshes that we need to adapt the motion decoder. Note that in contrast to the training data of the transformer, we do not require any bilabial consonants labeling, as we adapt the motion decoder only based on the reconstruction and velocity loss.",
767
+ "bbox": [
768
+ 498,
769
+ 511,
770
+ 890,
771
+ 617
772
+ ],
773
+ "page_idx": 4
774
+ },
775
+ {
776
+ "type": "text",
777
+ "text": "5. Results",
778
+ "text_level": 1,
779
+ "bbox": [
780
+ 500,
781
+ 633,
782
+ 586,
783
+ 648
784
+ ],
785
+ "page_idx": 4
786
+ },
787
+ {
788
+ "type": "text",
789
+ "text": "To validate our method, we conducted a series of qualitative and quantitative evaluations, including a user study and ablation studies. For evaluation on the test set of VOCAset [5], we randomly sample 4 sequences from the test subjects' train set (each $\\sim$ 5s long) and learn the speaking-style and facial idiosyncrasies of the subject via style adaptation. We compare our method to the state-of-the-art methods VOCA [5], Faceformer [10], and MeshTalk [21]. We use the original implementations of the authors. However, we found that MeshTalk cannot train on the comparably small VOCAset. Thus, we qualitatively compare against MeshTalk with their provided model trained on a large-scale proprietary dataset with 200 subjects and 40 sequences for each. Note that the pretrained MeshTalk model is not compatible with the FLAME topology; thus, we cannot evaluate their method on novel identities. In addition to the experi",
790
+ "bbox": [
791
+ 498,
792
+ 659,
793
+ 890,
794
+ 900
795
+ ],
796
+ "page_idx": 4
797
+ },
798
+ {
799
+ "type": "page_number",
800
+ "text": "5",
801
+ "bbox": [
802
+ 480,
803
+ 924,
804
+ 488,
805
+ 936
806
+ ],
807
+ "page_idx": 4
808
+ },
809
+ {
810
+ "type": "image",
811
+ "img_path": "images/e9b5cadf1abf9e602dc6ffdb18609caa1db3405a7f2ed6e71dce013ae4f6ca20.jpg",
812
+ "image_caption": [
813
+ "Words spoken",
814
+ "So, I start talking now.... usually...",
815
+ "One of my favorite topics to discuss is ...",
816
+ "Figure 4. Qualitative comparison to the state-of-the-art methods VOCA [5], Faceformer [10], and MeshTalk [21]. Note that MeshTalk is performed with a different identity since we use their pretrained model, which cannot be trained on VOCAset. As we see in the highlighted regions, the geometry of the generated sequences without the person-specific style have muted and inaccurate lip animations."
817
+ ],
818
+ "image_footnote": [],
819
+ "bbox": [
820
+ 98,
821
+ 109,
822
+ 890,
823
+ 625
824
+ ],
825
+ "page_idx": 5
826
+ },
827
+ {
828
+ "type": "text",
829
+ "text": "ments on the VOCAset, we show results on external RGB sequences. The results can be best seen in the suppl. video.",
830
+ "bbox": [
831
+ 75,
832
+ 694,
833
+ 468,
834
+ 724
835
+ ],
836
+ "page_idx": 5
837
+ },
838
+ {
839
+ "type": "table",
840
+ "img_path": "images/71b31cb50c676b2c1d3599e0a5ed8bf882cef7a13be428ca993eb46efd400994.jpg",
841
+ "table_caption": [
842
+ "Quantitative Evaluation: To quantitatively evaluate our"
843
+ ],
844
+ "table_footnote": [
845
+ "Table 1. Quantitative results on the VOCAset [5]. Our method outperforms the baselines on all of the lip metrics while performing on par on the full-face metrics. Note that we are not targeting the animation of the upper face but aim for expressive and accurate lip movements, which is noticeable from the improved lip scores."
846
+ ],
847
+ "table_body": "<table><tr><td>Method</td><td>L2face↓</td><td>L2lip↓</td><td>F-DTW↓</td><td>Lip-DTW↓</td><td>Lip-sync↓</td></tr><tr><td>VOCA [5]</td><td>0.88</td><td>0.15</td><td>1.28</td><td>2.41</td><td>5.72</td></tr><tr><td>Faceformer [10]</td><td>0.8</td><td>0.14</td><td>1.18</td><td>2.85</td><td>5.41</td></tr><tr><td>Ours (w/ 1seq)</td><td>0.91</td><td>0.1</td><td>1.3</td><td>1.68</td><td>3.99</td></tr><tr><td>Ours</td><td>0.89</td><td>0.09</td><td>1.26</td><td>1.47</td><td>3.78</td></tr></table>",
848
+ "bbox": [
849
+ 78,
850
+ 751,
851
+ 465,
852
+ 820
853
+ ],
854
+ "page_idx": 5
855
+ },
856
+ {
857
+ "type": "text",
858
+ "text": "method, we use the test set of VOCAset [5], which provides high-quality reference mesh reconstructions. We evaluate the performance of our method based on a mean $L_{2}$ vertex distance for the entire mesh $L_{2}^{face}$ and the lip region $L_{2}^{lip}$ . Following MeshTalk [21], we also compute the Lipsync, which measures the mean of the maximal per-frame lip distances. In addition, we use Dynamic Time Wrapping (DTW) to compute the similarity between the produced and reference meshes, both for the entire mesh (F-DTW) and the lip region (Lip-DTW). Since VOCA and Faceformer do not adapt to new user talking styles, we select the talking style from their training with the best quantitative metrics. Note that the pretrained MeshTalk model is not applicable to this",
859
+ "bbox": [
860
+ 496,
861
+ 694,
862
+ 890,
863
+ 892
864
+ ],
865
+ "page_idx": 5
866
+ },
867
+ {
868
+ "type": "page_number",
869
+ "text": "6",
870
+ "bbox": [
871
+ 478,
872
+ 925,
873
+ 491,
874
+ 936
875
+ ],
876
+ "page_idx": 5
877
+ },
878
+ {
879
+ "type": "image",
880
+ "img_path": "images/4080d4530b3dfd6b0b6bb97aa39dfa5fec76ad6fefd4d4fdc6f0b64b64ab9dd6.jpg",
881
+ "image_caption": [
882
+ "Words spoken",
883
+ "His Failure to Open ... By Job.",
884
+ "Had Vinyl Technology Expand...",
885
+ "Figure 5. Qualitative ablation comparison. At first, we show that our complete method with style and $\\mathcal{L}_{lip}$ loss is able to generate personalized facial animation with expressive motion and accurate lip closures. Replacing the person-specific style with the style seen during training results in generic and muted facial animation. As highlighted in the per-vertex error maps (magenta), the generated expression is not similar to the target actor. Especially the facial deformations are missing person-specific details. Removing $\\mathcal{L}_{lip}$ from the training objective results in improper lip closures (red)."
886
+ ],
887
+ "image_footnote": [],
888
+ "bbox": [
889
+ 127,
890
+ 102,
891
+ 848,
892
+ 790
893
+ ],
894
+ "page_idx": 6
895
+ },
896
+ {
897
+ "type": "page_number",
898
+ "text": "7",
899
+ "bbox": [
900
+ 478,
901
+ 924,
902
+ 488,
903
+ 935
904
+ ],
905
+ "page_idx": 6
906
+ },
907
+ {
908
+ "type": "table",
909
+ "img_path": "images/57172374fd651da826713a35fe0fd0a726ee6b4e107cae5afde5e62b99fa57c7.jpg",
910
+ "table_caption": [],
911
+ "table_footnote": [],
912
+ "table_body": "<table><tr><td>Method</td><td>Expressiveness (%)</td><td>Realism/Lip-sync (%)</td></tr><tr><td>Ours vs VOCA [5]</td><td>86.48</td><td>76.92</td></tr><tr><td>Ours vs Faceformer [10]</td><td>81.89</td><td>75.46</td></tr><tr><td>Ours vs Ground truth</td><td>20.28</td><td>42.30</td></tr></table>",
913
+ "bbox": [
914
+ 78,
915
+ 88,
916
+ 467,
917
+ 148
918
+ ],
919
+ "page_idx": 7
920
+ },
921
+ {
922
+ "type": "text",
923
+ "text": "Table 2. In a perceptual A/B user study conducted on the test set of VOCAset [5] with 56 participants, we see that in comparison to VOCA [5] and Faceformer [10] our method is preferred.",
924
+ "bbox": [
925
+ 76,
926
+ 157,
927
+ 468,
928
+ 200
929
+ ],
930
+ "page_idx": 7
931
+ },
932
+ {
933
+ "type": "text",
934
+ "text": "evaluation due to the identity mismatch. As can be seen in Table 1, our method achieves the lowest lip reconstruction and lip-sync errors, confirming our qualitative results. Even when using a single reference video for style adaptation (5s), our results show significantly better lip scores.",
935
+ "bbox": [
936
+ 75,
937
+ 226,
938
+ 467,
939
+ 303
940
+ ],
941
+ "page_idx": 7
942
+ },
943
+ {
944
+ "type": "text",
945
+ "text": "Qualitative Evaluation: We conducted a qualitative evaluation on external sequences not part of VOCAset. In Figure 4, we show a series of frames from those sequences with the corresponding words. As we can see, our method is able to adapt to the speaking style of the respective subject. VOCA [5] and Faceformer [10] miss person-specific deformations and are not as expressive as our results. MeshTalk [21], which uses an identity that comes with the pretrained model, also shows dampened expressivity. In the suppl. video, we can observe that our method is generating better lip closures for bilabial consonants.",
946
+ "bbox": [
947
+ 75,
948
+ 305,
949
+ 467,
950
+ 470
951
+ ],
952
+ "page_idx": 7
953
+ },
954
+ {
955
+ "type": "text",
956
+ "text": "Perceptual Evaluation: We conducted a perceptual evaluation to quantify the quality of our method's generated results (see Table 2). Specifically, we conducted an A/B user study on the test set of VOCAset. We randomly sample 10 sequences of the test subjects and run our method, VOCA, and Faceformer. For VOCA and Faceformer, which do not adapt to the style of a new user, we use the talking style of the training Subject 137, which provided the best quantitative results. We use 20 videos per method resulting in 60 A/B comparisons. For every A/B test, we ask the user to choose the best method based on realism and expressiveness, following the user study protocol of Faceformer [10]. In Table 2, we show the result of this study in which 56 people participated. We observe that our method consistently outperforms VOCA and Faceformer. We also see that our model achieves similar realism and lip-sync as ground truth. Note that the users in the perceptual study have not seen the original talking style of the actors before. However, the results show that our personalized synthesis leads to more realistic-looking animations.",
957
+ "bbox": [
958
+ 75,
959
+ 474,
960
+ 468,
961
+ 777
962
+ ],
963
+ "page_idx": 7
964
+ },
965
+ {
966
+ "type": "text",
967
+ "text": "5.1. Ablation Studies",
968
+ "text_level": 1,
969
+ "bbox": [
970
+ 76,
971
+ 786,
972
+ 241,
973
+ 801
974
+ ],
975
+ "page_idx": 7
976
+ },
977
+ {
978
+ "type": "text",
979
+ "text": "To understand the impact of our style adaptation and the novel lip contact loss $\\mathcal{L}_{lip}$ on the perceptual quality, we show a qualitative ablation study including per-vertex error maps in Figure 5. As highlighted in the figure, the style adaptation is critical to match the person-specific deformations and mouth shapes and improves expressiveness.",
980
+ "bbox": [
981
+ 75,
982
+ 809,
983
+ 467,
984
+ 900
985
+ ],
986
+ "page_idx": 7
987
+ },
988
+ {
989
+ "type": "image",
990
+ "img_path": "images/62a5eb0db200d4bc4ef723dad7ea0191f341e0f680c9a6c934b2c771068121e1.jpg",
991
+ "image_caption": [
992
+ "Figure 6. Analysis of style adaptation in terms of lip distance on a test sequence of the VOCAset [5] (reference in red). Starting from an initial talking style from the training set (blue), we consecutively adapt the style code (green) and the motion basis of the motion decoder (purple)."
993
+ ],
994
+ "image_footnote": [],
995
+ "bbox": [
996
+ 501,
997
+ 87,
998
+ 890,
999
+ 191
1000
+ ],
1001
+ "page_idx": 7
1002
+ },
1003
+ {
1004
+ "type": "text",
1005
+ "text": "The lip contact loss improves the lip closures for the bilabial consonants, thus, improving the perceived realism, as can best be seen in the suppl. video. We rely on only $\\sim 60$ seconds-long reference videos to extract the person-specific speaking style. A detailed analysis of the sequence length's influence on the final output quality can be found in the suppl. material. It is also worth noting that our style-agnostic architecture allows us to perform style adaptation of the motion decoder in less than $30\\mathrm{min}$ , while an adaptation with an identity-dependent transformer takes about $6\\mathrm{h}$ .",
1006
+ "bbox": [
1007
+ 496,
1008
+ 306,
1009
+ 890,
1010
+ 459
1011
+ ],
1012
+ "page_idx": 7
1013
+ },
1014
+ {
1015
+ "type": "text",
1016
+ "text": "Our proposed style adaptation has two stages as explained in Section 3.3. In the first step, we optimize for the style code and the refine the motion basis. In Figure 6, we show an example of the style adaptation by evaluating the lip distances throughout a sequence with a motion decoder at initialization, with optimized style code, and with a refined motion basis. While the lip distance with the generalized motion decoder is considerable, it gets significantly improved by the consecutive steps of style adaptation. After style code optimization, we observe that the amplitude and frequency of the lip distance curves start resembling the ground truth. Refining the motion basis further improves the lip distance, and it is able to capture facial idiosyncrasies, like asymmetrical lip deformations.",
1017
+ "bbox": [
1018
+ 496,
1019
+ 464,
1020
+ 890,
1021
+ 675
1022
+ ],
1023
+ "page_idx": 7
1024
+ },
1025
+ {
1026
+ "type": "text",
1027
+ "text": "6. Discussion",
1028
+ "text_level": 1,
1029
+ "bbox": [
1030
+ 500,
1031
+ 704,
1032
+ 612,
1033
+ 719
1034
+ ],
1035
+ "page_idx": 7
1036
+ },
1037
+ {
1038
+ "type": "text",
1039
+ "text": "Our evaluation shows that our proposed method outperforms state-of-the-art methods in perceived expressiveness and realism. However, several limitations remain. Specifically, we only support the speaking style of the subject seen in the reference video and do not control the talking style w.r.t. emotions (e.g., sad, happy, angry). The viseme transformer and the motion decoder could be conditioned on an emotion flag; we leave this for future work. The expressiveness and facial details depend on the face tracker's quality; if the face tracking is improved, our method will predict better face shapes.",
1040
+ "bbox": [
1041
+ 496,
1042
+ 734,
1043
+ 890,
1044
+ 900
1045
+ ],
1046
+ "page_idx": 7
1047
+ },
1048
+ {
1049
+ "type": "page_number",
1050
+ "text": "8",
1051
+ "bbox": [
1052
+ 480,
1053
+ 924,
1054
+ 488,
1055
+ 935
1056
+ ],
1057
+ "page_idx": 7
1058
+ },
1059
+ {
1060
+ "type": "text",
1061
+ "text": "7. Conclusion",
1062
+ "text_level": 1,
1063
+ "bbox": [
1064
+ 76,
1065
+ 89,
1066
+ 194,
1067
+ 106
1068
+ ],
1069
+ "page_idx": 8
1070
+ },
1071
+ {
1072
+ "type": "text",
1073
+ "text": "We present Imitator, a novel approach for personalized speech-driven 3D facial animation. Based on a short reference video clip of a subject, we learn a personalized motion decoder driven by a generalized auto-regressive transformer that maps audio to intermediate viseme features. Our studies show that personalized facial animations are essential for the perceived realism of a generated sequence. Our new loss formulation for accurate lip closures of bilabial consonants further improves the results. We believe that personalized facial animations are a stepping stone towards audio-driven digital doubles.",
1074
+ "bbox": [
1075
+ 75,
1076
+ 114,
1077
+ 472,
1078
+ 282
1079
+ ],
1080
+ "page_idx": 8
1081
+ },
1082
+ {
1083
+ "type": "text",
1084
+ "text": "8. Acknowledgements",
1085
+ "text_level": 1,
1086
+ "bbox": [
1087
+ 76,
1088
+ 297,
1089
+ 264,
1090
+ 315
1091
+ ],
1092
+ "page_idx": 8
1093
+ },
1094
+ {
1095
+ "type": "text",
1096
+ "text": "This project has received funding from the Mesh Labs, Microsoft, Cambridge, UK. Further, we would like to thank Berna Kabadayi, Jalees Nehvi, Malte Prinzler and Wojciech Zielonka for their support and valuable feedback. The authors thank the International Max Planck Research School for Intelligent Systems (IMPRS-IS) for supporting Balamurugan Thambiraja.",
1097
+ "bbox": [
1098
+ 75,
1099
+ 324,
1100
+ 468,
1101
+ 431
1102
+ ],
1103
+ "page_idx": 8
1104
+ },
1105
+ {
1106
+ "type": "text",
1107
+ "text": "References",
1108
+ "text_level": 1,
1109
+ "bbox": [
1110
+ 76,
1111
+ 446,
1112
+ 174,
1113
+ 462
1114
+ ],
1115
+ "page_idx": 8
1116
+ },
1117
+ {
1118
+ "type": "list",
1119
+ "sub_type": "ref_text",
1120
+ "list_items": [
1121
+ "[1] Baevski, A., Zhou, Y., Mohamed, A., Auli, M.: wav2vec 2.0: A framework for self-supervised learning of speech representations. In: Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual (2020), https://proceedings.neurips.cc/paper/2020/hash/92d1e1eb1cd6f9fba3227870bb6d7f07-Abstract.html 3,5,12",
1122
+ "[2] Blanz, V., Vetter, T.: A morphable model for the synthesis of 3d faces. In: Proceedings of the 26th annual conference on Computer graphics and interactive techniques. pp. 187-194 (1999) 2",
1123
+ "[3] Cao, Y., Tien, W.C., Faloutsos, P., Pighin, F.: Expressive speech-driven facial animation. ACM Trans. Graph. 24(4), 1283-1302 (oct 2005). https://doi.org/10.1145/1095878.1095881, https://doi.org/10.1145/1095878.1095881 2",
1124
+ "[4] Chung, J.S., Jamaludin, A., Zisserman, A.: You said that? arXiv preprint arXiv:1705.02966 (2017) 2",
1125
+ "[5] Cudeiro, D., Bolkart, T., Laidlaw, C., Ranjan, A., Black, M.J.: Capture, Learning, and Synthesis of 3D Speaking Styles. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10093-10103. IEEE, Long Beach, CA, USA (Jun 2019). https://doi.org/10.1109/CVPR.2019.01034, https://ieeexplore.ieee.org/document/8954000/2,3,4,5,6,8"
1126
+ ],
1127
+ "bbox": [
1128
+ 84,
1129
+ 472,
1130
+ 470,
1131
+ 898
1132
+ ],
1133
+ "page_idx": 8
1134
+ },
1135
+ {
1136
+ "type": "list",
1137
+ "sub_type": "ref_text",
1138
+ "list_items": [
1139
+ "[6] De Martino, J.M., Pini Magalhães, L., Violaro, F.: Facial animation based on context-dependent visemes. Computers & Graphics 30(6), 971-980 (Dec 2006). https://doi.org/10.1016/j.cag.2006.08.017, https : / / linkinghub . elsevier . com / retrieve/pii/S0097849306001518 2",
1140
+ "[7] Edwards, P., Landreth, C., Fume, E., Singh, K.: Jali: an animator-centric viseme model for expressive lip synchronization. ACM Trans. Graph. 35, 127:1-127:11 (2016) 2, 3",
1141
+ "[8] Egger, B., Smith, W.A., Tewari, A., Wuhrer, S., Zollhoefer, M., Beeler, T., Bernard, F., Bolkart, T., Kortylewski, A., Romdhani, S., et al.: 3d morphable face models—past, present, and future. ACM Transactions on Graphics (TOG) 39(5), 1-38 (2020) 2",
1142
+ "[9] Ezzat, T., Poggio, T.: MikeTalk: a talking facial display based on morphing visemes. In: Proceedings Computer Animation '98 (Cat. No.98EX169). pp. 96-102. IEEE Comput. Soc, Philadelphia, PA, USA (1998). https://doi.org/10.1109/CA.1998.681913, http://ieeexplore.ieee.org/document/681913/2",
1143
+ "[10] Fan, Y., Lin, Z., Saito, J., Wang, W., Komura, T.: Faceformer: Speech-driven 3d facial animation with transformers. CoRR abs/2112.05329 (2021), https://arxiv.org/abs/2112.05329 2, 3, 4, 5, 6, 8, 12, 13",
1144
+ "[11] Gafni, G., Thies, J., Zollhöfer, M., Nießner, M.: Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. CoRR abs/2012.03065 (2020), https://arxiv.org/abs/2012.030652",
1145
+ "[12] Guo, Y., Chen, K., Liang, S., Liu, Y., Bao, H., Zhang, J.: Ad-nerf: Audio driven neural radiance fields for talking head synthesis. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2021) 2",
1146
+ "[13] Hannun, A., Case, C., Casper, J., Catanzaro, B., Diamos, G., Elsen, E., Prenger, R., Satheesh, S., Sengupta, S., Coates, A., Y. Ng, A.: DeepSpeech: Scaling up end-to-end speech recognition (12 2014) 2, 3",
1147
+ "[14] Holden, D., Saito, J., Komura, T.: A deep learning framework for character motion synthesis and editing. ACM Transactions on Graphics (TOG) 35(4), 1-11 (2016) 2",
1148
+ "[15] Kalberer, G., Van Gool, L.: Face animation based on observed 3D speech dynamics. In: Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation (Cat. No.01TH8596). pp. 20-251. IEEE Comput. Soc, Seoul, South Korea (2001). https://doi.org/10.1109/CA.2001.982373, http://ieeexplore.ieee.org/document/982373/2",
1149
+ "[16] Karras, T., Aila, T., Laine, S., Herva, A., Lehtinen, J.: Audio-driven facial animation by joint end-to-end learning of pose and emotion. ACM Transactions on Graphics 36(4), 1-12 (Jul 2017). https://doi.org/10.1145/3072959.3073658, https://dl.acm.org/doi/10.1145/3072959.3073658 2, 3"
1150
+ ],
1151
+ "bbox": [
1152
+ 501,
1153
+ 92,
1154
+ 893,
1155
+ 898
1156
+ ],
1157
+ "page_idx": 8
1158
+ },
1159
+ {
1160
+ "type": "page_number",
1161
+ "text": "9",
1162
+ "bbox": [
1163
+ 478,
1164
+ 924,
1165
+ 491,
1166
+ 936
1167
+ ],
1168
+ "page_idx": 8
1169
+ },
1170
+ {
1171
+ "type": "list",
1172
+ "sub_type": "ref_text",
1173
+ "list_items": [
1174
+ "[17] Lahiri, A., Kwatra, V., Frueh, C., Lewis, J., Bregler, C.: Lipsync3d: Data-efficient learning of personalized 3d talking faces from video using pose and lighting normalization (2021). https://doi.org/10.48550/ARXIV.2106.04185, https://arxiv.org/abs/2106.04185 2",
1175
+ "[18] Lee, J., Chai, J., Reitsma, P.S., Hodgins, J.K., Pollard, N.S.: Interactive control of avatars animated with human motion data. In: Proceedings of the 29th annual conference on Computer graphics and interactive techniques. pp. 491-500 (2002) 2",
1176
+ "[19] Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia) 36(6) (2017), https://doi.org/10.1145/3130800.31308135",
1177
+ "[20] Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: Learning dynamic renderable volumes from images. ACM Trans. Graph. 38(4), 65:1-65:14 (Jul 2019) 2",
1178
+ "[21] Richard, A., Zollhofer, M., Wen, Y., de la Torre, F., Sheikh, Y.: MeshTalk: 3D Face Animation from Speech using Cross-Modality Disentanglement. In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 1153-1162. IEEE, Montreal, QC, Canada (Oct 2021). https://doi.org/10.1109/ICCV48922.2021.00121, https://ieeexplore.ieee.org/document/9710491/2,3,5,6,8",
1179
+ "[22] Rössler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nießner, M.: Faceforensics++: Learning to detect manipulated facial images. ICCV 2019 (2019) 13",
1180
+ "[23] Schneider, S., Baevski, A., Collobert, R., Auli, M.: wav2vec: Unsupervised pre-training for speech recognition. In: Kubin, G., Kacic, Z. (eds.) Interspeech 2019, 20th Annual Conference of the International Speech Communication Association, Graz, Austria, 15-19 September 2019. pp. 3465-3469. ISCA (2019). https://doi.org/10.21437/Interspeech.2019-1873, https://doi.org/10.21437/Interspeech.2019-1873 2, 3",
1181
+ "[24] Song, L., Wu, W., Qian, C., He, R., Loy, C.C.: Everybody's talkin': Let me talk as you want. IEEE Transactions on Information Forensics and Security 17, 585-598 (2022) 2",
1182
+ "[25] Suwajanakorn, S., Seitz, S.M., Kemelmacher-Shlizerman, I.: Synthesizing america: learning lip sync from audio. ACM Transactions on Graphics (ToG) 36(4), 1-13 (2017) 2",
1183
+ "[26] Taylor, S.L., Kim, T., Yue, Y., Mahler, M., Krahe, J., Rodriguez, A.G., Hodgins, J.K., Matthews, I.A.: A deep learning approach for generalized speech animation. ACM Trans. Graph. 36(4), 93:1-93:11 (2017). https://doi.org/10.1145/3072959.3073699, https://doi.org/10.1145/3072959.3073699 2",
1184
+ "[27] Tewari, A., Thies, J., Mildenhall, B., Srinivasan, P., Tretschk, E., Wang, Y., Lassner, C., Sitzmann, V., Martin-Brualla, R., Lombardi, S., Simon, T., Theobalt, C., Niessner, M., Barron,"
1185
+ ],
1186
+ "bbox": [
1187
+ 78,
1188
+ 90,
1189
+ 470,
1190
+ 900
1191
+ ],
1192
+ "page_idx": 9
1193
+ },
1194
+ {
1195
+ "type": "list",
1196
+ "sub_type": "ref_text",
1197
+ "list_items": [
1198
+ "J.T., Wetzstein, G., Zollhoefer, M., Golyanik, V.: Advances in neural rendering (2022) 1",
1199
+ "[28] Thies, J., Tewari, A., Fried, O., Sitzmann, V., Lombardi, S., Sunkavalli, K., Martin-Brualla, R., Simon, T., Saragih, J., Nießner, M., Pandey, R., Fanello, S., Wetzstein, G., Zhu, J.Y., Theobalt, C., Agrawala, M., Shechtman, E., Goldman, D.B., Zollhöfer, M.: State of the art on neural rendering. EG (2020) 1",
1200
+ "[29] Thies, J., Elgharib, M., Tewari, A., Theobalt, C., Nießner, M.: Neural voice puppetry: Audio-driven facial reenactment. ECCV 2020 (2020) 2",
1201
+ "[30] Thies, J., Zollhöfer, M., Stamminger, M., Theobalt, C., Nießner, M.: Face2face: Real-time face capture and reenactment of rgb videos (2020). https://doi.org/10.48550/ARXIV.2007.14808, https://arxiv.org/abs/2007.148085",
1202
+ "[31] Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in neural information processing systems 30 (2017) 3, 4, 12",
1203
+ "[32] Verma, A., Rajput, N., Subramaniam, L.: Using viseme based acoustic models for speech driven lip synthesis. In: 2003 IEEE International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings. (ICASSP '03). vol. 5, pp. V-720-3. IEEE, Hong Kong, China (2003). https://doi.org/10.1109/ICASSP.2003.1200072, http://ieeexplore.ieee.org/document/1200072/2",
1204
+ "[33] Vougioukas, K., Petridis, S., Pantic, M.: Realistic speech-driven facial animation with gans. International Journal of Computer Vision 128(5), 1398-1413 (2020) 2",
1205
+ "[34] Wang, S., Li, L., Ding, Y., Fan, C., Yu, X.: Audio2head: Audio-driven one-shot talking-head generation with natural head motion. In: International Joint Conference on Artificial Intelligence. IJCAI (2021) 2",
1206
+ "[35] Yao, S., Zhong, R., Yan, Y., Zhai, G., Yang, X.: Dfa-nerf: Personalized talking head generation via disentangled face attributes neural rendering. arXiv preprint arXiv:2201.00791 (2022) 2",
1207
+ "[36] Yi, R., Ye, Z., Zhang, J., Bao, H., Liu, Y.J.: Audio-driven talking face video generation with learning-based personalized head pose. arXiv preprint arXiv:2002.10137 (2020) 2",
1208
+ "[37] Zhang, Z., Li, L., Ding, Y., Fan, C.: Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3661–3670 (2021) 2",
1209
+ "[38] Zheng, Y., Abrevaya, V.F., Chen, X., Bühler, M.C., Black, M.J., Hilliges, O.: I M avatar: Implicit morphable head avatars from videos. CoRR abs/2112.07471 (2021), https://arxiv.org/abs/2112.07471 2",
1210
+ "[39] Zhou, Y., Han, X., Shechtman, E., Echevarria, J., Kalogerakis, E., Li, D.: Makelttalk: speaker-aware talking-head animation. ACM Transactions on Graphics (TOG) 39(6), 1-15 (2020) 2"
1211
+ ],
1212
+ "bbox": [
1213
+ 501,
1214
+ 92,
1215
+ 893,
1216
+ 900
1217
+ ],
1218
+ "page_idx": 9
1219
+ },
1220
+ {
1221
+ "type": "page_number",
1222
+ "text": "10",
1223
+ "bbox": [
1224
+ 477,
1225
+ 924,
1226
+ 495,
1227
+ 936
1228
+ ],
1229
+ "page_idx": 9
1230
+ },
1231
+ {
1232
+ "type": "ref_text",
1233
+ "text": "[40] Zielonka, W., Bolkart, T., Thies, J.: Towards metrical reconstruction of human faces. ECCV (2022). https://doi.org/10.48550/ARXIV.2204.06607, https://arxiv.org/abs/2204.066075",
1234
+ "bbox": [
1235
+ 78,
1236
+ 90,
1237
+ 470,
1238
+ 147
1239
+ ],
1240
+ "page_idx": 10
1241
+ },
1242
+ {
1243
+ "type": "page_number",
1244
+ "text": "11",
1245
+ "bbox": [
1246
+ 477,
1247
+ 924,
1248
+ 491,
1249
+ 935
1250
+ ],
1251
+ "page_idx": 10
1252
+ },
1253
+ {
1254
+ "type": "text",
1255
+ "text": "Imitator: Personalized Speech-driven 3D Facial Animation - Supplemental Document -",
1256
+ "text_level": 1,
1257
+ "bbox": [
1258
+ 189,
1259
+ 99,
1260
+ 782,
1261
+ 137
1262
+ ],
1263
+ "page_idx": 11
1264
+ },
1265
+ {
1266
+ "type": "text",
1267
+ "text": "9. Impact of Data to Style Adaptation:",
1268
+ "text_level": 1,
1269
+ "bbox": [
1270
+ 76,
1271
+ 184,
1272
+ 401,
1273
+ 202
1274
+ ],
1275
+ "page_idx": 11
1276
+ },
1277
+ {
1278
+ "type": "text",
1279
+ "text": "To analyze the impact of data on the style adaptation process, we randomly sample (1, 4, 10, 20) sequences from the train set of the VOCA test subjects and perform our style adaption. Each sequence contains about $3 - 5$ seconds of data. In Table 3, we observe that the performance on the quantitative metrics increase with the number of reference sequences. As mentioned in the main paper, even an adaptation based on a single sequence results in a significantly better animation in comparison to the baseline methods. This highlights the impact of style on the generated animations.",
1280
+ "bbox": [
1281
+ 75,
1282
+ 209,
1283
+ 468,
1284
+ 359
1285
+ ],
1286
+ "page_idx": 11
1287
+ },
1288
+ {
1289
+ "type": "text",
1290
+ "text": "Figure 7 illustrates the lip distance curve for one test sequence used in this study. We observe that the lip distance with more reference data better fits the ground truth curve.",
1291
+ "bbox": [
1292
+ 76,
1293
+ 361,
1294
+ 468,
1295
+ 405
1296
+ ],
1297
+ "page_idx": 11
1298
+ },
1299
+ {
1300
+ "type": "table",
1301
+ "img_path": "images/1d6c85b8b2cfc6fb2b5ad961be760fe2bbc3e91549d19a23273b65ae99a1394a.jpg",
1302
+ "table_caption": [],
1303
+ "table_footnote": [],
1304
+ "table_body": "<table><tr><td>No. Seq.</td><td>L2face↓</td><td>L2lip↓</td><td>F-DTW↓</td><td>Lip-DTW↓</td><td>Lip-sync↓</td></tr><tr><td>1</td><td>0.91</td><td>0.1</td><td>1.3</td><td>1.68</td><td>3.99</td></tr><tr><td>4</td><td>0.89</td><td>0.1</td><td>1.26</td><td>1.47</td><td>3.78</td></tr><tr><td>10</td><td>0.76</td><td>0.09</td><td>1.07</td><td>1.37</td><td>3.57</td></tr><tr><td>20</td><td>0.7</td><td>0.09</td><td>0.99</td><td>1.27</td><td>3.49</td></tr></table>",
1305
+ "bbox": [
1306
+ 78,
1307
+ 419,
1308
+ 467,
1309
+ 493
1310
+ ],
1311
+ "page_idx": 11
1312
+ },
1313
+ {
1314
+ "type": "text",
1315
+ "text": "Table 3. Ablation of the style adaptation w.r.t. the amount of reference sequences used. With an increasing number of data, the quantitative metrics improve. Each sequence is $3 - 5\\mathrm{s}$ long.",
1316
+ "bbox": [
1317
+ 75,
1318
+ 503,
1319
+ 468,
1320
+ 546
1321
+ ],
1322
+ "page_idx": 11
1323
+ },
1324
+ {
1325
+ "type": "image",
1326
+ "img_path": "images/f1fb3096e88d2a20d9b49e00ea3639cceb691da4eac2d4c84db2d1ce62e1da60.jpg",
1327
+ "image_caption": [
1328
+ "Figure 7. With an increasing number of reference data samples for style adaptation, the lip distance throughout a test sequence of VOCAset is approaching the ground truth lip distance curve."
1329
+ ],
1330
+ "image_footnote": [],
1331
+ "bbox": [
1332
+ 81,
1333
+ 566,
1334
+ 465,
1335
+ 684
1336
+ ],
1337
+ "page_idx": 11
1338
+ },
1339
+ {
1340
+ "type": "text",
1341
+ "text": "10. Architecture Details",
1342
+ "text_level": 1,
1343
+ "bbox": [
1344
+ 76,
1345
+ 761,
1346
+ 279,
1347
+ 777
1348
+ ],
1349
+ "page_idx": 11
1350
+ },
1351
+ {
1352
+ "type": "text",
1353
+ "text": "10.1. Audio Encoder:",
1354
+ "text_level": 1,
1355
+ "bbox": [
1356
+ 76,
1357
+ 786,
1358
+ 243,
1359
+ 801
1360
+ ],
1361
+ "page_idx": 11
1362
+ },
1363
+ {
1364
+ "type": "text",
1365
+ "text": "Similar to Faceformer [10], our audio encoder is built upon the Wav2Vec 2.0 [1] architecture to extract temporal audio features. These audio features are fed into a linear interpolation layer to convert the audio frequency to the motion frequency. The interpolated outputs are then fed into 12 identical transformer encoder layers with 12 attention heads",
1366
+ "bbox": [
1367
+ 75,
1368
+ 809,
1369
+ 468,
1370
+ 900
1371
+ ],
1372
+ "page_idx": 11
1373
+ },
1374
+ {
1375
+ "type": "text",
1376
+ "text": "and an output dimension of 768. A final linear projection layer converts the audio features from the 768-dimension features to a 64-dimensional phoneme representation.",
1377
+ "bbox": [
1378
+ 496,
1379
+ 185,
1380
+ 890,
1381
+ 231
1382
+ ],
1383
+ "page_idx": 11
1384
+ },
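As a minimal illustration of the resampling and projection described above, the following sketch (not the authors' code) linearly interpolates Wav2Vec 2.0 hidden states to the motion frame rate and projects them from 768 to 64 dimensions; the batch-first tensor layout and the module name are assumptions.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class AudioFeatureResampler(nn.Module):
    """Resample Wav2Vec 2.0 features to the motion frame rate and project to 64-D."""

    def __init__(self, d_audio=768, d_out=64):
        super().__init__()
        self.proj = nn.Linear(d_audio, d_out)

    def forward(self, feats, n_motion_frames):
        # feats: (B, T_audio, 768) hidden states from the Wav2Vec 2.0 encoder
        x = feats.transpose(1, 2)                            # (B, 768, T_audio)
        x = F.interpolate(x, size=n_motion_frames,           # match e.g. 30 fps motion
                          mode="linear", align_corners=False)
        return self.proj(x.transpose(1, 2))                  # (B, T_motion, 64)
```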
1385
+ {
1386
+ "type": "text",
1387
+ "text": "10.2. Auto-regressive Viseme Decoder:",
1388
+ "text_level": 1,
1389
+ "bbox": [
1390
+ 500,
1391
+ 241,
1392
+ 797,
1393
+ 257
1394
+ ],
1395
+ "page_idx": 11
1396
+ },
1397
+ {
1398
+ "type": "text",
1399
+ "text": "Our auto-regressive viseme decoder is built on top of traditional transformer decoder layers [31]. We use a zero vector of 64-dimension as a start token to indicate the start of sequence synthesis. We first add a positional encoding of 64-dimension to the input feature and fed it to decoder layers in the viseme decoder. For self-attention and cross-modal multi-head attention, we use 4 heads of dimension 64. Our feed forward layer dimension is 128.",
1400
+ "bbox": [
1401
+ 496,
1402
+ 263,
1403
+ 890,
1404
+ 385
1405
+ ],
1406
+ "page_idx": 11
1407
+ },
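The following is a hedged sketch of this auto-regressive decoding loop: a zero start token, sinusoidal positional encoding, and a standard transformer decoder that cross-attends to the audio features. Using torch.nn.TransformerDecoder here is an assumption for illustration, and the sketch omits the causal mask and the alignment bias discussed further below.

```python
import math
import torch
import torch.nn as nn

def positional_encoding(length, d=64):
    """Standard sinusoidal positional encoding of shape (length, d)."""
    pe = torch.zeros(length, d)
    pos = torch.arange(length, dtype=torch.float32).unsqueeze(1)
    div = torch.exp(torch.arange(0, d, 2, dtype=torch.float32) * (-math.log(10000.0) / d))
    pe[:, 0::2] = torch.sin(pos * div)
    pe[:, 1::2] = torch.cos(pos * div)
    return pe

@torch.no_grad()
def decode_visemes(decoder, audio_feats, d=64):
    # decoder: e.g. nn.TransformerDecoder built from batch_first=True layers (assumption)
    # audio_feats: (1, T, d) resampled audio features from the encoder
    T = audio_feats.size(1)
    visemes = torch.zeros(1, 1, d)                        # zero start token
    for _ in range(T):
        tgt = visemes + positional_encoding(visemes.size(1), d)
        out = decoder(tgt, audio_feats)                   # cross-attend to the audio
        visemes = torch.cat([visemes, out[:, -1:]], dim=1)
    return visemes[:, 1:]                                 # (1, T, d) viseme features
```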
1408
+ {
1409
+ "type": "text",
1410
+ "text": "Multi-Head Self-Attention: Given a sequence of positional encoded inputs $\\hat{h}_t$ , we use multi-head self-attention (self-MHA), which generates the context representation of the inputs by weighting the inputs based on their relevance. The Scaled Dot-Product attention function can be defined as mapping a query and a set of key-value pairs to an output, where queries, keys, values and outputs are vectors [31]. The output is the weighted sum of the values; the weight is computed by a compatibility function of a query with the corresponding key. The attention can be formulated as:",
1411
+ "bbox": [
1412
+ 496,
1413
+ 388,
1414
+ 892,
1415
+ 541
1416
+ ],
1417
+ "page_idx": 11
1418
+ },
1419
+ {
1420
+ "type": "equation",
1421
+ "text": "\n$$\n\\operatorname {A t t e n t i o n} (Q, K, V) = \\sigma \\left(\\frac {Q K ^ {T}}{\\sqrt {d _ {k}}}\\right) V, \\tag {7}\n$$\n",
1422
+ "text_format": "latex",
1423
+ "bbox": [
1424
+ 573,
1425
+ 551,
1426
+ 890,
1427
+ 585
1428
+ ],
1429
+ "page_idx": 11
1430
+ },
1431
+ {
1432
+ "type": "text",
1433
+ "text": "where $Q, K, V$ are the learned Queries, Keys and Values, $\\sigma(\\cdot)$ denotes the softmax activation function, and $d_k$ is the dimension of the keys. Instead of using a single attention mechanism and generating one context representation, MHA uses multiple self-attention heads to jointly generate multiple context representations and attend to the information in the different context representations at different positions. MHA is formulated as follows:",
1434
+ "bbox": [
1435
+ 496,
1436
+ 595,
1437
+ 890,
1438
+ 715
1439
+ ],
1440
+ "page_idx": 11
1441
+ },
1442
+ {
1443
+ "type": "equation",
1444
+ "text": "\n$$\nM H A (Q, K, V) = \\left[ h e a d _ {1}, \\dots , h e a d _ {h} \\right] \\cdot W ^ {O}, \\tag {8}\n$$\n",
1445
+ "text_format": "latex",
1446
+ "bbox": [
1447
+ 540,
1448
+ 728,
1449
+ 890,
1450
+ 744
1451
+ ],
1452
+ "page_idx": 11
1453
+ },
1454
+ {
1455
+ "type": "text",
1456
+ "text": "with $head_{i} = \\text{Attention}(QW_{i}^{Q}, KW_{i}^{K}, VW_{i}^{V})$ , where $W^{O}, W_{i}^{Q}, W_{i}^{K}, W_{i}^{V}$ are weights related to each input variable.",
1457
+ "bbox": [
1458
+ 496,
1459
+ 757,
1460
+ 890,
1461
+ 805
1462
+ ],
1463
+ "page_idx": 11
1464
+ },
1465
+ {
1466
+ "type": "text",
1467
+ "text": "Audio-Motion Multi-Head Attention The Audio-Motion Multi-Head attention aims to map the context representations from the audio encoder to the viseme representations by learning the alignment between the audio and style-agnostic viseme features. The decoder queries all the existing viseme features with the encoded audio features, which",
1468
+ "bbox": [
1469
+ 496,
1470
+ 809,
1471
+ 890,
1472
+ 900
1473
+ ],
1474
+ "page_idx": 11
1475
+ },
1476
+ {
1477
+ "type": "page_number",
1478
+ "text": "12",
1479
+ "bbox": [
1480
+ 475,
1481
+ 924,
1482
+ 495,
1483
+ 936
1484
+ ],
1485
+ "page_idx": 11
1486
+ },
1487
+ {
1488
+ "type": "text",
1489
+ "text": "carry both the positional information and the contextual information, thus, resulting in audio context-injected viseme features. Similar to Faceformer [10], we add an alignment bias along the diagonal to the query-key attention score to add more weight to the current time audio features. The alignment bias $B^{A}(1 \\leq i \\leq t, 1 \\leq j \\leq KT)$ is:",
1490
+ "bbox": [
1491
+ 75,
1492
+ 90,
1493
+ 472,
1494
+ 183
1495
+ ],
1496
+ "page_idx": 12
1497
+ },
1498
+ {
1499
+ "type": "equation",
1500
+ "text": "\n$$\nB ^ {A} (i, j) = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} (i = j), \\\\ - \\infty & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {9}\n$$\n",
1501
+ "text_format": "latex",
1502
+ "bbox": [
1503
+ 166,
1504
+ 191,
1505
+ 468,
1506
+ 232
1507
+ ],
1508
+ "page_idx": 12
1509
+ },
1510
+ {
1511
+ "type": "text",
1512
+ "text": "The modified Audio-Motion Attention is represented as:",
1513
+ "bbox": [
1514
+ 76,
1515
+ 242,
1516
+ 450,
1517
+ 257
1518
+ ],
1519
+ "page_idx": 12
1520
+ },
1521
+ {
1522
+ "type": "equation",
1523
+ "text": "\n$$\n\\operatorname {A t t e n t i o n} \\left(Q ^ {v}, K ^ {a}, V ^ {a}, B ^ {A}\\right) = \\sigma \\left(\\frac {Q ^ {v} \\left(K ^ {a}\\right) ^ {T}}{\\sqrt {d _ {k}}} + B ^ {A}\\right) V ^ {a}, \\tag {10}\n$$\n",
1524
+ "text_format": "latex",
1525
+ "bbox": [
1526
+ 81,
1527
+ 266,
1528
+ 468,
1529
+ 311
1530
+ ],
1531
+ "page_idx": 12
1532
+ },
1533
+ {
1534
+ "type": "text",
1535
+ "text": "where $Q^v$ are the learned queries from viseme features, $K^a$ the keys and $V^a$ the values from the audio features, $\\sigma(\\cdot)$ is the softmax activation function, and $d_k$ is the dimension of the keys.",
1536
+ "bbox": [
1537
+ 76,
1538
+ 313,
1539
+ 468,
1540
+ 375
1541
+ ],
1542
+ "page_idx": 12
1543
+ },
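A hedged sketch of the alignment bias of Eq. (9) and its use in the audio-motion cross-attention of Eq. (10), reusing the MultiHeadAttention sketch above; the simple one-to-one frame alignment and the tensor shapes are assumptions for illustration.

```python
import torch

def alignment_bias(n_visemes, n_audio):
    """B^A(i, j) = 0 if i == j, -inf otherwise (Eq. 9)."""
    bias = torch.full((n_visemes, n_audio), float("-inf"))
    idx = torch.arange(min(n_visemes, n_audio))
    bias[idx, idx] = 0.0
    return bias  # broadcasts over batch and heads inside the attention

# Usage: cross-attend the decoder's viseme queries to the encoded audio features.
# viseme_feats: (B, t, 64), audio_feats: (B, T, 64)
# attn = MultiHeadAttention(d_model=64, n_heads=4)
# ctx = attn(viseme_feats, audio_feats, audio_feats,
#            bias=alignment_bias(viseme_feats.size(1), audio_feats.size(1)))
```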
1544
+ {
1545
+ "type": "text",
1546
+ "text": "10.3. Motion Decoder:",
1547
+ "text_level": 1,
1548
+ "bbox": [
1549
+ 76,
1550
+ 382,
1551
+ 253,
1552
+ 397
1553
+ ],
1554
+ "page_idx": 12
1555
+ },
1556
+ {
1557
+ "type": "text",
1558
+ "text": "The motion decoder aims to generate 3D facial animations $\\hat{y}_{1:T}$ from the style-agnostic viseme features $\\hat{v}_{1:T}$ and a style embedding $\\hat{S}_i$ . Specifically, our motion decoder consists of two components, a style embedding layer and a motion synthesis block. The style linear layer takes a one-hot encoder of 8-dimension and produces a style-embedding of 64-dimension. The input viseme features are concatenated with the style-embedding and fed into 4 successive linear layers which have a leaky-ReLU as activation. The output dimension of the 4-layer block is 64 dimensional. A final fully connected layer maps the 64-dimension input features to the 3D face deformation described as per-vertex displacements of size 15069. This layer is defining the motion deformation basis of a subject and is adapted based on a reference sequence.",
1559
+ "bbox": [
1560
+ 75,
1561
+ 405,
1562
+ 472,
1563
+ 632
1564
+ ],
1565
+ "page_idx": 12
1566
+ },
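A minimal sketch (not the released implementation) of the motion decoder described above: an 8-way one-hot style code embedded to 64-D, concatenated with the 64-D viseme features, passed through four leaky-ReLU linear layers, and mapped by a final linear layer, the adaptable motion deformation basis, to 15069 per-vertex displacements. Splitting 15069 as 5023 vertices times 3 is an assumption matching the FLAME topology.

```python
import torch
import torch.nn as nn

class MotionDecoder(nn.Module):
    """Style embedding + MLP motion synthesis + linear motion deformation basis."""

    def __init__(self, n_styles=8, d_style=64, d_viseme=64, n_verts=5023):
        super().__init__()
        self.style_embedding = nn.Linear(n_styles, d_style)
        layers, d = [], d_style + d_viseme
        for _ in range(4):                               # 4 leaky-ReLU linear layers
            layers += [nn.Linear(d, 64), nn.LeakyReLU()]
            d = 64
        self.synthesis = nn.Sequential(*layers)
        # Final layer = linear motion deformation basis, refined per subject.
        self.motion_basis = nn.Linear(64, n_verts * 3)

    def forward(self, visemes, style_onehot, template):
        # visemes: (B, T, d_viseme), style_onehot: (B, n_styles), template: (n_verts, 3)
        style = self.style_embedding(style_onehot)                   # (B, d_style)
        style = style.unsqueeze(1).expand(-1, visemes.size(1), -1)   # (B, T, d_style)
        feats = torch.cat([visemes, style], dim=-1)
        offsets = self.motion_basis(self.synthesis(feats))           # (B, T, 3*n_verts)
        return offsets.view(*offsets.shape[:2], -1, 3) + template    # add to neutral face
```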
1567
+ {
1568
+ "type": "text",
1569
+ "text": "Training Details: We use the ADAM optimizer with a learning rate of 1e-4 for both the style-agnostic transformer training and the style adaptation stage. During the style-agnostic transformer training, the parameters of the Wave2Vec 2.0 layers in the audio encoder are fixed. Our model is trained for 300 epochs, and the best model is chosen based on the validation reconstruction loss. During the style-adaptation stage, we first generate the viseme features and keep them fixed during the style adaptation stage. Then, we optimize for the style embedding for 300 epochs. Finally, the style-embedding and final motion deformation basis is refined for another 300 epochs.",
1570
+ "bbox": [
1571
+ 75,
1572
+ 635,
1573
+ 468,
1574
+ 816
1575
+ ],
1576
+ "page_idx": 12
1577
+ },
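A hedged sketch of the two-stage style adaptation loop described above. It assumes a decode(visemes, style) callable returning predicted vertices, precomputed and frozen viseme features, and face-tracking results for the reference clip; the loss terms mirror the reconstruction and velocity losses, and the velocity weight is illustrative.

```python
import torch

def mse(a, b):
    return ((a - b) ** 2).sum()

def velocity(a, b):
    # Difference of frame-to-frame motion, cf. the velocity loss of the training stage.
    return mse(a[:, 1:] - a[:, :-1], b[:, 1:] - b[:, :-1])

def adapt_style(decode, motion_basis_params, visemes, tracked, epochs=300, lr=1e-4):
    # visemes: frozen (B, T, 64) features; tracked: (B, T, V, 3) face-tracking result
    style = torch.zeros(1, 64, requires_grad=True)        # subject's style embedding

    def optimize(params):
        opt = torch.optim.Adam(params, lr=lr)
        for _ in range(epochs):
            opt.zero_grad()
            pred = decode(visemes, style)
            loss = mse(pred, tracked) + 10.0 * velocity(pred, tracked)
            loss.backward()
            opt.step()

    optimize([style])                                     # stage 1: style code only
    optimize([style, *motion_basis_params])               # stage 2: refine motion basis
    return style
```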
1578
+ {
1579
+ "type": "text",
1580
+ "text": "11. Broader Impact",
1581
+ "text_level": 1,
1582
+ "bbox": [
1583
+ 76,
1584
+ 830,
1585
+ 246,
1586
+ 847
1587
+ ],
1588
+ "page_idx": 12
1589
+ },
1590
+ {
1591
+ "type": "text",
1592
+ "text": "Our proposed method aims at the synthesis of realistic-looking 3D facial animations. Ultimately, these animations can be used to drive photo-realistic digital doubles of people",
1593
+ "bbox": [
1594
+ 75,
1595
+ 854,
1596
+ 470,
1597
+ 902
1598
+ ],
1599
+ "page_idx": 12
1600
+ },
1601
+ {
1602
+ "type": "text",
1603
+ "text": "in audio-driven immersive telepresence applications in AR or VR. However, this technology can also be misused for so-called DeepFakes. Given a voice cloning approach, our method could generate 3D facial animations that drive an image synthesis method. This can lead to identity theft, cyber mobbing, or other harmful criminal acts. We believe that conducting research openly and transparently could raise the awareness of the misuse of such technology. We will share our implementation to enable research on digital multi-media forensics. Specifically, synthesis methods are needed to produce the training data for forgery detection [22].",
1604
+ "bbox": [
1605
+ 496,
1606
+ 90,
1607
+ 890,
1608
+ 271
1609
+ ],
1610
+ "page_idx": 12
1611
+ },
1612
+ {
1613
+ "type": "text",
1614
+ "text": "All participants in the study have given written consent to the usage of their video material for this publication.",
1615
+ "bbox": [
1616
+ 498,
1617
+ 272,
1618
+ 890,
1619
+ 303
1620
+ ],
1621
+ "page_idx": 12
1622
+ },
1623
+ {
1624
+ "type": "page_number",
1625
+ "text": "13",
1626
+ "bbox": [
1627
+ 475,
1628
+ 924,
1629
+ 493,
1630
+ 936
1631
+ ],
1632
+ "page_idx": 12
1633
+ }
1634
+ ]
2301.00xxx/2301.00023/4635d162-e5d4-4ff9-83b6-a0e11e214a21_model.json ADDED
@@ -0,0 +1,2239 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "aside_text",
5
+ "bbox": [
6
+ 0.023,
7
+ 0.261,
8
+ 0.058,
9
+ 0.707
10
+ ],
11
+ "angle": 270,
12
+ "content": "arXiv:2301.00023v1 [cs.CV] 30 Dec 2022"
13
+ },
14
+ {
15
+ "type": "title",
16
+ "bbox": [
17
+ 0.188,
18
+ 0.131,
19
+ 0.782,
20
+ 0.152
21
+ ],
22
+ "angle": 0,
23
+ "content": "Imitator: Personalized Speech-driven 3D Facial Animation"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.184,
29
+ 0.18,
30
+ 0.394,
31
+ 0.216
32
+ ],
33
+ "angle": 0,
34
+ "content": "Balamurugan Thambiraja<sup>1</sup> \nDarren Cosker<sup>3</sup>"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.427,
40
+ 0.181,
41
+ 0.582,
42
+ 0.215
43
+ ],
44
+ "angle": 0,
45
+ "content": "Ikhsanul Habibie<sup>2</sup> \nChristian Theobalt<sup>2</sup>"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.603,
51
+ 0.181,
52
+ 0.785,
53
+ 0.216
54
+ ],
55
+ "angle": 0,
56
+ "content": "Sadegh Aliakbarian<sup>3</sup> \nJustus Thies<sup>1</sup>"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.221,
62
+ 0.233,
63
+ 0.75,
64
+ 0.252
65
+ ],
66
+ "angle": 0,
67
+ "content": "<sup>1</sup> Max Planck Institute for Intelligent Systems, Tübingen, Germany"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.255,
73
+ 0.253,
74
+ 0.716,
75
+ 0.269
76
+ ],
77
+ "angle": 0,
78
+ "content": "2 Max Planck Institute for Informatics, Saarland, Germany"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.27,
84
+ 0.27,
85
+ 0.696,
86
+ 0.288
87
+ ],
88
+ "angle": 0,
89
+ "content": "<sup>3</sup> Microsoft Mixed Reality & AI Lab, Cambridge, UK"
90
+ },
91
+ {
92
+ "type": "list",
93
+ "bbox": [
94
+ 0.255,
95
+ 0.253,
96
+ 0.716,
97
+ 0.288
98
+ ],
99
+ "angle": 0,
100
+ "content": null
101
+ },
102
+ {
103
+ "type": "image",
104
+ "bbox": [
105
+ 0.094,
106
+ 0.306,
107
+ 0.877,
108
+ 0.54
109
+ ],
110
+ "angle": 0,
111
+ "content": null
112
+ },
113
+ {
114
+ "type": "image_caption",
115
+ "bbox": [
116
+ 0.076,
117
+ 0.555,
118
+ 0.893,
119
+ 0.598
120
+ ],
121
+ "angle": 0,
122
+ "content": "Figure 1. Imitator is a novel method for personalized speech-driven 3D facial animation. Given an audio sequence and a personalized style-embedding as input, we generate person-specific motion sequences with accurate lip closures for bilabial consonants ('m', 'b', 'p'). The style-embedding of a subject can be computed by a short reference video (e.g., 5s)."
123
+ },
124
+ {
125
+ "type": "title",
126
+ "bbox": [
127
+ 0.235,
128
+ 0.61,
129
+ 0.314,
130
+ 0.626
131
+ ],
132
+ "angle": 0,
133
+ "content": "Abstract"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.075,
139
+ 0.645,
140
+ 0.473,
141
+ 0.901
142
+ ],
143
+ "angle": 0,
144
+ "content": "Speech-driven 3D facial animation has been widely explored, with applications in gaming, character animation, virtual reality, and telepresence systems. State-of-the-art methods deform the face topology of the target actor to sync the input audio without considering the identity-specific speaking style and facial idiosyncrasies of the target actor; thus, resulting in unrealistic and inaccurate lip movements. To address this, we present Imitator, a speech-driven facial expression synthesis method, which learns identity-specific details from a short input video and produces novel facial expressions matching the identity-specific speaking style and facial idiosyncrasies of the target actor. Specifically, we train a style-agnostic transformer on a large facial expression dataset which we use as a prior for audiodriven facial expressions. Based on this prior, we optimize for identity-specific speaking style based on a short reference video. To train the prior, we introduce a novel loss"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.498,
150
+ 0.611,
151
+ 0.895,
152
+ 0.732
153
+ ],
154
+ "angle": 0,
155
+ "content": "function based on detected bilabial consonants to ensure plausible lip closures and consequently improve the realism of the generated expressions. Through detailed experiments and a user study, we show that our approach produces temporally coherent facial expressions from input audio while preserving the speaking style of the target actors. Please check out the project page for the supplemental video and more results."
156
+ },
157
+ {
158
+ "type": "title",
159
+ "bbox": [
160
+ 0.501,
161
+ 0.768,
162
+ 0.633,
163
+ 0.784
164
+ ],
165
+ "angle": 0,
166
+ "content": "1. Introduction"
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.498,
172
+ 0.795,
173
+ 0.895,
174
+ 0.903
175
+ ],
176
+ "angle": 0,
177
+ "content": "3D digital humans raised a lot of attention in the past few years as they aim to replicate the appearance and motion of real humans for immersive applications, like telepresence in AR or VR, character animation and creation for entertainment (movies and games), and virtual mirrors for e-commerce. Especially, with the introduction of neural rendering [27, 28], we see immense progress in the photo-"
178
+ },
179
+ {
180
+ "type": "page_number",
181
+ "bbox": [
182
+ 0.481,
183
+ 0.925,
184
+ 0.49,
185
+ 0.937
186
+ ],
187
+ "angle": 0,
188
+ "content": "1"
189
+ }
190
+ ],
191
+ [
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.077,
196
+ 0.091,
197
+ 0.473,
198
+ 0.364
199
+ ],
200
+ "angle": 0,
201
+ "content": "realistic synthesis of such digital doubles [11,20,38]. These avatars can be controlled via visual tracking to mirror the facial expressions of a real human. However, we need to control the facial avatars with text or audio inputs for a series of applications. For example, AI-driven digital assistants rely on motion synthesis instead of motion cloning. Even telepresence applications might need to work with audio inputs only, when the face of the person is occluded or cannot be tracked, since a face capture device is not available. To this end, we analyze motion synthesis for facial animations from audio inputs; note that text-to-speech approaches can be used to generate such audio. Humans are generally sensitive towards faces, especially facial motions, as they are crucial for communication (e.g., micro-expressions). Without full expressiveness and proper lip closures, the generated animation will be perceived as unnatural and implausible. Especially if the person is known, the facial animations must match the subject's idiosyncrasies."
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.077,
207
+ 0.364,
208
+ 0.473,
209
+ 0.741
210
+ ],
211
+ "angle": 0,
212
+ "content": "Recent methods for speech-driven 3D facial animation [5, 10, 16, 21] are data-driven. They are trained on high-quality motion capture data and leverage pretrained speech models [13, 23] to extract an intermediate audio representation. We can classify these data-driven methods into two categories, generalized [5, 10, 21] and personalized animation generation methods [16]. In contrast to those approaches, we aim at a personalized 3D facial animation synthesis that can adapt to a new user while only relying on input RGB videos captured with commodity cameras. Specifically, we propose a transformer-based auto-regressive motion synthesis method that predicts a generalized motion representation. This intermediate representation is decoded by a motion decoder which is adaptable to new users. A speaker embedding is adjusted for a new user, and a new motion basis for the motion decoder is computed. Our method is trained on the VOCA dataset [5] and can be applied to new subjects captured in a short monocular RGB video. As lip closures are of paramount importance for bilabial consonants ('m', 'b', 'p'), we introduce a novel loss based on the detection of bilabials to ensure that the lips are closed properly. We take inspiration from the locomotion synthesis field [14, 18], where similar losses are used to enforce foot contact with the ground and transfer it to our scenario of physically plausible lip motions."
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.077,
218
+ 0.742,
219
+ 0.473,
220
+ 0.879
221
+ ],
222
+ "angle": 0,
223
+ "content": "In a series of experiments and ablation studies, we demonstrate that our method is able to synthesize facial expressions that match the target subject's motions in terms of style and expressiveness. Our method outperforms state-of-the-art methods in our metrical evaluation and user study. Please refer to our supplemental video for a detailed qualitative comparison. In a user study, we confirm that personalized facial expressions are important for the perceived realism."
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.078,
229
+ 0.886,
230
+ 0.436,
231
+ 0.901
232
+ ],
233
+ "angle": 0,
234
+ "content": "The contributions of our work Imitator are as follows:"
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.518,
240
+ 0.092,
241
+ 0.892,
242
+ 0.152
243
+ ],
244
+ "angle": 0,
245
+ "content": "- a novel auto-regressive motion synthesis architecture that allows for adaption to new users by disentangling generalized viseme generation and person-specific motion decoding,"
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.518,
251
+ 0.161,
252
+ 0.892,
253
+ 0.207
254
+ ],
255
+ "angle": 0,
256
+ "content": "- and a lip contact loss formulation for improved lip closures based on physiological cues of bilabial consonants ('m', 'b', 'p')."
257
+ },
258
+ {
259
+ "type": "list",
260
+ "bbox": [
261
+ 0.518,
262
+ 0.092,
263
+ 0.892,
264
+ 0.207
265
+ ],
266
+ "angle": 0,
267
+ "content": null
268
+ },
269
+ {
270
+ "type": "title",
271
+ "bbox": [
272
+ 0.5,
273
+ 0.227,
274
+ 0.642,
275
+ 0.243
276
+ ],
277
+ "angle": 0,
278
+ "content": "2. Related Work"
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.499,
284
+ 0.252,
285
+ 0.892,
286
+ 0.297
287
+ ],
288
+ "angle": 0,
289
+ "content": "Our work focuses on speech-driven 3D facial animation related to talking head methods that create photo-realistic video sequences from audio inputs."
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.499,
295
+ 0.297,
296
+ 0.893,
297
+ 0.75
298
+ ],
299
+ "angle": 0,
300
+ "content": "Talking Head Videos: Several prior works on speech-driven generation focus on the synthesis of 2D talking head videos. Suwajanakorn et al. [25] train an LSTM network on 19h video material of Obama to predict his person-specific 2D lip landmarks from speech inputs, which is then used for image generation. Vougioukas et al. [33] propose a method to generate facial animation from a single RGB image by leveraging a temporal generative adversarial network. Chung et al. [4] introduce a real-time approach to generate an RGB video of a talking face by directly mapping the audio input to the video output space. This method can redub a new target identity not seen during training. Instead of performing direct mapping, Zhou et al. [39] disentangles the speech information in terms of speaker identity and content, allowing speech-driven generation that can be applied to various types of realistic and hand-drawn head portraits. A series of work [24, 29, 36, 37] uses an intermediate 3D Morphable Model (3DMM) [2, 8] to guide the 2D neural rendering of talking heads from audio. Wang et al. [34] extend this work also to model the head movements of the speaker. Lipsync3d [17] proposes data-efficient learning of personalized talking heads focusing on pose and lighting normalization. Based on dynamic neural radiance fields [11], Ad-nerf [12] and DFA-NeRF [35] learn personalized talking head models that can be rendered under novel views, while being controlled by audio inputs. In contrast to these methods, our work focuses on predicting 3D facial animations from speech that can be used to drive 3D digital avatars without requiring retraining of the entire model to capture the person-specific motion style."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.499,
306
+ 0.75,
307
+ 0.894,
308
+ 0.901
309
+ ],
310
+ "angle": 0,
311
+ "content": "Speech-Driven 3D Facial Animation: Speech-driven 3d facial animation is a vivid field of research. Earlier methods [6, 7, 9, 15, 32] focus on animating a predefined facial rig using procedural rules. HMM-based models generate visemes from input text or audio, and the facial animations are generated using viseme-dependent co-articulation models [6, 7] or by blending facial templates [15]. With recent advances in machine learning, data-driven methods [3, 5, 10, 16, 21, 26, 29] have demonstrated their capability to learn viseme patterns from data. These methods"
312
+ },
313
+ {
314
+ "type": "page_number",
315
+ "bbox": [
316
+ 0.48,
317
+ 0.925,
318
+ 0.492,
319
+ 0.937
320
+ ],
321
+ "angle": 0,
322
+ "content": "2"
323
+ }
324
+ ],
325
+ [
326
+ {
327
+ "type": "image",
328
+ "bbox": [
329
+ 0.084,
330
+ 0.082,
331
+ 0.873,
332
+ 0.293
333
+ ],
334
+ "angle": 0,
335
+ "content": null
336
+ },
337
+ {
338
+ "type": "image_caption",
339
+ "bbox": [
340
+ 0.076,
341
+ 0.298,
342
+ 0.893,
343
+ 0.342
344
+ ],
345
+ "angle": 0,
346
+ "content": "Figure 2. Our architecture takes audio as input which is encoded by a pre-trained Wav2Vec2.0 model [1]. This audio embedding \\(\\hat{a}_{1:T}\\) is interpreted by an auto-regressive viseme decoder which generates a generalized motion feature \\(\\hat{v}_{1:T}\\). A style-adaptable motion decoder maps these motion features to person-specific facial expressions \\(\\hat{y}_{1:T}\\) in terms of vertex displacements on top of a template mesh."
347
+ },
348
+ {
349
+ "type": "text",
350
+ "bbox": [
351
+ 0.076,
352
+ 0.359,
353
+ 0.473,
354
+ 0.678
355
+ ],
356
+ "angle": 0,
357
+ "content": "are based on pretrained speech models [1, 13, 23] to generate an abstract and generalized representation of the input audio, which is then interpreted by a CNN or autoregressive model to map to either a 3DMM space or directly to 3D meshes. Karras et al. [16] learn a 3D facial animation model from 3-5 minutes of high-quality actor specific 3D data. VOCA [5] is trained on 3D data of multiple subjects and can animate the corresponding set of identities from input audio by providing a one-hot encoding during inference that indicates the subject. MeshTalk [21] is a generalized method that learns a categorical representation for facial expressions and auto-regressively samples from this categorical space to animate a given 3D facial template mesh of a subject from audio inputs. FaceFormer [10] uses a pretrained Wav2Vec [1] audio representation and applies a transformer-based decoder to regress displacements on top of a template mesh. Like VOCA, FaceFormer provides a speaker identification code to the decoder, allowing one to choose from the training set talking styles. In contrast, we aim at a method that can adapt to new users, capturing their talking style and expressiveness."
358
+ },
359
+ {
360
+ "type": "title",
361
+ "bbox": [
362
+ 0.077,
363
+ 0.693,
364
+ 0.17,
365
+ 0.71
366
+ ],
367
+ "angle": 0,
368
+ "content": "3. Method"
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.076,
374
+ 0.72,
375
+ 0.473,
376
+ 0.903
377
+ ],
378
+ "angle": 0,
379
+ "content": "Our goal is to model person-specific speaking style and the facial idiosyncrasies of an actor, to generate 3D facial animations of the subject from novel audio inputs. As input, we assume a short video sequence of the subject which we leverage to compute the identity-specific speaking style. To enable fast adaptation to novel users without significant training sequences, we learn a generalized style-agnostic transformer on VOCaset [5]. This transformer provides generic motion features from audio inputs that are interpretable by a person-specific motion decoder. The motion decoder is pre-trained and adaptable to new users via speaking style optimization and refinement of the motion"
380
+ },
381
+ {
382
+ "type": "text",
383
+ "bbox": [
384
+ 0.498,
385
+ 0.359,
386
+ 0.895,
387
+ 0.437
388
+ ],
389
+ "angle": 0,
390
+ "content": "basis. To further improve synthesis results, we introduce a novel lip contact loss based on physiological cues of the bilabial consonants [7]. In the following, we will detail our model architecture and the training objectives and describe the style adaptation."
391
+ },
392
+ {
393
+ "type": "title",
394
+ "bbox": [
395
+ 0.499,
396
+ 0.446,
397
+ 0.688,
398
+ 0.461
399
+ ],
400
+ "angle": 0,
401
+ "content": "3.1. Model Architecture"
402
+ },
403
+ {
404
+ "type": "text",
405
+ "bbox": [
406
+ 0.498,
407
+ 0.47,
408
+ 0.893,
409
+ 0.515
410
+ ],
411
+ "angle": 0,
412
+ "content": "Our architecture consists of three main components (see Figure 2): an audio encoder, a generalized auto-regressive viseme decoder, and an adaptable motion decoder."
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.498,
418
+ 0.52,
419
+ 0.895,
420
+ 0.776
421
+ ],
422
+ "angle": 0,
423
+ "content": "Audio Encoder: Following state-of-the-art motion synthesis models [5, 10], we use a generalized speech model to encode the audio inputs \\( A \\). Specifically, we leverage the Wav2Vec 2.0 model [1]. The original Wav2Vec is based on a CNN architecture designed to produce a meaningful latent representation of human speech. To this end, the model is trained in a self-supervised and semi-supervised manner to predict the immediate future values of the current input speech by using a contrastive loss, allowing the model to learn from a large amount of unlabeled data. Wav2Vec 2.0 extends this idea by quantizing the latent representation and incorporating a Transformer-based architecture [31]. We resample the Wav2Vec 2.0 output with a linear interpolation layer to match the sampling frequency of the motion (30fps for the VOCAsset, with 16kHz audio), resulting in a contextual representation \\( \\{\\hat{a}\\}_{t=1}^{T} \\) of the audio sequence for \\( T \\) motion frames."
424
+ },
425
+ {
426
+ "type": "text",
427
+ "bbox": [
428
+ 0.498,
429
+ 0.78,
430
+ 0.895,
431
+ 0.903
432
+ ],
433
+ "angle": 0,
434
+ "content": "Auto-regressive Viseme Decoder: The decoder \\( F_{v} \\) takes the contextual representation of the audio sequence as input and produces style agnostic viseme features \\( \\hat{v}_{t} \\) in an auto-regressive manner. These viseme features describe how the lip should deform given the context audio and the previous viseme features. In contrast to Faceformer [10], we propose to use of a classical transformer architecture [31] as viseme decoder, which learns the mapping from audio-"
435
+ },
436
+ {
437
+ "type": "page_number",
438
+ "bbox": [
439
+ 0.48,
440
+ 0.925,
441
+ 0.492,
442
+ 0.938
443
+ ],
444
+ "angle": 0,
445
+ "content": "3"
446
+ }
447
+ ],
448
+ [
449
+ {
450
+ "type": "text",
451
+ "bbox": [
452
+ 0.077,
453
+ 0.09,
454
+ 0.468,
455
+ 0.122
456
+ ],
457
+ "angle": 0,
458
+ "content": "features \\(\\{\\hat{a}\\}_{t = 1}^{T}\\) to identity agnostic viseme features \\(\\{\\hat{v}\\}_{t = 1}^{T}\\). The autoregressive viseme decoder is defined as:"
459
+ },
460
+ {
461
+ "type": "equation",
462
+ "bbox": [
463
+ 0.185,
464
+ 0.134,
465
+ 0.468,
466
+ 0.15
467
+ ],
468
+ "angle": 0,
469
+ "content": "\\[\n\\hat {v} _ {t} = F _ {v} \\left(\\theta_ {v}; \\hat {v} _ {1: t - 1}, \\hat {a} _ {1: T}\\right), \\tag {1}\n\\]"
470
+ },
471
+ {
472
+ "type": "text",
473
+ "bbox": [
474
+ 0.077,
475
+ 0.162,
476
+ 0.454,
477
+ 0.176
478
+ ],
479
+ "angle": 0,
480
+ "content": "where \\(\\theta_v\\) are the learnable parameters of the transformer."
481
+ },
482
+ {
483
+ "type": "text",
484
+ "bbox": [
485
+ 0.076,
486
+ 0.177,
487
+ 0.468,
488
+ 0.373
489
+ ],
490
+ "angle": 0,
491
+ "content": "In contrast to the traditional neural machine translation (NMT) architectures that produce discrete text, our output representation is a continuous vector. NMT models use a start and end token to indicate the beginning and end of the sequence. During inference, the NMT model autoregressively generates tokens until the end token is generated. Similarly, we use a start token to indicate the beginning of the sequences. However, since the sequence length \\( T \\) is given by the length of the audio input, we do not use an end token. We inject temporal information into the sequences by adding encoded time to the viseme feature in the sequence. We formulate the positionally encoded intermediate representations \\( \\hat{h}_t \\) as:"
492
+ },
493
+ {
494
+ "type": "equation",
495
+ "bbox": [
496
+ 0.211,
497
+ 0.383,
498
+ 0.468,
499
+ 0.401
500
+ ],
501
+ "angle": 0,
502
+ "content": "\\[\n\\hat {h} _ {t} = \\hat {v} _ {t} + P E (t), \\tag {2}\n\\]"
503
+ },
504
+ {
505
+ "type": "text",
506
+ "bbox": [
507
+ 0.076,
508
+ 0.413,
509
+ 0.468,
510
+ 0.761
511
+ ],
512
+ "angle": 0,
513
+ "content": "where \\(PE(t)\\) is a sinusoidal encoding function [31]. Given the sequence of positional encoded inputs \\(\\hat{h}_t\\), we use multi-head self-attention which generates the context representation of the inputs by weighting the inputs based on their relevance. These context representations are used as input to a cross-modal multi-head attention block which also takes the audio features \\(\\hat{a}_{1:T}\\) from the audio encoder as input. A final feed-forward layer maps the output of this audio-motion attention layer to the viseme embedding \\(\\hat{v}_t\\). In contrast to Faceformer [10], which feeds encoded face motions \\(\\hat{y}_t\\) to the transformer, we work with identity-agnostic viseme features which are independently decoded by the motion decoder. We found that feeding face motions \\(\\hat{y}_t\\) via an input embedding layer to the transformer contains identity-specific information, which we try to avoid since we aim for a generalized viseme decoder that is disentangled from person-specific motion. In addition, using a general start token instead of the identity code [10] as the start token reduces the identity bias further. Note that disentangling the identity-specific information from the viseme decoder improves the motion optimization in the style adaption stage of the pipeline (see Section 3.3), as gradients do not need to be propagated through the auto-regressive transformer."
514
+ },
515
+ {
516
+ "type": "text",
517
+ "bbox": [
518
+ 0.076,
519
+ 0.765,
520
+ 0.468,
521
+ 0.901
522
+ ],
523
+ "angle": 0,
524
+ "content": "Motion Decoder: The motion decoder aims to generate 3D facial animation \\(\\hat{y}_{1:T}\\) from the style-agnostic viseme features \\(\\hat{v}_{1:T}\\) and a style embedding \\(\\hat{S}_i\\). Specifically, our motion decoder consists of two components, a style embedding layer and a motion synthesis block. For the training of the style-agnostic transformer and for pre-training the motion decoder, we assume to have a one-hot encoding of the identities of the training set. The style embedding layer takes this identity information as input and produces the style"
525
+ },
526
+ {
527
+ "type": "text",
528
+ "bbox": [
529
+ 0.498,
530
+ 0.091,
531
+ 0.892,
532
+ 0.258
533
+ ],
534
+ "angle": 0,
535
+ "content": "embedding \\(\\hat{S}_i\\), which encodes the identity-specific motion. The style embedding is concatenated with the viseme features \\(\\hat{v}_{1:T}\\) and fed into the motion synthesis block. The motion synthesis block consists of non-linear layers which map the style-aware viseme features to the motion space defined by a linear deformation basis. During training, the deformation basis is learned across all identities in the dataset. The deformation basis is fine-tuned for style adaptation to out-of-training identities (see Section 3.3). The final mesh outputs \\(\\hat{y}_{1:T}\\) are computed by adding the estimated per-vertex deformation to the template mesh of the subject."
536
+ },
537
+ {
538
+ "type": "title",
539
+ "bbox": [
540
+ 0.5,
541
+ 0.267,
542
+ 0.605,
543
+ 0.284
544
+ ],
545
+ "angle": 0,
546
+ "content": "3.2. Training"
547
+ },
548
+ {
549
+ "type": "text",
550
+ "bbox": [
551
+ 0.498,
552
+ 0.29,
553
+ 0.892,
554
+ 0.364
555
+ ],
556
+ "angle": 0,
557
+ "content": "Similar to Faceformer [10], we use an autoregressive training scheme instead of teacher-forcing to train our model on the VOCAset [5]. Given that VOCAset provides ground truth 3D facial animations, we define the following loss:"
558
+ },
559
+ {
560
+ "type": "equation",
561
+ "bbox": [
562
+ 0.515,
563
+ 0.378,
564
+ 0.891,
565
+ 0.395
566
+ ],
567
+ "angle": 0,
568
+ "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\lambda_ {M S E} \\cdot \\mathcal {L} _ {M S E} + \\lambda_ {v e l} \\cdot \\mathcal {L} _ {v e l} + \\lambda_ {l i p} \\cdot \\mathcal {L} _ {l i p}, \\tag {3}\n\\]"
569
+ },
570
+ {
571
+ "type": "text",
572
+ "bbox": [
573
+ 0.499,
574
+ 0.406,
575
+ 0.891,
576
+ 0.451
577
+ ],
578
+ "angle": 0,
579
+ "content": "where \\(\\mathcal{L}_{MSE}\\) defines a reconstruction loss of the vertices, \\(\\mathcal{L}_{vel}\\) defines a velocity loss, and \\(\\mathcal{L}_{lip}\\) measures lip contact. The weights are \\(\\lambda_{MSE} = 1.0\\), \\(\\lambda_{vel} = 10.0\\), and \\(\\lambda_{lip} = 5.0\\)."
580
+ },
581
+ {
582
+ "type": "text",
583
+ "bbox": [
584
+ 0.5,
585
+ 0.454,
586
+ 0.878,
587
+ 0.469
588
+ ],
589
+ "angle": 0,
590
+ "content": "Reconstruction Loss: The reconstruction loss \\(\\mathcal{L}_{MSE}\\) is:"
591
+ },
592
+ {
593
+ "type": "equation",
594
+ "bbox": [
595
+ 0.584,
596
+ 0.479,
597
+ 0.891,
598
+ 0.521
599
+ ],
600
+ "angle": 0,
601
+ "content": "\\[\n\\mathcal {L} _ {M S E} = \\sum_ {v = 1} ^ {V} \\sum_ {t = 1} ^ {T _ {v}} \\left| \\left| y _ {t, v} - \\hat {y} _ {t, v} \\right| \\right| ^ {2}, \\tag {4}\n\\]"
602
+ },
603
+ {
604
+ "type": "text",
605
+ "bbox": [
606
+ 0.498,
607
+ 0.531,
608
+ 0.891,
609
+ 0.561
610
+ ],
611
+ "angle": 0,
612
+ "content": "where \\(y_{t,v}\\) is the ground truth mesh at time \\(t\\) in sequence \\(v\\) (of \\(V\\) total sequences) and \\(\\hat{y}_{t,v}\\) is the prediction."
613
+ },
614
+ {
615
+ "type": "text",
616
+ "bbox": [
617
+ 0.498,
618
+ 0.565,
619
+ 0.892,
620
+ 0.625
621
+ ],
622
+ "angle": 0,
623
+ "content": "Velocity Loss: Our motion decoder takes independent viseme features as input to produce facial expressions. To improve temporal consistency in the prediction, we introduce a velocity loss \\(\\mathcal{L}_{vel}\\) similar to [5]:"
624
+ },
625
+ {
626
+ "type": "equation",
627
+ "bbox": [
628
+ 0.509,
629
+ 0.636,
630
+ 0.891,
631
+ 0.677
632
+ ],
633
+ "angle": 0,
634
+ "content": "\\[\n\\mathcal {L} _ {v e l} = \\sum_ {v = 1} ^ {V} \\sum_ {t = 2} ^ {T _ {v}} | | (y _ {t, v} - y _ {t - 1, v}) - (\\hat {y} _ {t, v} - \\hat {y} _ {t - 1, v}) | | ^ {2}. \\tag {5}\n\\]"
635
+ },
636
+ {
637
+ "type": "text",
638
+ "bbox": [
639
+ 0.498,
640
+ 0.69,
641
+ 0.892,
642
+ 0.808
643
+ ],
644
+ "angle": 0,
645
+ "content": "Lip Contact Loss: Training with \\( L_{MSE} \\) guides the model to learn an averaged facial expression, thus resulting in improper lip closures. To this end, we introduce a novel lip contact loss for bilabial consonants ('m', 'b', 'p') to improve lip closures. Specifically, we automatically annotate the VOCAset to extract the occurrences of these consonants; see Section 4. Using this data, we define the following lip loss:"
646
+ },
647
+ {
648
+ "type": "equation",
649
+ "bbox": [
650
+ 0.584,
651
+ 0.809,
652
+ 0.891,
653
+ 0.85
654
+ ],
655
+ "angle": 0,
656
+ "content": "\\[\n\\mathcal {L} _ {l i p} = \\sum_ {t = 1} ^ {T} \\sum_ {j = 1} ^ {N} w _ {t} \\left\\| y _ {t, v} - \\hat {y} _ {t, v} \\right\\| ^ {2}, \\tag {6}\n\\]"
657
+ },
658
+ {
659
+ "type": "text",
660
+ "bbox": [
661
+ 0.498,
662
+ 0.856,
663
+ 0.891,
664
+ 0.9
665
+ ],
666
+ "angle": 0,
667
+ "content": "where \\( w_{t,v} \\) weights the prediction of frame \\( t \\) according to the annotation of the bilabial consonants. Specifically, \\( w_{t,v} \\) is one for frames with such consonants and zero otherwise."
668
+ },
669
+ {
670
+ "type": "page_number",
671
+ "bbox": [
672
+ 0.48,
673
+ 0.925,
674
+ 0.491,
675
+ 0.936
676
+ ],
677
+ "angle": 0,
678
+ "content": "4"
679
+ }
680
+ ],
681
+ [
682
+ {
683
+ "type": "text",
684
+ "bbox": [
685
+ 0.077,
686
+ 0.092,
687
+ 0.47,
688
+ 0.138
689
+ ],
690
+ "angle": 0,
691
+ "content": "Note that for such consonant frames, the target \\( y_{t,v} \\) represents a face with a closed mouth; thus, this loss improves lip closures at 'm', 'b' and 'p's (see Section 5)."
692
+ },
693
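The three training terms above (Eqs. 3-6) can be written down compactly. Below is a minimal PyTorch sketch for a single sequence, assuming `(T, V, 3)` vertex tensors, a per-frame weight vector for the bilabial-consonant frames, and a lip-vertex index set; the summation convention and the helper names are illustrative, not the authors' released code.

```python
import torch

def total_loss(pred, gt, frame_weights, lip_idx):
    """Eq. (3) for one sequence with the stated weights (1.0 / 10.0 / 5.0).
    pred, gt: (T, V, 3) predicted / tracked vertices; frame_weights: (T,) lip-loss weights;
    lip_idx: indices of the lip vertices (assumption: the lip loss is evaluated on the lip region)."""
    l_mse = ((pred - gt) ** 2).sum()                                      # Eq. (4)
    l_vel = (((pred[1:] - pred[:-1]) - (gt[1:] - gt[:-1])) ** 2).sum()    # Eq. (5)
    lip_err = ((pred[:, lip_idx] - gt[:, lip_idx]) ** 2).sum(dim=(1, 2))  # per-frame lip error
    l_lip = (frame_weights * lip_err).sum()                               # Eq. (6)
    return 1.0 * l_mse + 10.0 * l_vel + 5.0 * l_lip
```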
+ {
694
+ "type": "title",
695
+ "bbox": [
696
+ 0.078,
697
+ 0.148,
698
+ 0.245,
699
+ 0.166
700
+ ],
701
+ "angle": 0,
702
+ "content": "3.3. Style Adaptation"
703
+ },
704
+ {
705
+ "type": "text",
706
+ "bbox": [
707
+ 0.077,
708
+ 0.172,
709
+ 0.471,
710
+ 0.368
711
+ ],
712
+ "angle": 0,
713
+ "content": "Given a video of a new subject, we reconstruct and track the face \\(\\tilde{y}_{1:T}\\) (see Section 4). Based on this reference data, we first optimize for the speaker style-embedding \\(\\hat{S}\\) and then jointly refine the linear deformation basis using the \\(\\mathcal{L}_{MSE}\\) and \\(\\mathcal{L}_{vel}\\) loss. In our experiments, we found that this two-stage adaptation is essential for generalization to new audio inputs as it reuses the pretrained information of the motion decoder. As an initialization of the style embedding, we use a speaking style of the training set. We precompute all viseme features \\(\\hat{v}_{1:T}\\) once, and optimize the speaking style to reproduce the tracked faces \\(\\tilde{y}_{1:T}\\). We then refine the linear motion basis of the decoder to match the person-specific deformations (e.g., asymmetric lip motions)."
714
+ },
715
+ {
716
+ "type": "title",
717
+ "bbox": [
718
+ 0.077,
719
+ 0.383,
720
+ 0.166,
721
+ 0.399
722
+ ],
723
+ "angle": 0,
724
+ "content": "4. Dataset"
725
+ },
726
+ {
727
+ "type": "text",
728
+ "bbox": [
729
+ 0.076,
730
+ 0.409,
731
+ 0.47,
732
+ 0.573
733
+ ],
734
+ "angle": 0,
735
+ "content": "We train our method based on the VOCAset [5], which consists of 12 actors (6 female and 6 male) with 40 sequences each with a length of \\(3 - 5\\) seconds. The dataset comes with a train/test set split which we use in our experiments. The test set contains 2 actors. The dataset offers audio and high-quality 3D face reconstructions per frame (60fps). For our experiment, we sample the 3D face reconstructions at 30fps. We train the auto-regressive transformer on this data using the loss from Equation (3). For the lip contact loss \\(L_{lip}\\), we automatically compute the labels as described below."
736
+ },
737
+ {
738
+ "type": "text",
739
+ "bbox": [
740
+ 0.077,
741
+ 0.576,
742
+ 0.47,
743
+ 0.651
744
+ ],
745
+ "angle": 0,
746
+ "content": "To adapt the motion decoder to a new subject, we require a short video clip of the person. Using this sequence, we run a 3DMM-based face tracker to get the per-frame 3D shape of the person. Based on this data, we adapt the motion decoder as detailed in Section 3.3."
747
+ },
748
+ {
749
+ "type": "text",
750
+ "bbox": [
751
+ 0.077,
752
+ 0.656,
753
+ 0.47,
754
+ 0.806
755
+ ],
756
+ "angle": 0,
757
+ "content": "Automatic Lip Closure Labeling: For the VOCAsset, the transcript is available. Based on Wav2Vec features, we align the transcript with the audio track. As the lip closure is formed before we hear the bilabial consonants, we search for the lip closure in the tracked face geometry before the time-stamp of the occurrence of the consonants in the script. We show this process for a single sequence in Figure 3. The lip closure is detected by lip distance, i.e., the frame with minimal lip distance in a short time window before the consonant is assumed to be the lip closure."
758
+ },
759
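The automatic labeling amounts to a windowed minimum search on the per-frame lip-distance signal before each bilabial-consonant time stamp, followed by assigning Gaussian-shaped lip-loss weights around the detected closure (as mentioned in the Figure 3 caption below). A NumPy sketch; the window size and the Gaussian width are illustrative values, not the paper's.

```python
import numpy as np

def lip_closure_weights(lip_dist, consonant_frames, search_window=10, sigma=1.5):
    """lip_dist: (T,) lip distance per frame from the tracked meshes;
    consonant_frames: frame indices at which a bilabial consonant ('m', 'b', 'p') is heard.
    Returns per-frame lip-loss weights peaking at the detected lip closures."""
    T = len(lip_dist)
    weights = np.zeros(T)
    frames = np.arange(T)
    for f in consonant_frames:
        start = max(0, f - search_window)
        # local minimum of the lip distance shortly before the consonant time stamp
        closure = start + int(np.argmin(lip_dist[start:f + 1]))
        weights = np.maximum(weights, np.exp(-0.5 * ((frames - closure) / sigma) ** 2))
    return weights
```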
+ {
760
+ "type": "text",
761
+ "bbox": [
762
+ 0.077,
763
+ 0.811,
764
+ 0.471,
765
+ 0.902
766
+ ],
767
+ "angle": 0,
768
+ "content": "External Sequence Processing: We assume to have a monocular RGB video of about 2 minutes in length as input which we divide into train/validation/test sequences. Based on MICA [40], we estimate the 3D shape of the subject using the first frame of the video. Using this shape estimate, we run an analysis-by-synthesis approach [30] to"
769
+ },
770
+ {
771
+ "type": "image_caption",
772
+ "bbox": [
773
+ 0.592,
774
+ 0.092,
775
+ 0.791,
776
+ 0.102
777
+ ],
778
+ "angle": 0,
779
+ "content": "Words Spoken: BAGPIPE AND BONGOS"
780
+ },
781
+ {
782
+ "type": "image",
783
+ "bbox": [
784
+ 0.508,
785
+ 0.102,
786
+ 0.887,
787
+ 0.244
788
+ ],
789
+ "angle": 0,
790
+ "content": null
791
+ },
792
+ {
793
+ "type": "image",
794
+ "bbox": [
795
+ 0.508,
796
+ 0.245,
797
+ 0.575,
798
+ 0.321
799
+ ],
800
+ "angle": 0,
801
+ "content": null
802
+ },
803
+ {
804
+ "type": "image_caption",
805
+ "bbox": [
806
+ 0.562,
807
+ 0.334,
808
+ 0.593,
809
+ 0.343
810
+ ],
811
+ "angle": 0,
812
+ "content": "Audio"
813
+ },
814
+ {
815
+ "type": "image_footnote",
816
+ "bbox": [
817
+ 0.521,
818
+ 0.347,
819
+ 0.632,
820
+ 0.358
821
+ ],
822
+ "angle": 0,
823
+ "content": "Local Minimum search"
824
+ },
825
+ {
826
+ "type": "image_caption",
827
+ "bbox": [
828
+ 0.642,
829
+ 0.334,
830
+ 0.747,
831
+ 0.344
832
+ ],
833
+ "angle": 0,
834
+ "content": "GT Lip distance curve"
835
+ },
836
+ {
837
+ "type": "image_footnote",
838
+ "bbox": [
839
+ 0.641,
840
+ 0.347,
841
+ 0.752,
842
+ 0.358
843
+ ],
844
+ "angle": 0,
845
+ "content": "\\(\\times\\) - Detected consonants"
846
+ },
847
+ {
848
+ "type": "image_caption",
849
+ "bbox": [
850
+ 0.774,
851
+ 0.334,
852
+ 0.85,
853
+ 0.345
854
+ ],
855
+ "angle": 0,
856
+ "content": "Lip loss Weight"
857
+ },
858
+ {
859
+ "type": "image_footnote",
860
+ "bbox": [
861
+ 0.761,
862
+ 0.347,
863
+ 0.879,
864
+ 0.358
865
+ ],
866
+ "angle": 0,
867
+ "content": "\\(\\times\\) - Lip closure computed"
868
+ },
869
+ {
870
+ "type": "image_caption",
871
+ "bbox": [
872
+ 0.499,
873
+ 0.367,
874
+ 0.892,
875
+ 0.493
876
+ ],
877
+ "angle": 0,
878
+ "content": "Figure 3. Automatic labeling of the bilabial consonants ('m', 'b' and 'p') and their corresponding lip closures in a sequence of VOCAset [5]. We align the transcript with the audio track using Wav2vec [1] features and extract the time stamps for the bilabial consonants. To detect the lip closures for the bilabial consonants, we search for local-minima on the Lip distance curves (red). The lip loss weights \\( w_{t,v} \\) in a window around the detected lip closure are set to fixed values of a Gaussian function. We show an example of detected lip closures in the figure (in the blue bounding box)."
879
+ },
880
+ {
881
+ "type": "text",
882
+ "bbox": [
883
+ 0.499,
884
+ 0.512,
885
+ 0.892,
886
+ 0.618
887
+ ],
888
+ "angle": 0,
889
+ "content": "estimate per-frame blendshape parameters of the FLAME 3DMM [19]. Given these blendshape coefficients, we can compute the 3D vertices of the per-frame face meshes that we need to adapt the motion decoder. Note that in contrast to the training data of the transformer, we do not require any bilabial consonants labeling, as we adapt the motion decoder only based on the reconstruction and velocity loss."
890
+ },
891
+ {
892
+ "type": "title",
893
+ "bbox": [
894
+ 0.501,
895
+ 0.634,
896
+ 0.587,
897
+ 0.649
898
+ ],
899
+ "angle": 0,
900
+ "content": "5. Results"
901
+ },
902
+ {
903
+ "type": "text",
904
+ "bbox": [
905
+ 0.499,
906
+ 0.66,
907
+ 0.892,
908
+ 0.901
909
+ ],
910
+ "angle": 0,
911
+ "content": "To validate our method, we conducted a series of qualitative and quantitative evaluations, including a user study and ablation studies. For evaluation on the test set of VOCAset [5], we randomly sample 4 sequences from the test subjects' train set (each \\(\\sim\\) 5s long) and learn the speaking-style and facial idiosyncrasies of the subject via style adaptation. We compare our method to the state-of-the-art methods VOCA [5], Faceformer [10], and MeshTalk [21]. We use the original implementations of the authors. However, we found that MeshTalk cannot train on the comparably small VOCAset. Thus, we qualitatively compare against MeshTalk with their provided model trained on a large-scale proprietary dataset with 200 subjects and 40 sequences for each. Note that the pretrained MeshTalk model is not compatible with the FLAME topology; thus, we cannot evaluate their method on novel identities. In addition to the experi"
912
+ },
913
+ {
914
+ "type": "page_number",
915
+ "bbox": [
916
+ 0.481,
917
+ 0.925,
918
+ 0.49,
919
+ 0.937
920
+ ],
921
+ "angle": 0,
922
+ "content": "5"
923
+ }
924
+ ],
925
+ [
926
+ {
927
+ "type": "image_caption",
928
+ "bbox": [
929
+ 0.086,
930
+ 0.092,
931
+ 0.178,
932
+ 0.106
933
+ ],
934
+ "angle": 0,
935
+ "content": "Words spoken"
936
+ },
937
+ {
938
+ "type": "image_caption",
939
+ "bbox": [
940
+ 0.261,
941
+ 0.094,
942
+ 0.485,
943
+ 0.109
944
+ ],
945
+ "angle": 0,
946
+ "content": "So, I start talking now.... usually..."
947
+ },
948
+ {
949
+ "type": "image_caption",
950
+ "bbox": [
951
+ 0.594,
952
+ 0.094,
953
+ 0.851,
954
+ 0.108
955
+ ],
956
+ "angle": 0,
957
+ "content": "One of my favorite topics to discuss is ..."
958
+ },
959
+ {
960
+ "type": "image",
961
+ "bbox": [
962
+ 0.099,
963
+ 0.11,
964
+ 0.891,
965
+ 0.625
966
+ ],
967
+ "angle": 0,
968
+ "content": null
969
+ },
970
+ {
971
+ "type": "image_caption",
972
+ "bbox": [
973
+ 0.076,
974
+ 0.635,
975
+ 0.894,
976
+ 0.679
977
+ ],
978
+ "angle": 0,
979
+ "content": "Figure 4. Qualitative comparison to the state-of-the-art methods VOCA [5], Faceformer [10], and MeshTalk [21]. Note that MeshTalk is performed with a different identity since we use their pretrained model, which cannot be trained on VOCAset. As we see in the highlighted regions, the geometry of the generated sequences without the person-specific style have muted and inaccurate lip animations."
980
+ },
981
+ {
982
+ "type": "text",
983
+ "bbox": [
984
+ 0.076,
985
+ 0.695,
986
+ 0.47,
987
+ 0.725
988
+ ],
989
+ "angle": 0,
990
+ "content": "ments on the VOCAset, we show results on external RGB sequences. The results can be best seen in the suppl. video."
991
+ },
992
+ {
993
+ "type": "table_caption",
994
+ "bbox": [
995
+ 0.077,
996
+ 0.73,
997
+ 0.47,
998
+ 0.746
999
+ ],
1000
+ "angle": 0,
1001
+ "content": "Quantitative Evaluation: To quantitatively evaluate our"
1002
+ },
1003
+ {
1004
+ "type": "table",
1005
+ "bbox": [
1006
+ 0.08,
1007
+ 0.752,
1008
+ 0.466,
1009
+ 0.821
1010
+ ],
1011
+ "angle": 0,
1012
+ "content": "<table><tr><td>Method</td><td>L2face↓</td><td>L2lip↓</td><td>F-DTW↓</td><td>Lip-DTW↓</td><td>Lip-sync↓</td></tr><tr><td>VOCA [5]</td><td>0.88</td><td>0.15</td><td>1.28</td><td>2.41</td><td>5.72</td></tr><tr><td>Faceformer [10]</td><td>0.8</td><td>0.14</td><td>1.18</td><td>2.85</td><td>5.41</td></tr><tr><td>Ours (w/ 1seq)</td><td>0.91</td><td>0.1</td><td>1.3</td><td>1.68</td><td>3.99</td></tr><tr><td>Ours</td><td>0.89</td><td>0.09</td><td>1.26</td><td>1.47</td><td>3.78</td></tr></table>"
1013
+ },
1014
+ {
1015
+ "type": "table_footnote",
1016
+ "bbox": [
1017
+ 0.076,
1018
+ 0.827,
1019
+ 0.47,
1020
+ 0.898
1021
+ ],
1022
+ "angle": 0,
1023
+ "content": "Table 1. Quantitative results on the VOCAset [5]. Our method outperforms the baselines on all of the lip metrics while performing on par on the full-face metrics. Note that we are not targeting the animation of the upper face but aim for expressive and accurate lip movements, which is noticeable from the improved lip scores."
1024
+ },
1025
+ {
1026
+ "type": "text",
1027
+ "bbox": [
1028
+ 0.498,
1029
+ 0.695,
1030
+ 0.892,
1031
+ 0.893
1032
+ ],
1033
+ "angle": 0,
1034
+ "content": "method, we use the test set of VOCAset [5], which provides high-quality reference mesh reconstructions. We evaluate the performance of our method based on a mean \\( L_{2} \\) vertex distance for the entire mesh \\( L_{2}^{face} \\) and the lip region \\( L_{2}^{lip} \\). Following MeshTalk [21], we also compute the Lipsync, which measures the mean of the maximal per-frame lip distances. In addition, we use Dynamic Time Wrapping (DTW) to compute the similarity between the produced and reference meshes, both for the entire mesh (F-DTW) and the lip region (Lip-DTW). Since VOCA and Faceformer do not adapt to new user talking styles, we select the talking style from their training with the best quantitative metrics. Note that the pretrained MeshTalk model is not applicable to this"
1035
+ },
1036
+ {
1037
+ "type": "page_number",
1038
+ "bbox": [
1039
+ 0.48,
1040
+ 0.926,
1041
+ 0.492,
1042
+ 0.937
1043
+ ],
1044
+ "angle": 0,
1045
+ "content": "6"
1046
+ }
1047
+ ],
1048
+ [
1049
+ {
1050
+ "type": "image_caption",
1051
+ "bbox": [
1052
+ 0.137,
1053
+ 0.082,
1054
+ 0.232,
1055
+ 0.098
1056
+ ],
1057
+ "angle": 0,
1058
+ "content": "Words spoken"
1059
+ },
1060
+ {
1061
+ "type": "image_caption",
1062
+ "bbox": [
1063
+ 0.282,
1064
+ 0.082,
1065
+ 0.495,
1066
+ 0.096
1067
+ ],
1068
+ "angle": 0,
1069
+ "content": "His Failure to Open ... By Job."
1070
+ },
1071
+ {
1072
+ "type": "image_caption",
1073
+ "bbox": [
1074
+ 0.587,
1075
+ 0.082,
1076
+ 0.804,
1077
+ 0.098
1078
+ ],
1079
+ "angle": 0,
1080
+ "content": "Had Vinyl Technology Expand..."
1081
+ },
1082
+ {
1083
+ "type": "image",
1084
+ "bbox": [
1085
+ 0.129,
1086
+ 0.103,
1087
+ 0.849,
1088
+ 0.791
1089
+ ],
1090
+ "angle": 0,
1091
+ "content": null
1092
+ },
1093
+ {
1094
+ "type": "image_caption",
1095
+ "bbox": [
1096
+ 0.076,
1097
+ 0.805,
1098
+ 0.894,
1099
+ 0.876
1100
+ ],
1101
+ "angle": 0,
1102
+ "content": "Figure 5. Qualitative ablation comparison. At first, we show that our complete method with style and \\(\\mathcal{L}_{lip}\\) loss is able to generate personalized facial animation with expressive motion and accurate lip closures. Replacing the person-specific style with the style seen during training results in generic and muted facial animation. As highlighted in the per-vertex error maps (magenta), the generated expression is not similar to the target actor. Especially the facial deformations are missing person-specific details. Removing \\(\\mathcal{L}_{lip}\\) from the training objective results in improper lip closures (red)."
1103
+ },
1104
+ {
1105
+ "type": "page_number",
1106
+ "bbox": [
1107
+ 0.48,
1108
+ 0.925,
1109
+ 0.49,
1110
+ 0.936
1111
+ ],
1112
+ "angle": 0,
1113
+ "content": "7"
1114
+ }
1115
+ ],
1116
+ [
1117
+ {
1118
+ "type": "table",
1119
+ "bbox": [
1120
+ 0.08,
1121
+ 0.089,
1122
+ 0.468,
1123
+ 0.149
1124
+ ],
1125
+ "angle": 0,
1126
+ "content": "<table><tr><td>Method</td><td>Expressiveness (%)</td><td>Realism/Lip-sync (%)</td></tr><tr><td>Ours vs VOCA [5]</td><td>86.48</td><td>76.92</td></tr><tr><td>Ours vs Faceformer [10]</td><td>81.89</td><td>75.46</td></tr><tr><td>Ours vs Ground truth</td><td>20.28</td><td>42.30</td></tr></table>"
1127
+ },
1128
+ {
1129
+ "type": "table_caption",
1130
+ "bbox": [
1131
+ 0.077,
1132
+ 0.159,
1133
+ 0.47,
1134
+ 0.202
1135
+ ],
1136
+ "angle": 0,
1137
+ "content": "Table 2. In a perceptual A/B user study conducted on the test set of VOCAset [5] with 56 participants, we see that in comparison to VOCA [5] and Faceformer [10] our method is preferred."
1138
+ },
1139
+ {
1140
+ "type": "text",
1141
+ "bbox": [
1142
+ 0.076,
1143
+ 0.227,
1144
+ 0.468,
1145
+ 0.304
1146
+ ],
1147
+ "angle": 0,
1148
+ "content": "evaluation due to the identity mismatch. As can be seen in Table 1, our method achieves the lowest lip reconstruction and lip-sync errors, confirming our qualitative results. Even when using a single reference video for style adaptation (5s), our results show significantly better lip scores."
1149
+ },
1150
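For completeness, the lip metrics referenced above can be sketched as follows: a mean per-vertex L2 error (optionally restricted to the lip region), the lip-sync score as the mean over frames of the maximal per-frame lip vertex error, and a textbook dynamic-programming DTW on per-frame lip features. How exactly the original evaluation aggregates and normalizes these values is an assumption here.

```python
import numpy as np

def mean_l2(pred, gt, idx=None):
    # pred, gt: (T, V, 3); idx optionally restricts the error to the lip region
    if idx is not None:
        pred, gt = pred[:, idx], gt[:, idx]
    return np.linalg.norm(pred - gt, axis=-1).mean()

def lip_sync(pred, gt, lip_idx):
    # mean over frames of the maximal per-frame lip vertex error
    err = np.linalg.norm(pred[:, lip_idx] - gt[:, lip_idx], axis=-1)  # (T, |lip|)
    return err.max(axis=1).mean()

def dtw(a, b):
    # a: (Ta, D), b: (Tb, D) per-frame feature vectors, e.g. flattened lip vertices
    Ta, Tb = len(a), len(b)
    cost = np.full((Ta + 1, Tb + 1), np.inf)
    cost[0, 0] = 0.0
    for i in range(1, Ta + 1):
        for j in range(1, Tb + 1):
            d = np.linalg.norm(a[i - 1] - b[j - 1])
            cost[i, j] = d + min(cost[i - 1, j], cost[i - 1, j - 1], cost[i, j - 1])
    return cost[Ta, Tb] / (Ta + Tb)
```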
+ {
1151
+ "type": "text",
1152
+ "bbox": [
1153
+ 0.076,
1154
+ 0.306,
1155
+ 0.468,
1156
+ 0.472
1157
+ ],
1158
+ "angle": 0,
1159
+ "content": "Qualitative Evaluation: We conducted a qualitative evaluation on external sequences not part of VOCAset. In Figure 4, we show a series of frames from those sequences with the corresponding words. As we can see, our method is able to adapt to the speaking style of the respective subject. VOCA [5] and Faceformer [10] miss person-specific deformations and are not as expressive as our results. MeshTalk [21], which uses an identity that comes with the pretrained model, also shows dampened expressivity. In the suppl. video, we can observe that our method is generating better lip closures for bilabial consonants."
1160
+ },
1161
+ {
1162
+ "type": "text",
1163
+ "bbox": [
1164
+ 0.076,
1165
+ 0.476,
1166
+ 0.47,
1167
+ 0.779
1168
+ ],
1169
+ "angle": 0,
1170
+ "content": "Perceptual Evaluation: We conducted a perceptual evaluation to quantify the quality of our method's generated results (see Table 2). Specifically, we conducted an A/B user study on the test set of VOCAset. We randomly sample 10 sequences of the test subjects and run our method, VOCA, and Faceformer. For VOCA and Faceformer, which do not adapt to the style of a new user, we use the talking style of the training Subject 137, which provided the best quantitative results. We use 20 videos per method resulting in 60 A/B comparisons. For every A/B test, we ask the user to choose the best method based on realism and expressiveness, following the user study protocol of Faceformer [10]. In Table 2, we show the result of this study in which 56 people participated. We observe that our method consistently outperforms VOCA and Faceformer. We also see that our model achieves similar realism and lip-sync as ground truth. Note that the users in the perceptual study have not seen the original talking style of the actors before. However, the results show that our personalized synthesis leads to more realistic-looking animations."
1171
+ },
1172
+ {
1173
+ "type": "title",
1174
+ "bbox": [
1175
+ 0.077,
1176
+ 0.787,
1177
+ 0.242,
1178
+ 0.803
1179
+ ],
1180
+ "angle": 0,
1181
+ "content": "5.1. Ablation Studies"
1182
+ },
1183
+ {
1184
+ "type": "text",
1185
+ "bbox": [
1186
+ 0.076,
1187
+ 0.81,
1188
+ 0.468,
1189
+ 0.901
1190
+ ],
1191
+ "angle": 0,
1192
+ "content": "To understand the impact of our style adaptation and the novel lip contact loss \\(\\mathcal{L}_{lip}\\) on the perceptual quality, we show a qualitative ablation study including per-vertex error maps in Figure 5. As highlighted in the figure, the style adaptation is critical to match the person-specific deformations and mouth shapes and improves expressiveness."
1193
+ },
1194
+ {
1195
+ "type": "image",
1196
+ "bbox": [
1197
+ 0.503,
1198
+ 0.088,
1199
+ 0.892,
1200
+ 0.192
1201
+ ],
1202
+ "angle": 0,
1203
+ "content": null
1204
+ },
1205
+ {
1206
+ "type": "image_caption",
1207
+ "bbox": [
1208
+ 0.499,
1209
+ 0.202,
1210
+ 0.892,
1211
+ 0.271
1212
+ ],
1213
+ "angle": 0,
1214
+ "content": "Figure 6. Analysis of style adaptation in terms of lip distance on a test sequence of the VOCAset [5] (reference in red). Starting from an initial talking style from the training set (blue), we consecutively adapt the style code (green) and the motion basis of the motion decoder (purple)."
1215
+ },
1216
+ {
1217
+ "type": "text",
1218
+ "bbox": [
1219
+ 0.498,
1220
+ 0.308,
1221
+ 0.892,
1222
+ 0.46
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": "The lip contact loss improves the lip closures for the bilabial consonants, thus, improving the perceived realism, as can best be seen in the suppl. video. We rely on only \\(\\sim 60\\) seconds-long reference videos to extract the person-specific speaking style. A detailed analysis of the sequence length's influence on the final output quality can be found in the suppl. material. It is also worth noting that our style-agnostic architecture allows us to perform style adaptation of the motion decoder in less than \\(30\\mathrm{min}\\), while an adaptation with an identity-dependent transformer takes about \\(6\\mathrm{h}\\)."
1226
+ },
1227
+ {
1228
+ "type": "text",
1229
+ "bbox": [
1230
+ 0.498,
1231
+ 0.465,
1232
+ 0.892,
1233
+ 0.676
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": "Our proposed style adaptation has two stages as explained in Section 3.3. In the first step, we optimize for the style code and the refine the motion basis. In Figure 6, we show an example of the style adaptation by evaluating the lip distances throughout a sequence with a motion decoder at initialization, with optimized style code, and with a refined motion basis. While the lip distance with the generalized motion decoder is considerable, it gets significantly improved by the consecutive steps of style adaptation. After style code optimization, we observe that the amplitude and frequency of the lip distance curves start resembling the ground truth. Refining the motion basis further improves the lip distance, and it is able to capture facial idiosyncrasies, like asymmetrical lip deformations."
1237
+ },
1238
+ {
1239
+ "type": "title",
1240
+ "bbox": [
1241
+ 0.5,
1242
+ 0.705,
1243
+ 0.614,
1244
+ 0.72
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": "6. Discussion"
1248
+ },
1249
+ {
1250
+ "type": "text",
1251
+ "bbox": [
1252
+ 0.498,
1253
+ 0.735,
1254
+ 0.892,
1255
+ 0.901
1256
+ ],
1257
+ "angle": 0,
1258
+ "content": "Our evaluation shows that our proposed method outperforms state-of-the-art methods in perceived expressiveness and realism. However, several limitations remain. Specifically, we only support the speaking style of the subject seen in the reference video and do not control the talking style w.r.t. emotions (e.g., sad, happy, angry). The viseme transformer and the motion decoder could be conditioned on an emotion flag; we leave this for future work. The expressiveness and facial details depend on the face tracker's quality; if the face tracking is improved, our method will predict better face shapes."
1259
+ },
1260
+ {
1261
+ "type": "page_number",
1262
+ "bbox": [
1263
+ 0.481,
1264
+ 0.925,
1265
+ 0.49,
1266
+ 0.936
1267
+ ],
1268
+ "angle": 0,
1269
+ "content": "8"
1270
+ }
1271
+ ],
1272
+ [
1273
+ {
1274
+ "type": "title",
1275
+ "bbox": [
1276
+ 0.078,
1277
+ 0.09,
1278
+ 0.196,
1279
+ 0.107
1280
+ ],
1281
+ "angle": 0,
1282
+ "content": "7. Conclusion"
1283
+ },
1284
+ {
1285
+ "type": "text",
1286
+ "bbox": [
1287
+ 0.076,
1288
+ 0.116,
1289
+ 0.473,
1290
+ 0.283
1291
+ ],
1292
+ "angle": 0,
1293
+ "content": "We present Imitator, a novel approach for personalized speech-driven 3D facial animation. Based on a short reference video clip of a subject, we learn a personalized motion decoder driven by a generalized auto-regressive transformer that maps audio to intermediate viseme features. Our studies show that personalized facial animations are essential for the perceived realism of a generated sequence. Our new loss formulation for accurate lip closures of bilabial consonants further improves the results. We believe that personalized facial animations are a stepping stone towards audio-driven digital doubles."
1294
+ },
1295
+ {
1296
+ "type": "title",
1297
+ "bbox": [
1298
+ 0.077,
1299
+ 0.298,
1300
+ 0.266,
1301
+ 0.316
1302
+ ],
1303
+ "angle": 0,
1304
+ "content": "8. Acknowledgements"
1305
+ },
1306
+ {
1307
+ "type": "text",
1308
+ "bbox": [
1309
+ 0.076,
1310
+ 0.325,
1311
+ 0.47,
1312
+ 0.432
1313
+ ],
1314
+ "angle": 0,
1315
+ "content": "This project has received funding from the Mesh Labs, Microsoft, Cambridge, UK. Further, we would like to thank Berna Kabadayi, Jalees Nehvi, Malte Prinzler and Wojciech Zielonka for their support and valuable feedback. The authors thank the International Max Planck Research School for Intelligent Systems (IMPRS-IS) for supporting Balamurugan Thambiraja."
1316
+ },
1317
+ {
1318
+ "type": "title",
1319
+ "bbox": [
1320
+ 0.078,
1321
+ 0.447,
1322
+ 0.175,
1323
+ 0.463
1324
+ ],
1325
+ "angle": 0,
1326
+ "content": "References"
1327
+ },
1328
+ {
1329
+ "type": "ref_text",
1330
+ "bbox": [
1331
+ 0.085,
1332
+ 0.473,
1333
+ 0.47,
1334
+ 0.611
1335
+ ],
1336
+ "angle": 0,
1337
+ "content": "[1] Baevski, A., Zhou, Y., Mohamed, A., Auli, M.: wav2vec 2.0: A framework for self-supervised learning of speech representations. In: Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual (2020), https://proceedings.neurips.cc/paper/2020/hash/92d1e1eb1cd6f9fba3227870bb6d7f07-Abstract.html 3,5,12"
1338
+ },
1339
+ {
1340
+ "type": "ref_text",
1341
+ "bbox": [
1342
+ 0.087,
1343
+ 0.618,
1344
+ 0.472,
1345
+ 0.673
1346
+ ],
1347
+ "angle": 0,
1348
+ "content": "[2] Blanz, V., Vetter, T.: A morphable model for the synthesis of 3d faces. In: Proceedings of the 26th annual conference on Computer graphics and interactive techniques. pp. 187-194 (1999) 2"
1349
+ },
1350
+ {
1351
+ "type": "ref_text",
1352
+ "bbox": [
1353
+ 0.087,
1354
+ 0.68,
1355
+ 0.47,
1356
+ 0.749
1357
+ ],
1358
+ "angle": 0,
1359
+ "content": "[3] Cao, Y., Tien, W.C., Faloutsos, P., Pighin, F.: Expressive speech-driven facial animation. ACM Trans. Graph. 24(4), 1283-1302 (oct 2005). https://doi.org/10.1145/1095878.1095881, https://doi.org/10.1145/1095878.1095881 2"
1360
+ },
1361
+ {
1362
+ "type": "ref_text",
1363
+ "bbox": [
1364
+ 0.087,
1365
+ 0.755,
1366
+ 0.469,
1367
+ 0.784
1368
+ ],
1369
+ "angle": 0,
1370
+ "content": "[4] Chung, J.S., Jamaludin, A., Zisserman, A.: You said that? arXiv preprint arXiv:1705.02966 (2017) 2"
1371
+ },
1372
+ {
1373
+ "type": "ref_text",
1374
+ "bbox": [
1375
+ 0.087,
1376
+ 0.79,
1377
+ 0.47,
1378
+ 0.9
1379
+ ],
1380
+ "angle": 0,
1381
+ "content": "[5] Cudeiro, D., Bolkart, T., Laidlaw, C., Ranjan, A., Black, M.J.: Capture, Learning, and Synthesis of 3D Speaking Styles. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10093-10103. IEEE, Long Beach, CA, USA (Jun 2019). https://doi.org/10.1109/CVPR.2019.01034, https://ieeexplore.ieee.org/document/8954000/2,3,4,5,6,8"
1382
+ },
1383
+ {
1384
+ "type": "list",
1385
+ "bbox": [
1386
+ 0.085,
1387
+ 0.473,
1388
+ 0.472,
1389
+ 0.9
1390
+ ],
1391
+ "angle": 0,
1392
+ "content": null
1393
+ },
1394
+ {
1395
+ "type": "ref_text",
1396
+ "bbox": [
1397
+ 0.51,
1398
+ 0.093,
1399
+ 0.892,
1400
+ 0.176
1401
+ ],
1402
+ "angle": 0,
1403
+ "content": "[6] De Martino, J.M., Pini Magalhães, L., Violaro, F.: Facial animation based on context-dependent visemes. Computers & Graphics 30(6), 971-980 (Dec 2006). https://doi.org/10.1016/j.cag.2006.08.017, https : / / linkinghub . elsevier . com / retrieve/pii/S0097849306001518 2"
1404
+ },
1405
+ {
1406
+ "type": "ref_text",
1407
+ "bbox": [
1408
+ 0.51,
1409
+ 0.183,
1410
+ 0.894,
1411
+ 0.236
1412
+ ],
1413
+ "angle": 0,
1414
+ "content": "[7] Edwards, P., Landreth, C., Fume, E., Singh, K.: Jali: an animator-centric viseme model for expressive lip synchronization. ACM Trans. Graph. 35, 127:1-127:11 (2016) 2, 3"
1415
+ },
1416
+ {
1417
+ "type": "ref_text",
1418
+ "bbox": [
1419
+ 0.511,
1420
+ 0.246,
1421
+ 0.892,
1422
+ 0.315
1423
+ ],
1424
+ "angle": 0,
1425
+ "content": "[8] Egger, B., Smith, W.A., Tewari, A., Wuhrer, S., Zollhoefer, M., Beeler, T., Bernard, F., Bolkart, T., Kortylewski, A., Romdhani, S., et al.: 3d morphable face models—past, present, and future. ACM Transactions on Graphics (TOG) 39(5), 1-38 (2020) 2"
1426
+ },
1427
+ {
1428
+ "type": "ref_text",
1429
+ "bbox": [
1430
+ 0.511,
1431
+ 0.323,
1432
+ 0.892,
1433
+ 0.406
1434
+ ],
1435
+ "angle": 0,
1436
+ "content": "[9] Ezzat, T., Poggio, T.: MikeTalk: a talking facial display based on morphing visemes. In: Proceedings Computer Animation '98 (Cat. No.98EX169). pp. 96-102. IEEE Comput. Soc, Philadelphia, PA, USA (1998). https://doi.org/10.1109/CA.1998.681913, http://ieeexplore.ieee.org/document/681913/2"
1437
+ },
1438
+ {
1439
+ "type": "ref_text",
1440
+ "bbox": [
1441
+ 0.504,
1442
+ 0.413,
1443
+ 0.892,
1444
+ 0.468
1445
+ ],
1446
+ "angle": 0,
1447
+ "content": "[10] Fan, Y., Lin, Z., Saito, J., Wang, W., Komura, T.: Faceformer: Speech-driven 3d facial animation with transformers. CoRR abs/2112.05329 (2021), https://arxiv.org/abs/2112.05329 2, 3, 4, 5, 6, 8, 12, 13"
1448
+ },
1449
+ {
1450
+ "type": "ref_text",
1451
+ "bbox": [
1452
+ 0.503,
1453
+ 0.476,
1454
+ 0.892,
1455
+ 0.531
1456
+ ],
1457
+ "angle": 0,
1458
+ "content": "[11] Gafni, G., Thies, J., Zollhöfer, M., Nießner, M.: Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. CoRR abs/2012.03065 (2020), https://arxiv.org/abs/2012.030652"
1459
+ },
1460
+ {
1461
+ "type": "ref_text",
1462
+ "bbox": [
1463
+ 0.504,
1464
+ 0.539,
1465
+ 0.892,
1466
+ 0.594
1467
+ ],
1468
+ "angle": 0,
1469
+ "content": "[12] Guo, Y., Chen, K., Liang, S., Liu, Y., Bao, H., Zhang, J.: Ad-nerf: Audio driven neural radiance fields for talking head synthesis. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2021) 2"
1470
+ },
1471
+ {
1472
+ "type": "ref_text",
1473
+ "bbox": [
1474
+ 0.504,
1475
+ 0.602,
1476
+ 0.892,
1477
+ 0.657
1478
+ ],
1479
+ "angle": 0,
1480
+ "content": "[13] Hannun, A., Case, C., Casper, J., Catanzaro, B., Diamos, G., Elsen, E., Prenger, R., Satheesh, S., Sengupta, S., Coates, A., Y. Ng, A.: DeepSpeech: Scaling up end-to-end speech recognition (12 2014) 2, 3"
1481
+ },
1482
+ {
1483
+ "type": "ref_text",
1484
+ "bbox": [
1485
+ 0.504,
1486
+ 0.665,
1487
+ 0.892,
1488
+ 0.706
1489
+ ],
1490
+ "angle": 0,
1491
+ "content": "[14] Holden, D., Saito, J., Komura, T.: A deep learning framework for character motion synthesis and editing. ACM Transactions on Graphics (TOG) 35(4), 1-11 (2016) 2"
1492
+ },
1493
+ {
1494
+ "type": "ref_text",
1495
+ "bbox": [
1496
+ 0.504,
1497
+ 0.713,
1498
+ 0.892,
1499
+ 0.81
1500
+ ],
1501
+ "angle": 0,
1502
+ "content": "[15] Kalberer, G., Van Gool, L.: Face animation based on observed 3D speech dynamics. In: Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation (Cat. No.01TH8596). pp. 20-251. IEEE Comput. Soc, Seoul, South Korea (2001). https://doi.org/10.1109/CA.2001.982373, http://ieeexplore.ieee.org/document/982373/2"
1503
+ },
1504
+ {
1505
+ "type": "ref_text",
1506
+ "bbox": [
1507
+ 0.504,
1508
+ 0.818,
1509
+ 0.892,
1510
+ 0.9
1511
+ ],
1512
+ "angle": 0,
1513
+ "content": "[16] Karras, T., Aila, T., Laine, S., Herva, A., Lehtinen, J.: Audio-driven facial animation by joint end-to-end learning of pose and emotion. ACM Transactions on Graphics 36(4), 1-12 (Jul 2017). https://doi.org/10.1145/3072959.3073658, https://dl.acm.org/doi/10.1145/3072959.3073658 2, 3"
1514
+ },
1515
+ {
1516
+ "type": "list",
1517
+ "bbox": [
1518
+ 0.503,
1519
+ 0.093,
1520
+ 0.894,
1521
+ 0.9
1522
+ ],
1523
+ "angle": 0,
1524
+ "content": null
1525
+ },
1526
+ {
1527
+ "type": "page_number",
1528
+ "bbox": [
1529
+ 0.48,
1530
+ 0.925,
1531
+ 0.492,
1532
+ 0.937
1533
+ ],
1534
+ "angle": 0,
1535
+ "content": "9"
1536
+ }
1537
+ ],
1538
+ [
1539
+ {
1540
+ "type": "ref_text",
1541
+ "bbox": [
1542
+ 0.08,
1543
+ 0.092,
1544
+ 0.47,
1545
+ 0.162
1546
+ ],
1547
+ "angle": 0,
1548
+ "content": "[17] Lahiri, A., Kwatra, V., Frueh, C., Lewis, J., Bregler, C.: Lipsync3d: Data-efficient learning of personalized 3d talking faces from video using pose and lighting normalization (2021). https://doi.org/10.48550/ARXIV.2106.04185, https://arxiv.org/abs/2106.04185 2"
1549
+ },
1550
+ {
1551
+ "type": "ref_text",
1552
+ "bbox": [
1553
+ 0.08,
1554
+ 0.166,
1555
+ 0.472,
1556
+ 0.236
1557
+ ],
1558
+ "angle": 0,
1559
+ "content": "[18] Lee, J., Chai, J., Reitsma, P.S., Hodgins, J.K., Pollard, N.S.: Interactive control of avatars animated with human motion data. In: Proceedings of the 29th annual conference on Computer graphics and interactive techniques. pp. 491-500 (2002) 2"
1560
+ },
1561
+ {
1562
+ "type": "ref_text",
1563
+ "bbox": [
1564
+ 0.08,
1565
+ 0.24,
1566
+ 0.47,
1567
+ 0.309
1568
+ ],
1569
+ "angle": 0,
1570
+ "content": "[19] Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia) 36(6) (2017), https://doi.org/10.1145/3130800.31308135"
1571
+ },
1572
+ {
1573
+ "type": "ref_text",
1574
+ "bbox": [
1575
+ 0.08,
1576
+ 0.314,
1577
+ 0.47,
1578
+ 0.37
1579
+ ],
1580
+ "angle": 0,
1581
+ "content": "[20] Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: Learning dynamic renderable volumes from images. ACM Trans. Graph. 38(4), 65:1-65:14 (Jul 2019) 2"
1582
+ },
1583
+ {
1584
+ "type": "ref_text",
1585
+ "bbox": [
1586
+ 0.08,
1587
+ 0.374,
1588
+ 0.47,
1589
+ 0.485
1590
+ ],
1591
+ "angle": 0,
1592
+ "content": "[21] Richard, A., Zollhofer, M., Wen, Y., de la Torre, F., Sheikh, Y.: MeshTalk: 3D Face Animation from Speech using Cross-Modality Disentanglement. In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 1153-1162. IEEE, Montreal, QC, Canada (Oct 2021). https://doi.org/10.1109/ICCV48922.2021.00121, https://ieeexplore.ieee.org/document/9710491/2,3,5,6,8"
1593
+ },
1594
+ {
1595
+ "type": "ref_text",
1596
+ "bbox": [
1597
+ 0.08,
1598
+ 0.489,
1599
+ 0.47,
1600
+ 0.531
1601
+ ],
1602
+ "angle": 0,
1603
+ "content": "[22] Rössler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nießner, M.: Faceforensics++: Learning to detect manipulated facial images. ICCV 2019 (2019) 13"
1604
+ },
1605
+ {
1606
+ "type": "ref_text",
1607
+ "bbox": [
1608
+ 0.08,
1609
+ 0.535,
1610
+ 0.47,
1611
+ 0.66
1612
+ ],
1613
+ "angle": 0,
1614
+ "content": "[23] Schneider, S., Baevski, A., Collobert, R., Auli, M.: wav2vec: Unsupervised pre-training for speech recognition. In: Kubin, G., Kacic, Z. (eds.) Interspeech 2019, 20th Annual Conference of the International Speech Communication Association, Graz, Austria, 15-19 September 2019. pp. 3465-3469. ISCA (2019). https://doi.org/10.21437/Interspeech.2019-1873, https://doi.org/10.21437/Interspeech.2019-1873 2, 3"
1615
+ },
1616
+ {
1617
+ "type": "ref_text",
1618
+ "bbox": [
1619
+ 0.08,
1620
+ 0.665,
1621
+ 0.47,
1622
+ 0.707
1623
+ ],
1624
+ "angle": 0,
1625
+ "content": "[24] Song, L., Wu, W., Qian, C., He, R., Loy, C.C.: Everybody's talkin': Let me talk as you want. IEEE Transactions on Information Forensics and Security 17, 585-598 (2022) 2"
1626
+ },
1627
+ {
1628
+ "type": "ref_text",
1629
+ "bbox": [
1630
+ 0.08,
1631
+ 0.712,
1632
+ 0.47,
1633
+ 0.753
1634
+ ],
1635
+ "angle": 0,
1636
+ "content": "[25] Suwajanakorn, S., Seitz, S.M., Kemelmacher-Shlizerman, I.: Synthesizing america: learning lip sync from audio. ACM Transactions on Graphics (ToG) 36(4), 1-13 (2017) 2"
1637
+ },
1638
+ {
1639
+ "type": "ref_text",
1640
+ "bbox": [
1641
+ 0.08,
1642
+ 0.758,
1643
+ 0.47,
1644
+ 0.853
1645
+ ],
1646
+ "angle": 0,
1647
+ "content": "[26] Taylor, S.L., Kim, T., Yue, Y., Mahler, M., Krahe, J., Rodriguez, A.G., Hodgins, J.K., Matthews, I.A.: A deep learning approach for generalized speech animation. ACM Trans. Graph. 36(4), 93:1-93:11 (2017). https://doi.org/10.1145/3072959.3073699, https://doi.org/10.1145/3072959.3073699 2"
1648
+ },
1649
+ {
1650
+ "type": "ref_text",
1651
+ "bbox": [
1652
+ 0.08,
1653
+ 0.859,
1654
+ 0.47,
1655
+ 0.901
1656
+ ],
1657
+ "angle": 0,
1658
+ "content": "[27] Tewari, A., Thies, J., Mildenhall, B., Srinivasan, P., Tretschk, E., Wang, Y., Lassner, C., Sitzmann, V., Martin-Brualla, R., Lombardi, S., Simon, T., Theobalt, C., Niessner, M., Barron,"
1659
+ },
1660
+ {
1661
+ "type": "list",
1662
+ "bbox": [
1663
+ 0.08,
1664
+ 0.092,
1665
+ 0.472,
1666
+ 0.901
1667
+ ],
1668
+ "angle": 0,
1669
+ "content": null
1670
+ },
1671
+ {
1672
+ "type": "ref_text",
1673
+ "bbox": [
1674
+ 0.535,
1675
+ 0.093,
1676
+ 0.892,
1677
+ 0.121
1678
+ ],
1679
+ "angle": 0,
1680
+ "content": "J.T., Wetzstein, G., Zollhoefer, M., Golyanik, V.: Advances in neural rendering (2022) 1"
1681
+ },
1682
+ {
1683
+ "type": "ref_text",
1684
+ "bbox": [
1685
+ 0.503,
1686
+ 0.125,
1687
+ 0.894,
1688
+ 0.209
1689
+ ],
1690
+ "angle": 0,
1691
+ "content": "[28] Thies, J., Tewari, A., Fried, O., Sitzmann, V., Lombardi, S., Sunkavalli, K., Martin-Brualla, R., Simon, T., Saragih, J., Nießner, M., Pandey, R., Fanello, S., Wetzstein, G., Zhu, J.Y., Theobalt, C., Agrawala, M., Shechtman, E., Goldman, D.B., Zollhöfer, M.: State of the art on neural rendering. EG (2020) 1"
1692
+ },
1693
+ {
1694
+ "type": "ref_text",
1695
+ "bbox": [
1696
+ 0.503,
1697
+ 0.214,
1698
+ 0.892,
1699
+ 0.255
1700
+ ],
1701
+ "angle": 0,
1702
+ "content": "[29] Thies, J., Elgharib, M., Tewari, A., Theobalt, C., Nießner, M.: Neural voice puppetry: Audio-driven facial reenactment. ECCV 2020 (2020) 2"
1703
+ },
1704
+ {
1705
+ "type": "ref_text",
1706
+ "bbox": [
1707
+ 0.503,
1708
+ 0.26,
1709
+ 0.892,
1710
+ 0.329
1711
+ ],
1712
+ "angle": 0,
1713
+ "content": "[30] Thies, J., Zollhöfer, M., Stamminger, M., Theobalt, C., Nießner, M.: Face2face: Real-time face capture and reenactment of rgb videos (2020). https://doi.org/10.48550/ARXIV.2007.14808, https://arxiv.org/abs/2007.148085"
1714
+ },
1715
+ {
1716
+ "type": "ref_text",
1717
+ "bbox": [
1718
+ 0.503,
1719
+ 0.334,
1720
+ 0.892,
1721
+ 0.39
1722
+ ],
1723
+ "angle": 0,
1724
+ "content": "[31] Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in neural information processing systems 30 (2017) 3, 4, 12"
1725
+ },
1726
+ {
1727
+ "type": "ref_text",
1728
+ "bbox": [
1729
+ 0.503,
1730
+ 0.394,
1731
+ 0.892,
1732
+ 0.492
1733
+ ],
1734
+ "angle": 0,
1735
+ "content": "[32] Verma, A., Rajput, N., Subramaniam, L.: Using viseme based acoustic models for speech driven lip synthesis. In: 2003 IEEE International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings. (ICASSP '03). vol. 5, pp. V-720-3. IEEE, Hong Kong, China (2003). https://doi.org/10.1109/ICASSP.2003.1200072, http://ieeexplore.ieee.org/document/1200072/2"
1736
+ },
1737
+ {
1738
+ "type": "ref_text",
1739
+ "bbox": [
1740
+ 0.503,
1741
+ 0.497,
1742
+ 0.892,
1743
+ 0.538
1744
+ ],
1745
+ "angle": 0,
1746
+ "content": "[33] Vougioukas, K., Petridis, S., Pantic, M.: Realistic speech-driven facial animation with gans. International Journal of Computer Vision 128(5), 1398-1413 (2020) 2"
1747
+ },
1748
+ {
1749
+ "type": "ref_text",
1750
+ "bbox": [
1751
+ 0.503,
1752
+ 0.543,
1753
+ 0.892,
1754
+ 0.598
1755
+ ],
1756
+ "angle": 0,
1757
+ "content": "[34] Wang, S., Li, L., Ding, Y., Fan, C., Yu, X.: Audio2head: Audio-driven one-shot talking-head generation with natural head motion. In: International Joint Conference on Artificial Intelligence. IJCAI (2021) 2"
1758
+ },
1759
+ {
1760
+ "type": "ref_text",
1761
+ "bbox": [
1762
+ 0.503,
1763
+ 0.604,
1764
+ 0.892,
1765
+ 0.659
1766
+ ],
1767
+ "angle": 0,
1768
+ "content": "[35] Yao, S., Zhong, R., Yan, Y., Zhai, G., Yang, X.: Dfa-nerf: Personalized talking head generation via disentangled face attributes neural rendering. arXiv preprint arXiv:2201.00791 (2022) 2"
1769
+ },
1770
+ {
1771
+ "type": "ref_text",
1772
+ "bbox": [
1773
+ 0.503,
1774
+ 0.664,
1775
+ 0.892,
1776
+ 0.707
1777
+ ],
1778
+ "angle": 0,
1779
+ "content": "[36] Yi, R., Ye, Z., Zhang, J., Bao, H., Liu, Y.J.: Audio-driven talking face video generation with learning-based personalized head pose. arXiv preprint arXiv:2002.10137 (2020) 2"
1780
+ },
1781
+ {
1782
+ "type": "ref_text",
1783
+ "bbox": [
1784
+ 0.503,
1785
+ 0.711,
1786
+ 0.892,
1787
+ 0.78
1788
+ ],
1789
+ "angle": 0,
1790
+ "content": "[37] Zhang, Z., Li, L., Ding, Y., Fan, C.: Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3661–3670 (2021) 2"
1791
+ },
1792
+ {
1793
+ "type": "ref_text",
1794
+ "bbox": [
1795
+ 0.503,
1796
+ 0.785,
1797
+ 0.892,
1798
+ 0.841
1799
+ ],
1800
+ "angle": 0,
1801
+ "content": "[38] Zheng, Y., Abrevaya, V.F., Chen, X., Bühler, M.C., Black, M.J., Hilliges, O.: I M avatar: Implicit morphable head avatars from videos. CoRR abs/2112.07471 (2021), https://arxiv.org/abs/2112.07471 2"
1802
+ },
1803
+ {
1804
+ "type": "ref_text",
1805
+ "bbox": [
1806
+ 0.503,
1807
+ 0.846,
1808
+ 0.892,
1809
+ 0.901
1810
+ ],
1811
+ "angle": 0,
1812
+ "content": "[39] Zhou, Y., Han, X., Shechtman, E., Echevarria, J., Kalogerakis, E., Li, D.: Makelttalk: speaker-aware talking-head animation. ACM Transactions on Graphics (TOG) 39(6), 1-15 (2020) 2"
1813
+ },
1814
+ {
1815
+ "type": "list",
1816
+ "bbox": [
1817
+ 0.503,
1818
+ 0.093,
1819
+ 0.894,
1820
+ 0.901
1821
+ ],
1822
+ "angle": 0,
1823
+ "content": null
1824
+ },
1825
+ {
1826
+ "type": "page_number",
1827
+ "bbox": [
1828
+ 0.478,
1829
+ 0.925,
1830
+ 0.496,
1831
+ 0.937
1832
+ ],
1833
+ "angle": 0,
1834
+ "content": "10"
1835
+ }
1836
+ ],
1837
+ [
1838
+ {
1839
+ "type": "ref_text",
1840
+ "bbox": [
1841
+ 0.079,
1842
+ 0.092,
1843
+ 0.472,
1844
+ 0.148
1845
+ ],
1846
+ "angle": 0,
1847
+ "content": "[40] Zielonka, W., Bolkart, T., Thies, J.: Towards metrical reconstruction of human faces. ECCV (2022). https://doi.org/10.48550/ARXIV.2204.06607, https://arxiv.org/abs/2204.066075"
1848
+ },
1849
+ {
1850
+ "type": "page_number",
1851
+ "bbox": [
1852
+ 0.478,
1853
+ 0.925,
1854
+ 0.493,
1855
+ 0.936
1856
+ ],
1857
+ "angle": 0,
1858
+ "content": "11"
1859
+ }
1860
+ ],
1861
+ [
1862
+ {
1863
+ "type": "title",
1864
+ "bbox": [
1865
+ 0.19,
1866
+ 0.1,
1867
+ 0.783,
1868
+ 0.138
1869
+ ],
1870
+ "angle": 0,
1871
+ "content": "Imitator: Personalized Speech-driven 3D Facial Animation - Supplemental Document -"
1872
+ },
1873
+ {
1874
+ "type": "title",
1875
+ "bbox": [
1876
+ 0.077,
1877
+ 0.185,
1878
+ 0.403,
1879
+ 0.203
1880
+ ],
1881
+ "angle": 0,
1882
+ "content": "9. Impact of Data to Style Adaptation:"
1883
+ },
1884
+ {
1885
+ "type": "text",
1886
+ "bbox": [
1887
+ 0.076,
1888
+ 0.21,
1889
+ 0.47,
1890
+ 0.36
1891
+ ],
1892
+ "angle": 0,
1893
+ "content": "To analyze the impact of data on the style adaptation process, we randomly sample (1, 4, 10, 20) sequences from the train set of the VOCA test subjects and perform our style adaption. Each sequence contains about \\(3 - 5\\) seconds of data. In Table 3, we observe that the performance on the quantitative metrics increase with the number of reference sequences. As mentioned in the main paper, even an adaptation based on a single sequence results in a significantly better animation in comparison to the baseline methods. This highlights the impact of style on the generated animations."
1894
+ },
1895
+ {
1896
+ "type": "text",
1897
+ "bbox": [
1898
+ 0.077,
1899
+ 0.362,
1900
+ 0.469,
1901
+ 0.406
1902
+ ],
1903
+ "angle": 0,
1904
+ "content": "Figure 7 illustrates the lip distance curve for one test sequence used in this study. We observe that the lip distance with more reference data better fits the ground truth curve."
1905
+ },
1906
+ {
1907
+ "type": "table",
1908
+ "bbox": [
1909
+ 0.08,
1910
+ 0.42,
1911
+ 0.468,
1912
+ 0.494
1913
+ ],
1914
+ "angle": 0,
1915
+ "content": "<table><tr><td>No. Seq.</td><td>L2face↓</td><td>L2lip↓</td><td>F-DTW↓</td><td>Lip-DTW↓</td><td>Lip-sync↓</td></tr><tr><td>1</td><td>0.91</td><td>0.1</td><td>1.3</td><td>1.68</td><td>3.99</td></tr><tr><td>4</td><td>0.89</td><td>0.1</td><td>1.26</td><td>1.47</td><td>3.78</td></tr><tr><td>10</td><td>0.76</td><td>0.09</td><td>1.07</td><td>1.37</td><td>3.57</td></tr><tr><td>20</td><td>0.7</td><td>0.09</td><td>0.99</td><td>1.27</td><td>3.49</td></tr></table>"
1916
+ },
1917
+ {
1918
+ "type": "table_caption",
1919
+ "bbox": [
1920
+ 0.076,
1921
+ 0.504,
1922
+ 0.47,
1923
+ 0.547
1924
+ ],
1925
+ "angle": 0,
1926
+ "content": "Table 3. Ablation of the style adaptation w.r.t. the amount of reference sequences used. With an increasing number of data, the quantitative metrics improve. Each sequence is \\(3 - 5\\mathrm{s}\\) long."
1927
+ },
1928
+ {
1929
+ "type": "image",
1930
+ "bbox": [
1931
+ 0.082,
1932
+ 0.568,
1933
+ 0.466,
1934
+ 0.685
1935
+ ],
1936
+ "angle": 0,
1937
+ "content": null
1938
+ },
1939
+ {
1940
+ "type": "image_caption",
1941
+ "bbox": [
1942
+ 0.076,
1943
+ 0.699,
1944
+ 0.47,
1945
+ 0.743
1946
+ ],
1947
+ "angle": 0,
1948
+ "content": "Figure 7. With an increasing number of reference data samples for style adaptation, the lip distance throughout a test sequence of VOCAset is approaching the ground truth lip distance curve."
1949
+ },
1950
+ {
1951
+ "type": "title",
1952
+ "bbox": [
1953
+ 0.078,
1954
+ 0.762,
1955
+ 0.281,
1956
+ 0.779
1957
+ ],
1958
+ "angle": 0,
1959
+ "content": "10. Architecture Details"
1960
+ },
1961
+ {
1962
+ "type": "title",
1963
+ "bbox": [
1964
+ 0.078,
1965
+ 0.787,
1966
+ 0.245,
1967
+ 0.803
1968
+ ],
1969
+ "angle": 0,
1970
+ "content": "10.1. Audio Encoder:"
1971
+ },
1972
+ {
1973
+ "type": "text",
1974
+ "bbox": [
1975
+ 0.076,
1976
+ 0.81,
1977
+ 0.47,
1978
+ 0.901
1979
+ ],
1980
+ "angle": 0,
1981
+ "content": "Similar to Faceformer [10], our audio encoder is built upon the Wav2Vec 2.0 [1] architecture to extract temporal audio features. These audio features are fed into a linear interpolation layer to convert the audio frequency to the motion frequency. The interpolated outputs are then fed into 12 identical transformer encoder layers with 12 attention heads"
1982
+ },
1983
+ {
1984
+ "type": "text",
1985
+ "bbox": [
1986
+ 0.498,
1987
+ 0.186,
1988
+ 0.892,
1989
+ 0.232
1990
+ ],
1991
+ "angle": 0,
1992
+ "content": "and an output dimension of 768. A final linear projection layer converts the audio features from the 768-dimension features to a 64-dimensional phoneme representation."
1993
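The interpolation-plus-projection step described above can be sketched as below: one 768-dimensional wav2vec 2.0 feature per audio frame is linearly resampled to the 30 fps motion rate and then projected to 64 dimensions. Loading the actual wav2vec 2.0 encoder is omitted; the helper names are illustrative.

```python
import torch
import torch.nn.functional as F

proj = torch.nn.Linear(768, 64)   # final projection to the 64-dim phoneme representation

def resample_to_motion_rate(audio_feats, n_motion_frames):
    # audio_feats: (T_audio, 768) -> (n_motion_frames, 768), linear interpolation along time
    x = audio_feats.t().unsqueeze(0)                                   # (1, 768, T_audio)
    x = F.interpolate(x, size=n_motion_frames, mode="linear", align_corners=True)
    return x.squeeze(0).t()

# usage (wav2vec 2.0 feature extraction omitted):
# audio_feats = wav2vec2(waveform)                                      # (T_audio, 768)
# phonemes = proj(resample_to_motion_rate(audio_feats, n_motion_frames))  # (T, 64)
```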
+ },
1994
+ {
1995
+ "type": "title",
1996
+ "bbox": [
1997
+ 0.5,
1998
+ 0.242,
1999
+ 0.798,
2000
+ 0.258
2001
+ ],
2002
+ "angle": 0,
2003
+ "content": "10.2. Auto-regressive Viseme Decoder:"
2004
+ },
2005
+ {
2006
+ "type": "text",
2007
+ "bbox": [
2008
+ 0.498,
2009
+ 0.265,
2010
+ 0.892,
2011
+ 0.386
2012
+ ],
2013
+ "angle": 0,
2014
+ "content": "Our auto-regressive viseme decoder is built on top of traditional transformer decoder layers [31]. We use a zero vector of 64-dimension as a start token to indicate the start of sequence synthesis. We first add a positional encoding of 64-dimension to the input feature and fed it to decoder layers in the viseme decoder. For self-attention and cross-modal multi-head attention, we use 4 heads of dimension 64. Our feed forward layer dimension is 128."
2015
+ },
2016
+ {
2017
+ "type": "text",
2018
+ "bbox": [
2019
+ 0.498,
2020
+ 0.389,
2021
+ 0.893,
2022
+ 0.542
2023
+ ],
2024
+ "angle": 0,
2025
+ "content": "Multi-Head Self-Attention: Given a sequence of positional encoded inputs \\(\\hat{h}_t\\), we use multi-head self-attention (self-MHA), which generates the context representation of the inputs by weighting the inputs based on their relevance. The Scaled Dot-Product attention function can be defined as mapping a query and a set of key-value pairs to an output, where queries, keys, values and outputs are vectors [31]. The output is the weighted sum of the values; the weight is computed by a compatibility function of a query with the corresponding key. The attention can be formulated as:"
2026
+ },
2027
+ {
2028
+ "type": "equation",
2029
+ "bbox": [
2030
+ 0.574,
2031
+ 0.552,
2032
+ 0.892,
2033
+ 0.587
2034
+ ],
2035
+ "angle": 0,
2036
+ "content": "\\[\n\\operatorname {A t t e n t i o n} (Q, K, V) = \\sigma \\left(\\frac {Q K ^ {T}}{\\sqrt {d _ {k}}}\\right) V, \\tag {7}\n\\]"
2037
+ },
2038
+ {
2039
+ "type": "text",
2040
+ "bbox": [
2041
+ 0.498,
2042
+ 0.596,
2043
+ 0.892,
2044
+ 0.716
2045
+ ],
2046
+ "angle": 0,
2047
+ "content": "where \\( Q, K, V \\) are the learned Queries, Keys and Values, \\( \\sigma(\\cdot) \\) denotes the softmax activation function, and \\( d_k \\) is the dimension of the keys. Instead of using a single attention mechanism and generating one context representation, MHA uses multiple self-attention heads to jointly generate multiple context representations and attend to the information in the different context representations at different positions. MHA is formulated as follows:"
2048
+ },
2049
+ {
2050
+ "type": "equation",
2051
+ "bbox": [
2052
+ 0.542,
2053
+ 0.729,
2054
+ 0.892,
2055
+ 0.746
2056
+ ],
2057
+ "angle": 0,
2058
+ "content": "\\[\nM H A (Q, K, V) = \\left[ h e a d _ {1}, \\dots , h e a d _ {h} \\right] \\cdot W ^ {O}, \\tag {8}\n\\]"
2059
+ },
2060
+ {
2061
+ "type": "text",
2062
+ "bbox": [
2063
+ 0.498,
2064
+ 0.758,
2065
+ 0.891,
2066
+ 0.806
2067
+ ],
2068
+ "angle": 0,
2069
+ "content": "with \\( head_{i} = \\text{Attention}(QW_{i}^{Q}, KW_{i}^{K}, VW_{i}^{V}) \\), where \\( W^{O}, W_{i}^{Q}, W_{i}^{K}, W_{i}^{V} \\) are weights related to each input variable."
2070
+ },
2071
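Eqs. (7) and (8) are the standard transformer attention of Vaswani et al. [31]. A compact PyTorch sketch is given below; splitting the projected features into per-head slices is an equivalent formulation of the per-head projection matrices, and the default dimensions (64-dim model, 4 heads) follow the decoder configuration stated above.

```python
import math
import torch

def attention(q, k, v, bias=None):
    # Eq. (7): softmax(Q K^T / sqrt(d_k)) V, optionally with an additive bias on the scores
    scores = q @ k.transpose(-2, -1) / math.sqrt(q.shape[-1])
    if bias is not None:
        scores = scores + bias
    return torch.softmax(scores, dim=-1) @ v

class MultiHeadAttention(torch.nn.Module):
    # Eq. (8): h heads computed in parallel, concatenated and projected by W^O
    def __init__(self, d_model=64, n_heads=4):
        super().__init__()
        self.n_heads, self.d_head = n_heads, d_model // n_heads
        self.wq = torch.nn.Linear(d_model, d_model)
        self.wk = torch.nn.Linear(d_model, d_model)
        self.wv = torch.nn.Linear(d_model, d_model)
        self.wo = torch.nn.Linear(d_model, d_model)

    def forward(self, q_in, kv_in, bias=None):
        q, k, v = self.wq(q_in), self.wk(kv_in), self.wv(kv_in)
        heads = []
        for i in range(self.n_heads):
            s = slice(i * self.d_head, (i + 1) * self.d_head)
            heads.append(attention(q[..., s], k[..., s], v[..., s], bias))
        return self.wo(torch.cat(heads, dim=-1))
```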
+ {
2072
+ "type": "text",
2073
+ "bbox": [
2074
+ 0.498,
2075
+ 0.81,
2076
+ 0.892,
2077
+ 0.901
2078
+ ],
2079
+ "angle": 0,
2080
+ "content": "Audio-Motion Multi-Head Attention The Audio-Motion Multi-Head attention aims to map the context representations from the audio encoder to the viseme representations by learning the alignment between the audio and style-agnostic viseme features. The decoder queries all the existing viseme features with the encoded audio features, which"
2081
+ },
2082
+ {
2083
+ "type": "page_number",
2084
+ "bbox": [
2085
+ 0.477,
2086
+ 0.925,
2087
+ 0.496,
2088
+ 0.937
2089
+ ],
2090
+ "angle": 0,
2091
+ "content": "12"
2092
+ }
2093
+ ],
2094
+ [
2095
+ {
2096
+ "type": "text",
2097
+ "bbox": [
2098
+ 0.076,
2099
+ 0.092,
2100
+ 0.473,
2101
+ 0.184
2102
+ ],
2103
+ "angle": 0,
2104
+ "content": "carry both the positional information and the contextual information, thus, resulting in audio context-injected viseme features. Similar to Faceformer [10], we add an alignment bias along the diagonal to the query-key attention score to add more weight to the current time audio features. The alignment bias \\( B^{A}(1 \\leq i \\leq t, 1 \\leq j \\leq KT) \\) is:"
2105
+ },
2106
+ {
2107
+ "type": "equation",
2108
+ "bbox": [
2109
+ 0.167,
2110
+ 0.193,
2111
+ 0.47,
2112
+ 0.233
2113
+ ],
2114
+ "angle": 0,
2115
+ "content": "\\[\nB ^ {A} (i, j) = \\left\\{ \\begin{array}{l l} 0 & \\text {i f} (i = j), \\\\ - \\infty & \\text {o t h e r w i s e .} \\end{array} \\right. \\tag {9}\n\\]"
2116
+ },
2117
+ {
2118
+ "type": "text",
2119
+ "bbox": [
2120
+ 0.078,
2121
+ 0.243,
2122
+ 0.452,
2123
+ 0.258
2124
+ ],
2125
+ "angle": 0,
2126
+ "content": "The modified Audio-Motion Attention is represented as:"
2127
+ },
2128
+ {
2129
+ "type": "equation",
2130
+ "bbox": [
2131
+ 0.082,
2132
+ 0.267,
2133
+ 0.469,
2134
+ 0.313
2135
+ ],
2136
+ "angle": 0,
2137
+ "content": "\\[\n\\operatorname {A t t e n t i o n} \\left(Q ^ {v}, K ^ {a}, V ^ {a}, B ^ {A}\\right) = \\sigma \\left(\\frac {Q ^ {v} \\left(K ^ {a}\\right) ^ {T}}{\\sqrt {d _ {k}}} + B ^ {A}\\right) V ^ {a}, \\tag {10}\n\\]"
2138
+ },
2139
+ {
2140
+ "type": "text",
2141
+ "bbox": [
2142
+ 0.077,
2143
+ 0.314,
2144
+ 0.47,
2145
+ 0.375
2146
+ ],
2147
+ "angle": 0,
2148
+ "content": "where \\( Q^v \\) are the learned queries from viseme features, \\( K^a \\) the keys and \\( V^a \\) the values from the audio features, \\( \\sigma(\\cdot) \\) is the softmax activation function, and \\( d_k \\) is the dimension of the keys."
2149
+ },
2150
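A sketch of the alignment bias of Eq. (9) and the biased cross-attention of Eq. (10), under the assumption that the audio features have already been resampled to one feature per motion frame, so the bias is simply zero on the diagonal and -inf elsewhere (each query row then has at least one finite score).

```python
import math
import torch

def alignment_bias(t, kt):
    # Eq. (9): 0 where i == j, -inf otherwise
    bias = torch.full((t, kt), float("-inf"))
    idx = torch.arange(min(t, kt))
    bias[idx, idx] = 0.0
    return bias

def audio_motion_attention(q_viseme, k_audio, v_audio):
    # Eq. (10): cross-attention from viseme queries to audio keys/values with the alignment bias
    bias = alignment_bias(q_viseme.shape[0], k_audio.shape[0])
    scores = q_viseme @ k_audio.transpose(-2, -1) / math.sqrt(q_viseme.shape[-1])
    return torch.softmax(scores + bias, dim=-1) @ v_audio
```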
+ {
2151
+ "type": "title",
2152
+ "bbox": [
2153
+ 0.078,
2154
+ 0.383,
2155
+ 0.254,
2156
+ 0.398
2157
+ ],
2158
+ "angle": 0,
2159
+ "content": "10.3. Motion Decoder:"
2160
+ },
2161
+ {
2162
+ "type": "text",
2163
+ "bbox": [
2164
+ 0.076,
2165
+ 0.406,
2166
+ 0.473,
2167
+ 0.633
2168
+ ],
2169
+ "angle": 0,
2170
+ "content": "The motion decoder aims to generate 3D facial animations \\(\\hat{y}_{1:T}\\) from the style-agnostic viseme features \\(\\hat{v}_{1:T}\\) and a style embedding \\(\\hat{S}_i\\). Specifically, our motion decoder consists of two components, a style embedding layer and a motion synthesis block. The style linear layer takes a one-hot encoder of 8-dimension and produces a style-embedding of 64-dimension. The input viseme features are concatenated with the style-embedding and fed into 4 successive linear layers which have a leaky-ReLU as activation. The output dimension of the 4-layer block is 64 dimensional. A final fully connected layer maps the 64-dimension input features to the 3D face deformation described as per-vertex displacements of size 15069. This layer is defining the motion deformation basis of a subject and is adapted based on a reference sequence."
2171
+ },
2172
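The dimensions above translate into a small MLP. The following PyTorch sketch mirrors the description (8-dim one-hot style to a 64-dim embedding, concatenation with the 64-dim viseme feature, four leaky-ReLU layers ending in 64 dimensions, and a final linear layer to 15069 = 5023 x 3 per-vertex displacements added to the template). The hidden width of the intermediate layers and the bias settings are assumptions, not values stated in the paper.

```python
import torch
import torch.nn as nn

class MotionDecoder(nn.Module):
    def __init__(self, n_styles=8, viseme_dim=64, style_dim=64, n_vertices=5023):
        super().__init__()
        self.style_embedding = nn.Linear(n_styles, style_dim, bias=False)  # one-hot -> style code
        layers, d_in = [], viseme_dim + style_dim
        for _ in range(4):                                                 # 4 non-linear layers
            layers += [nn.Linear(d_in, 64), nn.LeakyReLU()]
            d_in = 64
        self.synthesis = nn.Sequential(*layers)
        # final layer: linear deformation basis producing per-vertex displacements (5023 * 3 = 15069)
        self.deformation_basis = nn.Linear(64, n_vertices * 3)

    def forward(self, visemes, style_onehot, template):
        # visemes: (T, 64); style_onehot: (n_styles,); template: (n_vertices, 3)
        style = self.style_embedding(style_onehot).expand(visemes.shape[0], -1)
        feats = self.synthesis(torch.cat([visemes, style], dim=-1))
        offsets = self.deformation_basis(feats).view(visemes.shape[0], -1, 3)
        return template.unsqueeze(0) + offsets
```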
+ {
2173
+ "type": "text",
2174
+ "bbox": [
2175
+ 0.076,
2176
+ 0.636,
2177
+ 0.47,
2178
+ 0.818
2179
+ ],
2180
+ "angle": 0,
2181
+ "content": "Training Details: We use the ADAM optimizer with a learning rate of 1e-4 for both the style-agnostic transformer training and the style adaptation stage. During the style-agnostic transformer training, the parameters of the Wave2Vec 2.0 layers in the audio encoder are fixed. Our model is trained for 300 epochs, and the best model is chosen based on the validation reconstruction loss. During the style-adaptation stage, we first generate the viseme features and keep them fixed during the style adaptation stage. Then, we optimize for the style embedding for 300 epochs. Finally, the style-embedding and final motion deformation basis is refined for another 300 epochs."
2182
+ },
2183
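The two optimization stages described above can be sketched as follows, reusing the `MotionDecoder` from the previous sketch: stage one optimizes only the style embedding, stage two additionally refines the final deformation basis, both with Adam at learning rate 1e-4 on the reconstruction and velocity losses. Treating the style as a free 64-dim vector (rather than a one-hot input) via the hypothetical `decode_with_style` helper, and the exact loss weighting, are assumptions for illustration.

```python
import torch

def recon_and_velocity_loss(pred, gt):
    l_mse = ((pred - gt) ** 2).sum()
    l_vel = (((pred[1:] - pred[:-1]) - (gt[1:] - gt[:-1])) ** 2).sum()
    return l_mse + 10.0 * l_vel

def adapt_to_subject(decoder, visemes, tracked, template, init_style, epochs=300):
    """visemes: (T, 64) precomputed and kept fixed; tracked: (T, V, 3) meshes from the face tracker."""
    def decode_with_style(style):
        # hypothetical entry point that bypasses the one-hot style layer of the pretrained decoder
        feats = decoder.synthesis(torch.cat([visemes, style.expand(visemes.shape[0], -1)], dim=-1))
        offsets = decoder.deformation_basis(feats).view(visemes.shape[0], -1, 3)
        return template.unsqueeze(0) + offsets

    style = torch.nn.Parameter(init_style.clone())
    # stage 1: optimize the style embedding only
    opt = torch.optim.Adam([style], lr=1e-4)
    for _ in range(epochs):
        opt.zero_grad()
        recon_and_velocity_loss(decode_with_style(style), tracked).backward()
        opt.step()
    # stage 2: jointly refine the style and the final deformation basis
    opt = torch.optim.Adam([style] + list(decoder.deformation_basis.parameters()), lr=1e-4)
    for _ in range(epochs):
        opt.zero_grad()
        recon_and_velocity_loss(decode_with_style(style), tracked).backward()
        opt.step()
    return style
```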
+ {
2184
+ "type": "title",
2185
+ "bbox": [
2186
+ 0.078,
2187
+ 0.831,
2188
+ 0.248,
2189
+ 0.848
2190
+ ],
2191
+ "angle": 0,
2192
+ "content": "11. Broader Impact"
2193
+ },
2194
+ {
2195
+ "type": "text",
2196
+ "bbox": [
2197
+ 0.076,
2198
+ 0.856,
2199
+ 0.471,
2200
+ 0.903
2201
+ ],
2202
+ "angle": 0,
2203
+ "content": "Our proposed method aims at the synthesis of realistic-looking 3D facial animations. Ultimately, these animations can be used to drive photo-realistic digital doubles of people"
2204
+ },
2205
+ {
2206
+ "type": "text",
2207
+ "bbox": [
2208
+ 0.498,
2209
+ 0.092,
2210
+ 0.892,
2211
+ 0.272
2212
+ ],
2213
+ "angle": 0,
2214
+ "content": "in audio-driven immersive telepresence applications in AR or VR. However, this technology can also be misused for so-called DeepFakes. Given a voice cloning approach, our method could generate 3D facial animations that drive an image synthesis method. This can lead to identity theft, cyber mobbing, or other harmful criminal acts. We believe that conducting research openly and transparently could raise the awareness of the misuse of such technology. We will share our implementation to enable research on digital multi-media forensics. Specifically, synthesis methods are needed to produce the training data for forgery detection [22]."
2215
+ },
2216
+ {
2217
+ "type": "text",
2218
+ "bbox": [
2219
+ 0.499,
2220
+ 0.273,
2221
+ 0.892,
2222
+ 0.304
2223
+ ],
2224
+ "angle": 0,
2225
+ "content": "All participants in the study have given written consent to the usage of their video material for this publication."
2226
+ },
2227
+ {
2228
+ "type": "page_number",
2229
+ "bbox": [
2230
+ 0.477,
2231
+ 0.925,
2232
+ 0.495,
2233
+ 0.937
2234
+ ],
2235
+ "angle": 0,
2236
+ "content": "13"
2237
+ }
2238
+ ]
2239
+ ]
2301.00xxx/2301.00023/4635d162-e5d4-4ff9-83b6-a0e11e214a21_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:530a1729524890c3f3f1fcc0442ef11c8e4124cc6c382b19bb59516b6224bbed
3
+ size 11489177
2301.00xxx/2301.00023/full.md ADDED
@@ -0,0 +1,326 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Imitator: Personalized Speech-driven 3D Facial Animation
2
+
3
+ Balamurugan Thambiraja<sup>1</sup>
4
+ Darren Cosker<sup>3</sup>
5
+
6
+ Ikhsanul Habibie<sup>2</sup>
7
+ Christian Theobalt<sup>2</sup>
8
+
9
+ Sadegh Aliakbarian<sup>3</sup>
10
+ Justus Thies<sup>1</sup>
11
+
12
+ <sup>1</sup> Max Planck Institute for Intelligent Systems, Tübingen, Germany
13
+
14
+ <sup>2</sup> Max Planck Institute for Informatics, Saarland, Germany
15
+ <sup>3</sup> Microsoft Mixed Reality & AI Lab, Cambridge, UK
16
+
17
+ ![](images/f15d604f435fa17dc624988058978f9c514d0ff5dfd8ecdf7d5611070bf61685.jpg)
18
+ Figure 1. Imitator is a novel method for personalized speech-driven 3D facial animation. Given an audio sequence and a personalized style-embedding as input, we generate person-specific motion sequences with accurate lip closures for bilabial consonants ('m', 'b', 'p'). The style-embedding of a subject can be computed by a short reference video (e.g., 5s).
19
+
20
+ # Abstract
21
+
22
+ Speech-driven 3D facial animation has been widely explored, with applications in gaming, character animation, virtual reality, and telepresence systems. State-of-the-art methods deform the face topology of the target actor to sync the input audio without considering the identity-specific speaking style and facial idiosyncrasies of the target actor; thus, resulting in unrealistic and inaccurate lip movements. To address this, we present Imitator, a speech-driven facial expression synthesis method, which learns identity-specific details from a short input video and produces novel facial expressions matching the identity-specific speaking style and facial idiosyncrasies of the target actor. Specifically, we train a style-agnostic transformer on a large facial expression dataset which we use as a prior for audio-driven facial expressions. Based on this prior, we optimize for identity-specific speaking style based on a short reference video. To train the prior, we introduce a novel loss function based on detected bilabial consonants to ensure plausible lip closures and consequently improve the realism of the generated expressions. Through detailed experiments and a user study, we show that our approach produces temporally coherent facial expressions from input audio while preserving the speaking style of the target actors. Please check out the project page for the supplemental video and more results.
25
+
26
+ # 1. Introduction
27
+
28
+ 3D digital humans raised a lot of attention in the past few years as they aim to replicate the appearance and motion of real humans for immersive applications, like telepresence in AR or VR, character animation and creation for entertainment (movies and games), and virtual mirrors for e-commerce. Especially, with the introduction of neural rendering [27, 28], we see immense progress in the photo-
29
+
30
+ realistic synthesis of such digital doubles [11,20,38]. These avatars can be controlled via visual tracking to mirror the facial expressions of a real human. However, we need to control the facial avatars with text or audio inputs for a series of applications. For example, AI-driven digital assistants rely on motion synthesis instead of motion cloning. Even telepresence applications might need to work with audio inputs only, when the face of the person is occluded or cannot be tracked, since a face capture device is not available. To this end, we analyze motion synthesis for facial animations from audio inputs; note that text-to-speech approaches can be used to generate such audio. Humans are generally sensitive towards faces, especially facial motions, as they are crucial for communication (e.g., micro-expressions). Without full expressiveness and proper lip closures, the generated animation will be perceived as unnatural and implausible. Especially if the person is known, the facial animations must match the subject's idiosyncrasies.
31
+
32
+ Recent methods for speech-driven 3D facial animation [5, 10, 16, 21] are data-driven. They are trained on high-quality motion capture data and leverage pretrained speech models [13, 23] to extract an intermediate audio representation. We can classify these data-driven methods into two categories, generalized [5, 10, 21] and personalized animation generation methods [16]. In contrast to those approaches, we aim at a personalized 3D facial animation synthesis that can adapt to a new user while only relying on input RGB videos captured with commodity cameras. Specifically, we propose a transformer-based auto-regressive motion synthesis method that predicts a generalized motion representation. This intermediate representation is decoded by a motion decoder which is adaptable to new users. A speaker embedding is adjusted for a new user, and a new motion basis for the motion decoder is computed. Our method is trained on the VOCA dataset [5] and can be applied to new subjects captured in a short monocular RGB video. As lip closures are of paramount importance for bilabial consonants ('m', 'b', 'p'), we introduce a novel loss based on the detection of bilabials to ensure that the lips are closed properly. We take inspiration from the locomotion synthesis field [14, 18], where similar losses are used to enforce foot contact with the ground and transfer it to our scenario of physically plausible lip motions.
33
+
34
+ In a series of experiments and ablation studies, we demonstrate that our method is able to synthesize facial expressions that match the target subject's motions in terms of style and expressiveness. Our method outperforms state-of-the-art methods in our metrical evaluation and user study. Please refer to our supplemental video for a detailed qualitative comparison. In a user study, we confirm that personalized facial expressions are important for the perceived realism.
35
+
36
+ The contributions of our work Imitator are as follows:
37
+
38
+ - a novel auto-regressive motion synthesis architecture that allows for adaption to new users by disentangling generalized viseme generation and person-specific motion decoding,
39
+ - and a lip contact loss formulation for improved lip closures based on physiological cues of bilabial consonants ('m', 'b', 'p').
40
+
41
+ # 2. Related Work
42
+
43
+ Our work focuses on speech-driven 3D facial animation related to talking head methods that create photo-realistic video sequences from audio inputs.
44
+
45
+ Talking Head Videos: Several prior works on speech-driven generation focus on the synthesis of 2D talking head videos. Suwajanakorn et al. [25] train an LSTM network on 19h video material of Obama to predict his person-specific 2D lip landmarks from speech inputs, which is then used for image generation. Vougioukas et al. [33] propose a method to generate facial animation from a single RGB image by leveraging a temporal generative adversarial network. Chung et al. [4] introduce a real-time approach to generate an RGB video of a talking face by directly mapping the audio input to the video output space. This method can redub a new target identity not seen during training. Instead of performing direct mapping, Zhou et al. [39] disentangles the speech information in terms of speaker identity and content, allowing speech-driven generation that can be applied to various types of realistic and hand-drawn head portraits. A series of work [24, 29, 36, 37] uses an intermediate 3D Morphable Model (3DMM) [2, 8] to guide the 2D neural rendering of talking heads from audio. Wang et al. [34] extend this work also to model the head movements of the speaker. Lipsync3d [17] proposes data-efficient learning of personalized talking heads focusing on pose and lighting normalization. Based on dynamic neural radiance fields [11], Ad-nerf [12] and DFA-NeRF [35] learn personalized talking head models that can be rendered under novel views, while being controlled by audio inputs. In contrast to these methods, our work focuses on predicting 3D facial animations from speech that can be used to drive 3D digital avatars without requiring retraining of the entire model to capture the person-specific motion style.
46
+
47
+ Speech-Driven 3D Facial Animation: Speech-driven 3d facial animation is a vivid field of research. Earlier methods [6, 7, 9, 15, 32] focus on animating a predefined facial rig using procedural rules. HMM-based models generate visemes from input text or audio, and the facial animations are generated using viseme-dependent co-articulation models [6, 7] or by blending facial templates [15]. With recent advances in machine learning, data-driven methods [3, 5, 10, 16, 21, 26, 29] have demonstrated their capability to learn viseme patterns from data. These methods
48
+
49
+ ![](images/9c464816c5e274a631a88b5e27f85e0c0048d9d44e42ea9a7fdb85fa62570a0e.jpg)
50
+ Figure 2. Our architecture takes audio as input which is encoded by a pre-trained Wav2Vec2.0 model [1]. This audio embedding $\hat{a}_{1:T}$ is interpreted by an auto-regressive viseme decoder which generates a generalized motion feature $\hat{v}_{1:T}$ . A style-adaptable motion decoder maps these motion features to person-specific facial expressions $\hat{y}_{1:T}$ in terms of vertex displacements on top of a template mesh.
51
+
52
+ are based on pretrained speech models [1, 13, 23] to generate an abstract and generalized representation of the input audio, which is then interpreted by a CNN or autoregressive model to map to either a 3DMM space or directly to 3D meshes. Karras et al. [16] learn a 3D facial animation model from 3-5 minutes of high-quality actor specific 3D data. VOCA [5] is trained on 3D data of multiple subjects and can animate the corresponding set of identities from input audio by providing a one-hot encoding during inference that indicates the subject. MeshTalk [21] is a generalized method that learns a categorical representation for facial expressions and auto-regressively samples from this categorical space to animate a given 3D facial template mesh of a subject from audio inputs. FaceFormer [10] uses a pretrained Wav2Vec [1] audio representation and applies a transformer-based decoder to regress displacements on top of a template mesh. Like VOCA, FaceFormer provides a speaker identification code to the decoder, allowing one to choose from the training set talking styles. In contrast, we aim at a method that can adapt to new users, capturing their talking style and expressiveness.
53
+
54
+ # 3. Method
55
+
56
+ Our goal is to model person-specific speaking style and the facial idiosyncrasies of an actor, to generate 3D facial animations of the subject from novel audio inputs. As input, we assume a short video sequence of the subject which we leverage to compute the identity-specific speaking style. To enable fast adaptation to novel users without significant training sequences, we learn a generalized style-agnostic transformer on VOCAset [5]. This transformer provides generic motion features from audio inputs that are interpretable by a person-specific motion decoder. The motion decoder is pre-trained and adaptable to new users via speaking style optimization and refinement of the motion
57
+
58
+ basis. To further improve synthesis results, we introduce a novel lip contact loss based on physiological cues of the bilabial consonants [7]. In the following, we will detail our model architecture and the training objectives and describe the style adaptation.
59
+
60
+ # 3.1. Model Architecture
61
+
62
+ Our architecture consists of three main components (see Figure 2): an audio encoder, a generalized auto-regressive viseme decoder, and an adaptable motion decoder.
63
+
64
+ Audio Encoder: Following state-of-the-art motion synthesis models [5, 10], we use a generalized speech model to encode the audio inputs $A$. Specifically, we leverage the Wav2Vec 2.0 model [1]. The original Wav2Vec is based on a CNN architecture designed to produce a meaningful latent representation of human speech. To this end, the model is trained in a self-supervised and semi-supervised manner to predict the immediate future values of the current input speech by using a contrastive loss, allowing the model to learn from a large amount of unlabeled data. Wav2Vec 2.0 extends this idea by quantizing the latent representation and incorporating a Transformer-based architecture [31]. We resample the Wav2Vec 2.0 output with a linear interpolation layer to match the sampling frequency of the motion (30fps for the VOCAset, with 16kHz audio), resulting in a contextual representation $\{\hat{a}\}_{t=1}^{T}$ of the audio sequence for $T$ motion frames.
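+ As an illustration, the resampling of the Wav2Vec 2.0 features to the 30 fps motion rate can be realized with a linear interpolation over the time axis. The following sketch assumes PyTorch and a feature tensor of shape (batch, frames, channels); it is not the authors' implementation.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def resample_audio_features(feats: torch.Tensor, num_motion_frames: int) -> torch.Tensor:
+     """Linearly interpolate audio features from (B, T_audio, C) to (B, T_motion, C)."""
+     feats = feats.transpose(1, 2)                      # F.interpolate expects (B, C, T)
+     feats = F.interpolate(feats, size=num_motion_frames, mode="linear", align_corners=True)
+     return feats.transpose(1, 2)
+
+ # Example: ~5 s of 16 kHz audio yields roughly 249 Wav2Vec 2.0 frames; the motion runs at 30 fps.
+ audio_feats = torch.randn(1, 249, 768)
+ motion_aligned = resample_audio_features(audio_feats, num_motion_frames=150)  # (1, 150, 768)
+ ```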
65
+
66
+ Auto-regressive Viseme Decoder: The decoder $F_{v}$ takes the contextual representation of the audio sequence as input and produces style-agnostic viseme features $\hat{v}_{t}$ in an auto-regressive manner. These viseme features describe how the lips should deform given the context audio and the previous viseme features. In contrast to Faceformer [10], we propose to use a classical transformer architecture [31] as viseme decoder, which learns the mapping from audio features $\{\hat{a}\}_{t = 1}^{T}$ to identity-agnostic viseme features $\{\hat{v}\}_{t = 1}^{T}$. The autoregressive viseme decoder is defined as:
69
+
70
+ $$
71
+ \hat {v} _ {t} = F _ {v} \left(\theta_ {v}; \hat {v} _ {1: t - 1}, \hat {a} _ {1: T}\right), \tag {1}
72
+ $$
73
+
74
+ where $\theta_v$ are the learnable parameters of the transformer.
75
+
76
+ In contrast to the traditional neural machine translation (NMT) architectures that produce discrete text, our output representation is a continuous vector. NMT models use a start and end token to indicate the beginning and end of the sequence. During inference, the NMT model autoregressively generates tokens until the end token is generated. Similarly, we use a start token to indicate the beginning of the sequences. However, since the sequence length $T$ is given by the length of the audio input, we do not use an end token. We inject temporal information into the sequences by adding encoded time to the viseme feature in the sequence. We formulate the positionally encoded intermediate representations $\hat{h}_t$ as:
77
+
78
+ $$
79
+ \hat {h} _ {t} = \hat {v} _ {t} + P E (t), \tag {2}
80
+ $$
81
+
82
+ where $PE(t)$ is a sinusoidal encoding function [31]. Given the sequence of positional encoded inputs $\hat{h}_t$ , we use multi-head self-attention which generates the context representation of the inputs by weighting the inputs based on their relevance. These context representations are used as input to a cross-modal multi-head attention block which also takes the audio features $\hat{a}_{1:T}$ from the audio encoder as input. A final feed-forward layer maps the output of this audio-motion attention layer to the viseme embedding $\hat{v}_t$ . In contrast to Faceformer [10], which feeds encoded face motions $\hat{y}_t$ to the transformer, we work with identity-agnostic viseme features which are independently decoded by the motion decoder. We found that feeding face motions $\hat{y}_t$ via an input embedding layer to the transformer contains identity-specific information, which we try to avoid since we aim for a generalized viseme decoder that is disentangled from person-specific motion. In addition, using a general start token instead of the identity code [10] as the start token reduces the identity bias further. Note that disentangling the identity-specific information from the viseme decoder improves the motion optimization in the style adaption stage of the pipeline (see Section 3.3), as gradients do not need to be propagated through the auto-regressive transformer.
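+ A minimal sketch of the auto-regressive inference loop is shown below. It assumes a transformer-decoder-style module `viseme_decoder(tgt, memory)` that adds the positional encoding internally; the names are illustrative, not the authors' code.
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def generate_viseme_features(viseme_decoder, audio_feats: torch.Tensor) -> torch.Tensor:
+     """Auto-regressively decode style-agnostic viseme features.
+
+     audio_feats: (1, T, 64) contextual audio representation a_{1:T}.
+     Returns:     (1, T, 64) viseme features v_{1:T}.
+     """
+     T = audio_feats.shape[1]
+     visemes = torch.zeros(1, 1, 64)          # zero start token marks the sequence start
+     for _ in range(T):
+         # The decoder sees all previously generated viseme features and
+         # cross-attends to the full audio sequence.
+         out = viseme_decoder(tgt=visemes, memory=audio_feats)
+         visemes = torch.cat([visemes, out[:, -1:, :]], dim=1)   # append the newest frame
+     return visemes[:, 1:, :]                 # drop the start token
+ ```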
83
+
84
+ Motion Decoder: The motion decoder aims to generate 3D facial animation $\hat{y}_{1:T}$ from the style-agnostic viseme features $\hat{v}_{1:T}$ and a style embedding $\hat{S}_i$ . Specifically, our motion decoder consists of two components, a style embedding layer and a motion synthesis block. For the training of the style-agnostic transformer and for pre-training the motion decoder, we assume to have a one-hot encoding of the identities of the training set. The style embedding layer takes this identity information as input and produces the style
85
+
86
+ embedding $\hat{S}_i$ , which encodes the identity-specific motion. The style embedding is concatenated with the viseme features $\hat{v}_{1:T}$ and fed into the motion synthesis block. The motion synthesis block consists of non-linear layers which map the style-aware viseme features to the motion space defined by a linear deformation basis. During training, the deformation basis is learned across all identities in the dataset. The deformation basis is fine-tuned for style adaptation to out-of-training identities (see Section 3.3). The final mesh outputs $\hat{y}_{1:T}$ are computed by adding the estimated per-vertex deformation to the template mesh of the subject.
87
+
88
+ # 3.2. Training
89
+
90
+ Similar to Faceformer [10], we use an autoregressive training scheme instead of teacher-forcing to train our model on the VOCAset [5]. Given that VOCAset provides ground truth 3D facial animations, we define the following loss:
91
+
92
+ $$
93
+ \mathcal{L}_{\text{total}} = \lambda_{MSE} \cdot \mathcal{L}_{MSE} + \lambda_{vel} \cdot \mathcal{L}_{vel} + \lambda_{lip} \cdot \mathcal{L}_{lip}, \tag{3}
94
+ $$
95
+
96
+ where $\mathcal{L}_{MSE}$ defines a reconstruction loss of the vertices, $\mathcal{L}_{vel}$ defines a velocity loss, and $\mathcal{L}_{lip}$ measures lip contact. The weights are $\lambda_{MSE} = 1.0$ , $\lambda_{vel} = 10.0$ , and $\lambda_{lip} = 5.0$ .
97
+
98
+ Reconstruction Loss: The reconstruction loss $\mathcal{L}_{MSE}$ is:
99
+
100
+ $$
101
+ \mathcal {L} _ {M S E} = \sum_ {v = 1} ^ {V} \sum_ {t = 1} ^ {T _ {v}} \left| \left| y _ {t, v} - \hat {y} _ {t, v} \right| \right| ^ {2}, \tag {4}
102
+ $$
103
+
104
+ where $y_{t,v}$ is the ground truth mesh at time $t$ in sequence $v$ (of $V$ total sequences) and $\hat{y}_{t,v}$ is the prediction.
105
+
106
+ Velocity Loss: Our motion decoder takes independent viseme features as input to produce facial expressions. To improve temporal consistency in the prediction, we introduce a velocity loss $\mathcal{L}_{vel}$ similar to [5]:
107
+
108
+ $$
109
+ \mathcal {L} _ {v e l} = \sum_ {v = 1} ^ {V} \sum_ {t = 2} ^ {T _ {v}} | | (y _ {t, v} - y _ {t - 1, v}) - (\hat {y} _ {t, v} - \hat {y} _ {t - 1, v}) | | ^ {2}. \tag {5}
110
+ $$
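+ A sketch of the reconstruction and velocity terms for one sequence, assuming vertex tensors of shape (T, V, 3); the summation over sequences follows Equations (4) and (5). Names are illustrative.
+
+ ```python
+ import torch
+
+ def reconstruction_and_velocity_loss(pred: torch.Tensor, target: torch.Tensor):
+     """pred, target: (T, V, 3) predicted and ground-truth vertices of one sequence."""
+     l_mse = ((pred - target) ** 2).sum()     # Eq. (4), squared vertex error
+     # Penalizing the difference of frame-to-frame displacements (Eq. 5)
+     # encourages temporally consistent motion.
+     vel_pred = pred[1:] - pred[:-1]
+     vel_target = target[1:] - target[:-1]
+     l_vel = ((vel_pred - vel_target) ** 2).sum()
+     return l_mse, l_vel
+ ```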
111
+
112
+ Lip Contact Loss: Training with $L_{MSE}$ guides the model to learn an averaged facial expression, thus resulting in improper lip closures. To this end, we introduce a novel lip contact loss for bilabial consonants ('m', 'b', 'p') to improve lip closures. Specifically, we automatically annotate the VOCAset to extract the occurrences of these consonants; see Section 4. Using this data, we define the following lip loss:
113
+
114
+ $$
115
+ \mathcal{L}_{lip} = \sum_{v = 1}^{V} \sum_{t = 1}^{T_{v}} w_{t, v} \left\| y_{t, v} - \hat{y}_{t, v} \right\|^{2}, \tag{6}
116
+ $$
117
+
118
+ where $w_{t,v}$ weights the prediction of frame $t$ according to the annotation of the bilabial consonants. Specifically, $w_{t,v}$ is one for frames with such consonants and zero otherwise.
119
+
120
+ Note that for such consonant frames, the target $y_{t,v}$ represents a face with a closed mouth; thus, this loss improves lip closures at 'm', 'b' and 'p's (see Section 5).
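+ A sketch of the lip contact term with per-frame weights: binary weights correspond to the formulation above, while a Gaussian window matches the weighting illustrated in Figure 3. Tensor shapes and names are assumptions, not the released code.
+
+ ```python
+ import torch
+
+ def lip_contact_loss(pred, target, closure_frames, sigma=2.0):
+     """pred, target: (T, V, 3) vertices; closure_frames: indices of annotated lip closures."""
+     T = pred.shape[0]
+     t = torch.arange(T, dtype=torch.float32)
+     w = torch.zeros(T)
+     for f in closure_frames:
+         # Soft window around each detected closure (use w[f] = 1.0 for binary weights).
+         w = torch.maximum(w, torch.exp(-0.5 * ((t - f) / sigma) ** 2))
+     per_frame = ((pred - target) ** 2).sum(dim=(1, 2))  # squared vertex error per frame
+     return (w * per_frame).sum()
+ ```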
121
+
122
+ # 3.3. Style Adaptation
123
+
124
+ Given a video of a new subject, we reconstruct and track the face $\tilde{y}_{1:T}$ (see Section 4). Based on this reference data, we first optimize for the speaker style-embedding $\hat{S}$ and then jointly refine the linear deformation basis using the $\mathcal{L}_{MSE}$ and $\mathcal{L}_{vel}$ loss. In our experiments, we found that this two-stage adaptation is essential for generalization to new audio inputs as it reuses the pretrained information of the motion decoder. As an initialization of the style embedding, we use a speaking style of the training set. We precompute all viseme features $\hat{v}_{1:T}$ once, and optimize the speaking style to reproduce the tracked faces $\tilde{y}_{1:T}$ . We then refine the linear motion basis of the decoder to match the person-specific deformations (e.g., asymmetric lip motions).
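+ The two-stage adaptation can be sketched as follows; the motion-decoder interface (`motion_decoder(visemes, style)`, `deformation_basis`) and the initialization from a training-set style are assumptions based on the description above.
+
+ ```python
+ import torch
+
+ def adapt_style(motion_decoder, viseme_feats, tracked_verts, init_style, epochs=300, lr=1e-4):
+     """viseme_feats: precomputed, fixed viseme features; tracked_verts: 3DMM-tracked meshes;
+     init_style: style embedding of a training-set speaker used as initialization."""
+     style = torch.nn.Parameter(init_style.clone())
+
+     def loss_fn():
+         pred = motion_decoder(viseme_feats, style)
+         l_mse = ((pred - tracked_verts) ** 2).sum()
+         l_vel = (((pred[1:] - pred[:-1]) - (tracked_verts[1:] - tracked_verts[:-1])) ** 2).sum()
+         return l_mse + 10.0 * l_vel
+
+     # Stage 1: optimize only the style embedding.
+     opt = torch.optim.Adam([style], lr=lr)
+     for _ in range(epochs):
+         opt.zero_grad()
+         loss_fn().backward()
+         opt.step()
+
+     # Stage 2: jointly refine the style embedding and the final deformation basis.
+     opt = torch.optim.Adam([style, *motion_decoder.deformation_basis.parameters()], lr=lr)
+     for _ in range(epochs):
+         opt.zero_grad()
+         loss_fn().backward()
+         opt.step()
+     return style
+ ```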
125
+
126
+ # 4. Dataset
127
+
128
+ We train our method based on the VOCAset [5], which consists of 12 actors (6 female and 6 male) with 40 sequences each with a length of $3 - 5$ seconds. The dataset comes with a train/test set split which we use in our experiments. The test set contains 2 actors. The dataset offers audio and high-quality 3D face reconstructions per frame (60fps). For our experiment, we sample the 3D face reconstructions at 30fps. We train the auto-regressive transformer on this data using the loss from Equation (3). For the lip contact loss $L_{lip}$ , we automatically compute the labels as described below.
129
+
130
+ To adapt the motion decoder to a new subject, we require a short video clip of the person. Using this sequence, we run a 3DMM-based face tracker to get the per-frame 3D shape of the person. Based on this data, we adapt the motion decoder as detailed in Section 3.3.
131
+
132
+ Automatic Lip Closure Labeling: For the VOCAset, the transcript is available. Based on Wav2Vec features, we align the transcript with the audio track. As the lip closure is formed before we hear the bilabial consonants, we search for the lip closure in the tracked face geometry before the time-stamp of the occurrence of the consonants in the script. We show this process for a single sequence in Figure 3. The lip closure is detected by the lip distance, i.e., the frame with minimal lip distance in a short time window before the consonant is assumed to be the lip closure.
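+ A sketch of the closure detection, assuming a per-frame lip-distance signal and consonant frame indices obtained from the forced alignment (names are illustrative):
+
+ ```python
+ import numpy as np
+
+ def detect_lip_closures(lip_distance, consonant_frames, window=10):
+     """Pick, for each bilabial consonant, the frame with minimal lip distance in a short
+     window before the consonant is heard (the closure precedes the sound)."""
+     closures = []
+     for f in consonant_frames:
+         start = max(0, f - window)
+         local = lip_distance[start:f + 1]
+         closures.append(start + int(np.argmin(local)))
+     return closures
+ ```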
133
+
134
+ External Sequence Processing: We assume to have a monocular RGB video of about 2 minutes in length as input which we divide into train/validation/test sequences. Based on MICA [40], we estimate the 3D shape of the subject using the first frame of the video. Using this shape estimate, we run an analysis-by-synthesis approach [30] to
135
+
136
+ ![](images/cd14d1d2a9c991ad49beebb0a3f0d86a490ecdc436d92083af1148885bb60d8b.jpg)
+ ![](images/214c984ebec9ca3f3348373f02d2e6575626efe7a6fdfe822c75f4b9a73674bc.jpg)
+ Figure 3. Automatic labeling of the bilabial consonants ('m', 'b' and 'p') and their corresponding lip closures in a sequence of VOCAset [5] (example words spoken: "BAGPIPE AND BONGOS"). We align the transcript with the audio track using Wav2vec [1] features and extract the time stamps for the bilabial consonants. To detect the lip closures for the bilabial consonants, we search for local minima on the lip distance curves (red). The lip loss weights $w_{t,v}$ in a window around the detected lip closure are set to fixed values of a Gaussian function. We show an example of detected lip closures in the figure (in the blue bounding box). The plots show the audio waveform, the ground-truth lip distance curve, the lip loss weights, the detected consonants, and the computed lip closures.
147
+
148
+ estimate per-frame blendshape parameters of the FLAME 3DMM [19]. Given these blendshape coefficients, we can compute the 3D vertices of the per-frame face meshes that we need to adapt the motion decoder. Note that in contrast to the training data of the transformer, we do not require any bilabial consonants labeling, as we adapt the motion decoder only based on the reconstruction and velocity loss.
149
+
150
+ # 5. Results
151
+
152
+ To validate our method, we conducted a series of qualitative and quantitative evaluations, including a user study and ablation studies. For evaluation on the test set of VOCAset [5], we randomly sample 4 sequences from the test subjects' train set (each $\sim$ 5s long) and learn the speaking-style and facial idiosyncrasies of the subject via style adaptation. We compare our method to the state-of-the-art methods VOCA [5], Faceformer [10], and MeshTalk [21]. We use the original implementations of the authors. However, we found that MeshTalk cannot train on the comparably small VOCAset. Thus, we qualitatively compare against MeshTalk with their provided model trained on a large-scale proprietary dataset with 200 subjects and 40 sequences for each. Note that the pretrained MeshTalk model is not compatible with the FLAME topology; thus, we cannot evaluate their method on novel identities. In addition to the experi
153
+
154
+ ![](images/e9b5cadf1abf9e602dc6ffdb18609caa1db3405a7f2ed6e71dce013ae4f6ca20.jpg)
155
+ Figure 4. Qualitative comparison to the state-of-the-art methods VOCA [5], Faceformer [10], and MeshTalk [21] (words spoken in the depicted frames: "So, I start talking now.... usually..." and "One of my favorite topics to discuss is ..."). Note that MeshTalk is performed with a different identity since we use their pretrained model, which cannot be trained on VOCAset. As we see in the highlighted regions, the generated sequences without the person-specific style have muted and inaccurate lip animations.
159
+
160
+ ments on the VOCAset, we show results on external RGB sequences. The results can be best seen in the suppl. video.
161
+
162
+ Quantitative Evaluation: To quantitatively evaluate our
163
+
164
+ <table><tr><td>Method</td><td>L2face↓</td><td>L2lip↓</td><td>F-DTW↓</td><td>Lip-DTW↓</td><td>Lip-sync↓</td></tr><tr><td>VOCA [5]</td><td>0.88</td><td>0.15</td><td>1.28</td><td>2.41</td><td>5.72</td></tr><tr><td>Faceformer [10]</td><td>0.8</td><td>0.14</td><td>1.18</td><td>2.85</td><td>5.41</td></tr><tr><td>Ours (w/ 1seq)</td><td>0.91</td><td>0.1</td><td>1.3</td><td>1.68</td><td>3.99</td></tr><tr><td>Ours</td><td>0.89</td><td>0.09</td><td>1.26</td><td>1.47</td><td>3.78</td></tr></table>
165
+
166
+ Table 1. Quantitative results on the VOCAset [5]. Our method outperforms the baselines on all of the lip metrics while performing on par on the full-face metrics. Note that we are not targeting the animation of the upper face but aim for expressive and accurate lip movements, which is noticeable from the improved lip scores.
167
+
168
+ method, we use the test set of VOCAset [5], which provides high-quality reference mesh reconstructions. We evaluate the performance of our method based on a mean $L_{2}$ vertex distance for the entire mesh $L_{2}^{face}$ and the lip region $L_{2}^{lip}$. Following MeshTalk [21], we also compute the Lip-sync metric, which measures the mean of the maximal per-frame lip distances. In addition, we use Dynamic Time Warping (DTW) to compute the similarity between the produced and reference meshes, both for the entire mesh (F-DTW) and the lip region (Lip-DTW). Since VOCA and Faceformer do not adapt to new user talking styles, we select the talking style from their training with the best quantitative metrics. Note that the pretrained MeshTalk model is not applicable to this
169
+
170
+ ![](images/4080d4530b3dfd6b0b6bb97aa39dfa5fec76ad6fefd4d4fdc6f0b64b64ab9dd6.jpg)
171
+ Figure 5. Qualitative ablation comparison (words spoken in the depicted frames: "His Failure to Open ... By Job." and "Had Vinyl Technology Expand..."). At first, we show that our complete method with style and $\mathcal{L}_{lip}$ loss is able to generate personalized facial animation with expressive motion and accurate lip closures. Replacing the person-specific style with the style seen during training results in generic and muted facial animation. As highlighted in the per-vertex error maps (magenta), the generated expression is not similar to the target actor. Especially the facial deformations are missing person-specific details. Removing $\mathcal{L}_{lip}$ from the training objective results in improper lip closures (red).
175
+
176
+ <table><tr><td>Method</td><td>Expressiveness (%)</td><td>Realism/Lip-sync (%)</td></tr><tr><td>Ours vs VOCA [5]</td><td>86.48</td><td>76.92</td></tr><tr><td>Ours vs Faceformer [10]</td><td>81.89</td><td>75.46</td></tr><tr><td>Ours vs Ground truth</td><td>20.28</td><td>42.30</td></tr></table>
177
+
178
+ Table 2. In a perceptual A/B user study conducted on the test set of VOCAset [5] with 56 participants, we see that in comparison to VOCA [5] and Faceformer [10] our method is preferred.
179
+
180
+ evaluation due to the identity mismatch. As can be seen in Table 1, our method achieves the lowest lip reconstruction and lip-sync errors, confirming our qualitative results. Even when using a single reference video for style adaptation (5s), our results show significantly better lip scores.
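+ As a rough sketch, the lip metrics can be computed as below; the exact lip-vertex set, units, and scaling used for Table 1 are not reproduced here.
+
+ ```python
+ import numpy as np
+
+ def lip_metrics(pred, gt, lip_idx):
+     """pred, gt: (T, V, 3) meshes; lip_idx: vertex indices of the lip region.
+     L2-lip: mean per-vertex distance over the lip region.
+     Lip-sync: mean over frames of the maximal per-frame lip vertex distance."""
+     d = np.linalg.norm(pred[:, lip_idx] - gt[:, lip_idx], axis=-1)  # (T, |lip|)
+     return d.mean(), d.max(axis=1).mean()
+ ```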
181
+
182
+ Qualitative Evaluation: We conducted a qualitative evaluation on external sequences not part of VOCAset. In Figure 4, we show a series of frames from those sequences with the corresponding words. As we can see, our method is able to adapt to the speaking style of the respective subject. VOCA [5] and Faceformer [10] miss person-specific deformations and are not as expressive as our results. MeshTalk [21], which uses an identity that comes with the pretrained model, also shows dampened expressivity. In the suppl. video, we can observe that our method is generating better lip closures for bilabial consonants.
183
+
184
+ Perceptual Evaluation: We conducted a perceptual evaluation to quantify the quality of our method's generated results (see Table 2). Specifically, we conducted an A/B user study on the test set of VOCAset. We randomly sample 10 sequences of the test subjects and run our method, VOCA, and Faceformer. For VOCA and Faceformer, which do not adapt to the style of a new user, we use the talking style of the training Subject 137, which provided the best quantitative results. We use 20 videos per method resulting in 60 A/B comparisons. For every A/B test, we ask the user to choose the best method based on realism and expressiveness, following the user study protocol of Faceformer [10]. In Table 2, we show the result of this study in which 56 people participated. We observe that our method consistently outperforms VOCA and Faceformer. We also see that our model achieves similar realism and lip-sync as ground truth. Note that the users in the perceptual study have not seen the original talking style of the actors before. However, the results show that our personalized synthesis leads to more realistic-looking animations.
185
+
186
+ # 5.1. Ablation Studies
187
+
188
+ To understand the impact of our style adaptation and the novel lip contact loss $\mathcal{L}_{lip}$ on the perceptual quality, we show a qualitative ablation study including per-vertex error maps in Figure 5. As highlighted in the figure, the style adaptation is critical to match the person-specific deformations and mouth shapes and improves expressiveness.
189
+
190
+ ![](images/62a5eb0db200d4bc4ef723dad7ea0191f341e0f680c9a6c934b2c771068121e1.jpg)
191
+ Figure 6. Analysis of style adaptation in terms of lip distance on a test sequence of the VOCAset [5] (reference in red). Starting from an initial talking style from the training set (blue), we consecutively adapt the style code (green) and the motion basis of the motion decoder (purple).
192
+
193
+ The lip contact loss improves the lip closures for the bilabial consonants, thus, improving the perceived realism, as can best be seen in the suppl. video. We rely on only $\sim 60$ seconds-long reference videos to extract the person-specific speaking style. A detailed analysis of the sequence length's influence on the final output quality can be found in the suppl. material. It is also worth noting that our style-agnostic architecture allows us to perform style adaptation of the motion decoder in less than $30\mathrm{min}$ , while an adaptation with an identity-dependent transformer takes about $6\mathrm{h}$ .
194
+
195
+ Our proposed style adaptation has two stages as explained in Section 3.3. In the first step, we optimize for the style code and then refine the motion basis. In Figure 6, we show an example of the style adaptation by evaluating the lip distances throughout a sequence with a motion decoder at initialization, with optimized style code, and with a refined motion basis. While the lip distance error with the generalized motion decoder is considerable, it gets significantly improved by the consecutive steps of style adaptation. After style code optimization, we observe that the amplitude and frequency of the lip distance curves start resembling the ground truth. Refining the motion basis further improves the lip distance, and it is able to capture facial idiosyncrasies, like asymmetrical lip deformations.
196
+
197
+ # 6. Discussion
198
+
199
+ Our evaluation shows that our proposed method outperforms state-of-the-art methods in perceived expressiveness and realism. However, several limitations remain. Specifically, we only support the speaking style of the subject seen in the reference video and do not control the talking style w.r.t. emotions (e.g., sad, happy, angry). The viseme transformer and the motion decoder could be conditioned on an emotion flag; we leave this for future work. The expressiveness and facial details depend on the face tracker's quality; if the face tracking is improved, our method will predict better face shapes.
200
+
201
+ # 7. Conclusion
202
+
203
+ We present Imitator, a novel approach for personalized speech-driven 3D facial animation. Based on a short reference video clip of a subject, we learn a personalized motion decoder driven by a generalized auto-regressive transformer that maps audio to intermediate viseme features. Our studies show that personalized facial animations are essential for the perceived realism of a generated sequence. Our new loss formulation for accurate lip closures of bilabial consonants further improves the results. We believe that personalized facial animations are a stepping stone towards audio-driven digital doubles.
204
+
205
+ # 8. Acknowledgements
206
+
207
+ This project has received funding from the Mesh Labs, Microsoft, Cambridge, UK. Further, we would like to thank Berna Kabadayi, Jalees Nehvi, Malte Prinzler and Wojciech Zielonka for their support and valuable feedback. The authors thank the International Max Planck Research School for Intelligent Systems (IMPRS-IS) for supporting Balamurugan Thambiraja.
208
+
209
+ # References
210
+
211
+ [1] Baevski, A., Zhou, Y., Mohamed, A., Auli, M.: wav2vec 2.0: A framework for self-supervised learning of speech representations. In: Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems 2020, NeurIPS 2020, December 6-12, 2020, virtual (2020), https://proceedings.neurips.cc/paper/2020/hash/92d1e1eb1cd6f9fba3227870bb6d7f07-Abstract.html 3,5,12
212
+ [2] Blanz, V., Vetter, T.: A morphable model for the synthesis of 3d faces. In: Proceedings of the 26th annual conference on Computer graphics and interactive techniques. pp. 187-194 (1999) 2
213
+ [3] Cao, Y., Tien, W.C., Faloutsos, P., Pighin, F.: Expressive speech-driven facial animation. ACM Trans. Graph. 24(4), 1283-1302 (oct 2005). https://doi.org/10.1145/1095878.1095881, https://doi.org/10.1145/1095878.1095881 2
214
+ [4] Chung, J.S., Jamaludin, A., Zisserman, A.: You said that? arXiv preprint arXiv:1705.02966 (2017) 2
215
+ [5] Cudeiro, D., Bolkart, T., Laidlaw, C., Ranjan, A., Black, M.J.: Capture, Learning, and Synthesis of 3D Speaking Styles. In: 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10093-10103. IEEE, Long Beach, CA, USA (Jun 2019). https://doi.org/10.1109/CVPR.2019.01034, https://ieeexplore.ieee.org/document/8954000/2,3,4,5,6,8
216
+
217
+ [6] De Martino, J.M., Pini Magalhães, L., Violaro, F.: Facial animation based on context-dependent visemes. Computers & Graphics 30(6), 971-980 (Dec 2006). https://doi.org/10.1016/j.cag.2006.08.017, https : / / linkinghub . elsevier . com / retrieve/pii/S0097849306001518 2
218
+ [7] Edwards, P., Landreth, C., Fume, E., Singh, K.: Jali: an animator-centric viseme model for expressive lip synchronization. ACM Trans. Graph. 35, 127:1-127:11 (2016) 2, 3
219
+ [8] Egger, B., Smith, W.A., Tewari, A., Wuhrer, S., Zollhoefer, M., Beeler, T., Bernard, F., Bolkart, T., Kortylewski, A., Romdhani, S., et al.: 3d morphable face models—past, present, and future. ACM Transactions on Graphics (TOG) 39(5), 1-38 (2020) 2
220
+ [9] Ezzat, T., Poggio, T.: MikeTalk: a talking facial display based on morphing visemes. In: Proceedings Computer Animation '98 (Cat. No.98EX169). pp. 96-102. IEEE Comput. Soc, Philadelphia, PA, USA (1998). https://doi.org/10.1109/CA.1998.681913, http://ieeexplore.ieee.org/document/681913/2
221
+ [10] Fan, Y., Lin, Z., Saito, J., Wang, W., Komura, T.: Faceformer: Speech-driven 3d facial animation with transformers. CoRR abs/2112.05329 (2021), https://arxiv.org/abs/2112.05329 2, 3, 4, 5, 6, 8, 12, 13
222
+ [11] Gafni, G., Thies, J., Zollhöfer, M., Nießner, M.: Dynamic neural radiance fields for monocular 4d facial avatar reconstruction. CoRR abs/2012.03065 (2020), https://arxiv.org/abs/2012.030652
223
+ [12] Guo, Y., Chen, K., Liang, S., Liu, Y., Bao, H., Zhang, J.: Ad-nerf: Audio driven neural radiance fields for talking head synthesis. In: IEEE/CVF International Conference on Computer Vision (ICCV) (2021) 2
224
+ [13] Hannun, A., Case, C., Casper, J., Catanzaro, B., Diamos, G., Elsen, E., Prenger, R., Satheesh, S., Sengupta, S., Coates, A., Y. Ng, A.: DeepSpeech: Scaling up end-to-end speech recognition (12 2014) 2, 3
225
+ [14] Holden, D., Saito, J., Komura, T.: A deep learning framework for character motion synthesis and editing. ACM Transactions on Graphics (TOG) 35(4), 1-11 (2016) 2
226
+ [15] Kalberer, G., Van Gool, L.: Face animation based on observed 3D speech dynamics. In: Proceedings Computer Animation 2001. Fourteenth Conference on Computer Animation (Cat. No.01TH8596). pp. 20-251. IEEE Comput. Soc, Seoul, South Korea (2001). https://doi.org/10.1109/CA.2001.982373, http://ieeexplore.ieee.org/document/982373/2
227
+ [16] Karras, T., Aila, T., Laine, S., Herva, A., Lehtinen, J.: Audio-driven facial animation by joint end-to-end learning of pose and emotion. ACM Transactions on Graphics 36(4), 1-12 (Jul 2017). https://doi.org/10.1145/3072959.3073658, https://dl.acm.org/doi/10.1145/3072959.3073658 2, 3
228
+
229
+ [17] Lahiri, A., Kwatra, V., Frueh, C., Lewis, J., Bregler, C.: Lipsync3d: Data-efficient learning of personalized 3d talking faces from video using pose and lighting normalization (2021). https://doi.org/10.48550/ARXIV.2106.04185, https://arxiv.org/abs/2106.04185 2
230
+ [18] Lee, J., Chai, J., Reitsma, P.S., Hodgins, J.K., Pollard, N.S.: Interactive control of avatars animated with human motion data. In: Proceedings of the 29th annual conference on Computer graphics and interactive techniques. pp. 491-500 (2002) 2
231
+ [19] Li, T., Bolkart, T., Black, M.J., Li, H., Romero, J.: Learning a model of facial shape and expression from 4D scans. ACM Transactions on Graphics, (Proc. SIGGRAPH Asia) 36(6) (2017), https://doi.org/10.1145/3130800.31308135
232
+ [20] Lombardi, S., Simon, T., Saragih, J., Schwartz, G., Lehrmann, A., Sheikh, Y.: Neural volumes: Learning dynamic renderable volumes from images. ACM Trans. Graph. 38(4), 65:1-65:14 (Jul 2019) 2
233
+ [21] Richard, A., Zollhofer, M., Wen, Y., de la Torre, F., Sheikh, Y.: MeshTalk: 3D Face Animation from Speech using Cross-Modality Disentanglement. In: 2021 IEEE/CVF International Conference on Computer Vision (ICCV). pp. 1153-1162. IEEE, Montreal, QC, Canada (Oct 2021). https://doi.org/10.1109/ICCV48922.2021.00121, https://ieeexplore.ieee.org/document/9710491/2,3,5,6,8
234
+ [22] Rössler, A., Cozzolino, D., Verdoliva, L., Riess, C., Thies, J., Nießner, M.: Faceforensics++: Learning to detect manipulated facial images. ICCV 2019 (2019) 13
235
+ [23] Schneider, S., Baevski, A., Collobert, R., Auli, M.: wav2vec: Unsupervised pre-training for speech recognition. In: Kubin, G., Kacic, Z. (eds.) Interspeech 2019, 20th Annual Conference of the International Speech Communication Association, Graz, Austria, 15-19 September 2019. pp. 3465-3469. ISCA (2019). https://doi.org/10.21437/Interspeech.2019-1873, https://doi.org/10.21437/Interspeech.2019-1873 2, 3
236
+ [24] Song, L., Wu, W., Qian, C., He, R., Loy, C.C.: Everybody's talkin': Let me talk as you want. IEEE Transactions on Information Forensics and Security 17, 585-598 (2022) 2
237
+ [25] Suwajanakorn, S., Seitz, S.M., Kemelmacher-Shlizerman, I.: Synthesizing Obama: learning lip sync from audio. ACM Transactions on Graphics (ToG) 36(4), 1-13 (2017) 2
238
+ [26] Taylor, S.L., Kim, T., Yue, Y., Mahler, M., Krahe, J., Rodriguez, A.G., Hodgins, J.K., Matthews, I.A.: A deep learning approach for generalized speech animation. ACM Trans. Graph. 36(4), 93:1-93:11 (2017). https://doi.org/10.1145/3072959.3073699, https://doi.org/10.1145/3072959.3073699 2
239
+ [27] Tewari, A., Thies, J., Mildenhall, B., Srinivasan, P., Tretschk, E., Wang, Y., Lassner, C., Sitzmann, V., Martin-Brualla, R., Lombardi, S., Simon, T., Theobalt, C., Niessner, M., Barron,
240
+
241
+ J.T., Wetzstein, G., Zollhoefer, M., Golyanik, V.: Advances in neural rendering (2022) 1
242
+ [28] Thies, J., Tewari, A., Fried, O., Sitzmann, V., Lombardi, S., Sunkavalli, K., Martin-Brualla, R., Simon, T., Saragih, J., Nießner, M., Pandey, R., Fanello, S., Wetzstein, G., Zhu, J.Y., Theobalt, C., Agrawala, M., Shechtman, E., Goldman, D.B., Zollhöfer, M.: State of the art on neural rendering. EG (2020) 1
243
+ [29] Thies, J., Elgharib, M., Tewari, A., Theobalt, C., Nießner, M.: Neural voice puppetry: Audio-driven facial reenactment. ECCV 2020 (2020) 2
244
+ [30] Thies, J., Zollhöfer, M., Stamminger, M., Theobalt, C., Nießner, M.: Face2face: Real-time face capture and reenactment of rgb videos (2020). https://doi.org/10.48550/ARXIV.2007.14808, https://arxiv.org/abs/2007.148085
245
+ [31] Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in neural information processing systems 30 (2017) 3, 4, 12
246
+ [32] Verma, A., Rajput, N., Subramaniam, L.: Using viseme based acoustic models for speech driven lip synthesis. In: 2003 IEEE International Conference on Acoustics, Speech, and Signal Processing, 2003. Proceedings. (ICASSP '03). vol. 5, pp. V-720-3. IEEE, Hong Kong, China (2003). https://doi.org/10.1109/ICASSP.2003.1200072, http://ieeexplore.ieee.org/document/1200072/2
247
+ [33] Vougioukas, K., Petridis, S., Pantic, M.: Realistic speech-driven facial animation with gans. International Journal of Computer Vision 128(5), 1398-1413 (2020) 2
248
+ [34] Wang, S., Li, L., Ding, Y., Fan, C., Yu, X.: Audio2head: Audio-driven one-shot talking-head generation with natural head motion. In: International Joint Conference on Artificial Intelligence. IJCAI (2021) 2
249
+ [35] Yao, S., Zhong, R., Yan, Y., Zhai, G., Yang, X.: Dfa-nerf: Personalized talking head generation via disentangled face attributes neural rendering. arXiv preprint arXiv:2201.00791 (2022) 2
250
+ [36] Yi, R., Ye, Z., Zhang, J., Bao, H., Liu, Y.J.: Audio-driven talking face video generation with learning-based personalized head pose. arXiv preprint arXiv:2002.10137 (2020) 2
251
+ [37] Zhang, Z., Li, L., Ding, Y., Fan, C.: Flow-guided one-shot talking face generation with a high-resolution audio-visual dataset. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3661–3670 (2021) 2
252
+ [38] Zheng, Y., Abrevaya, V.F., Chen, X., Bühler, M.C., Black, M.J., Hilliges, O.: I M avatar: Implicit morphable head avatars from videos. CoRR abs/2112.07471 (2021), https://arxiv.org/abs/2112.07471 2
253
+ [39] Zhou, Y., Han, X., Shechtman, E., Echevarria, J., Kalogerakis, E., Li, D.: Makelttalk: speaker-aware talking-head animation. ACM Transactions on Graphics (TOG) 39(6), 1-15 (2020) 2
254
+
255
+ [40] Zielonka, W., Bolkart, T., Thies, J.: Towards metrical reconstruction of human faces. ECCV (2022). https://doi.org/10.48550/ARXIV.2204.06607, https://arxiv.org/abs/2204.066075
256
+
257
+ # Imitator: Personalized Speech-driven 3D Facial Animation - Supplemental Document -
258
+
259
+ # 9. Impact of Data to Style Adaptation:
260
+
261
+ To analyze the impact of data on the style adaptation process, we randomly sample (1, 4, 10, 20) sequences from the train set of the VOCA test subjects and perform our style adaptation. Each sequence contains about $3 - 5$ seconds of data. In Table 3, we observe that the quantitative metrics improve with the number of reference sequences. As mentioned in the main paper, even an adaptation based on a single sequence results in a significantly better animation in comparison to the baseline methods. This highlights the impact of style on the generated animations.
262
+
263
+ Figure 7 illustrates the lip distance curve for one test sequence used in this study. We observe that the lip distance with more reference data better fits the ground truth curve.
264
+
265
+ <table><tr><td>No. Seq.</td><td>L2face↓</td><td>L2lip↓</td><td>F-DTW↓</td><td>Lip-DTW↓</td><td>Lip-sync↓</td></tr><tr><td>1</td><td>0.91</td><td>0.1</td><td>1.3</td><td>1.68</td><td>3.99</td></tr><tr><td>4</td><td>0.89</td><td>0.1</td><td>1.26</td><td>1.47</td><td>3.78</td></tr><tr><td>10</td><td>0.76</td><td>0.09</td><td>1.07</td><td>1.37</td><td>3.57</td></tr><tr><td>20</td><td>0.7</td><td>0.09</td><td>0.99</td><td>1.27</td><td>3.49</td></tr></table>
266
+
267
+ Table 3. Ablation of the style adaptation w.r.t. the amount of reference sequences used. With an increasing number of data, the quantitative metrics improve. Each sequence is $3 - 5\mathrm{s}$ long.
268
+
269
+ ![](images/f1fb3096e88d2a20d9b49e00ea3639cceb691da4eac2d4c84db2d1ce62e1da60.jpg)
270
+ Figure 7. With an increasing number of reference data samples for style adaptation, the lip distance throughout a test sequence of VOCAset is approaching the ground truth lip distance curve.
271
+
272
+ # 10. Architecture Details
273
+
274
+ # 10.1. Audio Encoder:
275
+
276
+ Similar to Faceformer [10], our audio encoder is built upon the Wav2Vec 2.0 [1] architecture to extract temporal audio features. These audio features are fed into a linear interpolation layer to convert the audio frequency to the motion frequency. The interpolated outputs are then fed into 12 identical transformer encoder layers with 12 attention heads and an output dimension of 768. A final linear projection layer converts the 768-dimensional audio features to a 64-dimensional phoneme representation.
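+ A sketch of such an encoder using the Hugging Face Wav2Vec 2.0 base model (an assumption; for simplicity, the interpolation is applied here to the final hidden states rather than before the transformer layers):
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import Wav2Vec2Model
+
+ class AudioEncoder(nn.Module):
+     """Frozen Wav2Vec 2.0 features, resampled to the motion rate and projected to 64 dims."""
+
+     def __init__(self):
+         super().__init__()
+         self.wav2vec = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h")
+         self.proj = nn.Linear(768, 64)
+
+     def forward(self, waveform: torch.Tensor, num_motion_frames: int) -> torch.Tensor:
+         feats = self.wav2vec(waveform).last_hidden_state     # (B, T_audio, 768)
+         feats = F.interpolate(feats.transpose(1, 2), size=num_motion_frames,
+                               mode="linear", align_corners=True).transpose(1, 2)
+         return self.proj(feats)                              # (B, T_motion, 64)
+ ```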
279
+
280
+ # 10.2. Auto-regressive Viseme Decoder:
281
+
282
+ Our auto-regressive viseme decoder is built on top of traditional transformer decoder layers [31]. We use a 64-dimensional zero vector as a start token to indicate the start of sequence synthesis. We first add a 64-dimensional positional encoding to the input feature and feed it to the decoder layers of the viseme decoder. For self-attention and cross-modal multi-head attention, we use 4 heads of dimension 64. Our feed-forward layer has a dimension of 128.
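+ With standard PyTorch modules, a decoder layer matching these dimensions could look as follows (the number of stacked layers is not specified in the text and is set to one here for illustration):
+
+ ```python
+ import torch.nn as nn
+
+ decoder_layer = nn.TransformerDecoderLayer(
+     d_model=64,            # viseme / audio feature dimension
+     nhead=4,               # self- and cross-attention heads
+     dim_feedforward=128,   # feed-forward layer size
+     batch_first=True,
+ )
+ viseme_decoder = nn.TransformerDecoder(decoder_layer, num_layers=1)
+ ```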
283
+
284
+ Multi-Head Self-Attention: Given a sequence of positional encoded inputs $\hat{h}_t$ , we use multi-head self-attention (self-MHA), which generates the context representation of the inputs by weighting the inputs based on their relevance. The Scaled Dot-Product attention function can be defined as mapping a query and a set of key-value pairs to an output, where queries, keys, values and outputs are vectors [31]. The output is the weighted sum of the values; the weight is computed by a compatibility function of a query with the corresponding key. The attention can be formulated as:
285
+
286
+ $$
287
+ \operatorname {A t t e n t i o n} (Q, K, V) = \sigma \left(\frac {Q K ^ {T}}{\sqrt {d _ {k}}}\right) V, \tag {7}
288
+ $$
289
+
290
+ where $Q, K, V$ are the learned Queries, Keys and Values, $\sigma(\cdot)$ denotes the softmax activation function, and $d_k$ is the dimension of the keys. Instead of using a single attention mechanism and generating one context representation, MHA uses multiple self-attention heads to jointly generate multiple context representations and attend to the information in the different context representations at different positions. MHA is formulated as follows:
291
+
292
+ $$
293
+ M H A (Q, K, V) = \left[ h e a d _ {1}, \dots , h e a d _ {h} \right] \cdot W ^ {O}, \tag {8}
294
+ $$
295
+
296
+ with $head_{i} = \text{Attention}(QW_{i}^{Q}, KW_{i}^{K}, VW_{i}^{V})$ , where $W^{O}, W_{i}^{Q}, W_{i}^{K}, W_{i}^{V}$ are weights related to each input variable.
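+ A direct transcription of Equation (7); multi-head attention (Eq. 8) applies this per head on learned projections of the inputs and concatenates the results.
+
+ ```python
+ import torch
+
+ def scaled_dot_product_attention(Q, K, V):
+     """softmax(Q K^T / sqrt(d_k)) V for tensors of shape (..., seq, d_k)."""
+     d_k = Q.shape[-1]
+     scores = Q @ K.transpose(-2, -1) / d_k ** 0.5
+     return torch.softmax(scores, dim=-1) @ V
+ ```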
297
+
298
+ Audio-Motion Multi-Head Attention: The Audio-Motion Multi-Head attention aims to map the context representations from the audio encoder to the viseme representations by learning the alignment between the audio and style-agnostic viseme features. The decoder queries the encoded audio features, which carry both the positional and the contextual information, with the existing viseme features, resulting in audio-context-injected viseme features. Similar to Faceformer [10], we add an alignment bias along the diagonal to the query-key attention score to add more weight to the current time audio features. The alignment bias $B^{A}(1 \leq i \leq t, 1 \leq j \leq KT)$ is:
301
+
302
+ $$
303
+ B^{A}(i, j) = \left\{ \begin{array}{ll} 0 & \text{if } (i = j), \\ -\infty & \text{otherwise.} \end{array} \right. \tag{9}
304
+ $$
305
+
306
+ The modified Audio-Motion Attention is represented as:
307
+
308
+ $$
309
+ \operatorname {A t t e n t i o n} \left(Q ^ {v}, K ^ {a}, V ^ {a}, B ^ {A}\right) = \sigma \left(\frac {Q ^ {v} \left(K ^ {a}\right) ^ {T}}{\sqrt {d _ {k}}} + B ^ {A}\right) V ^ {a}, \tag {10}
310
+ $$
311
+
312
+ where $Q^v$ are the learned queries from viseme features, $K^a$ the keys and $V^a$ the values from the audio features, $\sigma(\cdot)$ is the softmax activation function, and $d_k$ is the dimension of the keys.
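+ A literal sketch of Equations (9) and (10): with the $-\infty$ bias off the diagonal, each viseme query attends to the audio feature of the same time step. Shapes assume $t \leq T$; names are illustrative, not the authors' code.
+
+ ```python
+ import torch
+
+ def audio_motion_attention(Qv, Ka, Va):
+     """Qv: (t, d) viseme queries; Ka, Va: (T, d) audio keys/values."""
+     t, T = Qv.shape[0], Ka.shape[0]
+     d_k = Qv.shape[-1]
+     bias = torch.full((t, T), float("-inf"))
+     idx = torch.arange(t)
+     bias[idx, idx] = 0.0                     # zero bias on the diagonal (i == j)
+     scores = Qv @ Ka.T / d_k ** 0.5 + bias
+     return torch.softmax(scores, dim=-1) @ Va
+ ```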
313
+
314
+ # 10.3. Motion Decoder:
315
+
316
+ The motion decoder aims to generate 3D facial animations \(\hat{y}_{1:T}\) from the style-agnostic viseme features \(\hat{v}_{1:T}\) and a style embedding \(\hat{S}_i\). Specifically, our motion decoder consists of two components, a style embedding layer and a motion synthesis block. The style embedding layer takes an 8-dimensional one-hot encoding and produces a 64-dimensional style embedding. The input viseme features are concatenated with the style embedding and fed into 4 successive linear layers with leaky-ReLU activations. The output of the 4-layer block is 64-dimensional. A final fully connected layer maps the 64-dimensional features to the 3D face deformation described as per-vertex displacements of size 15069. This layer defines the motion deformation basis of a subject and is adapted based on a reference sequence.
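+ A sketch of such a motion decoder with the dimensions given above; the module wiring is an assumption, not the released implementation.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class MotionDecoder(nn.Module):
+     def __init__(self, num_styles=8, viseme_dim=64, style_dim=64, out_dim=15069):
+         super().__init__()
+         self.style_embedding = nn.Linear(num_styles, style_dim)   # one-hot -> 64-dim style
+         layers, dims = [], [viseme_dim + style_dim, 64, 64, 64, 64]
+         for d_in, d_out in zip(dims[:-1], dims[1:]):
+             layers += [nn.Linear(d_in, d_out), nn.LeakyReLU()]
+         self.synthesis = nn.Sequential(*layers)
+         self.deformation_basis = nn.Linear(64, out_dim)           # refined during adaptation
+
+     def forward(self, visemes, one_hot_style, template):
+         # visemes: (B, T, 64); one_hot_style: (B, 8); template: (B, 1, 15069) flattened vertices.
+         style = self.style_embedding(one_hot_style).unsqueeze(1).expand(-1, visemes.shape[1], -1)
+         feats = self.synthesis(torch.cat([visemes, style], dim=-1))
+         return template + self.deformation_basis(feats)           # per-vertex displacements
+ ```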
317
+
318
+ Training Details: We use the ADAM optimizer with a learning rate of 1e-4 for both the style-agnostic transformer training and the style adaptation stage. During the style-agnostic transformer training, the parameters of the Wav2Vec 2.0 layers in the audio encoder are fixed. Our model is trained for 300 epochs, and the best model is chosen based on the validation reconstruction loss. During the style-adaptation stage, we first generate the viseme features and keep them fixed. Then, we optimize for the style embedding for 300 epochs. Finally, the style embedding and the final motion deformation basis are refined for another 300 epochs.
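+ A sketch of the optimizer setup for the style-agnostic training stage (module names are assumptions):
+
+ ```python
+ import torch
+
+ def configure_training(model, lr=1e-4):
+     """Freeze the Wav2Vec 2.0 layers and return an ADAM optimizer over the remaining parameters."""
+     for p in model.audio_encoder.wav2vec.parameters():
+         p.requires_grad = False
+     trainable = [p for p in model.parameters() if p.requires_grad]
+     return torch.optim.Adam(trainable, lr=lr)   # trained for 300 epochs
+ ```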
319
+
320
+ # 11. Broader Impact
321
+
322
+ Our proposed method aims at the synthesis of realistic-looking 3D facial animations. Ultimately, these animations can be used to drive photo-realistic digital doubles of people
323
+
324
+ in audio-driven immersive telepresence applications in AR or VR. However, this technology can also be misused for so-called DeepFakes. Given a voice cloning approach, our method could generate 3D facial animations that drive an image synthesis method. This can lead to identity theft, cyber mobbing, or other harmful criminal acts. We believe that conducting research openly and transparently could raise the awareness of the misuse of such technology. We will share our implementation to enable research on digital multi-media forensics. Specifically, synthesis methods are needed to produce the training data for forgery detection [22].
325
+
326
+ All participants in the study have given written consent to the usage of their video material for this publication.
2301.00xxx/2301.00023/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:77bd4572362238a056eb015d0161e7a6afa3b784af3b042acc5ab343f16748f7
3
+ size 760878
2301.00xxx/2301.00023/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00050/e9e4f2ea-c256-4f2d-8617-7d1874a9b913_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00050/e9e4f2ea-c256-4f2d-8617-7d1874a9b913_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00050/e9e4f2ea-c256-4f2d-8617-7d1874a9b913_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9b8e17b1af34eef2ea2bf683c9cafc34d46ce77179da2827ccc27e43b0813ef
3
+ size 10080449
2301.00xxx/2301.00050/full.md ADDED
@@ -0,0 +1,454 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Long-Term Online Multi-Session Graph-Based SPLAM with Memory Management
2
+
3
+ Mathieu Labbé $\cdot$ François Michaud
4
+
5
+ Abstract For long-term simultaneous planning, localization and mapping (SPLAM), a robot should be able to continuously update its map according to the dynamic changes of the environment and the new areas explored. With limited onboard computation capabilities, a robot should also be able to limit the size of the map used for online localization and mapping. This paper addresses these challenges using a memory management mechanism, which identifies locations that should remain in a Working Memory (WM) for online processing from locations that should be transferred to a Long-Term Memory (LTM). When revisiting previously mapped areas that are in LTM, the mechanism can retrieve these locations and place them back in WM for online SPLAM. The approach is tested on a robot equipped with a short-range laser rangefinder and a RGB-D camera, patrolling autonomously $10.5\mathrm{km}$ in an indoor environment over 11 sessions while having encountered 139 people.
6
+
7
+ Keywords SLAM $\cdot$ path planning $\cdot$ pose graph $\cdot$ multi-session $\cdot$ loop closure detection
8
+
9
+ # 1 Introduction
10
+
11
+ The ability to simultaneously map an environment, localize itself in it, and plan paths using this information
12
+
13
+ This work was supported by the Natural Sciences and Engineering Research Council of Canada (NSERC), the Canada Research Chair program and the Canadian Foundation for Innovation.
14
+
15
+ M. Labbé
16
+
17
+ E-mail: mathieu.m.labbe@usherbrooke.ca
18
+
19
+ F. Michaud
20
+
21
+ E-mail: francois.michaud@usherbrooke.ca
22
+
23
+ Interdisciplinary Institute for Technological Innovation (3IT), Université de Sherbrooke, Sherbrooke, Québec, Canada
24
+
25
+ is known as Simultaneous Planning, Localization And Mapping, or SPLAM (Stachniss, 2009). This task can be particularly complex when done online on a robot with limited computing resources in large, unstructured and dynamic environments. Since SPLAM can be seen as an extension of Simultaneous Localization And Mapping (SLAM), many approaches exist (Thrun et al., 2005). Our interest lies with graph-based SLAM approaches (Grisetti et al., 2010), for which combining a lightweight topological map with a detailed metrical map proves more suitable for large-scale mapping and navigation (Konolige et al., 2011).
26
+
27
+ Two important challenges in graph-based SPLAM are:
28
+
29
+ - Multi-session mapping, also known as the kidnapped robot problem or the initial state problem: when turned on, a robot does not know its relative position to a map previously created, making it impossible to plan a path to a previously visited location. A solution is to have the robot localize itself in a previously-built map before initiating mapping. This solution has the advantage of always using the same referential, resulting in only one map being created across the sessions. However, the robot must start in an already mapped portion of the environment. Another approach is to initialize a new map with its own referential on startup, and when a previously visited location is encountered, a transformation between the two maps can be computed. The transformations between the maps can be saved explicitly with special nodes called anchor nodes (McDonald et al., 2012; Kim et al., 2010), or implicitly with links added between each map (Konolige and Bowman, 2009; Latif et al., 2013). This process is referred to as loop closure detection. Loop closure detection approaches that are independent of the
30
+
31
+ robot's estimated position (Ho and Newman, 2006) can intrinsically detect if the current location is a new location or a previously visited one among all the mapping sessions conducted in the past. Popular loop closure detection approaches are appearance-based (Garcia-Fidalgo and Ortiz, 2015), exploiting the distinctiveness of images of the environment. The underlying idea is that loop closure detection is done by comparing all previous images with the new one. When loop closures are found between the maps, a global map can be created by combining the maps from each session. In graph-based SLAM, graph pose optimization approaches (Folkesson and Christensen, 2007; Grisetti et al., 2007; Kummerle et al., 2011; Johannsson et al., 2013) use these loop closures to reduce odometry errors inside each map and in between the maps.
32
+
33
+ - Long-term mapping in dynamic environments. Persistent (Milford and Wyeth, 2010), lifelong (Konolige and Bowman, 2009) or continuous (Pirker et al., 2011) are terms generally used to describe SLAM approaches working in such conditions. Continuously updating and adding new data to the map in unbounded or dynamic environments will inevitably increase the map size over time. Online simultaneous planning, localization and mapping requires that new incoming data be processed faster than the time to acquire them. For example, if data are acquired at $1\mathrm{Hz}$ , updating the map should be done in less than 1 sec. As the map grows, the time required for loop closure detection and graph optimization increases, and eventually limits the size of the environment that can be mapped and used online.
34
+
35
+ To address these challenges, we introduce SPLAM-MM, a graph-based SPLAM with a memory management (MM) mechanism. As demonstrated in (Labbe and Michaud, 2013), memory management can be used to limit the size of the map so that loop closure detections are always processed under a fixed time limit, thus satisfying online requirements for long-term and large-scale environment mapping. The idea behind SPLAM-MM is to limit the number of nodes available for loop closure detection and graph optimization, keeping enough observations in the map for successful online localization and planning while still having the ability to generate a global representation of the environment that can adapt to changes over time.
36
+
37
+ The paper is organized as follows. Section 2 reviews graph-based SLAM approaches that reduce the size of the map when revisiting the same environment while continuously adapting to dynamic changes. Section 3 describes the implementation and the operating principles
38
+
39
+ associated with the use of memory management with a graph-based SPLAM approach, which extends our previous metric-based SLAM approach (Labbe and Michaud, 2014) with a new planning capability. The implementation integrates four algorithms: loop closure detection (Labbe and Michaud, 2013), graph optimization (Grisetti et al., 2007), a metrical path planner (Marder-Eppstein et al., 2010) and a custom topological path planner. Section 4 presents experimental results of 11 SPLAM sessions using the AZIMUT-3 robot in an indoor environment over $10.5\mathrm{km}$ . Section 5 discusses strengths and limitations of SPLAM-MM, and Section 6 concludes the paper.
40
+
41
+ # 2 Related Work
42
+
43
+ Lifelong appearance-based SLAM requires dealing with dynamic environments. Glover et al. (2010) present an appearance-based SLAM approach that had to operate in different lighting conditions over three weeks. An interesting observation from their experiments is that even when revisiting the same locations, the map still grows: in dynamic environments, the loop closure detector is sometimes unable to detect loop closures, duplicating locations in the map. A map management approach is therefore required to limit map size. In highly dynamic environments, multiple views of the same location may also be required for proper localization. Churchill and Newman (2012) present a graph-based SLAM approach where visual experiences of the same locations are kept in the map, to increase localization robustness to dynamic changes caused for instance by outdoor illumination conditions. If localization fails when revisiting an area, new experiences are added to the map. Even if adding new visual experiences to the map happens less often over time (as the robot explores the same location), there is no mechanism to limit this. Pirker et al. (2011) present a continuous monocular SLAM approach where new key frames are added to the map only when the environment has changed, to keep its size proportional to the explored space. But if the environment changes very often, there is no mechanism to limit the number of key frames over the same physical location.
44
+
45
+ Some SLAM approaches can handle dynamic changes of the environment while limiting the size of the map for long-term operation. Biber et al. (2005) present a sample-based representation for maps, to handle changes at different timescales, tracking both stationary and non-stationary elements of the environment. The idea is to refresh samples stored for each timescale with new sensor measurements. Map growth is then indirectly limited as older memories fade at
46
+
47
+ different rates depending on the timescale. Walcott-Bryant et al. (2012) describe Dynamic Pose-Graph SLAM (DPG-SLAM), a long-term mapping approach that detects static and dynamic changes of the environment through time. To keep consistency of the graph while reducing its size, nodes that are not observable anymore are removed. Johannsson et al. (2013) also remove unobservable nodes to limit the size of the map over time when revisiting the same area. Similar nodes of the graph are merged together while keeping only the new loop closure detection. However, the graph size is not bounded when exploring new areas. Krajnik et al. (2016) present an occupancy grid approach where each cell in the map estimates its occupancy value depending on periodical and cyclic changes occurring in the environment. This increases localization and navigation accuracy in dynamic environments compared to static maps, as the predicted map represents the correct state of the environment at that time of the day (e.g., doors can change to be opened or closed). The maximum data kept for each cell is bounded by some parameters (depending on the smallest and longest cyclic periods that should be detected), thus keeping memory usage fixed. However, the approach assumes that the navigation phase always occurs in the same environment as the first mapping cycle, without possibility to extend it afterward.
48
+
49
+ These problems of lifelong SLAM are also addressed in some SPLAM approaches. Milford and Wyeth (2010) present a solution to limit the size of the map (called experience map) while revisiting the same area: close nodes are merged together up to a maximum density threshold. This approach has the advantage of making the map size independent of the operating time, but the diversity of the observations on each location is somewhat lost. Konolige et al. (2011) use a view-based graph SLAM approach (Konolige and Bowman, 2009) in a SPLAM context. The approach preserves diversity of the images referring to the same location so that the map can handle dynamic changes over time, and forgetting images limits the size of the graph over time when revisiting the same area. However, the graph still grows when visiting new areas.
50
+
51
+ Overall, these approaches reduce map size when revisiting the same area, while continuously adapting to dynamic changes. This makes them independent or almost independent of the operation time of the robot in these conditions, but they are all limited to a maximum size of the environment that can be mapped online. The SPLAM-MM approach deals specifically with this limitation.
52
+
53
+ ![](images/dc25706f5e00caee7c8b529f88702b2fb14456a87b0e5973c10da9ead91897c1.jpg)
54
+ Fig. 1 The AZIMUT-3 robot equipped with a URG-04LX laser range finder and a Xtion PRO LIVE sensor.
55
+
56
+ # 3 Memory Management for SPLAM
57
+
58
+ The underlying representation of SPLAM-MM is a graph with nodes and links. The nodes contain the following information:
59
+
60
+ - ID: unique time index of the node.
61
+
62
+ - Weight: an indication of the importance of the node, used for memory management.
63
+
64
+ - Bag-of-words (BOW): visual words used for loop closure detections. They are SURF features (Bay et al., 2008) quantized to an incremental vocabulary based on KD-Trees.
65
+
66
+ - Sensor data: used to find similarities between nodes and to construct maps. For this paper, our implementation of SPLAM-MM uses the AZIMUT-3 robot (Ferland et al., 2010), equipped with a URG-04LX laser rangefinder and an Xtion PRO LIVE RGB-D camera, as shown in Fig. 1. The sensory data used are:
67
+
68
+ - Pose: the position of the robot computed by its odometry system (e.g., the value given by wheel odometry), expressed in $(x, y, \theta)$ coordinates.
69
+ - RGB image: used to extract visual words.
70
+ - Depth image: used to find 3D position of the visual words. The depth image is registered with the RGB image, i.e., each depth pixel corresponds exactly to the same RGB pixel.
71
+ - Laser scan: used for loop closure transformations and odometry refinements, and by the Proximity Detection module.
72
+
73
+ The links store rigid transformations (i.e., Euclidean transformations derived from odometry or loop closures) between nodes. There are four types of links (a minimal data-structure sketch is given after the list):
74
+
75
+ ![](images/5035346411f2bebc9cad81bfbda19ae529ff33f18e7a7ffb9f0fd80586a49c30.jpg)
76
+ Fig. 2 Memory management and control architecture of SPLAM-MM.
77
+
78
+ - Neighbor link: created between a new node and the previous one.
79
+ - Loop closure link: added when a loop closure is detected between the new node and one in the map.
80
+ - Proximity link: added when two close nodes are aligned together.
81
+ - Temporary link: used for path planning purposes. It is used to keep the planned path connected to the current map.
82
+
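+ As an illustration only (the paper itself contains no code), the graph elements described above could be represented as follows; all names in this sketch are assumptions.
+
+ ```python
+ from dataclasses import dataclass, field
+ from typing import List, Optional, Tuple
+
+ import numpy as np
+
+ # Link types used in the graph (see the list above).
+ NEIGHBOR, LOOP_CLOSURE, PROXIMITY, TEMPORARY = range(4)
+
+ @dataclass
+ class Link:
+     from_id: int           # ID of the source node
+     to_id: int             # ID of the target node
+     kind: int              # NEIGHBOR, LOOP_CLOSURE, PROXIMITY or TEMPORARY
+     transform: np.ndarray  # rigid transformation (x, y, theta) between the two nodes
+
+ @dataclass
+ class Node:
+     node_id: int                        # unique time index (ID)
+     weight: int = 0                     # importance, used by memory management
+     word_ids: set = field(default_factory=set)          # bag-of-words (visual word IDs)
+     pose: Tuple[float, float, float] = (0.0, 0.0, 0.0)  # odometry pose (x, y, theta)
+     rgb: Optional[np.ndarray] = None    # RGB image
+     depth: Optional[np.ndarray] = None  # registered depth image
+     scan: Optional[np.ndarray] = None   # 2D laser scan points
+     links: List[Link] = field(default_factory=list)
+ ```
+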
83
+ Figure 2 presents a high-level representation of SPLAM-MM. Basically, it consists of a graph-based SLAM module with memory management, to which path planners are added. Memory management involves the use of a Working Memory (WM) and a Long-Term Memory (LTM). WM is where maps, which are graphs of nodes and links, are processed. To satisfy online constraints, nodes can be transferred to and retrieved from LTM. More specifically, the WM size indirectly depends on a fixed time limit $T$ : when the time required to update the map (i.e., the time required to execute the processes in the Graph-based SLAM-MM block) reaches $T$ , some nodes of the map are transferred from WM to LTM, thus keeping WM size nearly constant and processing time around $T$ . However, when a loop closure is detected, neighbors in LTM of the loop closure node can be retrieved from LTM to WM for further loop closure detections. In other words, when a robot revisits an area which was previously transferred to LTM, it can incrementally retrieve the area if at least one node of this area is still in WM. When some LTM nodes are retrieved, nodes in WM from other areas in the map
84
+
85
+ can be transferred to LTM, limiting the map size in WM and therefore keeping processing time around $T$ .
86
+
87
+ Therefore, the choice of which nodes to keep in WM is key in SPLAM-MM. The objective is to have enough nodes in WM from each mapping session for loop closure detections and to keep a maximum number of nodes in WM for generating a map usable for correctly following a planned path, while still satisfying online processing. Two heuristics are used to establish the compromise between the selection of which nodes to keep in WM and online processing:
88
+
89
+ - Heuristic 1 is inspired by observations made by psychologists (Atkinson and Shiffrin, 1968; Baddeley, 1997) that people remember better the areas where they spent most of their time, compared to those where they spent less time. In terms of memory management, this means that the longer the robot is at a particular location, the larger the weight of the corresponding node should be. The oldest and least weighted nodes in WM are transferred to LTM before the others, thus keeping in WM only the nodes seen for longer periods of time. As demonstrated in (Labbe and Michaud, 2013), this heuristic proves quite efficient in establishing the compromise between search time and space, as driven by the environment and the experiences of the robot (a minimal sketch of this transfer policy is given after the list).
90
+ - Heuristic 2 is used to identify nodes that should stay in WM for autonomous navigation. Nodes on a planned path could have small weights and may be identified for transfer to LTM by Heuristic 1, thus eliminating the possibility of finding a loop closure link or a proximity link with these nodes and correctly
91
+
92
+ ![](images/0bb16ff4e24b0af1bddeb66199f11923e21ccad0816e6deffe1a05069b9e7e4c.jpg)
93
+ Fig. 3 Illustration of the local map (inner dashed area) and the global map (outer dotted area) in multi-session mapping. Red nodes are in LTM, while all other nodes are in WM. Loop closure links are shown using bidirectional green arrows.
94
+
95
+ follow the path. Therefore, Heuristic 2 must supersede Heuristic 1 and allow upcoming nodes to remain in WM, even if they are old and have a small weight.
96
+
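+ The following minimal sketch (an illustration under assumed names, not the authors' implementation) shows how the two heuristics could combine into a transfer policy: Heuristic 2 exempts planner-required nodes, then Heuristic 1 picks the oldest, least-weighted remaining nodes.
+
+ ```python
+ def select_nodes_to_transfer(wm_weights, immune_ids, n_transfer):
+     """Pick nodes to move from WM to LTM when the map update time exceeds T.
+
+     wm_weights : dict {node_id: weight} for nodes currently in WM
+     immune_ids : node IDs kept in WM by Heuristic 2 (upcoming nodes on the path, STM, ...)
+     n_transfer : how many nodes to transfer at this iteration
+     """
+     # Heuristic 2: planner-required nodes are never candidates for transfer.
+     candidates = [nid for nid in wm_weights if nid not in immune_ids]
+     # Heuristic 1: least-weighted nodes go first; ties broken by smallest ID,
+     # i.e., the oldest node, since IDs are time indexes.
+     candidates.sort(key=lambda nid: (wm_weights[nid], nid))
+     return candidates[:n_transfer]
+
+ # Node 3 is old and low-weighted but lies on the planned path, so it stays in WM.
+ print(select_nodes_to_transfer({1: 0, 2: 5, 3: 0, 4: 1}, immune_ids={3}, n_transfer=2))  # [1, 4]
+ ```
+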
97
+ The Graph-based SLAM-MM block provides two types of maps derived from nodes in WM and LTM:
98
+
99
+ - Local map, i.e., the largest connected graph that can be created from the last node in WM with nodes available in WM only. The local map is used for online path planning.
100
+ - Global map, i.e., the largest connected graph that can be created from the last node in WM with nodes in WM and LTM. It is used for offline path planning.
101
+
102
+ Figure 3 uses diamonds to represent initial and end nodes for each mapping session. The nodes in LTM are shown in red and the others are those in WM. The local map is created using only the nodes in WM that are linked to the last node. The graph linking the last node with other nodes in WM and LTM represents the global map (outer dotted area). If loop closure detections are found between nodes of different maps, loop closure links can be generated, and the local map can span over multiple mapping sessions. Other nodes in WM but not included in the local map are unreachable from the last node, but they are still used for loop closure detections since all nodes in WM (including those in Map 2 for instance) are examined.
103
+
104
+ The modules presented in Fig. 2 are described as follows.
105
+
106
+ # 3.1 Short-Term Memory Module
107
+
108
+ Short-Term Memory (STM) is the entry point where sensor data are assembled into a node to be added to the map. Similarly to (Labbe and Michaud, 2013), the role of the STM module is to update node weight based on visual similarity. When a node is created, a unique time index ID is assigned and its weight is initialized to 0. The current pose, RGB image, depth image and laser scan readings are also memorized in the node. If two consecutive nodes have similar images, i.e., the ratio of corresponding visual words between the nodes is over a specified threshold $Y$ , the weight of the previous node is increased by one. If the robot is not moving (i.e., odometry poses are the same), the new node is deleted. To reduce odometry errors on successive STM nodes, transformation refinement is done using 2D iterative-closest-point (ICP) optimization (Besl and McKay, 1992) on the rigid transformation of the neighbor link with the previous node and the corresponding laser scans. If the ratio of ICP point correspondences between the laser scans over the total laser scan size is greater than or equal to $C$ , the neighbor link's transformation is updated with the correction.
109
+
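+ A simplified sketch of this STM update (function and attribute names are assumptions; nodes are taken to expose a `word_ids` set, a `pose` and an integer `weight`, and the ICP refinement step is left out):
+
+ ```python
+ def update_stm(prev_node, new_node, Y):
+     """Update the previous node's weight from its visual similarity with the new node."""
+     if new_node.pose == prev_node.pose:
+         return None  # robot did not move: discard the new node
+
+     # Ratio of corresponding visual words between the two consecutive nodes.
+     common = len(new_node.word_ids & prev_node.word_ids)
+     ratio = common / max(len(new_node.word_ids), len(prev_node.word_ids), 1)
+
+     if ratio > Y:
+         prev_node.weight += 1  # the same location is being observed longer
+     return new_node
+ ```
+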
110
+ When the STM size reaches a fixed size limit of $S$ nodes, the oldest node in STM is moved to WM. STM size is determined based on the velocity of the robot and the rate at which nodes are added to the map. Because images of STM nodes are generally very similar to that of the newly added node, keeping $S$ nodes in STM avoids using them for appearance-based loop closure detection once in WM. For example, at the same velocity, STM size should be larger if the rate at which the nodes are added to the map increases, in order to keep nodes with consecutive similar images in STM. Transferring nodes with images very similar to the current node from STM to WM too early limits the ability to detect loop closures with older nodes in WM.
111
+
112
+ # 3.2 Appearance-based Loop Closure Detection Module
113
+
114
+ Appearance-based loop closure detection is based on the bag-of-words approach described in (Labbe and Michaud, 2013). Briefly, this approach uses a Bayesian filter to evaluate appearance-based loop closure hypotheses over all previous images in WM. When a loop closure hypothesis reaches a pre-defined threshold $H$ , a loop closure is detected. Visual words of the nodes are used to compute the likelihood required by the filter. In this work, the Term Frequency-Inverse Document Frequency (TF-IDF) approach (Sivic and Zisserman, 2003) is used for fast likelihood estimation, and FLANN (Fast
115
+
116
+ Library for Approximate Nearest Neighbors) incremental KD-Trees (Muja and Lowe, 2009) are used to avoid rebuilding the vocabulary at each iteration. To keep it balanced, the vocabulary is rebuilt only when it doubles in size.
117
+
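+ As a rough illustration of the TF-IDF scoring step (the exact weighting used by the authors may differ; names here are assumptions), the likelihood of each WM node given the words of the new image could be computed as follows.
+
+ ```python
+ import math
+ from collections import Counter
+
+ def tfidf_scores(query_words, wm_words):
+     """Score WM nodes against the visual words of the new image.
+
+     query_words : list of word IDs extracted from the new image
+     wm_words    : dict {node_id: list of word IDs} for nodes in WM
+     Returns {node_id: score}; higher means more visually similar.
+     """
+     n_docs = len(wm_words)
+     df = Counter()                       # document frequency of each word in WM
+     for words in wm_words.values():
+         df.update(set(words))
+
+     query_tf = Counter(query_words)
+     scores = {}
+     for node_id, words in wm_words.items():
+         node_tf = Counter(words)
+         score = 0.0
+         for w in set(query_words) & set(words):
+             idf = math.log(n_docs / df[w])  # rare words weigh more
+             score += (query_tf[w] / len(query_words)) * (node_tf[w] / len(words)) * idf * idf
+         scores[node_id] = score
+     return scores
+ ```
+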
118
+ The RGB image, from which the visual words are extracted, is registered with a depth image. Using (1), for each 2D point $(x,y)$ in the rectified RGB image, a 3D position $P_{xyz}$ can be computed using the calibration matrix (focal lengths $f_{x}$ and $f_{y}$ , optical centres $c_{x}$ and $c_{y}$ ) and the depth information $d$ for the corresponding pixel in the depth image. The 3D positions of the visual words are then known. When a loop closure is detected, the rigid transformation between the matching images is computed using a RANSAC (RANdom SAmple Consensus) approach which exploits the 3D visual word correspondences (Rusu and Cousins, 2011). If a minimum of $I$ inliers are found, the transformation is refined using the laser scans in the same way as the odometry correction in STM using 2D ICP transformation refinement. If transformation refinement is accepted, then a loop closure link is added with the computed transformation between the corresponding nodes. The weight of the current node is updated by adding the weight of the loop closure hypothesis node and the latter is reset to 0, so that only one node with a large weight represents the same location.
119
+
120
+ $$
121
+ P_{xyz} = \left[ \frac{(x - c_x) \cdot d}{f_x}, \frac{(y - c_y) \cdot d}{f_y}, d \right]^{T} \tag{1}
122
+ $$
123
+
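+ Equation (1) is the usual pinhole back-projection; a minimal sketch of applying it to a registered depth image (hypothetical helper, with made-up intrinsics in the example) is:
+
+ ```python
+ import numpy as np
+
+ def backproject(x, y, depth, fx, fy, cx, cy):
+     """Back-project the rectified RGB pixel (x, y) with registered depth to 3D, as in Eq. (1)."""
+     d = float(depth[y, x])   # depth (in meters) of the corresponding pixel
+     if d <= 0.0:
+         return None          # invalid depth reading
+     return np.array([(x - cx) * d / fx, (y - cy) * d / fy, d])
+
+ # Example with a flat synthetic 2 m depth image and assumed camera intrinsics.
+ depth = np.full((480, 640), 2.0)
+ print(backproject(400, 300, depth, fx=525.0, fy=525.0, cx=319.5, cy=239.5))
+ ```
+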
124
+ By doing appearance-based loop closure detection this way, setting $H$ high means that there is less chance of detecting false positives, but at the cost of detecting fewer loop closures (Labbe and Michaud, 2013). For SPLAM-MM, $H$ can be set relatively low to detect more loop closures because false positives that are geometrically different will be rejected by the rigid transformation computation step (i.e., the 3D visual word correspondences and 2D ICP transformation refinement).
125
+
126
+ # 3.3 Proximity Detection Module
127
+
128
+ Appearance-based loop closure detection is limited by the perceptual range of the sensory data used. For instance, when the robot is revisiting areas in the opposite direction, the RGB-D camera on AZIMUT-3 is not pointing in the same direction as when the nodes were created, and thus no appearance-based loop closures can be detected. This also happens when there are not enough visual features within the depth range of the RGB-D camera (e.g., white walls or long halls).
129
+
130
+ Simply relying on appearance-based loop closure detections for map corrections would then limit path planning capabilities, and make navigation difficult in such conditions. Figure 4a illustrates a situation where the robot is in a hall coming back to its starting position in the reverse direction. Setting a goal at the starting position would make the planner fail because no loop closures could be found to correct the odometry, resulting in a wall being placed directly over the starting position. One solution would be to have the robot visit the nodes of the graph backward so loop closures could be detected to correct the map, and ultimately be able to reach the starting position. However, this is inefficient and unsafe if the robot does not have sensors pointing backward. To deal with such situations, the Proximity Detection module uses laser rangefinder data to correct odometry drift in areas where the camera cannot detect loop closures. With a field of view of more than $180^{\circ}$ , the laser scans can be aligned in the reverse direction, generating proximity links. As laser scans are not as discriminative as images, proximity detection is restricted to nodes of the local map located around the estimated position of the robot. Figure 4b illustrates the result.
131
+
132
+ Figure 5 illustrates how nodes located close to the robot are selected by the Proximity Detection module. Only nodes in the local map with their pose inside radius $R$ centered on the robot are used. Nodes in STM are not considered in order to avoid adding useless links with nodes close by: this would increase graph optimization time without significantly improving the map. The nodes are then segmented into groups of nodes connected only by neighbor links. A group must have its node nearest to the robot inside a fixed radius $L$ defining close-by nodes (with $L < R$ ) to be considered for proximity detection, to keep the length of the resulting proximity links small for path planning. Note that Appearance-based Loop Closure Detection is done before Proximity Detection, thus if the nearest node already has a loop closure link with the new node, the group is ignored. Proximity detection is then applied separately on each group of nodes by doing the following steps (a minimal sketch of this selection and grouping is given after the steps):
133
+
134
+ 1. A rigid transformation between the nearest node of each group and the new node added to the map is computed as in Section 3.2, and if it is accepted, a proximity link is added between the corresponding nodes, and the group of nodes is ignored for step 2. These links are referred to as visual proximity links because visual words are used in the transformation estimation.
135
+ 2. To avoid having to compare multiple nodes with very similar laser scans (and thus to save computation
136
+
137
+ ![](images/1b75a3438aad1ff1e0aa294394b060a50e2ce8703631aba653293dd8431aa8c2.jpg)
138
+
139
+ ![](images/49f2eda8a121ac085ed55d3a2d013801b059fe478354c4601b82dd682b72bd27.jpg)
140
+
141
+ ![](images/90c5c0c95a10dc3d1204688b95ef0eabaee74d3178c8d05e8b45509461d04d9c.jpg)
142
+ Fig. 4 Illustration of the role of the Proximity Detection module. On the left are the raw laser scans, the blue dot is the starting position, and on the right the corresponding occupancy grid map at $0.05\mathrm{m}$ resolution (black, light gray and dark gray areas are occupied, empty and unknown spaces, respectively). In a), the yellow circle on the right locates the problematic situation: after the second traversal, the first nodes of the graph are located exactly over the wall, making it impossible to plan a path (red arrow on the right) to return to the starting position. In b), proximity links are detected using only the laser scans, and the local map can then be correctly optimized.
143
+
144
+ ![](images/29cb3e6f2ff4e18985f9f70d10ee8caabbf553aa17221c896ba36d1043f152e2.jpg)
145
+
146
+ ), only the most recent node among those within the same fixed small radius $L$ (centered on each node) is kept among the nodes of a remaining group. Then for each group, the laser scans of the nodes are merged together using their respective poses. 2D ICP transformation refinement is done between the merged laser scans and the laser scan of the new node. If the transformation is accepted, a new proximity link with this transformation is added to the graph between the new node and the nearest one in the group.
147
+
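+ A sketch of this node selection and grouping (simplified: the graph is assumed to be given as poses keyed by ID, and consecutive IDs are taken as connected by neighbor links) could be:
+
+ ```python
+ import math
+
+ def proximity_groups(local_map, robot_xy, stm_ids, R, L):
+     """Return groups of close-by local-map nodes to test for proximity detection.
+
+     local_map : dict {node_id: (x, y)} poses of nodes in the local map
+     robot_xy  : (x, y) estimated robot position
+     stm_ids   : node IDs still in STM (ignored)
+     R, L      : detection radius and close-by radius, with L < R
+     """
+     def dist(nid):
+         px, py = local_map[nid]
+         return math.hypot(px - robot_xy[0], py - robot_xy[1])
+
+     # Keep nodes inside radius R that are not in STM.
+     inside = sorted(nid for nid in local_map if nid not in stm_ids and dist(nid) <= R)
+
+     # Segment into groups of nodes connected by neighbor links (consecutive IDs here).
+     groups, current = [], []
+     for nid in inside:
+         if current and nid != current[-1] + 1:
+             groups.append(current)
+             current = []
+         current.append(nid)
+     if current:
+         groups.append(current)
+
+     # Keep only groups whose nearest node lies within the close-by radius L.
+     return [g for g in groups if min(dist(nid) for nid in g) <= L]
+ ```
+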
148
+ # 3.4 Graph Optimization Module
149
+
150
+ TORO (Tree-based network Optimizer) (Grisetti et al., 2007) is used for graph optimization. When loop closure and proximity links are added, the errors derived from odometry can be propagated to all links, thus correcting the local map. This also guarantees that nodes belonging to different maps are transformed into the same referential when loop closures are found.
151
+
152
+ When only one map exists, it is relatively straightforward to use TORO to create a tree because it only has one root. However, for multi-session mapping, each map has its own root with its own reference frame. When loop closures occur between the maps, TORO cannot optimize the graph if there are multiple roots. It may also be difficult to find a unique root when some of the nodes have been transferred to LTM. As a solution, our approach takes the root of the tree to be the latest
153
+
154
+ ![](images/fb7be54f5e503a73614b28a47159c9cdf17fb8232521e55c7bb4334fd60c5f85.jpg)
155
+
156
+ ![](images/e1f7deea050f20c79831778bf44707f090a959679c07a0122b260df6ae78383d.jpg)
157
+ Fig. 5 Illustration of how proximity detection works. In a), the larger dashed circle represents the radius $R$ used to determine close-by nodes, and the smaller dashed circle defined by $L$ is used to limit the length of the links to be created. The empty dots are nodes for which the laser scans are not used, either because they are outside the radius $R$ , they are too close to each other or they are in STM. In b) and c), nodes in the radius $R$ from the two segmented groups of nodes are processed for proximity detection. In d), proximity links are added (yellow), and after graph optimization, the groups of nodes are connected together and the respective laser scans are now aligned.
158
+
159
+ node added to the local map, which is always uniquely defined across intra-session and inter-session mapping. All other poses in the graph are then optimized using the last odometry pose as the referential.
160
+
161
+ # 3.5 Path Planning Modules
162
+
163
+ Memory management has a significant effect on how to do path planning online using graph-based SLAM, for which the map changes at almost every iteration and only the local map is accessible while executing the plan. This differs from approaches that assume that the map is static and/or that all the previously visited locations always remain in the map. In this paper, SPLAM-MM uses two path planners: a Metrical Path Planner (MPP) and a Topological Path Planner (TPP).
164
+
165
+ # 3.5.1 Metrical Path Planning Module
166
+
167
+ MPP receives a pose expressed in $(x, y, \theta)$ coordinates, and uses the local map to plan a trajectory and to make the robot move toward the targeted pose while avoiding obstacles. Our MPP implementation exploits the ROS navigation stack (Marder-Eppstein et al., 2010) to compute trajectories expressed as a sequence of velocity commands (expressed as twists) sent to the robot's Motion Controller module. A global Costmap is used to plan a trajectory to a targeted pose. MPP creates the global Costmap from an occupancy grid created using the assembled laser scans from the latest local map. Each time the local map is updated, the occupancy grid is re-assembled and the trajectory is re-planned. MPP also uses a local Costmap for its Dynamic Window Approach (DWA) (Fox et al., 1997) to handle dynamic obstacles for collision avoidance. The local Costmap is created directly from sensor readings. To create the local Costmap, only using the laser rangefinder for obstacle detection proved to be insufficient: while the laser range finder can detect most of the obstacles (e.g., walls, people, table legs), it is located $40~\mathrm{cm}$ above the floor and all obstacles under this height cannot be detected. Therefore, the depth image from the RGB-D camera is also used to detect these small obstacles and to add them to the local Costmap. Figure 6 shows an example where combining laser scans and RGB-D data creates a more robust and a safer local Costmap for navigation. Note that segmentation of the point cloud generated from the depth image is required to be able to add or clear small dynamic obstacles below the RGB-D camera. To segment the ground, all points with normals parallel to the $z$-axis (up to an angle $Z$ ) are labeled as ground. Then, all other points under a maximum height $U$ are
168
+
169
+ labeled as obstacles. This method would also make the robot capable of operating on uneven terrain.
170
+
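+ A sketch of this ground/obstacle segmentation (assuming the depth image has already been converted to a point cloud with surface normals, expressed in a frame where $z$ points up) could be:
+
+ ```python
+ import numpy as np
+
+ def segment_ground_and_obstacles(points, normals, Z, U):
+     """Label points as ground or obstacle for the local Costmap.
+
+     points  : (N, 3) array of 3D points in the robot frame (z up)
+     normals : (N, 3) array of unit surface normals of those points
+     Z       : maximum angle (rad) between a ground normal and the z-axis
+     U       : maximum obstacle height (m)
+     Returns two boolean masks (ground, obstacles).
+     """
+     up = np.array([0.0, 0.0, 1.0])
+     # Ground points have normals (nearly) parallel to the z-axis.
+     ground = np.abs(normals @ up) >= np.cos(Z)
+     # All remaining points below the maximum height U are obstacles.
+     obstacles = ~ground & (points[:, 2] <= U)
+     return ground, obstacles
+ ```
+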
171
+ # 3.5.2 Topological Path Planning Module
172
+
173
+ When TPP receives a goal identified by a node ID from a user (or a high-level module like a task planner, or in this paper the Patrol module), the global map is provided by the graph-based SLAM-MM module, and a topological path is computed to reach this goal. The topological path is a sequence of poses, expressed by their respective node IDs, to reach the goal. This step must be done offline or when the robot is not moving because all nodes linked to the current local map should be retrieved from LTM to build the global map.
174
+
175
+ To choose which nodes to use for navigation, TPP computes a path from the current node to the goal node using Dijkstra's algorithm (Dijkstra, 1959). Dijkstra's algorithm is chosen over $\mathrm{A}^*$ because $\mathrm{A}^*$ requires the distance to the goal, which would in turn require a time-consuming global graph optimization. Dijkstra's algorithm can also be computed directly when fetching the global map from LTM. Similar to (Valencia et al., 2013), to avoid losing track of the planned path, TPP prefers paths traversed in the same direction (e.g., where the camera is facing the same direction as on the nodes of the path) over shortest paths. This increases localization confidence: loop closure detection and visual proximity detection are more reliable than proximity detection using only laser scans because of their double verification (3D visual word correspondences and 2D ICP transformation refinement). To embed this preference in Dijkstra's algorithm, the search cost is angular-based instead of distance-based, i.e., it finds the path with the fewest orientation changes when traversing it in the forward direction (a minimal sketch of this search is given below).
176
+
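+ An illustrative sketch of this angular-cost search (the graph representation and the exact cost function are assumptions) is:
+
+ ```python
+ import heapq
+ import math
+
+ def angular_dijkstra(neighbors, poses, start, goal):
+     """Dijkstra search minimizing orientation changes along the path.
+
+     neighbors : dict {node_id: iterable of linked node IDs}
+     poses     : dict {node_id: (x, y, theta)} optimized node poses
+     Returns the list of node IDs from start to goal, or None if unreachable.
+     """
+     def turn_cost(a, b):
+         # Angle between the stored heading of node a and the heading of edge a -> b,
+         # wrapped to [0, pi]: small when the edge is traversed in its original direction.
+         dx, dy = poses[b][0] - poses[a][0], poses[b][1] - poses[a][1]
+         diff = abs(math.atan2(dy, dx) - poses[a][2])
+         return min(diff, 2.0 * math.pi - diff)
+
+     queue, visited = [(0.0, start, [start])], set()
+     while queue:
+         cost, node, path = heapq.heappop(queue)
+         if node == goal:
+             return path
+         if node in visited:
+             continue
+         visited.add(node)
+         for nxt in neighbors.get(node, ()):
+             if nxt not in visited:
+                 heapq.heappush(queue, (cost + turn_cost(node, nxt), nxt, path + [nxt]))
+     return None
+ ```
+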
177
+ Then, TPP selects the farthest node on the path in the local map and sends its pose to MPP. While MPP makes the robot navigate to its targeted pose, TPP indicates to the graph-based SLAM-MM module which upcoming nodes on the topological path are needed, expressed as a list of node IDs from the latest node reached on the path to the farthest node inside the radius $R$ (to limit the size of the list). The required nodes are identified by the graph-based SLAM-MM module with Heuristic 2 either to remain in WM or to be retrieved from LTM to extend the local map. The maximum number of retrieved nodes per map update is limited to $M$ because this operation is time consuming as it needs to load nodes from LTM. $M$ is set based on the hardware on which LTM is saved and according to the maximum velocity of the robot: for instance, if the robot is moving at the same speed as or slower than when it first traversed the same area, $M = 1$ would
178
+
179
+ ![](images/3efe31e5e857c8f0162c4075ddfa9bd6dcc64e062048b15f5e85a6ea238bc649.jpg)
180
+ (a)
181
+
182
+ ![](images/a4ef5c814a8f7a1e81c8bb18ae2b2ca7a462022df342492ecb4814fd1fd0cf2e.jpg)
183
+ (b)
184
+
185
+ ![](images/d00c5567527a02e3c865ebf25311518d6a571eca233a72c3cecba21ad5c95b82.jpg)
186
+ (c)
187
+ Fig. 6 Example of obstacle detection using the laser rangefinder and the RGB-D camera. The red dots on the chair show what is detected using the laser rangefinder data. The cyan area is derived from the obstacle projection on the ground plane up to the robot's footprint radius, delimiting where the center of the robot should not enter to avoid collisions. In a), only the laser rangefinder data are used and the chair's wheels are not detected, making it unsafe for the robot to plan a path around the chair. In b), the point cloud generated from the camera's depth image is used and the chair's wheels are detected (shown by the orange dots), increasing the cyan area (and consequently the area to avoid to prevent colliding with the chair). Illustration c) presents a view from the RGB-D camera where the segmented ground is shown in green and the obstacles in orange.
188
+
189
+ suffice to retrieve nodes on the path without having to slow down to wait for nodes not yet retrieved.
190
+
191
+ Extending the local map with nodes of the topological path is important for the robot to localize itself using the Appearance-based Loop Closure Detection module or the Proximity Detection module, so that it can follow the topological path appropriately. As the robot moves and new local maps are created, TPP always looks for the farthest node of the topological path that can be reached in the local map to update the current pose sent to the MPP module. If new nodes are retrieved from LTM on the topological path, then the farthest pose is sent to MPP. TPP also detects changes in the local map after graph optimization (e.g., when new loop closures are detected): if so, the updated position of the current pose is sent to MPP.
192
+
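+ A small sketch of how the targeted pose could be chosen (simplified; names are assumptions):
+
+ ```python
+ def farthest_reachable_pose(topological_path, local_map_poses):
+     """Return the pose of the farthest path node currently available in the local map.
+
+     topological_path : list of node IDs from the current node to the goal
+     local_map_poses  : dict {node_id: (x, y, theta)} for nodes in the local map
+     """
+     target = None
+     for node_id in topological_path:
+         if node_id in local_map_poses:
+             target = local_map_poses[node_id]  # keep going: prefer the farthest reachable node
+         else:
+             break  # nodes beyond this point have not been retrieved from LTM yet
+     return target
+ ```
+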
193
+ Up to a ratio $O$ of the WM size, nodes identified by the planner and located within the radius $R$ of the robot's current position are exempted from transfer, with $R$ being the sensor range.
194
+
195
+ Figure 7 presents an example of the interaction between MPP and TPP to reach a goal G. While the robot is moving, TPP always sends the farthest pose P of the node on the topological path (purple links) in the local map. An occupancy grid is assembled with the laser scans contained in the nodes of the local map. MPP uses this occupancy grid to plan a trajectory (yellow arrow) to P. To keep the WM size constant, as nodes are retrieved from LTM on the path, older nodes are transferred to LTM. To follow the path appropriately,
196
+
197
+ proximity links are detected to correct the map as the robot moves, otherwise the situation explained by Fig. 4a would happen.
198
+
199
+ TPP iterates by sending poses until the goal node (within a goal radius $D$ expressed in $\mathrm{m}$ ) is reached. Finally, handling situations where the environment has changed too much for proper localization must be taken into consideration. If no loop closures or proximity detections occur when following a path, a temporary link is added between the current node and the closest one in the path so that the topological path is always linked to the current node in the local map. Without this link, if previous nodes between the current node and those of the topological path are transferred to LTM, the local map would be divided and the nodes of the path would not be in the local map anymore. This temporary link is removed when a new link is added between the current node and the closest one in the path or when the goal is reached. If the robot has not reached the current pose set to MPP after $F$ iterations of SPLAM-MM (e.g., MPP cannot plan to the requested pose because of the presence of a new obstacle or because the robot cannot localize itself on the path), TPP chooses another pose on the upcoming nodes and sends it to MPP. If none of the upcoming nodes can be reached, TPP fails and sends a status message to its connected modules so that they can be notified that the goal cannot be reached.
200
+
201
+ ![](images/2f384711445bb8e39579ebae0f766d308973bc37c61ec86f3e87f8e739fb4321.jpg)
202
+ (a)
203
+
204
+ ![](images/ada9ad8cf10e80649ee6bee0cc09ad09df107995287456c3d930ee4836c40829.jpg)
205
+ (b)
206
+
207
+ ![](images/b74edb97ee43222cabadf39327a54fee5bc3f1b7707b5cff84d9ec18ca1fb1a2.jpg)
208
+ (c)
209
+
210
+ ![](images/fd0eaee0e193f3f2cc6da4cc6c78c1fcb972c8b082b1a68055d50f4226d4d394.jpg)
211
+ (d)
212
+
213
+ ![](images/28217fd66799aeeed57faee20da7fb611ad378c107d21f9a59c763b47166347d.jpg)
214
+ (e)
215
+
216
+ ![](images/c11e6ae2eb65a2aab7274cce0d65d617aee9eb993d612017e57b1dabf0f5f571.jpg)
217
+ (f)
218
+ Fig. 7 Interaction between TPP and MPP for path planning. The goal is identified by the purple G. The topological path is shown with purple links. The dashed yellow arrow is the trajectory computed by MPP to the targeted poses designated by the yellow P. Light gray, dark gray and black areas of the occupancy grid represent free, unknown and occupied cells, respectively. Blue nodes are in WM, and red nodes are in LTM. Yellow links are proximity links.
219
+
220
+ # 3.6 Patrol Module
221
+
222
+ We implemented the Patrol module to generate navigation goals, referred to as waypoints, so that the robot continuously patrols an area. The Patrol module receives waypoints as inputs and sends them successively to TPP. By examining TPP's status messages, Patrol can know when a goal is reached or if TPP has failed. Whether or not the status indicates that the goal was reached, the Patrol module sends the next waypoint, restarting from the first one once the whole list has been processed.
223
+
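+ This waypoint cycling can be summarized by a small sketch (the blocking TPP interface shown here is hypothetical):
+
+ ```python
+ import itertools
+
+ def patrol(waypoints, send_goal_and_wait):
+     """Cycle through the waypoints forever, one TPP goal at a time.
+
+     send_goal_and_wait(node_id) is assumed to block until TPP reports a status
+     such as "reached" or "failed" for that goal.
+     """
+     for node_id in itertools.cycle(waypoints):
+         status = send_goal_and_wait(node_id)
+         # Whether the goal was reached or not, move on to the next waypoint.
+         print(f"waypoint {node_id}: {status}")
+ ```
+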
224
+ # 4 Results
225
+
226
+ Table 1 shows the parameters used for the trials<sup>1</sup>. The acquisition time $A$ used is 1 sec (i.e., the map update rate is $1\mathrm{Hz}$ ), which sets the maximum online time allowed to process each node added to the map. For the trials, $T$ is set to $200\mathrm{ms}$ to limit CPU usage for SPLAM-MM to around $20\%$ , to make sure that higher
227
+
228
+ frequency modules (Sensor Data acquisition and MPP) can run at their fixed frequency of $10\mathrm{Hz}$ . The robot moves at relatively the same velocity during the trials, and therefore $M$ is fixed to 2 to make sure that nodes on a planned path are retrieved fast enough to avoid having the robot wait for nodes still in LTM. All computations are done onboard the robot, which is equipped with a 2.66 GHz Intel Core i7-620M and a 128 GB SSD hard drive (on which the LTM is saved).
229
+
230
+ To define the area over which the robot had to patrol, during session 1 we first teleoperated the robot and defined four waypoints (WP1 to WP4). There were no people in the environment during the teleoperation phase. After reaching WP4, the autonomous navigation phase is initiated by sending the waypoints to the Patrol module. Figure 8 illustrates the four waypoints on the global map and the first planned trajectory by TPP (purple path) from the current position of the robot (WP4) to WP1. To come back to WP1, the robot had to follow the path in the opposite direction from when these nodes were created. Proximity detection made it able to follow the path appropriately. To see more clearly the effect of proximity links, Fig. 9 shows the
231
+
232
+ Table 1 Parameters used for the trials
233
+
234
+ <table><tr><td>Acquisition time</td><td>A</td><td>1 sec</td></tr><tr><td>ICP correspondence ratio</td><td>C</td><td>0.3</td></tr><tr><td>Radius of the goal area</td><td>D</td><td>0.5 m</td></tr><tr><td>TPP iterations before failure</td><td>F</td><td>10</td></tr><tr><td>Loop closure hypothesis threshold</td><td>H</td><td>0.11</td></tr><tr><td>Minimum RANSAC visual word inliers</td><td>I</td><td>5</td></tr><tr><td>Close nodes radius</td><td>L</td><td>0.5 m</td></tr><tr><td>Maximum retrieved close nodes</td><td>M</td><td>2</td></tr><tr><td>Heuristics 2 close-by nodes ratio</td><td>O</td><td>0.25</td></tr><tr><td>Laser scan range</td><td>R</td><td>4 m</td></tr><tr><td>STM size</td><td>S</td><td>20</td></tr><tr><td>Time limit</td><td>T</td><td>200 ms</td></tr><tr><td>Maximum obstacle height</td><td>U</td><td>0.4 m</td></tr><tr><td>Similarity threshold</td><td>Y</td><td>0.3</td></tr><tr><td>Ground segmentation maximum angle</td><td>Z</td><td>0.1 rad</td></tr></table>
235
+
236
+ ![](images/ebc78736b017fe04a72a2e65757ebaea0841f21a287667afc78b08fb8613fbee.jpg)
237
+ Fig. 8 Waypoints WP1 to WP4 identified on the global map. The purple path is the first path planned by TPP from the WP4 to WP1.
238
+
239
+ maps after reaching WP1 with and without graph optimization. Navigation would not have been possible without proximity links: the local map would have looked like the map in (b) without the yellow links, because no appearance-based similarities would have been found with nodes from the map on the planned path. When reaching WP1, the Patrol module sends the next waypoint (WP2), making the robot continue patrolling.
240
+
241
+ Every 45 minutes or so of operation, the robot was manually shut down and moved to the battery charger near WP1. Once recharged, a new session of SPLAM-MM was initiated, creating a new node in STM with odometry reset, while preserving the nodes in WM and LTM. As the robot was initialized in the area of WP1 for each session, loop closures were found, connecting
242
+
243
+ ![](images/b6d18ae4e59dbe7e82f802a58fd56ec1e3c7f25820a43a97c67e973fedbaf044.jpg)
244
+ Fig. 9 Global maps, optimized and not optimized, after reaching WP1. Yellow and red links are proximity and loop closure links, respectively.
245
+
246
+ ![](images/a0667b428fae579864106924a65328d1978c42fd888485baf3f49859b6b91c91.jpg)
247
+
248
+ and optimizing the new map with nodes created from previous sessions, and allowing the Patrol module to provide waypoints as navigation goals to patrol the area. Overall, 11 indoor mapping sessions were conducted, for a total distance of $10.5\mathrm{km}$ and 7.5 hours of operation spread over two weeks. The robot did 111 patrolling cycles (i.e., traversing from WP1 through WP2, WP3, WP4 and coming back to WP1). The sessions were conducted during office hours, with people walking by. A total of 139 people were encountered by the robot while patrolling. Figure 10 illustrates the dynamic conditions and some of the obstacles that the robot had to deal with during the trials.
249
+
250
+ The main goal of the trials is to see how SPLAM is influenced by memory management over long-term operation, only having the local map for online processing. This can be illustrated by looking at the influences of memory management on SPLAM, interactions between TPP and MPP, and the influences of LTM on TPP. As the robot is continuously adding new nodes, the trials also demonstrate how SPLAM-MM works in an unbounded environment.
251
+
252
+ # 4.1 Influences of MM on SPLAM
253
+
254
+ Figure 11 shows a typical navigation result when reaching the time limit $T$ , thus limiting the size of the local map used for online navigation. This example shows the path planned between WP4 and WP1 after 4.7 hours of operation. The local maps used for online planning, localization and mapping are shown for different time steps along the trajectory. At $t = 17031$ sec, the planned path had 67 nodes and was $33$ m long. It took 1.3 sec to be generated by TPP and to have the first pose on the path sent to MPP. The laser scan range $R$ delimits the upcoming nodes on the path provided by TPP. As the robot navigates in the environment, the farthest available pose in the local map on the path (end of the cyan line) is sent from TPP to MPP.
255
+
256
+ ![](images/cc52117db3da4b4ebf9f80d373ef0aa18dd62fbdfff0027610399bf7f3034f00.jpg)
257
+
258
+ ![](images/be6ede559501efe7e247e93938e84d5905bd11d1ff2341fc1399e873c5a872b8.jpg)
259
+ a)
260
+
261
+ ![](images/3dca1b75c9b9d11aaf180b859a4c49c85deb8b6764e2b68e73c48f756e800960.jpg)
262
+
263
+ ![](images/31685de9ee1ed929ea4a957e97fda44f2fddfa8c7bd5a8a352f6a86e58919efd.jpg)
264
+ b)
265
+
266
+ ![](images/ccdea1abb0d07eb8e4d11a49ecfb8a2708a6fe9284093de2b7da244cfc2d1180.jpg)
267
+
268
+ ![](images/d6a9d8c6c9dc0ca347adde5317c58ab1161dc1be3825509ad6a3f62558562dca.jpg)
269
+ c)
270
+
271
+ ![](images/275dccdeb9a233b7719e8665eaebdb94d713edba3f31323b17f8770b679c0743.jpg)
272
+
273
+ ![](images/a7f445ff3d568a5c11788cbca20ff40d555064340de40aa4fbe25113f7447e8a.jpg)
274
+ d)
275
+
276
+ ![](images/4750711c30c85d5c62c939ea384740f8f527ec70a904516108cc66ef961766e3.jpg)
277
+
278
+ ![](images/749f645b71cb944fa062d229f9865436cda91f30df3990dd4c222d449e580a67.jpg)
279
+ e)
280
+
281
+ ![](images/3c27a4a14be3438bf4cf1b2234fddaf376d4560085a94af5ad86602dd02e6290.jpg)
282
+ Fig. 10 Events that occurred during the trials: a) open and closed doors between traversals; b) camera exposure that led to the extraction of different visual features, making it difficult to find loop closures; c) someone opening a door while the robot is navigating; d) people walking around or blocking the robot; e) featureless images on which loop closure detection cannot work.
283
+
284
+ ![](images/b9114014f78ddf2e9a664b9a940f8892b278bc2ae4dbdb5192b74985faa6da82.jpg)
285
+
286
+ ![](images/60621ccd96edffbe6dc880615a4720a3597afb0af8391dc2d42092df52e4b677.jpg)
287
+
288
+ ![](images/309a4bdaa5d67471dc21be4261d1c9c49b840d5c14e3c44767d17b333044805d.jpg)
289
+
290
+ ![](images/ea9ebd762a80748cf18b619b14f707b1f4b451b720c3f640a1623ab0601e7ba2.jpg)
291
+ Fig. 11 Example of the effect of memory management when travelling from WP4 to WP1 after 4.7 hours of operation. The path planned is shown in purple. The small colored icon represents the robot position at each time step. The dotted circle around the robot position illustrates the laser scan range $R$ . The cyan lines represent the upcoming nodes on the planned path.
292
+
293
+ ![](images/2a127ca41b337f4667817396562766c00f4ca043ddea41d1e19f26cfc04103c8.jpg)
294
+
295
+ ![](images/04bc8164aef575408610c1a26e71dd1dc80d1b2aa4338af16d4ea4536af21d6c.jpg)
296
+
297
+ ![](images/8b8f4daf60640d9967adcb1c1a2c9f1b3091230c880e023455a4fb67b4a5ca27.jpg)
298
+
299
+ Upcoming nodes, if they are not in WM, are retrieved so that the robot can localize itself (through loop closures and proximity detections) on the path. Looking at how the local map changes in these snapshots, notice how, starting from $t = 17075$ sec, the initial portion of the path is transferred to LTM to keep the size of the WM relatively constant. At $t = 17108$ sec, the robot reached WP1.
300
+
301
+ Figure 12 compares the images between each waypoint and the final position of the robot at the waypoints. The robot successfully reached the waypoints (within $D$ as the goal radius) 445 out of 446 times. For
302
+
303
+ WP2, WP3 and WP4, the robot always came from behind the waypoint, and as soon as the robot reached the waypoint within a $D$ radius, TPP detected that the goal was reached. This explains why all the poses are behind the waypoints but inside the goal radius $D$ . Similarly, for WP1, the robot came from behind from a slightly different direction. Spurious poses on the right part of the circle are those where there was an obstacle that caused the robot to avoid it, making it reach the waypoint from a different direction. The one time the robot failed to reach a waypoint is because someone blocked the robot for a long time, making TPP fail after $F$ attempts
304
+
305
+ at reaching the upcoming nodes: a failure status message was then sent to the Patrol module to provide the next waypoint. The person left soon after the next waypoint was sent, and the robot reached the new waypoint provided.
306
+
307
+ Figure 13 illustrates the evolution of the number of nodes in WM and online processing time over the 11 mapping sessions. Processing time includes all SPLAM-MM modules except MPP, which was running concurrently in a separate process (its processing time only depends on the local map size). As explained in Section 3.5.2, TPP occurs offline and only when a new goal is received from the Patrol module, and is examined in Section 4.3. Fig. 13a illustrates that the number of nodes in WM and in the local map were identical until the processing time reached $T$ . After that, nodes were transferred to LTM to limit the WM size for online processing, which is satisfied as shown by Fig. 13b. Processing time also remained well under the acquisition time $A$ .
308
+
309
+ # 4.2 TPP-MPP Interactions
310
+
311
+ To give a concrete example of the situation described in Fig. 7, Fig. 14 presents consecutive poses sent by TPP to MPP while nodes from LTM are retrieved for the planned path. The red arrow shows the pose of the farthest node on the path (the direction of the arrow shows the orientation of the pose). The red line represents the trajectory computed by MPP from the current position of the robot to its targeted pose, combined with obstacle avoidance. The blue lines represent the local map. In Fig. 14a, the targeted pose is on a node traversed backward (as shown by the arrow pointing backward). Between a) and b), the local map was updated with nodes of the topological path loaded from LTM. The targeted pose was updated farther on the path and, at the same time, the occupancy grid was extended to previously mapped areas and MPP recomputed its trajectory. The robot could then move farther toward its goal and the nodes retrieved were used for proximity detection to correctly follow the planned path.
312
+
313
+ To also illustrate the importance of obstacle detection described in Fig. 6, Fig. 15 presents an example where an unexpected obstacle was encountered: as the laser rangefinder is $0.4\mathrm{m}$ above the ground, the forklift could only be detected using the RGB-D camera. MPP planned a slightly different path (orange) than the one planned by TPP (pink) to avoid the obstacle.
314
+
315
+ # 4.3 Influences of LTM on TPP
316
+
317
+ Although Fig. 13 demonstrates that SPLAM-MM is able to satisfy online constraints on a map increasing linearly in size (i.e., not bounded to a maximum environment size), memory used by LTM and consequently TPP planning time increase linearly. For example, at the end of the experiment, LTM contains 24002 nodes and 113368 links. All raw sensor data in the nodes were also saved in the LTM's database (for debugging and visualization purposes), including the RGB image (JPEG format) and depth image (PNG format) of each node. The final database took 6.7 GB of hard drive space. With as many links at the end of the experiment, TPP required 2.4 sec to compute a plan to the next waypoint. In terms of memory usage and planning time, LTM must be somewhat limited over time when revisiting the same areas.
318
+
319
+ As a solution to limit LTM memory growth, nodes from STM can be merged when moved to WM if they have loop closure and/or visual proximity links. We studied this possibility by adding a graph reduction algorithm to STM, to remove the node from the graph and to add its neighbor links to the corresponding old node(s). Algorithm 1 summarizes the approach used to maintain the graph at the same size (same number of links and nodes removed as added) if there are many successive nodes with loop closure or visual proximity links. If two nodes of the same location do not have similar images (i.e., they don't have loop closure or visual proximity links), they will not be merged, thus still keeping a variety of different images representing the same location. To make sure nodes to be merged are still in WM (to avoid modifying the LTM), nodes having a link to a node in STM are identified as nodes that must stay in WM (similarly to Heuristic 2). Figure 16 shows how links are merged between the node moved to WM and its corresponding node(s) linked by a loop closure link. In a), the purple node has two loop closure links. On graph reduction, its two neighbor links (blue) are merged with the loop closure links (red) by multiplying the corresponding transformations together, creating merged neighbor links (orange). In this case, the same number of links is added as removed, but one node is removed. In b), the green node has only one neighbor link (with the cyan node), so the loop closure link is merged only with it, creating one link while four are removed. Merged neighbor links are not merged again, to limit the number of links. In c), the cyan node does not have any loop closure and no graph reduction is done.
320
+
321
+ To test this idea, data from the 11 sessions were processed again to evaluate the influence of the graph
322
+
323
+ ![](images/13121a1fb9772c25b0be134a2a5f2d86b4cabd158275428c8ef08caa67f8a92c.jpg)
324
+ Fig. 12 Comparison of the corresponding images between the waypoint (left image) and at the last pose reached on one of the planned path (right image) for the waypoints. The top view grid shows the laser scan readings and referentials of the waypoint's nodes (at the origin of the grid) and the final node. The zoomed portions represent the final poses of the robot (represented by blue dots), for all paths planned for each waypoint. The circle represents the goal radius $D$ , and the grid's cells used for visualization have a width of $1\mathrm{m}$ .
325
+
326
+ ![](images/d282406cb1b0e17080570953c6c9ffe33a38bf5717dc565801412a792f44a022.jpg)
327
+ (a) Number of nodes in WM and in the local map.
328
+
329
+ ![](images/4c992f7c4fe4301b5431c7a7f967751289b7d144d663844c9e6a1dc84d3495fd.jpg)
330
+ (b) Processing time (the horizontal line represents $T = 0.2$ sec).
331
+ Fig. 13 Memory size and total processing time over the 11 mapping sessions.
332
+
333
+ ![](images/3b6fa15482804d605bf3c6fe203ebfd8460b3347fd9984bc5a209ce32fcd5533.jpg)
334
+ (a)
335
+
336
+ ![](images/4dcf73d48f270940ab18aa24d6b3391752efdbbd1dc602a25a164eb145dbe7d8.jpg)
337
+ (b)
338
+
339
+ # Algorithm 1 Graph Reduction
340
+
341
+ 1: $o\gets$ node moved to WM
342
+ 2: $\pmb{m} \gets$ loop closure and visual proximity links of $o$
343
+ 3: if $\pmb{m}$ is not empty then
344
+ 4: $\pmb{n}\gets$ neighbor links of $o$
345
+ 5: for all $m$ in $\pmb{m}$ do
346
+ 6: $o_{m}\gets$ node pointed by $m$
347
+ 7: for all $n$ in $\pmb{n}$ do
348
+ 8: $o_{n}\gets$ node pointed by $n$
349
+ 9: $t\gets m^{-1}\cdot n$
350
+ 10: Add $t$ to $o_m$
351
+ 11: Add $t^{-1}$ to $o_n$
352
+ 12: end for
353
+ 13: end for
354
+ 14: Remove $o$ from the graph
355
+ 15: end if
356
+
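+ A Python sketch of Algorithm 1 (a simplified illustration using 3x3 homogeneous transforms and an assumed adjacency representation, not the authors' implementation):
+
+ ```python
+ import numpy as np
+
+ def reduce_node(graph, o):
+     """Merge node `o` into the node(s) it closes a loop with (Algorithm 1).
+
+     graph : dict {node_id: {"loop": {other_id: T}, "neighbor": {other_id: T}}}
+             where T is the 3x3 transform from node_id to other_id; "loop" also
+             covers visual proximity links.
+     o     : ID of the node just moved from STM to WM
+     """
+     merge_links = graph[o].get("loop", {})
+     if not merge_links:
+         return False                        # no loop closure: keep the node
+     neighbor_links = graph[o].get("neighbor", {})
+     for om, T_o_om in merge_links.items():          # lines 5-6
+         for on, T_o_on in neighbor_links.items():   # lines 7-8
+             # t = m^-1 * n: merged neighbor link from o_m to o_n (lines 9-11).
+             T_om_on = np.linalg.inv(T_o_om) @ T_o_on
+             graph[om].setdefault("neighbor", {})[on] = T_om_on
+             graph[on].setdefault("neighbor", {})[om] = np.linalg.inv(T_om_on)
+     # Remove o and every link pointing to it (line 14).
+     del graph[o]
+     for links_by_kind in graph.values():
+         for links in links_by_kind.values():
+             links.pop(o, None)
+     return True
+ ```
+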
357
+ reduction approach using real data acquired by the robot. Note that even though graph reduction was validated offline, we carefully monitored the experiment manually to make sure that the robot could still localize itself correctly on the planned paths.
358
+
359
+ Figure 17 shows a comparison of the final global map without and with graph reduction. The zones with
360
+
361
+ ![](images/7a5647f5e4f6a8288995e8375607930246a489b0b11968d5fa315cd542bb68a4.jpg)
362
+ Fig. 15 Example where MPP plans a slightly different path (orange) than the one provided by TPP (pink). The yellow dot is the current position of the robot and the lower right image is the corresponding RGB image.
363
+
364
+ ![](images/88f676a9b4b082fc1409c9cafafa2a85229510320187bd752ee14005539dbce2.jpg)
365
+ Fig. 14 Example of poses sent by TPP to MPP while nodes from LTM are retrieved for the planned path. The goal of the path is somewhere outside these images in the direction shown by Goal. The bottom left image shows the actual RGB image from the RGB-D camera. The blue lines are nodes and links of the local map. The red line is the trajectory computed by MPP using the local map's occupancy grid from its current pose (red arrow). The RGB point cloud and the occupancy grid are created using RGB-D images and laser scans stored in nodes of the local map, respectively. In a), the robot is following the red trajectory. In b), some nodes are retrieved from LTM and a new trajectory is computed to move further along the path toward the goal.
366
+ Fig. 16 Three examples illustrating how the graph reduction algorithm works. Blue, red and orange links represent neighbor, loop closure and merged neighbor links, respectively. Black links and white nodes are those removed using graph reduction. The left column shows the rightmost node (the oldest) of STM moved to WM. Then on the right column, this node is removed if it has a loop closure link.
367
+
368
+ fewer blue links indicate that many nodes were merged. The zones with more blue links are where nodes were not merged, because of a lack of features or because of obstacles: the robot was not able to localize itself perfectly on the paths every time, thus adding new nodes to the map.
369
+
370
+ Figure 18 illustrates TPP planning time as a function of LTM size, with and without graph reduction. As the LTM became larger, TPP planning time increased: with graph reduction, TPP planning time was reduced by $89\%$ for the last path planned (272 ms instead of 2.4
371
+
372
+ ![](images/22aec2fdef64395f5fe05e8e9ba36902a0f9ac430e91493b8e8400cbad9d5f2c.jpg)
373
+ Fig. 17 Comparison between the global maps a) without graph reduction (24002 nodes and 113368 links); b) with graph reduction (6059 nodes and 18255 links).
374
+
375
+ sec). Figure 19 illustrates hard drive usage with and without graph reduction. Extrapolating memory usage linearly for a $100~\mathrm{GB}$ hard drive, the robot could navigate online for approximately 110 hours without graph reduction before filling up the hard drive. When debugging data (not used for navigation) are not recorded in the database, this estimate increases to approximately 33 days (800 hours). This means that if the robot always visits new locations at a mean velocity of $1.4\mathrm{km/h}$ (as in this experiment), it could travel up to $1120\mathrm{km}$ while mapping environments online. With graph reduction enabled, debugging data not saved, and the robot always revisiting the same areas as in this experiment, it could do SPLAM continuously for about 130 days before reaching the hard drive capacity.
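+ 
+ For reference, both reported figures follow directly from the numbers quoted above:
+ 
+ $$
+ 1 - \frac{272~\mathrm{ms}}{2.4~\mathrm{s}} \approx 0.89, \qquad 1.4~\mathrm{km/h} \times 800~\mathrm{h} = 1120~\mathrm{km}.
+ $$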
376
+
377
+ # 5 Discussion
378
+
379
+ In terms of processing time, results show that SPLAM-MM is able to satisfy online processing requirements independently of the size of the environment, by transferring to LTM the portions of the map that then cannot be used for loop closure detection, proximity detection and graph optimization. Results also show that path following is still possible in such conditions by incrementally retrieving locations on the planned path. Thus, as shown in Section 4.3, the current hardware limitation of the system for long-term continuous SPLAM is hard drive capacity, not computational power.
380
+
381
+ ![](images/81c2da4050a184a6640f69aa63f166c65c2e397eee7cc0a39c5d6659fb19ac7f.jpg)
382
+ Fig. 18 Comparison of TPP planning time and LTM size, with (blue) and without (red) graph reduction. The peaks in the zoomed section show more precisely when a planning is done (when a waypoint is reached).
383
+
384
+ ![](images/505900d3278ec51e829aefaf55ea4404b822d4cd2ffd0d5d3ce79ac0e4a81c96.jpg)
385
+ Fig. 19 Comparison of hard drive usage with (blue) and without (red) graph reduction. The dashed curves represent results without saving in database the debugging data (i.e., raw RGB and depth images).
386
+
387
+ To successfully follow a path, results demonstrate the importance of adding loop closure and/or proximity links with nodes on the planned path to localize the robot in the map. In our trials, the robot navigated indoors where static structures (e.g., walls) were visible most of the time using the laser rangefinder. However, in large empty spaces where the laser rangefinder would not be able to perceive nearby structures, it would be difficult for the robot to follow a path if appearance-based loop closure detection and visual proximity detection do not occur. A laser rangefinder with a larger perceptual range or a 3D LIDAR sensor like the Velodyne could be used to increase the perceptual range. For a lower-cost solution, a camera facing backward could be useful to allow the robot to detect similarities in images when traversing a path in the opposite direction (Carrera et al., 2011). Without adding new sensors, TPP could also stop sending new poses when no loop closure links or proximity links occur for a while. If no
388
+
389
+ loop closures were found over the next few meters, it would be possible to wait for the robot to rotate at this location so that it can look backward, increasing its chances of detecting a loop closure to correct its position on the planned path and then generate a new pose. A similar recovery approach is presented in (Milford and Wyeth, 2010), where an exploration phase is triggered to re-localize the robot when it fails to follow the planned path. Also, to be more robust to dynamic environments where there are cyclic changes over time, TPP could select nodes that better match the current time of day rather than the most recent ones, to increase localization success as in (Krajinik et al., 2016).
390
+
391
+ As with large empty environments, environments in which many dynamic changes occur (e.g., navigating through a crowd) also make simultaneous planning and localization more difficult. For instance, mapping the area in session 1 without people walking by helped the robot acquire the static structures of the environment since they were not hidden by people. These static structures facilitate localization when the robot comes back to these areas later on. If these static structures were previously occluded, they would be added to the map as the robot comes back to these areas (obviously only if people are no longer in the robot's field of view). If people partially occlude the robot's sensors over a long distance, localization would still be possible but would occur less frequently.
392
+
393
+ For online multi-session mapping with our memory management approach, the worst case is when all nodes of a previous map are transferred to LTM before a loop closure is detected (Labbe and Michaud, 2013). This results in permanently ignoring the previous map and, at the same time, disabling the ability to plan paths to a location in it. To avoid this problem, an additional heuristic could be to keep in WM at least one discriminative node for each map. However, if the number of mapping sessions becomes very high (e.g., thousands of sessions), these nodes would eventually have to be transferred to LTM to satisfy online processing requirements. A strategy that makes the robot explore potential paths to link maps together would then be useful, and maps that could not be linked would eventually become unretrievable.
394
+
395
+ In the trials conducted, no invalid loop closures were detected, so the map was not corrupted with erroneous loop closure links. If this happens, graph optimization approaches such as (Latif et al., 2013; Sunderhauf and Protzel, 2012; Lee et al., 2013) deal with possible invalid matches, and could be used to increase the robustness of SPLAM-MM. However, these approaches assume that the whole global map is available online, which is not
396
+
397
+ the case here. They could still be used offline at the end of a session.
398
+
399
+ As shown by Fig. 15, MPP in SPLAM-MM allows the robot to find an alternative path to reach the targeted pose when possible. However, if the alternative path is outside the local map, re-planning with TPP is required. Some paths may also be blocked temporarily or permanently by dynamic or new static obstacles. An approach similar to (Konolige et al., 2011) could be used to mark some links as blocked so that TPP cannot plan a path using them. The Patrol module could also manage waypoints that can and cannot be reached.
400
+
401
+ Finally, the graph reduction approach can significantly reduce the number of nodes and links saved in LTM, thus reducing TPP planning time. However, because of dynamic events or the lack of features (e.g., Fig. 10e), new nodes and links will inevitably be added to LTM over time when revisiting the same areas. As an improvement, nodes with featureless images could be merged using a maximum density threshold like in (Milford and Wyeth, 2010), as they cannot be used for loop closure detection. After applying graph reduction on the experimental data, 3068 of the 6059 nodes in the global graph are still featureless, so merging them would reduce the remaining graph by about $50\%$ . However, even by limiting the rate at which the LTM grows, a continuous SLAM approach in unbounded dynamic environments will always add new data over time. A complementary strategy would be to permanently forget some parts of the global map, at the cost of not being able to return to some locations.
402
+
403
+ # 6 Conclusion
404
+
405
+ By limiting the nodes of the map available online in WM for loop closure detection, proximity detection and graph optimization, the results presented in this paper suggest that the proposed graph-based SPLAM-MM approach is able to meet the online processing requirements needed for simultaneous mapping, localizing and planning in multi-session conditions. SPLAM-MM is tightly based on appearance-based loop closure detection, allowing it to naturally deal with the initial state problem of multi-session mapping. To successfully localize on a planned path through areas previously transferred to LTM, memory management allows SPLAM-MM to retrieve the upcoming nodes on the path back into WM. Our code is open source and available at http://introlab.github.io/rtabmap.
406
+
407
+ In future work, more robust failure recovery approaches will be examined to test SPLAM-MM in dynamic environments where paths could often be blocked (temporarily or permanently). We also plan to
408
+
409
+ study the impact of autonomous coverage and exploration strategies, especially how they can actively direct exploration based on the nodes available for online mapping. This would also be useful to conduct longer experiments at larger scale.
410
+
411
+ # References
412
+
413
+ Atkinson R, Shiffrin R (1968) Human memory: A proposed system and its control processes. In: Psychology of Learning and Motivation: Advances in Research and Theory, vol 2, Elsevier, pp 89-195
414
+ Baddeley A (1997) Human Memory: Theory and Practice. Psychology Press
415
+ Bay H, Ess A, Tuytelaars T, Gool LV (2008) Speeded Up Robust Features (SURF). Computer Vision and Image Understanding 110(3):346-359
416
+ Besl PJ, McKay ND (1992) Method for registration of 3-D shapes. In: Robotics-DL tentative, International Society for Optics and Photonics, pp 586-606
417
+ Biber P, Duckett T, et al. (2005) Dynamic maps for long-term operation of mobile service robots. In: Robotics: Science and Systems, pp 17-24
418
+ Carrera G, Angeli A, Davison AJ (2011) Lightweight SLAM and navigation with a multi-camera rig. In: European Conference on Mobile Robots, pp 77-82
419
+ Churchill W, Newman P (2012) Practice makes perfect? Managing and leveraging visual experiences for lifelong navigation. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 4525-4532
420
+ Dijkstra EW (1959) A note on two problems in connection with graphs. Numerische Mathematik 1(1):269-271
421
+ Ferland F, Clavien L, Frémy J, Letourneau D, Michaud F, Lauria M (2010) Teleoperation of AZIMUT-3, an omnidirectional non-holonomic platform with steerable wheels. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 2515-2516
422
+ Folkesson J, Christensen HI (2007) Closing the loop with graphical SLAM. IEEE Trans on Robotics 23(4):731-41
423
+ Fox D, Burgard W, Thrun S (1997) The dynamic window approach to collision avoidance. IEEE Robotics & Automation Magazine 4(1):23-33
424
+ Garcia-Fidalgo E, Ortiz A (2015) Vision-based topological mapping and localization methods: A survey. Robotics and Autonomous Systems 64:1 - 20
425
+ Glover AJ, Maddern WP, Milford MJ, Wyeth GF (2010) FAB-MAP + RatSLAM: Appearance-based SLAM for multiple times of day. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 3507-3512
426
+ Grisetti G, Grzonka S, Stachniss C, Pfaff P, Burgard W (2007) Efficient estimation of accurate maximum
427
+
428
+ likelihood maps in 3D. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 3472-3478
429
+ Grisetti G, Kümmerle R, Stachniss C, Burgard W (2010) A tutorial on graph-based SLAM. IEEE Intelligent Transportation Systems Magazine 2(4):31-43
430
+ Ho KL, Newman P (2006) Loop closure detection in SLAM by combining visual and spatial appearance. Robotics and Autonomous Systems 54(9):740-749
431
+ Johannsson H, Kaess M, Fallon M, Leonard J (2013) Temporally scalable visual SLAM using a reduced pose graph. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 54-61
432
+ Kim B, Kaess M, Fletcher L, Leonard J, Bachrach A, Roy N, Teller S (2010) Multiple relative pose graphs for robust cooperative mapping. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 3185-3192
433
+ Konolige K, Bowman J (2009) Towards lifelong visual maps. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 1156-1163
434
+ Konolige K, Marder-Eppstein E, Marthi B (2011) Navigation in hybrid metric-topological maps. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 3041-3047
435
+ Krajinik T, Fentanes JP, Hanheide M, Duckett T (2016) Persistent localization and life-long mapping in changing environments using the frequency map enhancement. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 4558-4563
436
+ Kummerle R, Grisetti G, Strasdat H, Konolige K, Burgard W (2011) g2o: A general framework for graph optimization. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 3607-3613
437
+ Labbe M, Michaud F (2013) Appearance-based loop closure detection for online large-scale and long-term operation. IEEE Trans on Robotics 29(3):734-745
438
+ Labbe M, Michaud F (2014) Online global loop closure detection for large-scale multi-session graph-based SLAM. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 2661-2666
439
+ Latif Y, Cadena C, Neira J (2013) Robust loop closing over time for pose graph SLAM. Int J of Robotics Research 32(14):1611-1626
440
+ Lee GH, Fraundorfer F, Pollefeys M (2013) Robust pose-graph loop-closures with expectation-maximization. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 556-563
441
+ Marder-Eppstein E, Berger E, Foote T, Gerkey B, Konolige K (2010) The Office Marathon: Robust navigation in an indoor office environment. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 300-307
442
+ McDonald J, Kaess M, Cadena C, Neira J, Leonard J (2012) Real-time 6-DOF multi-session visual SLAM
443
+
444
+ over large scale environments. Robotics and Autonomous Systems 61(10):1144-58
445
+ Milford M, Wyeth G (2010) Persistent navigation and mapping using a biologically inspired SLAM system. Int J of Robotics Research 29(9):1131-53
446
+ Muja M, Lowe DG (2009) Fast approximate nearest neighbors with automatic algorithm configuration. In: Proc. Int. Conf. on Computer Vision Theory and Application, pp 331-340
447
+ Pirker K, Ruther M, Bischof H (2011) CD SLAM - Continuous localization and mapping in a dynamic world. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 3990-3997
448
+ Rusu RB, Cousins S (2011) 3D is here: Point Cloud Library (PCL). In: Proc. IEEE Int. Conf. on Robotics and Automation, Shanghai, China, pp 1-4
449
+ Sivic J, Zisserman A (2003) Video Google: A text retrieval approach to object matching in videos. In: Proc. 9th Int. Conf. on Computer Vision, Nice, France, pp 1470-1478
450
+ Stachniss C (2009) Robotic Mapping and Exploration, vol 55. Springer Science & Business Media
451
+ Sunderhauf N, Protzel P (2012) Towards a robust backend for pose graph SLAM. In: Proc. IEEE Int. Conf. on Robotics and Automation, pp 1254-1261
452
+ Thrun S, Burgard W, Fox D (2005) Probabilistic Robotics. The MIT Press
453
+ Valencia R, Morta M, Andrade-Cetto J, Porta JM (2013) Planning reliable paths with Pose SLAM. IEEE Trans on Robotics 29(4):1050-1059
454
+ Walcott-Bryant A, Kaess M, Johannsson H, Leonard JJ (2012) Dynamic pose graph SLAM: Long-term mapping in low dynamic environments. In: Proc. IEEE/RSJ Int. Conf. on Intelligent Robots and Systems, pp 1871-1878
2301.00xxx/2301.00050/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:36b8141d169ea5a217d643e2cb99dd8c48dc53f3b04c215b9fa7846c0c3f2af0
3
+ size 1256940
2301.00xxx/2301.00050/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00073/75ea4a89-7dbe-4180-bf18-4a5f3633018a_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00073/75ea4a89-7dbe-4180-bf18-4a5f3633018a_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00073/75ea4a89-7dbe-4180-bf18-4a5f3633018a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d728776ad39f8e6d0958702d296bae4ed72cabbe4804a441b8a82adecb66b629
3
+ size 650258
2301.00xxx/2301.00073/full.md ADDED
@@ -0,0 +1,550 @@
 
 
 
 
1
+ # Fluid Antenna System: New Insights on Outage Probability and Diversity Gain
2
+
3
+ Wee Kiat New, Member, IEEE, Kai-Kit Wong, Fellow, IEEE, Hao Xu, Member, IEEE, Kin-Fai Tong, Fellow, IEEE, and Chan-Byoung Chae, Fellow, IEEE
4
+
5
+ # Abstract
6
+
7
+ To enable innovative applications and services, both industry and academia are exploring new technologies for sixth generation (6G) communications. One of the promising candidates is the fluid antenna system (FAS). Unlike existing systems, FAS is a novel communication technology whose antenna can freely change its position and shape within a given space. Compared to traditional systems, this unique capability has the potential of providing higher diversity and interference-free communications. Nevertheless, the performance limits of FAS remain unclear as its system properties are difficult to analyze. To address this, we approximate the outage probability and diversity gain of FAS in closed-form expressions. We then propose a suboptimal FAS with $N^{*}$ ports, where a significant gain can be obtained over FAS with $N^{*} - 1$ ports whilst FAS with $N^{*} + 1$ ports only yields marginal improvement over the proposed suboptimal FAS. In this paper, we also provide analytical and simulation results to unfold the key factors that affect the performance of FAS. Limiting the comparison to systems with one active radio frequency (RF)-chain, we show that the proposed suboptimal FAS outperforms the single-antenna (SISO) system and the selection combining (SC) system in terms of outage probability. Interestingly, when the given space is $\frac{\lambda}{2}$ , the outage probability of the proposed suboptimal FAS with one active RF-chain comes close to that of the maximal ratio combining (MRC) system with multiple active RF-chains.
8
+
9
+ The work of W. K. New, K.-K. Wong and K.-F. Tong is supported by the Engineering and Physical Sciences Research Council (EPSRC) under grant EP/W026813/1. The work of C.-B. Chae is supported by the Institute for Information and Communication Technology Promotion (IITP) grant funded by the Ministry of Science and ICT (MSIT), Korea (No. 2021-0-02208, No. 2021-0-00486). The work of H. Xu is supported by the European Union's Horizon 2020 Research and Innovation Programme under Marie Sklodowska-Curie Grant No. 101024636.
10
+
11
+ Wee Kiat New (email: a.new@ucl.ac.uk), Kai-Kit Wong (email: kai-kit.wong@ucl.ac.uk), Hao Xu (email:hao.xu@ucl.uk), and Kin-Fai Tong (email: k.tong@ucl.ac.uk) are with the Department of Electronic and Electrical Engineering, University College London, London WC1E 6BT, United Kingdom.
12
+
13
+ Chan-Byoung Chae (email: cbchae@yonsei.ac.kr) is with the School of Integrated Technology, Yonsei University, Seoul 03722 Korea. Kai-Kit Wong is also affiliated with Yonsei Frontier Lab., Yonsei University, Seoul 03722, Korea.
14
+
15
+ # Index Terms
16
+
17
+ 6G, fluid antenna system, outage probability, diversity gain, performance analysis.
18
+
19
+ # I. INTRODUCTION
20
+
21
+ Fifth generation (5G) wireless networks have recently been deployed worldwide and thus industry and academia are now looking for new technologies to maximize the potential of sixth generation (6G) wireless networks. One of the promising candidates is the fluid antenna system (FAS). Unlike traditional antenna systems, FAS is a software-controllable fluidic, conductive or dielectric structure that can freely adjust its position and shape within a given space [1]. The most basic single fluid antenna consists of one radio frequency (RF)-chain and $N$ preset positions (known as ports) that are distributed within a given space, while more advanced designs are also possible. The fluid radiator can freely switch its position among these ports to obtain a stronger channel gain, lower interference, and other desirable performance benefits [2].
22
+
23
+ Fluid antennas are now feasible thanks to recent advances in using liquid metals and ionized solutions for antennas. Some prototypes can be found in [3]–[6]. As discussed in [7], other flexible antenna structures such as software-controlled pixel antennas or movable antennas can also be considered as fluid antennas. In essence, the key principle of FAS is to exploit its dynamic position and shape to achieve ultimate diversity and multiplexing gains [7]. Moreover, in the future, FAS can be applied together with other 6G candidates such as reconfigurable intelligent surfaces (RIS), massive multiple-input multiple-output (MIMO) and terahertz (THz) communications. In particular, FAS can help to reduce the optimization complexity of RIS [1], improve the multiplexing gain of massive MIMO [8] and combat the high path loss of THz communications [9].
24
+
25
+ Despite its advantages, the fundamental limits of FAS and the key factors that affect its performance remain unclear. One of the reasons is that the channels of FAS are strongly correlated since the ports can be placed close to each other. Consequently, the probability density function (PDF) and cumulative distribution function (CDF) of FAS channels are intractable [10]. As a result, the outage probability and diversity gain of FAS are not known in closed-form expressions. In addition, increasing the number of ports of FAS has an inherent diminishing gain due to
26
+
27
+ one active RF-chain [11].<sup>1</sup> Thus, a suboptimal number of ports that are required to achieve a satisfactory performance is not known. Yet, this number is practically and theoretically important as it reduces the implementation challenges and analysis complexity.
28
+
29
+ Conceptually, FAS can be viewed as similar to a traditional selection combining (SC) system since both systems use only one active RF-chain and there is a set of ports (i.e., FAS) or antennas (i.e., SC) to select from. Nevertheless, unlike a traditional SC system, FAS can have infinitely many ports in a limited space (e.g., when using liquid metals), which makes the implementation and analysis much more challenging. In addition, the unique capability of freely switching the radiating element among the ports can be exploited to mitigate multi-user interference. These features are impractical in traditional SC systems.
30
+
31
+ State-of-the-art results show that FAS outperforms the maximal ratio combining (MRC) system if the number of ports is sufficiently large [7]. In fact, [7] proves that FAS achieves arbitrarily small outage for a fixed rate/signal-to-noise ratio (SNR) as $N \to \infty$ . In [12], the authors reveal that the ergodic capacity of FAS increases with $N$ and thus FAS can outperform MRC in terms of ergodic capacity. Interestingly, FAS can also be used for multiple access. Specifically, [13] proposes a fluid antenna multiple access (FAMA) system which leverages the moments of deep fade in space to reduce multi-user interference. Motivated by these works, [14] employs stochastic geometry to analyze the outage probability of FAS in large-scale downlink cellular networks and [15] analyzes the performance of FAS in a more general correlated fading channel.
32
+
33
+ Nevertheless, [16] alludes that the channel modeling in the previous works might be inaccurate. To address this, [10] proposes a highly complicated channel model to follow closely the spatial correlation of the Jake's model. Using this channel model, they highlight that FAS has limited performance gain as $N$ increases. Yet, the key reasons that limit the performance of FAS remain ambiguous. This is because the eigenvalue and eigenvector entries that are used in the analytical PDF/CDF expressions provide limited insights.
34
+
35
+ It is important to highlight that deriving the PDF/CDF of FAS channels is extremely challenging [10]. This is because the channels of FAS are strongly correlated and thus they have to be formulated in terms of multivariate correlated Rayleigh distributions. Over the past few decades, extensive efforts have been dedicated to this problem [17]. However, most of the
36
+
37
+ works only obtain the bivariate [18], [19], trivariate [18], [20], [21], or quadvariate [21], [22] distributions while other works restrict the correlation matrix to certain forms (e.g., equally correlated [23] and exponentially correlated [24]). Fortunately, the multivariate PDF/CDF of arbitrarily correlated Rayleigh distributions were recently derived in [25]-[27]. Nevertheless, the assumption of a non-singular correlation matrix is retained. In this paper, we omit this assumption (i.e., our correlation matrix could be near-singular) and address the computation problem via a suboptimal approximation. $^{2}$
38
+
39
+ In addition to the above works, [29] develops a port selection algorithm that can approach the performance of optimal FAS when only the received SNRs of a few ports are observed. Furthermore, [30] considers a field-response channel model while omitting the spatial correlation effect and [31] extends the model to a MIMO scenario. Moreover, FAMA can be categorized into i) slow-FAMA and ii) fast-FAMA. The former switches its port when the channel changes [32] while the latter switches its port on a symbol-by-symbol basis [33]. The analytical outage probability of two-user FAMA is also derived in [34].
40
+
41
+ Motivated by the aforementioned works, this paper aims to understand the fundamental limits of FAS as well as the key factors that affect its performance. To this end, we approximate the outage probability and diversity gain of FAS in closed-form expressions via a simple and accurate channel model that follows closely the spatial correlation of Jake's model. In addition, we propose a suboptimal FAS with $N^{*}$ ports as well as an algorithm to approximate $N^{*}$ . The main contributions of our paper are summarized as follows:
42
+
43
+ - We employ a simple and accurate channel model that follows the spatial correlation of Jake's model. Based on this channel model, we approximate the outage probability in closed-form expressions. By applying Taylor series approximation, we simplify the outage probability at high SNR into a simpler and more meaningful expression. Using this result, we obtain the diversity gain of FAS.
44
+ - We propose a suboptimal FAS with $N^{*}$ ports. The proposed suboptimal FAS plays an important role as it enables FAS to achieve near-optimal performance with a minimal number of ports. In particular, one may define $\varepsilon_{\mathrm{tol}}$ to adjust the sub-optimality of the proposed FAS.
45
+
46
+ For example, if $\varepsilon_{\mathrm{tol}}$ is small, the proposed FAS is quantifiably near-optimal at a cost of more ports. In addition, we develop a polynomial-time algorithm to approximate $N^{*}$ . Besides, $N^{*}$ can be used to address the near-singular correlation matrix problem.
47
+
48
+ - We provide analytical and simulation results to demonstrate the key parameters that affect the performance of FAS. Our discussions include intuitive insights on the system characteristics as well as practical guidelines for efficient FAS design.
49
+
50
+ The rest of the paper is organized as follows: Section II details the system model and performance metrics. Section III presents the outage probability and diversity gain of FAS. The details of suboptimal FAS and the algorithm to approximate $N^{*}$ are discussed in Section IV. Section V provides our simulation results and we conclude the paper in Section VI.
51
+
52
+ Notations: Scalar variables are denoted by italic letters (e.g., $c$ ), vectors are denoted by boldface italic small letters (e.g., $c$ ) and matrices are denoted by boldface italic capital letters (e.g., $C$ ). Besides, $(\cdot)^T$ denotes transpose, $(\cdot)^H$ denotes conjugate transpose, $(\cdot)^{-1}$ denotes the inverse of a matrix, while $|\cdot|$ and $\| \cdot \|_F$ denote the absolute value and the Frobenius norm, respectively. Throughout this paper, $\log (\cdot)$ denotes logarithm with base 2, $\mathbb{E}[\cdot]$ denotes the expectation and $\mathbb{P}\{\cdot\}$ denotes the probability of an event. In addition, $f_c(\cdot)$ denotes the PDF of $c$ , and $F_{c}(\cdot)$ denotes the CDF of $c$ . The notation $\mathbf{1}_c\{\cdot\}$ is an indicator function for condition $c$ and $[\cdot]_c^{+/-}$ outputs the argument that is lower/upper bounded by $c$ . To help readers with our mathematical content, the meanings of the key variables are explained in Table I.
53
+
54
+ # II. SYSTEM MODEL
55
+
56
+ In this paper, we consider a point-to-point FAS where the transmitter is equipped with a conventional antenna and the receiver is equipped with a fluid antenna. The fluid antenna consists of one RF-chain and $N$ preset locations (also known as ports), which are evenly distributed along a linear dimension of length $W\lambda$ where $\lambda$ is the wavelength of the carrier frequency. Since the ports are closely packed together, there is a strong spatial correlation among them. Based on Jake's model [35], the spatial correlation between the $m$ -th and $n$ -th ports is given by
57
+
58
+ $$
59
+ J _ {m, n} = \sigma^ {2} J _ {0} \left(2 \pi \frac {(m - n)}{N - 1} W\right), \tag {1}
60
+ $$
61
+
62
+ where $\sigma^2$ accounts for the large-scale fading effect and $J_0(\cdot)$ is the zero-order Bessel function of the first kind.
63
+
64
+ Table I: The meanings of key variables
65
+
66
+ <table><tr><td>Notation</td><td>Meaning</td></tr><tr><td>DFAS</td><td>Diversity gain of FAS</td></tr><tr><td>hn</td><td>Complex channel coefficient of the n-th port</td></tr><tr><td>ˆhn</td><td>Approximation of hn</td></tr><tr><td>|hFAS|</td><td>Maximum signal envelope of FAS</td></tr><tr><td>Jm,n</td><td>Spatial correlation between the m-th and n-th ports</td></tr><tr><td>J</td><td>Spatial correlation matrix</td></tr><tr><td>J&#x27;</td><td>Spatial correlation matrix with N → ∞</td></tr><tr><td>K</td><td>Co-factor of J</td></tr><tr><td>λ</td><td>Wavelength of the carrier frequency</td></tr><tr><td>N</td><td>Total number of ports</td></tr><tr><td>N&#x27;</td><td>Rank of J&#x27;</td></tr><tr><td>q</td><td>Minimum required rate</td></tr><tr><td>SNR</td><td>Transmit SNR</td></tr><tr><td>W</td><td>Length of the fluid antenna in terms of λ</td></tr><tr><td>Θ</td><td>Instantaneous received SNR of the receiver</td></tr></table>
67
+
68
+ For ease of analysis, we introduce the correlation matrix $J$ where
69
+
70
+ $$
71
+ \boldsymbol {J} = \left[ \begin{array}{c c c} J _ {1, 1} & \dots & J _ {1, N} \\ \vdots & \ddots & \vdots \\ J _ {N, 1} & \dots & J _ {N, N} \end{array} \right]. \tag {2}
72
+ $$
73
+
74
+ In (2), we have $J_{m,n} = J_{n,m}$ . Therefore, using eigenvalue decomposition, we can obtain $\boldsymbol{J} = \boldsymbol{U} \boldsymbol{\Lambda} \boldsymbol{U}^H$ where $\boldsymbol{U}$ is an $N \times N$ matrix whose $n$ -th column (denoted by $\boldsymbol{u}_n$ ) is the eigenvector of $\boldsymbol{J}$ and $\boldsymbol{\Lambda} = \operatorname{diag}(\lambda_1, \dots, \lambda_N)$ is an $N \times N$ diagonal matrix whose $n$ -th diagonal entry is the corresponding eigenvalue of $\boldsymbol{u}_n$ . Without loss of generality, we assume that the eigenvalues in $\boldsymbol{\Lambda}$ are arranged in descending order, i.e., $\lambda_1 \geq \dots \geq \lambda_N$ .
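+ 
+ As a quick numerical illustration of (1)-(2), the following Python sketch builds $\boldsymbol{J}$ and its eigendecomposition (the values $N = 50$ and $W = 0.5$ are the default settings used later in Section V; the code is only an illustrative aid, not part of the analysis):
+ 
+ ```python
+ import numpy as np
+ from scipy.special import j0
+ 
+ def correlation_matrix(N, W, sigma2=1.0):
+     """Spatial correlation matrix J of eq. (2), with entries given by eq. (1)."""
+     idx = np.arange(N)
+     diff = idx[:, None] - idx[None, :]
+     return sigma2 * j0(2.0 * np.pi * diff / (N - 1) * W)
+ 
+ # Eigendecomposition J = U diag(lams) U^H, eigenvalues sorted in descending order.
+ J = correlation_matrix(N=50, W=0.5)
+ lams, U = np.linalg.eigh(J)
+ order = np.argsort(lams)[::-1]
+ lams, U = lams[order], U[:, order]
+ ```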
75
+
76
+ Throughout this paper, we assume there is only one RF-chain in FAS and thus only one port can be activated for communications. The received signal of the $n$ -th port is expressed as
77
+
78
+ $$
79
+ y _ {n} = h _ {n} x + w _ {n}, n = 1, \dots , N, \tag {3}
80
+ $$
81
+
82
+ where $h_n$ is the complex channel coefficient of the $n$ -th port, $x$ is the information signal with $\mathbb{E}\left[|x|^2\right] = P$ and $w_n$ is the additive white Gaussian noise of the $n$ -th port with zero mean and
83
+
84
+ variance of $N_0$ . Due to the spatial correlation of the ports, $h_n$ can be modeled as
85
+
86
+ $$
87
+ h _ {n} = \sum_ {m = 1} ^ {N} u _ {n, m} \sqrt {\lambda_ {m}} z _ {m}, \tag {4}
88
+ $$
89
+
90
+ where $u_{n,m}$ is the $(n,m)$ -th entry of $U$ , $z_{m} = a_{m} + jb_{m}$ , where $a_{m}, b_{m}, \forall m$ , are independent and identically distributed (i.i.d.) Gaussian random variables with zero mean and variance of $\frac{1}{2}$ . According to [10], (4) can also be approximated as
91
+
92
+ $$
93
+ \hat {h} _ {n} = \Psi v _ {n} + \sum_ {m = 1} ^ {\epsilon - \operatorname {r a n k}} u _ {n, m} \sqrt {\lambda_ {m}} z _ {m}, \tag {5}
94
+ $$
95
+
96
+ where $\epsilon$ -rank is a modeling parameter, $\Psi = \sqrt{\sigma^2 - \sum_{m=1}^{\epsilon\text{-rank}} u_{n,m}^2 \lambda_m}$ , $v_n = c_n + j d_n$ and $c_n, d_n, \forall n$ , are i.i.d. Gaussian random variables with zero mean and variance of $\frac{1}{2}$ .
97
+
98
+ To obtain the globally optimal performance, FAS activates the port with the maximum signal envelope [7],<sup>3</sup> i.e.,
99
+
100
+ $$
101
+ \left| h _ {\text {F A S}} \right| = \max \left\{\left| h _ {1} \right|, \dots , \left| h _ {N} \right| \right\}. \tag {6}
102
+ $$
103
+
104
+ The instantaneous received SNR of the receiver is found as
105
+
106
+ $$
107
+ \Theta = \left| h _ {\mathrm {F A S}} \right| ^ {2} \frac {P}{N _ {0}} = \left| h _ {\mathrm {F A S}} \right| ^ {2} S N R, \tag {7}
108
+ $$
109
+
110
+ where $SNR = \frac{P}{N_0}$ is the transmit SNR and its outage probability is defined as
111
+
112
+ $$
113
+ \mathbb {P} \left\{\log (1 + \Theta) < q \right\} = \mathbb {P} \left\{| h _ {\mathrm {F A S}} | < \Omega \right\}, \tag {8}
114
+ $$
115
+
116
+ where $\Omega = \sqrt{\frac{2^q - 1}{SNR}}$ and $q$ is the minimum required rate. In addition, the diversity gain of FAS can be defined as [36]
117
+
118
+ $$
119
+ \lim _ {S N R \rightarrow \infty} - \frac {\log \mathbb {P} _ {e} (S N R)}{\log (S N R)} \stackrel {(a)} {=} \lim _ {S N R \rightarrow \infty} - \frac {\log \mathbb {P} \left\{\log \left(1 + \left| h _ {\mathrm {F A S}} \right| ^ {2} S N R\right) < q \right\}}{\log (S N R)} = D _ {\text {F A S}}, \tag {9}
120
+ $$
121
+
122
+ where $(a)$ follows from the fact that error probability and outage probability differ by a constant shift at high SNR [37].
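+ 
+ The outage probability in (8) can also be estimated numerically by combining the port selection of (6) with the channel model of (4). The following Monte-Carlo sketch does this (it reuses `correlation_matrix()` from the earlier snippet; the trial count and seed are arbitrary choices):
+ 
+ ```python
+ import numpy as np
+ 
+ def outage_probability_mc(N, W, snr_db, q, trials=100_000, seed=0):
+     """Estimate P{log2(1 + |h_FAS|^2 * SNR) < q}, i.e. eq. (8), by simulation."""
+     rng = np.random.default_rng(seed)
+     lams, U = np.linalg.eigh(correlation_matrix(N, W))
+     lams = np.maximum(lams, 0.0)                # guard against round-off
+     omega = np.sqrt((2.0 ** q - 1.0) / 10.0 ** (snr_db / 10.0))
+     # Batched version of eq. (4): each row of z is one realization of [z_1,...,z_N].
+     z = (rng.standard_normal((trials, N))
+          + 1j * rng.standard_normal((trials, N))) / np.sqrt(2)
+     h = (z * np.sqrt(lams)) @ U.T               # shape (trials, N)
+     h_fas = np.abs(h).max(axis=1)               # best-port selection, eq. (6)
+     return np.mean(h_fas < omega)
+ ```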
123
+
124
+ # III. OUTAGE PROBABILITY AND DIVERSITY GAIN OF FAS
125
+
126
+ As it is seen in (4), the complex channel coefficients $\pmb{h} = [h_1, \dots, h_N]^T$ are correlated. Therefore, $|\pmb{h}|$ is a correlated Rayleigh random vector. We present the following lemmas to obtain the closed-form outage probability and diversity gain of FAS.
127
+
128
+ Lemma 1. The PDF of $|h|$ can be approximated as
129
+
130
+ $$
131
+ \begin{array}{l} f _ {| h |} \left(\left| h _ {1} \right|, \dots , \left| h _ {N} \right|\right) \tag {10} \\ \approx \eta \sum_ {s _ {1} = 0} ^ {s _ {0}} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \left(\frac {1}{2}\right) ^ {\sum_ {t = 1} ^ {T} s _ {t} ^ {*}} \prod_ {t = 1} ^ {T} \beta \left(t, s _ {t} ^ {*}\right) \sum_ {\boldsymbol {v} \in \mathcal {V}} \left[ \prod_ {t = 1} ^ {T} \left( \begin{array}{c} s _ {t} ^ {*} \\ v _ {t} \end{array} \right) \right] \left[ (2 \pi) ^ {N} \prod_ {i = 1} ^ {N} \mathbf {1} _ {\{\Delta_ {i} = 0 \}} \right]. \\ \end{array}
132
+ $$
133
+
134
+ Proof: See Appendix A.
135
+
136
+ In (10), $\eta = \frac{\prod_{n=1}^{N}|h_n|}{\pi^N\operatorname*{det}(J)}\exp\left\{-\frac{\sum_{n=1}^{N}|h_n|^2K_{n,n}}{\operatorname*{det}(J)}\right\}$ , $T = \frac{N(N-1)}{2}$ , $\beta(t,s_t^*) \triangleq \frac{\zeta_t^{s_t^*}}{s_t^*!}$ , $\zeta_t = -\frac{2K_{m,n}|h_n||h_m|}{\operatorname*{det}(J)}$ and $s_t^* = s_t - s_{t+1}$ with $s_{T+1} = 0$ . Throughout this paper, the subscript $t$ and $m,n$ are related as follows: $t = n + (m-1)N - \frac{m(m+1)}{2}$ , $m < n$ , while $m,n$ can be obtained from $t$ with $m = \min m' \in \mathbb{Z}$ subject to $\sum_{i=1}^{m'}(N-i) > t$ and $n = t - (m-1)N + \frac{m(m+1)}{2}$ .
137
+
138
+ Note that $s_0$ is a finite constant which has to be large for the approximation to be accurate. In addition, $\pmb{v} = [v_{1},\dots,v_{T}]^{T}$ , $\mathcal{V}$ denotes the set of all the possible permutations and $\Delta_i = \sum_{n=1}^N G_{i,n} - \sum_{n=1}^N G_{n,i} - G_{i,i}$ . Furthermore, $K_{m,n}$ is the $(m,n)$ -th entry of $\pmb{K}$ where $\pmb{K}$ is the co-factor of $\pmb{J}$ , and $G_{m,n}$ is the $(m,n)$ -th entry of $\pmb{G}$ where $\pmb{G}$ is defined as
139
+
140
+ $$
141
+ \boldsymbol {G} = \left[ \begin{array}{c c c c c} 0 & \gamma_ {1} & \gamma_ {2} & \dots & \gamma_ {N - 1} \\ & & \gamma_ {N} & \dots & \gamma_ {2 N - 3} \\ \vdots & & \ddots & & \vdots \\ & & & & \gamma_ {T} \\ 0 & & \dots & & 0 \end{array} \right], \tag {11}
142
+ $$
143
+
144
+ and $\gamma_t = 2v_t - s_t^* \in \mathbb{Z}$ .
145
+
146
+ Lemma 2. The CDF of $|h|$ can be approximated as
147
+
148
+ $$
149
+ F _ {| \boldsymbol {h} |} \left(R _ {1}, \dots , R _ {N}\right) \approx \sum_ {s _ {1} = 0} ^ {s _ {0}} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \frac {g \left(\boldsymbol {s} ^ {*}\right)}{\pi^ {N} \det (\boldsymbol {J})} \prod_ {t = 1} ^ {T} \frac {\left(- K _ {m , n}\right) ^ {s _ {t} ^ {*}}}{s _ {t} ^ {*} ! \det (\boldsymbol {J}) ^ {s _ {t} ^ {*}}} \times \tag {12}
150
+ $$
151
+
152
+ $$
153
+ \prod_ {n = 1} ^ {N} \frac {1}{2} \left(\frac {K _ {n , n}}{\det (\boldsymbol {J})}\right) ^ {- \frac {\bar {s} _ {n}}{2} - \frac {1}{2}} \left[ \Gamma \left(\frac {1 + \bar {s} _ {n}}{2}\right) - \Gamma \left(\frac {1 + \bar {s} _ {n}}{2}, \frac {K _ {n , n} R _ {n} ^ {2}}{\det (\boldsymbol {J})}\right) \right].
154
+ $$
155
+
156
+ Proof: See Appendix B.
157
+
158
+ In (12), we have $\bar{s}_n = \sum_{i=1}^{N} S_{n,i}^* + \sum_{i=1}^{n-1} S_{i,n}^* + 1$ where $S_{i,n}^*$ is the $(i,n)$ -th entry of $S^*$ and $S^*$ is introduced in (36). Furthermore,
159
+
160
+ $$
161
+ g \left(\boldsymbol {s} ^ {*}\right) = \left(\frac {1}{2}\right) ^ {\sum_ {t = 1} ^ {T} s _ {t} ^ {*}} \sum_ {\boldsymbol {v} \in \mathcal {V}} \left[ \prod_ {t = 1} ^ {T} \binom {s _ {t} ^ {*}} {v _ {t}} \right] (2 \pi) ^ {N} \prod_ {i = 1} ^ {N} \mathbf {1} _ {\left\{\Delta_ {i} = 0 \right\}}. \tag {13}
162
+ $$
163
+
164
+ The expressions in (10) and (12) are extremely complicated. Nevertheless, they enable us to obtain more insightful derivations as shown later in this paper. Using the above lemmas, we present the following theorems.
165
+
166
+ Theorem 1. The outage probability of FAS can be approximated in a closed-form expression as
167
+
168
+ $$
169
+ \begin{array}{l} \mathbb {P} \left\{\left| h _ {F A S} \right| < \Omega \right\} = F _ {\left| \boldsymbol {h} \right|} (\Omega , \dots , \Omega) \tag {14} \\ \approx \sum_ {s _ {1} = 0} ^ {s _ {0}} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \frac {g \left(\boldsymbol {s} ^ {*}\right)}{\pi^ {N} \det (\boldsymbol {J})} \prod_ {t = 1} ^ {T} \frac {\left(- K _ {m , n}\right) ^ {s _ {t} ^ {*}}}{s _ {t} ^ {*} ! \det (\boldsymbol {J}) ^ {s _ {t} ^ {*}}} \times \\ \prod_ {n = 1} ^ {N} \frac {1}{2} \left(\frac {K _ {n , n}}{\det (\boldsymbol {J})}\right) ^ {- \frac {\bar {s} _ {n}}{2} - \frac {1}{2}} \left[ \Gamma \left(\frac {1 + \bar {s} _ {n}}{2}\right) - \Gamma \left(\frac {1 + \bar {s} _ {n}}{2}, \frac {K _ {n , n} \varOmega^ {2}}{\det (\boldsymbol {J})}\right) \right]. \\ \end{array}
170
+ $$
171
+
172
+ Proof: The result can be obtained using Lemma 2 and substituting $R_{1} = \dots = R_{N} = \Omega$ .
173
+
174
+ Remark 1. According to [10], $\pmb{h}$ can be modeled using $\hat{\pmb{h}} = \left[\hat{h}_1,\dots ,\hat{h}_N\right]^T$ and using the latter model, they show that the outage probability of FAS can be approximated by
175
+
176
+ $$
177
+ F _ {\left| h _ {\mathrm {F A S}} \right|} (\Omega) \approx \left[ \prod_ {n = 1} ^ {N} \int_ {0} ^ {\infty} \frac {1}{\sum_ {m = 1} ^ {\epsilon - \operatorname {r a n k}} u _ {n , m} ^ {2} \lambda_ {m}} \exp \left(- \frac {r}{\sum_ {m = 1} ^ {\epsilon - \operatorname {r a n k}} u _ {n , m} ^ {2} \lambda_ {m}}\right) \left(1 - Q _ {1} \left(\frac {\sqrt {2 r}}{\Psi}, \frac {\sqrt {2} \Omega}{\Psi}\right)\right) ^ {L} d r \right] ^ {\frac {1}{L}}, \tag {15}
178
+ $$
179
+
180
+ where $Q_{1}(\cdot, \cdot)$ is the Marcum-Q function and $L = \min \left\{\frac{1.52(N - 1)}{2\pi W}, N\right\}$ . Note that (15) is a remarkable expression as each $n$ term only has a single integral. Nevertheless, we found that it is challenging to obtain deeper insights from this expression.
181
+
182
+ Theorem 2. The outage probability of FAS at high SNR is given by
183
+
184
+ $$
185
+ \mathbb {P} \left\{\left| h _ {F A S} \right| < \Omega \right\} = \frac {1}{\det (\boldsymbol {J})} \Omega^ {2 N} + o \left(\frac {1}{S N R ^ {N}}\right). \tag {16}
186
+ $$
187
+
188
+ Proof: See Appendix C.
189
+
190
+ Theorem 3. The diversity gain of FAS is approximately expressed as
191
+
192
+ $$
193
+ D _ {F A S} \approx \min \left\{N, N ^ {\prime} \right\}, \tag {17}
194
+ $$
195
+
196
+ where $N'$ is the numerical rank of $J'$ such that $J'$ is the covariance matrix as defined in (2) with $N \to \infty$ for a fixed $W$ .
197
+
198
+ Proof: See Appendix D.
199
+
200
+ In Theorem 2, we can interpret $\det \left( J^{-1} \right)$ as the penalty term and $\Omega^2$ as the gain of FAS, which scales exponentially w.r.t. $N$ . Meanwhile, the term with little-o can be ignored as it approaches zero when the SNR is high. Nevertheless, in Theorem 3, we can see that the diversity gain is limited by $\min \{N, N'\}$ . Thus, increasing $N$ beyond $N'$ might not be useful. Notice that these interpretations cannot be directly obtained from (15).
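+ 
+ Both quantities in Theorems 2 and 3 are easy to evaluate numerically. The short sketch below computes the high-SNR approximation of (16) and estimates $N'$ as the numerical rank of a densely sampled correlation matrix (reusing `correlation_matrix()` from above; `big_N` and `tol` are illustrative choices, not values taken from the paper):
+ 
+ ```python
+ import numpy as np
+ 
+ def outage_high_snr(N, W, snr_db, q):
+     """High-SNR approximation of Theorem 2: Omega^(2N) / det(J)."""
+     omega2 = (2.0 ** q - 1.0) / 10.0 ** (snr_db / 10.0)   # Omega^2
+     return omega2 ** N / np.linalg.det(correlation_matrix(N, W))
+ 
+ def estimate_n_prime(W, big_N=2000, tol=1e-6):
+     """Numerical rank N' of J' (eq. (2) with N -> infinity, sampled densely)."""
+     lams = np.linalg.eigvalsh(correlation_matrix(big_N, W))
+     return int(np.sum(lams > tol * lams.max()))
+ ```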
201
+
202
+ # IV. SUBOPTIMAL SOLUTION: FAS WITH $N^{*}$ PORTS
203
+
204
+ At a fundamental level, [11] showed that increasing the number of channels (or ports) would yield a diminishing gain (i.e., the average received SNR gain is $\sum_{n=1}^{N}\frac{1}{n}$ ). In fact, [10] showed that for a fixed $W$ , the outage probability of FAS might remain similar after some $N$ . For ease of exposition, we denote this $N$ as $N^*$ where $N^* \leq N$ .
205
+
206
+ To the best of our knowledge, little is known about $N^*$ . In fact, it is very challenging to obtain $N^*$ as it varies with the parameter $W$ or, more precisely, with the correlation matrix $J$ .<sup>4</sup> Yet, finding $N^*$ is essential in both theory and practice since it helps FAS to achieve efficient performance with a minimal number of ports. In this section, we present a simple method to approximate $N^*$ for a given $W$ .
207
+
208
+ To begin with, we present the following theorem.
209
+
210
+ Theorem 4. Suppose the channels of FAS with $N$ ports are denoted by $\pmb{h}$ . Then $\pmb{h}$ can be well-approximated by $\tilde{\pmb{h}} = \left[\tilde{h}_1, \dots, \tilde{h}_N\right]^T$ where
211
+
212
+ $$
213
+ \tilde {h} _ {n} = \sum_ {m = 1} ^ {\tilde {N}} u _ {n, m} \sqrt {\lambda_ {m}} z _ {m}, \tag {18}
214
+ $$
215
+
216
+ where $\tilde{N}$ is the numerical rank of $J$ . That is, the PDF and CDF of $\pmb{h}$ and $\tilde{\pmb{h}}$ are similar.
217
+
218
+ Proof: Let $\tilde{N}$ be the numerical rank of $J$ where $\tilde{N} \leq N$ . Using the definition of numerical rank, we have $\lambda_n < \epsilon$ for $n \in \{\tilde{N} + 1, \dots, N\}$ where $\epsilon \approx 0$ . According to Eckart-Young-Mirsky
219
+
220
+ theorem [38], the optimal $\tilde{\boldsymbol{J}}$ that minimizes the Frobenius norm between matrix $\boldsymbol{J}$ and $\tilde{\boldsymbol{J}}$ subject to the constraint that $\mathrm{rank}\bigg(\tilde{\boldsymbol{J}}\bigg)\leq \tilde{N}$ is $\tilde{\boldsymbol{J}} = \boldsymbol {U}\tilde{\boldsymbol{\Lambda}}\boldsymbol{U}^{H}$ where $\tilde{\Lambda} = \mathrm{diag}\left(\lambda_1,\ldots ,\lambda_{\tilde{N}},0,\ldots 0\right)$ .
221
+
222
+ Using this insight, we introduce $\tilde{\pmb{h}}$ as defined in Theorem 4 where the covariance of $\tilde{\pmb{h}}$ is $\tilde{\pmb{J}}$ (i.e., the best approximation of $\pmb{J}$ for $\mathrm{rank}\bigl (\tilde{\boldsymbol{J}}\bigr)\leq \tilde{N}$ ). As a result, we can well-approximate $\pmb{h}$ using $\tilde{\pmb{h}}$ since the Frechet distance between the two distributions is [39]
223
+
224
+ $$
225
+ \begin{array}{l} \left. W _ {2} \left(\mathcal {C N} \left(0 _ {N \times 1}, \boldsymbol {J}\right), \mathcal {C N} \left(0 _ {N \times 1}, \tilde {\boldsymbol {J}}\right)\right) = \left\| (\boldsymbol {\Lambda}) ^ {\frac {1}{2}} - \left(\tilde {\boldsymbol {\Lambda}}\right) ^ {\frac {1}{2}} \right\| _ {F} ^ {2} \right. \tag {19} \\ \approx 0. \\ \end{array}
226
+ $$
227
+
228
+ Corollary 1. If we have the exact eigenvalues and rank of $J$ , then $h = \tilde{h}$ .
229
+
230
+ Proof: Let $\Lambda$ and $\tilde{N}$ be the exact eigenvalues and rank of $J$ . Using the definition of rank, we have $\lambda_{n} = 0$ for $n \in \{\tilde{N} + 1, \dots, N\}$ . It then follows that the Frechet distance between the distributions of $h$ and $\tilde{h}$ is zero.
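+ 
+ The gap in (19) is simple to evaluate numerically: for a rank truncation it reduces to the sum of the discarded eigenvalues. A minimal sketch, assuming a precomputed correlation matrix `J`:
+ 
+ ```python
+ import numpy as np
+ 
+ def frechet_gap(J, keep):
+     """Squared 2-Wasserstein (Frechet) distance of eq. (19) between CN(0, J) and
+     its rank-'keep' truncation obtained by zeroing the smallest eigenvalues."""
+     lams = np.maximum(np.sort(np.linalg.eigvalsh(J))[::-1], 0.0)
+     lams_trunc = np.where(np.arange(len(lams)) < keep, lams, 0.0)
+     return float(np.sum((np.sqrt(lams) - np.sqrt(lams_trunc)) ** 2))
+ ```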
231
+
232
+ As seen in (19), it is the eigenvalues of correlation matrix that play a critical role in the channel approximation. Motivated by this insight, we introduce a new formula as follows:
233
+
234
+ $$
235
+ \begin{array}{l} \varepsilon_ {N ^ {*}} = S _ {N} - S _ {N ^ {*}} \tag {20} \\ = \sigma^ {2} - S _ {N ^ {*}}, \\ \end{array}
236
+ $$
237
+
238
+ where $S_{N^{*}} = \frac{1}{N}\sum_{n = 1}^{N^{*}}\lambda_{n}$ . Note that (20) is analogous to (19) in the sense that the left hand side of (20) measures the gap between the distributions of $h$ and $h^{*}$ , where $h^{*}$ is similarly defined as in (18) but we instead replace $\tilde{N}$ with $N^{*}$ and impose that $N^{*}\leq \tilde{N}$ . Meanwhile, on the right hand side of (20), we consider the average eigenvalues of $J^{*}$ , where $J^{*}$ is the covariance of $h^{*}$ .
239
+
240
+ To reduce the number of required ports, we define $\varepsilon_{\mathrm{tol}} > 0$ and find the smallest integer $N^{*}$ such that $\varepsilon_{\mathrm{tol}} \geq \varepsilon_{N^{*}}$ . Since $J^{*}$ only has $N^{*}$ dominant eigenvalues, we propose to employ a suboptimal FAS with $N^{*}$ ports. Interestingly, $\varepsilon_{\mathrm{tol}}$ has a nice heuristic interpretation in practice. Specifically, it defines the sub-optimality of the proposed FAS, i.e., the proposed FAS is near optimal if $\varepsilon_{\mathrm{tol}}$ is small and less optimal if $\varepsilon_{\mathrm{tol}}$ is large.
241
+
242
+ By fixing $\varepsilon_{\mathrm{tol}}$ appropriately, we observe that FAS with $N^{*}$ ports yields considerable improvement over all FAS with $N < N^{*}$ ports while most of the FAS with $N > N^{*}$ ports yields
243
+
244
+ Algorithm 1 Method of approximating $N^{*}$ given $W$
245
+
246
+ 1: Input: $W, \varepsilon_{\mathrm{tol}}$ ; Output: $N^{*}$
247
+ 2: Compute $\boldsymbol {J} = \boldsymbol {U}\boldsymbol{\Lambda}\boldsymbol{U}^{H}$
248
+ 3: Define $n = 1$ and compute $\varepsilon_{n}$
249
+ 4: While $\varepsilon_{\mathrm{tol}} < \varepsilon_n$ and $n < \tilde{N}$
250
+ 5: $n = n + 1$
251
+ 6: $\varepsilon_{n} = \sigma^{2} - S_{n}$
252
+ 7: end
253
+ 8: Return $n$ as $N^*$
254
+
255
+ marginal improvement over FAS with $N - 1$ ports. Note that we usually have $N^{*} < \tilde{N}$ if $\pmb{J}$ is ill-conditioned and $N^{*} = \tilde{N}$ if $\pmb{J}$ is well-conditioned.
256
+
257
+ The method of approximating $N^*$ is given in Algorithm 1. To measure the computational complexity of our algorithm, we consider the floating-point operations (flops). A flop is defined as one addition, subtraction, multiplication or division of two floating point numbers [40]. In Algorithm 1, computing $J$ and $U\Lambda U^H$ requires $6N^2$ and $21N^3$ flops, respectively [41]. Computing $\varepsilon_n$ requires $n + 1$ flops for each $n$ . Therefore, the total flops of Algorithm 1 is $21N^3 + 6N^2 + \frac{1}{2} N^{*2} + \frac{3}{2} N^*$ , which has a polynomial time-complexity of $\mathcal{O}(N^3)$ since $N^* \leq N$ . In other words, Algorithm 1 is only dominated by the computation of $U\Lambda U^H$ .
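+ 
+ A direct Python transcription of Algorithm 1 is given below (it reuses `correlation_matrix()` from the earlier snippet; the threshold used for the numerical rank $\tilde{N}$ is an illustrative choice). With the settings of Table II ( $W = 0.5$ , $\varepsilon_{\mathrm{tol}} = 0.01$ ), it should return a comparably small value (Table II reports $N^* = 3$ ):
+ 
+ ```python
+ import numpy as np
+ 
+ def approximate_n_star(W, N, eps_tol, sigma2=1.0):
+     """Algorithm 1: smallest n such that eps_n = sigma^2 - (1/N) * (sum of the n
+     largest eigenvalues of J) drops below eps_tol (eq. (20))."""
+     J = correlation_matrix(N, W, sigma2)
+     lams = np.maximum(np.sort(np.linalg.eigvalsh(J))[::-1], 0.0)
+     n_tilde = int(np.sum(lams > 1e-9 * lams[0]))   # numerical rank of J
+     n = 1
+     eps_n = sigma2 - lams[:n].sum() / N
+     while eps_tol < eps_n and n < n_tilde:
+         n += 1
+         eps_n = sigma2 - lams[:n].sum() / N
+     return n
+ 
+ n_star = approximate_n_star(W=0.5, N=50, eps_tol=0.01)
+ ```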
258
+
259
+ Note that $N^{*}$ is also useful in theory. For example, Lemmas 1 and 2 and Theorems 1, 2, and 3 cannot be computed if $J$ is near-singular. To address this, we present the following theorem.
260
+
261
+ Theorem 5. If $J$ is near-singular, then we can approximate the channels of FAS with $N$ ports using $N^*$ ports from a computational perspective. Nevertheless, a small gap between the channel distributions of FAS with $N$ ports and that of $N^*$ ports might exist.
262
+
263
+ Proof: If $J$ is near-singular, then one or more entries are almost linear combinations of the other entries. Thus, we can remove these nearly-dependent entries and only consider $N^*$ independent entries. Since FAS with $N^*$ ports has $N^*$ dominant eigenvalues, Lemmas 1 and 2 and Theorems 1, 2, and 3 become computable. Nevertheless, there might be a small gap between the channel distributions of FAS with $N$ ports and that of $N^*$ ports since the entries are only nearly dependent.
264
+
265
+ ![](images/14a2d37a921014a969858d0690e63855a388287d7ca5a7e139969b19a383dda5.jpg)
266
+ (a)
267
+ Figure 1: FAS with 2 ports: a) joint PDF; b) joint CDF.
268
+
269
+ ![](images/bbf7deca085db4297b7c71ae19fc377e47d9d5137b902ee28fddb062502b4a90.jpg)
270
+ (b)
271
+
272
+ # V. RESULTS AND DISCUSSIONS
273
+
274
+ In this section, we present simulation results to better understand the performance of FAS. We focus on the design of an efficient FAS as well as the factors that limit its performance. Unless stated otherwise, we assume that $\sigma^2 = 1$ , $N = 50$ , $W = 0.5$ , $q = 10$ and $SNR = 30\mathrm{dB}$ .
275
+
276
+ Firstly, we demonstrate the accuracy of (10) and (12). In order to visualize the joint PDF and CDF of $|h|$ , we consider a FAS with 2 ports (i.e., $N = 2$ ). In Fig. 1, the red grid represents the numerical PDF/CDF while the solid surface is the analytical PDF/CDF. As observed, the approximation of the PDF/CDF of $|h|$ matches the numerical one closely over the entire region. Still, it is worth noting that (10) and (12) are very complicated. Thus, approximations with simpler expressions remain desirable.
277
+
278
+ Fig. 2 compares the outage probability of FAS to (14) and (15). As observed, (14) is more accurate because the analytical expression is derived directly from the multivariate correlated Rayleigh distributions and the approximation is only used when truncating the infinite series to a finite one. Here, we assume that $s_0 = 20$ . Compared to the numerical result, the truncation error is negligible as long as $s_0$ is sufficiently large. In contrast, (15) is less accurate because the outage probability of FAS is approximated using the power of single integrals where such simplification may lead to some inaccuracies. Nevertheless, it is worth highlighting that (14) can only be computed for small $N$ as its expression is highly complicated. Thus, (15) is still useful for large $N$ .
279
+
280
+ ![](images/e615f6ff52e1bdf1c2fc165fce3eb11e63c08cd6cb4bced6bce18feed7e0ec1b.jpg)
281
+ Figure 2: Outage probability of FAS versus SNR.
282
+
283
+ ![](images/944531f1d3592b3f8b5c28d498ee8c035aa3d56662ec82e979e3d92445f513af.jpg)
284
+ Figure 3: Outage probability of FAS versus SNR for different $N$ and $W$ : a) $W = 0.5$ ; b) $W = 10$ .
285
+
286
+ ![](images/ca83deca59a8437ab0148f5e8370e03704231ca2d6f6612d410435cc6b624914.jpg)
287
+
288
+ In Fig. 3, we compute the outage probability of FAS versus SNR for different $N$ and $W$ . Comparing Fig. 3(a) and Fig. 3(b), we can clearly see that the outage probability is mainly limited by $W$ . In particular, if $W$ is small and $N$ is large, the outage probability remains similar which is in alignment with the findings of [10]. Nevertheless, if $W$ is sufficiently large, the outage probability decreases significantly as $N$ increases.
289
+
290
+ To better understand this, we further compare the outage probability of FAS to (15) and (16) in Fig. 4. Compared to the numerical result, we can see that (15) is less accurate while (16) is accurate as $SNR$ increases. Specifically, (16) is much more accurate as $SNR$ increases because we apply Taylor series approximation at around zero which corresponds to asymptotically high
291
+
292
+ ![](images/c7a582c0ea39ab88da2b462bd875f07e728804e9aadc675a9b0ddebff74da1bf.jpg)
293
+ Figure 4: Outage probability of FAS at high SNR.
294
+
295
+ SNR. Hence, the error becomes negligible at high SNR. From (16), we learn that $\operatorname{det}\left(J^{-1}\right)$ plays a critical role in the performance of FAS. In particular, $J$ has to be well-conditioned in order for $\Omega^{2N}$ to be the dominant term. If $J$ is near-singular, then $N$ is no longer important. This is because $\operatorname{det}\left(J^{-1}\right)$ cannot be compensated by $\Omega^{2N}$ . To make $J$ a well-conditioned matrix, we can either increase $W$ for a fixed $N$ or decrease $N$ for a fixed $W$ . Nevertheless, we believe that larger $N$ does not cause any harm to the system in practice. It only makes the theoretical analysis harder.
296
+
297
+ As shown in Fig. 5(a), we compare the outage probability of FAS with $N$ ports and that of $N'$ ports for different $W$ where $N < N'$ . As it is seen, the outage probability of the former is lower bounded by that of the latter regardless of $W$ . In Fig. 5(b), we investigate the opposite case where $N > N'$ . As observed, the outage probability of FAS with $N$ ports and that of $N'$ ports are the same for different $W$ . Thus, the diversity gain of FAS is limited by $\min \{N, N'\}$ , which verifies Theorem 3. Theorem 3 also suggests that increasing the number of ports beyond $N'$ provides no improvement in a point-to-point setting.
298
+
299
+ Fig. 6(a) presents the CDFs of $h$ and $\tilde{h}$ , where we fix $R_{1} = \dots = R_{N} = R$ . No significant difference is observed between $h$ and $\tilde{h}$ regardless of $R$ , $N$ and $W$ , because the Frechet distance between the two distributions is always near zero. This confirms Theorem 4 and suggests that one can always use $\tilde{h}$ instead of $h$ . In addition, Fig. 6(b) shows the CDFs of $h$ and $h^{*}$ . Unlike the previous result, a small gap appears between the two distributions as $W$ increases. Despite this gap, the approximation is still fairly good, which verifies Theorem 5.
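+ For completeness, the distance referred to above is presumably that of Dowson and Landau [39]; for two zero-mean Gaussian distributions with covariance matrices $J_1$ and $J_2$ it reads (reproduced here for the reader's convenience)
+
+ $$
+ d^2\left(\mathcal{N}(\pmb{0}, J_1), \mathcal{N}(\pmb{0}, J_2)\right) = \operatorname{tr}\left(J_1 + J_2 - 2\left(J_1^{1/2} J_2 J_1^{1/2}\right)^{1/2}\right),
+ $$
+
+ which vanishes if and only if $J_1 = J_2$ , so a near-zero value indicates that the two channel distributions are nearly identical.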
300
+
301
+ ![](images/39fe1522674f0bd3df17a97a6f6a48567ff807fd20642a81666baf0172cee274.jpg)
302
+ (a)
303
+
304
+ ![](images/276ff8042e1400db4c6cf46bfdc89f0a295e4db3cbbb99db4f690af5193d31cb.jpg)
305
+ (b)
306
+ Figure 5: Outage probability of FAS with $N$ ports versus $N'$ ports: a) $N = 3 < N'$ ; b) $N = 50 > N'$ .
307
+
308
+ ![](images/e7c1e8d81f4b9d9b15de72a6f083d7a2bb69c5ce6f360c4f4e1946a07e55d86c.jpg)
309
+ (a)
310
+ Figure 6: CDF between: a) $h$ and $\tilde{h}$ ; b) $h$ and $h^*$ .
311
+
312
+ ![](images/62d2f41f8287f1ec833256221f266a43989a29d7526a1ee230bd24e73b3d7822.jpg)
313
+ (b)
314
+
315
316
+
317
+ Next, we investigate the accuracy of Algorithm 1 and the efficiency of the proposed suboptimal FAS. The parameter $N^{*}$ obtained for different $W$ using Algorithm 1 is summarized in Table II. As seen in Fig. 7, the outage probability of FAS with $N^{*}$ ports is promising. Specifically, FAS with $N^{*}$ ports yields a significant improvement over FAS with $N^{*} - 1$ ports, whereas FAS with $N^{*} + 1$ ports provides only negligible improvement over FAS with $N^{*}$ ports. Thus, we may use the
318
+
319
+ ![](images/a8b0f0dc4736b109f17149904f8be41ece4e762687fa6822f51672b877e778e3.jpg)
320
+ Figure 7: Outage probability of suboptimal FAS.
321
+
322
+ Table II: Parameter $N^{*}$ for different $W$ using Algorithm 1 where $\varepsilon_{\mathrm{tol}} = 0.01$
323
+
324
+ <table><tr><td>W</td><td>0.5</td><td>1</td><td>2</td><td>3</td><td>4</td></tr><tr><td>N*</td><td>3</td><td>4</td><td>6</td><td>8</td><td>10</td></tr></table>
325
+
326
+ suboptimal FAS to achieve efficient performance.
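+ Since Algorithm 1 itself is not reproduced in this excerpt, the snippet below is only a hedged sketch of the $\varepsilon_{\mathrm{tol}}$ -based stopping rule that Table II suggests: the number of ports is increased until the relative outage improvement falls below $\varepsilon_{\mathrm{tol}}$ . Here `outage(N, W)` stands for any estimator of the FAS outage probability (e.g., the Monte Carlo sketch given after Fig. 2), and the exact stopping criterion is an assumption rather than the paper's.
+
+ ```python
+ # Hedged sketch of an eps_tol-based selection of N* (not the paper's Algorithm 1).
+ def select_n_star(outage, W, eps_tol=0.01, n_max=30):
+     prev = outage(2, W)
+     for N in range(3, n_max + 1):
+         cur = outage(N, W)
+         if prev - cur < eps_tol * prev:   # assumed relative-improvement criterion
+             return N - 1                  # adding this port no longer pays off
+         prev = cur
+     return n_max
+ ```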
327
+
328
+ Finally, in Fig. 8, we compare the outage probability of the proposed suboptimal FAS, the optimal FAS, the single-antenna (SISO) system, the $N$ -branch SC system, and the $N$ -branch MRC system. In the SC and MRC systems, we assume there are $N$ RF-chains, each antenna is at least $\frac{\lambda}{2}$ apart, and their spatial correlations are taken into account. Note that MRC has $N$ active RF-chains. Results show that the proposed suboptimal FAS outperforms the SISO and SC systems. This improvement is due to the ability of FAS to switch to the best port within a finite $W$ .
329
+
330
+ In addition, MRC has the lowest outage probability and outperforms the optimal FAS. This superiority is due to the power gain obtained from a larger number of active RF-chains (i.e., $\left\lfloor \frac{W}{0.5} \right\rfloor + 1$ ) in MRC, whereas FAS has only one active RF-chain. Although MRC is superior to the suboptimal FAS, the latter can achieve a similar performance to the former when $W = 0.5$ . Yet, it is important to recall that MRC has one additional RF-chain compared to the suboptimal FAS in this case. Thus, it would be very interesting to compare the performance of MIMO-FAS and MIMO with the same number of active RF-chains.
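+ As a rough illustration of the combining rules compared in Fig. 8 (an added sketch under simplifying assumptions: $\frac{\lambda}{2}$ -spaced branches with Jakes correlation $J_0(\pi |k - l|)$ , target rate $R = 1$ , and $\left\lfloor \frac{W}{0.5} \right\rfloor + 1$ active branches for MRC), SC keeps the strongest branch whereas MRC sums the branch powers:
+
+ ```python
+ # SC versus MRC outage with correlated lambda/2-spaced branches (illustration only).
+ import numpy as np
+ from scipy.special import j0
+
+ def branch_channels(n_branches, trials, seed=1):
+     idx = np.arange(n_branches)
+     J = j0(np.pi * np.abs(idx[:, None] - idx[None, :]))  # lambda/2 antenna spacing
+     L = np.linalg.cholesky(J + 1e-12 * np.eye(n_branches))
+     rng = np.random.default_rng(seed)
+     g = (rng.standard_normal((trials, n_branches)) + 1j * rng.standard_normal((trials, n_branches))) / np.sqrt(2)
+     return g @ L.conj().T
+
+ W, rate, snr = 0.5, 1.0, 10.0 ** (20 / 10)                # 20 dB SNR, assumed target rate
+ n_rf = int(W / 0.5) + 1                                   # assumed number of active RF-chains
+ h = branch_channels(n_rf, 200_000)
+ thr = 2.0 ** rate - 1.0
+ print("SC  outage:", np.mean(np.max(np.abs(h), axis=1) ** 2 * snr < thr))
+ print("MRC outage:", np.mean(np.sum(np.abs(h) ** 2, axis=1) * snr < thr))
+ ```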
331
+
332
+ ![](images/8b2880e1f057b2fbd0e6e79bd2822a997875c7909770b68d38b2c50480f005a5.jpg)
333
+ Figure 8: Outage probability of suboptimal FAS vs. SISO, SC, and MRC.
334
+
335
+ # VI. CONCLUSIONS
336
+
337
+ In this paper, we considered FAS and approximated its outage probability and diversity gain in closed-form expressions. New meaningful insights were obtained from the analytical results, and simulation results were given to better understand the factors that limit the performance of FAS. Our results showed that the performance of FAS strongly depends on the spatial correlation matrix $J$ . Specifically, increasing the number of ports beyond $N'$ yields no diversity gain in a point-to-point setting; instead, increasing $N$ causes the correlation matrix $J$ to become ill-conditioned. To address this, one can either increase $W$ for a fixed $N$ or decrease $N$ for a fixed $W$ . In addition, we proposed a suboptimal FAS with $N^*$ ports. By fixing an appropriate $\varepsilon_{\mathrm{tol}}$ , the proposed scheme obtained a significant gain over FAS with $N^* - 1$ ports while nearly achieving the same performance as FAS with $N^* + 1$ ports. Thus, the approximation of $N^*$ is useful since a larger number of ports yields diminishing gains at additional cost. Furthermore, $N^*$ can be used to approximate the channels of FAS with $N$ ports if the correlation matrix $J$ is near-singular. Last but not least, the proposed suboptimal FAS outperformed the SISO and SC systems but fell behind MRC due to having a single active RF-chain. Nevertheless, suboptimal FAS and MRC were found to achieve similar performance when $W = 0.5$ . Thus, it would be interesting to study the performance of MIMO-FAS and MIMO in the future.
338
+
339
+ # APPENDIX A: APPROXIMATED PDF OF $|h|$
340
+
341
+ The exact PDF of $|h|$ was first derived in [25]–[27]. In this paper, we employ similar steps and further approximate the PDF of $|h|$ by introducing an $N \times N$ matrix $G$ , applying the
342
+
343
+ binomial theorem, and truncating the infinite series to a finite one for ease of computation. According to [36], the PDF of a circularly symmetric complex Gaussian random vector $\boldsymbol{h}$ is given by
344
+
345
+ $$
346
+ f (\boldsymbol {h}) = \frac {1}{\pi^ {N} \det (\boldsymbol {J})} \exp \left\{- \boldsymbol {h} ^ {H} \boldsymbol {J} ^ {- 1} \boldsymbol {h} \right\}, \tag {21}
347
+ $$
348
+
349
+ where $J^{-1} = \frac{K^T}{\operatorname*{det}(J)}$ by Cramer's rule, with $K$ being the cofactor matrix of $J$ . Using [42, (7-8) & (7-9)], the PDF of (21) in terms of its amplitudes and phases can be obtained as
350
+
351
+ $$
352
+ f _ {| \boldsymbol {h} |, \boldsymbol {\theta}} \left(| h _ {1} |, \theta_ {1}, \dots , | h _ {N} |, \theta_ {N}\right) = \eta \prod_ {t = 1} ^ {T} \exp \left\{\zeta_ {t} \cos (\bar {\theta} _ {t}) \right\}, \tag {22}
353
+ $$
354
+
355
+ where $\eta = \frac{\prod_{n=1}^{N}|h_n|}{\pi^N\operatorname*{det}(J)}\exp\left\{-\frac{\sum_{n=1}^{N}|h_n|^2K_{n,n}}{\operatorname*{det}(J)}\right\}, T = \frac{N(N-1)}{2}$ , $\zeta_t = -\frac{2K_{m,n}|h_n||h_m|}{\operatorname*{det}(J)}$ and $\bar{\theta}_t = \theta_n - \theta_m$ . Throughout this paper, we use the mapping function $t = n + (m-1)N - \frac{m(m+1)}{2}$ , $m < n$ ; conversely, $(m,n)$ can be recovered from $t$ by setting $m = \min m' \in \mathbb{Z}$ subject to $\sum_{i=1}^{m'}(N-i) \geq t$ and $n = t - (m-1)N + \frac{m(m+1)}{2}$ .
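+ As a quick sanity check of this pair indexing (a worked example added here, not part of the original text), take $N = 3$ so that $T = 3$ . The forward map gives
+
+ $$
+ (m, n) = (1, 2) \mapsto t = 2 + 0 - 1 = 1, \quad (1, 3) \mapsto t = 3 + 0 - 1 = 2, \quad (2, 3) \mapsto t = 3 + 3 - 3 = 3,
+ $$
+
+ and the inverse rule recovers, e.g., for $t = 2$ : $m = \min \{m' : \sum_{i=1}^{m'}(3 - i) \geq 2\} = 1$ and $n = 2 - 0 + 1 = 3$ .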
356
+
357
+ Integrating (22) w.r.t. $\theta_{n},\forall n$ over $[0,2\pi ]$ , we have
358
+
359
+ $$
360
+ f _ {| \boldsymbol {h} |} \left(\left| h _ {1} \right|, \dots , \left| h _ {N} \right|\right)
361
+ $$
362
+
363
+ $$
364
+ = \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} f \left(\left| h _ {1} \right|, \theta_ {1}, \dots , \left| h _ {N} \right|, \theta_ {N}\right) d \theta_ {1} \dots d \theta_ {N} \tag {23}
365
+ $$
366
+
367
+ $$
368
+ \stackrel {(a)} {=} \eta \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} \prod_ {t = 1} ^ {T} \sum_ {s _ {t} = 0} ^ {\infty} \frac {\zeta_ {t} ^ {s _ {t}}}{s _ {t} !} \cos (\bar {\theta} _ {t}) ^ {s _ {t}} d \theta_ {1} \dots d \theta_ {N} \tag {24}
369
+ $$
370
+
371
+ $$
372
+ \stackrel {(b)} {=} \eta \sum_ {s _ {1} = 0} ^ {\infty} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \prod_ {t = 1} ^ {T} \beta (t, s _ {t} ^ {*}) \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} \cos (\bar {\theta} _ {t}) ^ {s _ {t} ^ {*}} d \theta_ {1} \dots d \theta_ {N} \tag {25}
373
+ $$
374
+
375
+ $$
376
+ \stackrel {(c)} {=} \eta \sum_ {s _ {1} = 0} ^ {\infty} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \left(\frac {1}{2}\right) ^ {\sum_ {t = 1} ^ {T} s _ {t} ^ {*}} \prod_ {t = 1} ^ {T} \beta (t, s _ {t} ^ {*}) \times \tag {26}
377
+ $$
378
+
379
+ $$
380
+ \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} \prod_ {t = 1} ^ {T} \left(\exp \left\{j \bar {\theta} _ {t} \right\} + \exp \left\{- j \bar {\theta} _ {t} \right\}\right) ^ {s _ {t} ^ {*}} d \theta_ {1} \dots d \theta_ {N}
381
+ $$
382
+
383
+ $$
384
+ \stackrel {(d)} {=} \eta \sum_ {s _ {1} = 0} ^ {\infty} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \left(\frac {1}{2}\right) ^ {\sum_ {t = 1} ^ {T} s _ {t} ^ {*}} \prod_ {t = 1} ^ {T} \beta \left(t, s _ {t} ^ {*}\right) \sum_ {\boldsymbol {v} \in \mathcal {V}} \prod_ {t = 1} ^ {T} \binom {s _ {t} ^ {*}} {v _ {t}} \times \tag {27}
385
+ $$
386
+
387
+ $$
388
+ \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} \exp \left\{j \sum_ {t = 1} ^ {T} \gamma_ {t} \bar {\theta} _ {t} \right\} d \theta_ {1} \dots d \theta_ {N},
389
+ $$
390
+
391
+ where (24) is obtained by using $\exp \{x\} = \sum_{s=0}^{\infty} \frac{x^s}{s!}$ and (25) is obtained using the Cauchy product of power series, where $\beta(t, s_t^*) \triangleq \frac{\zeta_t^{s_t^*}}{s_t^*!}$ and $s_t^* = s_t - s_{t+1}$ with $s_{T+1} = 0$ . Furthermore, (26)
392
+
393
+ is obtained using $\cos (x) = \frac{\exp(jx) + \exp(-jx)}{2}$ and (27) is obtained using the binomial theorem, where $\pmb {v} = [v_{1},\dots ,v_{T}]^{T}$ , $\mathcal{V}$ denotes the set of all possible $\pmb{v}$ with $0 \leq v_t \leq s_t^*$ , and $\gamma_t = 2v_t - s_t^* \in \mathbb{Z}$ .
394
+
395
+ Note that $\int_0^{2\pi}\dots \int_0^{2\pi}\exp \left\{j\sum_{t = 1}^T\gamma_t\bar{\theta}_t\right\} d\theta_1\dots d\theta_N = (2\pi)^N$ if and only if $\sum_{t = 1}^{T}\gamma_{t}\bar{\theta}_{t} = 0$ identically (i.e., the coefficient of every $\theta_n$ vanishes), and the integral is zero otherwise. Therefore, we introduce a new matrix $G$ as defined in (11) and the matrix $\bar{\Theta}$ given by
396
+
397
+ $$
398
+ \bar {\boldsymbol {\Theta}} = \left[ \begin{array}{c c c c c} 0 & \bar {\theta} _ {1} & \bar {\theta} _ {2} & \dots & \bar {\theta} _ {N - 1} \\ & & \bar {\theta} _ {N} & \dots & \bar {\theta} _ {2 N - 3} \\ \vdots & & \ddots & & \vdots \\ & & & & \bar {\theta} _ {T} \\ 0 & & \dots & & 0 \end{array} \right] = \left[ \begin{array}{c c c c c} 0 & \theta_ {2} - \theta_ {1} & \theta_ {3} - \theta_ {1} & \dots & \theta_ {N} - \theta_ {1} \\ & & \theta_ {3} - \theta_ {2} & \dots & \theta_ {N} - \theta_ {2} \\ \vdots & & \ddots & & \vdots \\ & & & & \theta_ {N} - \theta_ {N - 1} \\ 0 & & \dots & & 0 \end{array} \right]. \tag {28}
399
+ $$
400
+
401
+ Using $\bar{\Theta}$ and $G$ , we can easily integrate (27) w.r.t. $\theta_{i}$ by collecting the entries of $G$ whose corresponding entries of $\bar{\Theta}$ contain $\theta_{i}$ , i.e., $\Delta_{i} = \sum_{n=1}^{N} G_{i,n} - \sum_{n=1}^{N} G_{n,i} - G_{i,i}$ . Therefore, (27) leads to
402
+
403
+ $$
404
+ (2 7) = \eta \sum_ {s _ {1} = 0} ^ {\infty} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \left(\frac {1}{2}\right) ^ {\sum_ {t = 1} ^ {T} s _ {t} ^ {*}} \prod_ {t = 1} ^ {T} \beta (t, s _ {t} ^ {*}) \sum_ {\boldsymbol {v} \in \mathcal {V}} \left[ \prod_ {t = 1} ^ {T} \binom {s _ {t} ^ {*}} {v _ {t}} \right] \left[ (2 \pi) ^ {N} \prod_ {i = 1} ^ {N} \mathbf {1} _ {\{\Delta_ {i} = 0 \}} \right] \tag {29}
405
+ $$
406
+
407
+ $$
408
+ \stackrel {(a)} {\approx} \eta \sum_ {s _ {1} = 0} ^ {s _ {0}} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \left(\frac {1}{2}\right) ^ {\sum_ {t = 1} ^ {T} s _ {t} ^ {*}} \prod_ {t = 1} ^ {T} \beta (t, s _ {t} ^ {*}) \sum_ {\boldsymbol {v} \in \mathcal {V}} \left[ \prod_ {t = 1} ^ {T} \binom {s _ {t} ^ {*}} {v _ {t}} \right] \left[ (2 \pi) ^ {N} \prod_ {i = 1} ^ {N} \mathbf {1} _ {\{\Delta_ {i} = 0 \}} \right], \tag {30}
409
+ $$
410
+
411
+ where $(a)$ can be obtained using the facts that $\left(\frac{1}{2}\right)^{\sum_{t=1}^{T} s_t^*}$ is monotonically decreasing in each summation term and $\beta(t, s_t^*) \approx 0$ if $s_t^*$ is sufficiently large.
412
+
413
+ # APPENDIX B: APPROXIMATED CDF OF $|h|$
414
+
415
+ Using (10), the CDF of $|h|$ can be obtained as
416
+
417
+ $$
418
+ \begin{array}{l} F \left(R _ {1}, \dots , R _ {N}\right) \approx \int_ {0} ^ {R _ {1}} \dots \int_ {0} ^ {R _ {N}} f _ {| \boldsymbol {h} |} \left(\left| h _ {1} \right|, \dots , \left| h _ {N} \right|\right) d \left| h _ {1} \right| \dots d \left| h _ {N} \right| (31) \\ = \sum_ {s _ {1} = 0} ^ {s _ {0}} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \frac {g \left(\boldsymbol {s} ^ {*}\right)}{\pi^ {N} \det (\boldsymbol {J})} \prod_ {t = 1} ^ {T} \frac {\left(- 2 K _ {m , n}\right) ^ {s _ {t} ^ {*}}}{s _ {t} ^ {*} ! \det (\boldsymbol {J}) ^ {s _ {t} ^ {*}}} \int_ {0} ^ {R _ {1}} \dots \int_ {0} ^ {R _ {N}} \times (32) \\ \prod_ {n = 1} ^ {N} \left| h _ {n} \right| \prod_ {n = 1} ^ {N} \prod_ {m < n} ^ {N} \left| h _ {n} \right| ^ {s _ {n} ^ {*}} \left| h _ {m} \right| ^ {s _ {m} ^ {*}} \exp \left\{- \frac {\sum_ {n = 1} ^ {N} \left| h _ {n} \right| ^ {2} K _ {n , n}}{\det (\boldsymbol {J})} \right\} d \left| h _ {1} \right| \dots d \left| h _ {N} \right| \\ = \sum_ {s _ {1} = 0} ^ {s _ {0}} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \frac {g \left(\boldsymbol {s} ^ {*}\right)}{\pi^ {N} \det (\boldsymbol {J})} \prod_ {t = 1} ^ {T} \frac {\left(- 2 K _ {m , n}\right) ^ {s _ {t} ^ {*}}}{s _ {t} ^ {*} ! \det (\boldsymbol {J}) ^ {s _ {t} ^ {*}}} \times (33) \\ \prod_ {n = 1} ^ {N} \int_ {0} ^ {R _ {n}} | h _ {n} | ^ {\bar {s} _ {n} + 1} \exp \left\{- \frac {| h _ {n} | ^ {2} K _ {n , n}}{\det (\boldsymbol {J})} \right\} d | h _ {n} | \\ = \sum_ {s _ {1} = 0} ^ {s _ {0}} \sum_ {s _ {2} = 0} ^ {s _ {1}} \dots \sum_ {s _ {T} = 0} ^ {s _ {T - 1}} \frac {g (\boldsymbol {s} ^ {*})}{\pi^ {N} \det (\boldsymbol {J})} \prod_ {t = 1} ^ {T} \frac {\left(- K _ {m , n}\right) ^ {s _ {t} ^ {*}}}{s _ {t} ^ {*} ! \det (\boldsymbol {J}) ^ {s _ {t} ^ {*}}} \times (34) \\ \prod_ {n = 1} ^ {N} \frac {1}{2} \left(\frac {K _ {n , n}}{\det (\boldsymbol {J})}\right) ^ {- \frac {\bar {s} _ {n}}{2} - \frac {1}{2}} \left[ \Gamma \left(\frac {1 + \bar {s} _ {n}}{2}\right) - \Gamma \left(\frac {1 + \bar {s} _ {n}}{2}, \frac {K _ {n , n} R _ {n} ^ {2}}{\det (\boldsymbol {J})}\right) \right], \\ \end{array}
419
+ $$
420
+
421
+ where
422
+
423
+ $$
424
+ g \left(\boldsymbol {s} ^ {*}\right) = \left(\frac {1}{2}\right) ^ {\sum_ {t = 1} ^ {T} s _ {t} ^ {*}} \sum_ {\boldsymbol {v} \in \mathcal {V}} \left[ \prod_ {t = 1} ^ {T} \binom {s _ {t} ^ {*}} {v _ {t}} \right] (2 \pi) ^ {N} \prod_ {i = 1} ^ {N} \mathbf {1} _ {\left\{\Delta_ {i} = 0 \right\}}, \tag {35}
425
+ $$
426
+
427
+ and $\bar{s}_n$ is the sum of $s_t^*$ affecting $\left(|h_n||h_m|\right)^{s_t^*}$ . To compute $\bar{s}_n$ , let us introduce a new matrix
428
+
429
+ $$
430
+ \boldsymbol {S} ^ {*} = \left[ \begin{array}{c c c c c} 0 & s _ {1} ^ {*} & s _ {2} ^ {*} & \dots & s _ {N - 1} ^ {*} \\ & & s _ {N} ^ {*} & \dots & s _ {2 N - 3} ^ {*} \\ \vdots & & \ddots & & \vdots \\ & & & & s _ {T} ^ {*} \\ 0 & & \dots & & 0 \end{array} \right]. \tag {36}
431
+ $$
432
+
433
+ Using (36), we have $\bar{s}_n = \sum_{i=1}^{N} S_{n,i}^* + \sum_{i=1}^{n-1} S_{i,n}^* + 1$ , where $S_{i,n}^*$ denotes the $(i,n)$ -th entry of $S^*$ .
434
+
435
+ # APPENDIX C: OUTAGE PROBABILITY AT HIGH SNR
436
+
437
+ According to [37], the outage probability of a wireless communication system at high SNR can be obtained via the PDF of its fading channels. In particular, suppose the PDF of the channels at high SNR can be approximated as
438
+
439
+ $$
440
+ f _ {\left| h _ {\text {F A S}} \right|} (\Omega) = 2 \xi \Omega^ {2 M + 1} + o \left(\Omega^ {2 M + 1}\right). \tag {37}
441
+ $$
442
+
443
+ Then the outage probability at high SNR is found as
444
+
445
+ $$
446
+ \mathbb {P} \left\{\left| h _ {\mathrm {F A S}} \right| < \Omega \right\} = \frac {\xi}{M + 1} \Omega^ {2 (M + 1)} + o \left(\frac {1}{S N R ^ {M + 1}}\right). \tag {38}
447
+ $$
448
+
449
+ Before approximating the PDF of FAS at high SNR, we highlight that the PDF of (21) in terms of its amplitude and phase can be rewritten as
450
+
451
+ $$
452
+ f _ {| \boldsymbol {h} |, \boldsymbol {\theta}} \left(| h _ {1} |, \theta_ {1}, \dots , | h _ {N} |, \theta_ {N}\right) = \prod_ {n = 1} ^ {N} \frac {\left| h _ {n} \right| H _ {n}}{\pi^ {N} \det (\boldsymbol {J})}, \tag {39}
453
+ $$
454
+
455
+ where
456
+
457
+ $$
458
+ H _ {n} = \exp \left\{- \frac {K _ {n , n} \left| h _ {n} \right| ^ {2}}{\det (\boldsymbol {J})} - \frac {2 \sum_ {m = n + 1} ^ {N} K _ {m , n} \left| h _ {n} \right| \left| h _ {m} \right| \cos \left(\theta_ {n} - \theta_ {m}\right)}{\det (\boldsymbol {J})} \right\}. \tag {40}
459
+ $$
460
+
461
+ Using (39), the approximated PDF of FAS at high SNR can be derived as
462
+
463
+ $$
464
+ \begin{array}{l} f _ {\left| h _ {\mathrm {F A S}} \right|} (\Omega) = \frac {\partial F _ {\left| h _ {\mathrm {F A S}} \right|} (\Omega)}{\partial \Omega} (41) \\ \stackrel {(a)} {=} N \int_ {0} ^ {\Omega} \dots \int_ {0} ^ {\Omega} \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} f _ {| \boldsymbol {h} |, \boldsymbol {\theta}} \left(\left| h _ {1} \right|, \theta_ {1}, \dots , \left| h _ {N - 1} \right|, \theta_ {N - 1}, \Omega , \theta_ {N}\right) \times (42) \\ d \left| h _ {1} \right| \dots d \left| h _ {N - 1} \right| d \theta_ {1} \dots d \theta_ {N} \\ \stackrel {(b)} {=} \frac {N \Omega}{\pi^ {N} \operatorname {d e t} (\boldsymbol {J})} \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} H _ {N} \int_ {0} ^ {\Omega} | h _ {N - 1} | \left(H _ {N - 1} \times \dots \right. (43) \\ \left(\int_ {0} ^ {\Omega} \left| h _ {2} \right| H _ {2} \left(\int_ {0} ^ {\Omega} \left| h _ {1} \right| H _ {1} d \left| h _ {1} \right|\right) d \left| h _ {2} \right|\right) d \left| h _ {N - 1} \right|) d \theta_ {1} \dots d \theta_ {N}, \\ \end{array}
465
+ $$
466
+
467
+ where $(a)$ is obtained using the Leibniz integral rule and $(b)$ follows from (39).
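+ For clarity (an added remark), the Leibniz step in $(a)$ expands the derivative of the $N$ -fold integral over $[0, \Omega]^N$ into $N$ boundary terms,
+
+ $$
+ \frac{\partial}{\partial \Omega} \int_{[0, \Omega]^N} g \, d|h_1| \cdots d|h_N| = \sum_{k = 1}^{N} \int_{[0, \Omega]^{N - 1}} g \big|_{|h_k| = \Omega} \prod_{i \neq k} d|h_i|,
+ $$
+
+ and these $N$ terms become identical to leading order once the Taylor expansions (44)–(45) are applied, which yields the factor $N$ in (42).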
468
+
469
+ According to [43], the term $\int_0^\Omega |h_n|H_n d|h_n|$ can be solved by applying a Taylor series approximation around zero. Specifically, we have
470
+
471
+ $$
472
+ \int_ {0} ^ {\Omega} \left| h _ {n} \right| H _ {n} d \left| h _ {n} \right| = \frac {\Omega^ {2}}{2} + o (\Omega^ {2}), n = \{1, \dots , N - 1 \} \tag {44}
473
+ $$
474
+
475
+ and the Taylor series approximation of $H_N$ at zero is
476
+
477
+ $$
478
+ H _ {N} = 1 + o (1). \tag {45}
479
+ $$
480
+
481
+ Substituting (44) and (45) into (43), we have
482
+
483
+ $$
484
+ \begin{array}{l} f _ {\left| h _ {\text {F A S}} \right|} (\Omega) \\ = \frac {N \Omega}{\pi^ {N} \operatorname {d e t} (\boldsymbol {J})} \left[ \frac {\Omega^ {2}}{2} + o \left(\Omega^ {2}\right) \right] ^ {N - 1} \int_ {0} ^ {2 \pi} \dots \int_ {0} ^ {2 \pi} d \theta_ {1} \dots d \theta_ {N} (46) \\ = \frac {2 N}{\det (\boldsymbol {J})} \Omega^ {2 N - 1} + o \left(\Omega^ {2 N - 1}\right). (47) \\ \end{array}
485
+ $$
486
+
487
+ Comparing (47) to (37), we have $M = N - 1$ and $\xi = \frac{N}{\operatorname*{det}(J)}$ . Applying (38), we have
488
+
489
+ $$
490
+ \mathbb {P} \left\{\left| h _ {\text {F A S}} \right| < \Omega \right\} \approx \frac {1}{\det (\boldsymbol {J})} \Omega^ {2 N} + o \left(\frac {1}{S N R ^ {N}}\right). \tag {48}
491
+ $$
492
+
493
+ # APPENDIX D: DIVERSITY GAIN OF FAS
494
+
495
+ Let us consider the case where $W \to \infty$ . According to [37], the diversity gain of a wireless communication system can be obtained via the PDF of its fading channels at high SNR. Specifically, suppose that the PDF of the channels at high SNR can be approximated as in (37). Then the diversity gain of such a system is given by
496
+
497
+ $$
498
+ D = M + 1. \tag {49}
499
+ $$
500
+
501
+ In Appendix C, we have $M = N - 1$ . Thus, it is straightforward that the diversity gain of FAS as $W \to \infty$ is $N$ . Nevertheless, if $W$ is finite, $J$ might be nearly singular. To see this, let us consider FAS with $N \to \infty$ equally spaced ports within a finite $W$ , indexed as $1,2,\ldots$ . Without loss of generality, let us focus on two ports: the $n$ -th and the $(n + 1)$ -th port. The correlation between them is $J_{n,n + 1} = \lim_{N\to \infty}\sigma^2 J_0\left(2\pi \frac{1}{N - 1} W\right) = \sigma^2 J_0(0) = \sigma^2$ , and we have $h_{n + 1} = h_n$ . Thus, the joint CDF of $h_n$ and $h_{n + 1}$ is $F_{h_n,h_{n + 1}}(g_1,g_2) = F_{h_n}(\min \{g_1,g_2\})$ , which implies that their joint distribution is degenerate. Since there are many such ports, we can use a finite number $N'$ of ports to approximate the channels of FAS with $N$ ports, where $N'$ is the numerical rank of $J'$ and $J'$ is the covariance matrix defined in (2) with $N \to \infty$ for a fixed $W$ . As a result, the diversity gain of FAS is approximately limited by $\min \{N,N'\}$ . If $N$ is large but finite, the same observation holds. To remove the nearly-dependent columns of $J$ , one may employ a rank-revealing QR factorization [44] or Gauss-Jordan elimination with a given tolerance.
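+ A hedged sketch of how $N'$ could be estimated in practice is given below (the tolerance and the dense-grid size are illustrative choices, not values from the paper): build $J'$ for a dense grid of ports over the fixed $W$ and take its numerical rank from a rank-revealing (column-pivoted) QR factorization [44].
+
+ ```python
+ # Numerical rank of the dense-port correlation matrix J' via pivoted QR (sketch).
+ import numpy as np
+ from scipy.linalg import qr
+ from scipy.special import j0
+
+ def n_prime(W, N_dense=500, tol=1e-3):
+     idx = np.arange(N_dense)
+     Jp = j0(2 * np.pi * np.abs(idx[:, None] - idx[None, :]) * W / (N_dense - 1))
+     _, R, _ = qr(Jp, pivoting=True)       # rank-revealing QR with column pivoting
+     d = np.abs(np.diag(R))
+     return int(np.sum(d > tol * d[0]))    # count significant pivots
+
+ for W in (0.5, 1.0, 2.0, 5.0):
+     print(W, n_prime(W))
+ ```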
502
+
503
+ # REFERENCES
504
+
505
+ [1] A. Shojaeifard, K.-K. Wong, K.-F. Tong, Z. Chu, A. Mourad, A. Haghighat, I. Hemadeh, N. T. Nguyen, V. Tapio, and M. Juntti, “MIMO evolution beyond 5G through reconfigurable intelligent surfaces and fluid antenna systems,” Proceedings of the IEEE, vol. 110, no. 9, pp. 1244–1265, 2022.
506
+ [2] K. K. Wong, K.-F. Tong, Y. Shen, Y. Chen, and Y. Zhang, "Bruce Lee-inspired fluid antenna system: Six research topics and the potentials for 6G," Frontiers in Communications and Networks, p. 5, 2022.
507
+ [3] Y. Shen, K.-F. Tong, and K.-K. Wong, "Radiation pattern diversified single-fluid-channel surface-wave antenna for mobile communications," in 2022 IEEE-APS Topical Conference on Antennas and Propagation in Wireless Communications (APWC), 2022, pp. 049-051.
508
+
509
+ [4] ——, “Radiation pattern diversified double-fluid-channel surface-wave antenna for mobile communications,” in 2022 IEEE-APS Topical Conference on Antennas and Propagation in Wireless Communications (APWC), 2022, pp. 085–088.
510
+ [5] ——, “Reconfigurable surface wave fluid antenna for spatial MIMO applications,” in 2021 IEEE-APS Topical Conference on Antennas and Propagation in Wireless Communications (APWC), 2021, pp. 150–152.
511
+ [6] ——, “Beam-steering surface wave fluid antennas for MIMO applications,” in 2020 IEEE Asia-Pacific Microwave Conference (APMC), 2020, pp. 634–636.
512
+ [7] K.-K. Wong, A. Shojaeifard, K.-F. Tong, and Y. Zhang, “Fluid antenna systems,” IEEE Transactions on Wireless Communications, vol. 20, no. 3, pp. 1950–1962, 2021.
513
+ [8] K.-K. Wong, K.-F. Tong, Y. Chen, and Y. Zhang, "Extra-large MIMO enabling slow fluid antenna massive access for millimeter-wave bands," *Electronics Letters*, vol. 58, no. 25, pp. 1016-1018, 2022.
514
+ [9] L. Tlebaldiyeva, S. Arzykulov, K. M. Rabie, X. Li, and G. Nauryzbayev, "Outage performance of fluid antenna system (FAS)-aided terahertz communication networks," Accepted by 2023 IEEE International Conference on Communications (ICC), 2023.
515
+ [10] M. Khammassi, A. Kammoun, and M.-S. Alouini, “A new analytical approximation of the fluid antenna system channel,” IEEE Transactions on Wireless Communications, pp. 1-1, 2023.
516
+ [11] D. G. Brennan, "Linear diversity combining techniques," Proceedings of the IRE, vol. 47, no. 6, pp. 1075-1102, 1959.
517
+ [12] K.-K. Wong, A. Shojaeifard, K.-F. Tong, and Y. Zhang, "Performance limits of fluid antenna systems," IEEE Communications Letters, vol. 24, no. 11, pp. 2469-2472, 2020.
518
+ [13] K.-K. Wong and K.-F. Tong, “Fluid antenna multiple access,” IEEE Transactions on Wireless Communications, vol. 21, no. 7, pp. 4801–4815, 2022.
519
+ [14] C. Skouroumounis and I. Krikidis, "Large-scale fluid antenna systems with linear MMSE channel estimation," in ICC 2022 - IEEE International Conference on Communications, 2022, pp. 1330-1335.
520
+ [15] L. Tlebaldiyeva, G. Nauryzbayev, S. Arzykulov, A. Eltawil, and T. Tsiftsis, “Enhancing QoS through fluid antenna systems over correlated Nakagami-m fading channels,” in 2022 IEEE Wireless Communications and Networking Conference (WCNC), 2022, pp. 78–83.
521
+ [16] K. Wong, K. Tong, Y. Chen, and Y. Zhang, "Closed-form expressions for spatial correlation parameters for performance analysis of fluid antenna systems," *Electronics Letters*, vol. 58, no. 11, pp. 454-457, 2022.
522
+ [17] K. N. Le, “A review of selection combining receivers over correlated rician fading,” Digital Signal Processing, vol. 88, pp. 1–22, 2019. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S1051200418307176
523
+ [18] K. S. Miller, “Complex Gaussian processes,” *SIAM Review*, vol. 11, no. 4, pp. 544–567, 1969.
524
+ [19] C. Tan and N. Beaulieu, “Infinite series representations of the bivariate Rayleigh and Nakagami-m distributions,” IEEE Transactions on Communications, vol. 45, no. 10, pp. 1159–1161, 1997.
525
+ [20] P. Dharmawansa, N. Rajatheva, and C. Tellambura, “On the trivariate Rician distribution,” IEEE Transactions on Communications, vol. 56, no. 12, pp. 1993–1997, 2008.
526
+ [21] Y. Chen and C. Tellambura, "Infinite series representations of the trivariate and quadrivariate Rayleigh distribution and their applications," IEEE Transactions on Communications, vol. 53, no. 12, pp. 2092-2101, 2005.
527
+ [22] M. Tekinay and C. Beard, “Moments of the quadrivariate Rayleigh distribution with applications for diversity receivers,” Annals of Telecommunications, vol. 75, no. 7, pp. 447–459, 2020.
528
+ [23] Y. Chen and C. Tellambura, "Distribution functions of selection combiner output in equally correlated Rayleigh, Rician, and Nakagami-m fading channels," IEEE Transactions on Communications, vol. 52, no. 11, pp. 1948-1956, 2004.
529
+ [24] G. Karagiannidis, D. Zogas, and S. Kotsopoulos, “On the multivariate Nakagami-m distribution with exponential correlation,” IEEE Transactions on Communications, vol. 51, no. 8, pp. 1240–1244, 2003.
530
+
531
+ [25] M. Wiegand and S. Nadarajah, “A series representation for multidimensional Rayleigh distributions,” International Journal of Communication Systems, vol. 31, no. 6, p. e3510, 2018, e3510 dac.3510. [Online]. Available: https://onlinelibrary.wiley.com/doi/abs/10.1002/dac.3510
532
+ [26] ——, “Series approximations for Rayleigh distributions of arbitrary dimensions and covariance matrices,” Signal Processing, vol. 165, pp. 20–29, 2019.
533
+ [27] ——, “New generalised approximation methods for the cumulative distribution function of arbitrary multivariate Rayleigh random variables,” Signal Processing, vol. 176, p. 107664, 2020. [Online]. Available: https://www.sciencedirect.com/science/article/pii/S0165168420302073
534
+ [28] R. G. Gallager, Principles of digital communication. Cambridge University Press Cambridge, UK, 2008, vol. 1.
535
+ [29] Z. Chai, K.-K. Wong, K.-F. Tong, Y. Chen, and Y. Zhang, “Port selection for fluid antenna systems,” IEEE Communications Letters, vol. 26, no. 5, pp. 1180–1184, 2022.
536
+ [30] L. Zhu, W. Ma, and R. Zhang, "Modeling and performance analysis for movable antenna enabled wireless communications," arXiv preprint arXiv:2210.05325, 2022.
537
+ [31] W. Ma, L. Zhu, and R. Zhang, “MIMO capacity characterization for movable antenna systems,” arXiv preprint arXiv:2210.05396, 2022.
538
+ [32] N. Waqar, K.-K. Wong, K.-F. Tong, A. Sharples, and Y. Zhang, “Deep learning enabled slow fluid antenna multiple access,” IEEE Communications Letters, vol. 27, no. 3, pp. 861–865, 2023.
539
+ [33] K.-K. Wong, K.-F. Tong, Y. Chen, and Y. Zhang, "Fast fluid antenna multiple access enabling massive connectivity," IEEE Communications Letters, vol. 27, no. 2, pp. 711-715, 2023.
540
+ [34] H. Xu, K.-K. Wong, W. K. New, and K.-F. Tong, “On the outage probability for two-user fluid antenna multiple access,” Accepted by 2023 IEEE International Conference on Communications (ICC), 2023.
541
+ [35] G. L. Stüber, Principles of mobile communication. Springer, 1996, vol. 2.
542
+ [36] D. Tse and P. Viswanath, Fundamentals of wireless communication. Cambridge university press, 2005.
543
+ [37] Z. Wang and G. Giannakis, “A simple and general parameterization quantifying performance in fading channels,” IEEE Transactions on Communications, vol. 51, no. 8, pp. 1389-1398, 2003.
544
+ [38] G. Golub, A. Hoffman, and G. Stewart, “A generalization of the eckart-young-mirsky matrix approximation theorem,” Linear Algebra and its Applications, vol. 88-89, pp. 317-327, 1987. [Online]. Available: https://www.sciencedirect.com/science/article/pii/0024379587901145
545
+ [39] D. Dowson and B. Landau, “The Frechet distance between multivariate normal distributions,” Journal of Multivariate Analysis, vol. 12, no. 3, pp. 450-455, 1982. [Online]. Available: https://www.sciencedirect.com/science/article/pii/0047259X8290077X
546
+ [40] S. Boyd, S. P. Boyd, and L. Vandenberghe, Convex optimization. Cambridge university press, 2004.
547
+ [41] W. Ford, Numerical linear algebra with applications: Using MATLAB. Academic Press, 2014.
548
+ [42] A. Papoulis and S. U. Pillai, "Probability, random variables, and stochastic processes," 2002.
549
+ [43] S. Liu, J. Cheng, and N. C. Beaulieu, "Asymptotic error analysis of diversity schemes on arbitrarily correlated Rayleigh channels," IEEE Transactions on Communications, vol. 58, no. 5, pp. 1351-1355, 2010.
550
+ [44] G. Golub, “Numerical methods for solving linear least squares problems,” Numerische Mathematik, vol. 7, no. 3, pp. 206–216, 1965.
2301.00xxx/2301.00073/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c4a5e6096aeeb5d09144fb8c916fc698daa028c83f8caec5d36340c8dc5f2f3
3
+ size 891561
2301.00xxx/2301.00073/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00127/97724fca-330b-4a45-9d7d-ecac8fcb1f6d_content_list.json ADDED
@@ -0,0 +1,1346 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Spatiotemporal implicit neural representation for unsupervised dynamic MRI reconstruction",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 124,
8
+ 103,
9
+ 872,
10
+ 143
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Jie Feng $^{a}$ , Ruimin Feng $^{a}$ , Qing Wu $^{b}$ , Zhiyong Zhang $^{a}$ , Yuyao Zhang $^{b,c}$ , Hongjiang Wei $^{a,*}$",
17
+ "bbox": [
18
+ 196,
19
+ 162,
20
+ 796,
21
+ 179
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{a}$ School of Biomedical Engineering, Shanghai Jiao Tong University, Shanghai, China \n $^{b}$ School of Information Science and Technology, ShanghaiTech University, Shanghai, China \n $^{c}$ iHuman Institute, Shanghaitech University, Shanghai, China",
28
+ "bbox": [
29
+ 248,
30
+ 187,
31
+ 746,
32
+ 224
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "Abstract",
39
+ "text_level": 1,
40
+ "bbox": [
41
+ 58,
42
+ 278,
43
+ 126,
44
+ 291
45
+ ],
46
+ "page_idx": 0
47
+ },
48
+ {
49
+ "type": "text",
50
+ "text": "Supervised Deep-Learning (DL)-based reconstruction algorithms have shown state-of-the-art results for highly-undersampled dynamic Magnetic Resonance Imaging (MRI) reconstruction. However, the requirement of excessive high-quality ground-truth data hinders their applications due to the generalization problem. Recently, Implicit Neural Representation (INR) has appeared as a powerful DL-based tool for solving the inverse problem by characterizing the attributes of a signal as a continuous function of corresponding coordinates in an unsupervised manner. In this work, we proposed an INR-based method to improve dynamic MRI reconstruction from highly undersampled $k$ -space data, which only takes spatiotemporal coordinates as inputs. Specifically, the proposed INR represents the dynamic MRI images as an implicit function and encodes them into neural networks. The weights of the network are learned from sparsely-acquired $(k, t)$ -space data itself only, without external training datasets or prior images. Benefiting from the strong implicit continuity regularization of INR together with explicit regularization for low-rankness and sparsity, our proposed method outperforms the compared scan-specific methods at various acceleration factors. E.g., experiments on retrospective cardiac cine datasets show an improvement of $5.5 \\sim 7.1$ dB in PSNR for extremely high accelerations (up to $41.6 \\times$ ). The high-quality and inner continuity of the images provided by INR has great potential to further improve the spatiotemporal resolution of dynamic MRI, without the need of any training data.",
51
+ "bbox": [
52
+ 57,
53
+ 298,
54
+ 939,
55
+ 483
56
+ ],
57
+ "page_idx": 0
58
+ },
59
+ {
60
+ "type": "text",
61
+ "text": "Keywords: Dynamic MR imaging, Implicit Neural Representation, Unsupervised learning",
62
+ "bbox": [
63
+ 58,
64
+ 491,
65
+ 668,
66
+ 506
67
+ ],
68
+ "page_idx": 0
69
+ },
70
+ {
71
+ "type": "text",
72
+ "text": "1. Introduction",
73
+ "text_level": 1,
74
+ "bbox": [
75
+ 58,
76
+ 532,
77
+ 174,
78
+ 546
79
+ ],
80
+ "page_idx": 0
81
+ },
82
+ {
83
+ "type": "text",
84
+ "text": "Dynamic Magnetic Resonance Imaging (MRI) is one of the most popular MRI technologies, which can preserve not only excellent tissue contrast but also dynamic temporal changes of tissue. Dynamic MRI requires rapid data collection for the study of moving organs with severe physiological motion, such as the heart [1] and abdomen [2]. Dynamic Contrast-Enhanced (DCE) MRI has also made tremendous contributions to the study of microvascular structure and function of in vivo organs [3].",
85
+ "bbox": [
86
+ 57,
87
+ 557,
88
+ 485,
89
+ 684
90
+ ],
91
+ "page_idx": 0
92
+ },
93
+ {
94
+ "type": "text",
95
+ "text": "However, the limitations of MRI hardware on gradient encoding performance and long acquisition time slow down our pace for higher spatiotemporal resolutions in dynamic MRI[4]. Spatial and temporal resolution are always inversely related. High spatial resolution images can only be acquired with low temporal resolution and vice versa. Thus, a trade-off has to be made between spatial and temporal resolution in practical dynamic MRI. This conflict can be potentially resolved by developing advanced MRI reconstruction methods from highly-undersampled $k$ -space data, including the traditional Compressed-Sensing (CS)-based methods and the Deep-Learning (DL)-based methods.",
96
+ "bbox": [
97
+ 57,
98
+ 684,
99
+ 485,
100
+ 854
101
+ ],
102
+ "page_idx": 0
103
+ },
104
+ {
105
+ "type": "text",
106
+ "text": "CS methods exploit spatial and temporal correlations of dynamic MRI by using irregular $k$ -space undersampling patterns to create incoherent artifacts in a suitable transform domain where the medical images are compressible, such as in the $k$ -t domain [5], temporal-gradient domain (temporal total variation regularizer) [6, 7] and many others. Image reconstruction is performed by exploiting the sparsity in the solution, subject to data consistency constraints. The further development of sparsity extended to the usage of low-rank prior: the Low-rank and Sparsity (L&S) strategy enforced a both sparse and low-rank output solution [8, 9], and the Low-rank plus Sparsity $(\\mathrm{L} + \\mathrm{S})$ strategy decomposed the solution images into a low-rank and a sparsity component for background and the dynamic foreground, respectively [10]. Recently, the subspace-modeling strategy enforced a combination of a temporal sparsity constraint and a low-rank spatial subspace constraint to improve DCE-MRI reconstruction [11, 12].",
107
+ "bbox": [
108
+ 507,
109
+ 532,
110
+ 937,
111
+ 774
112
+ ],
113
+ "page_idx": 0
114
+ },
115
+ {
116
+ "type": "text",
117
+ "text": "Recent advances in DL techniques have shown potential for further accelerating dynamic MRI data acquisition. By adopting the supervised-learning strategy with large quantities of undersampled and fully-sampled image pairs, DL-based methods showed superior performance compared to CS-based methods [13]. DL-based methods applied in dynamic MRI reconstruction can be separated into two categories, i.e., end-to-end and unrolled methods. The end-to-end methods [14, 15] enable the networks to directly learn the mapping from undersampled im",
118
+ "bbox": [
119
+ 507,
120
+ 778,
121
+ 937,
122
+ 906
123
+ ],
124
+ "page_idx": 0
125
+ },
126
+ {
127
+ "type": "page_footnote",
128
+ "text": "*Corresponding author.",
129
+ "bbox": [
130
+ 78,
131
+ 882,
132
+ 208,
133
+ 892
134
+ ],
135
+ "page_idx": 0
136
+ },
137
+ {
138
+ "type": "page_footnote",
139
+ "text": "Email address: hongjiang.wei@sjtu.edu.cn (Hongjiang Wei)",
140
+ "bbox": [
141
+ 82,
142
+ 894,
143
+ 447,
144
+ 904
145
+ ],
146
+ "page_idx": 0
147
+ },
148
+ {
149
+ "type": "aside_text",
150
+ "text": "arXiv:2301.00127v2 [eess.IV] 13 Jan 2023",
151
+ "bbox": [
152
+ 25,
153
+ 312,
154
+ 55,
155
+ 734
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "footer",
161
+ "text": "Preprint submitted to arXiv",
162
+ "bbox": [
163
+ 58,
164
+ 915,
165
+ 210,
166
+ 927
167
+ ],
168
+ "page_idx": 0
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "ages with artifacts to fully sampled high-quality images. In contrast, the unrolled strategy is inspired by unrolling the iterative optimization process of CS, using networks to learn the auxiliary parameters or regularizers [16, 17, 18, 19] during the iterations. Especially, $\\mathrm{L + S}$ -Net [19] combined the $\\mathrm{L + S}$ strategy of CS-based methods with the unrolled DL methods, demonstrating the availability of low-rank and sparsity in DL methods. However, the excessive demand for high-quality ground-truth labels in supervised learning hinders its applications in practice due to the generalization issue [13]. For example, the performance of the trained networks would degrade when the data is acquired with different scan parameters or pathological conditions. While in the case of DCE MRI, the ground-truth data are not available [20]. Alternatively, the unsupervised-learning strategy was introduced to the DL-based dynamic MRI reconstruction without involving external data in the training process. For example, Ke et al. [21] used a time-interleaved acquisition scheme, where the fully-sampled images were generated by merging adjacent frames. However, a large dataset is still needed for training the neural net. Yoo et al. [22] and Ahmed et al. [23] both adopted the Deep Image Prior (DIP) approach [24], which leveraged the tendency of untrained Convolutional Neural Networks (CNN) to generate natural-structured images as an implicit regularizer and then optimized the CNN parameters for scan-specific reconstruction. However, DIP-based methods suffer from a heavy computational burden and are still limited for application [22].",
173
+ "bbox": [
174
+ 60,
175
+ 97,
176
+ 484,
177
+ 479
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "Implicit Neural Representation (INR) is a new way which parameterizes signals by a multi-layer perceptron (MLP) [25]. Unlike traditional explicit representation that uses discrete elements such as pixels (2D images) or voxels (3D volumes), INR represents the desired object itself as a continuous representation function of the spatial coordinates. In other words, the values at any spatial location of the object can be retrieved by querying the trained MLP with the corresponding coordinate. It provides a general solution for various applications of object reconstruction. With the application of MLP and proper encoding function mapping the input coordinates to a high-dimensional space [26], INR has achieved superior performance in multiple computer vision tasks [27, 26, 28]. Previous research also showed the INR's capability to solve the inverse problem in medical imaging fields, e.g., CT image reconstruction [29, 30, 31] and undersampled MRI [32] in an unsupervised manner. For example, implicit Neural Representation learning with Prior embedding (NeRP) [32] was proposed to perform the static MRI reconstruction from the sparsely-sampled $k$ -space data. However, NeRP requires a fully-sampled prior image with the same modality for the reconstruction of longitudinal MRI images of follow-up scans. Additionally, the INR for object reconstruction usually takes hours or even days to converge on one single data. Recently, parametric encoding functions with extra learnable parameters [33, 28, 34, 35] were proposed to significantly shorten the convergence time. For example, the hash encoding [28] function has shown promising results for accelerating the computational processes of INR in seconds for many graphics applications.",
184
+ "bbox": [
185
+ 60,
186
+ 480,
187
+ 484,
188
+ 890
189
+ ],
190
+ "page_idx": 1
191
+ },
192
+ {
193
+ "type": "text",
194
+ "text": "In this paper, we aim to present a new unsupervised method",
195
+ "bbox": [
196
+ 73,
197
+ 892,
198
+ 484,
199
+ 904
200
+ ],
201
+ "page_idx": 1
202
+ },
203
+ {
204
+ "type": "text",
205
+ "text": "for highly accelerated dynamic MRI reconstruction. Inspired by the insight of INR, the proposed method treated the dynamic MR image sequence as a continuous function mapping the spatiotemporal coordinates to the corresponding image intensities. The function was parameterized by a hash encoding function and an MLP and served as an implicit continuity regularizer for dynamic MRI reconstruction. The MLP weights were directly learned from the imaging-model-based $(\\pmb{k},\\mathrm{t})$ -space data consistency loss combined with the explicit regularizers, without training databases or any ground-truth data. When inferring, the reconstructed images can simply be querying the optimized network with the same or denser spatiotemporal coordinates, which would allow for sampling and interpolating the dynamic MRI at an arbitrary frame rate. Experiments on retrospective cardiac cine data and prospective untriggered DCE liver MRI data showed that the proposed method outperformed the compared scan-specific methods. Our results showed an improvement of $5.5\\mathrm{dB}\\sim 7.1$ dB in PSNR at an extremely high acceleration factor (41.6-fold). A temporal super-resolution test $(4\\times)$ was conducted without retraining the network to demonstrate the strong continuity of the optimized representation function as an implicit regularizer for dynamic MRI reconstruction. The main contributions of this study are as follows:",
206
+ "bbox": [
207
+ 507,
208
+ 96,
209
+ 936,
210
+ 423
211
+ ],
212
+ "page_idx": 1
213
+ },
214
+ {
215
+ "type": "list",
216
+ "sub_type": "text",
217
+ "list_items": [
218
+ "- INR is first introduced to dynamic MRI reconstruction as an implicit continuity regularizer, achieving an improvement of $5.5\\mathrm{dB}\\sim 7.1$ dB in PSNR at an extremely high acceleration rate $(41.6\\times)$ compared to other scan-specific methods.",
219
+ "- The INR-based method is an unsupervised-learning strategy, meaning that it does not require external datasets or prior images for training. Thus, the proposed method generalizes on the data acquired with different scan parameters and imaging areas.",
220
+ "- The proposed method achieved a reasonable $4 \\times$ temporal super-resolution for dynamic MRI reconstruction without network retraining, suggesting its strong implicit continuity to achieve higher temporal resolutions."
221
+ ],
222
+ "bbox": [
223
+ 527,
224
+ 436,
225
+ 936,
226
+ 661
227
+ ],
228
+ "page_idx": 1
229
+ },
230
+ {
231
+ "type": "text",
232
+ "text": "2. Method",
233
+ "text_level": 1,
234
+ "bbox": [
235
+ 509,
236
+ 690,
237
+ 591,
238
+ 703
239
+ ],
240
+ "page_idx": 1
241
+ },
242
+ {
243
+ "type": "text",
244
+ "text": "2.1. Dynamic MRI with regularizers",
245
+ "text_level": 1,
246
+ "bbox": [
247
+ 509,
248
+ 724,
249
+ 759,
250
+ 738
251
+ ],
252
+ "page_idx": 1
253
+ },
254
+ {
255
+ "type": "text",
256
+ "text": "In dynamic MRI, the relationship between measured $(\\pmb{k},\\mathrm{t})$ space data and the reconstructed image matrix can be expressed by a linear model. Given the discretized image matrix $d\\in \\mathbb{C}^{(N\\times N)\\times T}$ and the measured $(\\pmb{k},\\mathrm{t})$ -space data of the cth coil $m_{c}\\in \\mathbb{C}^{(N\\times M)\\times T}$ $(1\\leq c\\leq C)$ , where $N$ is the image size, $T$ denotes the total temporal frames of the image, $M$ $(M < N)$ is the number of acquired readout lines for each frame and $C$ is the total number of coil channels. The relationship between $d$ and $m_{c}$ can be formulated as:",
257
+ "bbox": [
258
+ 507,
259
+ 750,
260
+ 936,
261
+ 876
262
+ ],
263
+ "page_idx": 1
264
+ },
265
+ {
266
+ "type": "equation",
267
+ "text": "\n$$\nm _ {c} = F _ {u} S _ {c} d. \\tag {1}\n$$\n",
268
+ "text_format": "latex",
269
+ "bbox": [
270
+ 680,
271
+ 892,
272
+ 936,
273
+ 906
274
+ ],
275
+ "page_idx": 1
276
+ },
277
+ {
278
+ "type": "page_number",
279
+ "text": "2",
280
+ "bbox": [
281
+ 492,
282
+ 914,
283
+ 502,
284
+ 925
285
+ ],
286
+ "page_idx": 1
287
+ },
288
+ {
289
+ "type": "text",
290
+ "text": "Here, $F_{u} \\in \\mathbb{C}^{(N \\times M) \\times (N \\times N)}$ denotes the Fourier operator with the undersampling mask, which simulates the undersampled acquisition process of dynamic MRI, and $S_{c} \\in \\mathbb{C}^{(N \\times N) \\times (N \\times N)}$ is a diagonal matrix representing the cth coil sensitivity map.",
291
+ "bbox": [
292
+ 57,
293
+ 95,
294
+ 484,
295
+ 153
296
+ ],
297
+ "page_idx": 2
298
+ },
299
+ {
300
+ "type": "text",
301
+ "text": "Reconstructing image $d$ from the undersampled $(\\pmb{k},\\mathrm{t})$ -space data is actually solving an ill-posed inverse problem, and the optimization process is formulated as:",
302
+ "bbox": [
303
+ 57,
304
+ 154,
305
+ 485,
306
+ 196
307
+ ],
308
+ "page_idx": 2
309
+ },
310
+ {
311
+ "type": "equation",
312
+ "text": "\n$$\n\\underset {d} {\\arg \\min } \\frac {1}{2} \\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d - m _ {c} \\| _ {2} ^ {2} + \\mathcal {R} (d), \\tag {2}\n$$\n",
313
+ "text_format": "latex",
314
+ "bbox": [
315
+ 146,
316
+ 203,
317
+ 485,
318
+ 241
319
+ ],
320
+ "page_idx": 2
321
+ },
322
+ {
323
+ "type": "text",
324
+ "text": "where $\\mathcal{R}(d)$ is the prior regularizer, helping target $d$ reach optimal results at ill-posed conditions.",
325
+ "bbox": [
326
+ 57,
327
+ 247,
328
+ 485,
329
+ 275
330
+ ],
331
+ "page_idx": 2
332
+ },
333
+ {
334
+ "type": "text",
335
+ "text": "It has been shown that using sparsity and low-rank regularizers as prior knowledge in CS-based [8, 9, 10] and DL-based methods [19] is able to reach state-of-the-art results for dynamic MRI reconstruction. An example can be formulated as:",
336
+ "bbox": [
337
+ 57,
338
+ 275,
339
+ 485,
340
+ 332
341
+ ],
342
+ "page_idx": 2
343
+ },
344
+ {
345
+ "type": "equation",
346
+ "text": "\n$$\n\\underset {d} {\\arg \\min } \\frac {1}{2} \\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d - m _ {c} \\| _ {2} ^ {2} + \\lambda_ {S} \\| T V _ {t} (d) \\| _ {1} + \\lambda_ {L} \\| d \\| _ {*}, \\tag {3}\n$$\n",
347
+ "text_format": "latex",
348
+ "bbox": [
349
+ 78,
350
+ 340,
351
+ 484,
352
+ 378
353
+ ],
354
+ "page_idx": 2
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "where $TV_{t}(\\bullet)$ is the temporal TV operator as the sparsity regularizer. $\\| d\\|_{*}$ is the nuclear norm (sum of singular values) of image matrix $d$ , representing the low-rank regularizer. $\\lambda_{S}$ and $\\lambda_{L}$ are the sparsity and low-rank regularization hyperparameters, respectively. Previous works [8] have proved that the target in Eq. 3 can be optimized iteratively for a good dynamic MRI performance without Ground Truth (GT).",
359
+ "bbox": [
360
+ 57,
361
+ 384,
362
+ 485,
363
+ 483
364
+ ],
365
+ "page_idx": 2
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "2.2. INR in dynamic MRI",
370
+ "text_level": 1,
371
+ "bbox": [
372
+ 58,
373
+ 495,
374
+ 236,
375
+ 508
376
+ ],
377
+ "page_idx": 2
378
+ },
379
+ {
380
+ "type": "text",
381
+ "text": "Inspired by INR, the internal continuity of the image can be a powerful regularizer for solving the ill-posed inverse problem of dynamic MRI reconstruction from sparsely-acquired $(\\pmb{k}, t)$ -space data. The INR-based method can be implemented by applying a learnable continuous mapping function between spatiotemporal coordinates and desired image intensities to be reconstructed. We introduce $f_{\\theta}: \\mathbb{R}^3 \\to \\mathbb{C}$ be the continuous function parameterized by learnable parameters $\\theta$ , mapping the spatiotemporal coordinates $(x,y,t)$ into corresponding image intensities, where $(x,y)$ represent the 2D spatial coordinates $(1 \\leq x,y \\leq N)$ and $t$ represents the temporal coordinate $(1 \\leq t \\leq T)$ . Thus, the image $d$ is rewritten to $d_{\\theta} \\in \\mathbb{C}^{(N \\times N) \\times T}$ by feeding all the spatiotemporal coordinates of the dynamic images into $f_{\\theta}$ and the Casorati matrix $d_{\\theta}$ is:",
382
+ "bbox": [
383
+ 57,
384
+ 511,
385
+ 485,
386
+ 711
387
+ ],
388
+ "page_idx": 2
389
+ },
390
+ {
391
+ "type": "equation",
392
+ "text": "\n$$\nd _ {\\theta} = \\left[ \\begin{array}{c c c} f _ {\\theta} (1, 1, 1) & \\dots & f _ {\\theta} (1, 1, T) \\\\ \\vdots & & \\vdots \\\\ f _ {\\theta} (N, 1, 1) & \\ddots & f _ {\\theta} (N, 1, T) \\\\ \\vdots & & \\vdots \\\\ f _ {\\theta} (N, N, 1) & \\dots & f _ {\\theta} (N, N, T) \\end{array} \\right]. \\tag {4}\n$$\n",
393
+ "text_format": "latex",
394
+ "bbox": [
395
+ 142,
396
+ 718,
397
+ 484,
398
+ 814
399
+ ],
400
+ "page_idx": 2
401
+ },
402
+ {
403
+ "type": "text",
404
+ "text": "Thus, Eq. 3 can be written as a fitting problem that searches the optimal parameters $\\theta$ of the continuous mapping function $f_{\\theta}$ :",
405
+ "bbox": [
406
+ 57,
407
+ 821,
408
+ 485,
409
+ 864
410
+ ],
411
+ "page_idx": 2
412
+ },
413
+ {
414
+ "type": "equation",
415
+ "text": "\n$$\n\\underset {\\theta} {\\arg \\min } \\frac {1}{2} \\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d _ {\\theta} - m _ {c} \\| _ {2} ^ {2} + \\lambda_ {S} \\| T V _ {t} (d _ {\\theta}) \\| _ {1} + \\lambda_ {L} \\| d _ {\\theta} \\| _ {*}. \\tag {5}\n$$\n",
416
+ "text_format": "latex",
417
+ "bbox": [
418
+ 70,
419
+ 871,
420
+ 485,
421
+ 909
422
+ ],
423
+ "page_idx": 2
424
+ },
425
+ {
426
+ "type": "text",
427
+ "text": "Here, Eq. 5 incorporates the implicit continuity on the desired image sequence, together with the explicit sparsity and low-rankness regularizers.",
428
+ "bbox": [
429
+ 507,
430
+ 96,
431
+ 937,
432
+ 139
433
+ ],
434
+ "page_idx": 2
435
+ },
436
+ {
437
+ "type": "text",
438
+ "text": "2.3. Continuous mapping function with MLP and hash encoding",
439
+ "text_level": 1,
440
+ "bbox": [
441
+ 509,
442
+ 153,
443
+ 937,
444
+ 181
445
+ ],
446
+ "page_idx": 2
447
+ },
448
+ {
449
+ "type": "text",
450
+ "text": "In INR, the continuous representation function $f_{\\theta}$ is based on MLP. A better high-frequency fitting performance can be achieved by mapping the input coordinates to a higher dimensional space using an encoding function $\\varphi$ before passing them to MLP [26, 36]:",
451
+ "bbox": [
452
+ 507,
453
+ 184,
454
+ 937,
455
+ 255
456
+ ],
457
+ "page_idx": 2
458
+ },
459
+ {
460
+ "type": "equation",
461
+ "text": "\n$$\nf _ {\\theta} (x, y, t) = M L P (\\varphi (x, y, t)). \\tag {6}\n$$\n",
462
+ "text_format": "latex",
463
+ "bbox": [
464
+ 626,
465
+ 266,
466
+ 936,
467
+ 281
468
+ ],
469
+ "page_idx": 2
470
+ },
471
+ {
472
+ "type": "text",
473
+ "text": "In this work, we adopted hash encoding [37] as the coordinate encoding function $\\varphi$ , which enables the use of smaller MLPs and significantly a faster convergence time. Specifically, hash encoding uses a total of $L$ independent hash grids with the size of $T$ as learnable feature storages. These hash grids represent a set of resolutions in the form of a geometric series, i.e., $N_{min}, b * N_{min}, \\dots, b^{(L-1)} * N_{min}$ , where $N_{min}$ and $b$ are the first term and the ratio of the geometric series, respectively. Trilinear interpolation is applied in each queried hash grid entry to keep continuity. Each hash grid outputs an $F$ -dim feature vector and then these interpolated feature vectors are concatenated as the final encoded input vector. As pointed out by Müller et al. [37], the five hyperparameters mentioned above can be tuned to fit large quantities of tasks better: $N_{min}$ and $b$ decide how the resolution among different hash grids increases, and $L, T, F$ are important tuners for the tradeoff between performance, memory and quality.",
474
+ "bbox": [
475
+ 507,
476
+ 290,
477
+ 937,
478
+ 532
479
+ ],
480
+ "page_idx": 2
481
+ },
482
+ {
483
+ "type": "text",
484
+ "text": "2.4. Loss functions",
485
+ "text_level": 1,
486
+ "bbox": [
487
+ 509,
488
+ 545,
489
+ 645,
490
+ 558
491
+ ],
492
+ "page_idx": 2
493
+ },
494
+ {
495
+ "type": "text",
496
+ "text": "Eq. 5 is rewritten to the form of the following loss functions for the implementation with gradient-descent-based algorithms:",
497
+ "bbox": [
498
+ 507,
499
+ 562,
500
+ 937,
501
+ 590
502
+ ],
503
+ "page_idx": 2
504
+ },
505
+ {
506
+ "type": "equation",
507
+ "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\underbrace {\\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d _ {\\theta} - m _ {c} \\| _ {2} ^ {2}} _ {\\mathcal {L} _ {D C}} + \\lambda_ {S} \\underbrace {\\| T V _ {t} \\left(d _ {\\theta}\\right) \\| _ {1}} _ {\\mathcal {L} _ {T V}} + \\lambda_ {L} \\underbrace {\\left\\| d _ {\\theta} \\right\\| _ {*}} _ {\\mathcal {L} _ {L R}}. \\tag {7}\n$$\n",
508
+ "text_format": "latex",
509
+ "bbox": [
510
+ 526,
511
+ 600,
512
+ 936,
513
+ 656
514
+ ],
515
+ "page_idx": 2
516
+ },
517
+ {
518
+ "type": "text",
519
+ "text": "Here $\\mathcal{L}_{DC},\\mathcal{L}_{TV}$ and $\\mathcal{L}_{LR}$ stand for data consistency (DC) loss in $(\\pmb{k},\\mathrm{t})$ -space, temporal TV loss and low-rank loss, corresponding to the three terms of the optimization objective in Eq. 5, respectively.",
520
+ "bbox": [
521
+ 507,
522
+ 664,
523
+ 937,
524
+ 721
525
+ ],
526
+ "page_idx": 2
527
+ },
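A minimal sketch of the two explicit regularizers in Eq. 7, assuming the network output `d` is a complex-valued image series of shape (T, N, N); the function names and the reshaping of the series into a Casorati matrix are assumptions for illustration.

```python
import torch

def temporal_tv(d):
    # L1 norm of finite differences along the temporal axis (the TV_t term)
    return (d[1:] - d[:-1]).abs().sum()

def low_rank(d):
    # nuclear norm (sum of singular values) of the Casorati matrix, one frame per row
    casorati = d.reshape(d.shape[0], -1)
    return torch.linalg.svdvals(casorati).sum()

# total loss, with illustrative weights lambda_S and lambda_L:
# loss = dc_loss + lambda_S * temporal_tv(d) + lambda_L * low_rank(d)
```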
528
+ {
529
+ "type": "text",
530
+ "text": "Considering that the magnitudes of the $k$ -space low-frequency elements are several orders greater than those of the high-frequency elements, a relative L2 loss [38, 28] is used as the DC loss. Compared with normal L2 loss, the relative L2 loss is normalized by the square of predicted output, helping balance the gradients across $k$ -space for better high-frequency performance. Let $\\hat{Y}_i$ be one element of the multi-coil predicted $k$ -space data $[FS_1dFS_2d\\dots FS_Cd]$ and $Y_{i}$ is the corresponding element of the multi-coil acquired $k$ -space data $[m_1m_2\\dots m_C]$ , then DC loss is written as:",
531
+ "bbox": [
532
+ 507,
533
+ 722,
534
+ 937,
535
+ 863
536
+ ],
537
+ "page_idx": 2
538
+ },
539
+ {
540
+ "type": "equation",
541
+ "text": "\n$$\n\\mathcal {L} _ {D C} = \\sum_ {i = 1} ^ {N \\times M \\times T \\times C} \\frac {\\left(\\hat {Y} _ {i} - Y _ {i}\\right) ^ {2}}{\\left(\\hat {Y} _ {i}\\right) ^ {2} + \\epsilon}. \\tag {8}\n$$\n",
542
+ "text_format": "latex",
543
+ "bbox": [
544
+ 630,
545
+ 871,
546
+ 936,
547
+ 909
548
+ ],
549
+ "page_idx": 2
550
+ },
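A hedged sketch of the relative L2 data-consistency loss of Eq. 8 for complex-valued k-space samples; detaching the denominator from the gradient mirrors the relative loss of [38], but the paper does not state this detail, so it is marked as an assumption in the code.

```python
import torch

def relative_l2(y_pred, y_meas, eps=1e-4):
    # y_pred, y_meas: predicted and acquired multi-coil (k, t)-space samples (complex, same shape)
    num = (y_pred - y_meas).abs() ** 2
    den = y_pred.detach().abs() ** 2 + eps   # assumption: stop-gradient normalization; eps = 1e-4 as in the text
    return (num / den).sum()
```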
551
+ {
552
+ "type": "page_number",
553
+ "text": "3",
554
+ "bbox": [
555
+ 492,
556
+ 914,
557
+ 502,
558
+ 925
559
+ ],
560
+ "page_idx": 2
561
+ },
562
+ {
563
+ "type": "image",
564
+ "img_path": "images/023528a343bdf79f0426b545d385990ed75bea42cb4159522ed029ba0a8b7ec2.jpg",
565
+ "image_caption": [
566
+ "Figure 1: Overview of the proposed method. All the spatiotemporal coordinates are fed into hash grids and an MLP to output two-channel intensities as the real and imaginary parts of the image series. The predicted $k$ -space data are generated with the undersampled Fourier Transform (a golden-angle radial undersampling pattern) from the reconstructed complex-valued images following Eq. 1. The difference between the predicted $k$ -space data and acquired $k$ -space data is calculated as the data consistency loss. Two regularization terms, temporal Total Variation and low-rankness, are applied to the output image series in the loss function. The parameters in the hash grids and the MLP are updated iteratively by minimizing the loss function."
567
+ ],
568
+ "image_footnote": [],
569
+ "bbox": [
570
+ 78,
571
+ 104,
572
+ 915,
573
+ 218
574
+ ],
575
+ "page_idx": 3
576
+ },
577
+ {
578
+ "type": "text",
579
+ "text": "The parameter $\\epsilon$ with a value of $10^{-4}$ is added to the denominator to prevent the zero-division problem.",
580
+ "bbox": [
581
+ 57,
582
+ 318,
583
+ 485,
584
+ 347
585
+ ],
586
+ "page_idx": 3
587
+ },
588
+ {
589
+ "type": "text",
590
+ "text": "Therefore, the parameters $\\theta$ of hash grids and MLP are optimized to minimize the total loss:",
591
+ "bbox": [
592
+ 57,
593
+ 349,
594
+ 484,
595
+ 376
596
+ ],
597
+ "page_idx": 3
598
+ },
599
+ {
600
+ "type": "equation",
601
+ "text": "\n$$\n\\mathcal {L} _ {\\text {t o t a l}} = \\underbrace {\\sum_ {i = 1} ^ {N \\times M \\times T \\times C} \\frac {\\left(\\hat {Y} _ {i} - Y _ {i}\\right) ^ {2}}{\\left(\\hat {Y} _ {i}\\right) ^ {2} + \\epsilon}} _ {\\mathcal {L} _ {D C}} + \\lambda_ {S} \\underbrace {\\| T V _ {I} (d _ {\\theta}) \\| _ {1}} _ {\\mathcal {L} _ {T V}} + \\lambda_ {L} \\underbrace {\\| d _ {\\theta} \\| _ {*}} _ {\\mathcal {L} _ {L R}}. \\tag {9}\n$$\n",
602
+ "text_format": "latex",
603
+ "bbox": [
604
+ 77,
605
+ 390,
606
+ 485,
607
+ 447
608
+ ],
609
+ "page_idx": 3
610
+ },
611
+ {
612
+ "type": "text",
613
+ "text": "2.5. Implementation details",
614
+ "text_level": 1,
615
+ "bbox": [
616
+ 58,
617
+ 476,
618
+ 250,
619
+ 491
620
+ ],
621
+ "page_idx": 3
622
+ },
623
+ {
624
+ "type": "text",
625
+ "text": "We used a tiny MLP containing 5 hidden layers and each hidden layer consisted of 64 neurons followed by a ReLU activation function. The MLP output 2 channels, representing the real and imaginary components of the complex-valued MRI images. No activation function was adopted for the last layer.",
626
+ "bbox": [
627
+ 57,
628
+ 512,
629
+ 485,
630
+ 585
631
+ ],
632
+ "page_idx": 3
633
+ },
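A plain-PyTorch stand-in for the tiny MLP described above (the paper builds it with tiny-cuda-nn); `in_features` would be the L * F output size of the hash encoding, and the helper name is an assumption.

```python
import torch.nn as nn

def make_mlp(in_features, hidden=64, n_hidden=5, out_features=2):
    layers, width = [], in_features
    for _ in range(n_hidden):
        layers += [nn.Linear(width, hidden), nn.ReLU()]   # 5 hidden layers of 64 neurons + ReLU
        width = hidden
    layers.append(nn.Linear(width, out_features))         # real/imaginary channels, no activation
    return nn.Sequential(*layers)
```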
634
+ {
635
+ "type": "text",
636
+ "text": "During the optimization process, all the spatiotemporal coordinates were gathered in one batch and the batch size was set to 1. All the coordinates were isotropically normalized to [0, 1] for fast convergence. The number of optimization epochs was set to 500. The Adam optimizer [39] was used with a constant learning rate of $0.001$ , $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.999$ , and $\\epsilon = 10^{-8}$ .",
637
+ "bbox": [
638
+ 57,
639
+ 586,
640
+ 485,
641
+ 671
642
+ ],
643
+ "page_idx": 3
644
+ },
645
+ {
646
+ "type": "text",
647
+ "text": "Once the optimization process was done, the continuous function $f_{\\theta}$ was considered a good representation of the underlying image sequences. Then the same coordinate batch or a denser coordinate batch can be fed into the INR network to output the image sequences.",
648
+ "bbox": [
649
+ 57,
650
+ 674,
651
+ 485,
652
+ 745
653
+ ],
654
+ "page_idx": 3
655
+ },
656
+ {
657
+ "type": "text",
658
+ "text": "The whole pipeline is illustrated in Fig.1, and was conducted on a system equipped with an Intel i7-9700 processor, 64G RAM, and an NVIDIA RTX 2080Ti 11G GPU. The networks were implemented with PyTorch 1.11.0 and tiny-cudann<sup>1</sup>. The non-cartesian Fourier undersampling operation was implemented with the Non-Uniform Fast Fourier Transform (NUFFT) and was deployed with torchkbnufft 1.3.0 [40] for fast calculation and gradient backpropagation on GPU.",
659
+ "bbox": [
660
+ 57,
661
+ 747,
662
+ 485,
663
+ 862
664
+ ],
665
+ "page_idx": 3
666
+ },
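A sketch of the multi-coil radial forward operator assembled with torchkbnufft, following the library's published examples; the exact argument shapes and the `smaps` keyword should be checked against the installed version.

```python
import torchkbnufft as tkbn

im_size = (208, 208)                       # cropped cardiac cine matrix from Section 3.1.1
nufft_op = tkbn.KbNufft(im_size=im_size)

def forward_model(image, smaps, ktraj):
    # image: (1, 1, H, W) complex frame; smaps: (1, C, H, W) coil sensitivity maps;
    # ktraj: (2, n_points) radial trajectory in radians
    return nufft_op(image, ktraj, smaps=smaps)   # predicted multi-coil k-space samples
```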
667
+ {
668
+ "type": "text",
669
+ "text": "3. Experiments and results",
670
+ "text_level": 1,
671
+ "bbox": [
672
+ 509,
673
+ 318,
674
+ 710,
675
+ 332
676
+ ],
677
+ "page_idx": 3
678
+ },
679
+ {
680
+ "type": "text",
681
+ "text": "3.1. Setup",
682
+ "text_level": 1,
683
+ "bbox": [
684
+ 510,
685
+ 360,
686
+ 586,
687
+ 375
688
+ ],
689
+ "page_idx": 3
690
+ },
691
+ {
692
+ "type": "text",
693
+ "text": "3.1.1. Datasets",
694
+ "text_level": 1,
695
+ "bbox": [
696
+ 510,
697
+ 395,
698
+ 620,
699
+ 409
700
+ ],
701
+ "page_idx": 3
702
+ },
703
+ {
704
+ "type": "text",
705
+ "text": "The proposed method was tested on a simulated retrospective cardiac cine dataset and a perspective untriggered DCE liver dataset to prove its effectiveness and generalization.",
706
+ "bbox": [
707
+ 507,
708
+ 413,
709
+ 937,
710
+ 456
711
+ ],
712
+ "page_idx": 3
713
+ },
714
+ {
715
+ "type": "text",
716
+ "text": "(1) Retrospective cardiac cine dataset:",
717
+ "text_level": 1,
718
+ "bbox": [
719
+ 526,
720
+ 458,
721
+ 786,
722
+ 472
723
+ ],
724
+ "page_idx": 3
725
+ },
726
+ {
727
+ "type": "text",
728
+ "text": "The fully sampled cardiac cine data from the OCMR dataset [41] were acquired from healthy volunteers on a 1.5T scanner (MAGNETOM Avanto, Siemens Healthineers, Erlangen, Germany) using a bSSFP sequence with the following parameters: $\\mathrm{FOV} = 320\\times 260\\mathrm{mm}^2$ , imaging matrix $= 256\\times 208$ , slice thickness $= 8\\mathrm{mm}$ , TR/TE $= 2.79\\mathrm{ms} / 1.33\\mathrm{ms}$ , number of frame $= 18$ . The data acquisition was collected with prospective ECGgating and breath-holding. The number of receiver coils is 18. A simulation undersampling pattern of 2D golden-angle radial acquisition scheme is adopted, where the readout lines are repetitively through the center of $k$ -space and rotated with a step of $111.25^{\\circ}$ . The simulation process includes cropping original data to $208\\times 208$ in the image domain and then converting to the frequency domain by multi-coil NUFFT with golden-angle trajectories of Fibonacci numbers [42]. The coil sensitivity maps were calculated by the ESPIRiT algorithm [43].",
729
+ "bbox": [
730
+ 507,
731
+ 475,
732
+ 937,
733
+ 702
734
+ ],
735
+ "page_idx": 3
736
+ },
737
+ {
738
+ "type": "text",
739
+ "text": "(2) Untriggered DCE liver dataset:",
740
+ "text_level": 1,
741
+ "bbox": [
742
+ 526,
743
+ 705,
744
+ 764,
745
+ 718
746
+ ],
747
+ "page_idx": 3
748
+ },
749
+ {
750
+ "type": "text",
751
+ "text": "The DCE liver data were acquired continuously with the golden-angle acquisition scheme. The 3D stack-of-stars Fast Low Angle SHot (FLASH) sequence was acquired on a breathtaking healthy volunteer using a 3T Siemens MAGNETOM Verio scanner with the following parameters: $\\mathrm{FOV} = 370 \\times 370 \\, \\mathrm{mm}^2$ , $\\mathrm{TR/TE} = 3.83 \\, \\mathrm{ms}/1.71 \\, \\mathrm{ms}$ , imaging matrix $= 384 \\times 384$ , slice thickness $= 3 \\, \\mathrm{mm}$ , total spoke number of each slice $= 600$ . A total of 12 receiver coils were used during the scan. The data including coil sensitivity maps were from Feng et al. [7]'s demo and details about intravenous contrast enhancement can be found in the paper. Each 34 acquired spokes were grouped to reconstruct one frame, which corresponds to an Acceleration Factor $(\\mathrm{AF}) \\approx 11.3$ and 17 frames in total.",
752
+ "bbox": [
753
+ 507,
754
+ 721,
755
+ 937,
756
+ 904
757
+ ],
758
+ "page_idx": 3
759
+ },
760
+ {
761
+ "type": "page_footnote",
762
+ "text": "1https://github.com/nvlabs/tiny-cuda-nn",
763
+ "bbox": [
764
+ 77,
765
+ 892,
766
+ 294,
767
+ 904
768
+ ],
769
+ "page_idx": 3
770
+ },
771
+ {
772
+ "type": "page_number",
773
+ "text": "4",
774
+ "bbox": [
775
+ 492,
776
+ 914,
777
+ 502,
778
+ 925
779
+ ],
780
+ "page_idx": 3
781
+ },
782
+ {
783
+ "type": "text",
784
+ "text": "3.1.2. Performance evaluation",
785
+ "text_level": 1,
786
+ "bbox": [
787
+ 58,
788
+ 96,
789
+ 272,
790
+ 110
791
+ ],
792
+ "page_idx": 4
793
+ },
794
+ {
795
+ "type": "text",
796
+ "text": "In this work, we chose NUFFT, $\\mathrm{L + S}$ [10] and GRASP [7] as the baselines for comparison. NUFFT gives the results obtained by directly zero-filling the frequency domain. $\\mathrm{L + S}$ and GRASP two of the CS-based reconstruction methods which use a similar optimization pipeline as Eq. 2. The difference between them is that GRASP adopted a temporal TV regularizer, while $\\mathrm{L + S}$ decomposed the solution images into a background component with low-rank regularizer and a dynamic foreground with temporal TV regularizer. We did not compare the proposed INR-based method to the supervised DL methods for dynamic MRI reconstruction since the datasets used in this work are insufficient for supervised network training. In addition, the ground truth is not available for the untriggered DCE liver dataset, which also limits the training process of previous supervised methods.",
797
+ "bbox": [
798
+ 55,
799
+ 111,
800
+ 485,
801
+ 323
802
+ ],
803
+ "page_idx": 4
804
+ },
805
+ {
806
+ "type": "text",
807
+ "text": "We tested the performance of the proposed method with 21, 13, 8 and 5 spokes per frame (AF ≈ 9.9, 16, 26, 41.6) on the cardiac cine dataset, and with 34 spokes per frame (AF ≈ 11.3) on the DCE liver dataset. For a fair comparison, the hyperparameters of all the methods are tuned to get the best performance and fit the GPU storage in different datasets and AFs, respectively.",
808
+ "bbox": [
809
+ 57,
810
+ 325,
811
+ 485,
812
+ 411
813
+ ],
814
+ "page_idx": 4
815
+ },
816
+ {
817
+ "type": "text",
818
+ "text": "Quantitative visual comparison and quantitative comparison were used for evaluation. For the cardiac cine dataset, quantitative metrics including peak signal-to-noise ratio (PSNR) and structural similarity index (SSIM) were calculated frame-by-frame as follows:",
819
+ "bbox": [
820
+ 57,
821
+ 411,
822
+ 485,
823
+ 481
824
+ ],
825
+ "page_idx": 4
826
+ },
827
+ {
828
+ "type": "equation",
829
+ "text": "\n$$\nP S N R = 1 0 \\times \\log_ {1 0} \\left(\\frac {1}{\\| y - \\hat {y} \\| _ {2} ^ {2}}\\right), \\tag {10}\n$$\n",
830
+ "text_format": "latex",
831
+ "bbox": [
832
+ 166,
833
+ 485,
834
+ 485,
835
+ 519
836
+ ],
837
+ "page_idx": 4
838
+ },
839
+ {
840
+ "type": "equation",
841
+ "text": "\n$$\nS S I M = \\frac {\\left(2 \\mu_ {y} \\mu_ {\\hat {y}} + c _ {1}\\right) \\left(2 \\sigma_ {y \\hat {y}} + c _ {2}\\right)}{\\left(\\mu_ {y} ^ {2} + \\mu_ {\\hat {y}} ^ {2} + c _ {1}\\right) \\left(\\sigma_ {y} ^ {2} + \\sigma_ {\\hat {y}} ^ {2} + c _ {2}\\right)}, \\tag {11}\n$$\n",
842
+ "text_format": "latex",
843
+ "bbox": [
844
+ 124,
845
+ 524,
846
+ 484,
847
+ 556
848
+ ],
849
+ "page_idx": 4
850
+ },
851
+ {
852
+ "type": "text",
853
+ "text": "where $y$ and $\\hat{y}$ represent ground truth and reconstructed image, respectively, $\\mu_y$ and $\\mu_{\\hat{y}}$ are the mean intensity of $y$ and $\\hat{y}$ , $\\sigma_y$ and $\\sigma_{\\hat{y}}$ are the variance of $y$ and $\\hat{y}$ , $\\sigma_{y\\hat{y}}$ is the covariance of $y$ and $\\hat{y}$ , the constant $c_1$ and $c_2$ were set to $0.01^2$ and $0.03^2$ . $y$ and $\\hat{y}$ were both normalized to [0, 1] according to the image sequence maximum and minimum.",
854
+ "bbox": [
855
+ 57,
856
+ 558,
857
+ 485,
858
+ 644
859
+ ],
860
+ "page_idx": 4
861
+ },
862
+ {
863
+ "type": "text",
864
+ "text": "The $k$ -space data were calculated from the reconstructed complex-valued MR images with the 2D Fast Fourier Transform. For quantitative comparison, the normalized root mean square error (NRMSE) against GT $k$ -space data was calculated coil-by-coil:",
865
+ "bbox": [
866
+ 57,
867
+ 645,
868
+ 485,
869
+ 715
870
+ ],
871
+ "page_idx": 4
872
+ },
873
+ {
874
+ "type": "equation",
875
+ "text": "\n$$\nN R M S E = \\sqrt {\\frac {\\left\\| Y - \\hat {Y} \\right\\| _ {2} ^ {2}}{\\| Y \\| _ {2} ^ {2}}}, \\tag {12}\n$$\n",
876
+ "text_format": "latex",
877
+ "bbox": [
878
+ 186,
879
+ 715,
880
+ 484,
881
+ 758
882
+ ],
883
+ "page_idx": 4
884
+ },
885
+ {
886
+ "type": "text",
887
+ "text": "where $Y$ and $\\hat{Y}$ represents the predicted and acquired $k$ -space data, respectively.",
888
+ "bbox": [
889
+ 57,
890
+ 763,
891
+ 485,
892
+ 791
893
+ ],
894
+ "page_idx": 4
895
+ },
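Direct transcriptions of Eqs. 10 and 12, reading the squared norm in Eq. 10 as the mean squared error over pixels (the usual PSNR convention, an assumption here); SSIM would typically be computed with an off-the-shelf routine such as skimage.metrics.structural_similarity.

```python
import torch

def psnr(y, y_hat):                 # y, y_hat: real-valued images normalized to [0, 1]
    mse = ((y - y_hat) ** 2).mean()
    return 10 * torch.log10(1.0 / mse)

def nrmse(Y, Y_hat):                # Y: acquired, Y_hat: predicted k-space data of one coil (complex)
    return torch.sqrt(((Y - Y_hat).abs() ** 2).sum() / (Y.abs() ** 2).sum())
```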
896
+ {
897
+ "type": "text",
898
+ "text": "For the DCE liver dataset, only visual comparison and temporal ROI intensity assessment were conducted due to the lack of the GT image. The ROIs of the aorta (AO) and portal vein (PV) were manually drawn for signal intensity-time curves. For temporal fidelity comparison, NUFFT was used as the reference since no temporal regularization was involved in the reconstructed images. Although contaminated by the streaking artifacts, the average signal intensity from NUFFT results across",
899
+ "bbox": [
900
+ 55,
901
+ 791,
902
+ 485,
903
+ 906
904
+ ],
905
+ "page_idx": 4
906
+ },
907
+ {
908
+ "type": "text",
909
+ "text": "large ROI was still able to preserve contrast evolution for fidelity analysis.",
910
+ "bbox": [
911
+ 507,
912
+ 96,
913
+ 937,
914
+ 124
915
+ ],
916
+ "page_idx": 4
917
+ },
918
+ {
919
+ "type": "text",
920
+ "text": "3.2. Reconstruction performance of the proposed method",
921
+ "text_level": 1,
922
+ "bbox": [
923
+ 509,
924
+ 141,
925
+ 902,
926
+ 156
927
+ ],
928
+ "page_idx": 4
929
+ },
930
+ {
931
+ "type": "text",
932
+ "text": "3.2.1.Cardiac cine dataset",
933
+ "text_level": 1,
934
+ "bbox": [
935
+ 510,
936
+ 162,
937
+ 702,
938
+ 175
939
+ ],
940
+ "page_idx": 4
941
+ },
942
+ {
943
+ "type": "text",
944
+ "text": "Fig.2 compares the reconstruction performance of different methods on the cardiac cine dataset with 21 and 13 spokes per frame (AF=9.9, 16, respectively). Visually, the images reconstructed by the proposed method appear to have better anatomical details and provide a more accurate temporal fidelity than the baselines at both acceleration conditions of 21 and 13 spokes. NUFFT, $\\mathrm{L + S}$ and GRASP all suffer from artifacts and noises in the cardiac chamber area. While the reconstructed images by the proposed method show highly similar anatomical details as the ground truth, as pointed out by red arrows. In the y-t view, the $\\mathrm{L + S}$ results are over smooth and the GRASP results suffer from noticeable streaking artifacts along the temporal axis. The proposed method provides the highest temporal fidelity of the dynamic images between frames. The error map between the reconstruction and the ground truth further supports our observation. It is noted that the reconstructed errors observed on the error maps at the edge of the cardiac ventricles were potentially blurred by cardiac motion. The proposed INR-based method shows the smallest error at the edge of the ventricles, and is consistent with the observation from the y-t view. Quantitatively, the proposed method achieves the best performance with a PSNR of $39.00\\pm 0.55$ dB (21 spokes) / $37.86\\pm 0.61$ dB (13 spokes) and an SSIM of $0.980\\pm 0.003$ (21 spokes) / $0.975\\pm 0.004$ (13 spokes) than the compared methods.",
945
+ "bbox": [
946
+ 507,
947
+ 178,
948
+ 937,
949
+ 518
950
+ ],
951
+ "page_idx": 4
952
+ },
953
+ {
954
+ "type": "text",
955
+ "text": "We further tested the ability of the proposed method for dynamic MRI reconstruction at extremely high acceleration rates (8 and 5 per frame, AF=26 and 41.6, respectively), as shown in Fig.3. The proposed method exhibits comparable performance between AF=26 and 41.6, with the PSNR/SSIM of $36.88 \\pm 0.63$ dB/0.968 $\\pm 0.005$ (8 spokes) and $35.41 \\pm 0.56$ dB/0.957 $\\pm 0.006$ (5 spokes). The proposed method has the best image quality with minimal noise and artifacts. Contrarily, L+S and GRASP suffer from temporal smoothness and noticeable streaking artifacts with increased acceleration rates. From the y-t view, the dynamic information on the reconstructed images is well captured by the proposed method, even with 5 spokes per frame. Additionally, the proposed INR-based method results in a higher PSNR than GRASP (5.5 dB) and L+S (7.1 dB), respectively.",
956
+ "bbox": [
957
+ 507,
958
+ 519,
959
+ 937,
960
+ 733
961
+ ],
962
+ "page_idx": 4
963
+ },
964
+ {
965
+ "type": "text",
966
+ "text": "3.2.2. DCE liver dataset",
967
+ "text_level": 1,
968
+ "bbox": [
969
+ 509,
970
+ 749,
971
+ 684,
972
+ 760
973
+ ],
974
+ "page_idx": 4
975
+ },
976
+ {
977
+ "type": "text",
978
+ "text": "For DCE liver dataset with 34 spokes per frame (AF=11.3), the visual comparisons at different temporal phases are demonstrated in Fig. 4(a). As can be seen from the zoomed-in images, the anatomical details of the kidney can be well visible on the reconstructed images by the proposed method. Severe streaking and noise can be observed on the reconstructed images by NUFFT, L+S, and GRASP. While the proposed method provides high-quality images with less noise than other methods. The signal intensity-time curves in Fig. 4(b) suggest that the proposed method yields the best temporal fidelity, which is",
979
+ "bbox": [
980
+ 507,
981
+ 763,
982
+ 937,
983
+ 906
984
+ ],
985
+ "page_idx": 4
986
+ },
987
+ {
988
+ "type": "page_number",
989
+ "text": "5",
990
+ "bbox": [
991
+ 492,
992
+ 914,
993
+ 504,
994
+ 925
995
+ ],
996
+ "page_idx": 4
997
+ },
998
+ {
999
+ "type": "image",
1000
+ "img_path": "images/f304faa96a109472fb8e77c2a432e70147f5609eb983e4f11d4160d117eee8a3.jpg",
1001
+ "image_caption": [
1002
+ "Figure 2: The reconstruction results of NUFFT, $\\mathrm{L + S}$ , GRASP and the proposed method (from left to right) on the cardiac cine dataset with 21 and 13 spokes per frame (AF=9.9, 16). The enlarged views of the heart region are outlined by the orange boxes and the red arrows point out the structure where the proposed method gives a superior reconstruction performance. The y-t images (the 116th slice along y and temporal dimensions) are outlined by green boxes. The error maps and PSNR/SSIM metrics are shown at the bottom, respectively."
1003
+ ],
1004
+ "image_footnote": [],
1005
+ "bbox": [
1006
+ 178,
1007
+ 93,
1008
+ 821,
1009
+ 524
1010
+ ],
1011
+ "page_idx": 5
1012
+ },
1013
+ {
1014
+ "type": "text",
1015
+ "text": "consistent with the results of NUFFT in AO and PV. For example, the intensity fluctuation of the AO curve between Frame 5 and Frame 11 can be well captured by the proposed INR-based method.",
1016
+ "bbox": [
1017
+ 57,
1018
+ 607,
1019
+ 485,
1020
+ 662
1021
+ ],
1022
+ "page_idx": 5
1023
+ },
1024
+ {
1025
+ "type": "text",
1026
+ "text": "3.3. Results of the temporal super-resolution",
1027
+ "text_level": 1,
1028
+ "bbox": [
1029
+ 58,
1030
+ 705,
1031
+ 366,
1032
+ 719
1033
+ ],
1034
+ "page_idx": 5
1035
+ },
1036
+ {
1037
+ "type": "text",
1038
+ "text": "To demonstrate the internal continuity of the optimized representation of the dynamic MRI, we use a denser coordinate along the temporal axis as input to conduct upsampling $(4\\times)$ on the reconstructed dynamic MR image sequence, named temporal super-resolution. The pipeline is shown in Fig.5(a). The GT frames with temporal linear interpolation between Frame 10 and 11 are used as the reference for comparison, as shown in Fig.5(b). Qualitatively, there is no significant structural difference between the super-resolution images and the interpolated images, indicating the strong implicit continuity representation of the optimized INR function.",
1039
+ "bbox": [
1040
+ 57,
1041
+ 749,
1042
+ 485,
1043
+ 904
1044
+ ],
1045
+ "page_idx": 5
1046
+ },
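A sketch of the temporal super-resolution query: the optimized network is simply evaluated on a temporally denser coordinate grid, with coordinates normalized to [0, 1] as in Section 2.5; `model` (the trained hash encoding plus MLP) and the grid sizes are assumptions for illustration.

```python
import torch

def upsample_time(model, N=208, T=18, factor=4):
    xs = torch.linspace(0, 1, N)
    ts = torch.linspace(0, 1, (T - 1) * factor + 1)          # 4x denser temporal sampling
    grid = torch.stack(torch.meshgrid(xs, xs, ts, indexing="ij"), dim=-1)
    out = model(grid.reshape(-1, 3))                         # (N*N*T', 2) real/imaginary outputs
    return torch.complex(out[..., 0], out[..., 1]).reshape(N, N, -1)
```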
1047
+ {
1048
+ "type": "text",
1049
+ "text": "4. Discussion",
1050
+ "text_level": 1,
1051
+ "bbox": [
1052
+ 509,
1053
+ 607,
1054
+ 611,
1055
+ 619
1056
+ ],
1057
+ "page_idx": 5
1058
+ },
1059
+ {
1060
+ "type": "text",
1061
+ "text": "In this study, we proposed a novel unsupervised INR-based deep learning method for highly accelerated dynamic MRI reconstruction, which modeled the dynamic MR image sequence as a continuous mapping function. We validated the proposed method on retrospective cardiac cine data and perspective DCE liver data with various acceleration rates. The results showed the effectiveness and generalization of the proposed method on artifact suppression and motion fidelity preservation, especially at extremely high accelerations of 26-fold or 41.6-fold. The proposed method outperforms the compared CS-based methods such as $\\mathrm{L} + \\mathrm{S}$ and GRASP. The results indicated that the proposed reconstruction method holds promise for high temporal resolution 2D MRI acquisitions.",
1062
+ "bbox": [
1063
+ 507,
1064
+ 634,
1065
+ 937,
1066
+ 819
1067
+ ],
1068
+ "page_idx": 5
1069
+ },
1070
+ {
1071
+ "type": "text",
1072
+ "text": "The superiority of the proposed method over the baseline methods is believed from the implicit regularization from the internal continuity of INR, which is validated by the results of the temporal super-resolution $(4\\times)$ , as shown in Fig.5. In addition, the super-resolution performance allows us to further speed up the data acquisition along the temporal axis for dy",
1073
+ "bbox": [
1074
+ 507,
1075
+ 820,
1076
+ 937,
1077
+ 906
1078
+ ],
1079
+ "page_idx": 5
1080
+ },
1081
+ {
1082
+ "type": "page_number",
1083
+ "text": "6",
1084
+ "bbox": [
1085
+ 492,
1086
+ 914,
1087
+ 504,
1088
+ 925
1089
+ ],
1090
+ "page_idx": 5
1091
+ },
1092
+ {
1093
+ "type": "image",
1094
+ "img_path": "images/03899cb282586c4522d756a82bbf31a443a09fb964681eaaaca8d17c783a0c48.jpg",
1095
+ "image_caption": [
1096
+ "Figure 3: The comparison of the reconstruction results on the cardiac cine dataset with 8 and 5 spokes per frame, which corresponds to the acceleration factors of 26 and 41.6. Zoomed-in views of the heart chambers are outlined by orange boxes and the y-t images (the 116th slice along y and temporal dimensions) are outlined by green boxes. The difference map between the reconstructed image and ground truth and PSNR/SSIM metrics are also shown."
1097
+ ],
1098
+ "image_footnote": [],
1099
+ "bbox": [
1100
+ 178,
1101
+ 93,
1102
+ 821,
1103
+ 526
1104
+ ],
1105
+ "page_idx": 6
1106
+ },
1107
+ {
1108
+ "type": "image",
1109
+ "img_path": "images/1bfef3f9eb0281bc82a31407304db05957aad562dcd980a71d8b9d57bdfe9d6f.jpg",
1110
+ "image_caption": [
1111
+ "Figure 4: The comparison of the reconstruction results and ROI analysis among different methods on the DCE liver dataset with 34 spokes per frame (AF=11.3). (a) Reconstruction results at different contrast phases are visualized. The zoomed-in area outlined by orange boxes with the proposed method gives the best image quality with minimal noise among different methods. (b) Signal intensity-time curves of different methods are compared in aorta (AO) and portal vein (PV) areas, and the NUFFT result serves as the temporal fidelity reference."
1112
+ ],
1113
+ "image_footnote": [],
1114
+ "bbox": [
1115
+ 142,
1116
+ 583,
1117
+ 860,
1118
+ 732
1119
+ ],
1120
+ "page_idx": 6
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "text": "namic MRI. Unlike the existing super-resolution methods, the INR-based method does not require extra modeling or training, but simply gives the denser coordinates, which reduces the computational burden and the reconstruction time usage during deployment.",
1125
+ "bbox": [
1126
+ 57,
1127
+ 815,
1128
+ 485,
1129
+ 885
1130
+ ],
1131
+ "page_idx": 6
1132
+ },
1133
+ {
1134
+ "type": "text",
1135
+ "text": "The proposed method has a few limitations. First, the low",
1136
+ "bbox": [
1137
+ 73,
1138
+ 891,
1139
+ 485,
1140
+ 904
1141
+ ],
1142
+ "page_idx": 6
1143
+ },
1144
+ {
1145
+ "type": "text",
1146
+ "text": "rank regularization adopted in this work is the nuclear norm and is optimized with gradient-descent-based algorithms. However, as discussed by Lingala et al. [8]'s work, naive nuclear norm minimization may not be stable for fast convergence. In future works, INR combined with different low-rank regularization substitutes and optimization methods will be explored.",
1147
+ "bbox": [
1148
+ 507,
1149
+ 815,
1150
+ 937,
1151
+ 900
1152
+ ],
1153
+ "page_idx": 6
1154
+ },
1155
+ {
1156
+ "type": "page_number",
1157
+ "text": "7",
1158
+ "bbox": [
1159
+ 492,
1160
+ 914,
1161
+ 502,
1162
+ 923
1163
+ ],
1164
+ "page_idx": 6
1165
+ },
1166
+ {
1167
+ "type": "image",
1168
+ "img_path": "images/cc11c83c9d1d7701b7e2cc65c3294cbccb50f3264cadb84ecdadcf611c570873.jpg",
1169
+ "image_caption": [],
1170
+ "image_footnote": [],
1171
+ "bbox": [
1172
+ 72,
1173
+ 96,
1174
+ 472,
1175
+ 181
1176
+ ],
1177
+ "page_idx": 7
1178
+ },
1179
+ {
1180
+ "type": "image",
1181
+ "img_path": "images/169869441b561f8a8a7ffa679eaef7c6063a815f0e77f4e3e51c3f5384e85e34.jpg",
1182
+ "image_caption": [
1183
+ "Figure 5: (a) The pipeline of temporal super-resolution for the reconstructed dynamic MRI. For the given denser coordinates, the optimized function (Hash grids & MLP) outputs the interpolated frames. (b) The upsampled images between Frame 10 and Frame 11 of the cardiac cine dataset with 21 spokes per frame. Three equally-spaced coordinates to be generated (10.25, 10.5, 10.75) between Frame 10 and Frame 11 are fed to the network for temporal super-resolution $(4\\times)$ . The ground truth of Frame 10 and Frame 11, and the linear interpolated frames serve as the reference. The reference and output images at the position of Frame 10 and 11 are outlined with orange boxes. The corresponding error maps are displayed at the bottom."
1184
+ ],
1185
+ "image_footnote": [],
1186
+ "bbox": [
1187
+ 72,
1188
+ 186,
1189
+ 473,
1190
+ 351
1191
+ ],
1192
+ "page_idx": 7
1193
+ },
1194
+ {
1195
+ "type": "text",
1196
+ "text": "Second, the temporal super-resolution test indicates a comparable 4 times upsampling result with INR, but the smoothness witnessed at the edge of heart chambers demonstrated its limitation for higher or even arbitrary super-resolution results. Third, although the reconstruction time is faster than the other unsupervised methods, it is still challenging for real-time imaging.",
1197
+ "bbox": [
1198
+ 57,
1199
+ 505,
1200
+ 485,
1201
+ 590
1202
+ ],
1203
+ "page_idx": 7
1204
+ },
1205
+ {
1206
+ "type": "text",
1207
+ "text": "5. Conclusion",
1208
+ "text_level": 1,
1209
+ "bbox": [
1210
+ 58,
1211
+ 618,
1212
+ 164,
1213
+ 631
1214
+ ],
1215
+ "page_idx": 7
1216
+ },
1217
+ {
1218
+ "type": "text",
1219
+ "text": "In this work, we proposed an INR-based unsupervised deep learning method for highly accelerated dynamic MRI reconstruction. The proposed method learns an implicit continuous representation function to represent the desired spatiotemporal image sequence, mapping spatiotemporal coordinates to the corresponding image intensities. The proposed method is training database-free and does not require prior information for the reconstruction. Several tests on retrospective cardiac and perspective DCE liver data proved that the proposed method could robustly produce a high-quality dynamic MR image sequence even at an extremely high acceleration rate $(41.6\\times)$ . Additionally, benefiting from the internal continuity of the optimized INR network, the proposed method demonstrates an impressive performance of temporal super-resolution to upsample the desired dynamic images at higher temporal rates than the physical acquisitions. We thus believe that the INR-based method has the potential to further accelerate dynamic MRI acquisition in the future.",
1220
+ "bbox": [
1221
+ 60,
1222
+ 650,
1223
+ 485,
1224
+ 904
1225
+ ],
1226
+ "page_idx": 7
1227
+ },
1228
+ {
1229
+ "type": "text",
1230
+ "text": "References",
1231
+ "text_level": 1,
1232
+ "bbox": [
1233
+ 512,
1234
+ 96,
1235
+ 591,
1236
+ 109
1237
+ ],
1238
+ "page_idx": 7
1239
+ },
1240
+ {
1241
+ "type": "list",
1242
+ "sub_type": "ref_text",
1243
+ "list_items": [
1244
+ "[1] C. B. Marcu, A. M. Beek, A. C. van Rossum, Clinical applications of cardiovascular magnetic resonance imaging, CMAJ 175 (2006) 911-917.",
1245
+ "[2] R. N. Low, Abdominal mri advances in the detection of liver tumours and characterisation, The Lancet Oncology 8 (2007) 525-535.",
1246
+ "[3] C. Cuenod, D. Balvay, Perfusion and vascular permeability: Basic concepts and measurement in dce-ct and dce-mri, Diagnostic and Interventional Imaging 94 (2013) 1187–1204.",
1247
+ "[4] S. M. Wright, M. P. McDougall, Single echo acquisition mri using rf encoding, NMR in Biomedicine 22 (2009) 982-993.",
1248
+ "[5] H. Jung, K. Sung, K. S. Nayak, E. Y. Kim, J. C. Ye, k-t focuss: A general compressed sensing framework for high resolution dynamic mri, Magnetic Resonance in Medicine 61 (2009) 103-116.",
1249
+ "[6] L. Feng, M. B. Srichai, R. P. Lim, A. Harrison, W. King, G. Adluru, E. V. R. Dibella, D. K. Sodickson, R. Otazo, D. Kim, Highly accelerated real-time cardiac cine migraine using k-t sparse-sense, Magnetic Resonance in Medicine 70 (2013) 64-74.",
1250
+ "[7] L. Feng, R. Grimm, K. T. Block, H. Chandarana, S. Kim, J. Xu, L. Axel, D. K. Sodickson, R. Otazo, Golden-angle radial sparse parallel migraine: Combination of compressed sensing, parallel imaging, and golden-angle radial sampling for fast and flexible dynamic volumetric migraine, Magnetic Resonance in Medicine 72 (2014) 707-717.",
1251
+ "[8] S. G. Lingala, Y. Hu, E. DiBella, M. Jacob, Accelerated dynamic mri exploiting sparsity and low-rank structure: k-t slr, IEEE Transactions on Medical Imaging 30 (2011) 1042-1054.",
1252
+ "[9] B. Zhao, J. P. Haldar, A. G. Christodoulou, Z.-P. Liang, Image reconstruction from highly undersampled (k, t)-space data with joint partial separability and sparsity constraints, IEEE Transactions on Medical Imaging 31 (2012) 1809-1820.",
1253
+ "[10] R. Otazo, E. Candès, D. K. Sodickson, Low-rank plus sparse matrix decomposition for accelerated dynamic mri with separation of background and dynamic components, Magnetic Resonance in Medicine 73 (2015) 1125-1136.",
1254
+ "[11] L. Feng, Q. Wen, C. Huang, A. Tong, F. Liu, H. Chandarana, Grasp-pro: improving grasp dce-mri through self-calibrating subspace-modeling and contrast phase automation, Magnetic Resonance in Medicine 83 (2020) 94-108.",
1255
+ "[12] L. Feng, 4d golden-angle radial mri at subsecond temporal resolution, NMR in Biomedicine (2022) e4844.",
1256
+ "[13] A. Bustin, N. Fuin, R. M. Botnar, C. Prieto, From compressed-sensing to artificial intelligence-based cardiac mri reconstruction, Frontiers in cardiovascular medicine 7 (2020) 17.",
1257
+ "[14] S. Wang, Z. Su, L. Ying, X. Peng, S. Zhu, F. Liang, D. Feng, D. Liang, Accelerating magnetic resonance imaging via deep learning, in: IEEE 13th International Symposium on Biomedical Imaging (ISBI), 2016, pp. 514-517.",
1258
+ "[15] Y. Han, J. Yoo, H. H. Kim, H. J. Shin, K. Sung, J. C. Ye, Deep learning with domain adaptation for accelerated projection-reconstruction mr, Magnetic Resonance in Medicine 80 (2018) 1189-1205.",
1259
+ "[16] J. Schlemper, J. Caballero, J. V. Hajnal, A. Price, D. Rueckert, A deep cascade of convolutional neural networks for mr image reconstruction, in: International Conference on Information Processing in Medical Imaging, 2017, pp. 647-658.",
1260
+ "[17] C. Qin, J. Schlemper, J. Caballero, A. N. Price, J. V. Hajnal, D. Rueckert, Convolutional recurrent neural networks for dynamic mr image reconstruction, IEEE Transactions on Medical Imaging 38 (2019) 280-290.",
1261
+ "[18] C. M. Sandino, P. Lai, S. S. Vasanawala, J. Y. Cheng, Accelerating cardiac cine mri using a deep learning-based esprit reconstruction, Magnetic Resonance in Medicine 85 (2021) 152-167.",
1262
+ "[19] W. Huang, Z. Ke, Z.-X. Cui, J. Cheng, Z. Qiu, S. Jia, L. Ying, Y. Zhu, D. Liang, Deep low-rank plus sparse network for dynamic mr imaging, Medical Image Analysis 73 (2021) 102190.",
1263
+ "[20] Z. Huang, J. Bae, P. M. Johnson, T. Sood, L. Heacock, J. Fogarty, L. Moy, S. G. Kim, F. Knoll, A simulation pipeline to generate realistic breast images for learning dce-mri reconstruction, in: Machine Learning for Medical Image Reconstruction, 2021, pp. 45-53.",
1264
+ "[21] Z. Ke, J. Cheng, L. Ying, H. Zheng, Y. Zhu, D. Liang, An unsupervised deep learning method for multi-coil cine migraine, Physics in Medicine & Biology 65 (2020) 235041.",
1265
+ "[22] J. Yoo, K. H. Jin, H. Gupta, J. Yerly, M. Stuber, M. Unser, Time"
1266
+ ],
1267
+ "bbox": [
1268
+ 512,
1269
+ 129,
1270
+ 937,
1271
+ 904
1272
+ ],
1273
+ "page_idx": 7
1274
+ },
1275
+ {
1276
+ "type": "page_number",
1277
+ "text": "8",
1278
+ "bbox": [
1279
+ 492,
1280
+ 914,
1281
+ 502,
1282
+ 925
1283
+ ],
1284
+ "page_idx": 7
1285
+ },
1286
+ {
1287
+ "type": "list",
1288
+ "sub_type": "ref_text",
1289
+ "list_items": [
1290
+ "dependent deep image prior for dynamic migraine, IEEE Transactions on Medical Imaging 40 (2021) 3337-3348.",
1291
+ "[23] A. H. Ahmed, Q. Zou, P. Nagpal, M. Jacob, Dynamic imaging using deep bi-linear unsupervised representation (deblur), IEEE Transactions on Medical Imaging 41 (2022) 2693-2703.",
1292
+ "[24] D. Ulyanov, A. Vedaldi, V. Lempitsky, Deep image prior, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2018, pp. 9446-9454.",
1293
+ "[25] V. Sitzmann, J. N. P. Martel, A. W. Bergman, D. B. Lindell, G. Wetzstein, Implicit neural representations with periodic activation functions, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), 2020, pp. 7462-7473.",
1294
+ "[26] B. Mildenhall, P. P. Srinivasan, M. Tancik, J. T. Barron, R. Ramamoorthi, R. Ng, Nerf: Representing scenes as neural radiance fields for view synthesis, in: European Conference on Computer Vision (ECCV), 2020, pp. 405-421.",
1295
+ "[27] J. J. Park, P. Florence, J. Straub, R. Newcombe, S. Lovegrove, Deepsdf: Learning continuous signed distance functions for shape representation, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 165-174.",
1296
+ "[28] T. Müller, F. Rousselle, J. Novák, A. Keller, Real-time neural radiance caching for path tracing, arXiv preprint arXiv:2106.12372 (2021).",
1297
+ "[29] G. Zang, R. Idoughi, R. Li, P. Wonka, W. Heidrich, Intratomo: self-supervised learning-based tomography via sinogram synthesis and prediction, in: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021, pp. 1960-1970.",
1298
+ "[30] A. W. Reed, H. Kim, R. Anirudh, K. A. Mohan, K. Champley, J. Kang, S. Jayasuriya, Dynamic ct reconstruction from limited views with implicit neural representations and parametric motion fields, in: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021, pp. 2258-2268.",
1299
+ "[31] Y. Sun, J. Liu, M. Xie, B. Wohlberg, U. S. Kamilov, Coil: Coordinate-based internal learning for tomographic imaging, IEEE Transactions on Computational Imaging 7 (2021) 1400-1412.",
1300
+ "[32] L. Shen, J. Pauly, L. Xing, Nerp: Implicit neural representation learning with prior embedding for sparsely sampled image reconstruction, IEEE Transactions on Neural Networks and Learning Systems (2022) 1-13.",
1301
+ "[33] L. Liu, J. Gu, K. Z. Lin, T.-S. Chua, C. Theobalt, Neural sparse voxel fields, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), NIPS'20, 2020, pp. 15651-15663.",
1302
+ "[34] C. Sun, M. Sun, H. Chen, Direct voxel grid optimization: Superfast convergence for radiance fields reconstruction, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022, pp. 5459-5469.",
1303
+ "[35] Sara Fridovich-Keil and Alex Yu, M. Tancik, Q. Chen, B. Recht, A. Kanazawa, Plenoxels: Radiance fields without neural networks, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022, pp. 5491-5500.",
1304
+ "[36] M. Tancik, P. P. Srinivasan, B. Mildenhall, S. Fridovich-Keil, N. Raghavan, U. Singhal, R. Ramamoorthi, J. T. Barron, R. Ng, Fourier features let networks learn high frequency functions in low dimensional domains, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), 2020, pp. 7537-7547.",
1305
+ "[37] T. Müller, A. Evans, C. Schied, A. Keller, Instant neural graphics primitives with a multiresolution hash encoding, ACM Transactions on Graphics 41 (2022) 102:1-102:15.",
1306
+ "[38] J. Lehtinen, J. Munkberg, J. Hasselgren, S. Laine, T. Karras, M. Aittala, T. Aila, Noise2noise: Learning image restoration without clean data, in: International Conference on Machine Learning (ICML), volume 80, 2018, pp. 2965-2974.",
1307
+ "[39] D. P. Kingma, J. Ba, Adam: A method for stochastic optimization, arXiv preprint arXiv:1412.6980 (2014).",
1308
+ "[40] M. J. Muckley, R. Stern, T. Murrell, F. Knoll, TorchKbNufft: A high-level, hardware-agnostic non-uniform fast Fourier transform, in: ISMRM Workshop on Data Sampling & Image Reconstruction, 2020.",
1309
+ "[41] C. Chen, Y. Liu, P. Schniter, M. Tong, K. Zareba, O. Simonetti, L. Potter, R. Ahmad, Ocmr (v1. 0)-open-access multi-coil k-space dataset for cardiovascular magnetic resonance imaging, arXiv preprint arXiv:2008.03410 (2020).",
1310
+ "[42] H. Chandarana, L. Feng, T. K. Block, A. B. Rosenkrantz, R. P. Lim, J. S. Babb, D. K. Sodickson, R. Otazo, Free-breathing contrast-enhanced mul"
1311
+ ],
1312
+ "bbox": [
1313
+ 60,
1314
+ 98,
1315
+ 485,
1316
+ 897
1317
+ ],
1318
+ "page_idx": 8
1319
+ },
1320
+ {
1321
+ "type": "list",
1322
+ "sub_type": "ref_text",
1323
+ "list_items": [
1324
+ "tiphase mri of the liver using a combination of compressed sensing, parallel imaging, and golden-angle radial sampling, Investigative radiology 48 (2013) 10-16.",
1325
+ "[43] M. Uecker, P. Lai, M. J. Murphy, P. Virtue, M. Elad, J. M. Pauly, S. S. Vasanawala, M. Lustig, Espirit—an eigenvalue approach to autocalibrating parallel mri: Where sense meets grappa, Magnetic Resonance in Medicine 71 (2014) 990-1001."
1326
+ ],
1327
+ "bbox": [
1328
+ 512,
1329
+ 98,
1330
+ 937,
1331
+ 177
1332
+ ],
1333
+ "page_idx": 8
1334
+ },
1335
+ {
1336
+ "type": "page_number",
1337
+ "text": "9",
1338
+ "bbox": [
1339
+ 492,
1340
+ 913,
1341
+ 504,
1342
+ 925
1343
+ ],
1344
+ "page_idx": 8
1345
+ }
1346
+ ]
2301.00xxx/2301.00127/97724fca-330b-4a45-9d7d-ecac8fcb1f6d_model.json ADDED
@@ -0,0 +1,1846 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.125,
7
+ 0.104,
8
+ 0.873,
9
+ 0.144
10
+ ],
11
+ "angle": 0,
12
+ "content": "Spatiotemporal implicit neural representation for unsupervised dynamic MRI reconstruction"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.198,
18
+ 0.164,
19
+ 0.798,
20
+ 0.18
21
+ ],
22
+ "angle": 0,
23
+ "content": "Jie Feng\\(^{a}\\), Ruimin Feng\\(^{a}\\), Qing Wu\\(^{b}\\), Zhiyong Zhang\\(^{a}\\), Yuyao Zhang\\(^{b,c}\\), Hongjiang Wei\\(^{a,*}\\)"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.249,
29
+ 0.189,
30
+ 0.747,
31
+ 0.225
32
+ ],
33
+ "angle": 0,
34
+ "content": "\\(^{a}\\)School of Biomedical Engineering, Shanghai Jiao Tong University, Shanghai, China \n\\(^{b}\\)School of Information Science and Technology, ShanghaiTech University, Shanghai, China \n\\(^{c}\\)iHuman Institute, Shanghaitech University, Shanghai, China"
35
+ },
36
+ {
37
+ "type": "title",
38
+ "bbox": [
39
+ 0.06,
40
+ 0.279,
41
+ 0.127,
42
+ 0.292
43
+ ],
44
+ "angle": 0,
45
+ "content": "Abstract"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.058,
51
+ 0.299,
52
+ 0.94,
53
+ 0.485
54
+ ],
55
+ "angle": 0,
56
+ "content": "Supervised Deep-Learning (DL)-based reconstruction algorithms have shown state-of-the-art results for highly-undersampled dynamic Magnetic Resonance Imaging (MRI) reconstruction. However, the requirement of excessive high-quality ground-truth data hinders their applications due to the generalization problem. Recently, Implicit Neural Representation (INR) has appeared as a powerful DL-based tool for solving the inverse problem by characterizing the attributes of a signal as a continuous function of corresponding coordinates in an unsupervised manner. In this work, we proposed an INR-based method to improve dynamic MRI reconstruction from highly undersampled \\(k\\)-space data, which only takes spatiotemporal coordinates as inputs. Specifically, the proposed INR represents the dynamic MRI images as an implicit function and encodes them into neural networks. The weights of the network are learned from sparsely-acquired \\((k, t)\\)-space data itself only, without external training datasets or prior images. Benefiting from the strong implicit continuity regularization of INR together with explicit regularization for low-rankness and sparsity, our proposed method outperforms the compared scan-specific methods at various acceleration factors. E.g., experiments on retrospective cardiac cine datasets show an improvement of \\(5.5 \\sim 7.1\\) dB in PSNR for extremely high accelerations (up to \\(41.6 \\times\\)). The high-quality and inner continuity of the images provided by INR has great potential to further improve the spatiotemporal resolution of dynamic MRI, without the need of any training data."
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.059,
62
+ 0.492,
63
+ 0.67,
64
+ 0.507
65
+ ],
66
+ "angle": 0,
67
+ "content": "Keywords: Dynamic MR imaging, Implicit Neural Representation, Unsupervised learning"
68
+ },
69
+ {
70
+ "type": "title",
71
+ "bbox": [
72
+ 0.06,
73
+ 0.533,
74
+ 0.176,
75
+ 0.547
76
+ ],
77
+ "angle": 0,
78
+ "content": "1. Introduction"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.058,
84
+ 0.558,
85
+ 0.486,
86
+ 0.685
87
+ ],
88
+ "angle": 0,
89
+ "content": "Dynamic Magnetic Resonance Imaging (MRI) is one of the most popular MRI technologies, which can preserve not only excellent tissue contrast but also dynamic temporal changes of tissue. Dynamic MRI requires rapid data collection for the study of moving organs with severe physiological motion, such as the heart [1] and abdomen [2]. Dynamic Contrast-Enhanced (DCE) MRI has also made tremendous contributions to the study of microvascular structure and function of in vivo organs [3]."
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.058,
95
+ 0.686,
96
+ 0.486,
97
+ 0.856
98
+ ],
99
+ "angle": 0,
100
+ "content": "However, the limitations of MRI hardware on gradient encoding performance and long acquisition time slow down our pace for higher spatiotemporal resolutions in dynamic MRI[4]. Spatial and temporal resolution are always inversely related. High spatial resolution images can only be acquired with low temporal resolution and vice versa. Thus, a trade-off has to be made between spatial and temporal resolution in practical dynamic MRI. This conflict can be potentially resolved by developing advanced MRI reconstruction methods from highly-undersampled \\( k \\)-space data, including the traditional Compressed-Sensing (CS)-based methods and the Deep-Learning (DL)-based methods."
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.509,
106
+ 0.533,
107
+ 0.938,
108
+ 0.775
109
+ ],
110
+ "angle": 0,
111
+ "content": "CS methods exploit spatial and temporal correlations of dynamic MRI by using irregular \\( k \\)-space undersampling patterns to create incoherent artifacts in a suitable transform domain where the medical images are compressible, such as in the \\( k \\)-t domain [5], temporal-gradient domain (temporal total variation regularizer) [6, 7] and many others. Image reconstruction is performed by exploiting the sparsity in the solution, subject to data consistency constraints. The further development of sparsity extended to the usage of low-rank prior: the Low-rank and Sparsity (L&S) strategy enforced a both sparse and low-rank output solution [8, 9], and the Low-rank plus Sparsity \\((\\mathrm{L} + \\mathrm{S})\\) strategy decomposed the solution images into a low-rank and a sparsity component for background and the dynamic foreground, respectively [10]. Recently, the subspace-modeling strategy enforced a combination of a temporal sparsity constraint and a low-rank spatial subspace constraint to improve DCE-MRI reconstruction [11, 12]."
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.509,
117
+ 0.779,
118
+ 0.938,
119
+ 0.907
120
+ ],
121
+ "angle": 0,
122
+ "content": "Recent advances in DL techniques have shown potential for further accelerating dynamic MRI data acquisition. By adopting the supervised-learning strategy with large quantities of undersampled and fully-sampled image pairs, DL-based methods showed superior performance compared to CS-based methods [13]. DL-based methods applied in dynamic MRI reconstruction can be separated into two categories, i.e., end-to-end and unrolled methods. The end-to-end methods [14, 15] enable the networks to directly learn the mapping from undersampled im"
123
+ },
124
+ {
125
+ "type": "page_footnote",
126
+ "bbox": [
127
+ 0.079,
128
+ 0.883,
129
+ 0.209,
130
+ 0.894
131
+ ],
132
+ "angle": 0,
133
+ "content": "*Corresponding author."
134
+ },
135
+ {
136
+ "type": "page_footnote",
137
+ "bbox": [
138
+ 0.084,
139
+ 0.895,
140
+ 0.448,
141
+ 0.906
142
+ ],
143
+ "angle": 0,
144
+ "content": "Email address: hongjiang.wei@sjtu.edu.cn (Hongjiang Wei)"
145
+ },
146
+ {
147
+ "type": "list",
148
+ "bbox": [
149
+ 0.079,
150
+ 0.883,
151
+ 0.448,
152
+ 0.906
153
+ ],
154
+ "angle": 0,
155
+ "content": null
156
+ },
157
+ {
158
+ "type": "aside_text",
159
+ "bbox": [
160
+ 0.026,
161
+ 0.313,
162
+ 0.057,
163
+ 0.736
164
+ ],
165
+ "angle": 270,
166
+ "content": "arXiv:2301.00127v2 [eess.IV] 13 Jan 2023"
167
+ },
168
+ {
169
+ "type": "footer",
170
+ "bbox": [
171
+ 0.06,
172
+ 0.916,
173
+ 0.211,
174
+ 0.928
175
+ ],
176
+ "angle": 0,
177
+ "content": "Preprint submitted to arXiv"
178
+ }
179
+ ],
180
+ [
181
+ {
182
+ "type": "text",
183
+ "bbox": [
184
+ 0.062,
185
+ 0.098,
186
+ 0.485,
187
+ 0.48
188
+ ],
189
+ "angle": 0,
190
+ "content": "ages with artifacts to fully sampled high-quality images. In contrast, the unrolled strategy is inspired by unrolling the iterative optimization process of CS, using networks to learn the auxiliary parameters or regularizers [16, 17, 18, 19] during the iterations. Especially, \\(\\mathrm{L + S}\\)-Net [19] combined the \\(\\mathrm{L + S}\\) strategy of CS-based methods with the unrolled DL methods, demonstrating the availability of low-rank and sparsity in DL methods. However, the excessive demand for high-quality ground-truth labels in supervised learning hinders its applications in practice due to the generalization issue [13]. For example, the performance of the trained networks would degrade when the data is acquired with different scan parameters or pathological conditions. While in the case of DCE MRI, the ground-truth data are not available [20]. Alternatively, the unsupervised-learning strategy was introduced to the DL-based dynamic MRI reconstruction without involving external data in the training process. For example, Ke et al. [21] used a time-interleaved acquisition scheme, where the fully-sampled images were generated by merging adjacent frames. However, a large dataset is still needed for training the neural net. Yoo et al. [22] and Ahmed et al. [23] both adopted the Deep Image Prior (DIP) approach [24], which leveraged the tendency of untrained Convolutional Neural Networks (CNN) to generate natural-structured images as an implicit regularizer and then optimized the CNN parameters for scan-specific reconstruction. However, DIP-based methods suffer from a heavy computational burden and are still limited for application [22]."
191
+ },
192
+ {
193
+ "type": "text",
194
+ "bbox": [
195
+ 0.062,
196
+ 0.481,
197
+ 0.485,
198
+ 0.891
199
+ ],
200
+ "angle": 0,
201
+ "content": "Implicit Neural Representation (INR) is a new way which parameterizes signals by a multi-layer perceptron (MLP) [25]. Unlike traditional explicit representation that uses discrete elements such as pixels (2D images) or voxels (3D volumes), INR represents the desired object itself as a continuous representation function of the spatial coordinates. In other words, the values at any spatial location of the object can be retrieved by querying the trained MLP with the corresponding coordinate. It provides a general solution for various applications of object reconstruction. With the application of MLP and proper encoding function mapping the input coordinates to a high-dimensional space [26], INR has achieved superior performance in multiple computer vision tasks [27, 26, 28]. Previous research also showed the INR's capability to solve the inverse problem in medical imaging fields, e.g., CT image reconstruction [29, 30, 31] and undersampled MRI [32] in an unsupervised manner. For example, implicit Neural Representation learning with Prior embedding (NeRP) [32] was proposed to perform the static MRI reconstruction from the sparsely-sampled \\( k \\)-space data. However, NeRP requires a fully-sampled prior image with the same modality for the reconstruction of longitudinal MRI images of follow-up scans. Additionally, the INR for object reconstruction usually takes hours or even days to converge on one single data. Recently, parametric encoding functions with extra learnable parameters [33, 28, 34, 35] were proposed to significantly shorten the convergence time. For example, the hash encoding [28] function has shown promising results for accelerating the computational processes of INR in seconds for many graphics applications."
202
+ },
203
+ {
204
+ "type": "text",
205
+ "bbox": [
206
+ 0.075,
207
+ 0.893,
208
+ 0.485,
209
+ 0.906
210
+ ],
211
+ "angle": 0,
212
+ "content": "In this paper, we aim to present a new unsupervised method"
213
+ },
214
+ {
215
+ "type": "text",
216
+ "bbox": [
217
+ 0.509,
218
+ 0.097,
219
+ 0.937,
220
+ 0.424
221
+ ],
222
+ "angle": 0,
223
+ "content": "for highly accelerated dynamic MRI reconstruction. Inspired by the insight of INR, the proposed method treated the dynamic MR image sequence as a continuous function mapping the spatiotemporal coordinates to the corresponding image intensities. The function was parameterized by a hash encoding function and an MLP and served as an implicit continuity regularizer for dynamic MRI reconstruction. The MLP weights were directly learned from the imaging-model-based \\((\\pmb{k},\\mathrm{t})\\)-space data consistency loss combined with the explicit regularizers, without training databases or any ground-truth data. When inferring, the reconstructed images can simply be querying the optimized network with the same or denser spatiotemporal coordinates, which would allow for sampling and interpolating the dynamic MRI at an arbitrary frame rate. Experiments on retrospective cardiac cine data and prospective untriggered DCE liver MRI data showed that the proposed method outperformed the compared scan-specific methods. Our results showed an improvement of \\(5.5\\mathrm{dB}\\sim 7.1\\) dB in PSNR at an extremely high acceleration factor (41.6-fold). A temporal super-resolution test \\((4\\times)\\) was conducted without retraining the network to demonstrate the strong continuity of the optimized representation function as an implicit regularizer for dynamic MRI reconstruction. The main contributions of this study are as follows:"
224
+ },
225
+ {
226
+ "type": "text",
227
+ "bbox": [
228
+ 0.529,
229
+ 0.437,
230
+ 0.937,
231
+ 0.506
232
+ ],
233
+ "angle": 0,
234
+ "content": "- INR is first introduced to dynamic MRI reconstruction as an implicit continuity regularizer, achieving an improvement of \\(5.5\\mathrm{dB}\\sim 7.1\\) dB in PSNR at an extremely high acceleration rate \\((41.6\\times)\\) compared to other scan-specific methods."
235
+ },
236
+ {
237
+ "type": "text",
238
+ "bbox": [
239
+ 0.529,
240
+ 0.522,
241
+ 0.937,
242
+ 0.592
243
+ ],
244
+ "angle": 0,
245
+ "content": "- The INR-based method is an unsupervised-learning strategy, meaning that it does not require external datasets or prior images for training. Thus, the proposed method generalizes on the data acquired with different scan parameters and imaging areas."
246
+ },
247
+ {
248
+ "type": "text",
249
+ "bbox": [
250
+ 0.529,
251
+ 0.607,
252
+ 0.937,
253
+ 0.662
254
+ ],
255
+ "angle": 0,
256
+ "content": "- The proposed method achieved a reasonable \\(4 \\times\\) temporal super-resolution for dynamic MRI reconstruction without network retraining, suggesting its strong implicit continuity to achieve higher temporal resolutions."
257
+ },
258
+ {
259
+ "type": "list",
260
+ "bbox": [
261
+ 0.529,
262
+ 0.437,
263
+ 0.937,
264
+ 0.662
265
+ ],
266
+ "angle": 0,
267
+ "content": null
268
+ },
269
+ {
270
+ "type": "title",
271
+ "bbox": [
272
+ 0.51,
273
+ 0.692,
274
+ 0.592,
275
+ 0.705
276
+ ],
277
+ "angle": 0,
278
+ "content": "2. Method"
279
+ },
280
+ {
281
+ "type": "title",
282
+ "bbox": [
283
+ 0.51,
284
+ 0.725,
285
+ 0.761,
286
+ 0.739
287
+ ],
288
+ "angle": 0,
289
+ "content": "2.1. Dynamic MRI with regularizers"
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.509,
295
+ 0.751,
296
+ 0.937,
297
+ 0.877
298
+ ],
299
+ "angle": 0,
300
+ "content": "In dynamic MRI, the relationship between measured \\((\\pmb{k},\\mathrm{t})\\) space data and the reconstructed image matrix can be expressed by a linear model. Given the discretized image matrix \\(d\\in \\mathbb{C}^{(N\\times N)\\times T}\\) and the measured \\((\\pmb{k},\\mathrm{t})\\) -space data of the cth coil \\(m_{c}\\in \\mathbb{C}^{(N\\times M)\\times T}\\) \\((1\\leq c\\leq C)\\), where \\(N\\) is the image size, \\(T\\) denotes the total temporal frames of the image, \\(M\\) \\((M < N)\\) is the number of acquired readout lines for each frame and \\(C\\) is the total number of coil channels. The relationship between \\(d\\) and \\(m_{c}\\) can be formulated as:"
301
+ },
302
+ {
303
+ "type": "equation",
304
+ "bbox": [
305
+ 0.681,
306
+ 0.893,
307
+ 0.937,
308
+ 0.907
309
+ ],
310
+ "angle": 0,
311
+ "content": "\\[\nm _ {c} = F _ {u} S _ {c} d. \\tag {1}\n\\]"
312
+ },
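As a minimal illustration of the forward model in Eq. 1, the sketch below simulates multi-coil undersampled acquisition on a Cartesian grid; the paper itself uses a radial NUFFT operator (see Section 2.5), and the array shapes and the `mask` variable here are illustrative assumptions rather than the authors' implementation.

```python
import torch

def forward_model(d, smaps, mask):
    """Cartesian sketch of m_c = F_u S_c d for all coils.

    d:     complex image series, shape (T, N, N)
    smaps: complex coil sensitivity maps S_c, shape (C, N, N)
    mask:  binary k-space sampling mask, broadcastable to (T, C, N, N)
    """
    coil_images = smaps[None] * d[:, None]                      # (T, C, N, N)
    kspace = torch.fft.fftshift(
        torch.fft.fft2(torch.fft.ifftshift(coil_images, dim=(-2, -1))),
        dim=(-2, -1))
    return mask * kspace                                        # undersampled (k, t)-space data
```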
313
+ {
314
+ "type": "page_number",
315
+ "bbox": [
316
+ 0.494,
317
+ 0.915,
318
+ 0.504,
319
+ 0.926
320
+ ],
321
+ "angle": 0,
322
+ "content": "2"
323
+ }
324
+ ],
325
+ [
326
+ {
327
+ "type": "text",
328
+ "bbox": [
329
+ 0.058,
330
+ 0.096,
331
+ 0.485,
332
+ 0.154
333
+ ],
334
+ "angle": 0,
335
+ "content": "Here, \\( F_{u} \\in \\mathbb{C}^{(N \\times M) \\times (N \\times N)} \\) denotes the Fourier operator with the undersampling mask, which simulates the undersampled acquisition process of dynamic MRI, and \\( S_{c} \\in \\mathbb{C}^{(N \\times N) \\times (N \\times N)} \\) is a diagonal matrix representing the cth coil sensitivity map."
336
+ },
337
+ {
338
+ "type": "text",
339
+ "bbox": [
340
+ 0.058,
341
+ 0.155,
342
+ 0.486,
343
+ 0.197
344
+ ],
345
+ "angle": 0,
346
+ "content": "Reconstructing image \\(d\\) from the undersampled \\((\\pmb{k},\\mathrm{t})\\)-space data is actually solving an ill-posed inverse problem, and the optimization process is formulated as:"
347
+ },
348
+ {
349
+ "type": "equation",
350
+ "bbox": [
351
+ 0.147,
352
+ 0.204,
353
+ 0.486,
354
+ 0.242
355
+ ],
356
+ "angle": 0,
357
+ "content": "\\[\n\\underset {d} {\\arg \\min } \\frac {1}{2} \\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d - m _ {c} \\| _ {2} ^ {2} + \\mathcal {R} (d), \\tag {2}\n\\]"
358
+ },
359
+ {
360
+ "type": "text",
361
+ "bbox": [
362
+ 0.058,
363
+ 0.248,
364
+ 0.486,
365
+ 0.276
366
+ ],
367
+ "angle": 0,
368
+ "content": "where \\(\\mathcal{R}(d)\\) is the prior regularizer, helping target \\(d\\) reach optimal results at ill-posed conditions."
369
+ },
370
+ {
371
+ "type": "text",
372
+ "bbox": [
373
+ 0.058,
374
+ 0.277,
375
+ 0.486,
376
+ 0.334
377
+ ],
378
+ "angle": 0,
379
+ "content": "It has been shown that using sparsity and low-rank regularizers as prior knowledge in CS-based [8, 9, 10] and DL-based methods [19] is able to reach state-of-the-art results for dynamic MRI reconstruction. An example can be formulated as:"
380
+ },
381
+ {
382
+ "type": "equation",
383
+ "bbox": [
384
+ 0.08,
385
+ 0.341,
386
+ 0.485,
387
+ 0.379
388
+ ],
389
+ "angle": 0,
390
+ "content": "\\[\n\\underset {d} {\\arg \\min } \\frac {1}{2} \\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d - m _ {c} \\| _ {2} ^ {2} + \\lambda_ {S} \\| T V _ {t} (d) \\| _ {1} + \\lambda_ {L} \\| d \\| _ {*}, \\tag {3}\n\\]"
391
+ },
392
+ {
393
+ "type": "text",
394
+ "bbox": [
395
+ 0.058,
396
+ 0.385,
397
+ 0.486,
398
+ 0.485
399
+ ],
400
+ "angle": 0,
401
+ "content": "where \\( TV_{t}(\\bullet) \\) is the temporal TV operator as the sparsity regularizer. \\( \\| d\\|_{*} \\) is the nuclear norm (sum of singular values) of image matrix \\( d \\), representing the low-rank regularizer. \\( \\lambda_{S} \\) and \\( \\lambda_{L} \\) are the sparsity and low-rank regularization hyperparameters, respectively. Previous works [8] have proved that the target in Eq. 3 can be optimized iteratively for a good dynamic MRI performance without Ground Truth (GT)."
402
+ },
403
+ {
404
+ "type": "title",
405
+ "bbox": [
406
+ 0.059,
407
+ 0.497,
408
+ 0.238,
409
+ 0.51
410
+ ],
411
+ "angle": 0,
412
+ "content": "2.2. INR in dynamic MRI"
413
+ },
414
+ {
415
+ "type": "text",
416
+ "bbox": [
417
+ 0.058,
418
+ 0.512,
419
+ 0.486,
420
+ 0.712
421
+ ],
422
+ "angle": 0,
423
+ "content": "Inspired by INR, the internal continuity of the image can be a powerful regularizer for solving the ill-posed inverse problem of dynamic MRI reconstruction from sparsely-acquired \\((\\pmb{k}, t)\\)-space data. The INR-based method can be implemented by applying a learnable continuous mapping function between spatiotemporal coordinates and desired image intensities to be reconstructed. We introduce \\(f_{\\theta}: \\mathbb{R}^3 \\to \\mathbb{C}\\) be the continuous function parameterized by learnable parameters \\(\\theta\\), mapping the spatiotemporal coordinates \\((x,y,t)\\) into corresponding image intensities, where \\((x,y)\\) represent the 2D spatial coordinates \\((1 \\leq x,y \\leq N)\\) and \\(t\\) represents the temporal coordinate \\((1 \\leq t \\leq T)\\). Thus, the image \\(d\\) is rewritten to \\(d_{\\theta} \\in \\mathbb{C}^{(N \\times N) \\times T}\\) by feeding all the spatiotemporal coordinates of the dynamic images into \\(f_{\\theta}\\) and the Casorati matrix \\(d_{\\theta}\\) is:"
424
+ },
425
+ {
426
+ "type": "equation",
427
+ "bbox": [
428
+ 0.144,
429
+ 0.719,
430
+ 0.485,
431
+ 0.815
432
+ ],
433
+ "angle": 0,
434
+ "content": "\\[\nd _ {\\theta} = \\left[ \\begin{array}{c c c} f _ {\\theta} (1, 1, 1) & \\dots & f _ {\\theta} (1, 1, T) \\\\ \\vdots & & \\vdots \\\\ f _ {\\theta} (N, 1, 1) & \\ddots & f _ {\\theta} (N, 1, T) \\\\ \\vdots & & \\vdots \\\\ f _ {\\theta} (N, N, 1) & \\dots & f _ {\\theta} (N, N, T) \\end{array} \\right]. \\tag {4}\n\\]"
435
+ },
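A small sketch of how the Casorati matrix of Eq. 4 can be assembled in practice: build every spatiotemporal coordinate, query the network, and combine the two output channels into complex intensities. The [0, 1] normalization and the two-channel output follow Section 2.5; the function names are assumptions.

```python
import torch

def make_coords(N, T):
    """All (x, y, t) coordinates of the image series, normalized to [0, 1]."""
    xs = torch.linspace(0.0, 1.0, N)
    ts = torch.linspace(0.0, 1.0, T)
    x, y, t = torch.meshgrid(xs, xs, ts, indexing="ij")
    return torch.stack([x, y, t], dim=-1).reshape(-1, 3)        # (N*N*T, 3)

def query_images(f_theta, coords, N, T):
    """Evaluate f_theta on every coordinate and reshape to the (x, y, t) grid."""
    out = f_theta(coords)                                       # (N*N*T, 2): real, imaginary
    d_theta = torch.complex(out[:, 0], out[:, 1])
    return d_theta.reshape(N, N, T)
```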
436
+ {
437
+ "type": "text",
438
+ "bbox": [
439
+ 0.058,
440
+ 0.822,
441
+ 0.486,
442
+ 0.865
443
+ ],
444
+ "angle": 0,
445
+ "content": "Thus, Eq. 3 can be written as a fitting problem that searches the optimal parameters \\(\\theta\\) of the continuous mapping function \\(f_{\\theta}\\):"
446
+ },
447
+ {
448
+ "type": "equation",
449
+ "bbox": [
450
+ 0.072,
451
+ 0.872,
452
+ 0.486,
453
+ 0.91
454
+ ],
455
+ "angle": 0,
456
+ "content": "\\[\n\\underset {\\theta} {\\arg \\min } \\frac {1}{2} \\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d _ {\\theta} - m _ {c} \\| _ {2} ^ {2} + \\lambda_ {S} \\| T V _ {t} (d _ {\\theta}) \\| _ {1} + \\lambda_ {L} \\| d _ {\\theta} \\| _ {*}. \\tag {5}\n\\]"
457
+ },
458
+ {
459
+ "type": "text",
460
+ "bbox": [
461
+ 0.509,
462
+ 0.097,
463
+ 0.938,
464
+ 0.14
465
+ ],
466
+ "angle": 0,
467
+ "content": "Here, Eq. 5 incorporates the implicit continuity on the desired image sequence, together with the explicit sparsity and low-rankness regularizers."
468
+ },
469
+ {
470
+ "type": "title",
471
+ "bbox": [
472
+ 0.51,
473
+ 0.154,
474
+ 0.938,
475
+ 0.182
476
+ ],
477
+ "angle": 0,
478
+ "content": "2.3. Continuous mapping function with MLP and hash encoding"
479
+ },
480
+ {
481
+ "type": "text",
482
+ "bbox": [
483
+ 0.509,
484
+ 0.185,
485
+ 0.938,
486
+ 0.256
487
+ ],
488
+ "angle": 0,
489
+ "content": "In INR, the continuous representation function \\( f_{\\theta} \\) is based on MLP. A better high-frequency fitting performance can be achieved by mapping the input coordinates to a higher dimensional space using an encoding function \\( \\varphi \\) before passing them to MLP [26, 36]:"
490
+ },
491
+ {
492
+ "type": "equation",
493
+ "bbox": [
494
+ 0.628,
495
+ 0.267,
496
+ 0.937,
497
+ 0.282
498
+ ],
499
+ "angle": 0,
500
+ "content": "\\[\nf _ {\\theta} (x, y, t) = M L P (\\varphi (x, y, t)). \\tag {6}\n\\]"
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.509,
506
+ 0.291,
507
+ 0.939,
508
+ 0.533
509
+ ],
510
+ "angle": 0,
511
+ "content": "In this work, we adopted hash encoding [37] as the coordinate encoding function \\(\\varphi\\), which enables the use of smaller MLPs and significantly a faster convergence time. Specifically, hash encoding uses a total of \\(L\\) independent hash grids with the size of \\(T\\) as learnable feature storages. These hash grids represent a set of resolutions in the form of a geometric series, i.e., \\(N_{min}, b * N_{min}, \\dots, b^{(L-1)} * N_{min}\\), where \\(N_{min}\\) and \\(b\\) are the first term and the ratio of the geometric series, respectively. Trilinear interpolation is applied in each queried hash grid entry to keep continuity. Each hash grid outputs an \\(F\\)-dim feature vector and then these interpolated feature vectors are concatenated as the final encoded input vector. As pointed out by Müller et al. [37], the five hyperparameters mentioned above can be tuned to fit large quantities of tasks better: \\(N_{min}\\) and \\(b\\) decide how the resolution among different hash grids increases, and \\(L, T, F\\) are important tuners for the tradeoff between performance, memory and quality."
512
+ },
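The paper implements this encoding with tiny-cuda-nn (footnote 1). A hedged configuration sketch is shown below; the keys follow tiny-cuda-nn's "HashGrid" configuration format, but the specific values are illustrative assumptions, not the authors' tuned settings. The mapping to the paper's symbols is annotated in the comments.

```python
import tinycudann as tcnn

encoding_config = {
    "otype": "HashGrid",
    "n_levels": 16,              # L: number of independent hash grids / resolution levels
    "n_features_per_level": 2,   # F: feature dimension output by each level
    "log2_hashmap_size": 19,     # log2 of the hash table size T per level
    "base_resolution": 16,       # N_min: coarsest grid resolution
    "per_level_scale": 1.5,      # b: ratio of the geometric series of resolutions
}
# 3 input dimensions (x, y, t); output feature length = n_levels * n_features_per_level
encoding = tcnn.Encoding(n_input_dims=3, encoding_config=encoding_config)
```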
513
+ {
514
+ "type": "title",
515
+ "bbox": [
516
+ 0.51,
517
+ 0.546,
518
+ 0.646,
519
+ 0.56
520
+ ],
521
+ "angle": 0,
522
+ "content": "2.4. Loss functions"
523
+ },
524
+ {
525
+ "type": "text",
526
+ "bbox": [
527
+ 0.509,
528
+ 0.563,
529
+ 0.938,
530
+ 0.592
531
+ ],
532
+ "angle": 0,
533
+ "content": "Eq. 5 is rewritten to the form of the following loss functions for the implementation with gradient-descent-based algorithms:"
534
+ },
535
+ {
536
+ "type": "equation",
537
+ "bbox": [
538
+ 0.527,
539
+ 0.601,
540
+ 0.937,
541
+ 0.657
542
+ ],
543
+ "angle": 0,
544
+ "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\underbrace {\\sum_ {c = 1} ^ {C} \\| F _ {u} S _ {c} d _ {\\theta} - m _ {c} \\| _ {2} ^ {2}} _ {\\mathcal {L} _ {D C}} + \\lambda_ {S} \\underbrace {\\| T V _ {t} \\left(d _ {\\theta}\\right) \\| _ {1}} _ {\\mathcal {L} _ {T V}} + \\lambda_ {L} \\underbrace {\\left\\| d _ {\\theta} \\right\\| _ {*}} _ {\\mathcal {L} _ {L R}}. \\tag {7}\n\\]"
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.509,
550
+ 0.665,
551
+ 0.938,
552
+ 0.722
553
+ ],
554
+ "angle": 0,
555
+ "content": "Here \\(\\mathcal{L}_{DC},\\mathcal{L}_{TV}\\) and \\(\\mathcal{L}_{LR}\\) stand for data consistency (DC) loss in \\((\\pmb{k},\\mathrm{t})\\) -space, temporal TV loss and low-rank loss, corresponding to the three terms of the optimization objective in Eq. 5, respectively."
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.509,
561
+ 0.723,
562
+ 0.938,
563
+ 0.864
564
+ ],
565
+ "angle": 0,
566
+ "content": "Considering that the magnitudes of the \\(k\\)-space low-frequency elements are several orders greater than those of the high-frequency elements, a relative L2 loss [38, 28] is used as the DC loss. Compared with normal L2 loss, the relative L2 loss is normalized by the square of predicted output, helping balance the gradients across \\(k\\)-space for better high-frequency performance. Let \\(\\hat{Y}_i\\) be one element of the multi-coil predicted \\(k\\)-space data \\([FS_1dFS_2d\\dots FS_Cd]\\) and \\(Y_{i}\\) is the corresponding element of the multi-coil acquired \\(k\\)-space data \\([m_1m_2\\dots m_C]\\), then DC loss is written as:"
567
+ },
568
+ {
569
+ "type": "equation",
570
+ "bbox": [
571
+ 0.631,
572
+ 0.872,
573
+ 0.937,
574
+ 0.91
575
+ ],
576
+ "angle": 0,
577
+ "content": "\\[\n\\mathcal {L} _ {D C} = \\sum_ {i = 1} ^ {N \\times M \\times T \\times C} \\frac {\\left(\\hat {Y} _ {i} - Y _ {i}\\right) ^ {2}}{\\left(\\hat {Y} _ {i}\\right) ^ {2} + \\epsilon}. \\tag {8}\n\\]"
578
+ },
579
+ {
580
+ "type": "page_number",
581
+ "bbox": [
582
+ 0.494,
583
+ 0.915,
584
+ 0.504,
585
+ 0.926
586
+ ],
587
+ "angle": 0,
588
+ "content": "3"
589
+ }
590
+ ],
591
+ [
592
+ {
593
+ "type": "image",
594
+ "bbox": [
595
+ 0.08,
596
+ 0.105,
597
+ 0.917,
598
+ 0.219
599
+ ],
600
+ "angle": 0,
601
+ "content": null
602
+ },
603
+ {
604
+ "type": "image_caption",
605
+ "bbox": [
606
+ 0.058,
607
+ 0.238,
608
+ 0.938,
609
+ 0.298
610
+ ],
611
+ "angle": 0,
612
+ "content": "Figure 1: Overview of the proposed method. All the spatiotemporal coordinates are fed into hash grids and an MLP to output two-channel intensities as the real and imaginary parts of the image series. The predicted \\( k \\)-space data are generated with the undersampled Fourier Transform (a golden-angle radial undersampling pattern) from the reconstructed complex-valued images following Eq. 1. The difference between the predicted \\( k \\)-space data and acquired \\( k \\)-space data is calculated as the data consistency loss. Two regularization terms, temporal Total Variation and low-rankness, are applied to the output image series in the loss function. The parameters in the hash grids and the MLP are updated iteratively by minimizing the loss function."
613
+ },
614
+ {
615
+ "type": "text",
616
+ "bbox": [
617
+ 0.058,
618
+ 0.319,
619
+ 0.486,
620
+ 0.348
621
+ ],
622
+ "angle": 0,
623
+ "content": "The parameter \\(\\epsilon\\) with a value of \\(10^{-4}\\) is added to the denominator to prevent the zero-division problem."
624
+ },
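A sketch of the relative L2 data-consistency loss of Eq. 8 with the stated epsilon of 1e-4. Comparing complex k-space samples through their squared magnitudes is an assumption here; separate real/imaginary channels would be an equally valid reading.

```python
def relative_l2_dc_loss(k_pred, k_meas, eps=1e-4):
    """Relative L2 data-consistency loss of Eq. 8, summed over all k-space
    samples, frames and coils (complex tensors of identical shape)."""
    num = (k_pred - k_meas).abs() ** 2
    den = k_pred.abs() ** 2 + eps        # normalization by the squared prediction
    return (num / den).sum()
```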
625
+ {
626
+ "type": "text",
627
+ "bbox": [
628
+ 0.058,
629
+ 0.35,
630
+ 0.485,
631
+ 0.378
632
+ ],
633
+ "angle": 0,
634
+ "content": "Therefore, the parameters \\(\\theta\\) of hash grids and MLP are optimized to minimize the total loss:"
635
+ },
636
+ {
637
+ "type": "equation",
638
+ "bbox": [
639
+ 0.078,
640
+ 0.391,
641
+ 0.487,
642
+ 0.448
643
+ ],
644
+ "angle": 0,
645
+ "content": "\\[\n\\mathcal {L} _ {\\text {t o t a l}} = \\underbrace {\\sum_ {i = 1} ^ {N \\times M \\times T \\times C} \\frac {\\left(\\hat {Y} _ {i} - Y _ {i}\\right) ^ {2}}{\\left(\\hat {Y} _ {i}\\right) ^ {2} + \\epsilon}} _ {\\mathcal {L} _ {D C}} + \\lambda_ {S} \\underbrace {\\| T V _ {I} (d _ {\\theta}) \\| _ {1}} _ {\\mathcal {L} _ {T V}} + \\lambda_ {L} \\underbrace {\\| d _ {\\theta} \\| _ {*}} _ {\\mathcal {L} _ {L R}}. \\tag {9}\n\\]"
646
+ },
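The two explicit regularizers of Eq. 9, temporal total variation and the nuclear norm of the Casorati matrix, can be sketched as below, with the reconstructed series arranged as a complex (N, N, T) tensor; the function names are assumptions.

```python
import torch

def temporal_tv_loss(d_theta):
    """L1 norm of temporal finite differences; d_theta: complex (N, N, T)."""
    return (d_theta[..., 1:] - d_theta[..., :-1]).abs().sum()

def low_rank_loss(d_theta):
    """Nuclear norm (sum of singular values) of the (N*N, T) Casorati matrix."""
    casorati = d_theta.reshape(-1, d_theta.shape[-1])
    return torch.linalg.svdvals(casorati).sum()
```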
647
+ {
648
+ "type": "title",
649
+ "bbox": [
650
+ 0.059,
651
+ 0.478,
652
+ 0.252,
653
+ 0.492
654
+ ],
655
+ "angle": 0,
656
+ "content": "2.5. Implementation details"
657
+ },
658
+ {
659
+ "type": "text",
660
+ "bbox": [
661
+ 0.058,
662
+ 0.513,
663
+ 0.486,
664
+ 0.586
665
+ ],
666
+ "angle": 0,
667
+ "content": "We used a tiny MLP containing 5 hidden layers and each hidden layer consisted of 64 neurons followed by a ReLU activation function. The MLP output 2 channels, representing the real and imaginary components of the complex-valued MRI images. No activation function was adopted for the last layer."
668
+ },
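A plain-PyTorch equivalent of the described tiny MLP (5 hidden layers of 64 ReLU neurons, 2 linear output channels, no output activation); `in_features` is assumed to be the encoded feature length produced by the hash encoding.

```python
import torch.nn as nn

def make_mlp(in_features, hidden=64, n_hidden_layers=5, out_channels=2):
    """Tiny MLP: 5 hidden layers of 64 ReLU neurons, 2-channel linear output."""
    layers = [nn.Linear(in_features, hidden), nn.ReLU()]
    for _ in range(n_hidden_layers - 1):
        layers += [nn.Linear(hidden, hidden), nn.ReLU()]
    layers.append(nn.Linear(hidden, out_channels))   # no activation on the last layer
    return nn.Sequential(*layers)
```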
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.058,
673
+ 0.587,
674
+ 0.486,
675
+ 0.673
676
+ ],
677
+ "angle": 0,
678
+ "content": "During the optimization process, all the spatiotemporal coordinates were gathered in one batch and the batch size was set to 1. All the coordinates were isotropically normalized to [0, 1] for fast convergence. The number of optimization epochs was set to 500. The Adam optimizer [39] was used with a constant learning rate of \\(0.001\\), \\(\\beta_{1} = 0.9\\), \\(\\beta_{2} = 0.999\\), and \\(\\epsilon = 10^{-8}\\)."
679
+ },
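A skeleton of the optimization loop described here, reusing the helper functions sketched earlier in this section. `encoding`, `mlp`, `forward_model`, `smaps`, `mask`, `k_meas`, `lam_s` and `lam_l` are placeholders/assumptions, not the authors' code.

```python
import torch

params = list(encoding.parameters()) + list(mlp.parameters())
optimizer = torch.optim.Adam(params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8)
coords = make_coords(N, T)                     # all coordinates in a single batch, in [0, 1]

for epoch in range(500):
    optimizer.zero_grad()
    d_theta = query_images(lambda c: mlp(encoding(c)), coords, N, T)
    k_pred = forward_model(d_theta.permute(2, 0, 1), smaps, mask)   # (T, C, N, N)
    loss = (relative_l2_dc_loss(k_pred, k_meas)
            + lam_s * temporal_tv_loss(d_theta)
            + lam_l * low_rank_loss(d_theta))
    loss.backward()
    optimizer.step()
```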
680
+ {
681
+ "type": "text",
682
+ "bbox": [
683
+ 0.058,
684
+ 0.675,
685
+ 0.486,
686
+ 0.746
687
+ ],
688
+ "angle": 0,
689
+ "content": "Once the optimization process was done, the continuous function \\( f_{\\theta} \\) was considered a good representation of the underlying image sequences. Then the same coordinate batch or a denser coordinate batch can be fed into the INR network to output the image sequences."
690
+ },
691
+ {
692
+ "type": "text",
693
+ "bbox": [
694
+ 0.058,
695
+ 0.748,
696
+ 0.486,
697
+ 0.863
698
+ ],
699
+ "angle": 0,
700
+ "content": "The whole pipeline is illustrated in Fig.1, and was conducted on a system equipped with an Intel i7-9700 processor, 64G RAM, and an NVIDIA RTX 2080Ti 11G GPU. The networks were implemented with PyTorch 1.11.0 and tiny-cudann<sup>1</sup>. The non-cartesian Fourier undersampling operation was implemented with the Non-Uniform Fast Fourier Transform (NUFFT) and was deployed with torchkbnufft 1.3.0 [40] for fast calculation and gradient backpropagation on GPU."
701
+ },
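An illustrative use of torchkbnufft for the multi-coil radial forward operator mentioned above; the tensor shapes follow the library's conventions as I understand them for version 1.x and should be treated as assumptions.

```python
import torchkbnufft as tkbn

nufft_op = tkbn.KbNufft(im_size=(208, 208))

# image: complex image of one frame, shape (1, 1, 208, 208)
# ktraj: radial k-space trajectory in radians [-pi, pi], shape (2, n_spokes * n_readout)
# smaps: coil sensitivity maps, shape (1, C, 208, 208)
kdata = nufft_op(image, ktraj, smaps=smaps)   # multi-coil non-Cartesian k-space samples
```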
702
+ {
703
+ "type": "title",
704
+ "bbox": [
705
+ 0.51,
706
+ 0.319,
707
+ 0.712,
708
+ 0.333
709
+ ],
710
+ "angle": 0,
711
+ "content": "3. Experiments and results"
712
+ },
713
+ {
714
+ "type": "title",
715
+ "bbox": [
716
+ 0.511,
717
+ 0.361,
718
+ 0.587,
719
+ 0.376
720
+ ],
721
+ "angle": 0,
722
+ "content": "3.1. Setup"
723
+ },
724
+ {
725
+ "type": "title",
726
+ "bbox": [
727
+ 0.511,
728
+ 0.397,
729
+ 0.621,
730
+ 0.41
731
+ ],
732
+ "angle": 0,
733
+ "content": "3.1.1. Datasets"
734
+ },
735
+ {
736
+ "type": "text",
737
+ "bbox": [
738
+ 0.509,
739
+ 0.414,
740
+ 0.938,
741
+ 0.457
742
+ ],
743
+ "angle": 0,
744
+ "content": "The proposed method was tested on a simulated retrospective cardiac cine dataset and a perspective untriggered DCE liver dataset to prove its effectiveness and generalization."
745
+ },
746
+ {
747
+ "type": "title",
748
+ "bbox": [
749
+ 0.527,
750
+ 0.459,
751
+ 0.788,
752
+ 0.473
753
+ ],
754
+ "angle": 0,
755
+ "content": "(1) Retrospective cardiac cine dataset:"
756
+ },
757
+ {
758
+ "type": "text",
759
+ "bbox": [
760
+ 0.509,
761
+ 0.476,
762
+ 0.938,
763
+ 0.703
764
+ ],
765
+ "angle": 0,
766
+ "content": "The fully sampled cardiac cine data from the OCMR dataset [41] were acquired from healthy volunteers on a 1.5T scanner (MAGNETOM Avanto, Siemens Healthineers, Erlangen, Germany) using a bSSFP sequence with the following parameters: \\(\\mathrm{FOV} = 320\\times 260\\mathrm{mm}^2\\), imaging matrix \\(= 256\\times 208\\), slice thickness \\(= 8\\mathrm{mm}\\), TR/TE \\(= 2.79\\mathrm{ms} / 1.33\\mathrm{ms}\\), number of frame \\(= 18\\). The data acquisition was collected with prospective ECGgating and breath-holding. The number of receiver coils is 18. A simulation undersampling pattern of 2D golden-angle radial acquisition scheme is adopted, where the readout lines are repetitively through the center of \\(k\\)-space and rotated with a step of \\(111.25^{\\circ}\\). The simulation process includes cropping original data to \\(208\\times 208\\) in the image domain and then converting to the frequency domain by multi-coil NUFFT with golden-angle trajectories of Fibonacci numbers [42]. The coil sensitivity maps were calculated by the ESPIRiT algorithm [43]."
767
+ },
768
+ {
769
+ "type": "title",
770
+ "bbox": [
771
+ 0.527,
772
+ 0.706,
773
+ 0.765,
774
+ 0.719
775
+ ],
776
+ "angle": 0,
777
+ "content": "(2) Untriggered DCE liver dataset:"
778
+ },
779
+ {
780
+ "type": "text",
781
+ "bbox": [
782
+ 0.509,
783
+ 0.722,
784
+ 0.938,
785
+ 0.905
786
+ ],
787
+ "angle": 0,
788
+ "content": "The DCE liver data were acquired continuously with the golden-angle acquisition scheme. The 3D stack-of-stars Fast Low Angle SHot (FLASH) sequence was acquired on a breathtaking healthy volunteer using a 3T Siemens MAGNETOM Verio scanner with the following parameters: \\(\\mathrm{FOV} = 370 \\times 370 \\, \\mathrm{mm}^2\\), \\(\\mathrm{TR/TE} = 3.83 \\, \\mathrm{ms}/1.71 \\, \\mathrm{ms}\\), imaging matrix \\(= 384 \\times 384\\), slice thickness \\(= 3 \\, \\mathrm{mm}\\), total spoke number of each slice \\(= 600\\). A total of 12 receiver coils were used during the scan. The data including coil sensitivity maps were from Feng et al. [7]'s demo and details about intravenous contrast enhancement can be found in the paper. Each 34 acquired spokes were grouped to reconstruct one frame, which corresponds to an Acceleration Factor \\((\\mathrm{AF}) \\approx 11.3\\) and 17 frames in total."
789
+ },
790
+ {
791
+ "type": "page_footnote",
792
+ "bbox": [
793
+ 0.078,
794
+ 0.893,
795
+ 0.295,
796
+ 0.906
797
+ ],
798
+ "angle": 0,
799
+ "content": "1https://github.com/nvlabs/tiny-cuda-nn"
800
+ },
801
+ {
802
+ "type": "page_number",
803
+ "bbox": [
804
+ 0.494,
805
+ 0.915,
806
+ 0.504,
807
+ 0.926
808
+ ],
809
+ "angle": 0,
810
+ "content": "4"
811
+ }
812
+ ],
813
+ [
814
+ {
815
+ "type": "title",
816
+ "bbox": [
817
+ 0.059,
818
+ 0.097,
819
+ 0.273,
820
+ 0.111
821
+ ],
822
+ "angle": 0,
823
+ "content": "3.1.2. Performance evaluation"
824
+ },
825
+ {
826
+ "type": "text",
827
+ "bbox": [
828
+ 0.057,
829
+ 0.112,
830
+ 0.486,
831
+ 0.324
832
+ ],
833
+ "angle": 0,
834
+ "content": "In this work, we chose NUFFT, \\(\\mathrm{L + S}\\) [10] and GRASP [7] as the baselines for comparison. NUFFT gives the results obtained by directly zero-filling the frequency domain. \\(\\mathrm{L + S}\\) and GRASP two of the CS-based reconstruction methods which use a similar optimization pipeline as Eq. 2. The difference between them is that GRASP adopted a temporal TV regularizer, while \\(\\mathrm{L + S}\\) decomposed the solution images into a background component with low-rank regularizer and a dynamic foreground with temporal TV regularizer. We did not compare the proposed INR-based method to the supervised DL methods for dynamic MRI reconstruction since the datasets used in this work are insufficient for supervised network training. In addition, the ground truth is not available for the untriggered DCE liver dataset, which also limits the training process of previous supervised methods."
835
+ },
836
+ {
837
+ "type": "text",
838
+ "bbox": [
839
+ 0.058,
840
+ 0.326,
841
+ 0.486,
842
+ 0.412
843
+ ],
844
+ "angle": 0,
845
+ "content": "We tested the performance of the proposed method with 21, 13, 8 and 5 spokes per frame (AF ≈ 9.9, 16, 26, 41.6) on the cardiac cine dataset, and with 34 spokes per frame (AF ≈ 11.3) on the DCE liver dataset. For a fair comparison, the hyperparameters of all the methods are tuned to get the best performance and fit the GPU storage in different datasets and AFs, respectively."
846
+ },
847
+ {
848
+ "type": "text",
849
+ "bbox": [
850
+ 0.058,
851
+ 0.412,
852
+ 0.487,
853
+ 0.482
854
+ ],
855
+ "angle": 0,
856
+ "content": "Quantitative visual comparison and quantitative comparison were used for evaluation. For the cardiac cine dataset, quantitative metrics including peak signal-to-noise ratio (PSNR) and structural similarity index (SSIM) were calculated frame-by-frame as follows:"
857
+ },
858
+ {
859
+ "type": "equation",
860
+ "bbox": [
861
+ 0.168,
862
+ 0.486,
863
+ 0.486,
864
+ 0.52
865
+ ],
866
+ "angle": 0,
867
+ "content": "\\[\nP S N R = 1 0 \\times \\log_ {1 0} \\left(\\frac {1}{\\| y - \\hat {y} \\| _ {2} ^ {2}}\\right), \\tag {10}\n\\]"
868
+ },
869
+ {
870
+ "type": "equation",
871
+ "bbox": [
872
+ 0.126,
873
+ 0.525,
874
+ 0.485,
875
+ 0.557
876
+ ],
877
+ "angle": 0,
878
+ "content": "\\[\nS S I M = \\frac {\\left(2 \\mu_ {y} \\mu_ {\\hat {y}} + c _ {1}\\right) \\left(2 \\sigma_ {y \\hat {y}} + c _ {2}\\right)}{\\left(\\mu_ {y} ^ {2} + \\mu_ {\\hat {y}} ^ {2} + c _ {1}\\right) \\left(\\sigma_ {y} ^ {2} + \\sigma_ {\\hat {y}} ^ {2} + c _ {2}\\right)}, \\tag {11}\n\\]"
879
+ },
880
+ {
881
+ "type": "text",
882
+ "bbox": [
883
+ 0.058,
884
+ 0.56,
885
+ 0.487,
886
+ 0.645
887
+ ],
888
+ "angle": 0,
889
+ "content": "where \\(y\\) and \\(\\hat{y}\\) represent ground truth and reconstructed image, respectively, \\(\\mu_y\\) and \\(\\mu_{\\hat{y}}\\) are the mean intensity of \\(y\\) and \\(\\hat{y}\\), \\(\\sigma_y\\) and \\(\\sigma_{\\hat{y}}\\) are the variance of \\(y\\) and \\(\\hat{y}\\), \\(\\sigma_{y\\hat{y}}\\) is the covariance of \\(y\\) and \\(\\hat{y}\\), the constant \\(c_1\\) and \\(c_2\\) were set to \\(0.01^2\\) and \\(0.03^2\\). \\(y\\) and \\(\\hat{y}\\) were both normalized to [0, 1] according to the image sequence maximum and minimum."
890
+ },
891
+ {
892
+ "type": "text",
893
+ "bbox": [
894
+ 0.058,
895
+ 0.646,
896
+ 0.487,
897
+ 0.716
898
+ ],
899
+ "angle": 0,
900
+ "content": "The \\(k\\)-space data were calculated from the reconstructed complex-valued MR images with the 2D Fast Fourier Transform. For quantitative comparison, the normalized root mean square error (NRMSE) against GT \\(k\\)-space data was calculated coil-by-coil:"
901
+ },
902
+ {
903
+ "type": "equation",
904
+ "bbox": [
905
+ 0.188,
906
+ 0.716,
907
+ 0.485,
908
+ 0.759
909
+ ],
910
+ "angle": 0,
911
+ "content": "\\[\nN R M S E = \\sqrt {\\frac {\\left\\| Y - \\hat {Y} \\right\\| _ {2} ^ {2}}{\\| Y \\| _ {2} ^ {2}}}, \\tag {12}\n\\]"
912
+ },
913
+ {
914
+ "type": "text",
915
+ "bbox": [
916
+ 0.058,
917
+ 0.764,
918
+ 0.486,
919
+ 0.793
920
+ ],
921
+ "angle": 0,
922
+ "content": "where \\( Y \\) and \\( \\hat{Y} \\) represents the predicted and acquired \\( k \\)-space data, respectively."
923
+ },
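The evaluation metrics of Eqs. 10 and 12 sketched in NumPy; reading the squared norm in Eq. 10 as the per-frame mean squared error of images normalized to [0, 1] is an assumption consistent with the stated normalization.

```python
import numpy as np

def psnr(y, y_hat):
    """Eq. 10, per frame, with y and y_hat normalized to [0, 1]."""
    mse = np.mean(np.abs(y - y_hat) ** 2)
    return 10.0 * np.log10(1.0 / mse)

def nrmse(Y, Y_hat):
    """Eq. 12, computed coil-by-coil on complex k-space data."""
    return np.sqrt(np.sum(np.abs(Y - Y_hat) ** 2) / np.sum(np.abs(Y) ** 2))
```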
924
+ {
925
+ "type": "text",
926
+ "bbox": [
927
+ 0.057,
928
+ 0.793,
929
+ 0.487,
930
+ 0.907
931
+ ],
932
+ "angle": 0,
933
+ "content": "For the DCE liver dataset, only visual comparison and temporal ROI intensity assessment were conducted due to the lack of the GT image. The ROIs of the aorta (AO) and portal vein (PV) were manually drawn for signal intensity-time curves. For temporal fidelity comparison, NUFFT was used as the reference since no temporal regularization was involved in the reconstructed images. Although contaminated by the streaking artifacts, the average signal intensity from NUFFT results across"
934
+ },
935
+ {
936
+ "type": "text",
937
+ "bbox": [
938
+ 0.509,
939
+ 0.097,
940
+ 0.938,
941
+ 0.126
942
+ ],
943
+ "angle": 0,
944
+ "content": "large ROI was still able to preserve contrast evolution for fidelity analysis."
945
+ },
946
+ {
947
+ "type": "title",
948
+ "bbox": [
949
+ 0.51,
950
+ 0.142,
951
+ 0.903,
952
+ 0.157
953
+ ],
954
+ "angle": 0,
955
+ "content": "3.2. Reconstruction performance of the proposed method"
956
+ },
957
+ {
958
+ "type": "title",
959
+ "bbox": [
960
+ 0.511,
961
+ 0.163,
962
+ 0.704,
963
+ 0.176
964
+ ],
965
+ "angle": 0,
966
+ "content": "3.2.1.Cardiac cine dataset"
967
+ },
968
+ {
969
+ "type": "text",
970
+ "bbox": [
971
+ 0.509,
972
+ 0.179,
973
+ 0.938,
974
+ 0.519
975
+ ],
976
+ "angle": 0,
977
+ "content": "Fig.2 compares the reconstruction performance of different methods on the cardiac cine dataset with 21 and 13 spokes per frame (AF=9.9, 16, respectively). Visually, the images reconstructed by the proposed method appear to have better anatomical details and provide a more accurate temporal fidelity than the baselines at both acceleration conditions of 21 and 13 spokes. NUFFT, \\(\\mathrm{L + S}\\) and GRASP all suffer from artifacts and noises in the cardiac chamber area. While the reconstructed images by the proposed method show highly similar anatomical details as the ground truth, as pointed out by red arrows. In the y-t view, the \\(\\mathrm{L + S}\\) results are over smooth and the GRASP results suffer from noticeable streaking artifacts along the temporal axis. The proposed method provides the highest temporal fidelity of the dynamic images between frames. The error map between the reconstruction and the ground truth further supports our observation. It is noted that the reconstructed errors observed on the error maps at the edge of the cardiac ventricles were potentially blurred by cardiac motion. The proposed INR-based method shows the smallest error at the edge of the ventricles, and is consistent with the observation from the y-t view. Quantitatively, the proposed method achieves the best performance with a PSNR of \\(39.00\\pm 0.55\\) dB (21 spokes) / \\(37.86\\pm 0.61\\) dB (13 spokes) and an SSIM of \\(0.980\\pm 0.003\\) (21 spokes) / \\(0.975\\pm 0.004\\) (13 spokes) than the compared methods."
978
+ },
979
+ {
980
+ "type": "text",
981
+ "bbox": [
982
+ 0.509,
983
+ 0.52,
984
+ 0.939,
985
+ 0.734
986
+ ],
987
+ "angle": 0,
988
+ "content": "We further tested the ability of the proposed method for dynamic MRI reconstruction at extremely high acceleration rates (8 and 5 per frame, AF=26 and 41.6, respectively), as shown in Fig.3. The proposed method exhibits comparable performance between AF=26 and 41.6, with the PSNR/SSIM of \\(36.88 \\pm 0.63\\) dB/0.968 \\(\\pm 0.005\\) (8 spokes) and \\(35.41 \\pm 0.56\\) dB/0.957 \\(\\pm 0.006\\) (5 spokes). The proposed method has the best image quality with minimal noise and artifacts. Contrarily, L+S and GRASP suffer from temporal smoothness and noticeable streaking artifacts with increased acceleration rates. From the y-t view, the dynamic information on the reconstructed images is well captured by the proposed method, even with 5 spokes per frame. Additionally, the proposed INR-based method results in a higher PSNR than GRASP (5.5 dB) and L+S (7.1 dB), respectively."
989
+ },
990
+ {
991
+ "type": "title",
992
+ "bbox": [
993
+ 0.51,
994
+ 0.75,
995
+ 0.685,
996
+ 0.762
997
+ ],
998
+ "angle": 0,
999
+ "content": "3.2.2. DCE liver dataset"
1000
+ },
1001
+ {
1002
+ "type": "text",
1003
+ "bbox": [
1004
+ 0.508,
1005
+ 0.764,
1006
+ 0.938,
1007
+ 0.907
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": "For DCE liver dataset with 34 spokes per frame (AF=11.3), the visual comparisons at different temporal phases are demonstrated in Fig. 4(a). As can be seen from the zoomed-in images, the anatomical details of the kidney can be well visible on the reconstructed images by the proposed method. Severe streaking and noise can be observed on the reconstructed images by NUFFT, L+S, and GRASP. While the proposed method provides high-quality images with less noise than other methods. The signal intensity-time curves in Fig. 4(b) suggest that the proposed method yields the best temporal fidelity, which is"
1011
+ },
1012
+ {
1013
+ "type": "page_number",
1014
+ "bbox": [
1015
+ 0.494,
1016
+ 0.915,
1017
+ 0.505,
1018
+ 0.926
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": "5"
1022
+ }
1023
+ ],
1024
+ [
1025
+ {
1026
+ "type": "image",
1027
+ "bbox": [
1028
+ 0.179,
1029
+ 0.094,
1030
+ 0.823,
1031
+ 0.525
1032
+ ],
1033
+ "angle": 0,
1034
+ "content": null
1035
+ },
1036
+ {
1037
+ "type": "image_caption",
1038
+ "bbox": [
1039
+ 0.058,
1040
+ 0.538,
1041
+ 0.94,
1042
+ 0.584
1043
+ ],
1044
+ "angle": 0,
1045
+ "content": "Figure 2: The reconstruction results of NUFFT, \\(\\mathrm{L + S}\\), GRASP and the proposed method (from left to right) on the cardiac cine dataset with 21 and 13 spokes per frame (AF=9.9, 16). The enlarged views of the heart region are outlined by the orange boxes and the red arrows point out the structure where the proposed method gives a superior reconstruction performance. The y-t images (the 116th slice along y and temporal dimensions) are outlined by green boxes. The error maps and PSNR/SSIM metrics are shown at the bottom, respectively."
1046
+ },
1047
+ {
1048
+ "type": "text",
1049
+ "bbox": [
1050
+ 0.058,
1051
+ 0.608,
1052
+ 0.487,
1053
+ 0.663
1054
+ ],
1055
+ "angle": 0,
1056
+ "content": "consistent with the results of NUFFT in AO and PV. For example, the intensity fluctuation of the AO curve between Frame 5 and Frame 11 can be well captured by the proposed INR-based method."
1057
+ },
1058
+ {
1059
+ "type": "title",
1060
+ "bbox": [
1061
+ 0.059,
1062
+ 0.706,
1063
+ 0.367,
1064
+ 0.72
1065
+ ],
1066
+ "angle": 0,
1067
+ "content": "3.3. Results of the temporal super-resolution"
1068
+ },
1069
+ {
1070
+ "type": "text",
1071
+ "bbox": [
1072
+ 0.058,
1073
+ 0.75,
1074
+ 0.487,
1075
+ 0.906
1076
+ ],
1077
+ "angle": 0,
1078
+ "content": "To demonstrate the internal continuity of the optimized representation of the dynamic MRI, we use a denser coordinate along the temporal axis as input to conduct upsampling \\((4\\times)\\) on the reconstructed dynamic MR image sequence, named temporal super-resolution. The pipeline is shown in Fig.5(a). The GT frames with temporal linear interpolation between Frame 10 and 11 are used as the reference for comparison, as shown in Fig.5(b). Qualitatively, there is no significant structural difference between the super-resolution images and the interpolated images, indicating the strong implicit continuity representation of the optimized INR function."
1079
+ },
1080
+ {
1081
+ "type": "title",
1082
+ "bbox": [
1083
+ 0.51,
1084
+ 0.608,
1085
+ 0.612,
1086
+ 0.62
1087
+ ],
1088
+ "angle": 0,
1089
+ "content": "4. Discussion"
1090
+ },
1091
+ {
1092
+ "type": "text",
1093
+ "bbox": [
1094
+ 0.509,
1095
+ 0.636,
1096
+ 0.938,
1097
+ 0.82
1098
+ ],
1099
+ "angle": 0,
1100
+ "content": "In this study, we proposed a novel unsupervised INR-based deep learning method for highly accelerated dynamic MRI reconstruction, which modeled the dynamic MR image sequence as a continuous mapping function. We validated the proposed method on retrospective cardiac cine data and perspective DCE liver data with various acceleration rates. The results showed the effectiveness and generalization of the proposed method on artifact suppression and motion fidelity preservation, especially at extremely high accelerations of 26-fold or 41.6-fold. The proposed method outperforms the compared CS-based methods such as \\(\\mathrm{L} + \\mathrm{S}\\) and GRASP. The results indicated that the proposed reconstruction method holds promise for high temporal resolution 2D MRI acquisitions."
1101
+ },
1102
+ {
1103
+ "type": "text",
1104
+ "bbox": [
1105
+ 0.509,
1106
+ 0.821,
1107
+ 0.938,
1108
+ 0.907
1109
+ ],
1110
+ "angle": 0,
1111
+ "content": "The superiority of the proposed method over the baseline methods is believed from the implicit regularization from the internal continuity of INR, which is validated by the results of the temporal super-resolution \\((4\\times)\\), as shown in Fig.5. In addition, the super-resolution performance allows us to further speed up the data acquisition along the temporal axis for dy"
1112
+ },
1113
+ {
1114
+ "type": "page_number",
1115
+ "bbox": [
1116
+ 0.494,
1117
+ 0.915,
1118
+ 0.505,
1119
+ 0.926
1120
+ ],
1121
+ "angle": 0,
1122
+ "content": "6"
1123
+ }
1124
+ ],
1125
+ [
1126
+ {
1127
+ "type": "image",
1128
+ "bbox": [
1129
+ 0.179,
1130
+ 0.095,
1131
+ 0.823,
1132
+ 0.527
1133
+ ],
1134
+ "angle": 0,
1135
+ "content": null
1136
+ },
1137
+ {
1138
+ "type": "image_caption",
1139
+ "bbox": [
1140
+ 0.058,
1141
+ 0.538,
1142
+ 0.94,
1143
+ 0.575
1144
+ ],
1145
+ "angle": 0,
1146
+ "content": "Figure 3: The comparison of the reconstruction results on the cardiac cine dataset with 8 and 5 spokes per frame, which corresponds to the acceleration factors of 26 and 41.6. Zoomed-in views of the heart chambers are outlined by orange boxes and the y-t images (the 116th slice along y and temporal dimensions) are outlined by green boxes. The difference map between the reconstructed image and ground truth and PSNR/SSIM metrics are also shown."
1147
+ },
1148
+ {
1149
+ "type": "image",
1150
+ "bbox": [
1151
+ 0.143,
1152
+ 0.584,
1153
+ 0.861,
1154
+ 0.733
1155
+ ],
1156
+ "angle": 0,
1157
+ "content": null
1158
+ },
1159
+ {
1160
+ "type": "image_caption",
1161
+ "bbox": [
1162
+ 0.058,
1163
+ 0.746,
1164
+ 0.938,
1165
+ 0.793
1166
+ ],
1167
+ "angle": 0,
1168
+ "content": "Figure 4: The comparison of the reconstruction results and ROI analysis among different methods on the DCE liver dataset with 34 spokes per frame (AF=11.3). (a) Reconstruction results at different contrast phases are visualized. The zoomed-in area outlined by orange boxes with the proposed method gives the best image quality with minimal noise among different methods. (b) Signal intensity-time curves of different methods are compared in aorta (AO) and portal vein (PV) areas, and the NUFFT result serves as the temporal fidelity reference."
1169
+ },
1170
+ {
1171
+ "type": "text",
1172
+ "bbox": [
1173
+ 0.058,
1174
+ 0.816,
1175
+ 0.487,
1176
+ 0.887
1177
+ ],
1178
+ "angle": 0,
1179
+ "content": "namic MRI. Unlike the existing super-resolution methods, the INR-based method does not require extra modeling or training, but simply gives the denser coordinates, which reduces the computational burden and the reconstruction time usage during deployment."
1180
+ },
1181
+ {
1182
+ "type": "text",
1183
+ "bbox": [
1184
+ 0.075,
1185
+ 0.892,
1186
+ 0.486,
1187
+ 0.906
1188
+ ],
1189
+ "angle": 0,
1190
+ "content": "The proposed method has a few limitations. First, the low"
1191
+ },
1192
+ {
1193
+ "type": "text",
1194
+ "bbox": [
1195
+ 0.509,
1196
+ 0.816,
1197
+ 0.938,
1198
+ 0.901
1199
+ ],
1200
+ "angle": 0,
1201
+ "content": "rank regularization adopted in this work is the nuclear norm and is optimized with gradient-descent-based algorithms. However, as discussed by Lingala et al. [8]'s work, naive nuclear norm minimization may not be stable for fast convergence. In future works, INR combined with different low-rank regularization substitutes and optimization methods will be explored."
1202
+ },
1203
+ {
1204
+ "type": "page_number",
1205
+ "bbox": [
1206
+ 0.494,
1207
+ 0.915,
1208
+ 0.504,
1209
+ 0.925
1210
+ ],
1211
+ "angle": 0,
1212
+ "content": "7"
1213
+ }
1214
+ ],
1215
+ [
1216
+ {
1217
+ "type": "image",
1218
+ "bbox": [
1219
+ 0.073,
1220
+ 0.097,
1221
+ 0.473,
1222
+ 0.183
1223
+ ],
1224
+ "angle": 0,
1225
+ "content": null
1226
+ },
1227
+ {
1228
+ "type": "image",
1229
+ "bbox": [
1230
+ 0.073,
1231
+ 0.187,
1232
+ 0.474,
1233
+ 0.353
1234
+ ],
1235
+ "angle": 0,
1236
+ "content": null
1237
+ },
1238
+ {
1239
+ "type": "image_caption",
1240
+ "bbox": [
1241
+ 0.058,
1242
+ 0.367,
1243
+ 0.488,
1244
+ 0.48
1245
+ ],
1246
+ "angle": 0,
1247
+ "content": "Figure 5: (a) The pipeline of temporal super-resolution for the reconstructed dynamic MRI. For the given denser coordinates, the optimized function (Hash grids & MLP) outputs the interpolated frames. (b) The upsampled images between Frame 10 and Frame 11 of the cardiac cine dataset with 21 spokes per frame. Three equally-spaced coordinates to be generated (10.25, 10.5, 10.75) between Frame 10 and Frame 11 are fed to the network for temporal super-resolution \\((4\\times)\\). The ground truth of Frame 10 and Frame 11, and the linear interpolated frames serve as the reference. The reference and output images at the position of Frame 10 and 11 are outlined with orange boxes. The corresponding error maps are displayed at the bottom."
1248
+ },
1249
+ {
1250
+ "type": "text",
1251
+ "bbox": [
1252
+ 0.058,
1253
+ 0.506,
1254
+ 0.487,
1255
+ 0.592
1256
+ ],
1257
+ "angle": 0,
1258
+ "content": "Second, the temporal super-resolution test indicates a comparable 4 times upsampling result with INR, but the smoothness witnessed at the edge of heart chambers demonstrated its limitation for higher or even arbitrary super-resolution results. Third, although the reconstruction time is faster than the other unsupervised methods, it is still challenging for real-time imaging."
1259
+ },
1260
+ {
1261
+ "type": "title",
1262
+ "bbox": [
1263
+ 0.059,
1264
+ 0.619,
1265
+ 0.165,
1266
+ 0.632
1267
+ ],
1268
+ "angle": 0,
1269
+ "content": "5. Conclusion"
1270
+ },
1271
+ {
1272
+ "type": "text",
1273
+ "bbox": [
1274
+ 0.062,
1275
+ 0.651,
1276
+ 0.486,
1277
+ 0.905
1278
+ ],
1279
+ "angle": 0,
1280
+ "content": "In this work, we proposed an INR-based unsupervised deep learning method for highly accelerated dynamic MRI reconstruction. The proposed method learns an implicit continuous representation function to represent the desired spatiotemporal image sequence, mapping spatiotemporal coordinates to the corresponding image intensities. The proposed method is training database-free and does not require prior information for the reconstruction. Several tests on retrospective cardiac and perspective DCE liver data proved that the proposed method could robustly produce a high-quality dynamic MR image sequence even at an extremely high acceleration rate \\((41.6\\times)\\). Additionally, benefiting from the internal continuity of the optimized INR network, the proposed method demonstrates an impressive performance of temporal super-resolution to upsample the desired dynamic images at higher temporal rates than the physical acquisitions. We thus believe that the INR-based method has the potential to further accelerate dynamic MRI acquisition in the future."
1281
+ },
1282
+ {
1283
+ "type": "title",
1284
+ "bbox": [
1285
+ 0.513,
1286
+ 0.097,
1287
+ 0.593,
1288
+ 0.11
1289
+ ],
1290
+ "angle": 0,
1291
+ "content": "References"
1292
+ },
1293
+ {
1294
+ "type": "ref_text",
1295
+ "bbox": [
1296
+ 0.52,
1297
+ 0.13,
1298
+ 0.938,
1299
+ 0.152
1300
+ ],
1301
+ "angle": 0,
1302
+ "content": "[1] C. B. Marcu, A. M. Beek, A. C. van Rossum, Clinical applications of cardiovascular magnetic resonance imaging, CMAJ 175 (2006) 911-917."
1303
+ },
1304
+ {
1305
+ "type": "ref_text",
1306
+ "bbox": [
1307
+ 0.52,
1308
+ 0.153,
1309
+ 0.938,
1310
+ 0.174
1311
+ ],
1312
+ "angle": 0,
1313
+ "content": "[2] R. N. Low, Abdominal mri advances in the detection of liver tumours and characterisation, The Lancet Oncology 8 (2007) 525-535."
1314
+ },
1315
+ {
1316
+ "type": "ref_text",
1317
+ "bbox": [
1318
+ 0.521,
1319
+ 0.175,
1320
+ 0.938,
1321
+ 0.208
1322
+ ],
1323
+ "angle": 0,
1324
+ "content": "[3] C. Cuenod, D. Balvay, Perfusion and vascular permeability: Basic concepts and measurement in dce-ct and dce-mri, Diagnostic and Interventional Imaging 94 (2013) 1187–1204."
1325
+ },
1326
+ {
1327
+ "type": "ref_text",
1328
+ "bbox": [
1329
+ 0.521,
1330
+ 0.209,
1331
+ 0.938,
1332
+ 0.231
1333
+ ],
1334
+ "angle": 0,
1335
+ "content": "[4] S. M. Wright, M. P. McDougall, Single echo acquisition mri using rf encoding, NMR in Biomedicine 22 (2009) 982-993."
1336
+ },
1337
+ {
1338
+ "type": "ref_text",
1339
+ "bbox": [
1340
+ 0.521,
1341
+ 0.232,
1342
+ 0.938,
1343
+ 0.264
1344
+ ],
1345
+ "angle": 0,
1346
+ "content": "[5] H. Jung, K. Sung, K. S. Nayak, E. Y. Kim, J. C. Ye, k-t focuss: A general compressed sensing framework for high resolution dynamic mri, Magnetic Resonance in Medicine 61 (2009) 103-116."
1347
+ },
1348
+ {
1349
+ "type": "ref_text",
1350
+ "bbox": [
1351
+ 0.521,
1352
+ 0.265,
1353
+ 0.938,
1354
+ 0.309
1355
+ ],
1356
+ "angle": 0,
1357
+ "content": "[6] L. Feng, M. B. Srichai, R. P. Lim, A. Harrison, W. King, G. Adluru, E. V. R. Dibella, D. K. Sodickson, R. Otazo, D. Kim, Highly accelerated real-time cardiac cine migraine using k-t sparse-sense, Magnetic Resonance in Medicine 70 (2013) 64-74."
1358
+ },
1359
+ {
1360
+ "type": "ref_text",
1361
+ "bbox": [
1362
+ 0.521,
1363
+ 0.31,
1364
+ 0.938,
1365
+ 0.365
1366
+ ],
1367
+ "angle": 0,
1368
+ "content": "[7] L. Feng, R. Grimm, K. T. Block, H. Chandarana, S. Kim, J. Xu, L. Axel, D. K. Sodickson, R. Otazo, Golden-angle radial sparse parallel migraine: Combination of compressed sensing, parallel imaging, and golden-angle radial sampling for fast and flexible dynamic volumetric migraine, Magnetic Resonance in Medicine 72 (2014) 707-717."
1369
+ },
1370
+ {
1371
+ "type": "ref_text",
1372
+ "bbox": [
1373
+ 0.521,
1374
+ 0.367,
1375
+ 0.938,
1376
+ 0.399
1377
+ ],
1378
+ "angle": 0,
1379
+ "content": "[8] S. G. Lingala, Y. Hu, E. DiBella, M. Jacob, Accelerated dynamic mri exploiting sparsity and low-rank structure: k-t slr, IEEE Transactions on Medical Imaging 30 (2011) 1042-1054."
1380
+ },
1381
+ {
1382
+ "type": "ref_text",
1383
+ "bbox": [
1384
+ 0.521,
1385
+ 0.4,
1386
+ 0.938,
1387
+ 0.444
1388
+ ],
1389
+ "angle": 0,
1390
+ "content": "[9] B. Zhao, J. P. Haldar, A. G. Christodoulou, Z.-P. Liang, Image reconstruction from highly undersampled (k, t)-space data with joint partial separability and sparsity constraints, IEEE Transactions on Medical Imaging 31 (2012) 1809-1820."
1391
+ },
1392
+ {
1393
+ "type": "ref_text",
1394
+ "bbox": [
1395
+ 0.514,
1396
+ 0.445,
1397
+ 0.938,
1398
+ 0.489
1399
+ ],
1400
+ "angle": 0,
1401
+ "content": "[10] R. Otazo, E. Candès, D. K. Sodickson, Low-rank plus sparse matrix decomposition for accelerated dynamic mri with separation of background and dynamic components, Magnetic Resonance in Medicine 73 (2015) 1125-1136."
1402
+ },
1403
+ {
1404
+ "type": "ref_text",
1405
+ "bbox": [
1406
+ 0.514,
1407
+ 0.49,
1408
+ 0.938,
1409
+ 0.534
1410
+ ],
1411
+ "angle": 0,
1412
+ "content": "[11] L. Feng, Q. Wen, C. Huang, A. Tong, F. Liu, H. Chandarana, Grasp-pro: improving grasp dce-mri through self-calibrating subspace-modeling and contrast phase automation, Magnetic Resonance in Medicine 83 (2020) 94-108."
1413
+ },
1414
+ {
1415
+ "type": "ref_text",
1416
+ "bbox": [
1417
+ 0.514,
1418
+ 0.535,
1419
+ 0.938,
1420
+ 0.556
1421
+ ],
1422
+ "angle": 0,
1423
+ "content": "[12] L. Feng, 4d golden-angle radial mri at subsecond temporal resolution, NMR in Biomedicine (2022) e4844."
1424
+ },
1425
+ {
1426
+ "type": "ref_text",
1427
+ "bbox": [
1428
+ 0.514,
1429
+ 0.558,
1430
+ 0.938,
1431
+ 0.59
1432
+ ],
1433
+ "angle": 0,
1434
+ "content": "[13] A. Bustin, N. Fuin, R. M. Botnar, C. Prieto, From compressed-sensing to artificial intelligence-based cardiac mri reconstruction, Frontiers in cardiovascular medicine 7 (2020) 17."
1435
+ },
1436
+ {
1437
+ "type": "ref_text",
1438
+ "bbox": [
1439
+ 0.514,
1440
+ 0.591,
1441
+ 0.938,
1442
+ 0.635
1443
+ ],
1444
+ "angle": 0,
1445
+ "content": "[14] S. Wang, Z. Su, L. Ying, X. Peng, S. Zhu, F. Liang, D. Feng, D. Liang, Accelerating magnetic resonance imaging via deep learning, in: IEEE 13th International Symposium on Biomedical Imaging (ISBI), 2016, pp. 514-517."
1446
+ },
1447
+ {
1448
+ "type": "ref_text",
1449
+ "bbox": [
1450
+ 0.514,
1451
+ 0.636,
1452
+ 0.938,
1453
+ 0.669
1454
+ ],
1455
+ "angle": 0,
1456
+ "content": "[15] Y. Han, J. Yoo, H. H. Kim, H. J. Shin, K. Sung, J. C. Ye, Deep learning with domain adaptation for accelerated projection-reconstruction mr, Magnetic Resonance in Medicine 80 (2018) 1189-1205."
1457
+ },
1458
+ {
1459
+ "type": "ref_text",
1460
+ "bbox": [
1461
+ 0.514,
1462
+ 0.67,
1463
+ 0.938,
1464
+ 0.714
1465
+ ],
1466
+ "angle": 0,
1467
+ "content": "[16] J. Schlemper, J. Caballero, J. V. Hajnal, A. Price, D. Rueckert, A deep cascade of convolutional neural networks for mr image reconstruction, in: International Conference on Information Processing in Medical Imaging, 2017, pp. 647-658."
1468
+ },
1469
+ {
1470
+ "type": "ref_text",
1471
+ "bbox": [
1472
+ 0.514,
1473
+ 0.715,
1474
+ 0.938,
1475
+ 0.748
1476
+ ],
1477
+ "angle": 0,
1478
+ "content": "[17] C. Qin, J. Schlemper, J. Caballero, A. N. Price, J. V. Hajnal, D. Rueckert, Convolutional recurrent neural networks for dynamic mr image reconstruction, IEEE Transactions on Medical Imaging 38 (2019) 280-290."
1479
+ },
1480
+ {
1481
+ "type": "ref_text",
1482
+ "bbox": [
1483
+ 0.514,
1484
+ 0.749,
1485
+ 0.938,
1486
+ 0.782
1487
+ ],
1488
+ "angle": 0,
1489
+ "content": "[18] C. M. Sandino, P. Lai, S. S. Vasanawala, J. Y. Cheng, Accelerating cardiac cine mri using a deep learning-based esprit reconstruction, Magnetic Resonance in Medicine 85 (2021) 152-167."
1490
+ },
1491
+ {
1492
+ "type": "ref_text",
1493
+ "bbox": [
1494
+ 0.514,
1495
+ 0.783,
1496
+ 0.938,
1497
+ 0.815
1498
+ ],
1499
+ "angle": 0,
1500
+ "content": "[19] W. Huang, Z. Ke, Z.-X. Cui, J. Cheng, Z. Qiu, S. Jia, L. Ying, Y. Zhu, D. Liang, Deep low-rank plus sparse network for dynamic mr imaging, Medical Image Analysis 73 (2021) 102190."
1501
+ },
1502
+ {
1503
+ "type": "ref_text",
1504
+ "bbox": [
1505
+ 0.514,
1506
+ 0.816,
1507
+ 0.938,
1508
+ 0.86
1509
+ ],
1510
+ "angle": 0,
1511
+ "content": "[20] Z. Huang, J. Bae, P. M. Johnson, T. Sood, L. Heacock, J. Fogarty, L. Moy, S. G. Kim, F. Knoll, A simulation pipeline to generate realistic breast images for learning dce-mri reconstruction, in: Machine Learning for Medical Image Reconstruction, 2021, pp. 45-53."
1512
+ },
1513
+ {
1514
+ "type": "ref_text",
1515
+ "bbox": [
1516
+ 0.514,
1517
+ 0.861,
1518
+ 0.938,
1519
+ 0.894
1520
+ ],
1521
+ "angle": 0,
1522
+ "content": "[21] Z. Ke, J. Cheng, L. Ying, H. Zheng, Y. Zhu, D. Liang, An unsupervised deep learning method for multi-coil cine migraine, Physics in Medicine & Biology 65 (2020) 235041."
1523
+ },
1524
+ {
1525
+ "type": "ref_text",
1526
+ "bbox": [
1527
+ 0.514,
1528
+ 0.895,
1529
+ 0.938,
1530
+ 0.906
1531
+ ],
1532
+ "angle": 0,
1533
+ "content": "[22] J. Yoo, K. H. Jin, H. Gupta, J. Yerly, M. Stuber, M. Unser, Time"
1534
+ },
1535
+ {
1536
+ "type": "list",
1537
+ "bbox": [
1538
+ 0.514,
1539
+ 0.13,
1540
+ 0.938,
1541
+ 0.906
1542
+ ],
1543
+ "angle": 0,
1544
+ "content": null
1545
+ },
1546
+ {
1547
+ "type": "page_number",
1548
+ "bbox": [
1549
+ 0.494,
1550
+ 0.915,
1551
+ 0.504,
1552
+ 0.926
1553
+ ],
1554
+ "angle": 0,
1555
+ "content": "8"
1556
+ }
1557
+ ],
1558
+ [
1559
+ {
1560
+ "type": "ref_text",
1561
+ "bbox": [
1562
+ 0.091,
1563
+ 0.099,
1564
+ 0.486,
1565
+ 0.121
1566
+ ],
1567
+ "angle": 0,
1568
+ "content": "dependent deep image prior for dynamic migraine, IEEE Transactions on Medical Imaging 40 (2021) 3337-3348."
1569
+ },
1570
+ {
1571
+ "type": "ref_text",
1572
+ "bbox": [
1573
+ 0.062,
1574
+ 0.122,
1575
+ 0.486,
1576
+ 0.156
1577
+ ],
1578
+ "angle": 0,
1579
+ "content": "[23] A. H. Ahmed, Q. Zou, P. Nagpal, M. Jacob, Dynamic imaging using deep bi-linear unsupervised representation (deblur), IEEE Transactions on Medical Imaging 41 (2022) 2693-2703."
1580
+ },
1581
+ {
1582
+ "type": "ref_text",
1583
+ "bbox": [
1584
+ 0.061,
1585
+ 0.157,
1586
+ 0.485,
1587
+ 0.189
1588
+ ],
1589
+ "angle": 0,
1590
+ "content": "[24] D. Ulyanov, A. Vedaldi, V. Lempitsky, Deep image prior, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2018, pp. 9446-9454."
1591
+ },
1592
+ {
1593
+ "type": "ref_text",
1594
+ "bbox": [
1595
+ 0.061,
1596
+ 0.19,
1597
+ 0.486,
1598
+ 0.234
1599
+ ],
1600
+ "angle": 0,
1601
+ "content": "[25] V. Sitzmann, J. N. P. Martel, A. W. Bergman, D. B. Lindell, G. Wetzstein, Implicit neural representations with periodic activation functions, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), 2020, pp. 7462-7473."
1602
+ },
1603
+ {
1604
+ "type": "ref_text",
1605
+ "bbox": [
1606
+ 0.061,
1607
+ 0.235,
1608
+ 0.486,
1609
+ 0.278
1610
+ ],
1611
+ "angle": 0,
1612
+ "content": "[26] B. Mildenhall, P. P. Srinivasan, M. Tancik, J. T. Barron, R. Ramamoorthi, R. Ng, Nerf: Representing scenes as neural radiance fields for view synthesis, in: European Conference on Computer Vision (ECCV), 2020, pp. 405-421."
1613
+ },
1614
+ {
1615
+ "type": "ref_text",
1616
+ "bbox": [
1617
+ 0.061,
1618
+ 0.28,
1619
+ 0.486,
1620
+ 0.324
1621
+ ],
1622
+ "angle": 0,
1623
+ "content": "[27] J. J. Park, P. Florence, J. Straub, R. Newcombe, S. Lovegrove, Deepsdf: Learning continuous signed distance functions for shape representation, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 165-174."
1624
+ },
1625
+ {
1626
+ "type": "ref_text",
1627
+ "bbox": [
1628
+ 0.061,
1629
+ 0.325,
1630
+ 0.486,
1631
+ 0.347
1632
+ ],
1633
+ "angle": 0,
1634
+ "content": "[28] T. Müller, F. Rousselle, J. Novák, A. Keller, Real-time neural radiance caching for path tracing, arXiv preprint arXiv:2106.12372 (2021)."
1635
+ },
1636
+ {
1637
+ "type": "ref_text",
1638
+ "bbox": [
1639
+ 0.061,
1640
+ 0.348,
1641
+ 0.486,
1642
+ 0.392
1643
+ ],
1644
+ "angle": 0,
1645
+ "content": "[29] G. Zang, R. Idoughi, R. Li, P. Wonka, W. Heidrich, Intratomo: self-supervised learning-based tomography via sinogram synthesis and prediction, in: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021, pp. 1960-1970."
1646
+ },
1647
+ {
1648
+ "type": "ref_text",
1649
+ "bbox": [
1650
+ 0.061,
1651
+ 0.393,
1652
+ 0.486,
1653
+ 0.447
1654
+ ],
1655
+ "angle": 0,
1656
+ "content": "[30] A. W. Reed, H. Kim, R. Anirudh, K. A. Mohan, K. Champley, J. Kang, S. Jayasuriya, Dynamic ct reconstruction from limited views with implicit neural representations and parametric motion fields, in: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021, pp. 2258-2268."
1657
+ },
1658
+ {
1659
+ "type": "ref_text",
1660
+ "bbox": [
1661
+ 0.061,
1662
+ 0.448,
1663
+ 0.486,
1664
+ 0.481
1665
+ ],
1666
+ "angle": 0,
1667
+ "content": "[31] Y. Sun, J. Liu, M. Xie, B. Wohlberg, U. S. Kamilov, Coil: Coordinate-based internal learning for tomographic imaging, IEEE Transactions on Computational Imaging 7 (2021) 1400-1412."
1668
+ },
1669
+ {
1670
+ "type": "ref_text",
1671
+ "bbox": [
1672
+ 0.061,
1673
+ 0.482,
1674
+ 0.486,
1675
+ 0.515
1676
+ ],
1677
+ "angle": 0,
1678
+ "content": "[32] L. Shen, J. Pauly, L. Xing, Nerp: Implicit neural representation learning with prior embedding for sparsely sampled image reconstruction, IEEE Transactions on Neural Networks and Learning Systems (2022) 1-13."
1679
+ },
1680
+ {
1681
+ "type": "ref_text",
1682
+ "bbox": [
1683
+ 0.061,
1684
+ 0.516,
1685
+ 0.486,
1686
+ 0.55
1687
+ ],
1688
+ "angle": 0,
1689
+ "content": "[33] L. Liu, J. Gu, K. Z. Lin, T.-S. Chua, C. Theobalt, Neural sparse voxel fields, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), NIPS'20, 2020, pp. 15651-15663."
1690
+ },
1691
+ {
1692
+ "type": "ref_text",
1693
+ "bbox": [
1694
+ 0.061,
1695
+ 0.55,
1696
+ 0.486,
1697
+ 0.594
1698
+ ],
1699
+ "angle": 0,
1700
+ "content": "[34] C. Sun, M. Sun, H. Chen, Direct voxel grid optimization: Superfast convergence for radiance fields reconstruction, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022, pp. 5459-5469."
1701
+ },
1702
+ {
1703
+ "type": "ref_text",
1704
+ "bbox": [
1705
+ 0.061,
1706
+ 0.595,
1707
+ 0.486,
1708
+ 0.639
1709
+ ],
1710
+ "angle": 0,
1711
+ "content": "[35] Sara Fridovich-Keil and Alex Yu, M. Tancik, Q. Chen, B. Recht, A. Kanazawa, Plenoxels: Radiance fields without neural networks, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022, pp. 5491-5500."
1712
+ },
1713
+ {
1714
+ "type": "ref_text",
1715
+ "bbox": [
1716
+ 0.061,
1717
+ 0.639,
1718
+ 0.486,
1719
+ 0.695
1720
+ ],
1721
+ "angle": 0,
1722
+ "content": "[36] M. Tancik, P. P. Srinivasan, B. Mildenhall, S. Fridovich-Keil, N. Raghavan, U. Singhal, R. Ramamoorthi, J. T. Barron, R. Ng, Fourier features let networks learn high frequency functions in low dimensional domains, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), 2020, pp. 7537-7547."
1723
+ },
1724
+ {
1725
+ "type": "ref_text",
1726
+ "bbox": [
1727
+ 0.061,
1728
+ 0.696,
1729
+ 0.486,
1730
+ 0.728
1731
+ ],
1732
+ "angle": 0,
1733
+ "content": "[37] T. Müller, A. Evans, C. Schied, A. Keller, Instant neural graphics primitives with a multiresolution hash encoding, ACM Transactions on Graphics 41 (2022) 102:1-102:15."
1734
+ },
1735
+ {
1736
+ "type": "ref_text",
1737
+ "bbox": [
1738
+ 0.061,
1739
+ 0.729,
1740
+ 0.486,
1741
+ 0.773
1742
+ ],
1743
+ "angle": 0,
1744
+ "content": "[38] J. Lehtinen, J. Munkberg, J. Hasselgren, S. Laine, T. Karras, M. Aittala, T. Aila, Noise2noise: Learning image restoration without clean data, in: International Conference on Machine Learning (ICML), volume 80, 2018, pp. 2965-2974."
1745
+ },
1746
+ {
1747
+ "type": "ref_text",
1748
+ "bbox": [
1749
+ 0.061,
1750
+ 0.775,
1751
+ 0.486,
1752
+ 0.796
1753
+ ],
1754
+ "angle": 0,
1755
+ "content": "[39] D. P. Kingma, J. Ba, Adam: A method for stochastic optimization, arXiv preprint arXiv:1412.6980 (2014)."
1756
+ },
1757
+ {
1758
+ "type": "ref_text",
1759
+ "bbox": [
1760
+ 0.061,
1761
+ 0.797,
1762
+ 0.486,
1763
+ 0.83
1764
+ ],
1765
+ "angle": 0,
1766
+ "content": "[40] M. J. Muckley, R. Stern, T. Murrell, F. Knoll, TorchKbNufft: A high-level, hardware-agnostic non-uniform fast Fourier transform, in: ISMRM Workshop on Data Sampling & Image Reconstruction, 2020."
1767
+ },
1768
+ {
1769
+ "type": "ref_text",
1770
+ "bbox": [
1771
+ 0.061,
1772
+ 0.831,
1773
+ 0.486,
1774
+ 0.874
1775
+ ],
1776
+ "angle": 0,
1777
+ "content": "[41] C. Chen, Y. Liu, P. Schniter, M. Tong, K. Zareba, O. Simonetti, L. Potter, R. Ahmad, Ocmr (v1. 0)-open-access multi-coil k-space dataset for cardiovascular magnetic resonance imaging, arXiv preprint arXiv:2008.03410 (2020)."
1778
+ },
1779
+ {
1780
+ "type": "ref_text",
1781
+ "bbox": [
1782
+ 0.061,
1783
+ 0.875,
1784
+ 0.486,
1785
+ 0.898
1786
+ ],
1787
+ "angle": 0,
1788
+ "content": "[42] H. Chandarana, L. Feng, T. K. Block, A. B. Rosenkrantz, R. P. Lim, J. S. Babb, D. K. Sodickson, R. Otazo, Free-breathing contrast-enhanced mul"
1789
+ },
1790
+ {
1791
+ "type": "list",
1792
+ "bbox": [
1793
+ 0.061,
1794
+ 0.099,
1795
+ 0.486,
1796
+ 0.898
1797
+ ],
1798
+ "angle": 0,
1799
+ "content": null
1800
+ },
1801
+ {
1802
+ "type": "ref_text",
1803
+ "bbox": [
1804
+ 0.541,
1805
+ 0.099,
1806
+ 0.938,
1807
+ 0.132
1808
+ ],
1809
+ "angle": 0,
1810
+ "content": "tiphase mri of the liver using a combination of compressed sensing, parallel imaging, and golden-angle radial sampling, Investigative radiology 48 (2013) 10-16."
1811
+ },
1812
+ {
1813
+ "type": "ref_text",
1814
+ "bbox": [
1815
+ 0.513,
1816
+ 0.133,
1817
+ 0.938,
1818
+ 0.178
1819
+ ],
1820
+ "angle": 0,
1821
+ "content": "[43] M. Uecker, P. Lai, M. J. Murphy, P. Virtue, M. Elad, J. M. Pauly, S. S. Vasanawala, M. Lustig, Espirit—an eigenvalue approach to autocalibrating parallel mri: Where sense meets grappa, Magnetic Resonance in Medicine 71 (2014) 990-1001."
1822
+ },
1823
+ {
1824
+ "type": "list",
1825
+ "bbox": [
1826
+ 0.513,
1827
+ 0.099,
1828
+ 0.938,
1829
+ 0.178
1830
+ ],
1831
+ "angle": 0,
1832
+ "content": null
1833
+ },
1834
+ {
1835
+ "type": "page_number",
1836
+ "bbox": [
1837
+ 0.494,
1838
+ 0.914,
1839
+ 0.505,
1840
+ 0.926
1841
+ ],
1842
+ "angle": 0,
1843
+ "content": "9"
1844
+ }
1845
+ ]
1846
+ ]
2301.00xxx/2301.00127/97724fca-330b-4a45-9d7d-ecac8fcb1f6d_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:40843a51d35c0f7e4aac22c848f607edccee9c38deac436ae5364454c918fd44
3
+ size 10800852
2301.00xxx/2301.00127/full.md ADDED
@@ -0,0 +1,272 @@
1
+ # Spatiotemporal implicit neural representation for unsupervised dynamic MRI reconstruction
2
+
3
+ Jie Feng $^{a}$ , Ruimin Feng $^{a}$ , Qing Wu $^{b}$ , Zhiyong Zhang $^{a}$ , Yuyao Zhang $^{b,c}$ , Hongjiang Wei $^{a,*}$
4
+
5
+ $^{a}$ School of Biomedical Engineering, Shanghai Jiao Tong University, Shanghai, China
6
+ $^{b}$ School of Information Science and Technology, ShanghaiTech University, Shanghai, China
7
+ $^{c}$ iHuman Institute, Shanghaitech University, Shanghai, China
8
+
9
+ # Abstract
10
+
11
+ Supervised Deep-Learning (DL)-based reconstruction algorithms have shown state-of-the-art results for highly-undersampled dynamic Magnetic Resonance Imaging (MRI) reconstruction. However, the requirement for large amounts of high-quality ground-truth data hinders their applications due to the generalization problem. Recently, Implicit Neural Representation (INR) has emerged as a powerful DL-based tool for solving the inverse problem by characterizing the attributes of a signal as a continuous function of corresponding coordinates in an unsupervised manner. In this work, we proposed an INR-based method to improve dynamic MRI reconstruction from highly undersampled $k$ -space data, which only takes spatiotemporal coordinates as inputs. Specifically, the proposed INR represents the dynamic MRI images as an implicit function and encodes them into neural networks. The weights of the network are learned from the sparsely-acquired $(k, t)$ -space data alone, without external training datasets or prior images. Benefiting from the strong implicit continuity regularization of INR together with explicit regularization for low-rankness and sparsity, our proposed method outperforms the compared scan-specific methods at various acceleration factors. For example, experiments on retrospective cardiac cine datasets show an improvement of $5.5 \sim 7.1$ dB in PSNR for extremely high accelerations (up to $41.6 \times$ ). The high quality and inner continuity of the images provided by INR have great potential to further improve the spatiotemporal resolution of dynamic MRI, without the need for any training data.
12
+
13
+ Keywords: Dynamic MR imaging, Implicit Neural Representation, Unsupervised learning
14
+
15
+ # 1. Introduction
16
+
17
+ Dynamic Magnetic Resonance Imaging (MRI) is one of the most popular MRI technologies, which can preserve not only excellent tissue contrast but also dynamic temporal changes of tissue. Dynamic MRI requires rapid data collection for the study of moving organs with severe physiological motion, such as the heart [1] and abdomen [2]. Dynamic Contrast-Enhanced (DCE) MRI has also made tremendous contributions to the study of microvascular structure and function of in vivo organs [3].
18
+
19
+ However, the limitations of MRI hardware on gradient encoding performance and long acquisition time slow down our pace for higher spatiotemporal resolutions in dynamic MRI[4]. Spatial and temporal resolution are always inversely related. High spatial resolution images can only be acquired with low temporal resolution and vice versa. Thus, a trade-off has to be made between spatial and temporal resolution in practical dynamic MRI. This conflict can be potentially resolved by developing advanced MRI reconstruction methods from highly-undersampled $k$ -space data, including the traditional Compressed-Sensing (CS)-based methods and the Deep-Learning (DL)-based methods.
20
+
21
+ CS methods exploit spatial and temporal correlations of dynamic MRI by using irregular $k$ -space undersampling patterns to create incoherent artifacts in a suitable transform domain where the medical images are compressible, such as in the $k$ -t domain [5], temporal-gradient domain (temporal total variation regularizer) [6, 7] and many others. Image reconstruction is performed by exploiting the sparsity in the solution, subject to data consistency constraints. The further development of sparsity extended to the usage of low-rank prior: the Low-rank and Sparsity (L&S) strategy enforced a both sparse and low-rank output solution [8, 9], and the Low-rank plus Sparsity $(\mathrm{L} + \mathrm{S})$ strategy decomposed the solution images into a low-rank and a sparsity component for background and the dynamic foreground, respectively [10]. Recently, the subspace-modeling strategy enforced a combination of a temporal sparsity constraint and a low-rank spatial subspace constraint to improve DCE-MRI reconstruction [11, 12].
22
+
23
+ Recent advances in DL techniques have shown potential for further accelerating dynamic MRI data acquisition. By adopting the supervised-learning strategy with large quantities of undersampled and fully-sampled image pairs, DL-based methods showed superior performance compared to CS-based methods [13]. DL-based methods applied in dynamic MRI reconstruction can be separated into two categories, i.e., end-to-end and unrolled methods. The end-to-end methods [14, 15] enable the networks to directly learn the mapping from undersampled im
24
+
25
+ ages with artifacts to fully sampled high-quality images. In contrast, the unrolled strategy is inspired by unrolling the iterative optimization process of CS, using networks to learn the auxiliary parameters or regularizers [16, 17, 18, 19] during the iterations. Especially, $\mathrm{L + S}$ -Net [19] combined the $\mathrm{L + S}$ strategy of CS-based methods with the unrolled DL methods, demonstrating the availability of low-rank and sparsity in DL methods. However, the excessive demand for high-quality ground-truth labels in supervised learning hinders its applications in practice due to the generalization issue [13]. For example, the performance of the trained networks would degrade when the data is acquired with different scan parameters or pathological conditions. While in the case of DCE MRI, the ground-truth data are not available [20]. Alternatively, the unsupervised-learning strategy was introduced to the DL-based dynamic MRI reconstruction without involving external data in the training process. For example, Ke et al. [21] used a time-interleaved acquisition scheme, where the fully-sampled images were generated by merging adjacent frames. However, a large dataset is still needed for training the neural net. Yoo et al. [22] and Ahmed et al. [23] both adopted the Deep Image Prior (DIP) approach [24], which leveraged the tendency of untrained Convolutional Neural Networks (CNN) to generate natural-structured images as an implicit regularizer and then optimized the CNN parameters for scan-specific reconstruction. However, DIP-based methods suffer from a heavy computational burden and are still limited for application [22].
26
+
27
+ Implicit Neural Representation (INR) is a new way which parameterizes signals by a multi-layer perceptron (MLP) [25]. Unlike traditional explicit representation that uses discrete elements such as pixels (2D images) or voxels (3D volumes), INR represents the desired object itself as a continuous representation function of the spatial coordinates. In other words, the values at any spatial location of the object can be retrieved by querying the trained MLP with the corresponding coordinate. It provides a general solution for various applications of object reconstruction. With the application of MLP and proper encoding function mapping the input coordinates to a high-dimensional space [26], INR has achieved superior performance in multiple computer vision tasks [27, 26, 28]. Previous research also showed the INR's capability to solve the inverse problem in medical imaging fields, e.g., CT image reconstruction [29, 30, 31] and undersampled MRI [32] in an unsupervised manner. For example, implicit Neural Representation learning with Prior embedding (NeRP) [32] was proposed to perform the static MRI reconstruction from the sparsely-sampled $k$ -space data. However, NeRP requires a fully-sampled prior image with the same modality for the reconstruction of longitudinal MRI images of follow-up scans. Additionally, the INR for object reconstruction usually takes hours or even days to converge on one single data. Recently, parametric encoding functions with extra learnable parameters [33, 28, 34, 35] were proposed to significantly shorten the convergence time. For example, the hash encoding [28] function has shown promising results for accelerating the computational processes of INR in seconds for many graphics applications.
28
+
29
+ In this paper, we aim to present a new unsupervised method
30
+
31
+ for highly accelerated dynamic MRI reconstruction. Inspired by the insight of INR, the proposed method treated the dynamic MR image sequence as a continuous function mapping the spatiotemporal coordinates to the corresponding image intensities. The function was parameterized by a hash encoding function and an MLP and served as an implicit continuity regularizer for dynamic MRI reconstruction. The MLP weights were directly learned from the imaging-model-based $(\pmb{k},\mathrm{t})$ -space data consistency loss combined with the explicit regularizers, without training databases or any ground-truth data. At inference, the reconstructed images can simply be obtained by querying the optimized network with the same or denser spatiotemporal coordinates, which would allow for sampling and interpolating the dynamic MRI at an arbitrary frame rate. Experiments on retrospective cardiac cine data and prospective untriggered DCE liver MRI data showed that the proposed method outperformed the compared scan-specific methods. Our results showed an improvement of $5.5\sim 7.1$ dB in PSNR at an extremely high acceleration factor (41.6-fold). A temporal super-resolution test $(4\times)$ was conducted without retraining the network to demonstrate the strong continuity of the optimized representation function as an implicit regularizer for dynamic MRI reconstruction. The main contributions of this study are as follows:
32
+
33
+ - INR is first introduced to dynamic MRI reconstruction as an implicit continuity regularizer, achieving an improvement of $5.5\mathrm{dB}\sim 7.1$ dB in PSNR at an extremely high acceleration rate $(41.6\times)$ compared to other scan-specific methods.
34
+ - The INR-based method is an unsupervised-learning strategy, meaning that it does not require external datasets or prior images for training. Thus, the proposed method generalizes on the data acquired with different scan parameters and imaging areas.
35
+ - The proposed method achieved a reasonable $4 \times$ temporal super-resolution for dynamic MRI reconstruction without network retraining, suggesting its strong implicit continuity to achieve higher temporal resolutions.
36
+
37
+ # 2. Method
38
+
39
+ # 2.1. Dynamic MRI with regularizers
40
+
41
+ In dynamic MRI, the relationship between the measured $(\pmb{k},\mathrm{t})$ -space data and the reconstructed image matrix can be expressed by a linear model. Consider the discretized image matrix $d\in \mathbb{C}^{(N\times N)\times T}$ and the measured $(\pmb{k},\mathrm{t})$ -space data of the $c$th coil $m_{c}\in \mathbb{C}^{(N\times M)\times T}$ $(1\leq c\leq C)$ , where $N$ is the image size, $T$ denotes the total number of temporal frames, $M$ $(M < N)$ is the number of acquired readout lines for each frame, and $C$ is the total number of coil channels. The relationship between $d$ and $m_{c}$ can be formulated as:
42
+
43
+ $$
44
+ m _ {c} = F _ {u} S _ {c} d. \tag {1}
45
+ $$
46
+
47
+ Here, $F_{u} \in \mathbb{C}^{(N \times M) \times (N \times N)}$ denotes the Fourier operator with the undersampling mask, which simulates the undersampled acquisition process of dynamic MRI, and $S_{c} \in \mathbb{C}^{(N \times N) \times (N \times N)}$ is a diagonal matrix representing the cth coil sensitivity map.
48
+
49
+ Reconstructing image $d$ from the undersampled $(\pmb{k},\mathrm{t})$ -space data is actually solving an ill-posed inverse problem, and the optimization process is formulated as:
50
+
51
+ $$
52
+ \underset {d} {\arg \min } \frac {1}{2} \sum_ {c = 1} ^ {C} \| F _ {u} S _ {c} d - m _ {c} \| _ {2} ^ {2} + \mathcal {R} (d), \tag {2}
53
+ $$
54
+
55
+ where $\mathcal{R}(d)$ is the prior regularizer, which helps the target $d$ reach a good solution under ill-posed conditions.
56
+
57
+ It has been shown that using sparsity and low-rank regularizers as prior knowledge in CS-based [8, 9, 10] and DL-based methods [19] is able to reach state-of-the-art results for dynamic MRI reconstruction. An example can be formulated as:
58
+
59
+ $$
60
+ \underset {d} {\arg \min } \frac {1}{2} \sum_ {c = 1} ^ {C} \| F _ {u} S _ {c} d - m _ {c} \| _ {2} ^ {2} + \lambda_ {S} \| T V _ {t} (d) \| _ {1} + \lambda_ {L} \| d \| _ {*}, \tag {3}
61
+ $$
62
+
63
+ where $TV_{t}(\bullet)$ is the temporal TV operator serving as the sparsity regularizer, $\| d\|_{*}$ is the nuclear norm (sum of singular values) of the image matrix $d$ , representing the low-rank regularizer, and $\lambda_{S}$ and $\lambda_{L}$ are the sparsity and low-rank regularization hyperparameters, respectively. Previous work [8] has shown that the objective in Eq. 3 can be optimized iteratively to achieve good dynamic MRI reconstruction without Ground Truth (GT).
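+
+ As an illustration of Eq. 1 and Eq. 3, a minimal PyTorch-style sketch of the multi-coil forward model and the regularized objective could look as follows. The undersampled Fourier operator `F_u` is left as a placeholder callable (e.g., a NUFFT, as used later in this paper), and all tensor shapes and names are illustrative assumptions rather than the authors' implementation.
+
+ ```python
+ import torch
+
+ def forward_model(d, smaps, F_u):
+     """Eq. 1: m_c = F_u S_c d for every coil c.
+
+     d     : complex image series, shape (T, N, N)
+     smaps : coil sensitivity maps S_c, shape (C, N, N)
+     F_u   : callable applying the undersampled Fourier transform per frame
+     """
+     coil_images = smaps[:, None] * d[None]      # S_c d, shape (C, T, N, N)
+     return F_u(coil_images)                     # F_u S_c d
+
+ def objective(d, smaps, F_u, m, lam_s, lam_l):
+     """Eq. 3: data consistency + temporal TV sparsity + nuclear-norm low-rankness."""
+     dc = 0.5 * (forward_model(d, smaps, F_u) - m).abs().pow(2).sum()
+     tv_t = (d[1:] - d[:-1]).abs().sum()         # ||TV_t(d)||_1 along the time axis
+     casorati = d.reshape(d.shape[0], -1)        # T x (N*N) Casorati matrix
+     nuc = torch.linalg.svdvals(casorati).sum()  # nuclear norm ||d||_*
+     return dc + lam_s * tv_t + lam_l * nuc
+ ```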
64
+
65
+ # 2.2. INR in dynamic MRI
66
+
67
+ Inspired by INR, the internal continuity of the image can be a powerful regularizer for solving the ill-posed inverse problem of dynamic MRI reconstruction from sparsely-acquired $(\pmb{k}, t)$ -space data. The INR-based method can be implemented by applying a learnable continuous mapping function between spatiotemporal coordinates and the desired image intensities to be reconstructed. Let $f_{\theta}: \mathbb{R}^3 \to \mathbb{C}$ be the continuous function, parameterized by learnable parameters $\theta$ , that maps the spatiotemporal coordinates $(x,y,t)$ to the corresponding image intensities, where $(x,y)$ represent the 2D spatial coordinates $(1 \leq x,y \leq N)$ and $t$ represents the temporal coordinate $(1 \leq t \leq T)$ . Thus, the image $d$ is rewritten as $d_{\theta} \in \mathbb{C}^{(N \times N) \times T}$ by feeding all the spatiotemporal coordinates of the dynamic images into $f_{\theta}$ , and the Casorati matrix $d_{\theta}$ is:
68
+
69
+ $$
70
+ d _ {\theta} = \left[ \begin{array}{c c c} f _ {\theta} (1, 1, 1) & \dots & f _ {\theta} (1, 1, T) \\ \vdots & & \vdots \\ f _ {\theta} (N, 1, 1) & \ddots & f _ {\theta} (N, 1, T) \\ \vdots & & \vdots \\ f _ {\theta} (N, N, 1) & \dots & f _ {\theta} (N, N, T) \end{array} \right]. \tag {4}
71
+ $$
72
+
73
+ Thus, Eq. 3 can be written as a fitting problem that searches the optimal parameters $\theta$ of the continuous mapping function $f_{\theta}$ :
74
+
75
+ $$
76
+ \underset {\theta} {\arg \min } \frac {1}{2} \sum_ {c = 1} ^ {C} \| F _ {u} S _ {c} d _ {\theta} - m _ {c} \| _ {2} ^ {2} + \lambda_ {S} \| T V _ {t} (d _ {\theta}) \| _ {1} + \lambda_ {L} \| d _ {\theta} \| _ {*}. \tag {5}
77
+ $$
78
+
79
+ Here, Eq. 5 incorporates the implicit continuity on the desired image sequence, together with the explicit sparsity and low-rankness regularizers.
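+
+ Concretely, assembling $d_{\theta}$ in Eq. 4 amounts to evaluating the network on every $(x,y,t)$ coordinate. The sketch below assumes a generic `f_theta` that maps normalized coordinates to two real outputs (real and imaginary parts); the concrete encoder and MLP are described in Secs. 2.3 and 2.5, and the helper names here are illustrative.
+
+ ```python
+ import torch
+
+ def make_coords(N, T, device="cpu"):
+     """All (x, y, t) coordinates of the image series, normalized to [0, 1]."""
+     xs = torch.linspace(0, 1, N, device=device)
+     ts = torch.linspace(0, 1, T, device=device)
+     grid = torch.stack(torch.meshgrid(xs, xs, ts, indexing="ij"), dim=-1)
+     return grid.reshape(-1, 3)                      # (N*N*T, 3)
+
+ def query_images(f_theta, N, T):
+     """Evaluate f_theta everywhere and assemble the Casorati matrix d_theta of Eq. 4."""
+     out = f_theta(make_coords(N, T))                # (N*N*T, 2): real and imaginary parts
+     d_theta = torch.complex(out[..., 0], out[..., 1])
+     return d_theta.reshape(N * N, T)                # rows: spatial positions, columns: frames
+ ```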
80
+
81
+ # 2.3. Continuous mapping function with MLP and hash encoding
82
+
83
+ In INR, the continuous representation function $f_{\theta}$ is based on an MLP. Better high-frequency fitting performance can be achieved by mapping the input coordinates to a higher-dimensional space using an encoding function $\varphi$ before passing them to the MLP [26, 36]:
84
+
85
+ $$
86
+ f _ {\theta} (x, y, t) = M L P (\varphi (x, y, t)). \tag {6}
87
+ $$
88
+
89
+ In this work, we adopted hash encoding [37] as the coordinate encoding function $\varphi$ , which enables the use of smaller MLPs and a significantly faster convergence time. Specifically, hash encoding uses a total of $L$ independent hash grids with the size of $T$ as learnable feature storages. These hash grids represent a set of resolutions in the form of a geometric series, i.e., $N_{min}, b * N_{min}, \dots, b^{(L-1)} * N_{min}$ , where $N_{min}$ and $b$ are the first term and the ratio of the geometric series, respectively. Trilinear interpolation is applied to each queried hash grid entry to preserve continuity. Each hash grid outputs an $F$ -dim feature vector, and these interpolated feature vectors are then concatenated as the final encoded input vector. As pointed out by Müller et al. [37], the five hyperparameters mentioned above can be tuned to better fit a wide range of tasks: $N_{min}$ and $b$ decide how the resolution increases across the different hash grids, and $L, T, F$ are important tuners for the trade-off between performance, memory and quality.
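+
+ The geometric series of grid resolutions and the size of the concatenated feature vector can be made explicit with a few lines of Python; the hyperparameter values below are illustrative, not the settings used in this paper.
+
+ ```python
+ def hash_grid_config(n_levels=8, n_min=16, growth=1.5, n_features=2):
+     """Per-level resolutions N_min * b**l and the encoded input size fed to the MLP."""
+     resolutions = [int(round(n_min * growth ** level)) for level in range(n_levels)]
+     encoded_dim = n_levels * n_features   # L concatenated F-dimensional feature vectors
+     return resolutions, encoded_dim
+
+ # Example: 8 levels spanning resolutions 16 to ~273 and a 16-dimensional encoded coordinate.
+ levels, dim = hash_grid_config()
+ ```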
90
+
91
+ # 2.4. Loss functions
92
+
93
+ Eq. 5 is rewritten in the form of the following loss function for implementation with gradient-descent-based algorithms:
94
+
95
+ $$
96
+ \mathcal{L}_{\text{total}} = \underbrace{\sum_{c = 1}^{C} \| F_{u} S_{c} d_{\theta} - m_{c} \|_{2}^{2}}_{\mathcal{L}_{DC}} + \lambda_{S} \underbrace{\| TV_{t}\left(d_{\theta}\right) \|_{1}}_{\mathcal{L}_{TV}} + \lambda_{L} \underbrace{\left\| d_{\theta} \right\|_{*}}_{\mathcal{L}_{LR}}. \tag{7}
97
+ $$
98
+
99
+ Here $\mathcal{L}_{DC},\mathcal{L}_{TV}$ and $\mathcal{L}_{LR}$ stand for data consistency (DC) loss in $(\pmb{k},\mathrm{t})$ -space, temporal TV loss and low-rank loss, corresponding to the three terms of the optimization objective in Eq. 5, respectively.
100
+
101
+ Considering that the magnitudes of the $k$ -space low-frequency elements are several orders of magnitude greater than those of the high-frequency elements, a relative L2 loss [38, 28] is used as the DC loss. Compared with the normal L2 loss, the relative L2 loss is normalized by the square of the predicted output, which helps balance the gradients across $k$ -space for better high-frequency performance. Let $\hat{Y}_i$ be one element of the multi-coil predicted $k$ -space data $[F_u S_1 d\; F_u S_2 d\dots F_u S_C d]$ and $Y_{i}$ the corresponding element of the multi-coil acquired $k$ -space data $[m_1\; m_2\dots m_C]$ ; the DC loss is then written as:
102
+
103
+ $$
104
+ \mathcal {L} _ {D C} = \sum_ {i = 1} ^ {N \times M \times T \times C} \frac {\left(\hat {Y} _ {i} - Y _ {i}\right) ^ {2}}{\left(\hat {Y} _ {i}\right) ^ {2} + \epsilon}. \tag {8}
105
+ $$
106
+
107
+ ![](images/023528a343bdf79f0426b545d385990ed75bea42cb4159522ed029ba0a8b7ec2.jpg)
108
+ Figure 1: Overview of the proposed method. All the spatiotemporal coordinates are fed into hash grids and an MLP to output two-channel intensities as the real and imaginary parts of the image series. The predicted $k$ -space data are generated with the undersampled Fourier Transform (a golden-angle radial undersampling pattern) from the reconstructed complex-valued images following Eq. 1. The difference between the predicted $k$ -space data and acquired $k$ -space data is calculated as the data consistency loss. Two regularization terms, temporal Total Variation and low-rankness, are applied to the output image series in the loss function. The parameters in the hash grids and the MLP are updated iteratively by minimizing the loss function.
109
+
110
+ The parameter $\epsilon$ with a value of $10^{-4}$ is added to the denominator to prevent the zero-division problem.
111
+
112
+ Therefore, the parameters $\theta$ of hash grids and MLP are optimized to minimize the total loss:
113
+
114
+ $$
115
+ \mathcal{L}_{\text{total}} = \underbrace{\sum_{i = 1}^{N \times M \times T \times C} \frac{\left(\hat{Y}_{i} - Y_{i}\right)^{2}}{\left(\hat{Y}_{i}\right)^{2} + \epsilon}}_{\mathcal{L}_{DC}} + \lambda_{S} \underbrace{\| TV_{t} (d_{\theta}) \|_{1}}_{\mathcal{L}_{TV}} + \lambda_{L} \underbrace{\| d_{\theta} \|_{*}}_{\mathcal{L}_{LR}}. \tag{9}
116
+ $$
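+
+ A compact PyTorch sketch of Eq. 9 is given below. It reads the squared differences in Eq. 8 as squared complex magnitudes, which is one reasonable interpretation for complex-valued k-space data; variable names and shapes are assumptions for illustration.
+
+ ```python
+ import torch
+
+ def total_loss(d_theta, y_pred, y_acq, lam_s, lam_l, eps=1e-4):
+     """Eq. 9: relative-L2 data consistency + temporal TV + nuclear norm.
+
+     d_theta : Casorati matrix of the reconstructed images, complex, shape (N*N, T)
+     y_pred  : predicted multi-coil k-space samples (complex)
+     y_acq   : acquired multi-coil k-space samples, same shape as y_pred
+     """
+     # Relative L2 data-consistency loss (Eq. 8).
+     loss_dc = ((y_pred - y_acq).abs().pow(2) / (y_pred.abs().pow(2) + eps)).sum()
+     # Temporal total-variation (sparsity) term along the frame dimension.
+     loss_tv = (d_theta[:, 1:] - d_theta[:, :-1]).abs().sum()
+     # Nuclear norm (low-rankness) of the Casorati matrix.
+     loss_lr = torch.linalg.svdvals(d_theta).sum()
+     return loss_dc + lam_s * loss_tv + lam_l * loss_lr
+ ```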
117
+
118
+ # 2.5. Implementation details
119
+
120
+ We used a tiny MLP containing 5 hidden layers, each consisting of 64 neurons followed by a ReLU activation function. The MLP outputs 2 channels, representing the real and imaginary components of the complex-valued MRI images. No activation function was adopted for the last layer.
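+
+ In plain PyTorch, an MLP with this shape might be built as follows; the input width depends on the hash-encoding output size and is therefore left as a parameter.
+
+ ```python
+ import torch.nn as nn
+
+ def make_mlp(in_features, hidden=64, n_hidden=5, out_channels=2):
+     """5 hidden layers of 64 ReLU neurons and a 2-channel linear output (real, imaginary)."""
+     layers, width = [], in_features
+     for _ in range(n_hidden):
+         layers += [nn.Linear(width, hidden), nn.ReLU()]
+         width = hidden
+     layers.append(nn.Linear(width, out_channels))   # no activation on the last layer
+     return nn.Sequential(*layers)
+ ```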
121
+
122
+ During the optimization process, all the spatiotemporal coordinates were gathered in one batch and the batch size was set to 1. All the coordinates were isotropically normalized to [0, 1] for fast convergence. The number of optimization epochs was set to 500. The Adam optimizer [39] was used with a constant learning rate of $0.001$ , $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ , and $\epsilon = 10^{-8}$ .
123
+
124
+ Once the optimization process was done, the continuous function $f_{\theta}$ was considered a good representation of the underlying image sequences. Then the same coordinate batch or a denser coordinate batch can be fed into the INR network to output the image sequences.
125
+
126
+ The whole pipeline is illustrated in Fig.1, and was conducted on a system equipped with an Intel i7-9700 processor, 64G RAM, and an NVIDIA RTX 2080Ti 11G GPU. The networks were implemented with PyTorch 1.11.0 and tiny-cudann<sup>1</sup>. The non-cartesian Fourier undersampling operation was implemented with the Non-Uniform Fast Fourier Transform (NUFFT) and was deployed with torchkbnufft 1.3.0 [40] for fast calculation and gradient backpropagation on GPU.
127
+
128
+ # 3. Experiments and results
129
+
130
+ # 3.1. Setup
131
+
132
+ # 3.1.1. Datasets
133
+
134
+ The proposed method was tested on a simulated retrospective cardiac cine dataset and a prospective untriggered DCE liver dataset to demonstrate its effectiveness and generalization.
135
+
136
+ # (1) Retrospective cardiac cine dataset:
137
+
138
+ The fully sampled cardiac cine data from the OCMR dataset [41] were acquired from healthy volunteers on a 1.5T scanner (MAGNETOM Avanto, Siemens Healthineers, Erlangen, Germany) using a bSSFP sequence with the following parameters: $\mathrm{FOV} = 320\times 260\mathrm{mm}^2$ , imaging matrix $= 256\times 208$ , slice thickness $= 8\mathrm{mm}$ , TR/TE $= 2.79\mathrm{ms} / 1.33\mathrm{ms}$ , number of frames $= 18$ . Data acquisition was performed with prospective ECG gating and breath-holding. The number of receiver coils is 18. A simulated 2D golden-angle radial undersampling pattern is adopted, where the readout lines repeatedly pass through the center of $k$ -space and are rotated with a step of $111.25^{\circ}$ . The simulation process includes cropping the original data to $208\times 208$ in the image domain and then converting them to the frequency domain by multi-coil NUFFT with golden-angle trajectories of Fibonacci numbers [42]. The coil sensitivity maps were calculated by the ESPIRiT algorithm [43].
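+
+ For reference, the golden-angle spoke angles and the acceleration factors quoted in Sec. 3.1.2 can be reproduced as below, assuming the acceleration factor is taken as the number of fully sampled Cartesian lines divided by the number of spokes per frame (which matches the reported values).
+
+ ```python
+ import numpy as np
+
+ def golden_angle_spokes(n_spokes):
+     """Radial readout angles rotated in steps of the golden angle, 111.25 degrees."""
+     return (np.arange(n_spokes) * 111.25) % 360.0
+
+ # Acceleration factor assuming AF = matrix size / spokes per frame (208 for the cropped cine data).
+ for spokes in (21, 13, 8, 5):
+     print(f"{spokes:2d} spokes/frame -> AF ~ {208 / spokes:.1f}")   # 9.9, 16.0, 26.0, 41.6
+ ```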
139
+
140
+ # (2) Untriggered DCE liver dataset:
141
+
142
+ The DCE liver data were acquired continuously with the golden-angle acquisition scheme. The 3D stack-of-stars Fast Low Angle SHot (FLASH) sequence was acquired on a free-breathing healthy volunteer using a 3T Siemens MAGNETOM Verio scanner with the following parameters: $\mathrm{FOV} = 370 \times 370 \, \mathrm{mm}^2$ , $\mathrm{TR/TE} = 3.83 \, \mathrm{ms}/1.71 \, \mathrm{ms}$ , imaging matrix $= 384 \times 384$ , slice thickness $= 3 \, \mathrm{mm}$ , total number of spokes per slice $= 600$ . A total of 12 receiver coils were used during the scan. The data, including coil sensitivity maps, were taken from the demo of Feng et al. [7], and details about the intravenous contrast enhancement can be found in that paper. Every 34 consecutively acquired spokes were grouped to reconstruct one frame, which corresponds to an Acceleration Factor $(\mathrm{AF}) \approx 11.3$ and 17 frames in total.
143
+
144
+ # 3.1.2. Performance evaluation
145
+
146
+ In this work, we chose NUFFT, $\mathrm{L + S}$ [10] and GRASP [7] as the baselines for comparison. NUFFT gives the results obtained by directly zero-filling the frequency domain. $\mathrm{L + S}$ and GRASP are two CS-based reconstruction methods that use an optimization pipeline similar to Eq. 2. The difference between them is that GRASP adopts a temporal TV regularizer, while $\mathrm{L + S}$ decomposes the solution images into a background component with a low-rank regularizer and a dynamic foreground with a temporal TV regularizer. We did not compare the proposed INR-based method to the supervised DL methods for dynamic MRI reconstruction since the datasets used in this work are insufficient for supervised network training. In addition, the ground truth is not available for the untriggered DCE liver dataset, which also limits the training process of previous supervised methods.
147
+
148
+ We tested the performance of the proposed method with 21, 13, 8 and 5 spokes per frame (AF ≈ 9.9, 16, 26, 41.6) on the cardiac cine dataset, and with 34 spokes per frame (AF ≈ 11.3) on the DCE liver dataset. For a fair comparison, the hyperparameters of all the methods are tuned to get the best performance and fit the GPU storage in different datasets and AFs, respectively.
149
+
150
+ Qualitative visual comparison and quantitative comparison were used for evaluation. For the cardiac cine dataset, quantitative metrics including peak signal-to-noise ratio (PSNR) and structural similarity index (SSIM) were calculated frame-by-frame as follows:
151
+
152
+ $$
153
+ P S N R = 1 0 \times \log_ {1 0} \left(\frac {1}{\| y - \hat {y} \| _ {2} ^ {2}}\right), \tag {10}
154
+ $$
155
+
156
+ $$
157
+ S S I M = \frac {\left(2 \mu_ {y} \mu_ {\hat {y}} + c _ {1}\right) \left(2 \sigma_ {y \hat {y}} + c _ {2}\right)}{\left(\mu_ {y} ^ {2} + \mu_ {\hat {y}} ^ {2} + c _ {1}\right) \left(\sigma_ {y} ^ {2} + \sigma_ {\hat {y}} ^ {2} + c _ {2}\right)}, \tag {11}
158
+ $$
159
+
160
+ where $y$ and $\hat{y}$ represent the ground truth and reconstructed image, respectively, $\mu_y$ and $\mu_{\hat{y}}$ are the mean intensities of $y$ and $\hat{y}$ , $\sigma_y$ and $\sigma_{\hat{y}}$ are the variances of $y$ and $\hat{y}$ , $\sigma_{y\hat{y}}$ is the covariance of $y$ and $\hat{y}$ , and the constants $c_1$ and $c_2$ were set to $0.01^2$ and $0.03^2$ . $y$ and $\hat{y}$ were both normalized to [0, 1] according to the image sequence maximum and minimum.
161
+
162
+ The $k$ -space data were calculated from the reconstructed complex-valued MR images with the 2D Fast Fourier Transform. For quantitative comparison, the normalized root mean square error (NRMSE) against GT $k$ -space data was calculated coil-by-coil:
163
+
164
+ $$
165
+ N R M S E = \sqrt {\frac {\left\| Y - \hat {Y} \right\| _ {2} ^ {2}}{\| Y \| _ {2} ^ {2}}}, \tag {12}
166
+ $$
167
+
168
+ where $Y$ and $\hat{Y}$ represent the acquired and predicted $k$ -space data, respectively.
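+
+ Eqs. 10 and 12 translate directly into NumPy; the PSNR below assumes images already normalized to [0, 1] and reads the squared norm in Eq. 10 as the mean squared error, as is standard. SSIM is not re-implemented here (an off-the-shelf implementation such as scikit-image's can be used instead).
+
+ ```python
+ import numpy as np
+
+ def psnr(y, y_hat):
+     """Eq. 10 with y, y_hat normalized to [0, 1]."""
+     mse = np.mean(np.abs(y - y_hat) ** 2)
+     return 10.0 * np.log10(1.0 / mse)
+
+ def nrmse(y_acq, y_pred):
+     """Eq. 12: normalized root-mean-square error between two k-space data sets."""
+     return np.sqrt(np.sum(np.abs(y_acq - y_pred) ** 2) / np.sum(np.abs(y_acq) ** 2))
+ ```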
169
+
170
+ For the DCE liver dataset, only visual comparison and temporal ROI intensity assessment were conducted due to the lack of the GT image. The ROIs of the aorta (AO) and portal vein (PV) were manually drawn for signal intensity-time curves. For temporal fidelity comparison, NUFFT was used as the reference since no temporal regularization was involved in the reconstructed images. Although contaminated by the streaking artifacts, the average signal intensity from NUFFT results across
171
+
172
+ large ROI was still able to preserve contrast evolution for fidelity analysis.
173
+
174
+ # 3.2. Reconstruction performance of the proposed method
175
+
176
+ # 3.2.1.Cardiac cine dataset
177
+
178
+ Fig.2 compares the reconstruction performance of different methods on the cardiac cine dataset with 21 and 13 spokes per frame (AF=9.9, 16, respectively). Visually, the images reconstructed by the proposed method appear to have better anatomical details and provide a more accurate temporal fidelity than the baselines at both acceleration conditions of 21 and 13 spokes. NUFFT, $\mathrm{L + S}$ and GRASP all suffer from artifacts and noise in the cardiac chamber area, while the images reconstructed by the proposed method show anatomical details highly similar to the ground truth, as pointed out by the red arrows. In the y-t view, the $\mathrm{L + S}$ results are overly smooth and the GRASP results suffer from noticeable streaking artifacts along the temporal axis. The proposed method provides the highest temporal fidelity of the dynamic images between frames. The error map between the reconstruction and the ground truth further supports our observation. It is noted that the errors observed on the error maps at the edges of the cardiac ventricles potentially arise from cardiac motion blurring. The proposed INR-based method shows the smallest error at the edge of the ventricles, which is consistent with the observation from the y-t view. Quantitatively, the proposed method achieves the best performance among the compared methods, with a PSNR of $39.00\pm 0.55$ dB (21 spokes) / $37.86\pm 0.61$ dB (13 spokes) and an SSIM of $0.980\pm 0.003$ (21 spokes) / $0.975\pm 0.004$ (13 spokes).
179
+
180
+ We further tested the ability of the proposed method for dynamic MRI reconstruction at extremely high acceleration rates (8 and 5 spokes per frame, AF=26 and 41.6, respectively), as shown in Fig.3. The proposed method exhibits comparable performance between AF=26 and 41.6, with the PSNR/SSIM of $36.88 \pm 0.63$ dB/0.968 $\pm 0.005$ (8 spokes) and $35.41 \pm 0.56$ dB/0.957 $\pm 0.006$ (5 spokes). The proposed method has the best image quality with minimal noise and artifacts. In contrast, L+S and GRASP suffer from temporal over-smoothing and noticeable streaking artifacts as the acceleration rate increases. From the y-t view, the dynamic information on the reconstructed images is well captured by the proposed method, even with 5 spokes per frame. Additionally, the proposed INR-based method yields a PSNR higher than those of GRASP and L+S by 5.5 dB and 7.1 dB, respectively.
181
+
182
+ # 3.2.2. DCE liver dataset
183
+
184
+ For the DCE liver dataset with 34 spokes per frame (AF=11.3), the visual comparisons at different temporal phases are shown in Fig. 4(a). As can be seen from the zoomed-in images, the anatomical details of the kidney are clearly visible in the images reconstructed by the proposed method. Severe streaking and noise can be observed in the images reconstructed by NUFFT, L+S, and GRASP, while the proposed method provides high-quality images with less noise than the other methods. The signal intensity-time curves in Fig. 4(b) suggest that the proposed method yields the best temporal fidelity, which is
185
+
186
+ ![](images/f304faa96a109472fb8e77c2a432e70147f5609eb983e4f11d4160d117eee8a3.jpg)
187
+ Figure 2: The reconstruction results of NUFFT, $\mathrm{L + S}$ , GRASP and the proposed method (from left to right) on the cardiac cine dataset with 21 and 13 spokes per frame (AF=9.9, 16). The enlarged views of the heart region are outlined by the orange boxes and the red arrows point out the structure where the proposed method gives a superior reconstruction performance. The y-t images (the 116th slice along y and temporal dimensions) are outlined by green boxes. The error maps and PSNR/SSIM metrics are shown at the bottom, respectively.
188
+
189
+ consistent with the results of NUFFT in AO and PV. For example, the intensity fluctuation of the AO curve between Frame 5 and Frame 11 can be well captured by the proposed INR-based method.
190
+
191
+ # 3.3. Results of the temporal super-resolution
192
+
193
+ To demonstrate the internal continuity of the optimized representation of the dynamic MRI, we use a denser coordinate along the temporal axis as input to conduct upsampling $(4\times)$ on the reconstructed dynamic MR image sequence, named temporal super-resolution. The pipeline is shown in Fig.5(a). The GT frames with temporal linear interpolation between Frame 10 and 11 are used as the reference for comparison, as shown in Fig.5(b). Qualitatively, there is no significant structural difference between the super-resolution images and the interpolated images, indicating the strong implicit continuity representation of the optimized INR function.
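+
+ A sketch of this upsampling step is shown below: the only change at inference time is a denser temporal coordinate grid fed to the already trained network (here written as a generic `f_theta`; names and shapes are illustrative).
+
+ ```python
+ import torch
+
+ def upsample_time(f_theta, N, T, factor=4):
+     """Query the trained network on a 'factor'-times denser temporal grid, without retraining."""
+     xs = torch.linspace(0, 1, N)
+     ts = torch.linspace(0, 1, (T - 1) * factor + 1)   # e.g. frames 10, 10.25, 10.5, 10.75, 11
+     grid = torch.stack(torch.meshgrid(xs, xs, ts, indexing="ij"), dim=-1).reshape(-1, 3)
+     out = f_theta(grid)
+     frames = torch.complex(out[..., 0], out[..., 1])
+     return frames.reshape(N, N, -1)                   # last axis: upsampled time
+ ```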
194
+
195
+ # 4. Discussion
196
+
197
+ In this study, we proposed a novel unsupervised INR-based deep learning method for highly accelerated dynamic MRI reconstruction, which modeled the dynamic MR image sequence as a continuous mapping function. We validated the proposed method on retrospective cardiac cine data and prospective DCE liver data with various acceleration rates. The results showed the effectiveness and generalization of the proposed method in artifact suppression and motion fidelity preservation, especially at extremely high accelerations of 26-fold or 41.6-fold. The proposed method outperforms the compared CS-based methods such as $\mathrm{L} + \mathrm{S}$ and GRASP. The results indicated that the proposed reconstruction method holds promise for high temporal resolution 2D MRI acquisitions.
198
+
199
+ The superiority of the proposed method over the baseline methods is believed to stem from the implicit regularization provided by the internal continuity of INR, which is validated by the results of the temporal super-resolution $(4\times)$ , as shown in Fig.5. In addition, the super-resolution performance allows us to further speed up the data acquisition along the temporal axis for dy
200
+
201
+ ![](images/03899cb282586c4522d756a82bbf31a443a09fb964681eaaaca8d17c783a0c48.jpg)
202
+ Figure 3: The comparison of the reconstruction results on the cardiac cine dataset with 8 and 5 spokes per frame, which corresponds to the acceleration factors of 26 and 41.6. Zoomed-in views of the heart chambers are outlined by orange boxes and the y-t images (the 116th slice along y and temporal dimensions) are outlined by green boxes. The difference map between the reconstructed image and ground truth and PSNR/SSIM metrics are also shown.
203
+
204
+ ![](images/1bfef3f9eb0281bc82a31407304db05957aad562dcd980a71d8b9d57bdfe9d6f.jpg)
205
+ Figure 4: The comparison of the reconstruction results and ROI analysis among different methods on the DCE liver dataset with 34 spokes per frame (AF=11.3). (a) Reconstruction results at different contrast phases are visualized. The zoomed-in area outlined by orange boxes with the proposed method gives the best image quality with minimal noise among different methods. (b) Signal intensity-time curves of different methods are compared in aorta (AO) and portal vein (PV) areas, and the NUFFT result serves as the temporal fidelity reference.
206
+
207
+ namic MRI. Unlike the existing super-resolution methods, the INR-based method does not require extra modeling or training, but simply gives the denser coordinates, which reduces the computational burden and the reconstruction time usage during deployment.
208
+
209
+ The proposed method has a few limitations. First, the low
210
+
211
+ rank regularization adopted in this work is the nuclear norm, optimized with gradient-descent-based algorithms. However, as discussed in the work of Lingala et al. [8], naive nuclear norm minimization may not be stable enough for fast convergence. In future work, INR combined with alternative low-rank regularizers and optimization methods will be explored.
212
+
213
+ ![](images/cc11c83c9d1d7701b7e2cc65c3294cbccb50f3264cadb84ecdadcf611c570873.jpg)
214
+
215
+ ![](images/169869441b561f8a8a7ffa679eaef7c6063a815f0e77f4e3e51c3f5384e85e34.jpg)
216
+ Figure 5: (a) The pipeline of temporal super-resolution for the reconstructed dynamic MRI. For the given denser coordinates, the optimized function (Hash grids & MLP) outputs the interpolated frames. (b) The upsampled images between Frame 10 and Frame 11 of the cardiac cine dataset with 21 spokes per frame. Three equally-spaced coordinates to be generated (10.25, 10.5, 10.75) between Frame 10 and Frame 11 are fed to the network for temporal super-resolution $(4\times)$ . The ground truth of Frame 10 and Frame 11, and the linear interpolated frames serve as the reference. The reference and output images at the position of Frame 10 and 11 are outlined with orange boxes. The corresponding error maps are displayed at the bottom.
217
+
218
+ Second, the temporal super-resolution test shows that INR achieves a reasonable 4-fold temporal upsampling, but the smoothness observed at the edges of the heart chambers indicates its limitation for higher or even arbitrary super-resolution factors. Third, although the reconstruction is faster than that of the other unsupervised methods, it remains challenging for real-time imaging.
219
+
220
+ # 5. Conclusion
221
+
222
+ In this work, we proposed an INR-based unsupervised deep learning method for highly accelerated dynamic MRI reconstruction. The proposed method learns an implicit continuous representation function to represent the desired spatiotemporal image sequence, mapping spatiotemporal coordinates to the corresponding image intensities. The proposed method is training database-free and does not require prior information for the reconstruction. Several tests on retrospective cardiac and prospective DCE liver data showed that the proposed method could robustly produce a high-quality dynamic MR image sequence even at an extremely high acceleration rate $(41.6\times)$ . Additionally, benefiting from the internal continuity of the optimized INR network, the proposed method demonstrates impressive temporal super-resolution performance, upsampling the desired dynamic images to higher temporal rates than the physical acquisition. We thus believe that the INR-based method has the potential to further accelerate dynamic MRI acquisition in the future.
223
+
224
+ # References
225
+
226
+ [1] C. B. Marcu, A. M. Beek, A. C. van Rossum, Clinical applications of cardiovascular magnetic resonance imaging, CMAJ 175 (2006) 911-917.
227
+ [2] R. N. Low, Abdominal mri advances in the detection of liver tumours and characterisation, The Lancet Oncology 8 (2007) 525-535.
228
+ [3] C. Cuenod, D. Balvay, Perfusion and vascular permeability: Basic concepts and measurement in dce-ct and dce-mri, Diagnostic and Interventional Imaging 94 (2013) 1187–1204.
229
+ [4] S. M. Wright, M. P. McDougall, Single echo acquisition mri using rf encoding, NMR in Biomedicine 22 (2009) 982-993.
230
+ [5] H. Jung, K. Sung, K. S. Nayak, E. Y. Kim, J. C. Ye, k-t focuss: A general compressed sensing framework for high resolution dynamic mri, Magnetic Resonance in Medicine 61 (2009) 103-116.
231
+ [6] L. Feng, M. B. Srichai, R. P. Lim, A. Harrison, W. King, G. Adluru, E. V. R. Dibella, D. K. Sodickson, R. Otazo, D. Kim, Highly accelerated real-time cardiac cine mri using k-t sparse-sense, Magnetic Resonance in Medicine 70 (2013) 64-74.
232
+ [7] L. Feng, R. Grimm, K. T. Block, H. Chandarana, S. Kim, J. Xu, L. Axel, D. K. Sodickson, R. Otazo, Golden-angle radial sparse parallel mri: Combination of compressed sensing, parallel imaging, and golden-angle radial sampling for fast and flexible dynamic volumetric mri, Magnetic Resonance in Medicine 72 (2014) 707-717.
233
+ [8] S. G. Lingala, Y. Hu, E. DiBella, M. Jacob, Accelerated dynamic mri exploiting sparsity and low-rank structure: k-t slr, IEEE Transactions on Medical Imaging 30 (2011) 1042-1054.
234
+ [9] B. Zhao, J. P. Haldar, A. G. Christodoulou, Z.-P. Liang, Image reconstruction from highly undersampled (k, t)-space data with joint partial separability and sparsity constraints, IEEE Transactions on Medical Imaging 31 (2012) 1809-1820.
235
+ [10] R. Otazo, E. Candès, D. K. Sodickson, Low-rank plus sparse matrix decomposition for accelerated dynamic mri with separation of background and dynamic components, Magnetic Resonance in Medicine 73 (2015) 1125-1136.
236
+ [11] L. Feng, Q. Wen, C. Huang, A. Tong, F. Liu, H. Chandarana, Grasp-pro: improving grasp dce-mri through self-calibrating subspace-modeling and contrast phase automation, Magnetic Resonance in Medicine 83 (2020) 94-108.
237
+ [12] L. Feng, 4d golden-angle radial mri at subsecond temporal resolution, NMR in Biomedicine (2022) e4844.
238
+ [13] A. Bustin, N. Fuin, R. M. Botnar, C. Prieto, From compressed-sensing to artificial intelligence-based cardiac mri reconstruction, Frontiers in cardiovascular medicine 7 (2020) 17.
239
+ [14] S. Wang, Z. Su, L. Ying, X. Peng, S. Zhu, F. Liang, D. Feng, D. Liang, Accelerating magnetic resonance imaging via deep learning, in: IEEE 13th International Symposium on Biomedical Imaging (ISBI), 2016, pp. 514-517.
240
+ [15] Y. Han, J. Yoo, H. H. Kim, H. J. Shin, K. Sung, J. C. Ye, Deep learning with domain adaptation for accelerated projection-reconstruction mr, Magnetic Resonance in Medicine 80 (2018) 1189-1205.
241
+ [16] J. Schlemper, J. Caballero, J. V. Hajnal, A. Price, D. Rueckert, A deep cascade of convolutional neural networks for mr image reconstruction, in: International Conference on Information Processing in Medical Imaging, 2017, pp. 647-658.
242
+ [17] C. Qin, J. Schlemper, J. Caballero, A. N. Price, J. V. Hajnal, D. Rueckert, Convolutional recurrent neural networks for dynamic mr image reconstruction, IEEE Transactions on Medical Imaging 38 (2019) 280-290.
243
+ [18] C. M. Sandino, P. Lai, S. S. Vasanawala, J. Y. Cheng, Accelerating cardiac cine mri using a deep learning-based esprit reconstruction, Magnetic Resonance in Medicine 85 (2021) 152-167.
244
+ [19] W. Huang, Z. Ke, Z.-X. Cui, J. Cheng, Z. Qiu, S. Jia, L. Ying, Y. Zhu, D. Liang, Deep low-rank plus sparse network for dynamic mr imaging, Medical Image Analysis 73 (2021) 102190.
245
+ [20] Z. Huang, J. Bae, P. M. Johnson, T. Sood, L. Heacock, J. Fogarty, L. Moy, S. G. Kim, F. Knoll, A simulation pipeline to generate realistic breast images for learning dce-mri reconstruction, in: Machine Learning for Medical Image Reconstruction, 2021, pp. 45-53.
246
+ [21] Z. Ke, J. Cheng, L. Ying, H. Zheng, Y. Zhu, D. Liang, An unsupervised deep learning method for multi-coil cine mri, Physics in Medicine & Biology 65 (2020) 235041.
247
+ [22] J. Yoo, K. H. Jin, H. Gupta, J. Yerly, M. Stuber, M. Unser, Time
248
+
249
+ dependent deep image prior for dynamic mri, IEEE Transactions on Medical Imaging 40 (2021) 3337-3348.
250
+ [23] A. H. Ahmed, Q. Zou, P. Nagpal, M. Jacob, Dynamic imaging using deep bi-linear unsupervised representation (deblur), IEEE Transactions on Medical Imaging 41 (2022) 2693-2703.
251
+ [24] D. Ulyanov, A. Vedaldi, V. Lempitsky, Deep image prior, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2018, pp. 9446-9454.
252
+ [25] V. Sitzmann, J. N. P. Martel, A. W. Bergman, D. B. Lindell, G. Wetzstein, Implicit neural representations with periodic activation functions, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), 2020, pp. 7462-7473.
253
+ [26] B. Mildenhall, P. P. Srinivasan, M. Tancik, J. T. Barron, R. Ramamoorthi, R. Ng, Nerf: Representing scenes as neural radiance fields for view synthesis, in: European Conference on Computer Vision (ECCV), 2020, pp. 405-421.
254
+ [27] J. J. Park, P. Florence, J. Straub, R. Newcombe, S. Lovegrove, Deepsdf: Learning continuous signed distance functions for shape representation, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2019, pp. 165-174.
255
+ [28] T. Müller, F. Rousselle, J. Novák, A. Keller, Real-time neural radiance caching for path tracing, arXiv preprint arXiv:2106.12372 (2021).
256
+ [29] G. Zang, R. Idoughi, R. Li, P. Wonka, W. Heidrich, Intratomo: self-supervised learning-based tomography via sinogram synthesis and prediction, in: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021, pp. 1960-1970.
257
+ [30] A. W. Reed, H. Kim, R. Anirudh, K. A. Mohan, K. Champley, J. Kang, S. Jayasuriya, Dynamic ct reconstruction from limited views with implicit neural representations and parametric motion fields, in: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2021, pp. 2258-2268.
258
+ [31] Y. Sun, J. Liu, M. Xie, B. Wohlberg, U. S. Kamilov, Coil: Coordinate-based internal learning for tomographic imaging, IEEE Transactions on Computational Imaging 7 (2021) 1400-1412.
259
+ [32] L. Shen, J. Pauly, L. Xing, Nerp: Implicit neural representation learning with prior embedding for sparsely sampled image reconstruction, IEEE Transactions on Neural Networks and Learning Systems (2022) 1-13.
260
+ [33] L. Liu, J. Gu, K. Z. Lin, T.-S. Chua, C. Theobalt, Neural sparse voxel fields, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), NIPS'20, 2020, pp. 15651-15663.
261
+ [34] C. Sun, M. Sun, H. Chen, Direct voxel grid optimization: Superfast convergence for radiance fields reconstruction, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022, pp. 5459-5469.
262
+ [35] Sara Fridovich-Keil and Alex Yu, M. Tancik, Q. Chen, B. Recht, A. Kanazawa, Plenoxels: Radiance fields without neural networks, in: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), 2022, pp. 5491-5500.
263
+ [36] M. Tancik, P. P. Srinivasan, B. Mildenhall, S. Fridovich-Keil, N. Raghavan, U. Singhal, R. Ramamoorthi, J. T. Barron, R. Ng, Fourier features let networks learn high frequency functions in low dimensional domains, in: Proceedings of the 34th International Conference on Neural Information Processing Systems (NIPS), 2020, pp. 7537-7547.
264
+ [37] T. Müller, A. Evans, C. Schied, A. Keller, Instant neural graphics primitives with a multiresolution hash encoding, ACM Transactions on Graphics 41 (2022) 102:1-102:15.
265
+ [38] J. Lehtinen, J. Munkberg, J. Hasselgren, S. Laine, T. Karras, M. Aittala, T. Aila, Noise2noise: Learning image restoration without clean data, in: International Conference on Machine Learning (ICML), volume 80, 2018, pp. 2965-2974.
266
+ [39] D. P. Kingma, J. Ba, Adam: A method for stochastic optimization, arXiv preprint arXiv:1412.6980 (2014).
267
+ [40] M. J. Muckley, R. Stern, T. Murrell, F. Knoll, TorchKbNufft: A high-level, hardware-agnostic non-uniform fast Fourier transform, in: ISMRM Workshop on Data Sampling & Image Reconstruction, 2020.
268
+ [41] C. Chen, Y. Liu, P. Schniter, M. Tong, K. Zareba, O. Simonetti, L. Potter, R. Ahmad, Ocmr (v1. 0)-open-access multi-coil k-space dataset for cardiovascular magnetic resonance imaging, arXiv preprint arXiv:2008.03410 (2020).
269
+ [42] H. Chandarana, L. Feng, T. K. Block, A. B. Rosenkrantz, R. P. Lim, J. S. Babb, D. K. Sodickson, R. Otazo, Free-breathing contrast-enhanced mul
270
+
271
+ tiphase mri of the liver using a combination of compressed sensing, parallel imaging, and golden-angle radial sampling, Investigative radiology 48 (2013) 10-16.
272
+ [43] M. Uecker, P. Lai, M. J. Murphy, P. Virtue, M. Elad, J. M. Pauly, S. S. Vasanawala, M. Lustig, Espirit—an eigenvalue approach to autocalibrating parallel mri: Where sense meets grappa, Magnetic Resonance in Medicine 71 (2014) 990-1001.
2301.00xxx/2301.00127/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:afb8b7433fde2fa5717408b7f3415848f794816af741815617a2d5a0d88713de
3
+ size 816834
2301.00xxx/2301.00127/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00130/01c945fb-cab9-4f7c-a745-0c790477032b_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00130/01c945fb-cab9-4f7c-a745-0c790477032b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00130/01c945fb-cab9-4f7c-a745-0c790477032b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4458e6a9858895e88fb87190c4a589e6a0db994770f1cc2cf60d3f1071ca7a4c
3
+ size 666075
2301.00xxx/2301.00130/full.md ADDED
@@ -0,0 +1,486 @@
1
+ # Accuracy-Guaranteed Collaborative DNN Inference in Industrial IoT via Deep Reinforcement Learning
2
+
3
+ Wen Wu, Member, IEEE, Peng Yang, Member, IEEE, Weiting Zhang, Student Member, IEEE, Conghao Zhou, Student Member, IEEE, and Xuemin (Sherman) Shen, Fellow, IEEE
4
+
5
+ Abstract—Collaboration among industrial Internet of Things (IoT) devices and edge networks is essential to support computation-intensive deep neural network (DNN) inference services, which require low delay and high accuracy. Sampling rate adaption, which dynamically configures the sampling rates of industrial IoT devices according to network conditions, is the key to minimizing the service delay. In this paper, we investigate the collaborative DNN inference problem in industrial IoT networks. To capture the channel variation and task arrival randomness, we formulate the problem as a constrained Markov decision process (CMDP). Specifically, sampling rate adaption, inference task offloading and edge computing resource allocation are jointly considered to minimize the average service delay while guaranteeing the long-term accuracy requirements of different inference services. Since CMDP cannot be directly solved by general reinforcement learning (RL) algorithms due to the intractable long-term constraints, we first transform the CMDP into an MDP by leveraging the Lyapunov optimization technique. Then, a deep RL-based algorithm is proposed to solve the MDP. To expedite the training process, an optimization subroutine is embedded in the proposed algorithm to directly obtain the optimal edge computing resource allocation. Extensive simulation results are provided to demonstrate that the proposed RL-based algorithm can significantly reduce the average service delay while preserving long-term inference accuracy with a high probability.
6
+
7
+ Index Terms—Sampling rate adaption, inference accuracy, collaborative DNN Inference, deep reinforcement learning.
8
+
9
+ # I. INTRODUCTION
10
+
11
+ With the development of advanced neural network techniques and ubiquitous industrial Internet of Things (IoT) devices, deep neural network (DNN) is widely applied in extensive industrial IoT applications, such as facility monitoring and fault diagnosis [1]. Industrial IoT devices (e.g., vibration sensors) can sense the industrial operating environment and feed sensing data to a DNN, and then the DNN processes the sensing data and renders inference results, namely DNN inference. Although DNN inference can achieve high inference accuracy as compared to traditional alternatives (e.g., decision tree), executing DNN inference tasks requires
12
+
13
+ W. Wu, C. Zhou, and X. Shen are with the Department of Electrical and Computer Engineering, University of Waterloo, 200 University Avenue West, Waterloo, ON N2L 3G1, Canada (email: {w77wu, c89zhou, sshen}@uwaterloo.ca).
14
+ P. Yang (Corresponding author) is with the School of Electronic Information and Communications, Huazhong University of Science and Technology, Wuhan, 430074, P.R. China (email: yangpeng@hust.edu.cn).
15
+ W. Zhang is with the School of Electronic and Information Engineering, Beijing Jiaotong University, Beijing 100044, P.R. China (email: 17111018@bjtu.edu.cn).
16
+
17
+ extensive computation resources due to tremendous multiply-and-accumulate operations [2]. A device-only solution, which purely executes DNN inference tasks at resource-constrained industrial IoT devices, becomes intractable due to prohibitive energy consumption and a high service delay. For example, processing an image using AlexNet incurs up to $0.45\mathrm{W}$ energy consumption [3]. An edge-only solution, which purely offloads large-volume sensing data to resource-rich edge nodes, e.g., an access point (AP), suffers from an unpredictable service delay due to the time-varying wireless channel [4]. Hence, neither a device-only nor an edge-only solution can effectively support low-delay DNN inference services.
18
+
19
+ Collaborative inference, which coordinates resource-constrained industrial IoT devices and the resource-rich AP, becomes a de-facto paradigm to provide low-delay and high-accuracy inference services [5]. Within the collaborative inference, sensing data from industrial IoT devices can be either processed locally or offloaded to the AP. At industrial IoT devices, light-weight compressed DNNs (i.e., neural networks are compressed without significantly decreasing their performance) are deployed due to constrained on-board computing capability, which saves computing resource at the cost of inference accuracy [6], [7]. At the AP, uncompressed DNNs are deployed to provide high-accuracy inference services at the cost of network resources. Through the resource allocation (e.g., task offloading) between industrial IoT devices and the AP, the overall service performance can be enhanced.
20
+
21
+ However, the sampling rate adaption technique that dynamically configures the sampling rates of industrial IoT devices is seldom considered. By dynamically adjusting the sampling rates according to channel conditions and the AP's workload, sensing data from industrial IoT devices can be compressed, thereby reducing not only the offloaded data volume, but also the task computation workload. In our experiments, we implement AlexNet to conduct bearing fault diagnosis based on the collected bearing vibration signal from the dataset [8].<sup>1</sup> As shown in Fig. 1, the inference accuracy grows sub-linearly with the sampling rate. For example, when the sampling rate increases from $18\mathrm{KHz}$ to $24\mathrm{KHz}$, the accuracy increases from $95\%$ to $98.7\%$. Hence, when the channel condition is poor or the edge computation workload is heavy, decreasing the sampling rate can reduce the offloaded data volume and the requested computation workload, thereby reducing the service delay at the cost of limited inference accuracy.
22
+
23
+ <sup>1</sup>The experiment is conducted on an open-source dataset [8]. This dataset collects the vibration signal of drive end bearings at a sampling rate of 48 KHz, and there are 10 types of possible faults.
24
+
25
+ When the channel condition is good and the edge computation workload is light, increasing the sampling rate can help deliver a high-accuracy service with an acceptable service delay. Hence, sampling rate adaption can effectively reduce the service delay, and should be incorporated in the collaborative DNN inference.
26
+
27
+ The sampling rate adaption and resource allocation for collaborative DNN inference are entangled with the following challenges. Firstly, due to time-varying channel conditions and random task arrivals, sampling rate and resource allocation should be dynamically adjusted to achieve the minimum service delay. Minimizing the long-term service delay requires the stochastic information of network dynamics. Secondly, in addition to minimizing the service delay, the long-term accuracy requirements should be guaranteed for different inference services. The long-term accuracy performance is determined by decisions of sampling rate adaption and resource allocation over time, and hence the optimal decisions require future network information. To address the above two challenges, a reinforcement learning (RL) technique is leveraged to interact with the unknown environment to capture the network dynamics, and then a Lyapunov optimization technique is utilized within the RL framework to guarantee the long-term accuracy requirements without requiring future network information.
28
+
29
+ In this paper, we investigate the collaborative DNN inference problem in industrial IoT networks. Firstly, we formulate the problem as a constrained Markov decision process (CMDP) to account for time-varying channel conditions and random task arrivals. Specifically, sampling rates of industrial IoT devices, task offloading, and edge computing resource allocation are optimized to minimize the average service delay while guaranteeing the long-term accuracy requirements of multiple services. Secondly, since traditional RL algorithms aim at optimizing a long-term reward without considering policy constraints, they cannot be applied to solve a CMDP with long-term constraints. To solve the problem, we transform the CMDP into an MDP via the Lyapunov optimization technique. The core idea is to construct accuracy deficit queues to characterize the satisfaction status of the long-term accuracy constraints, thereby guiding the learning agent to meet the long-term accuracy constraints. Thirdly, to solve the MDP, a learning-based algorithm is developed based on the deep deterministic policy gradient (DDPG) algorithm. Within the learning algorithm, to reduce the training complexity, edge computing resource allocation is directly solved via an optimization subroutine based on convex optimization theory, since it only impacts the one-shot delay performance according to theoretical analysis. Extensive simulations are conducted to validate the effectiveness of the proposed algorithm in reducing the average service delay while preserving the long-term accuracy requirements.
30
+
31
+ Our main contributions in this paper are summarized as follows:
32
+
33
+ - We formulate the collaborative DNN inference problem as a CMDP, in which the objective is to minimize the average service delay while guaranteeing the long-term accuracy constraints;
34
+
35
+ ![](images/0e64792af370eb8eb2aa2c592e488ae9dfa89ead162dd1ed6fdcc975b9526ddb.jpg)
36
+ Fig. 1. Inference accuracy with respect to sampling rates on the bearing vibration dataset [8].
37
+
38
+ - We transform the CMDP into an MDP via the Lyapunov optimization technique, which constructs accuracy deficit queues to characterize the satisfaction status of the long-term accuracy constraints;
39
+ - We propose a deep RL-based algorithm to make the optimal sampling rate adaption and resource allocation decisions. To reduce the training complexity, an optimization subroutine is embedded in the proposed algorithm for the optimal edge computing resource allocation.
40
+
41
+ The remainder of this paper is organized as follows. Section II reviews related works. The system model and problem formulation are presented in Section III. Section IV proposes a learning-based solution. Simulation results are given in Section V. Finally, Section VI concludes this paper.
42
+
43
+ # II. RELATED WORK
44
+
45
+ DNN inference for resource-constrained industrial IoT devices has garnered much attention recently. A device-only solution aims to facilitate DNN inference services resorting to on-board computing resources. To reduce the computational complexity, DNN compression techniques are applied, such as weight pruning [6] and knowledge distillation [9]. Considering the widely-equipped energy-harvesting functionality in IoT devices, Gobieski et al. designed a light-weight DNN inference model, which can dynamically compress the model size in order to balance inference accuracy and energy efficiency [2]. In another line of research, edge-assisted DNN inference solutions can provide high-accuracy inference services by utilizing powerful edge computing servers. To facilitate low-delay and accurate DNN-based video analytics, Yang et al. proposed an online video quality and computing resource allocation strategy to maximize video analytic accuracy [10]. Another inspiring work proposed a novel device-edge collaborative inference scheme, in which the DNN model is partitioned and deployed at both the device and the edge, and intermediate results are transferred via wireless links [5]. The above works can provide possible resource allocation solutions to enhance DNN inference performance. Different from existing works, our work takes the sampling rate adaption of industrial IoT devices into account, aiming at providing accuracy-guaranteed inference services in dynamic industrial IoT networks.
46
+
47
+ RL algorithms have been widely applied in allocating network resources in wireless networks, such as service
48
+
49
+ ![](images/5fc446781674aaad7606ca5a68755df4b9bb77bfcd37f5357a50cb61c3ec8957.jpg)
50
+ Fig. 2. The collaborative DNN inference framework for industrial IoT devices.
51
+
52
+ migration in vehicular networks [11], network slicing in cellular networks [12], content caching in edge networks [13], and task scheduling in industrial IoT networks [14]. Hence, RL algorithms are considered plausible solutions to manage network resources for DNN inference services. However, DNN inference services require minimizing the average delay while satisfying long-term accuracy constraints. Traditional RL algorithms, e.g., DDPG, can be applied to solve MDPs, in which learning agents seek to optimize a long-term reward without policy constraints, but they cannot deal with constrained long-term optimization problems [15], [16]. Our proposed deep RL-based algorithm can address long-term constraints within the RL framework by modifying the reward based on the Lyapunov optimization technique. In addition, an optimization subroutine is embedded in our algorithm to further reduce the training complexity.
53
+
54
+ # III. SYSTEM MODEL AND PROBLEM FORMULATION
55
+
56
+ # A. Network Model
57
+
58
+ As shown in Fig. 2, we consider a wireless network with one AP to serve multiple types of industrial IoT devices. The AP is in charge of collecting network information and resource orchestration within the network. Consider $M$ types of inference services, denoted by a set $\mathcal{M}$ , such as facility fault diagnosis and facility monitoring services. Taking the facility fault diagnosis service as an example, vibration sensors installed on industrial IoT devices sense the operating conditions at a sampling rate, and feed the sensed vibration signal into a DNN, then the DNN diagnoses the facility fault type. The set of industrial IoT devices subscribed to service $m$ is denoted by $\mathcal{N}_m$ , and the set of all industrial IoT devices is denoted by $\mathcal{N} = \cup_{m \in \mathcal{M}} \mathcal{N}_m$ . In the collaborative inference framework, two types of DNNs are deployed. One is a compressed DNN, which is deployed at industrial IoT devices. The compressed DNN can be implemented via the weight pruning technique, which prunes less-important weights to reduce computational complexity while maintaining similar inference accuracy [6]. The other is an uncompressed DNN, which is deployed at the AP. In this way, $M$ types of uncompressed DNNs share the edge computing resource to serve different inference requests. Important notations are summarized in Table I.
59
+
60
+ Table I SUMMARY OF NOTATIONS.
61
+
62
+ <table><tr><td>Notation</td><td>Description</td></tr><tr><td>Am</td><td>Achieved instantaneous accuracy of service m</td></tr><tr><td>Amth</td><td>Long-term accuracy requirement of service m</td></tr><tr><td>Bt</td><td>Local computing queue backlog in time slot t</td></tr><tr><td>c</td><td>Computing resource allocation decision vector</td></tr><tr><td>D</td><td>Service delay</td></tr><tr><td>L(·)</td><td>Lyapunov function</td></tr><tr><td>o</td><td>Task offloading decision vector</td></tr><tr><td>Qt</td><td>Edge computing queue backlog in time slot t</td></tr><tr><td>V</td><td>Parameter to balance delay and accuracy requirement</td></tr><tr><td>X</td><td>Sampling rate selection decision matrix of all devices</td></tr><tr><td>Zt</td><td>Accuracy deficit queue backlog in time slot t</td></tr><tr><td>ξn</td><td>Raw task data size of device n</td></tr><tr><td>ηm</td><td>Task computation intensity of service m</td></tr><tr><td>λn</td><td>Average task arrival rate of device n</td></tr><tr><td>ζ(xnt)</td><td>Task data size of device n in time slot t</td></tr><tr><td>Ψt</td><td>Amount of dropped tasks in computing queues</td></tr></table>
63
+
64
+ The collaborative DNN inference framework operates in a time-slotted manner. Let $t$ denote the time index, where $t \in \mathcal{T} = \{1,2,\dots,T\}$ . The detailed procedure is given as follows.
65
+
66
+ 1) Sampling rate selection: Industrial IoT devices first select their sampling rates according to channel conditions and computation workloads. The set of candidate sampling rates is denoted by $\mathcal{K} = \{\theta_1, \theta_2, \dots, \theta_K\}$ , where $\theta_K$ denotes the raw sampling rate. We assume the sampling rate in $\mathcal{K}$ increases linearly with the index, i.e., $\theta_k = k\theta_K / K$ . Let $\mathbf{X}^t$ denote the sampling rate decision matrix in time slot $t$ , whose element $x_{n,k}^t = 1$ indicates industrial IoT device $n \in \mathcal{N}$ selects the $k$ -th sampling rate.
67
+ 2) Task processing: The sensing data from industrial IoT devices within a time slot is deemed as a computation task, which can be either offloaded to the AP or executed locally. Let $\mathbf{o}^t\in \mathbb{R}^{|\mathcal{N}|\times 1}$ denote the offloading decision vector in time slot $t$ , whose element $o_n^t = 0$ indicates offloading the computation task from industrial IoT device $n$ . Otherwise, $o_n^t = 1$ indicates executing the computation task locally.
68
+
69
+ # B. Service Delay Model
70
+
71
+ A computation task can be either processed locally or offloaded to the AP. In what follows, we analyze the service delay in these two cases.
72
+
73
+ 1) Executing locally: Let $\lambda_{n}^{t}$ denote the task arrival rate of the $n$ -th industrial IoT device in time slot $t$ , which is assumed to follow a general random distribution. The raw data size of the generated tasks at the $n$ -th device is denoted by $\xi_{n}^{t} = \lambda_{n}^{t}\nu_{m}, \forall n \in \mathcal{N}_{m}$ , where $\nu_{m}$ denotes the raw data size of a task for service $m$ . After the sampling rate is selected, the data size of the generated task is represented by $\zeta(\mathbf{x}_n^t) = \sum_{k=1}^{K} x_{n,k}^t \xi_n^t k / K$ , where $\mathbf{x}_n^t = \{x_{n,k}^t\}_{k \in \mathcal{K}}$ is the sampling rate selection decision vector of the $n$ -th device.
74
+
75
+ When the inference task is processed locally by a compressed DNN, the service delay includes the queuing delay in the local computing queue and the task processing delay, which is given by
76
+
77
+ $$
78
+ d_{n,l}^{t} = \frac{o_{n}^{t} \eta_{m,c} \left(B_{n}^{t} + \zeta\left(\mathbf{x}_{n}^{t}\right)\right)}{f_{n}}, \forall n \in \mathcal{N}_{m}, \tag{1}
79
+ $$
80
+
81
+ where $f_{n}$ is the CPU frequency of the $n$ -th industrial IoT device, and $\eta_{m,c}$ denotes the computation intensity of the compressed DNN for the $m$ -th service. Here, $B_{n}^{t}$ is the backlogged computation tasks (in bits) in the local computing queue, which is updated via
82
+
83
+ $$
84
+ B _ {n} ^ {t + 1} = \min \left\{\left[ B _ {n} ^ {t} + o _ {n} ^ {t} \zeta \left(\mathbf {x} _ {n} ^ {t}\right) - \frac {f _ {n} \tau}{\eta_ {m , c}} \right] ^ {+}, B _ {n} ^ {\max } \right\}, \tag {2}
85
+ $$
86
+
87
+ where $[x]^+ = \max \{x, 0\}$ , $B_n^{max}$ is the capacity of the local computing queue, and $\tau$ is the duration of a time slot. Tasks will be dropped if the local computing queue is full. Let
88
+
89
+ $$
90
+ \Psi_ {b, n} ^ {t} = \max \left\{B _ {n} ^ {t} + o _ {n} ^ {t} \zeta \left(\mathbf {x} _ {n} ^ {t}\right) - \frac {f _ {n} \tau}{\eta_ {m , c}} - B _ {n} ^ {\max }, 0 \right\} \tag {3}
91
+ $$
92
+
93
+ denote the amount of dropped tasks in the local computing queue of device $n$ . Here, $\Psi_{b,n}^{t} > 0$ indicates that a local computing queue overflow event occurs at the $n$ -th device, and a corresponding penalty is incurred to discourage queue overflow.
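To make the local-execution model concrete, the following minimal NumPy sketch simulates the task data size $\zeta(\mathbf{x}_n^t)$, the local service delay in (1), the queue update in (2), and the dropped amount in (3) for a single device; all parameter values are hypothetical and chosen only for illustration.

```python
import numpy as np

# Hypothetical per-device parameters (illustrative values only).
K = 4            # number of candidate sampling rates
xi_raw = 768e3   # raw task data size in bits (e.g., 48 kHz x 16 bit x 1 s)
eta_c = 80.0     # compressed-DNN computation intensity (cycles/bit)
f_n = 0.1e9      # device CPU frequency (cycles/s)
tau = 1.0        # time-slot duration (s)
B_max = 3.84e6   # local computing queue capacity (bits)

def task_size(x_onehot, xi):
    """zeta(x_n^t): task data size after sampling-rate selection."""
    k = int(np.argmax(x_onehot)) + 1   # index of the selected sampling rate
    return xi * k / K

def local_step(B, x_onehot, o, xi):
    """One slot of local execution: delay (1), queue update (2), drops (3)."""
    zeta = task_size(x_onehot, xi)
    d_local = o * eta_c * (B + zeta) / f_n            # eq. (1)
    served = f_n * tau / eta_c                        # bits processed per slot
    backlog = B + o * zeta - served
    B_next = min(max(backlog, 0.0), B_max)            # eq. (2)
    dropped = max(backlog - B_max, 0.0)               # eq. (3)
    return d_local, B_next, dropped

B = 0.0
x = np.array([0, 0, 1, 0])   # select the third candidate sampling rate
for t in range(3):
    d, B, psi = local_step(B, x, o=1, xi=xi_raw)
    print(f"slot {t}: delay {d:.2f} s, backlog {B / 1e6:.2f} Mb, dropped {psi / 1e6:.2f} Mb")
```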
94
+
95
+ 2) Offloading to AP: When a task is offloaded to the AP, it will be processed by an uncompressed DNN. The service delay consists of task offloading delay, queuing delay in the edge computing queue, and task processing delay, which are analyzed respectively as follows.
96
+
97
+ - Task offloading delay: For the $n$ -th industrial IoT device, the offloading delay is given by
98
+
99
+ $$
100
+ d _ {n, o} ^ {t} = \frac {\left(1 - o _ {n} ^ {t}\right) \zeta \left(\mathbf {x} _ {n} ^ {t}\right)}{R _ {n} ^ {t}}, \tag {4}
101
+ $$
102
+
103
+ where transmission rate between the $n$ -th industrial IoT device and the AP, $R_{n}^{t}$ , is given by
104
+
105
+ $$
106
+ R _ {n} ^ {t} = \frac {W}{N} \log_ {2} \left(1 + \frac {P _ {T} G \left(H _ {n} ^ {t}\right)}{N _ {f} \sigma^ {2}}\right). \tag {5}
107
+ $$
108
+
109
+ Here, $W$ , $P_{T}$ , $G(H_{n}^{t})$ , and $N_{f}$ represent the system bandwidth, transmit power, channel gain, and noise figure, respectively, and $\sigma^2 = N_o W / N$ denotes the background noise power, where $N_{o}$ is the thermal noise spectral density. The channel gain $G(H_{n}^{t})$ varies with the channel state $H_{n}^{t}$ . Based on extensive real-time measurements, the channel state $H_{n}^{t}$ can be modeled with a finite set of channel states $\mathcal{H}$ [17]. The evolution of the channel states is characterized by a discrete-time and ergodic Markov chain model, whose transition matrix is $\mathbf{P} \in \mathbb{R}^{|\mathcal{H}| \times |\mathcal{H}|}$ . A small numerical sketch of this rate model and the channel-state evolution is given after the remaining delay components below.
110
+
111
+ - Task processing delay: The tasks from all industrial IoT devices subscribed to the $m$ -th service are placed in the edge computing queue for the $m$ -th service. The amount of aggregated tasks is given by $\sum_{n \in \mathcal{N}_m} (1 - o_n^t) \zeta(\mathbf{x}_n^t)$ . The computing resource is dynamically allocated among multiple services at the AP according to service task arrivals, which can be realized via containerization techniques, such as Docker and Kubernetes [18].
112
+
113
+ Let $\mathbf{c}^t \in \mathbb{R}^{M\times 1}$ denote the computing resource allocation decision vector in time slot $t$ . Each element $0\leq c_m^t\leq 1$ denotes the portion of computing resource allocated to the $m$ -th service. Hence, the processing delay is given by
114
+
115
+ $$
116
+ d _ {n, p} ^ {t} = \frac {\eta_ {m , u} \left(1 - o _ {n} ^ {t}\right) \zeta \left(\mathbf {x} _ {n} ^ {t}\right)}{c _ {m} ^ {t} f _ {b}}, \forall n \in \mathcal {N} _ {m}, \tag {6}
117
+ $$
118
+
119
+ where $f_{b}$ is the CPU frequency of the computing server at the AP, and $\eta_{m,u}$ denotes the computation intensity of processing the $m$ -th service task by the uncompressed DNN. Note that $\eta_{m,u} > \eta_{m,c}$ , since the uncompressed DNN consumes more computing resource.
120
+
121
+ - Queuing delay: The queuing delay consists of two components: (i) the time taken to process backlogged tasks in the edge computing queue, which is given by
122
+
123
+ $$
124
+ d _ {n, q} ^ {t} = \frac {Q _ {m} ^ {t} \eta_ {m , u}}{c _ {m} ^ {t} f _ {b}}, \forall n \in \mathcal {N} _ {m}. \tag {7}
125
+ $$
126
+
127
+ Here, $Q_{m}^{t}$ denotes the edge computing queue backlog for the $m$ -th service in time slot $t$ , which is updated according to
128
+
129
+ $$
130
+ Q _ {m} ^ {t + 1} = \min \left\{\left[ Q _ {m} ^ {t} + a _ {m} ^ {t} - \frac {c _ {m} ^ {t} f _ {b} \tau}{\eta_ {m , u}} \right] ^ {+}, Q _ {m} ^ {\max } \right\}. \tag {8}
131
+ $$
132
+
133
+ Here, $a_{m}^{t} = \sum_{n\in \mathcal{N}_{m}}(1 - o_{n}^{t})\zeta (\mathbf{x}_{n}^{t})$ and $Q_{m}^{\max}$ denotes the capacity of the $m$ -th edge computing queue. Similar to that in local computing queues, tasks will also be dropped if the edge computing queue is full, and the amount of dropped tasks for the $m$ -th edge computing queue is given by
134
+
135
+ $$
136
+ \Psi_ {q, m} ^ {t} = \max \left\{Q _ {m} ^ {t} + a _ {m} ^ {t} - \frac {c _ {m} ^ {t} f _ {b} \tau}{\eta_ {m , u}} - Q _ {m} ^ {\text {m a x}}, 0 \right\}. \tag {9}
137
+ $$
138
+
139
+ Here, $\Psi_{q,m}^{t} > 0$ indicates that an event of edge computing queue overflow occurs; and (ii) average waiting time among all newly arrived tasks until the task of industrial IoT device $n$ is processed, which is given by
140
+
141
+ $$
142
+ d _ {n, w} ^ {t} = \frac {\eta_ {m , u} \sum_ {i \neq n , i \in \mathcal {N} _ {m}} \left(1 - o _ {i} ^ {t}\right) \zeta \left(\mathbf {x} _ {i} ^ {t}\right)}{2 c _ {m} ^ {t} f _ {b}}. \tag {10}
143
+ $$
144
+
145
+ Here, $\sum_{i\neq n,i\in \mathcal{N}_m}\left(1 - o_i^t\right)\zeta \left(\mathbf{x}_i^t\right)$ denotes the amount of aggregated tasks excluding the task of industrial IoT device $n$ .
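As referenced above, the rate model in (5) and the Markov evolution of the channel state can be illustrated with the short sketch below. The per-state channel gains are assumed values, since the model only specifies that $G(H_n^t)$ depends on the channel state, and the remaining numbers loosely follow the simulation setup described later.

```python
import numpy as np

rng = np.random.default_rng(0)

# Hypothetical link parameters (illustrative values only).
W = 20e6      # system bandwidth (Hz)
N_dev = 10    # number of devices sharing the bandwidth
P_T = 10 ** (20 / 10) * 1e-3      # 20 dBm transmit power, in watts
N_f = 10 ** (5 / 10)              # 5 dB noise figure, linear scale
N_o = 10 ** (-174 / 10) * 1e-3    # thermal noise density, W/Hz

# Assumed channel gains per state; the model only states that G(H) varies with H.
gain = {"G": 1e-9, "N": 3e-10, "B": 1e-10}
states = ["G", "N", "B"]
P = np.array([[0.30, 0.70, 0.00],
              [0.25, 0.50, 0.25],
              [0.00, 0.70, 0.30]])   # discrete-time Markov chain of channel states

def rate(h_state):
    """Uplink transmission rate R_n^t of eq. (5)."""
    bw = W / N_dev
    sigma2 = N_o * bw                      # background noise power
    snr = P_T * gain[h_state] / (N_f * sigma2)
    return bw * np.log2(1.0 + snr)

h = "N"
for t in range(4):
    print(f"slot {t}: state {h}, rate {rate(h) / 1e6:.2f} Mbps")
    h = str(rng.choice(states, p=P[states.index(h)]))   # sample next channel state
```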
146
+
147
+ Taking both local execution and offloading into account, the service delay in time slot $t$ is given by
148
+
149
+ $$
150
+ \begin{array}{l} D \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}, \mathbf {c} ^ {t}\right) = \sum_ {n \in \mathcal {N}} \left(d _ {n, l} ^ {t} + d _ {n, o} ^ {t} + d _ {n, p} ^ {t} + d _ {n, q} ^ {t} + d _ {n, w} ^ {t}\right) \\ + w _ {p} \left(\sum_ {n \in \mathcal {N}} \mathbb {1} _ {\left\{\Psi_ {b, n} ^ {t} > 0 \right\}} + \sum_ {m \in \mathcal {M}} \mathbb {1} _ {\left\{\Psi_ {q, m} ^ {t} > 0 \right\}}\right), \tag {11} \\ \end{array}
151
+ $$
152
+
153
+ where $\mathbb{1}_{\{x\}}$ is the indicator function, which equals 1 if condition $x$ holds and 0 otherwise, and $w_{p} > 0$ is the unit penalty cost for queue overflow. The first term represents the experienced delay to complete all tasks in time slot $t$ . The second term represents the penalty for potential overflow events in the local and edge computing queues.
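For illustration, the edge-side delay components (4), (6), (7), (10), the edge queue dynamics (8)-(9), and the per-slot delay assembly of (11) can be sketched as follows for a single service; the numbers are hypothetical, and the local delays are assumed to be precomputed as in the earlier local-execution sketch.

```python
import numpy as np

# Hypothetical edge-side parameters for one service m (illustrative values only).
eta_u = 200.0    # uncompressed-DNN computation intensity (cycles/bit)
f_b = 2e9        # edge server CPU frequency (cycles/s)
tau = 1.0        # time-slot duration (s)
Q_max = 19.2e6   # edge computing queue capacity (bits)
w_p = 1.0        # unit penalty for a queue overflow event

def edge_service_delays(zeta, o, R, Q, c_m):
    """Edge-side delays (4), (6), (7), (10) and queue update (8)-(9).

    zeta, o, R are per-device arrays of task sizes, offloading decisions
    (1 = local, 0 = offload) and uplink rates for the devices of service m;
    Q is the current edge queue backlog and c_m the allocated resource share.
    """
    offloaded = (1 - o) * zeta
    d_off = offloaded / R                                        # eq. (4)
    d_proc = eta_u * offloaded / (c_m * f_b)                     # eq. (6)
    d_queue = np.full_like(zeta, Q * eta_u / (c_m * f_b))        # eq. (7)
    d_wait = eta_u * (offloaded.sum() - offloaded) / (2 * c_m * f_b)   # eq. (10)
    backlog = Q + offloaded.sum() - c_m * f_b * tau / eta_u
    Q_next = min(max(backlog, 0.0), Q_max)                       # eq. (8)
    dropped = max(backlog - Q_max, 0.0)                          # eq. (9)
    return d_off + d_proc + d_queue + d_wait, Q_next, dropped

# Example with three devices of one service.
zeta = np.array([576e3, 384e3, 768e3])    # task sizes in bits
o = np.array([1, 0, 0])                   # device 0 computes locally
R = np.array([8e6, 12e6, 6e6])            # uplink rates in bits/s
d_local = np.array([0.46, 0.0, 0.0])      # local delays, e.g., from eq. (1)

d_edge, Q_next, psi_q = edge_service_delays(zeta, o, R, Q=1e6, c_m=0.5)
D_slot = d_local.sum() + d_edge.sum() + w_p * float(psi_q > 0)   # eq. (11), one service
print(f"per-slot delay D = {D_slot:.2f} s, next edge backlog = {Q_next / 1e6:.2f} Mb")
```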
154
+
155
+ # C. Inference Accuracy Model
156
+
157
+ The inference accuracy depends on the sampling rate of a task and the type of DNN that executes a task. Firstly, we characterize the relationship between the inference accuracy and the sampling rate, which is specified by accuracy function $g(\theta_k), \forall \theta_k \in \mathcal{K}$ . Specifically, we implement a DNN inference algorithm, i.e., AlexNet [19], and apply the AlexNet to diagnose facility fault type based on the collected bearing vibration signal from the dataset [8], and then measure the accuracy function values with respect to sampling rates, as shown in Fig. 1. Secondly, the relationship between the inference accuracy and the type of DNN is also characterized via experiments. Here, $h_{m,c}$ and $h_{m,u}$ represent the inference accuracy of the compressed DNN and the uncompressed DNN for the $m$ -th service, respectively. Note that, $h_{m,c} < h_{m,u}$ , as an uncompressed DNN achieves higher fault diagnosis accuracy.
158
+
159
+ Since the DNN model selection (i.e., task offloading decision) and the sampling rate selection are independent, inference accuracy is the product of the accuracy value with respect to the selected sampling rate and the accuracy value with respect to the selected DNN type, i.e., $g\left(\sum_{k \in \mathcal{K}} x_{n,k}^{t} \theta_{k}\right)\left(o_{n}^{t} h_{m,c} + \left(1 - o_{n}^{t}\right) h_{m,u}\right)$ . Hence, the average inference accuracy for the $m$ -th service in time slot $t$ can be given by
160
+
161
+ $$
+ A_{m}\left(\mathbf{X}^{t}, \mathbf{o}^{t}\right) = \frac{1}{|\mathcal{N}_{m}|} \sum_{n \in \mathcal{N}_{m}} g\left(\sum_{k \in \mathcal{K}} x_{n,k}^{t} \theta_{k}\right) \left(o_{n}^{t} h_{m,c} + \left(1 - o_{n}^{t}\right) h_{m,u}\right). \tag{12}
+ $$
168
+
169
+ Note that the model can be readily extended to cases when other inference methods are adopted, since the accuracy values with respect to sampling rates and DNN types are obtained via practical experiments.
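A small sketch of the accuracy model (12) is given below; it uses the accuracy values measured at the four candidate sampling rates reported in Section V, and linear interpolation between these measured points is only an illustrative assumption.

```python
import numpy as np

# Accuracy g(theta) at the four candidate sampling rates reported in Section V
# (25%, 50%, 75%, and 100% of the raw rate); interpolation between the
# measured points is an illustrative assumption.
rates = np.array([0.25, 0.50, 0.75, 1.00])   # fraction of the raw sampling rate
g_vals = np.array([0.590, 0.884, 0.950, 0.987])

def g(theta_frac):
    return np.interp(theta_frac, rates, g_vals)

def service_accuracy(x_sel, o, h_c, h_u):
    """Average inference accuracy A_m of eq. (12) for the devices of one service.

    x_sel: selected sampling-rate fraction per device; o: offloading decision
    (1 = local, compressed DNN; 0 = edge, uncompressed DNN).
    """
    per_device = g(x_sel) * (o * h_c + (1 - o) * h_u)
    return per_device.mean()

x_sel = np.array([0.75, 1.00, 0.50])
o = np.array([1, 0, 0])
print(f"A_m = {service_accuracy(x_sel, o, h_c=0.8, h_u=1.0):.3f}")
```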
170
+
171
+ # D. Problem Formulation
172
+
173
+ DNN inference services require not only minimizing service delay, but also guaranteeing their long-term accuracy requirements, which can be modeled via a CMDP. Its action, state, reward, and state transition matrix are defined as follows:
174
+
175
+ - Action: The action includes the sampling rate selection, task offloading, and edge computing resource allocation decisions, i.e., $\hat{a}^t = \{\mathbf{X}^t,\mathbf{o}^t,\mathbf{c}^t\}$ . Note that the components of the action should satisfy following constraints: (1) $x_{n,k}^{t}\in \{0,1\}$ constrains the sampling rate selection decision; (2) $o_n^t\in \{0,1\}$ requires the binary task offloading decision; and (3) $\sum_{m\in \mathcal{M}}c_m^t\leq 1$ and $0\leq c_m^t\leq 1$ constrain a continuous computing resource allocation decision.
176
+ - State: The state includes local computing queues backlog of industrial IoT devices $B_{n}^{t}$ , edge computing queues backlog $Q_{m}^{t}$ , channel conditions of industrial IoT devices $H_{n}^{t}$ , and the raw data size of the generated tasks at industrial IoT devices $\xi_{n}^{t}$ , i.e.,
177
+
178
+ $$
179
+ \hat {s} ^ {t} = \left\{\left\{B _ {n} ^ {t} \right\} _ {n \in \mathcal {N}}, \left\{Q _ {m} ^ {t} \right\} _ {m \in \mathcal {M}}, \left\{H _ {n} ^ {t} \right\} _ {n \in \mathcal {N}}, \left\{\xi_ {n} ^ {t} \right\} _ {n \in \mathcal {N}} \right\}. \tag {13}
180
+ $$
181
+
182
+ The queue backlogs, i.e., $\{B_n^t\}_{n\in \mathcal{N}}$ and $\{Q_m^t\}_{m\in \mathcal{M}}$ , are measured in bits, which results in a large state space, especially for a large number of industrial IoT devices.
183
+
184
+ - Reward: The reward is designed to minimize the service delay in (11) in time slot $t$ , and is defined as $\hat{r}^t = -D(\mathbf{X}^t, \mathbf{o}^t, \mathbf{c}^t)$ .
185
+ - State transition probability: State transition probability is given by
186
+
187
+ $$
188
+ \begin{aligned} \Pr \left(\hat{s}^{t+1} \mid \hat{s}^{t}, \hat{a}^{t}\right) = & \prod_{n \in \mathcal{N}} \Pr \left(B_{n}^{t+1} \mid B_{n}^{t}, x_{n,k}^{t}, o_{n}^{t}\right) \cdot \prod_{m \in \mathcal{M}} \Pr \left(Q_{m}^{t+1} \mid Q_{m}^{t}, \mathbf{X}^{t}, \mathbf{o}^{t}\right) \\ & \cdot \prod_{n \in \mathcal{N}} \Pr \left(H_{n}^{t+1} \mid H_{n}^{t}\right) \cdot \prod_{n \in \mathcal{N}} \Pr \left(\xi_{n}^{t+1} \mid \xi_{n}^{t}\right). \end{aligned} \tag{14}
189
+ $$
190
+
191
+ The equality holds due to the independence of different state components. The first two components are governed by the evolution of local computing queues and edge computing queues in (2) and (8), respectively. The third component is evolved according to the discrete-time Markov chain of channel conditions, and the last component is governed by the memoryless task arrival pattern. Note that each of those state components only depends on its previous state components, which means the state transition is Markovian.
192
+
193
+ Our goal is to find a stationary policy $\pi \in \Pi$ that dynamically configures sampling rates selection $\mathbf{X}^t$ , task offloading $\mathbf{o}^t$ , and edge computing resource allocation $\mathbf{c}^t$ according to state $\hat{s}^t$ , to minimize the service delay while guaranteeing long-term inference accuracy requirements $\{A_m^{th}\}_{m\in \mathcal{M}}$ , which is formulated as the following problem:
194
+
195
+ $$
196
+ \mathbf {P} _ {0}: \min _ {\pi \in \Pi} \lim _ {T \rightarrow \infty} \frac {1}{T} \sum_ {t = 1} ^ {T} \mathbb {E} _ {\pi} \left[ D \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}, \mathbf {c} ^ {t}\right)\right] \tag {15a}
197
+ $$
198
+
199
+ $$
200
+ \text {s . t .} \quad \lim _ {T \rightarrow \infty} \frac {1}{T} \sum_ {t = 1} ^ {T} A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right) \geq A _ {m} ^ {t h}, \forall m \in \mathcal {M}. \tag {15b}
201
+ $$
202
+
203
+ Here, $\mathbf{P}_0$ is a CMDP. Directly solving the above CMDP via dynamic programming solutions [15] is challenging for the following reasons. Firstly, the state transition probability is unknown due to the lack of statistical information on the channel condition variation and the task arrival patterns of all industrial IoT devices. Secondly, even if the state transition probability were known, the large action and state spaces, which grow with the number of industrial IoT devices, incur an extremely high computational complexity, which makes dynamic programming solutions intractable. Hence, we propose a deep RL-based algorithm to solve the CMDP, which can be applied in large-scale networks without requiring statistical information of network dynamics.
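Although $\mathbf{P}_0$ itself must be solved by the learning approach developed next, its objective (15a) and constraints (15b) are straightforward to evaluate empirically from per-slot traces, as the following minimal sketch with synthetic traces illustrates.

```python
import numpy as np

rng = np.random.default_rng(1)

# Synthetic per-slot traces produced by some policy: service delays D^t and
# instantaneous accuracies A_m^t for two services over T slots.
T = 1000
D_trace = rng.uniform(0.8, 1.6, size=T)
A_trace = {0: rng.uniform(0.78, 0.92, size=T), 1: rng.uniform(0.88, 0.96, size=T)}
A_th = {0: 0.8, 1: 0.9}

print(f"average service delay (objective 15a): {D_trace.mean():.3f} s")
for m, trace in A_trace.items():
    satisfied = trace.mean() >= A_th[m]      # long-term accuracy constraint (15b)
    print(f"service {m}: average accuracy {trace.mean():.3f}, constraint met: {satisfied}")
```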
204
+
205
+ # IV. DEEP RL-BASED SAMPLING RATE ADAPTION AND RESOURCE ALLOCATION ALGORITHM
206
+
207
+ As mentioned before, a CMDP cannot be directly solved via traditional RL algorithms. We first leverage the Lyapunov optimization technique to deal with the long-term constraints and transform the problem into an MDP.
208
+
209
+ Then, we develop a deep RL-based algorithm to solve the MDP. To further reduce the training complexity, an optimization subroutine is embedded to directly obtain the optimal edge computing resource allocation.
210
+
211
+ # A. Lyapunov-Based Problem Transformation
212
+
213
+ The major challenge in solving problem $\mathbf{P}_0$ is to handle the long-term constraints. We leverage the Lyapunov technique [20], [21] to address this challenge. The core idea is to construct accuracy deficit queues to characterize the satisfaction status of the long-term accuracy constraints, thereby guiding the learning agent to meet the long-term accuracy constraints. The problem transformation procedure is presented as follows.
214
+
215
+ Firstly, we construct inference accuracy deficit queues for all services, whose dynamics evolves as follows:
216
+
217
+ $$
218
+ Z _ {m} ^ {t + 1} = \left[ A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right) + Z _ {m} ^ {t} \right] ^ {+}, \forall m \in \mathcal {M}. \tag {16}
219
+ $$
220
+
221
+ Here, $Z_{m}^{t}$ indicates the deviation of the achieved instantaneous accuracy from the long-term accuracy requirement, whose initial state is set to $Z_{m}^{0} = 0$ . Then, a Lyapunov function is introduced to characterize the satisfaction status of the long-term accuracy constraint, which is defined as $L(Z_{m}^{t}) = (Z_{m}^{t})^{2}/2$ [20]–[22]. A smaller value of $L(Z_{m}^{t})$ indicates better satisfaction of the long-term accuracy constraint.
222
+
223
+ Secondly, the Lyapunov function should be consistently pushed to a low value in order to guarantee the long-term accuracy constraints. Hence, we introduce a one-shot Lyapunov drift to capture the variation of the Lyapunov function across two subsequent time slots [20]. Given $Z_{m}^{t}$ , the one-shot Lyapunov drift is defined as $\Delta (Z_m^t) = L(Z_m^{t + 1}) - L(Z_m^t)$ which is upper bounded by
224
+
225
+ $$
226
+ \begin{array}{l} \Delta \left(Z _ {m} ^ {t}\right) = \frac {1}{2} \left(\left(Z _ {m} ^ {t + 1}\right) ^ {2} - \left(Z _ {m} ^ {t}\right) ^ {2}\right) \\ \leq \frac {1}{2} \left(\left(Z _ {m} ^ {t} + A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right)\right) ^ {2} - \left(Z _ {m} ^ {t}\right) ^ {2}\right) \\ = \frac {1}{2} \left(A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right)\right) ^ {2} + Z _ {m} ^ {t} \left(A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right)\right) \\ \leq C _ {m} + Z _ {m} ^ {t} \left(A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right)\right), \tag {17} \\ \end{array}
227
+ $$
228
+
229
+ where $C_m = \left(A_m^{th} - A_m^{min}\right)^2 / 2$ is a constant, and $A_m^{min}$ is the lowest inference accuracy that can be achieved for service $m$ . The first inequality is due to the substitution of (16), and the second inequality is because $A_m(\mathbf{X}^t, \mathbf{o}^t) \geq A_m^{min}$ .
230
+
231
+ Thirdly, based on the Lyapunov optimization theory, the original CMDP of minimizing the service delay while guaranteeing the long-term accuracy requirements boils down to minimizing a drift-plus-cost, i.e.,
232
+
233
+ $$
234
+ \begin{array}{l} \sum_ {m \in \mathcal {M}} \Delta \left(Z _ {m} ^ {t}\right) + V \cdot D \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}, \mathbf {c} ^ {t}\right) \\ \leq \sum_ {m \in \mathcal {M}} C _ {m} + \sum_ {m \in \mathcal {M}} Z _ {m} ^ {t} \left(A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right)\right) \tag {18} \\ + V \cdot D \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}, \mathbf {c} ^ {t}\right), \\ \end{array}
235
+ $$
236
+
237
+ where the inequality is due to the upper bound in (17).
238
+
239
+ Here, $V$ is a positive parameter to adjust the tradeoff between the service delay minimization and the satisfaction status of the long-term accuracy constraints. The underlying rationale is that, if the long-term accuracy constraint is violated, i.e., $Z_{m}^{t} > 0$ , satisfying the long-term constraints by improving the instantaneous inference accuracy becomes more urgent than reducing the service delay.
240
+
241
+ In this way, the CMDP is transformed into an MDP with the objective of minimizing the drift-plus-cost in each time slot.
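A minimal sketch of this transformation is given below: it maintains the accuracy deficit queue of (16) and evaluates the per-slot drift-plus-cost term bounded in (18), which the learning agent later minimizes through its reward; the numbers are illustrative only.

```python
def deficit_update(Z, A_th, A_inst):
    """Accuracy deficit queue update of eq. (16)."""
    return max(Z + A_th - A_inst, 0.0)

def drift_plus_cost(Z_list, A_th_list, A_list, delay, V):
    """Per-slot drift-plus-cost bound of (18), dropping the constant terms."""
    penalty = sum(Z * (A_th - A) for Z, A_th, A in zip(Z_list, A_th_list, A_list))
    return penalty + V * delay

# Toy trace for one service: the deficit queue grows while the instantaneous
# accuracy stays below the requirement and shrinks once it exceeds it.
Z, A_th, V = 0.0, 0.9, 0.05
for A_inst, delay in [(0.85, 1.2), (0.85, 1.2), (0.95, 1.5), (0.95, 1.5)]:
    cost = drift_plus_cost([Z], [A_th], [A_inst], delay, V)
    Z = deficit_update(Z, A_th, A_inst)
    print(f"A = {A_inst:.2f}, drift-plus-cost = {cost:.4f}, Z_next = {Z:.2f}")
```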
242
+
243
+ # B. Equivalent MDP
244
+
245
+ In the equivalent MDP, the action, state, reward, and state transition matrix are modified as follows due to the incorporation of accuracy deficit queues.
246
+
247
+ - Action: The action is the same as that in the CMDP, i.e., $a^t = \hat{a}^t = \{\mathbf{X}^t,\mathbf{o}^t,\mathbf{c}^t\}$ .
248
+ - State: Compared with the state of the CMDP, the accuracy deficit queue backlog of services $\{Z_m^t\}_{m\in \mathcal{M}}$ should be incorporated, i.e.,
249
+
250
+ $$
251
+ s ^ {t} = \left\{\hat {s} ^ {t}, \left\{Z _ {m} ^ {t} \right\} _ {m \in \mathcal {M}} \right\}. \tag {19}
252
+ $$
253
+
254
+ - Reward: The reward is modified to minimize the drift-plus-cost in (18) in time slot $t$ , i.e.,
255
+
256
+ $$
257
+ \begin{array}{l} r ^ {t} = - V \cdot D \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}, \mathbf {c} ^ {t}\right) \\ - \sum_ {m \in \mathcal {M}} Z _ {m} ^ {t} \left(A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right)\right). \tag {20} \\ \end{array}
258
+ $$
259
+
260
+ Note that the constant term $\sum_{m\in \mathcal{M}}C_m$ in (18) is ignored in the reward for brevity.
261
+
262
+ - State transition probability: Since accuracy deficit queue backlogs are incorporated in the state, the state transition probability evolves according to
263
+
264
+ $$
265
+ \begin{aligned} \Pr \left(s^{t+1} \mid s^{t}, a^{t}\right) = \Pr \left(\hat{s}^{t+1} \mid \hat{s}^{t}, \hat{a}^{t}\right) \cdot \prod_{m \in \mathcal{M}} \Pr \left(Z_{m}^{t+1} \mid Z_{m}^{t}, \mathbf{X}^{t}, \mathbf{o}^{t}\right), \end{aligned} \tag{21}
266
+ $$
267
+
268
+ where the second term is the evolution of the accuracy deficit queue backlog according to (16). Note that the overall state transition is still Markovian.
269
+
270
+ Then, problem $\mathbf{P}_0$ is transformed into the following MDP problem:
271
+
272
+ $$
273
+ \begin{array}{l} \mathbf {P} _ {1}: \min _ {\pi \in \Pi} \quad \lim _ {T \rightarrow \infty} \frac {1}{T} \sum_ {t = 1} ^ {T} \mathbb {E} _ {\pi} \left[ \sum_ {m \in \mathcal {M}} Z _ {m} ^ {t} \left(A _ {m} ^ {t h} - A _ {m} \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}\right)\right)\right. \\ \left. + V \cdot D \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}, \mathbf {c} ^ {t}\right) \right]. \tag {22} \\ \end{array}
274
+ $$
275
+
276
+ Similar to CMDP, solving an MDP via dynamic programming solutions also suffers from the curse of dimensionality due to large state space. Hence, we propose a deep RL-based algorithm to solve the MDP, which is detailed in Section IV-D.
277
+
278
+ # C. Optimization Subroutine for Edge Computing Resource Allocation
279
+
280
+ Although $\mathbf{P}_1$ can be directly solved by RL algorithms, an inherent property of the edge computing resource allocation
281
+
282
+ can be leveraged to reduce the training complexity of RL algorithms. An analysis of (22) shows that the edge computing resource allocation is independent of the inference accuracy performance, and hence it only impacts the one-shot service delay performance. In time slot $t$ , once the task offloading and sampling rate selection decisions are made, the optimal computing resource allocation decision can be obtained by solving the following optimization problem:
283
+
284
+ $$
285
+ \mathbf {P} _ {2}: \min _ {\mathbf {c} ^ {t}} D \left(\mathbf {X} ^ {t}, \mathbf {o} ^ {t}, \mathbf {c} ^ {t}\right)
286
+ $$
287
+
288
+ $$
289
+ \text {s . t .} \sum_ {m \in \mathcal {M}} c _ {m} ^ {t} \leq 1 \tag {23a}
290
+ $$
291
+
292
+ $$
293
+ 0 \leq c _ {m} ^ {t} \leq 1. \tag {23b}
294
+ $$
295
+
296
+ A further analysis of (11) indicates that only the task processing delay and queuing delay at the AP are impacted by the edge computing resource allocation, i.e., $\sum_{n\in \mathcal{N}}\left(d_{n,p}^{t} + d_{n,q}^{t} + d_{n,w}^{t}\right)$ . In addition, the aggregated delay from the perspective of all devices is equivalent to the aggregated delay from the perspective of all services. Hence, the objective function in $\mathbf{P}_2$ can be rewritten as $\sum_{m\in \mathcal{M}}d_m^t$ , where
297
+
298
+ $$
299
+ \begin{array}{l} d _ {m} ^ {t} = \sum_ {n \in \mathcal {N} _ {m}} \left(\frac {\eta_ {m , u} \left(1 - o _ {n} ^ {t}\right) \zeta \left(\mathbf {x} _ {n} ^ {t}\right)}{c _ {m} ^ {t} f _ {b}} + \frac {Q _ {m} ^ {t} \eta_ {m , u}}{c _ {m} ^ {t} f _ {b}} \right. \tag {24} \\ \left. + \frac {\eta_ {m , u} \sum_ {i \neq n , i \in \mathcal {N} _ {m}} \left(1 - o _ {i} ^ {t}\right) \zeta \left(\mathbf {x} _ {i} ^ {t}\right)}{2 c _ {m} ^ {t} f _ {b}}\right) \\ \end{array}
300
+ $$
301
+
302
+ denotes the experienced delay of the $m$ -th service. By analyzing the convexity property of the problem, we have the following theorem to obtain the optimal edge computation resource allocation in each time slot.
303
+
304
+ Theorem 1. The optimal edge computing resource allocation for problem $\mathbf{P}_2$ is given by
305
+
306
+ $$
307
+ c _ {m} ^ {t, \star} = \frac {\sqrt {\Lambda_ {m} ^ {t}}}{\sum_ {m \in \mathcal {M}} \sqrt {\Lambda_ {m} ^ {t}}}, \forall m \in \mathcal {M}, \tag {25}
308
+ $$
309
+
310
+ where
311
+
312
+ $$
313
+ \begin{array}{l} \Lambda_ {m} ^ {t} = \sum_ {n \in \mathcal {N} _ {m}} \left(\eta_ {m, u} (1 - o _ {n} ^ {t}) \zeta (\mathbf {x} _ {n} ^ {t}) + Q _ {m} ^ {t} \eta_ {m, u} \right. \\ \left. + \frac {\eta_ {m , u}}{2} \sum_ {i \neq n, i \in \mathcal {N} _ {m}} \left(1 - o _ {i} ^ {t}\right) \zeta \left(\mathbf {x} _ {i} ^ {t}\right)\right). \tag {26} \\ \end{array}
314
+ $$
315
+
316
+ Proof. Proof is provided in Appendix A.
317
+
318
+ ![](images/377052c6230de6505dbfdc9da8a50bae07057db38fbc8cbb258369883fbe4159.jpg)
319
+
320
+ This optimization subroutine for the edge computing resource allocation is embedded in the following proposed deep RL-based algorithm. In this way, the training complexity can be reduced, because it is no longer necessary to train the neural networks to obtain optimal edge computing resource allocation policy.
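A small sketch of the subroutine is given below; it computes the weights $\Lambda_m^t$ of (26) and the closed-form allocation of (25) for hypothetical task sizes, offloading decisions, and queue backlogs.

```python
import numpy as np

def optimal_allocation(zeta, o, Q, eta_u):
    """Closed-form edge computing resource allocation of Theorem 1.

    zeta[m] and o[m] are per-service arrays of task sizes and offloading
    decisions (1 = local, 0 = offload); Q[m] is the edge queue backlog and
    eta_u[m] the uncompressed-DNN intensity of service m.  Returns c* of (25).
    """
    Lam = []
    for m in range(len(Q)):
        off = (1 - o[m]) * zeta[m]
        n_m = len(off)
        # Weight Lambda_m of eq. (26): offloaded load, queue backlog, and half
        # of the load contributed by the other devices of the same service.
        Lam.append(eta_u[m] * (off.sum() + n_m * Q[m] + 0.5 * (n_m - 1) * off.sum()))
    Lam = np.asarray(Lam)
    return np.sqrt(Lam) / np.sqrt(Lam).sum()

zeta = [np.array([576e3, 384e3]), np.array([512e3, 256e3, 512e3])]   # bits
o = [np.array([0, 1]), np.array([0, 0, 1])]
c_star = optimal_allocation(zeta, o, Q=np.array([1e6, 2e6]), eta_u=np.array([200.0, 400.0]))
print(c_star, c_star.sum())   # positive allocations that sum to one
```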
321
+
322
+ # D. Deep RL-based Algorithm
323
+
324
+ To solve problem $\mathbf{P}_1$ , we propose a deep RL-based algorithm, which is extended from the celebrated DDPG algorithm [23].
325
+
326
+ Algorithm 1: Deep RL-based algorithm for sampling rate adaption and resource allocation
327
+
328
+ 1 Initialization: Initialize all neural networks and the experience replay memory;
329
+ 2 for each episode do
330
+ 3 Reset the environment and obtain initial state $s_0$ 4 for time slot $t\in \mathcal{T}$ do
331
+
332
+ 5 Determine the sampling rate selection and task offloading actions $\{\mathbf{X}^t,\mathbf{o}^t\}$ by the actor network according to current state $s^t$
333
+ 6 Determine edge computing resource allocation action $\mathbf{c}^t$ by (25);
334
+ 7 Send joint action $a^t = \{\mathbf{X}^t,\mathbf{o}^t,\mathbf{c}^t\}$ to all industrial IoT devices by the AP;
335
+ 8 Execute t devices;
336
+ 9 | Observe reward $r^t$ and new state $s^{t+1}$ ;
337
+ 10 Store transition $\{s^t,a^t,r^t,s^{t + 1}\}$ in the epersistence replay memory;
338
+ 11 Sample a random minibatch transitions from the epexperience replay memory;
339
+ 12 Train the critic and actor network by (27) and (28), respectively;
340
+ 13 Update target networks by (29);
341
+ 14 end
342
+
343
+ 15 end
344
+
345
+ proposed algorithm is that the above optimization subroutine for computing resource allocation is embedded to reduce the training complexity. The proposed algorithm can be deployed at the AP, which collects the entire network state information and enforces the policy to all connected industrial IoT devices.
346
+
347
+ In the algorithm, the learning agent has two parts: (a) an actor network, which is to determine the action based on the current state; and (b) a critic network, which is to evaluate the determined action based on the reward feedback from the environment. Let $\mu(s|\phi^{\mu})$ and $Q(s,a|\phi^{Q})$ denote the actor network and the critic network, respectively, whose neural network weights are $\phi^{\mu}$ and $\phi^{Q}$ . As shown in Algorithm 1, the deep RL-based algorithm operates in a time-slotted manner, which consists of the following three steps.
348
+
349
+ The first step is to obtain experience by interacting with the environment. Based on current network state $s^t$ , the actor network generates the sampling rate selection and task offloading actions with an additive policy exploration noise that follows Gaussian distribution $\mathcal{N}(0, \sigma^2)$ . The optimization subroutine generates the edge computation resource allocation action. Then, the joint action is executed at all industrial IoT devices. The corresponding reward $r^t$ and the next state $s^{t+1}$ are observed from the environment. The state transition $\{s^t, a^t, r^t, s^{t+1}\}$ is stored in the experience replay memory for actor and critic network training.
350
+
351
+ The second step is to train the actor and critic networks based on the stored experience. To avoid the divergence issue caused by DNN function approximation, a minibatch of transitions is randomly sampled from the experience replay memory to break experience correlation.
352
+
353
+ Table II SIMULATION PARAMETERS [24], [25].
354
+
355
+ <table><tr><td>Parameter</td><td>Value</td></tr><tr><td>Thermal noise spectrum density (No)</td><td>-174 dBm/Hz [25]</td></tr><tr><td>Communication bandwidth (W)</td><td>[5,25] MHz</td></tr><tr><td>Transmit power (PT)</td><td>20 dBm [24]</td></tr><tr><td>Average task arrival rate (λ)</td><td>[0.6,1] request/sec</td></tr><tr><td>Noise figure (Nf)</td><td>5 dB</td></tr><tr><td>Intensity of compressed DNN (η1,c,η2,c)</td><td>(80,160) cycles/bit</td></tr><tr><td>Intensity of uncompressed DNN (η1,u,η2,u)</td><td>(200,400) cycles/bit</td></tr><tr><td>Device and edge server CPU frequency (fn,fb)</td><td>(0.1,2) GHz</td></tr><tr><td>Number of Type I/II devices (N1,N2)</td><td>5,5</td></tr><tr><td>Time slot duration (τ)</td><td>1 second</td></tr><tr><td>Balance parameter (V)</td><td>0.05</td></tr><tr><td>Unit penalty for queue overflow (wp)</td><td>1</td></tr><tr><td>Accuracy of compressed DNN (h1,c,h2,c)</td><td>0.8,0.8</td></tr><tr><td>Accuracy of uncompressed DNN (h1,u,h2,u)</td><td>1,1</td></tr><tr><td>Local/edge queue capacity (Bmax,Qmax)</td><td>(3.84,19.2) megabits</td></tr></table>
356
+
357
+ Table III PARAMETERS OF THE PROPOSED RL-BASED ALGORITHM.
358
+
359
+ <table><tr><td>Parameter</td><td>Value</td><td>Parameter</td><td>Value</td></tr><tr><td>Actor learning rate</td><td>10-4</td><td>Critic learning rate</td><td>10-3</td></tr><tr><td>Actor hidden units</td><td>(64, 32)</td><td>Critic hidden units</td><td>(64, 32)</td></tr><tr><td>Hidden activation</td><td>ReLU</td><td>Actor output activation</td><td>Tanh</td></tr><tr><td>Optimizer</td><td>Adam</td><td>Policy noise (σ)</td><td>0.2</td></tr><tr><td>Target update (δ)</td><td>0.005</td><td>Discount factor</td><td>0.85</td></tr><tr><td>Minibatch size</td><td>64</td><td>Replay memory size</td><td>100,000</td></tr><tr><td>Training episodes</td><td>1,000</td><td>Time slots per episode</td><td>200</td></tr></table>
360
+
361
+ The critic network is trained by minimizing the loss function
362
+
363
+ $$
364
+ \mathrm{Loss}\left(\phi^{Q}\right) = \frac{1}{N_{b}} \sum_{i=1}^{N_{b}} \left(y_{i} - Q\left(s_{i}, a_{i} \mid \phi^{Q}\right)\right)^{2}, \tag{27}
365
+ $$
366
+
367
+ where $y_{i} = r_{i} + \gamma Q^{\prime}(s_{i + 1},\mu^{\prime}(s_{i + 1}|\phi^{\mu^{\prime}})|\phi^{Q^{\prime}})$ , and $N_{b}$ is the minibatch size. Here, $\mu^{\prime}(s|\phi^{\mu^{\prime}})$ and $Q^{\prime}(s,a|\phi^{Q^{\prime}})$ represent actor and critic target networks with weights $\phi^{\mu^{\prime}}$ and $\phi^{Q^{\prime}}$ . The actor network is trained via the policy gradient
368
+
369
+ $$
370
+ \nabla_{\phi^{\mu}} \approx \frac{1}{N_{b}} \sum_{i=1}^{N_{b}} \nabla_{a} Q\left(s_{i}, a \mid \phi^{Q}\right) \big|_{s = s_{i}, a = \mu\left(s_{i}\right)} \nabla_{\phi^{\mu}} \mu\left(s_{i} \mid \phi^{\mu}\right) \big|_{s_{i}}. \tag{28}
371
+ $$
372
+
373
+ The third step is to update target networks. In order to ensure network training stability, the actor and critic target networks are softly updated by
374
+
375
+ $$
376
+ \phi^ {Q ^ {\prime}} = \delta \phi^ {Q} + (1 - \delta) \phi^ {Q ^ {\prime}}, \phi^ {\mu^ {\prime}} = \delta \phi^ {\mu} + (1 - \delta) \phi^ {\mu^ {\prime}}, \tag {29}
377
+ $$
378
+
379
+ where $0 < \delta \ll 1$ denotes the target network update ratio.
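The three training steps can be illustrated with the following PyTorch-style sketch (tiny illustrative networks and hypothetical dimensions, not the exact architecture of Table III): it computes the critic target $y_i$ and loss (27), performs an actor update in the spirit of (28), and applies the soft target update (29).

```python
import copy

import torch
import torch.nn as nn

# Tiny illustrative actor/critic pair; the dimensions are hypothetical.
state_dim, action_dim, gamma, delta = 8, 4, 0.85, 0.005
actor = nn.Sequential(nn.Linear(state_dim, 32), nn.ReLU(),
                      nn.Linear(32, action_dim), nn.Tanh())
critic = nn.Sequential(nn.Linear(state_dim + action_dim, 32), nn.ReLU(),
                       nn.Linear(32, 1))
actor_tgt, critic_tgt = copy.deepcopy(actor), copy.deepcopy(critic)
critic_opt = torch.optim.Adam(critic.parameters(), lr=1e-3)
actor_opt = torch.optim.Adam(actor.parameters(), lr=1e-4)

def train_step(s, a, r, s_next):
    """One minibatch update: critic loss (27), actor step, soft update (29)."""
    with torch.no_grad():
        a_next = actor_tgt(s_next)
        y = r + gamma * critic_tgt(torch.cat([s_next, a_next], dim=1))   # target y_i
    critic_loss = nn.functional.mse_loss(critic(torch.cat([s, a], dim=1)), y)  # eq. (27)
    critic_opt.zero_grad(); critic_loss.backward(); critic_opt.step()

    # Maximizing Q(s, mu(s)) follows the policy-gradient direction of (28).
    actor_loss = -critic(torch.cat([s, actor(s)], dim=1)).mean()
    actor_opt.zero_grad(); actor_loss.backward(); actor_opt.step()

    # Soft target update of eq. (29).
    for tgt, src in [(critic_tgt, critic), (actor_tgt, actor)]:
        for p_t, p in zip(tgt.parameters(), src.parameters()):
            p_t.data.mul_(1 - delta).add_(delta * p.data)

# One update on a random minibatch of 64 transitions.
s, s_next = torch.randn(64, state_dim), torch.randn(64, state_dim)
a, r = torch.rand(64, action_dim), torch.randn(64, 1)
train_step(s, a, r, s_next)
```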
380
+
381
+ # V. SIMULATION RESULTS
382
+
383
+ # A. Simulation Setup
384
+
385
+ We consider a smart factory scenario in our simulation, in which industrial IoT devices, e.g., vibration sensors, are randomly scattered. The industrial IoT devices installed on industrial facilities (e.g., robot arms) sense their operating conditions. The sensing data is processed locally or offloaded to an AP in the smart factory for processing. The transmit power of an industrial IoT device is set to $20\mathrm{dBm}$ [24]. The channel condition is modeled with three states, i.e., "Good (G)", "Normal (N)", and "Bad (B)".
386
+
387
+ ![](images/5806a2adb4df412acc4ce97138c6bf32c82dd02f70651551e08d5cf9d312e588.jpg)
388
+ (a) Average delay performance
389
+
390
+ ![](images/a94802a04334963cc1a704b2c273e87f26ae195c6f99e9be1cd0e7f2e7508cb8.jpg)
391
+ (b) Accuracy Performance of two services
392
+ Fig. 3. Performance of the proposed algorithm in the training stage.
393
+
394
+ The corresponding channel state transition matrix is given by [17]
395
+
396
+ $$
397
+ \mathbf{P} = \left[ \begin{array}{ccc} P_{GG} & P_{GN} & 0 \\ P_{NG} & P_{NN} & P_{NB} \\ 0 & P_{BN} & P_{BB} \end{array} \right] = \left[ \begin{array}{ccc} 0.3 & 0.7 & 0 \\ 0.25 & 0.5 & 0.25 \\ 0 & 0.7 & 0.3 \end{array} \right]. \tag{30}
398
+ $$
399
+
400
+ Two types of DNN inference services are considered. Type I service: a facility fault diagnosis service to identify the fault type based on the collected bearing vibration signal from the dataset [8]. Since the duration of a time slot in the simulation is set to be one second, the task data size is the data volume of a one-second signal, which is a product of the raw sampling rate and the quantization bits of the signal. In the dataset, the bearing vibration signal is collected at 48 KHz sampling rate and 16 bit quantization, and hence the corresponding task data size is 768 kilobits. The long-term accuracy requirement of the service is set to 0.8. Type II service: a service extended from the Type I service to diagnose facility fault based on a low-grade bearing vibration dataset while requiring higher inference accuracy 0.9. The low-grade dataset collects vibration signal at a lower sampling rate of $32\mathrm{KHz}$ , and hence the task data size is 512 kilobits. For both services, the task arrival rate of each industrial IoT device at each time slot follows a uniform distribution $\mathcal{U}(\lambda -0.5,\lambda +0.5)$ , where $\lambda$ is the average task arrival rate. We consider four candidate sampling rates for industrial IoT devices, which are $25\%$ , $50\%$ , $75\%$ and $100\%$ of the raw sampling rate. The corresponding accuracy with respect to the sampling rates are 0.59, 0.884, 0.950 and 0.987, respectively, based on extensive experiments
401
+
402
+ ![](images/1af0eee910826d17bd3371ee7060873957e0bda7818444df7829814e539e75a4.jpg)
403
+ Fig. 4. Service delay performance with respect to task arrival rates.
404
+
405
+ ![](images/dd885d8f9ab583950ee408d246a2c0c591a47c4844e5e0f4d1d12aca393b63d2.jpg)
406
+ Fig. 5. Inference accuracy performance with respect to task arrival rates.
407
+
408
+ on the bearing vibration dataset [8]. Balance parameter $V$ is set to 0.05 based on extensive simulations. Other important simulation parameters are listed in Table II. The parameters of the proposed algorithm are given in Table III. The proposed algorithm is compared to the following benchmarks:
409
+
410
+ - Delay myopic: Each industrial IoT device dynamically makes sampling rate selection and task offloading decisions by maximizing the one-step reward in (20) according to the network state.
411
+ - Static configuration: Each industrial IoT device takes a static configuration on the sampling rate selection and task offloading decisions, which can guarantee services' accuracy requirements.
412
+
413
+ # B. Performance Evaluation
414
+
415
+ 1) Convergence of the proposed algorithm: The service delay performance in the training stage is shown in Fig. 3(a). We can clearly see that the average service delay gradually decreases as the number of training episodes increases, which validates the convergence of the proposed algorithm. In addition, Fig. 3(b) shows the accuracy performance of both services with respect to training episodes. The accuracy performance is poor at the beginning of the training stage, but after 1,000 episodes of training it converges to the predetermined requirements.
416
+ 2) Impact of task arrival rate: Once well trained offline, the proposed algorithm is evaluated in the online inference stage. As shown in Fig. 4, we compare the average service delay performance of the proposed algorithm with the benchmark schemes in terms of task arrival rates for $W = 20\mathrm{MHz}$ .
417
+
418
+ ![](images/8fba8eb8dcc70eda235675c3f0a0185ddb21ab645539b37ad3ce4de14fee6329.jpg)
419
+ Fig. 6. Service delay performance with respect to communication bandwidth.
420
+
421
+ ![](images/349eecc993d97bd936de04b7e097b15db3bccfa98e892204d76cc62e743074eb.jpg)
422
+ Fig. 7. Service delay in terms of CPU frequency of the edge server.
423
+
424
+ Each simulation point is plotted with a $95\%$ confidence interval. Several observations can be obtained from the figure. Firstly, the service delay increases with the task arrival rate due to the constrained communication and computing resources in the network. Secondly, the proposed algorithm significantly outperforms the benchmark schemes. The reason is that the proposed RL-based algorithm can capture network dynamics, such as the task arrival pattern and channel condition variation, via interacting with the environment. The learned knowledge is utilized to make online decisions that target the long-term performance, while the benchmark schemes only focus on the short-term performance and do not adapt to network dynamics. Specifically, the proposed algorithm can reduce the average service delay by $19\%$ and $25\%$ as compared with the delay myopic and static configuration schemes, respectively.
425
+
426
+ As shown in Fig. 5, boxplot accuracy distribution of two services is presented with respect to different task arrival rates. The long-term accuracy requirements for two services are 0.8 and 0.9, respectively. It can be seen that the proposed algorithm guarantees the long-term accuracy requirements of both services with a high probability. Specifically, the maximum error probability is less than $0.5\%$ .
427
+
428
+ 3) Impact of communication bandwidth: Fig. 6 shows the impact of communication bandwidth on the average service delay. Firstly, we can see that the average service delay decreases as the bandwidth grows. The reason is that the transmission delay is reduced when the communication resource becomes sufficient.
429
+
430
+ In addition, the proposed algorithm achieves good performance when the bandwidth is scarce. When the system bandwidth is only $5\mathrm{MHz}$ , the proposed algorithm achieves $1.20\times$ and $1.42\times$ delay reduction compared with the delay myopic and static configuration schemes, respectively, which is larger than that when the system bandwidth is $25\mathrm{MHz}$ ( $1.15\times$ and $1.31\times$ ). The reason is that the proposed algorithm efficiently utilizes the on-board computing resources. Simulation results show that the proposed algorithm decides to execute $47.5\%$ of the computation tasks locally with $5\mathrm{MHz}$ bandwidth, while the delay myopic benchmark only executes $17\%$ locally. Due to the efficient resource orchestration among industrial IoT devices and the AP, the proposed algorithm can effectively reduce the average service delay for both services.
431
+
432
+ 4) Impact of optimization subroutine: As shown in Fig. 7, we evaluate the performance of the proposed algorithm with the fixed computing resource allocation (referred to as proposed-fixed), in which the edge computing resource is allocated based on the average computing demand of two services. Compared with the proposed-fixed solution, the proposed algorithm achieves significant performance gain when the edge computing resource is constrained. Specifically, the performance gain in reducing the service delay decreases from $1.98 \times$ at $1\mathrm{GHz}$ CPU frequency to only $1.02 \times$ at $1.2\mathrm{GHz}$ CPU frequency. The reason is that efficient resource allocation is more important in resource-constrained scenarios, as compared to resource-rich scenarios. The results validate the effectiveness of the optimization subroutine for edge computing resource allocation. In addition to the performance gain, another merit of the optimization subroutine is to reduce the training complexity of RL algorithms.
433
+
434
+ # VI. CONCLUSION
435
+
436
+ In this paper, we have studied the sampling rate adaption and resource allocation problem for collaborative DNN inference in industrial IoT networks. A deep RL-based algorithm has been developed to learn the channel variation and the task arrival pattern, which are then exploited to provide accuracy-guaranteed DNN inference services. The proposed algorithm can optimize the service delay performance on the fly, without requiring statistical information of network dynamics. The Lyapunov-based transformation technique can be applied to other CMDPs. For future work, we will investigate the impact of device mobility on the inference performance.
437
+
438
+ # APPENDIX
439
+
440
+ # A. Proof of Theorem 1
441
+
442
+ Firstly, the problem is proved to be a convex optimization problem. For brevity of notation, we omit $t$ in the proof. With the definition of $\Lambda_{m}$ in (26), the objective function can be rewritten as $\sum_{m\in \mathcal{M}}\Lambda_m / (c_m f_b)$ . The second-order derivative of the objective function with respect to $c_m$ is $2\Lambda_{m} / (f_{b}c_{m}^{3}) > 0$ . In addition, the inequality constraints are linear. Hence, the problem is a convex optimization problem.
443
+
444
+ Secondly, a Lagrange function for the problem without considering the inequality constraints is constructed, i.e.,
445
+
446
+ $$
447
+ \mathcal {L} (\mathbf {c}, a) = \sum_ {m \in \mathcal {M}} \frac {\Lambda_ {m}}{c _ {m} f _ {b}} + a \left(\sum_ {m \in \mathcal {M}} c _ {m} - 1\right), \tag {31}
448
+ $$
449
+
450
+ where $a$ denotes the Lagrange multiplier. Based on Karush-Kuhn-Tucker conditions [26], we have
451
+
452
+ $$
453
+ \frac {\partial L (\mathbf {c} , a)}{\partial c _ {m}} = - \frac {\Lambda_ {m}}{f _ {b} c _ {m} ^ {2}} + a = 0, \forall m \in \mathcal {M}. \tag {32}
454
+ $$
455
+
456
+ By solving the above equation, we can obtain $c_{m}^{\star} = \sqrt{\Lambda_{m} / (a f_{b})}, \forall m \in \mathcal{M}$ . Substituting this result into the complementary slackness condition $\sum_{m \in \mathcal{M}} c_{m}^{\star} - 1 = 0$ , the optimal value of $a$ is given by $a^{\star} = \left( \sum_{m \in \mathcal{M}} \sqrt{\Lambda_{m}} \right)^{2} / f_{b}$ . Since $a^{\star}$ takes a positive value, $\{c_{m}^{\star}\}_{m \in \mathcal{M}}$ are positive as well, which means that constraint (23b), i.e., $c_{m}^{t} \geq 0, \forall m \in \mathcal{M}$ , is automatically satisfied. Substituting $a^{\star}$ back into $c_{m}^{\star} = \sqrt{\Lambda_{m} / (a^{\star} f_{b})}$ yields (25), which proves Theorem 1.
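The closed form can also be checked numerically; the following sketch (assuming SciPy is available) compares (25) against a direct solution of $\mathbf{P}_2$ for hypothetical weights $\Lambda_m$.

```python
import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(3)
Lam = rng.uniform(1e5, 1e6, size=3)   # hypothetical weights Lambda_m from (26)
f_b = 2e9                             # edge CPU frequency (cycles/s)

# Closed form of Theorem 1.
c_closed = np.sqrt(Lam) / np.sqrt(Lam).sum()

# Direct numerical solution of P2 for comparison.
objective = lambda c: np.sum(Lam / (c * f_b))
res = minimize(objective, x0=np.full(3, 1 / 3), method="SLSQP",
               bounds=[(1e-6, 1.0)] * 3,
               constraints=[{"type": "eq", "fun": lambda c: c.sum() - 1.0}])

print("closed form:", np.round(c_closed, 4))
print("numerical  :", np.round(res.x, 4))
```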
457
+
458
+ # REFERENCES
459
+
460
+ [1] H. Hu, B. Tang, X. Gong, W. Wei, and H. Wang, "Intelligent fault diagnosis of the high-speed train with big data based on deep neural networks," IEEE Trans. Ind. Informat., vol. 13, no. 4, pp. 2106-2116, Apr. 2017.
461
+ [2] G. Gobieski, B. Lucia, and N. Beckmann, "Intelligence beyond the edge: Inference on intermittent embedded systems," in Proc. ASPLOS, 2019, pp. 199-213.
462
+ [3] Y. Chen, T. Krishna, J. S. Emer, and V. Sze, "Eyeriss: An energy-efficient reconfigurable accelerator for deep convolutional neural networks," IEEE J. Solid-State Circuits, vol. 52, no. 1, pp. 127-138, Jan. 2017.
463
+ [4] D. A. Chekired, L. Khoukhi, and H. T. Mouftah, "Industrial IoT data scheduling based on hierarchical fog computing: A key for enabling smart factory," IEEE Trans. Ind. Informat., vol. 14, no. 10, pp. 4590-4602, Oct. 2018.
464
+ [5] E. Li, L. Zeng, Z. Zhou, and X. Chen, "Edge AI: On-demand accelerating deep neural network inference via edge computing," IEEE Trans. Wireless Commun., vol. 19, no. 1, pp. 447-457, Jan. 2020.
465
+ [6] S. Han, H. Mao, and W. J. Dally, "Deep compression: Compressing deep neural networks with pruning, trained quantization and huffman coding," arXiv preprint arXiv:1510.00149, 2015.
466
+ [7] S. Teerapittayanon, B. McDanel, and H. Kung, “Branchynet: Fast inference via early exiting from deep neural networks,” in Proc. IEEE ICPR, 2016, pp. 2464–2469.
467
+ [8] International Data Corporation, "Case Western Reserve University," [Online]. Available: https://csegroups.case.edu/bearingdatacenter/pages/download-data-file.
468
+ [9] G. Chen, W. Choi, X. Yu, T. Han, and M. Chandraker, “Learning efficient object detection models with knowledge distillation,” in Proc. NIPS, 2017, pp. 742–751.
469
+ [10] P. Yang, F. Lyu, W. Wu, N. Zhang, L. Yu, and X. Shen, "Edge coordinated query configuration for low-latency and accurate video analytics," IEEE Trans. Ind. Informat., vol. 16, no. 7, pp. 4855-4864, Jul. 2020.
470
+ [11] S. Wang, Y. Guo, N. Zhang, P. Yang, A. Zhou, and X. Shen, “Delay-aware microservice coordination in mobile edge computing: A reinforcement learning approach,” IEEE Trans. Mobile Comput., DOI: 10.1109/TMC.2019.2957804, 2019.
471
+ [12] X. Shen, J. Gao, W. Wu, K. Lyu, M. Li, W. Zhuang, X. Li, and J. Rao, "AI-assisted network-slicing based next-generation wireless networks," IEEE Open J. Veh. Technol., vol. 1, no. 1, pp. 45-66, Jan. 2020.
472
+ [13] P. Yang, N. Zhang, S. Zhang, L. Yu, J. Zhang, and X. Shen, "Content popularity prediction towards location-aware mobile edge caching," IEEE Trans. Multimedia, vol. 21, no. 4, pp. 915-929, Apr. 2019.
473
+ [14] K. Wang, Y. Zhou, Z. Liu, Z. Shao, X. Luo, and Y. Yang, "Online task scheduling and resource allocation for intelligent NOMA-based industrial Internet of things," IEEE J. Sel. Areas Commun., vol. 38, no. 5, pp. 803-815, May 2020.
474
+ [15] E. Altman, Constrained Markov decision processes. CRC Press, 1999, vol. 7.
475
+ [16] Q. Liang, F. Que, and E. Modiano, “Accelerated primal-dual policy optimization for safe reinforcement learning,” arXiv preprint arXiv:1802.06480, 2018.
476
+ [17] L. Lei, Y. Kuang, X. Shen, K. Yang, J. Qiao, and Z. Zhong, "Optimal reliability in energy harvesting industrial wireless sensor networks," IEEE Trans. Wireless Commun., vol. 15, no. 8, pp. 5399-5413, Aug. 2016.
477
+
478
+ [18] D. Bernstein, "Containers and cloud: From lxc to docker to kubernetes," IEEE Cloud Computing, vol. 1, no. 3, pp. 81-84, Sep. 2014.
479
+ [19] A. Krizhevsky, I. Sutskever, and G. E. Hinton, "Imagenet classification with deep convolutional neural networks," in Proc. NIPS, 2012, pp. 1097-1105.
480
+ [20] M. J. Neely, "Stochastic network optimization with application to communication and queueing systems," Synthesis Lectures on Communication Networks, vol. 3, no. 1, pp. 1-211, 2010.
481
+ [21] J. Luo, F. Yu, Q. Chen, and L. Tang, "Adaptive video streaming with edge caching and video transcoding over software-defined mobile networks: A deep reinforcement learning approach," IEEE Trans. Wireless Commun., vol. 19, no. 3, pp. 1577-1592, Mar. 2020.
482
+ [22] J. Xu, L. Chen, and P. Zhou, "Joint service caching and task offloading for mobile edge computing in dense networks," in Proc. IEEE INFOCOM, 2018, pp. 207-215.
483
+ [23] T. P. Lillicrap, J. J. Hunt, A. Pritzel, N. Heess, T. Erez, Y. Tassa, D. Silver, and D. Wierstra, "Continuous control with deep reinforcement learning," in Proc. ICLR, 2016.
484
+ [24] V. Petrov, A. Samuylov, V. Begishev, D. Moltchanov, S. Andreev, K. Samouylov, and Y. Koucheryavy, "Vehicle-based relay assistance for opportunistic crowdsensing over narrowband IoT (NB-IoT)," IEEE Internet of Things J., vol. 5, no. 5, pp. 3710-3723, Oct. 2018.
485
+ [25] W. Wu, N. Cheng, N. Zhang, P. Yang, W. Zhuang, and X. Shen, "Fast mmwave beam alignment via correlated bandit learning," IEEE Trans. Wireless Commun., vol. 18, no. 12, pp. 5894-5908, Dec. 2019.
486
+ [26] S. Boyd, S. P. Boyd, and L. Vandenberghe, Convex optimization. Cambridge university press, 2004.
2301.00xxx/2301.00130/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b59b7b2468be7f0026be60dd365f53783368f264a033a4efc4db993f61b02e29
3
+ size 664055
2301.00xxx/2301.00130/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00157/128c83c4-e54e-4950-a434-c5755fa770fc_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00157/128c83c4-e54e-4950-a434-c5755fa770fc_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00157/128c83c4-e54e-4950-a434-c5755fa770fc_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3bc421df6086723330707ae0836aad0865a1b452a7aa8ef6ea1ae643d156d924
3
+ size 20124451
2301.00xxx/2301.00157/full.md ADDED
@@ -0,0 +1,705 @@
1
+ # Ponder: Point Cloud Pre-training via Neural Rendering
2
+
3
+ Di Huang $^{1,2}$ Sida Peng $^{3}$ Tong He $^{2,\dagger}$ Honghui Yang $^{2,3}$ Xiaowei Zhou $^{3}$ Wanli Ouyang $^{2}$
4
+
5
+ The University of Sydney<sup>1</sup> Shanghai AI Laboratory<sup>2</sup> Zhejiang University<sup>3</sup>
6
+
7
+ # Abstract
8
+
9
+ We propose a novel approach to self-supervised learning of point cloud representations by differentiable neural rendering. Motivated by the fact that informative point cloud features should be able to encode rich geometry and appearance cues and render realistic images, we train a point-cloud encoder within a devised point-based neural renderer by comparing the rendered images with real images on massive RGB-D data. The learned point-cloud encoder can be easily integrated into various downstream tasks, including not only high-level tasks like 3D detection and segmentation but also low-level tasks like 3D reconstruction and image synthesis. Extensive experiments on various tasks demonstrate the superiority of our approach compared to existing pre-training methods.
10
+
11
+ # 1. Introduction
12
+
13
+ We have witnessed the widespread success of supervised learning in developing vision tasks, such as image classification [20, 13] and object detection [48, 19, 25]. In contrast to the 2D image domain, current 3D point cloud benchmarks only maintain limited annotations, in terms of quantity and diversity, due to the extremely high cost of laborious labeling. Self-supervised learning (SSL) for point cloud [57, 21, 26, 23, 7, 46, 67, 53, 64, 61, 41, 30, 66, 36], consequently, becomes one of the main driving forces and has attracted increasing attention in the 3D research community.
14
+
15
+ Previous SSL methods for learning effective 3D representation can be roughly categorized into two groups: contrast-based [57, 21, 26, 23, 7, 46, 67] and completion-based [53, 64, 61, 41, 30, 66, 36]. Contrast-based methods are designed to maintain invariant representation under different transformations. To achieve this, informative samples are required. In the 2D image domain, the above challenge is addressed by (1) introducing efficient positive/negative sampling methods, (2) using a large batch size
16
+
17
+ ![](images/4516f97ac16064ec65a140b2f1a72e9bda32bfc7885ec9c863d02aaf9349911b.jpg)
18
+ Figure 1. This work proposes a novel point cloud pre-training method via neural rendering, named Ponder. Ponder is directly trained with RGB-D image supervision, and can be used for various applications, e.g. 3D object detection, 3D semantic segmentation, 3d scene reconstruction, and image synthesis.
19
+
20
+ and storing representative samples, and (3) applying various data augmentation policies. Inspired by these works, many works [57, 21, 26, 23, 7, 46, 67] are proposed to learn geometry-invariant features on 3D point cloud.
21
+
22
+ Completion-based methods are another line of research for 3D SSL, which utilizes a pre-training task of reconstructing the masked point cloud based on partial observations. By maintaining a high masking ratio, such a simple task encourages the model to learn a holistic understanding of the input beyond low-level statistics. Although the masked autoencoders have been successfully applied for SSL in images [17] and videos [14, 52], it remains challenging and still in exploration due to the inherent irregularity and sparsity of the point cloud data.
23
+
24
+ Different from the two groups of methods above, we propose point cloud pre-training via neural rendering (Ponder). Our motivation is that neural rendering, one of the most exciting recent advances and domain-specific designs in 3D vision, can be leveraged to force the point cloud features to encode rich geometry and appearance cues. As illustrated in Figure 1, we address the task of learning representative 3D features via point cloud rendering. To the best of our knowledge, this is the first exploration of neural rendering for pre-training 3D point cloud models.
25
+
26
+ ![](images/a0ea9a193b516cc182af9ad690a47ea203b488adae14f2412847932e63676d1b.jpg)
27
+ Figure 2. Different types of point cloud pre-training.
28
+
29
+ Specifically, given one or a sequence of RGB-D images, we lift them to 3D space and obtain a set of colored points. The points are then forwarded to a 3D encoder to learn the geometry and appearance of the scene via a neural representation. Given the camera parameters and the neural representation from the encoder, neural rendering is leveraged to render the RGB and depth images in a differentiable way. The network is trained to minimize the difference between the rendered and observed 2D images. In doing so, our approach enjoys multiple advantages:
30
+
31
+ - Our method is able to learn effective point cloud representation, which encodes rich geometry and appearance clues by leveraging neural rendering.
32
+ - Our method can be flexibly integrated into various tasks. For the first time, we validate the effectiveness of the proposed pre-training method for low-level tasks like surface reconstruction and image synthesis tasks.
33
+ - The proposed method can leverage rich RGB-D images for pre-training. The easier accessibility of the RGB-D data enables the possibility of 3D pre-training on a large amount of data.
34
+
35
+ Our approach proposes a novel pretext task that can serve as a strong alternative to contrast-based methods and completion-based methods in 3D point cloud pre-training. The proposed framework, Ponder, is capable of accommodating a variety of point cloud backbones, both point-based and voxel-based, and has been rigorously evaluated on a range of challenging 3D tasks, including object detection, semantic segmentation, reconstruction, and image synthesis. The consistent improvements demonstrate the effectiveness of our proposed Ponder.
36
+
37
+ # 2. Related Work
38
+
39
+ Neural rendering. Neural rendering is a family of rendering techniques that use neural networks to differentiably render images from a 3D scene representation. NeRF [35] is one of the representative neural rendering methods, which represents the scene as a neural radiance field and renders images via volume rendering. Based on NeRF, a series of works [38, 62, 55, 39, 63, 56, 65, 47, 3, 58] try to improve the NeRF representation, including accelerating NeRF training, improving the quality of the geometry, and so on. Another type of neural rendering leverages neural point clouds as the scene representation. [2, 45] take point locations and corresponding descriptors as input, rasterize the points with a z-buffer, and use a rendering network to obtain the final image. The later work Point-NeRF [59] renders realistic images from a neural point cloud representation using a NeRF-like rendering process. Our work is inspired by this recent progress in neural rendering.
40
+
41
+ Self-supervised learning in point clouds. Current methods can be roughly divided into two categories: contrast-based and completion-based. Inspired by the works [18, 6] from the 2D image domain, PointContrast [57] is one of the pioneering works for 3D contrastive learning. Similarly, it encourages the network to learn invariant 3D representations under different transformations. Some works [21, 26, 23, 7, 46, 67] follow this pipeline by either devising new sampling strategies to select informative positive/negative training pairs or exploring various types of data augmentation. Another line of work is completion-based methods [64, 61, 41, 30, 66, 36], which draw inspiration from Masked Autoencoders [17]. PointMAE [41] proposes restoring the masked points via a set-to-set Chamfer Distance. VoxelMAE [36] instead recovers the underlying geometry by distinguishing whether a voxel contains points. Another work, MaskPoint [30], pre-trains the point cloud encoder by performing binary classification to check whether a sampled point is occupied. Later, IAE [61] proposes to pre-train the point cloud encoder by recovering continuous 3D geometry in an implicit manner. Different from the above pipelines, we propose a novel framework for point cloud pre-training via neural rendering.
42
+
43
+ Multi-modal point cloud pre-training. Some recent works explore pre-training pipelines with multi-modal data consisting of 2D images and 3D point clouds. Pri3D [22] uses 3D point clouds and multi-view images to pre-train 2D image networks. CrossPoint [1] aligns the 2D image features and 3D point cloud features through a contrastive learning pipeline. [27] proposes a unified framework for exploring invariances across different input data formats, including 2D images and 3D point clouds. Different from previous methods, most of which attempt to align 2D images and 3D point clouds in the feature space, our method proposes to
44
+
45
+ connect 2D and 3D in the RGB-D image domain via differentiable rendering.
46
+
47
+ # 3. Methods
48
+
49
+ An overview of our Ponder is presented in Figure 3. Given the camera poses, 3D point clouds are obtained by projecting the RGB-D images back to 3D space (Section 3.1). Then, we extract point-wise features using a point cloud encoder (Section 3.2) and organize them into a 3D feature volume (Section 3.3), which is used to reconstruct the neural scene representation and render images in a differentiable manner (Section 3.4).
50
+
51
+ # 3.1. Constructing point cloud from RGB-D images
52
+
53
+ The proposed method makes use of sequential RGB-D images $\{(I_i, D_i)\}_{i=1}^N$ , the camera intrinsic parameters $\{\mathbf{K}_i\}_{i=1}^N$ , and extrinsic poses $\{\pmb{\xi}_i\}_{i=1}^N \in \mathbf{SE}(3)$ . $N$ is the input view number. SE(3) refers to the Special Euclidean Group representing 3D rotations and translations. The camera parameters can be easily obtained from SfM or SLAM.
54
+
55
+ We construct the point cloud $\mathcal{X}$ by back-projecting RGB-D images to point clouds in a unified coordinate:
56
+
57
+ $$
58
+ \mathcal {X} = \bigcup_ {i} ^ {N} \pi^ {- 1} \left(I _ {i}, D _ {i}, \boldsymbol {\xi} _ {i}, \mathbf {K} _ {i}\right), \tag {1}
59
+ $$
60
+
61
+ where $\pi^{-1}$ back-projects the RGB-D images to 3D world space using the camera poses. Note that, unlike previous methods that only consider point locations, our method attributes each point with both its location and its RGB color. The details of $\pi^{-1}$ are provided in the supplementary material.
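+ Since the exact form of $\pi^{-1}$ is deferred to the supplementary material, the following is only a minimal sketch of the back-projection in Eq. (1) under a standard pinhole model. The function name, the camera-to-world pose convention `T_wc`, and the handling of invalid depth pixels are assumptions of this sketch rather than details from the paper.
+
+ ```python
+ import numpy as np
+
+ def back_project(rgb, depth, K, T_wc):
+     """Lift one RGB-D frame to a colored point cloud in world coordinates.
+
+     rgb:   (H, W, 3) color image
+     depth: (H, W) depth in meters (0 marks invalid pixels)
+     K:     (3, 3) pinhole intrinsics
+     T_wc:  (4, 4) camera-to-world pose (assumed convention)
+     """
+     H, W = depth.shape
+     u, v = np.meshgrid(np.arange(W), np.arange(H))
+     valid = depth > 0
+     z = depth[valid]
+     # Pixel -> camera coordinates with the pinhole model.
+     x = (u[valid] - K[0, 2]) * z / K[0, 0]
+     y = (v[valid] - K[1, 2]) * z / K[1, 1]
+     pts_cam = np.stack([x, y, z, np.ones_like(z)], axis=1)      # (N, 4)
+     pts_world = (T_wc @ pts_cam.T).T[:, :3]                     # (N, 3)
+     colors = rgb[valid] / 255.0                                 # (N, 3)
+     return np.concatenate([pts_world, colors], axis=1)          # (N, 6): xyz + rgb
+
+ # The scene point cloud X in Eq. (1) is the union over the N input views, e.g.
+ # X = np.concatenate([back_project(I_i, D_i, K_i, T_i) for each view i], axis=0)
+ ```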
62
+
63
+ # 3.2. Point cloud encoder for feature extraction
64
+
65
+ Given the point cloud $\mathcal{X}$ constructed from RGB-D images, a point cloud encoder $f_{p}$ is used to extract per-point feature embedding $\mathcal{E}$ :
66
+
67
+ $$
68
+ \mathcal {E} = f _ {p} (\mathcal {X}). \tag {2}
69
+ $$
70
+
71
+ The encoder $f_{p}$, pre-trained with the method described in Section 3.4, serves as a good initialization for various downstream tasks.
72
+
73
+ # 3.3. Building feature volume
74
+
75
+ After completing feature extraction, we use average pooling to convert the point embeddings $\mathcal{E}$ into a 3D feature volume. We then employ a U-Net style 3D CNN to fill in the empty space and aggregate features from the surrounding points to obtain a dense 3D feature volume, denoted as $\mathcal{V}$.
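+ As an illustration of the average pooling step, here is a minimal sketch that scatters per-point features $\mathcal{E}$ into a dense voxel grid. The grid resolution, the scene bounding box, and the helper name `build_feature_volume` are assumptions of this sketch, and the subsequent U-Net style 3D CNN is only indicated by a comment.
+
+ ```python
+ import torch
+
+ def build_feature_volume(xyz, feats, bbox_min, bbox_max, res=64):
+     """Average-pool per-point features into a dense (C, res, res, res) volume.
+
+     xyz:   (N, 3) point positions, feats: (N, C) per-point features,
+     bbox_min / bbox_max: (3,) scene bounds (assumed known).
+     """
+     C = feats.shape[1]
+     # Map points to integer voxel indices inside the grid.
+     idx = ((xyz - bbox_min) / (bbox_max - bbox_min) * res).long().clamp_(0, res - 1)
+     flat = idx[:, 0] * res * res + idx[:, 1] * res + idx[:, 2]    # (N,)
+
+     vol = torch.zeros(res ** 3, C)
+     cnt = torch.zeros(res ** 3, 1)
+     vol.index_add_(0, flat, feats)                       # sum features per voxel
+     cnt.index_add_(0, flat, torch.ones(len(flat), 1))    # count points per voxel
+     vol = vol / cnt.clamp(min=1)                         # mean over points in each voxel
+     vol = vol.reshape(res, res, res, C).permute(3, 0, 1, 2)       # (C, D, H, W)
+     # A U-Net style 3D CNN (not shown) would then densify and aggregate this volume.
+     return vol
+ ```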
76
+
77
+ # 3.4. Pre-training with Neural Rendering
78
+
79
+ This section introduces how to reconstruct the implicit scene representation and render images differentiably. We first give a brief introduction to neural scene representation, then illustrate how to integrate it into our point cloud pretraining pipeline. Last, we show the differentiable rendering formulation to render color and depth images from the neural scene representation.
80
+
81
+ Brief introduction of neural scene representation. Neural scene representation aims to represent the scene geometry and appearance through a neural network. In this paper, we use the Signed Distance Function (SDF), which measures the distance between a query point and the surface boundary, to represent the scene geometry implicitly. SDF is capable of representing high-quality geometry details. For any query point of the scene, the neural network takes points features as input and outputs the corresponding SDF value and RGB value. In this way, the neural network captures both the geometry and appearance information of a specific scene. Following NeuS[55], the scene can be reconstructed as:
82
+
83
+ $$
84
+ s (\mathbf {p}) = \tilde {f} _ {s} (\mathbf {p}), \quad c (\mathbf {p}, \mathbf {d}) = \tilde {f} _ {c} (\mathbf {p}, \mathbf {d}), \tag {3}
85
+ $$
86
+
87
+ where $\tilde{f}_s$ is the SDF decoder and $\tilde{f}_c$ is the RGB color decoder. $\tilde{f}_s$ takes point location $\mathbf{p}$ as input, and predicts the SDF value $s$ . $\tilde{f}_c$ takes point location $\mathbf{p}$ and viewing direction $\mathbf{d}$ as input, and outputs the RGB color value $c$ . Both $\tilde{f}_s$ and $\tilde{f}_c$ are implemented by simple MLP networks.
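+ For concreteness, a minimal sketch of the two decoders in Eq. (3) is given below; the paper only states that they are simple MLPs, so the widths, depths, output activations, and class name here are assumptions. Eq. (4) extends these decoders by concatenating the interpolated volume feature $\mathcal{V}(\mathbf{p})$ to their inputs.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class SceneDecoders(nn.Module):
+     """Per-scene decoders of Eq. (3): an SDF head and a view-dependent color head."""
+     def __init__(self, hidden=128):
+         super().__init__()
+         self.sdf = nn.Sequential(                  # f~_s: p -> signed distance
+             nn.Linear(3, hidden), nn.ReLU(),
+             nn.Linear(hidden, hidden), nn.ReLU(),
+             nn.Linear(hidden, 1))
+         self.rgb = nn.Sequential(                  # f~_c: (p, d) -> color in [0, 1]
+             nn.Linear(6, hidden), nn.ReLU(),
+             nn.Linear(hidden, hidden), nn.ReLU(),
+             nn.Linear(hidden, 3), nn.Sigmoid())
+
+     def forward(self, p, d):
+         s = self.sdf(p).squeeze(-1)                # SDF value s(p)
+         c = self.rgb(torch.cat([p, d], dim=-1))    # color value c(p, d)
+         return s, c
+ ```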
88
+
89
+ Neural scene representation from point cloud input in Ponder. To predict a neural scene representation from the input point cloud, we change the scene formulation to take 3D feature volume $\mathcal{V}$ as an additional input. Specifically, given a 3D query point $\mathbf{p}$ and viewing direction $\mathbf{d}$ , the feature embedding $\mathcal{V}(\mathbf{p})$ can be extracted from the processed feature volume $\mathcal{V}$ by trilinear interpolation. The scene is then represented as:
90
+
91
+ $$
92
+ s (\mathbf {p}) = f _ {s} (\mathbf {p}, \mathcal {V} (\mathbf {p})), \quad c (\mathbf {p}, \mathbf {d}) = f _ {c} (\mathbf {p}, \mathbf {d}, \mathcal {V} (\mathbf {p})), \tag {4}
93
+ $$
94
+
95
+ where $\mathcal{V}$ is predicted by the point cloud encoder $f_{p}$ and encodes information of each scene. $f_{s}$ and $f_{c}$ are SDF and RGB decoders shared for all scenes. Different from Equation (3), which is used for storing single-scene information in the $\{\tilde{f}_s,\tilde{f}_c\}$ , the formulation in Equation (4) includes an extra input $\mathcal{V}(\mathbf{p})$ to facilitate representing the information of multiple scenes.
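+ The feature lookup $\mathcal{V}(\mathbf{p})$ can be implemented with standard trilinear interpolation. The following sketch makes the convention explicit; it is an illustration rather than the paper's code, and the grid bounds and axis ordering are assumptions.
+
+ ```python
+ import torch
+
+ def query_volume(vol, p, bbox_min, bbox_max):
+     """Trilinearly interpolate the feature volume V at query points p.
+
+     vol: (C, R, R, R) dense feature volume, p: (N, 3) world-space points.
+     """
+     C, R = vol.shape[0], vol.shape[1]
+     # Continuous voxel coordinates in [0, R-1].
+     g = (p - bbox_min) / (bbox_max - bbox_min) * (R - 1)
+     g = g.clamp(0, R - 1 - 1e-6)
+     g0 = g.floor().long()                          # lower corner indices
+     w = g - g0.float()                             # fractional offsets in [0, 1)
+
+     out = torch.zeros(p.shape[0], C)
+     for dx in (0, 1):
+         for dy in (0, 1):
+             for dz in (0, 1):
+                 idx = g0 + torch.tensor([dx, dy, dz])
+                 corner = vol[:, idx[:, 0], idx[:, 1], idx[:, 2]].t()     # (N, C)
+                 weight = ((w[:, 0] if dx else 1 - w[:, 0]) *
+                           (w[:, 1] if dy else 1 - w[:, 1]) *
+                           (w[:, 2] if dz else 1 - w[:, 2]))
+                 out += weight.unsqueeze(-1) * corner
+     return out  # (N, C): V(p), fed to f_s / f_c together with p (and d)
+ ```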
96
+
97
+ Differentiable rendering. Given the dense 3D volume $\mathcal{V}$ and viewing point, we use differentiable volume rendering to render the projected color images and depth images.
98
+
99
+ ![](images/c64d29cfa76459d5d68352bb5fe0553bd8b1aa9ed0d97cdd8f2074d1622a7e89.jpg)
100
+ Figure 3. The pipeline of our point cloud pre-training via neural rendering (Ponder). Given multi-view RGB-D images, we first construct the point cloud by back-projection, then use a point cloud encoder $f_{p}$ to extract per-point features $\mathcal{E}$ . $\mathcal{E}$ are organized to a 3D feature volume by average pooling and then processed by the 3D convolution layer. Finally, the 3D feature volume is rendered to multi-view RGB-D images via a differentiable neural rendering, which are compared with the input multi-view RGB-D images as the supervision.
101
+
102
+ For each rendering ray with camera origin $\mathbf{o}$ and viewing direction $\mathbf{d}$ , we sample a set of ray points $\{\mathbf{p}(z)|\mathbf{p}(z) = \mathbf{o} + z\mathbf{d},z\in [z_n,z_f]\}$ along the ray, where $z$ denotes the length of the ray. Note that $\mathbf{o}$ and $\mathbf{d}$ can be calculated from paired camera parameters $\{(K_i,\pmb {\xi}_i)\}$ . $z_{n}$ and $z_{f}$ denote the near and far bounds of the ray. Different from previous methods [35, 55], we automatically determine $\{z_{n},z_{f}\}$ by the ray intersection with the 3D feature volume box, using axis-aligned bounding boxes (AABB) algorithm. Then, the ray color and depth value can be aggregated as:
103
+
104
+ $$
105
+ \hat {C} = \int_ {z _ {n}} ^ {z _ {f}} \omega (z) c (\mathbf {p} (z), \mathbf {d}) d z, \tag {5}
106
+ $$
107
+
108
+ $$
109
+ \hat {D} = \int_ {z _ {n}} ^ {z _ {f}} \omega (z) z d z, \tag {6}
110
+ $$
111
+
112
+ where $\hat{C}$ is the ray color and $\hat{D}$ is the ray depth. We follow NeuS [55] to build an unbiased and occlusion-aware weight function $\omega(z)$:
113
+
114
+ $$
115
+ \omega(z) = T(z) \cdot \rho(z). \tag{7}
116
+ $$
117
+
118
+ $T(z)$ measures the accumulated transmittance from $z_{n}$ to $z$ and $\rho (z)$ is the occupied density function which are defined as:
119
+
120
+ $$
121
+ T(z) = \exp \left(- \int_{z_{n}}^{z} \rho(u) \, d u\right), \tag{8}
122
+ $$
123
+
124
+ $$
125
+ \rho (z) = \max \left(\frac {- \frac {d \Phi_ {h}}{d z} (s (\mathbf {p} (z)))}{\Phi_ {h} (s (\mathbf {p} (z)))}, 0\right). \tag {9}
126
+ $$
127
+
128
+ $\Phi_h(x)$ is the sigmoid function $\Phi_h(x) = (1 + e^{-hx})^{-1}$, where $h^{-1}$ is treated as a trainable parameter; $h^{-1}$ approaches zero as the network training converges. In practice, we compute the integrals with a numerical approximation by quadrature. We make the decoder networks $\{f_s,f_c\}$ relatively smaller than those in [35, 55] to accelerate the training process.
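+ Putting Eqs. (5)-(9) together, a common NeuS-style discretization of the rendering integral looks roughly as follows. This is a simplified sketch: the ray direction is assumed to have no zero components for the slab test, the sharpness $h$ is fixed here although the paper treats $h^{-1}$ as trainable, and `sdf_fn` / `rgb_fn` stand for $f_s$ and $f_c$ evaluated with the interpolated volume features.
+
+ ```python
+ import torch
+
+ def ray_aabb(o, d, box_min, box_max):
+     """Slab-test a ray against the feature-volume AABB to get (z_near, z_far)."""
+     t1 = (box_min - o) / d
+     t2 = (box_max - o) / d
+     z_near = torch.minimum(t1, t2).max()
+     z_far = torch.maximum(t1, t2).min()
+     return z_near.clamp(min=0.0), z_far
+
+ def render_ray(o, d, sdf_fn, rgb_fn, box_min, box_max, n_samples=128, h=50.0):
+     """NeuS-style quadrature of Eqs. (5)-(9) along one ray (simplified sketch)."""
+     z_near, z_far = ray_aabb(o, d, box_min, box_max)
+     z = torch.linspace(0.0, 1.0, n_samples) * (z_far - z_near) + z_near   # (S,)
+     p = o + z.unsqueeze(-1) * d                                           # (S, 3)
+
+     s = sdf_fn(p)                                  # signed distances, (S,)
+     c = rgb_fn(p, d.expand_as(p))                  # per-sample colors, (S, 3)
+
+     phi = torch.sigmoid(h * s)                     # Phi_h(s) with a fixed sharpness h
+     # Discrete opacity between consecutive samples (NeuS-style alpha).
+     alpha = ((phi[:-1] - phi[1:]) / (phi[:-1] + 1e-6)).clamp(0.0, 1.0)    # (S-1,)
+     # Accumulated transmittance T_i = prod_{j<i} (1 - alpha_j).
+     T = torch.cumprod(torch.cat([torch.ones(1), 1.0 - alpha + 1e-6]), dim=0)[:-1]
+     w = T * alpha                                  # rendering weights omega(z)
+
+     C_hat = (w.unsqueeze(-1) * c[:-1]).sum(dim=0)  # Eq. (5): rendered color
+     D_hat = (w * z[:-1]).sum(dim=0)                # Eq. (6): rendered depth
+     return C_hat, D_hat
+ ```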
129
+
130
+ Rendered examples. The rendered color images and depth images are shown in Figure 4. As shown in the figure, even though the input point cloud is pretty sparse, our
131
+
132
+ ![](images/433ee8d28cdd66283a90d8aaf9c08facf50aa1073056d26d2f50f03ca649be6a.jpg)
133
+
134
+ ![](images/8272d1ee72fd7a58f5a77943a7a8a09ae613bd5100374aca6b2a5bd0102b317c.jpg)
135
+
136
+ ![](images/8efa32fcf345fa55d83b0512c1db36e3a4517af16e39a22946b451f23a284ee8.jpg)
137
+
138
+ ![](images/80a7dd6265038a597a179c20496897c5bf37838c227a5c86bedcd75e5d301dea.jpg)
139
+ Projected Points
140
+ Rendered Color
141
+ Figure 4. Rendered images by Ponder on the ScanNet validation set. The projected point clouds are visualized in the first column. Even though input point clouds are very sparse, our model is still capable of rendering color and depth images similar to the reference images.
142
+
143
+ ![](images/b444ab1eeed2056f2989f5fd1a48e720ddb116fa6913a9d2bd996097f10de672.jpg)
144
+
145
+ ![](images/7a189edf9e9fce5a2b597b13ae1ecce1375b249f77ccfd2cd3b3592f241ae7df.jpg)
146
+
147
+ ![](images/10421000e47da06cb463951aa056dc16ae70a15eda5d0ce22923fc9f2f9e1afb.jpg)
148
+ Reference Color
149
+
150
+ ![](images/b46013c2af78daa4aef9f6705b1e923cf5e3cda6c4f79529794d821ac77c065e.jpg)
151
+
152
+ ![](images/3c878fad94f277504ee8cccadb42108244e662f3e088a1209279b4e5fef8b73f.jpg)
153
+
154
+ ![](images/6cfc34d64fd0bf5b642e96041934c6fbc27510aa00b4c845822ecffc53d4efb9.jpg)
155
+ Rendered Depth
156
+
157
+ ![](images/21e3d23096f1a5e36b2deaa5678a13d0e2868a295f329dedacb10ea5e3357419.jpg)
158
+
159
+ ![](images/6b39fd92383b4220a9dcddfa4a54c0dd4e49eea480bc315efab590f47fbeb8d8.jpg)
160
+
161
+ ![](images/48d20c99b50a20caeb2346631f1a88cd667c5d850fd4cc028fadfc84db78c08d.jpg)
162
+ Reference Depth
163
+
164
+ method is still capable of rendering color and depth images similar to the reference image.
165
+
166
+ # 3.5. Pre-training loss
167
+
168
+ We leverage the input $\{I_i, D_i\}$ to supervise neural scene representation reconstruction. The total loss function contains five parts,
169
+
170
+ $$
171
+ L = \lambda_ {c} L _ {c} + \lambda_ {d} L _ {d} + \lambda_ {e} L _ {e} + \lambda_ {s} L _ {s} + \lambda_ {f} L _ {f}, \tag {10}
172
+ $$
173
+
174
+ which are loss functions responsible for color supervision $L_{c}$ , depth supervision $L_{d}$ , Eikonal regularization $L_{e}$ , near-surface SDF supervision $L_{s}$ , and free space SDF supervision $L_{f}$ . These loss functions are illustrated in the following section.
175
+
176
+ Color and depth loss. $L_{c}$ and $L_{d}$ are the color loss and depth loss, which measure consistency between the rendered pixels and the ground-truth pixels. Assume that we sample $N_{r}$ rays for each image and $N_{p}$ points for each ray,
177
+
178
+ then the $L_{c}$ and $L_{d}$ can be written as:
179
+
180
+ $$
181
+ L _ {c} = \frac {1}{N _ {r}} \sum_ {i} ^ {N _ {r}} | | \hat {C} - C | | _ {2} ^ {2} \tag {11}
182
+ $$
183
+
184
+ $$
185
+ L _ {d} = \frac {1}{N _ {r}} \sum_ {i} ^ {N _ {r}} \left\| \hat {D} - D \right\| _ {2} ^ {2}, \tag {12}
186
+ $$
187
+
188
+ where $C$ and $D$ are the ground-truth color and depth respectively for each ray, $\hat{C}$ and $\hat{D}$ are their corresponding rendered ones in Eq. (5) and Eq. (6).
189
+
190
+ Loss for SDF regularization. $L_{e}$ is the widely used Eikonal loss [16] for SDF regularization:
191
+
192
+ $$
193
+ L _ {e} = \frac {1}{N _ {r} N _ {p}} \sum_ {i, j} ^ {N _ {r}, N _ {p}} \left(| \nabla s \left(\mathbf {p} _ {i, j}\right) | - 1\right) ^ {2}, \tag {13}
194
+ $$
195
+
196
+ where $\nabla s(\mathbf{p}_{i,j})$ denotes the gradient of SDF $s$ at location $\mathbf{p}_{i,j}$ . Since SDF is a distance measure, $L_{e}$ encourages this distance to have a unit norm gradient at the query point.
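+ In practice the Eikonal term can be computed with automatic differentiation. A minimal sketch (assuming `sdf_fn` evaluates each query point independently) is:
+
+ ```python
+ import torch
+
+ def eikonal_loss(sdf_fn, p):
+     """Eq. (13): encourage the SDF gradient to have unit norm at sampled points p (N, 3)."""
+     p = p.clone().requires_grad_(True)
+     s = sdf_fn(p)                                                    # (N,)
+     # Summing is valid here because each s_i depends only on its own p_i.
+     grad = torch.autograd.grad(s.sum(), p, create_graph=True)[0]     # (N, 3)
+     return ((grad.norm(dim=-1) - 1.0) ** 2).mean()
+ ```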
197
+
198
+ Near-surface and free space loss for SDF. To stabilize the training and improve the reconstruction performance, similar to iSDF [40] and GO-Surf [54], we add additional approximate SDF supervision to help the SDF estimation. Specifically, for near-surface points, the difference between rendered depth and ground-truth depth can be viewed as the pseudo-SDF ground-truth supervision; for points far from the surface, a free space loss is used to regularize the irregular SDF value additionally. To calculate the approximate SDF supervision, we first define an indicator $b(z)$ for each sampled ray point with ray length $z$ and corresponding GT depth $D$ :
199
+
200
+ $$
201
+ b (z) = D - z. \tag {14}
202
+ $$
203
+
204
+ $b(z)$ can be viewed as the approximate SDF value, which is credible only when $b(z)$ is small. Let $t$ be a human-defined threshold, which is set as 0.05 in this paper. For sampled ray points that satisfy $b(z) \leq t$ , we leverage the near-surface SDF loss to constrain the SDF prediction $s(z_{i,j})$ :
205
+
206
+ $$
207
+ L _ {s} = \frac {1}{N _ {r} N _ {p}} \sum_ {i, j} ^ {N _ {r}, N _ {p}} | s \left(z _ {i, j}\right) - b \left(z _ {i, j}\right) |. \tag {15}
208
+ $$
209
+
210
+ For the remaining sampled ray points, we use a free space loss:
211
+
212
+ $$
213
+ L _ {f} = \frac {1}{N _ {r} N _ {p}} \sum_ {i, j} ^ {N _ {r}, N _ {p}} \max \left(0, e ^ {- \alpha \cdot s \left(z _ {i, j}\right)} - 1, s \left(z _ {i, j}\right) - b \left(z _ {i, j}\right)\right), \tag {16}
214
+ $$
215
+
216
+ where $\alpha$ is set to 5, following [40, 54]. Note that, due to noisy depth images, we only apply $L_{s}$ and $L_{f}$ on rays that have valid depth values.
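+ A minimal sketch of Eqs. (14)-(16) over a batch of rays is given below; the tensor shapes and the assumption that rays with invalid depth have already been filtered out are choices of this illustration, not details from the paper.
+
+ ```python
+ import torch
+
+ def sdf_losses(s, z, depth_gt, t=0.05, alpha=5.0):
+     """Approximate SDF supervision along rays with valid ground-truth depth.
+
+     s, z:     (R, P) predicted SDF and ray lengths at the sampled points
+     depth_gt: (R,) ground-truth depth per ray
+     """
+     b = depth_gt.unsqueeze(-1) - z                          # Eq. (14): b(z) = D - z
+     near = b <= t                                           # near-surface samples
+     L_s = (s[near] - b[near]).abs().mean()                  # Eq. (15)
+     s_f, b_f = s[~near], b[~near]                           # remaining free-space samples
+     L_f = torch.clamp_min(                                  # Eq. (16): hinge in free space
+         torch.maximum(torch.exp(-alpha * s_f) - 1.0, s_f - b_f), 0.0).mean()
+     return L_s, L_f
+ ```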
217
+
218
+ In our experiments, we use loss weights similar to GO-Surf [54], setting $\lambda_{c}$ to 10.0, $\lambda_{d}$ to 1.0, $\lambda_{s}$ to 10.0, and $\lambda_{f}$ to 1.0. We observe that the Eikonal term in our method can easily lead to over-smooth reconstructions, so we use a small weight of 0.01 for the Eikonal loss.
219
+
220
+ # 4. Experiments
221
+
222
+ # 4.1. Pre-training
223
+
224
+ Datasets. We use ScanNet [11] RGB-D images as our pre-training data. ScanNet is a widely used real-world indoor dataset, which contains more than 1500 indoor scenes. Each scene is carefully scanned by an RGB-D camera, leading to about 2.5 million RGB-D frames in total. We follow the same train/val split as VoteNet [43].
225
+
226
+ Data preparation. During pre-training, each mini-batch of size 8 contains point clouds from 8 scenes. The point cloud of a scene, which serves as the input to the point cloud encoder in our approach, is back-projected from 5 RGB-D frames of the scene's video sampled at an interval of 20 frames. The same 5 frames are also used as the supervision for the network. We randomly down-sample the input point cloud to 20,000 points and follow the masking strategy used in MaskPoint [30].
227
+
228
+ Implementation details. We train the proposed pipeline for 100 epochs using an AdamW optimizer [34] with a weight decay of 0.05. The learning rate is initialized to 1e-4 with an exponential decay schedule. For the rendering process, we randomly choose 128 rays for each image and sample 128 points for each ray. More implementation details can be found in the supplementary material.
229
+
230
+ # 4.2. Transfer Learning
231
+
232
+ In contrast to previous methods, our approach is able to encode rich geometry and appearance cues into the point cloud representations via neural rendering. These strengths make it flexible to be applied to various tasks, including not only 3D semantic segmentation and 3D detection tasks but also low-level surface reconstruction and image synthesis.
233
+
234
+ # 4.2.1 High-level 3D Tasks
235
+
236
+ 3D object detection. We select two representative approaches, Votenet [43] and H3DNet [68], as the baselines. VoteNet leverages a voting mechanism to obtain object centers, which are used for generating 3D bounding box proposals. By introducing a set of geometric primitives, H3DNet achieves a significant improvement in accuracy compared to previous methods. Two datasets are applied to verify the effectiveness of our method: ScanNet[11] and SUN RGB-D[49]. Different from ScanNet, which contains fully reconstructed 3D scenes, SUN RGB-D is a single-
237
+
238
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Detection Model</td><td rowspan="2">Pre-training Type</td><td rowspan="2">Pre-training Epochs</td><td colspan="2">ScanNet</td><td colspan="2">SUN RGB-D</td></tr><tr><td>AP50↑</td><td>AP25↑</td><td>AP50↑</td><td>AP25↑</td></tr><tr><td>3DETR[37]</td><td>3DETR</td><td>-</td><td>-</td><td>37.5</td><td>62.7</td><td>30.3</td><td>58.0</td></tr><tr><td>Point-BERT[64]</td><td>3DETR</td><td>Completion</td><td>300</td><td>38.3</td><td>61.0</td><td>-</td><td>-</td></tr><tr><td>MaskPoint[30]</td><td>3DETR</td><td>Completion</td><td>300</td><td>40.6</td><td>63.4</td><td>-</td><td>-</td></tr><tr><td>VoteNet [43]</td><td>VoteNet</td><td>-</td><td>-</td><td>33.5</td><td>58.6</td><td>32.9</td><td>57.7</td></tr><tr><td>STRL[23]</td><td>VoteNet</td><td>Contrast</td><td>100</td><td>38.4</td><td>59.5</td><td>35.0</td><td>58.2</td></tr><tr><td>RandomRooms[46]</td><td>VoteNet</td><td>Contrast</td><td>300</td><td>36.2</td><td>61.3</td><td>35.4</td><td>59.2</td></tr><tr><td>PointContrast[57]</td><td>VoteNet</td><td>Contrast</td><td>-</td><td>38.0</td><td>59.2</td><td>34.8</td><td>57.5</td></tr><tr><td>PC-FractalDB[60]</td><td>VoteNet</td><td>Contrast</td><td>-</td><td>38.3</td><td>61.9</td><td>33.9</td><td>59.4</td></tr><tr><td>DepthContrast[67]</td><td>VoteNet</td><td>Contrast</td><td>1000</td><td>39.1</td><td>62.1</td><td>35.4</td><td>60.4</td></tr><tr><td>IAE[61]</td><td>VoteNet</td><td>Completion</td><td>1000</td><td>39.8</td><td>61.5</td><td>36.0</td><td>60.4</td></tr><tr><td>Ponder</td><td>VoteNet</td><td>Rendering</td><td>100</td><td>41.0 (+7.5)</td><td>63.6 (+5.0)</td><td>36.6 (+3.7)</td><td>61.0 (+3.3)</td></tr></table>
239
+
240
+ Table 1. 3D object detection $AP_{25}$ and $AP_{50}$ on ScanNet and SUN RGB-D. VoteNet[43] is a baseline model. Purple numbers indicate improvements over the corresponding baseline. The DepthContrast[67] and Point-BERT[64] results are adopted from IAE[61] and MaskPoint[30]. Ponder outperforms both VoteNet-based and 3DETR-based point cloud pre-training methods with fewer training epochs.
241
+
242
+ <table><tr><td>Method</td><td>AP50↑</td><td>AP25↑</td></tr><tr><td>VoteNet[43]</td><td>33.5</td><td>58.7</td></tr><tr><td>3DETR[37]</td><td>37.5</td><td>62.7</td></tr><tr><td>3DETR-m[37]</td><td>47.0</td><td>65.0</td></tr><tr><td>H3DNet[68]</td><td>48.1</td><td>67.2</td></tr><tr><td>Ponder+H3DNet</td><td>50.9 (+2.8)</td><td>68.4 (+1.2)</td></tr></table>
243
+
244
+ Table 2. 3D object detection. $AP_{25}$ and $AP_{50}$ on ScanNet. Ponder significantly boosts the detection accuracy of H3DNet by a margin of +2.8 and +1.2 for $AP_{50}$ and $AP_{25}$ , respectively.
245
+
246
+ view RGB-D dataset with 3D bounding box annotations. It has 10,335 RGB-D images for 37 object categories. For pre-training, we use PointNet++ as the point cloud encoder $f_{p}$ , which is identical to the backbone used in VoteNet and H3DNet. We pre-train the point cloud encoder on the ScanNet dataset and transfer the weight as the downstream initialization. Following [43], we use average precision with 3D detection IoU threshold 0.25 and threshold 0.5 as the evaluation metrics.
247
+
248
+ The 3D detection results are shown in Table 1. Our method improves the VoteNet baseline without pre-training by a large margin, boosting $\mathrm{AP}_{50}$ by $7.5\%$ and $3.7\%$ for ScanNet and SUN RGB-D, respectively. IAE [61] is a pre-training method that represents the inherent 3D geometry in a continuous manner. Our learned point cloud representation achieves higher accuracy because it is able to recover both the geometry and appearance of the scene. The $\mathrm{AP}_{50}$ and $\mathrm{AP}_{25}$ of our method are higher than those of IAE by $1.2\%$ and $2.1\%$ on ScanNet, respectively. Besides, we have observed that our method surpasses the recent point cloud pre-training approach, MaskPoint [30], even when using a less sophisticated backbone (PointNet++ vs. 3DETR), as presented in Table 1.
249
+
250
+ To further verify the effectiveness of Ponder, we also apply it to a much stronger baseline, H3DNet. As shown in Table 2, our method surpasses H3DNet by $+2.8$ and $+1.2$ for $\mathrm{AP}_{50}$ and $\mathrm{AP}_{25}$, respectively.
251
+
252
+ 3D semantic segmentation. 3D semantic segmentation is another fundamental scene understanding task. We select one of the top-performing backbones, MinkUNet [10], for transfer learning. MinkUNet leverages 3D sparse convolutions to extract effective 3D scene features. For pre-training, we use MinkUNet as the point cloud encoder $f_{p}$ and pre-train the model on ScanNet. We report the fine-tuning results on the ScanNet dataset with the mean IoU of the validation set as the evaluation metric. Table 3 shows the quantitative results of Ponder with MinkUNet. The results demonstrate that Ponder is effective in improving the semantic segmentation performance, achieving a significant improvement of 1.3 mIoU.
253
+
254
+ # 4.2.2 Low-level 3D Tasks
255
+
256
+ Low-level 3D tasks like scene reconstruction and image synthesis are attracting increasing attention due to their wide applications. However, models for these tasks are usually trained from scratch, and a pre-training method that provides a good initialization is sorely needed. Ours is the first pre-training work to demonstrate strong transfer ability on such low-level 3D tasks.
257
+
258
+ 3D scene reconstruction. 3D scene reconstruction task aims to recover the scene geometry, e.g. mesh, from the point cloud input. We choose ConvONet[42] as the baseline model, whose architecture is widely adopted in [9, 31, 62]. Following the same setting as ConvONet, we conduct experiments on the Synthetic Indoor Scene Dataset
259
+
260
+ <table><tr><td>Method</td><td>mIoU ↑</td></tr><tr><td>PointNet++[44]</td><td>53.5</td></tr><tr><td>KPConv[51]</td><td>69.2</td></tr><tr><td>SparseConvNet[15]</td><td>69.3</td></tr><tr><td>PT[69]</td><td>70.6</td></tr><tr><td>MinkUnet[10]</td><td>72.2</td></tr><tr><td>Ponder+MinkUnet</td><td>73.5 (+1.3)</td></tr></table>
261
+
262
+ Table 3. 3D segmentation mIoU on ScanNet dataset.
263
+
264
+ <table><tr><td>Method</td><td>Encoder</td><td>IoU↑</td><td>NC↑</td><td>F-Score↑</td></tr><tr><td>ConvONet[42]</td><td>PointNet++</td><td>77.8</td><td>88.7</td><td>90.6</td></tr><tr><td>IAE[61]</td><td>PointNet++</td><td>75.7</td><td>88.7</td><td>91.0</td></tr><tr><td>Ponder</td><td>PointNet++</td><td>80.2 (+2.4)</td><td>89.3</td><td>92.0</td></tr></table>
265
+
266
+ Table 4. 3D scene reconstruction IoU, NC, and F-Score on SISD dataset with PointNet++ model. Ponder is able to boost the reconstruction performance.
267
+
268
+ (SISD)[42], which is a synthetic dataset and contains 5000 scenes with multiple ShapeNet [5] objects. To make a fair comparison with IAE [61], we use the same VoteNet-style PointNet++ as the encoder of ConvONet, which downsamples the original point cloud to 1024 points. Following [42], we use Volumetric IoU, Normal Consistency (NC), and F-Score [50] with the threshold value of $1\%$ as the evaluation metrics.
269
+
270
+ The results are shown in Table 4. Compared to the baseline ConvONet model with PointNet++, IAE is not able to boost the reconstruction results, while the proposed approach improves the reconstruction quality ($+2.4\%$ IoU). The results show the effectiveness of Ponder for the 3D reconstruction task.
271
+
272
+ Image synthesis from point clouds. We also validate the effectiveness of our method on another low-level task of image synthesis from point clouds. We use Point-NeRF [59] as the baseline. Point-NeRF uses neural 3D point clouds with associated neural features to render images. It can be used both in a generalizable setting across various scenes and in a single-scene fitting setting. In our experiments, we mainly focus on the generalizable setting of Point-NeRF. We replace the 2D image features of Point-NeRF with point features extracted by a DGCNN network. Following the same setting as Point-NeRF, we use DTU [24] as the evaluation dataset. DTU is a multi-view stereo dataset containing 80 scenes with paired images and camera poses. We transfer both the DGCNN encoder and the color decoder as the weight initialization of Point-NeRF. We use PSNR as the metric for synthesized image quality evaluation.
273
+
274
+ The results are shown in Figure 5. By leveraging the pretrained weights of our method, the image synthesis model is able to converge faster with fewer training steps and achieve better final image quality than training from scratch.
275
+
276
+ ![](images/5c3b67ee9fbb32f109446597561925b5638b96ee1e3dd9e341f6d5c086680088.jpg)
277
+ Figure 5. Comparison of image synthesis from point clouds. Compared with training from scratch, our Ponder model is able to converge faster and achieve better image synthesis results.
278
+
279
+ # 4.3. Ablation study
280
+
281
+ In this section, we conduct a series of ablation experiments to evaluate the effectiveness of our proposed approach. All experiments are conducted on ScanNet and SUN RGB-D datasets. We use 3D object detection for evaluation due to its simplicity.
282
+
283
+ Influence of Rendering Targets. The rendering part of our method contains two items: RGB color image and depth image. We study the influence of each item with the transferring task of 3D detection. The results are presented in Table 5. Combining depth and color images for reconstruction shows the best detection results. In addition, using depth reconstruction presents better performance than color reconstruction for 3D detection.
284
+
285
+ Influence of mask ratio. To augment point cloud data, we employ random masking as one of the augmentation methods, which divides the input point cloud into 2048 groups of 64 points each. In this ablation study, we evaluate the performance of our method with different mask ratios, ranging from $0\%$ to $90\%$, on the ScanNet and SUN RGB-D datasets, and report the results in Table 6. Notably, we find that even when no dividing and masking strategy is applied ($0\%$), our method achieves competitive $AP_{50}$ scores of 40.7 and 37.3 on ScanNet and SUN RGB-D, respectively. Our method achieves the best performance on ScanNet with a mask ratio of $75\%$ and an $AP_{50}$ of 41.7. Overall, these results suggest that our method is robust to the mask-ratio hyper-parameter and can still achieve competitive performance without any mask operation.
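+ A minimal sketch of the dividing-and-masking augmentation described above is given below. Choosing group centers by uniform random sampling, using brute-force distances, and taking the union of the surviving groups as the visible input are simplifying assumptions of this sketch.
+
+ ```python
+ import numpy as np
+
+ def mask_point_cloud(points, n_groups=2048, group_size=64, mask_ratio=0.75, seed=0):
+     """Split the cloud into groups and randomly drop a ratio of them.
+
+     points: (N, 6) array of xyz + rgb, with N >= n_groups.
+     """
+     rng = np.random.default_rng(seed)
+     centers = points[rng.choice(len(points), n_groups, replace=False), :3]   # (G, 3)
+     # Assign each group its `group_size` nearest points (brute force for clarity).
+     d2 = ((points[None, :, :3] - centers[:, None, :]) ** 2).sum(-1)          # (G, N)
+     nearest = np.argsort(d2, axis=1)[:, :group_size]                         # (G, S)
+     keep_groups = rng.random(n_groups) >= mask_ratio                         # visible groups
+     visible_idx = np.unique(nearest[keep_groups].reshape(-1))
+     return points[visible_idx]
+ ```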
286
+
287
+ Influence of 3D feature volume resolution. In our method, Ponder constructs a 3D feature volume with a resolution of [16, 32, 64], which is inspired by recent progress in multi-resolution 3D reconstruction. However, building such a high-resolution feature volume can consume significant GPU memory. To investigate the effect of feature volume resolution, we conduct experiments with different resolutions and report the results in Table 7. From the results, we
288
+
289
+ observe that even with a smaller feature volume resolution of 16, Ponder can still achieve competitive performance on downstream tasks.
290
+
291
+ <table><tr><td>Supervision</td><td>ScanNet</td><td>SUN RGB-D</td></tr><tr><td>VoteNet</td><td>33.5</td><td>32.9</td></tr><tr><td>+Depth</td><td>40.9 (+7.4)</td><td>36.1 (+3.2)</td></tr><tr><td>+Color</td><td>40.5 (+7.0)</td><td>35.8 (+2.9)</td></tr><tr><td>+Depth+Color</td><td>41.0 (+7.5)</td><td>36.6 (+3.7)</td></tr></table>
292
+
293
+ Table 5. Ablation study for supervision type. 3D detection $AP_{50}$ on ScanNet and SUN RGB-D. Combining color supervision and depth supervision can lead to better detection performance than using a single type of supervision.
294
+
295
+ <table><tr><td>Mask ratio</td><td>ScanNet</td><td>SUN RGB-D</td></tr><tr><td>VoteNet</td><td>33.5</td><td>32.9</td></tr><tr><td>0%</td><td>40.7 (+7.2)</td><td>37.3 (+4.4)</td></tr><tr><td>25%</td><td>40.7 (+7.2)</td><td>36.2 (+3.3)</td></tr><tr><td>50%</td><td>40.3 (+6.8)</td><td>36.9 (+4.0)</td></tr><tr><td>75%</td><td>41.7 (+8.2)</td><td>37.0 (+4.1)</td></tr><tr><td>90%</td><td>41.0 (+7.5)</td><td>36.6 (+3.7)</td></tr></table>
296
+
297
+ Table 6. Ablation study for mask ratio. $3D$ detection $AP_{50}$ on ScanNet and SUN RGB-D.
298
+
299
+ <table><tr><td>Resolution</td><td>ScanNet</td><td>SUN RGB-D</td></tr><tr><td>VoteNet</td><td>33.5</td><td>32.9</td></tr><tr><td>16</td><td>40.7 (+7.2)</td><td>36.6 (+3.7)</td></tr><tr><td>16+32+64</td><td>41.0 (+7.5)</td><td>36.6 (+3.7)</td></tr></table>
300
+
301
+ Table 7. Ablation study for feature volume resolution. 3D detection $AP_{50}$ on ScanNet and SUN RGB-D.
302
+
303
+ <table><tr><td>View number</td><td>ScanNet</td><td>SUN RGB-D</td></tr><tr><td>VoteNet</td><td>33.5</td><td>32.9</td></tr><tr><td>1 view</td><td>40.1 (+6.6)</td><td>35.4 (+2.5)</td></tr><tr><td>3 views</td><td>40.8 (+7.3)</td><td>36.0 (+3.1)</td></tr><tr><td>5 views</td><td>41.0 (+7.5)</td><td>36.6 (+3.7)</td></tr></table>
304
+
305
+ Table 8. Ablation study for view number. $3D$ detection $AP_{50}$ on ScanNet and SUN RGB-D. Using multi-view supervision for point cloud pre-training can achieve better performance.
306
+
307
+ Number of input RGB-D views. Our method utilizes $N$ RGB-D images, where $N$ is the input view number. We study the influence of $N$ and conduct experiments on 3D detection, as shown in Table 8. We change the number of input views while keeping the number of scenes per batch fixed at 8. Using multi-view supervision helps to reduce single-view ambiguity. Similar observations are also found in the multi-view reconstruction task [32]. Compared with the
308
+
309
+ ![](images/45e560536bb21d5eeba72d58ad61f2b3a7aa492620525cc3e4faf6330ede873c.jpg)
310
+
311
+ ![](images/f086b3d0328e5e053e85c6e6ed7a057fe63760cf3a3667c7641dda86bd29557f.jpg)
312
+
313
+ ![](images/3bf21379569846b7595452370118df58387e7d4cac6fa92f42c9a8a0fc637dca.jpg)
314
+ Input Point Cloud
315
+
316
+ ![](images/5333b0b7546b8d8b5309e42061970c9c829666785d79e72bb458dca90794cd79.jpg)
317
+
318
+ ![](images/1b7a8b7a71341b57eec61a54efbba5408324267d9ace3afb12a9d8239c6fcd7e.jpg)
319
+
320
+ ![](images/c79e8fee2082416403a3f8ba843eba281989cd113a862d2651d7f5d199120585.jpg)
321
+ Reconstruction
322
+
323
+ ![](images/a61226c152cf74b7ebab882f964b6b0a078b4366f5dcfe8974eb377ebff7b72a.jpg)
324
+
325
+ ![](images/a2acce0ec2f9857bbe9aa725648e698f053ebd35c6b3aa7244d3409f226b1dac.jpg)
326
+
327
+ ![](images/1b4cca00e0d15ee3fab6b2f8920fd44123f513a31390e87534d0ddd474cb6781.jpg)
328
+ Input Point Cloud
329
+ Figure 6. Reconstructed surface by Ponder. Our pre-training method can be easily integrated into the task of 3D reconstruction. Despite the sparsity of the input point cloud (only $2\%$ points are used), our method can still recover precise geometric details.
330
+
331
+ ![](images/ddb92f5dc2af5e9cfe7449c8c194a2279bef104cc1e09288581e19414fe3e04d.jpg)
332
+
333
+ ![](images/1c0ac20708e3e3b9791c53b3d4c35d4c45ec5b073cda8d49b334c6dc11960f95.jpg)
334
+
335
+ ![](images/fa891cff2583f624cacdfb445793d64e279dd27f6fc7f3b66bdfb68a2ac0b8fa.jpg)
336
+ Reconstruction
337
+
338
+ single view, multiple views achieve higher accuracy, boosting $\mathrm{AP}_{50}$ by $0.9\%$ and $1.2\%$ for ScanNet and SUN RGB-D datasets, respectively.
339
+
340
+ # 4.4. Other applications
341
+
342
+ The pre-trained model from our Ponder pipeline can itself be directly used for surface reconstruction from sparse point clouds. Specifically, after learning the neural scene representation, we query SDF values in 3D space and leverage Marching Cubes [33] to extract the surface. We show the reconstruction results in Figure 6. The results show that even though the input is a sparse point cloud from a complex scene, our method is able to recover high-fidelity meshes. See the supplementary material for more image synthesis and 3D reconstruction results.
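+ Surface extraction from the learned SDF can be sketched as follows; the grid resolution and the use of scikit-image's marching cubes are illustrative choices, and in practice the SDF queries would be chunked to bound memory.
+
+ ```python
+ import numpy as np
+ from skimage import measure
+
+ def extract_mesh(sdf_fn, bbox_min, bbox_max, res=128):
+     """Query the learned SDF on a dense grid and extract its zero level set.
+
+     sdf_fn: batched SDF query, e.g. the pre-trained decoder f_s with interpolated
+             volume features; bbox_min / bbox_max: (3,) scene bounds.
+     """
+     xs = [np.linspace(bbox_min[i], bbox_max[i], res) for i in range(3)]
+     grid = np.stack(np.meshgrid(*xs, indexing="ij"), axis=-1).reshape(-1, 3)
+     sdf = sdf_fn(grid).reshape(res, res, res)
+     spacing = [(bbox_max[i] - bbox_min[i]) / (res - 1) for i in range(3)]
+     verts, faces, normals, _ = measure.marching_cubes(sdf, level=0.0, spacing=spacing)
+     return verts + np.asarray(bbox_min), faces    # shift vertices back to world coordinates
+ ```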
343
+
344
+ # 5. Conclusion
345
+
346
+ This paper shows that differentiable neural rendering is a powerful tool for point cloud representation learning. The proposed pre-training pipeline, Ponder, is able to encode rich geometry and appearance cues into the point cloud representation via neural rendering. For the first time, our model can be transferred to both high-level 3D perception tasks and low-level 3D tasks, such as 3D reconstruction and image synthesis from point clouds. Besides, the learned Ponder model can be directly used for 3D reconstruction and image synthesis from sparse point clouds. We also exploratively validate the effectiveness of Ponder on outdoor scenarios and other input modalities, observing a $1.41\%$ mAP improvement for 3D multi-view object detection on the nuScenes dataset, which takes multi-view images as input (details in the supplementary material).
347
+
348
+ Several directions could be explored in future work. First, recent progress in neural representations could help Ponder achieve better rendering quality and gain more
349
+
350
+ accurate supervision from 2D images. Second, thanks to its flexible architecture design, Ponder can potentially be extended to other self-supervised learning fields, e.g., pre-training 2D image backbones, and to other downstream tasks.
351
+
352
+ # References
353
+
354
+ [1] Mohamed Afham, Isuru Dissanayake, Dinithi Dissanayake, Amaya Dharmasiri, Kanchana Thilakarathna, and Ranga Rodrigo. Crosspoint: Self-supervised cross-modal contrastive learning for 3d point cloud understanding. In CVPR, 2022.
355
+ [2] Kara-Ali Aliev, Artem Sevastopolsky, Maria Kolos, Dmitry Ulyanov, and Victor Lempitsky. Neural point-based graphics. In ECCV. Springer, 2020.
356
+ [3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In ICCV, 2021.
357
+ [4] Holger Caesar, Varun Bankiti, Alex H. Lang, Sourabh Vora, Venice Erin Liong, Qiang Xu, Anush Krishnan, Yu Pan, Giancarlo Baldan, and Oscar Beijbom. nuscenes: A multimodal dataset for autonomous driving. In CVPR, 2020.
358
+ [5] Angel X Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. Shapenet: An information-rich 3d model repository. arXiv preprint arXiv:1512.03012, 2015.
359
+ [6] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML. PMLR, 2020.
360
+ [7] Yujin Chen, Matthias Nießner, and Angela Dai. 4dcontrast: Contrastive learning with dynamic correspondences for 3d scene understanding. In ECCV, 2022.
361
+ [8] Zhang Chen, Yinda Zhang, Kyle Genova, Sean Fanello, Sofien Bouaziz, Christian Hane, Ruofei Du, Cem Keskin, Thomas Funkhouser, and Danhang Tang. Multiresolution deep implicit functions for 3d shape representation. In ICCV, 2021.
362
+ [9] Julian Chibane, Thiemo Alldieck, and Gerard Pons-Moll. Implicit functions in feature space for 3d shape reconstruction and completion. In CVPR, 2020.
363
+ [10] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In CVPR, 2019.
364
+ [11] Angela Dai, Angel X. Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In CVPR, 2017.
365
+ [12] Jifeng Dai, Haozhi Qi, Yuwen Xiong, Yi Li, Guodong Zhang, Han Hu, and Yichen Wei. Deformable convolutional networks. In ICCV, 2017.
366
+ [13] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020.
367
+
368
+ [14] Christoph Feichtenhofer, Haoqi Fan, Yanghao Li, and Kaiming He. Masked autoencoders as spatiotemporal learners. arXiv preprint arXiv:2205.09113, 2022.
369
+ [15] Benjamin Graham, Martin Engelcke, and Laurens van der Maaten. 3d semantic segmentation with submanifold sparse convolutional networks. CVPR, 2018.
370
+ [16] Amos Gropp, Lior Yariv, Niv Haim, Matan Atzmon, and Yaron Lipman. Implicit geometric regularization for learning shapes. arXiv preprint arXiv:2002.10099, 2020.
371
+ [17] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In CVPR, 2022.
372
+ [18] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020.
373
+ [19] Kaiming He, Georgia Gkioxari, Piotr Dollár, and Ross Girshick. Mask r-cnn. In ICCV, 2017.
374
+ [20] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, 2016.
375
+ [21] Ji Hou, Benjamin Graham, Matthias Nießner, and Saining Xie. Exploring data-efficient 3d scene understanding with contrastive scene contexts. In CVPR, 2021.
376
+ [22] Ji Hou, Saining Xie, Benjamin Graham, Angela Dai, and Matthias Nießner. Pri3d: Can 3d priors help 2d representation learning? In ICCV, 2021.
377
+ [23] Siyuan Huang, Yichen Xie, Song-Chun Zhu, and Yixin Zhu. Spatio-temporal self-supervised representation learning for 3d point clouds. arXiv preprint arXiv:2109.00179, 2021.
378
+ [24] Rasmus Jensen, Anders Dahl, George Vogiatzis, Engin Tola, and Henrik Aanæs. Large scale multi-view stereopsis evaluation. In CVPR, 2014.
379
+ [25] Ge-Peng Ji, Deng-Ping Fan, Yu-Cheng Chou, Dengxin Dai, Alexander Liniger, and Luc Van Gool. Deep gradient learning for efficient camouflaged object detection. Machine Intelligence Research, 20(1):92-108, 2023.
380
+ [26] Li Jiang, Shaoshuai Shi, Zhuotao Tian, Xin Lai, Shu Liu, Chi-Wing Fu, and Jiaya Jia. Guided point contrastive learning for semi-supervised point cloud semantic segmentation. In ICCV, 2021.
381
+ [27] Lanxiao Li and Michael Heizmann. A closer look at invariances in self-supervised pre-training for 3d vision. arXiv preprint arXiv:2207.04997, 2022.
382
+ [28] Lanxiao Li and Michael Heizmann. A closer look at invariances in self-supervised pre-training for 3d vision. In ECCV, 2022.
383
+ [29] Yanwei Li, Yilun Chen, Xiaojuan Qi, Zeming Li, Jian Sun, and Jiaya Jia. Unifying voxel-based representation with transformer for 3d object detection. 2022.
384
+ [30] Haotian Liu, Mu Cai, and Yong Jae Lee. Masked discrimination for self-supervised learning on point clouds. arXiv preprint arXiv:2203.11183, 2022.
385
+ [31] Lingjie Liu, Jiatao Gu, Kyaw Zaw Lin, Tat-Seng Chua, and Christian Theobalt. Neural sparse voxel fields. NeurIPS, 33, 2020.
386
+ [32] Xiaoxiao Long, Cheng Lin, Peng Wang, Taku Komura, and Wenping Wang. Sparseneus: Fast generalizable neu
387
+
388
+ ral surface reconstruction from sparse views. arXiv preprint arXiv:2206.05737, 2022.
389
+ [33] William E Lorensen and Harvey E Cline. Marching cubes: A high resolution 3d surface construction algorithm. ACM siggraph computer graphics, 21(4), 1987.
390
+ [34] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.
391
+ [35] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1), 2021.
392
+ [36] Chen Min, Dawei Zhao, Liang Xiao, Yiming Nie, and Bin Dai. Voxel-mae: Masked autoencoders for pre-training large-scale point clouds. arXiv preprint arXiv:2206.09900, 2022.
393
+ [37] Ishan Misra, Rohit Girdhar, and Armand Joulin. An End-to-End Transformer Model for 3D Object Detection. In ICCV, 2021.
394
+ [38] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4), July 2022.
395
+ [39] Michael Oechsle, Songyou Peng, and Andreas Geiger. Unisurf: Unifying neural implicit surfaces and radiance fields for multi-view reconstruction. In ICCV, 2021.
396
+ [40] Joseph Ortiz, Alexander Clegg, Jing Dong, Edgar Sucar, David Novotny, Michael Zollhoefer, and Mustafa Mukadam. isdf: Real-time neural signed distance fields for robot perception. arXiv preprint arXiv:2204.02296, 2022.
397
+ [41] Yatian Pang, Wenxiao Wang, Francis EH Tay, Wei Liu, Yonghong Tian, and Li Yuan. Masked autoencoders for point cloud self-supervised learning. arXiv preprint arXiv:2203.06604, 2022.
398
+ [42] Songyou Peng, Michael Niemeyer, Lars Mescheder, Marc Pollefeys, and Andreas Geiger. Convolutional occupancy networks. In ECCV. Springer, 2020.
399
+ [43] Charles R Qi, Or Litany, Kaiming He, and Leonidas J Guibas. Deep hough voting for 3d object detection in point clouds. In ICCV, 2019.
400
+ [44] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. NeurIPS, 30, 2017.
401
+ [45] Ruslan Rakhimov, Andrei-Timotei Ardelean, Victor Lempitsky, and Evgeny Burnaev. NPBG++: Accelerating neural point-based graphics. In CVPR, 2022.
402
+ [46] Yongming Rao, Benlin Liu, Yi Wei, Jiwen Lu, Cho-Jui Hsieh, and Jie Zhou. Randomrooms: Unsupervised pretraining from synthetic shapes and randomized layouts for 3d object detection. In ICCV, 2021.
403
+ [47] Christian Reiser, Songyou Peng, Yiyi Liao, and Andreas Geiger. Kilonerf: Speeding up neural radiance fields with thousands of tiny mlps. In ICCV, pages 14335-14345, 2021.
404
+ [48] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. NeurIPS, 28, 2015.
405
+ [49] Shuran Song, Samuel P Lichtenberg, and Jianxiong Xiao. Sun rgb-d: A rgb-d scene understanding benchmark suite. In CVPR, 2015.
406
+
407
+ [50] Maxim Tatarchenko, Stephan R Richter, René Ranftl, Zhuwen Li, Vladlen Koltun, and Thomas Brox. What do single-view 3d reconstruction networks learn? In CVPR, 2019.
408
+ [51] Hugues Thomas, Charles R. Qi, Jean-Emmanuel Deschaud, Beatrix Marcotegui, François Goulette, and Leonidas J. Guibas. Kpconv: Flexible and deformable convolution for point clouds. ICCV, 2019.
409
+ [52] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. arXiv preprint arXiv:2203.12602, 2022.
410
+ [53] Hanchen Wang, Qi Liu, Xiangyu Yue, Joan Lasenby, and Matt J Kusner. Unsupervised point cloud pre-training via occlusion completion. In ICCV, 2021.
411
+ [54] Jingwen Wang, Tymoteusz Bleja, and Lourdes Agapito. Go-surf: Neural feature grid optimization for fast, high-fidelity rgb-d surface reconstruction. arXiv preprint arXiv:2206.14735, 2022.
412
+ [55] Peng Wang, Lingjie Liu, Yuan Liu, Christian Theobalt, Taku Komura, and Wenping Wang. Neus: Learning neural implicit surfaces by volume rendering for multi-view reconstruction. arXiv preprint arXiv:2106.10689, 2021.
413
+ [56] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul Srinivasan, Howard Zhou, Jonathan T. Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. arXiv preprint arXiv:2102.13090, 2021.
414
+ [57] Saining Xie, Jiatao Gu, Demi Guo, Charles R Qi, Leonidas Guibas, and Or Litany. Pointcontrast: Unsupervised pretraining for 3d point cloud understanding. In ECCV. Springer, 2020.
415
+ [58] Wenpeng Xing, Jie Chen, and Yike Guo. Robust local light field synthesis via occlusion-aware sampling and deep visual feature fusion. Machine Intelligence Research, 20(3):408-420, 2023.
416
+ [59] Qiangeng Xu, Zexiang Xu, Julien Philip, Sai Bi, Zhixin Shu, Kalyan Sunkavalli, and Ulrich Neumann. Point-nerf: Point-based neural radiance fields. In CVPR, 2022.
417
+ [60] Ryosuke Yamada, Hirokatsu Kataoka, Naoya Chiba, Yukiyasu Domae, and Tetsuya Ogata. Point cloud pretraining with natural 3d structures. In CVPR, 2022.
418
+ [61] Siming Yan, Zhenpei Yang, Haoxiang Li, Li Guan, Hao Kang, Gang Hua, and Qixing Huang. Implicit autoencoder for point cloud self-supervised representation learning. arXiv preprint arXiv:2201.00785, 2022.
419
+ [62] Alex Yu, Ruilong Li, Matthew Tancik, Hao Li, Ren Ng, and Angjoo Kanazawa. PlenOctrees for real-time rendering of neural radiance fields. In ICCV, 2021.
420
+ [63] Alex Yu, Vickie Ye, Matthew Tancik, and Angjoo Kanazawa. pixelNeRF: Neural radiance fields from one or few images. arXiv preprint arXiv:2012.02190, 2020.
421
+ [64] Xumin Yu, Lulu Tang, Yongming Rao, Tiejun Huang, Jie Zhou, and Jiwen Lu. Point-bert: Pre-training 3d point cloud transformers with masked point modeling. In CVPR, 2022.
422
+ [65] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. NERF++: Analyzing and improving neural radiance fields. arXiv preprint arXiv:2010.07492, 2020.
423
+
424
+ [66] Renrui Zhang, Ziyu Guo, Peng Gao, Rongyao Fang, Bin Zhao, Dong Wang, Yu Qiao, and Hongsheng Li. Pointm2ae: Multi-scale masked autoencoders for hierarchical point cloud pre-training. arXiv preprint arXiv:2205.14401, 2022.
425
+ [67] Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3d features on any point-cloud. In ICCV, pages 10252-10263, 2021.
426
+ [68] Zaiwei Zhang, Bo Sun, Haitao Yang, and Qixing Huang. H3dnet: 3d object detection using hybrid geometric primitives. In ECCV. Springer, 2020.
427
+ [69] Hengshuang Zhao, Li Jiang, Jiaya Jia, Philip HS Torr, and Vladlen Koltun. Point transformer. In ICCV, 2021.
428
+
429
+ # Ponder: Point Cloud Pre-training via Neural Rendering Supplementary Material
430
+
431
+ # A. Implementation Details
432
+
433
+ In this section, we give more implementation details of our Ponder model.
434
+
435
+ # A.1. Pre-training Details
436
+
437
+ Network architecture. To process the extracted 3D feature volume, our approach utilizes a 3D U-Net. We adopt the standard implementation of 3D U-Net, which consists of four down-sampling stages with corresponding channels of 32, 64, 128, and 256, respectively. All convolution layers use a 3D kernel of size 3. To construct the neural rendering decoders, Ponder employs a five-layer MLP network as the SDF decoder and a three-layer MLP network as the RGB decoder.
438
+
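+ As a concrete reference, the snippet below sketches decoder heads of the stated depths in PyTorch. Only the layer counts (five layers for the SDF decoder, three for the RGB decoder) come from the text above; the hidden width of 256, the ReLU activations, and the sigmoid on the colour output are illustrative assumptions rather than the exact Ponder configuration.
+
+ ```python
+ import torch.nn as nn
+
+ class RenderingDecoders(nn.Module):
+     """Minimal sketch of the two rendering heads (depths from the text, widths and activations assumed)."""
+     def __init__(self, feat_dim=256, hidden=256):
+         super().__init__()
+         # Five-layer MLP mapping an interpolated point feature to a signed distance.
+         dims = [feat_dim] + [hidden] * 4 + [1]
+         sdf_layers = []
+         for i in range(5):
+             sdf_layers.append(nn.Linear(dims[i], dims[i + 1]))
+             if i < 4:
+                 sdf_layers.append(nn.ReLU(inplace=True))
+         self.sdf_mlp = nn.Sequential(*sdf_layers)
+         # Three-layer MLP mapping the same feature to an RGB colour.
+         self.rgb_mlp = nn.Sequential(
+             nn.Linear(feat_dim, hidden), nn.ReLU(inplace=True),
+             nn.Linear(hidden, hidden), nn.ReLU(inplace=True),
+             nn.Linear(hidden, 3), nn.Sigmoid(),
+         )
+
+     def forward(self, point_feat):
+         return self.sdf_mlp(point_feat), self.rgb_mlp(point_feat)
+ ```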
439
+ ![](images/7e31532dfb1d622039af77d4bba59ac89f272a0343bebca5e1010aa51bd3d0ee.jpg)
440
+ Figure 7. 3D U-Net architecture.
441
+
442
+ 3D feature volume. Given a point cloud $\mathcal{X}$ , we first discretize the 3D space into a feature volume, $\mathcal{V}$ , of resolution $H\times W\times D$ . For each voxel center in $\mathcal{V}$ , we then apply average pooling to aggregate features from surrounding points of $\mathcal{X}$ .
443
+
444
+ ![](images/d5cb69a9b09704e99f5d729eb96b3766394044fedce0db1a9115f47e6b17624c.jpg)
445
+ Figure 8. 3D feature volume construction.
446
+
447
+ When there is no point near a voxel due to the sparsity of $\mathcal{X}$ , that voxel remains empty. The point cloud $\mathcal{X}$ can be created from either a single depth frame or multiple depth frames.
450
+
451
+ In our experiments, we build a hierarchical feature volume $\mathcal{V}$ with a resolution of [16, 32, 64]. Building a 3D hierarchical feature volume has been widely used for recovering detailed 3D geometry, e.g., [8, 9]. After processing the 3D feature volume with a 3D CNN, we use trilinear interpolation to get the feature of a query point $\mathbf{p}$ , which is sampled along the casting ray and denoted as $\mathcal{V}(\mathbf{p})$ . We use the drop-in replacement of gridSampler from [54] to accelerate the training.
452
+
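+ To make the interpolation step concrete, the sketch below queries per-point features $\mathcal{V}(\mathbf{p})$ from a single-level feature volume with PyTorch's built-in grid sampler (the paper uses the accelerated drop-in sampler from [54]; hierarchical levels would simply be queried separately and concatenated). The bounding-box arguments and the assumed (z, y, x) layout of the volume are illustrative conventions, not details from the paper.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def sample_volume_features(feature_volume, query_pts, aabb_min, aabb_max):
+     """Trilinearly interpolate V(p) for query points sampled along the rays.
+
+     feature_volume: (1, C, D, H, W) tensor from the 3D U-Net (axes assumed z, y, x).
+     query_pts:      (M, 3) world-space xyz coordinates.
+     aabb_min/max:   (3,) tensors bounding the volume.
+     Returns an (M, C) feature matrix.
+     """
+     # Normalise world coordinates into the [-1, 1] range expected by grid_sample.
+     norm = 2.0 * (query_pts - aabb_min) / (aabb_max - aabb_min) - 1.0
+     grid = norm.view(1, 1, 1, -1, 3)  # grid_sample expects (x, y, z) ordering
+     feats = F.grid_sample(feature_volume, grid, mode='bilinear',
+                           padding_mode='zeros', align_corners=True)
+     return feats.reshape(feature_volume.shape[1], -1).t()  # (M, C)
+ ```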
453
+ Ray sampling strategy. Similar to [35, 55], we sample points twice along each rendering ray. First, we uniformly sample coarse points between the near bound $z_{n}$ and far bound $z_{f}$ . Then, we use importance sampling with the coarse probability estimate to sample fine points. Following [55], the coarse probability is calculated based on $\Phi_h(s)$ . With this sampling strategy, our method automatically determines sample locations and collects more points near the surface, which makes the training process more efficient.
454
+
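+ A simplified version of this two-stage sampler is sketched below. The coarse pass is plain uniform sampling; the fine pass inverts a discrete CDF built from per-sample weights (in the paper these come from $\Phi_h(s)$ on the coarse predictions). Snapping fine samples to the nearest coarse depth, and the specific argument names, are simplifications for illustration only.
+
+ ```python
+ import torch
+
+ def hierarchical_ray_samples(n_coarse, n_fine, z_near, z_far, coarse_weights=None):
+     """Coarse uniform sampling followed by importance sampling along one ray."""
+     t_coarse = torch.linspace(z_near, z_far, n_coarse)
+     if coarse_weights is None:
+         return t_coarse  # first pass: uniform samples only
+     # Discrete CDF over the coarse samples (eps avoids an all-zero distribution).
+     pdf = coarse_weights + 1e-5
+     cdf = torch.cumsum(pdf / pdf.sum(), dim=0)
+     # Inverse-CDF sampling: more fine samples where the coarse weights are large.
+     u = torch.rand(n_fine)
+     idx = torch.searchsorted(cdf, u).clamp(max=n_coarse - 1)
+     t_fine = t_coarse[idx]
+     return torch.sort(torch.cat([t_coarse, t_fine]))[0]
+ ```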
455
+ Back projection. Here we give details of the back-projection function $\pi^{-1}$ used to obtain point clouds from depth images. Let $\mathbf{K}$ be the camera intrinsic parameters and $\xi = [\mathbf{R}|\mathbf{t}]$ the camera extrinsic parameters, where $\mathbf{R}$ is the rotation matrix and $\mathbf{t}$ is the translation vector. $X_{uv}$ is the projected point location in the image and $X_w$ is the point location in 3D world coordinates. Then, according to the pinhole camera model:
456
+
457
+ $$
458
+ s \boldsymbol {X} _ {u v} = \mathbf {K} \left(\mathbf {R} \boldsymbol {X} _ {w} + \mathbf {t}\right), \tag {17}
459
+ $$
460
+
461
+ where $s$ is the depth value. After expanding the $\mathbf{X}_{uv}$ and $\mathbf{X}_w$ :
462
+
463
+ $$
464
+ s \left[ \begin{array}{l} u \\ v \\ 1 \end{array} \right] = \mathbf {K} (\mathbf {R} \left[ \begin{array}{l} X \\ Y \\ Z \end{array} \right] + \mathbf {t}). \tag {18}
465
+ $$
466
+
467
+ Then, the 3D point location can be calculated as follows:
468
+
469
+ $$
470
+ \left[ \begin{array}{l} X \\ Y \\ Z \end{array} \right] = \mathbf {R} ^ {- 1} \left(\mathbf {K} ^ {- 1} s \left[ \begin{array}{l} u \\ v \\ 1 \end{array} \right] - \mathbf {t}\right) \tag {19}
471
+ $$
472
+
473
+ The above Equation 19 is the back-projection equation $\pi^{-1}$ used in this paper.
474
+
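+ For reference, Equation 19 translates directly into a few lines of NumPy; the sketch below back-projects a full depth map in one shot. The variable names and the zero-depth filtering hint are our own, but the arithmetic is exactly Eq. 19.
+
+ ```python
+ import numpy as np
+
+ def backproject_depth(depth, K, R, t):
+     """Back-project a depth map to world-space points following Eq. 19.
+
+     depth: (H, W) depth values s; K: (3, 3) intrinsics;
+     R: (3, 3) rotation and t: (3,) translation of the extrinsics [R|t].
+     Returns an (H*W, 3) point cloud; rows with zero depth can be filtered out.
+     """
+     H, W = depth.shape
+     u, v = np.meshgrid(np.arange(W), np.arange(H))
+     pix = np.stack([u, v, np.ones_like(u)], axis=-1).reshape(-1, 3).T  # (3, H*W)
+     cam = np.linalg.inv(K) @ (pix * depth.reshape(1, -1))   # K^{-1} * s * [u, v, 1]^T
+     world = np.linalg.inv(R) @ (cam - t.reshape(3, 1))      # R^{-1} (... - t)
+     return world.T
+ ```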
475
+ Training Time. The Ponder model is pre-trained with 8 NVIDIA A100 GPUs for 96 hours.
476
+
477
+ # A.2. Transfer Learning Details
478
+
479
+ 3D scene reconstruction. ConvONet [42] reconstructs scene geometry from point cloud input. It follows a two-step approach: it first encodes the point cloud into a 3D feature volume or multiple feature planes, and then decodes the occupancy probability for each query point. To evaluate the transfer learning capability of our point cloud encoder, we replace the point cloud encoder of ConvONet directly with our pre-trained encoder, without any additional modifications. We choose the highest-performing configuration of ConvONet as the baseline setting, which uses a 3D feature volume with a resolution of 64. For the training of ConvONet, we follow the same training setting as the released code<sup>1</sup>.
480
+
481
+ Image synthesis from point clouds. Point-NeRF [59] renders images from a neural point cloud representation. It first generates neural point clouds from multi-view images, then uses point-based volume rendering to synthesize images. To transfer the learned network weights to the Point-NeRF pipeline, we 1) replace the 2D image feature backbone with our pre-trained point cloud encoder to obtain the neural point cloud features, 2) replace the color decoder with our pre-trained color decoder, and 3) keep the other Point-NeRF modules untouched. Since a large point cloud is difficult for the point cloud encoder to process directly, we downsample the point cloud to $1\%$ of its original size, which decreases rendering quality but reduces the GPU memory requirements. We report the PSNR of the unmasked region as the evaluation metric, which is directly adopted from the original codebase<sup>2</sup>. For training Point-NeRF, we follow the original training settings.
482
+
483
+ # B. Supplementary Experiments
484
+
485
+ # B.1. Transfer Learning
486
+
487
+ Label Efficiency Training. We also conduct experiments to show the performance of our method with limited labels for the downstream task. Specifically, we test label-efficient training on the 3D object detection task on ScanNet. Following the same setting as IAE [61], we use $20\%$ , $40\%$ , $60\%$ , and $80\%$ of the ground truth annotations. The results are shown in Figure 9. We observe consistently improved results over training from scratch, especially when only $20\%$ of the data is available.
488
+
489
+ ![](images/7b972f9a811b14eb22489607891ed0da6f1c5ba1bb027e7798deab7c8b205d18.jpg)
490
+ Figure 9. Label efficiency training. We show the 3d object detection experiment results using limited downstream data. Our pretrained model is capable of achieving better performance than training from scratch using the same percentage of data or requires fewer data to get the same detection accuracy.
491
+
492
+ Color information for downstream tasks. Different from previous works, since our pre-training model uses a colored point cloud as input, we also use color information for the downstream tasks. Results are shown in Table 10. Using color as an additional point feature helps the VoteNet baseline achieve better performance on the SUN RGB-D dataset, but yields little improvement on the ScanNet dataset. This shows that directly concatenating point positions and colors as point features offers limited robustness across application scenarios. By leveraging the proposed Ponder pre-training method, the network is well initialized to handle the point position and color features, and achieves better detection accuracy.
493
+
494
+ Ablation study of different loss terms. The ablation study of different loss terms is shown in Tab. 9, which demonstrates the effectiveness of each loss term.
495
+
496
+ <table><tr><td>Losses</td><td>AP50↑</td><td>AP25↑</td></tr><tr><td>$L$</td><td>41.0</td><td>63.6</td></tr><tr><td>$-L_{c}$</td><td>40.9</td><td>64.2</td></tr><tr><td>$-L_{d}$</td><td>40.5</td><td>63.4</td></tr><tr><td>$-L_{e}$</td><td>40.9</td><td>63.3</td></tr><tr><td>$-L_{e}-L_{f}$</td><td>40.7</td><td>63.1</td></tr><tr><td>$-L_{e}-L_{f}-L_{s}$</td><td>40.5</td><td>63.2</td></tr></table>
497
+
498
+ Table 9. Ablation study for loss terms. 3D detection $AP_{25}$ and $AP_{50}$ on ScanNet.
499
+
500
+ More comparisons on 3D detection. More detection accuracy comparisons are given in Table 10. Even using an inferior backbone, our Ponder model achieves detection accuracy similar to the compared pre-training methods on ScanNet and better accuracy on SUN RGB-D.
503
+
504
+ 3D semantic segmentation with point-based approaches. Tab. 11 shows our additional experiments with the point-based approach Ponder+DGCNN.
505
+
506
+ Ablation study of different pre-training epochs. Tab. 12 shows that longer pre-training epochs lead to better performance in downstream tasks.
507
+
508
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">Detection Model</td><td rowspan="2">Pre-training Type</td><td rowspan="2">Pre-training Data</td><td rowspan="2">Pre-training Epochs</td><td colspan="2">ScanNet</td><td colspan="2">SUN RGB-D</td></tr><tr><td>AP50↑</td><td>AP25↑</td><td>AP50↑</td><td>AP25↑</td></tr><tr><td>VoteNet*</td><td>VoteNet*</td><td>-</td><td>-</td><td>-</td><td>37.6</td><td>60.0</td><td>33.3</td><td>58.4</td></tr><tr><td>DPCo[28]</td><td>VoteNet*</td><td>Contrast</td><td>Depth</td><td>120</td><td>41.5</td><td>64.2</td><td>35.6</td><td>59.8</td></tr><tr><td>IPCo[28]</td><td>VoteNet*</td><td>Contrast</td><td>Color &amp; Depth</td><td>120</td><td>40.9</td><td>63.9</td><td>35.5</td><td>60.2</td></tr><tr><td>VoteNet (w color)</td><td>VoteNet</td><td>-</td><td>-</td><td>-</td><td>33.4</td><td>58.8</td><td>34.3</td><td>58.3</td></tr><tr><td>Ponder</td><td>VoteNet</td><td>Rendering</td><td>Depth</td><td>100</td><td>40.9</td><td>64.2</td><td>36.1</td><td>60.3</td></tr><tr><td>Ponder</td><td>VoteNet</td><td>Rendering</td><td>Color &amp; Depth</td><td>100</td><td>41.0</td><td>63.6</td><td>36.6</td><td>61.0</td></tr></table>
509
+
510
+ Table 10. 3D object detection $AP_{25}$ and $AP_{50}$ on ScanNet and SUN RGB-D. * means a different but stronger version of VoteNet.
511
+
512
+ <table><tr><td>Method</td><td>OA↑</td><td>mIoU↑</td></tr><tr><td>DGCNN</td><td>84.1</td><td>56.1</td></tr><tr><td>Jigsaw</td><td>84.4</td><td>56.6</td></tr><tr><td>OcCo</td><td>85.1</td><td>58.5</td></tr><tr><td>IAE</td><td>85.9</td><td>60.7</td></tr><tr><td>Ponder</td><td>86.2</td><td>61.1</td></tr></table>
513
+
514
+ Table 11. 3D semantic segmentation $OA$ and $mIoU$ on the S3DIS dataset with the DGCNN model.
515
+
516
+ <table><tr><td>Epochs</td><td>AP50↑</td><td>AP25↑</td></tr><tr><td>20</td><td>38.7</td><td>62.0</td></tr><tr><td>40</td><td>39.4</td><td>62.8</td></tr><tr><td>60</td><td>40.0</td><td>62.7</td></tr><tr><td>80</td><td>40.4</td><td>63.1</td></tr><tr><td>100</td><td>41.0</td><td>63.6</td></tr></table>
+
+ Table 12. Ablation study for pre-training epochs. 3D detection $AP_{25}$ and $AP_{50}$ on ScanNet.
517
+
518
+ # B.2. More qualitative examples
519
+
520
+ As mentioned in the paper, the pre-trained Ponder model can be directly used for surface reconstruction and image synthesis tasks. We give more application examples in Figure 10 and Figure 11. The results show that even though the inputs are sparse point clouds from complex scenes, our method is able to recover high-fidelity meshes and synthesize realistic color and depth images.
521
+
522
+ # C. Multi-Camera 3D Object Detection
523
+
524
+ To further verify the effect of utilizing rendering in self-supervised learning, we conduct exploratory experiments on the multi-camera 3D object detection task, which takes multi-view images as input.
525
+
526
+ # C.1. Experimental Setup
527
+
528
+ Dataset. The nuScenes dataset [4] is a popular benchmark for autonomous driving that includes data collected from six cameras, one LiDAR, and five radars. The dataset contains 1000 scenes, split into 700, 150, and 150 scenes for training, validation, and testing, respectively. The evaluation metrics for 3D object detection on nuScenes include the commonly used mean average precision (mAP) and the nuScenes detection score (NDS).
529
+
530
+ Implementation Details. For the downstream task, we adopt the latest state-of-the-art method, i.e., UVTR [29], as our baseline. Specifically, we use ResNet50-DCN [20, 12] as the image backbone, which is initialized with the pre-trained weights of the ResNet-50 Caffe model from MMDetection<sup>3</sup>. To construct the 3D feature volume, we first project predefined 3D voxels onto the multi-view images through the camera transformation matrices. Then, the voxel features are interpolated from the image features at the projected pixel locations. The resolution of the predefined 3D volume is [128, 128, 5]. The model is trained with the AdamW optimizer with an initial learning rate of $2e^{-4}$ for 24 epochs.
533
+
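+ The projection-and-interpolation step can be summarised by the sketch below, which lifts multi-view image features onto the predefined voxel centres with bilinear sampling. The actual UVTR implementation differs in details (masking, feature resolution, batching), so this should be read as an illustrative reduction; all tensor names and shapes are assumptions.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def lift_image_features_to_voxels(img_feats, voxel_xyz, proj_mats):
+     """Average multi-view image features onto voxel centres.
+
+     img_feats: (V, C, Hf, Wf) per-view feature maps; voxel_xyz: (N, 3) voxel
+     centres; proj_mats: (V, 3, 4) world-to-pixel projections at feature scale.
+     Returns (N, C) voxel features averaged over the views that see each voxel.
+     """
+     V, C, Hf, Wf = img_feats.shape
+     homo = torch.cat([voxel_xyz, torch.ones(voxel_xyz.shape[0], 1)], dim=1)  # (N, 4)
+     feats, valid = 0.0, 0.0
+     for v in range(V):
+         uvw = homo @ proj_mats[v].t()                 # project voxels into view v
+         z = uvw[:, 2:].clamp(min=1e-5)
+         uv = uvw[:, :2] / z                           # pixel coordinates
+         grid = torch.stack([2 * uv[:, 0] / (Wf - 1) - 1,
+                             2 * uv[:, 1] / (Hf - 1) - 1], dim=-1)
+         sampled = F.grid_sample(img_feats[v:v + 1], grid.view(1, 1, -1, 2),
+                                 mode='bilinear', align_corners=True)
+         mask = ((grid.abs() <= 1).all(dim=-1) & (uvw[:, 2] > 0)).float()[:, None]
+         feats = feats + sampled.reshape(C, -1).t() * mask
+         valid = valid + mask
+     return feats / valid.clamp(min=1.0)
+ ```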
534
+ For pre-training, our model shares a similar architecture to the baseline, except that the point cloud is additionally used to supervise the rendered depth. As our goal is to pre-train the 2D backbone, the point cloud is not used as input to construct the 3D feature volume, which differs from the Ponder pipeline described in the main text.
535
+
536
+ # C.2. Main Results
537
+
538
539
+
540
+ <table><tr><td>Method</td><td>mAP↑</td><td>NDS↑</td></tr><tr><td>UVTR[29]</td><td>28.69</td><td>35.79</td></tr><tr><td>Ours</td><td>30.10 (+1.41)</td><td>36.31 (+0.52)</td></tr></table>
541
+
542
+ Table 13. Performance comparisons on the nuScenes val set.
543
+
544
+ The Effect of Pre-training. Table 13 shows that our method yields gains of $1.41\%$ mAP and $0.52\%$ NDS over the baseline, demonstrating the effectiveness of our pre-training method. The consistent improvement in both indoor and outdoor scenarios validates the robustness of our approach.
545
+
546
+ Visualization. Figure 12 provides qualitative results of the reconstructed images and depth maps; the model takes only images as input during inference. Our approach is able to estimate the depth of small objects, such as cars at a distance, which suggests that the pre-training process encodes intricate and continuous geometric representations that can benefit many downstream tasks. In Figure 13, we present 3D detection results in camera space and BEV (Bird's Eye View) space. Our model predicts accurate bounding boxes for nearby objects and also shows the capability of detecting objects at far distances.
547
+
548
+ ![](images/2de0da75c8547ff7944b63d92fadc0db05e785a49267f3caafc1a66f7d0a94e5.jpg)
549
+
550
+ ![](images/cda3990050846e75987bc8f3deef297ca34d1949a14d75b8909c2c5959a0df50.jpg)
551
+
552
+ ![](images/297071790e53d6ce217b3c776818e705214577deff82b69e626376d3523af24a.jpg)
553
+
554
+ ![](images/c2fbdce2f0c0fe7ca67e7d33e2b031be8cc4cd182189169dcc2a20359f977937.jpg)
555
+
556
+ ![](images/0f0add9bdabfeb62dcc083a37c52aa85312116fd6b2809581d54240a81a24623.jpg)
557
+
558
+ ![](images/927233de4c46682f63bf22b6907e4e3ba1cf5fffc343074c7185e9174ff5518b.jpg)
559
+
560
+ ![](images/03eab37f902642695bc9104f9df150c5332f27db4002787e676b5a46e08dbb86.jpg)
561
+
562
+ ![](images/8974996d66c6211963fbd5961d03a6607934aa0a25b5faf30ca2bb279145f580.jpg)
563
+
564
+ ![](images/f2f1665fdcd23167e12bd0eccb3a2713d556e58729aa6eaf60ada93a1ab06e97.jpg)
565
+
566
+ ![](images/9383a23f307d243fb1466ffe4c635c520c071bf433a0181c6bc5910ccb89583b.jpg)
567
+
568
+ ![](images/b585496c0f2bfafcb9820f29abfd1a678daa81e20d28a1ba3d8038c0914fd451.jpg)
569
+
570
+ ![](images/1ffad65a62464ce08370d145adb994a635878950d60cb0a12240745d263cc1d8.jpg)
571
+
572
+ ![](images/b09ea11ec3fd03b7749fdf1744a439b73330a52607d80a65004f411c202eb7c5.jpg)
573
+
574
+ ![](images/50d4e4816d6d620fd1131f155b763feea2ac53050edcba6f637b9abe3c44588b.jpg)
575
+
576
+ ![](images/6eaf56467a38a485edb06c453a7935995e40d1e29ea05386b1cd1e7d22485a92.jpg)
577
+
578
+ ![](images/d6fd67e1a566a1fb8631b9807fddb944724105a190d7409bc1a5bd23ebdf8a0c.jpg)
579
+
580
+ ![](images/c049ef531c5af85df95b285256d37f7de2351c09fbe08fc199c94970bb1415eb.jpg)
581
+
582
+ ![](images/a9c8d1fcf1632edeae5e92a55ab6d7cacbc345f1a6076b4f5c5907d7d5c646de.jpg)
583
+
584
+ ![](images/e404d79b769aca2dfac7a900b0a7a4193663e34cda8a28de25cbca9b59fcb81a.jpg)
585
+
586
+ ![](images/e389914148af8613240bcc696b5890d69ef5c261d9af8ceabc6a96001c6e8389.jpg)
587
+
588
+ ![](images/b9aa43845088672c1de05040f064d9323568103c010b5080d4e76375b0b859aa.jpg)
589
+
590
+ ![](images/74c5154e2bc4cfbf0a917a0b76af8a127c1fdbd3dceb0358a69708ec237ec2af.jpg)
591
+
592
+ ![](images/39e713f40ce17bd13731a6264b68c38ed9dcb7b8bd7b212183191f3d9c5acdf0.jpg)
593
+
594
+ ![](images/5a433313e566da9b6e4bc3ed8e57b3e5428a15b5f0077e97e4d046c8baaa2ef4.jpg)
595
+
596
+ ![](images/6ca0776c0b2e4ce7c4d3cf02523eb78cc1253c57e5ea1320f085a81c0854cc6d.jpg)
597
+
598
+ ![](images/71f354f13711dbcbf96b57f91c39d5a8aea8cc90ff2a3a577c7e199c4ba9af1d.jpg)
599
+ Input Point Cloud
600
+
601
+ ![](images/dad04f82ba3074b8c6069406502a7887fb90256abf740aa9652f7c9fc206c53e.jpg)
602
+ Projected Point Cloud
603
+
604
+ ![](images/1a3a563762f78484efc90076e0685e8779497731f5cfa7b674a3da08b3d23ec5.jpg)
605
+ Reconstruction
606
+
607
+ ![](images/0d0185bf51c19e32a77246e80d0d74e51e3168a3488e2c36815e9ce5ae356d70.jpg)
608
+ Image Synthesis
609
+ Figure 10. More results of application examples of Ponder on the ScanNet validation set (part 1). The input point clouds are represented by large spheres for improved clarity. The projected point clouds illustrate the actual sparsity of the point data.
610
+
611
+ ![](images/021d3b8d5c822f84a5acdb62b0e46e8ce8d53362541290b109668a793e5179bc.jpg)
612
+ Depth Synthesis
613
+
614
+ ![](images/6fffdcfb591efbc9c9c07a860a5bd974295eb54804994b458633606d72176efa.jpg)
615
+
616
+ ![](images/7e2df44591ccc4b426b857922bd4e65fa8cc74e729ad3e64557f6669050bbdd5.jpg)
617
+
618
+ ![](images/7849ac46d7d880c55a9d976dc11d6e94f0710c6e788595997481b95ac2cc85a1.jpg)
619
+
620
+ ![](images/3725b5f06f002ea9bcbac5d4d513ecbcc2279b43c352b9fb8376cfcd8a1dc1f6.jpg)
621
+
622
+ ![](images/64798e9742c9d83bf8b24d07d07f202eb9c800e307206f483a276ae758668e80.jpg)
623
+
624
+ ![](images/c47680709dfa44b05d8cbf845535e9b92a7eff5f751412fc54e5f71b224cfb2f.jpg)
625
+
626
+ ![](images/405de888165e4df132051e84f653966325a99619f5c8ed4a196c5b72964fd31f.jpg)
627
+
628
+ ![](images/6325575fb3ac381aa6919301c6c7121ddf7245c9a94d7faccb8bb7e8cd27348b.jpg)
629
+
630
+ ![](images/a90774c74b5278e6ea913076cfddf058942dceffddbf46e341e6b9493d045b7b.jpg)
631
+
632
+ ![](images/0aee919464d139fe72dabb717ba3a8a56b28e6d453036670616948d1638cc79a.jpg)
633
+
634
+ ![](images/952e4f6ca44b1604ff40f8ab600609e8eb10417e848b6821f3ed8e0da62b2798.jpg)
635
+
636
+ ![](images/99356ef402dd5c796c6aed57f99f5d6a23f5aa1131b716609b6d16f54b0af0fe.jpg)
637
+
638
+ ![](images/3afe475d57b04813aaa3a5486dd15c67d7c82864304af3c8bcbcd9b97c8dd740.jpg)
639
+
640
+ ![](images/f71db9bb4f823d1dc84f4fac73868a4a479375c98157618b3ee5088de6dcb805.jpg)
641
+
642
+ ![](images/d9e1b81a725f79b78e7db8371a28cd0d8b9be96bc78918ba403873aae1b75ff3.jpg)
643
+
644
+ ![](images/b805633611d1db56290f2bab4c087aaf84d4b6bd03f5be78ac091e4e4f5f4bd2.jpg)
645
+
646
+ ![](images/4eeaa4a7459cd314797207d81189b73e640914c5c31b99bbc337b84f1486159d.jpg)
647
+
648
+ ![](images/f5e419ccd770aa3dbf5378c1d5196a8938e58bc181f3a02d23035f83fbd6e100.jpg)
649
+
650
+ ![](images/7faca2fdf9171ab85c0f79462729a5cee5888a4d02d8d9ea6458deae06e2261d.jpg)
651
+
652
+ ![](images/656279a2a146b458caefaccf9cde5cd1ae28e643666beab93d7d033ff80ee28b.jpg)
653
+
654
+ ![](images/0a711e74cda369289e4c7dafbfefcb5a869b782a4894ad2bdc850ad0c371b769.jpg)
655
+
656
+ ![](images/a73e7e56835f63042361d0cdeaa92feee7fbc361b0c4eeec40af85561fe4fcd8.jpg)
657
+
658
+ ![](images/851b5d6c1800182330baa83777de39eac57024ad7366bac2f28a1364ee0bfc53.jpg)
659
+
660
+ ![](images/562162353359a87c71e477486701d45455972d24f77e9cefb42df9c4ecba594a.jpg)
661
+
662
+ ![](images/d6226261d2d34708b579ff6272f553686f64c557180cd0239d9f4443f38dd7af.jpg)
663
+
664
+ ![](images/8038b063229e0060d58c822caeb681737e3531af4a44bfe3902f616ef04bf89e.jpg)
665
+ Input Point Cloud
666
+
667
+ ![](images/73eb2aec636febb2c6cebc5b068225c31ea191448cf05ae518b1e856438baf1a.jpg)
668
+ Projected Point Cloud
669
+
670
+ ![](images/9959b369f503108c3d7507f003ba77a6a95bb3a58ace457873f6e752a13f8ad9.jpg)
671
+ Reconstruction
672
+
673
+ ![](images/4ab5203e4040698e49f83cbf702238844cff12d1ad92ab1a8ebf5e82812535b9.jpg)
674
+ Image Synthesis
675
+ Figure 11. More results of application examples of Ponder on the ScanNet validation set (part 2). The input point clouds are represented by large spheres for improved clarity. The projected point clouds illustrate the actual sparsity of the point data.
676
+
677
+ ![](images/6113ab60e6a67204a0b4352fc06ba792978c3b3c93a0ddee394ddbea9ccaedce.jpg)
678
+ Depth Synthesis
679
+
680
+ ![](images/52afbfa07be3b4d6b580ee3aff85ce071782f09de80a5e2aff0ab72be495fea5.jpg)
681
+
682
+ ![](images/7671200a035d54cd4d841a2fcee4c869f61974e4409eca982b9b40180be5b49d.jpg)
683
+
684
+ ![](images/19da1d6001c6fd26170eedf8dcb8365551f009f404fe80a6493c89373691ce06.jpg)
685
+ Image and Projected Point Cloud
686
+
687
+ ![](images/b1b72acbab5c0c2fc2e5a8ce9dc67db2857a8bcdd249ddfe13f1170254657838.jpg)
688
+
689
+ ![](images/b6169e80e0e14b508af250374a1788bce74da3473531b5068198df9d1be9c5b7.jpg)
690
+
691
+ ![](images/5c2ec7d374cdf2de8226654af825b8bb8dbea0d2509364f0e1c18c639eab9937.jpg)
692
+ Image Synthesis
693
+ Figure 12. The predicted image and depth map on the nuScenes dataset. Left to right: image and projected point clouds, image predictions, and depth predictions.
694
+
695
+ ![](images/b684582c5e2faec7d096d03c83df5352b84a90bf8a57c1a4138788ca1a4a9c1a.jpg)
696
+
697
+ ![](images/c53e0e9a1ec946d585408a0ae55349c15107f23556be9f42e53ac478be663e0e.jpg)
698
+
699
+ ![](images/005a89500311a8c99cbbebc20793f56276200c72e22da9d59971af12636f51d5.jpg)
700
+ Depth Synthesis
701
+
702
+ ![](images/a4eac858220ec9e8b96e1067300d794c82d816d41c7b631a5cf91faf645a3bc9.jpg)
703
+
704
+ ![](images/1607c0831d4a960ba88d93cd6966f901d32a5face0980156ce5c581b816eb2ef.jpg)
705
+ Figure 13. Qualitative results of multi-camera 3D object detection on the nuScenes dataset. We visualize the point cloud to better evaluate the quality of predicted bounding boxes.
2301.00xxx/2301.00157/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1bf1a915aa81dbb72e9bc920427b490758ab674d6f99eadba772350e9ba3198f
3
+ size 1508518
2301.00xxx/2301.00157/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00174/bdcc3ba4-ac51-4fe2-85bf-aa12aea87957_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00174/bdcc3ba4-ac51-4fe2-85bf-aa12aea87957_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00174/bdcc3ba4-ac51-4fe2-85bf-aa12aea87957_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e71945e461dcb17313476d7c929f5f9d2138c31b7daf8022a9aee4e055b8149
3
+ size 2176556
2301.00xxx/2301.00174/full.md ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00174/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e9c75b075d1582e7e7177779cd6d386665648073727ad55b2570c66bf0354b0b
3
+ size 1406591
2301.00xxx/2301.00174/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00182/e088d165-4952-42cf-a3d7-7785f6182d67_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00182/e088d165-4952-42cf-a3d7-7785f6182d67_model.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00182/e088d165-4952-42cf-a3d7-7785f6182d67_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b35574b951a45269d91c293685b290015b1cb56f7bb86425cd3bc4517e1cd284
3
+ size 4447687
2301.00xxx/2301.00182/full.md ADDED
@@ -0,0 +1,554 @@
1
+ # Bidirectional Cross-Modal Knowledge Exploration for Video Recognition with Pre-trained Vision-Language Models
2
+
3
+ Wenhao Wu $^{1,2}$ Xiaohan Wang $^{3}$ Haipeng Luo $^{4}$ Jingdong Wang $^{2}$ Yi Yang $^{3}$ Wanli Ouyang $^{5,1}$
4
+
5
+ <sup>1</sup>The University of Sydney <sup>2</sup>Baidu Inc. <sup>3</sup>Zhejiang University <sup>4</sup>University of Chinese Academy of Sciences <sup>5</sup>Shanghai AI Laboratory
12
+
13
+ whwu.ucas@gmail.com
14
+
15
+ # Abstract
16
+
17
+ Vision-language models (VLMs) pre-trained on large-scale image-text pairs have demonstrated impressive transferability on various visual tasks. Transferring knowledge from such powerful VLMs is a promising direction for building effective video recognition models. However, current exploration in this field is still limited. We believe that the greatest value of pre-trained VLMs lies in building a bridge between visual and textual domains. In this paper, we propose a novel framework called BIKE, which utilizes the cross-modal bridge to explore bidirectional knowledge: i) We introduce the Video Attribute Association mechanism, which leverages the Video-to-Text knowledge to generate textual auxiliary attributes for complementing video recognition. ii) We also present a Temporal Concept Spotting mechanism that uses the Text-to-Video expertise to capture temporal saliency in a parameter-free manner, leading to enhanced video representation. Extensive studies on six popular video datasets, including Kinetics-400 & 600, UCF-101, HMDB-51, ActivityNet and Charades, show that our method achieves state-of-the-art performance in various recognition scenarios, such as general, zero-shot, and few-shot video recognition. Our best model achieves a state-of-the-art accuracy of $88.6\%$ on the challenging Kinetics-400 using the released CLIP model. The code is available at https://github.com/whwu95/BIKE.
18
+
19
+ # 1. Introduction
20
+
21
+ In recent years, the remarkable success of large-scale pre-training in NLP (e.g., BERT [9], GPT [4, 39], ERNIE [71] and T5 [40]) has inspired the computer vision community. Vision-language models (VLMs) leverage large-scale noisy image-text pairs with weak correspondence for contrastive learning (e.g., CLIP [38], ALIGN [19], CoCa [67], Florence [68]), and demonstrate impressive transferability across a wide range of visual tasks.
22
+
23
+ ![](images/63f5bfad5798e199928f7a6a2d86d63672035bd47584e2bb74670e25a30f3028.jpg)
24
+ (a) Traditional video recognition.
25
+ (b) Category embedding as classifier for video recognition.
26
+
27
+ ![](images/d5d9a2d14bd1a4054751693f55eaeba7b71ad09a7311e145f5bc06c120152ebf.jpg)
28
+ (c) Bidirectional knowledge exploration for video recognition.
29
+ Figure 1. Illustration of the difference between our paradigm (c) with existing unimodality paradigm (a) and cross-modal paradigm (b). Please zoom in for the best view.
30
+
31
+ Naturally, transferring knowledge from such powerful pre-trained VLMs is emerging as a promising paradigm for building video recognition models. Currently, exploration in this field can be divided into two lines. As depicted in Figure 1(a), one approach [28,36,66] follows the traditional unimodal video recognition paradigm, initializing the video encoder with the pre-trained visual encoder of the VLM. Conversely, the other approach [21,35,50,60] directly transfers the entire VLM into a video-text learning framework that utilizes natural language (i.e., class names) as supervision, as shown in Figure 1(b). This leads to a question: have we fully utilized the knowledge of VLMs for video recognition?
32
+
33
+ In our opinion, the answer is No. The greatest charm of VLMs is their ability to build a bridge between the visual and textual domains. Despite this, previous research employing pre-aligned vision-text features of VLMs for video recognition has only utilized unidirectional video-to-text matching. In this paper, we aim to facilitate bidirectional knowledge exploration through the cross-modal bridge for enhanced video recognition. With this in mind, we mine Video-to-Text and Text-to-Video knowledge by 1) generating textual information from the input video and 2) utilizing category descriptions to extract valuable video-related signals.
36
+
37
+ In the first Video-to-Text direction, a common practice for mining VLM knowledge is to embed the input video and category description into a pre-aligned feature space, and then select the category that is closest to the video, as illustrated in Figure 1(b), which serves as our baseline. One further question naturally arises: Can we incorporate auxiliary textual information for video recognition? To address this question, we introduce a Video-Attributes Association mechanism, which leverages the zero-shot capability of VLMs to retrieve the most relevant phrases from a pre-defined lexicon for the video. These phrases are considered potential “attributes” of the video and can predict the video category directly. For example, a video of someone kicking a soccer ball may be associated with relevant phrases such as “running on the grass”, “juggling soccer ball” and “shooting goal”. Surprisingly, using only the generated attributes, we can achieve $69\%$ top-1 accuracy on the challenging Kinetics-400 dataset. Furthermore, these attributes provide additional information that the video visual signal may not capture, allowing us to build an Attributes Recognition Branch for video recognition.
38
+
39
+ In the second Text-to-Video direction, we believe that temporal saliency in videos can be leveraged to improve video representations. For instance, in a video with the category "kicking soccer ball", certain frames of kicking the ball should have higher saliency, while other frames that are unrelated to the category or background frames should have lower saliency. This insight motivates us to propose the Video Concept Spotting mechanism, which utilizes the cross-modal bridge to generate category-dependent temporal saliency. In previous works [35, 50, 60], this intuitive exploration was disregarded. To be more specific, instead of treating each video frame equally, we use the correlation between each frame and the given concept (e.g., category) as a measure of frame-level saliency. This saliency is then used to temporally aggregate the frames, resulting in a compact video representation.
40
+
41
+ In the light of the above explorations, we propose BIKE, a simple yet effective framework via BIdirectional cross-modal Knowledge Exploration for enhanced video recognition. Our BIKE comprises two branches: the Attributes branch, which utilizes the Video-Attributes Association mechanism to introduce auxiliary attributes for complementary video recognition, and the Video branch, which uses the Video Concept Spotting mechanism to introduce temporal saliency to enhance video recognition. To demonstrate the effectiveness of our BIKE, we conduct comprehensive experiments on popular video datasets, including Kinetics-400 [22] & 600 [6], UCF-101 [44], HMDB-51 [24], ActivityNet [5] and Charades [42]. The results show that our method achieves state-of-the-art performance in most scenarios, e.g., general, zero-shot, and few-shot recognition. Our main contributions can be summarized as follows:
44
+
45
+ - We propose a novel framework called BIKE that explores bidirectional knowledge from pre-trained vision-language models for video recognition.
46
+ - In the Video-to-Text direction, we introduce the Video-Attributes Association mechanism to generate extra attributes for complementary video recognition.
47
+ - In the Text-to-Video direction, we introduce the Video Concept Spotting mechanism to generate temporal saliency, which is used to yield the compact video representation for enhanced video recognition.
48
+
49
+ # 2. Methodology
50
+
51
+ An overview of our proposed BIKE is shown in Figure 2. We next elaborate on each component in more detail.
52
+
53
+ # 2.1. Preliminary: Video Recognition with VLM
54
+
55
+ In this section, we describe the typical cross-modal video recognition pipeline [21,35,50,60] based on the pre-trained vision-language model (VLM). Given a video, we sample $T$ frames from the video as input $v$ . We also have a collection of categories $C = \{c_{1}, c_{2}, \dots, c_{K}\}$ , where $K$ is the number of classes. The goal of the video recognition task is to classify the video $v$ into a category $c \in C$ . Under the formulation of video recognition, the video $v$ is encoded with a vision encoder $f(\cdot | \theta_v)$ to obtain the video embedding $\mathbf{e}_{\mathbf{v}}$ , and the category $c$ is encoded with a text encoder $g(\cdot | \phi_c)$ to obtain the category embedding $\mathbf{e}_{\mathbf{c}}$ , where
56
+
57
+ $$
58
+ \mathbf {e} _ {\mathbf {v}} = f (v \mid \theta_ {v}), \mathbf {e} _ {\mathbf {c}} = g (c \mid \phi_ {c}). \tag {1}
59
+ $$
60
+
61
+ Finally, we obtain the similarity score $S_V$ as follows:
62
+
63
+ $$
64
+ \mathcal {S} _ {V} = s \left(\mathbf {e} _ {\mathbf {v}}, \mathbf {e} _ {\mathbf {c}}\right), \tag {2}
65
+ $$
66
+
67
+ where $s(\cdot, \cdot)$ is the cosine similarity function. The objective during training is to maximize $S_V$ if $v$ and $c$ are matched, and minimize it in all other cases. During inference, we compute the score between the video embedding and each category embedding, and choose the category with the highest $S_V$ as the top-1 prediction. The parameters $\theta_v$ and $\phi_c$ of the video encoder and text encoder are initialized with weights from the pre-trained VLM (e.g., CLIP [38]). Throughout the rest of this work, we use the same notation.
68
+
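+ In code, this baseline reduces to a cosine-similarity argmax over the pre-computed embeddings; a minimal sketch follows (the function and variable names are ours, not part of the released code):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def classify_video(video_emb, category_embs):
+     """Pick the category whose text embedding is closest to the video (Eqs. 1-2).
+
+     video_emb: (D,) encoded video; category_embs: (K, D) encoded class names.
+     Returns the top-1 class index and the per-class scores S_V.
+     """
+     scores = F.cosine_similarity(video_emb.unsqueeze(0), category_embs, dim=-1)
+     return scores.argmax().item(), scores
+ ```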
69
+ # 2.2. Video-to-Text: Video-Attributes Association
70
+
71
+ First, we focus on exploring Video-to-Text auxiliary signals. We present an Attributes branch as a complement to the regular Video branch in Sec. 2.1 for video recognition. Pre-generated Attributes. We begin by describing how to generate auxiliary attributes.
72
+
73
+ ![](images/68520389b109aee7febaba4b87516afaa54ac0ea5d82c1e72001c59c04ae1535.jpg)
74
+
75
+ ![](images/19a676279fcdef21fe98db8934cf84fbc3299af60ef0858037649b1f9bb4f159.jpg)
76
+ Figure 2. An overview of our BIKE for video recognition. (a) BIKE explores bidirectional cross-modal knowledge from the pre-trained vision-language model (e.g., CLIP) to introduce auxiliary attributes and category-dependent temporal saliency for improved video recognition. BIKE comprises an auxiliary Attributes branch and a main Video branch. (b) In the Video-to-Text direction, we present the Video-Attribute Association mechanism, which retrieves semantically relevant phrases from a pre-defined lexicon as video attributes for the input video. These attributes are concatenated and combined with a textual prefix to form an attribute sentence for text recognition. (c) In the Text-to-Video direction, we present the Video Concept Spotting mechanism, which computes the similarity between video frames and a given category as a measure of temporal saliency to enhance video representation. $D$ is the dimension of embedding, $T$ is the number of frames, and $N$ is the number of words in the category name.
77
+
78
+ ![](images/ff6912d497325cd30545c51808d0fae2a0ba8497b11d1d2b146264ead8cb5c31.jpg)
79
+
80
+ As depicted in Figure 2(b), we utilize the zero-shot capability of the VLM (e.g., CLIP [38]) to identify the most relevant phrases from a pre-defined lexicon as possible "Attributes" of the video. To achieve this, we first apply CLIP's image encoder to the input video $v$ to extract frame-level features. These features are then combined using average pooling to yield a video embedding. Next, we feed each phrase in the pre-defined lexicon into CLIP's text encoder to produce a set of text embeddings. We then calculate the similarity between this video embedding and each text embedding, sort the results, and select the top few phrases as the "Attributes". Once we have obtained the attributes, we employ a simple fusion method that concatenates them into a single attributes sentence $a$ . We also add a manually-designed prompt as a prefix to the sentence, such as "This is a video about {}".
81
+
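+ The retrieval step just described can be summarised by the following sketch. The prompt prefix is the one quoted above; the number of retained phrases (`top_k`) and the comma-joining of the phrases are assumptions, since the text only says "the top few phrases" are concatenated.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def generate_attribute_sentence(frame_embs, lexicon_embs, lexicon, top_k=5):
+     """Retrieve the most relevant lexicon phrases and build the attribute sentence.
+
+     frame_embs:   (T, D) CLIP image features of the sampled frames.
+     lexicon_embs: (P, D) CLIP text features of the pre-defined phrases.
+     lexicon:      list of P phrase strings.
+     """
+     video_emb = frame_embs.mean(dim=0, keepdim=True)              # average pooling
+     sims = F.cosine_similarity(video_emb, lexicon_embs, dim=-1)   # (P,)
+     top = sims.topk(top_k).indices.tolist()
+     attributes = [lexicon[i] for i in top]                        # the "Attributes"
+     return "This is a video about " + ", ".join(attributes)
+ ```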
82
+ Attributes Recognition. As shown in Figure 2(a), the attributes sentence $a$ is encoded with a text encoder $g(\cdot |\phi_a)$ to produce the attribute embedding $\mathbf{e}_{\mathbf{a}}$ :
83
+
84
+ $$
85
+ \mathbf {e} _ {\mathbf {a}} = g (a | \phi_ {a}). \tag {3}
86
+ $$
87
+
88
+ We use this attribute embedding to perform Attributes Recognition by calculating the similarity $S_A$ between the attribute embedding and category embeddings. Note that both the attribute sentence and categories are encoded using the same text encoder from CLIP. Interestingly, the Attributes branch can achieve a certain level of recognition performance (e.g., $\sim 56\%$ ) without any extra training, even though it's a lightweight text recognition pipeline. During inference, we combine the well-trained Video branch with the plug-and-play Attributes branch using the following fusion equation:
89
+
90
+ $$
91
+ \mathcal {S} = \lambda \mathcal {S} _ {V} + (1 - \lambda) \mathcal {S} _ {A}, \tag {4}
92
+ $$
93
+
94
+ where $\lambda$ is the fusion weight. Without any additional training, the Attributes Recognition surprisingly improves the video recognition performance, e.g., $78.8\% \xrightarrow{+1.2\%} 80.0\%$ on the challenging Kinetics-400. Naturally, the text encoder $g(\cdot|\phi_a)$ can be further trained in an end-to-end manner to improve the Attributes branch and provide a stronger complementary capability, e.g., $78.8\% \xrightarrow{+2.6\%} 81.4\%$ .
95
+
96
+ # 2.3. Text-to-Video: Video Concept Spotting
97
+
98
+ In Sec. 2.2, the Video-to-Text knowledge is employed to generate auxiliary attributes, thereby constructing a complementary Attributes branch. Naturally, we also conduct an exploration to leverage the Text-to-Video knowledge to enhance the standard Video branch for video recognition. Specifically, we propose the use of category-dependent temporal saliency to guide the temporal aggregation process, resulting in a compact video representation that enhances video recognition.
99
+
100
+ Background. To obtain a video representation based on a pre-trained image model, the typical pipeline involves two stages. First, we employ the image model to extract the spatial embedding of each frame. Next, the embeddings of these frames are temporally aggregated (e.g., mean pooling) to yield a video-level representation.
101
+
102
+ Parameter-Free Video Concept Spotting. Mean pooling is a widely used technique to aggregate the frame embeddings and obtain the final video representation. Instead of treating each video frame equally as in mean pooling, we propose a parameter-free solution that utilizes the pre-aligned visual and textual semantics offered by the VLM (e.g., CLIP [38]) to capture temporal saliency for video feature aggregation, as illustrated in Figure 2(c). To estimate temporal saliency, we employ word embeddings as the query to obtain finer word-to-frame saliency. Formally, the pre-trained VLM can encode each video or category name separately, and output two sets of embeddings: $\{\mathbf{v}_t \in \mathbb{R}^d \mid t = 1,2,\dots,T\}$ is a set of frame embeddings, where $T$ is the number of sampled frames, and $\{\mathbf{t}_n \in \mathbb{R}^d \mid n = 1,2,\dots,N\}$ is a set of word embeddings, where $N$ is the number of words in the class name. We calculate the similarity between each word and each frame to measure the fine-grained relevancy. After that, we perform a softmax operation to normalize the similarities for each frame, and then aggregate the similarities between a certain frame and different words to obtain a frame-level saliency.
103
+
104
+ $$
105
+ \mathcal {S} _ {t} = \frac {1}{N} \sum_ {n = 1} ^ {N} \frac {\exp \left(\mathbf {v} _ {t} ^ {\mathsf {T}} \mathbf {t} _ {n} / \tau\right)}{\sum_ {t = 1} ^ {T} \exp \left(\mathbf {v} _ {t} ^ {\mathsf {T}} \mathbf {t} _ {n} / \tau\right)}, t \in [ 1, T ], n \in [ 1, N ], \tag {5}
106
+ $$
107
+
108
+ where $\tau$ is the temperature of this softmax function. See Figure 3 for the visualization of temporal saliency. Next, we utilize the temporal saliency to aggregate these frame embeddings as follows:
109
+
110
+ $$
111
+ \mathbf {e} _ {\mathbf {v}} = \sum_ {t = 1} ^ {T} \mathbf {v} _ {t} \mathcal {S} _ {t}, \tag {6}
112
+ $$
113
+
114
+ $\mathbf{e}_{\mathbf{v}} \in \mathbb{R}^{d}$ is the final enhanced video representation.
115
+
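+ Equations 5 and 6 amount to a softmax over frames followed by a weighted sum; a direct, parameter-free sketch is given below, assuming the frame and word embeddings are already extracted and, as with CLIP features, roughly unit-normalised so that dot products approximate cosine similarities.
+
+ ```python
+ import torch
+
+ def video_concept_spotting(frame_embs, word_embs, tau=0.01):
+     """Category-dependent temporal saliency and aggregation (Eqs. 5-6).
+
+     frame_embs: (T, D) frame embeddings v_t; word_embs: (N, D) word embeddings t_n.
+     Returns the aggregated video embedding e_v and the per-frame saliency S_t.
+     """
+     logits = frame_embs @ word_embs.t() / tau      # (T, N) word-to-frame similarities
+     sal = torch.softmax(logits, dim=0)             # normalise over the T frames per word
+     saliency = sal.mean(dim=1)                     # S_t: average over the N words
+     video_emb = (frame_embs * saliency.unsqueeze(1)).sum(dim=0)  # e_v = sum_t v_t * S_t
+     return video_emb, saliency
+ ```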
116
+ # 2.4. Objectives of BIKE
117
+
118
+ We present the BIKE learning framework for video recognition, as depicted in Figure 2(a). Formally, our BIKE extracts feature representations $\mathbf{e_v}$ , $\mathbf{e_a}$ , and $\mathbf{e_c}$ for a given video $v$ , pre-generated attributes $a$ , and category $c$ with the corresponding encoders $f(\cdot|\theta_v)$ , $g(\cdot|\phi_a)$ , and $g(\cdot|\phi_c)$ . Model parameters $\theta_v$ , $\phi_a$ , and $\phi_c$ are initialized with the weights from the pre-trained VLM (e.g., CLIP [38]). In this paper, we freeze the parameters of the pre-trained text encoder for $g(\cdot|\phi_c)$ and design extra manual prompts for the category $c$ and attributes sentence $a$ .
119
+
120
+ During the training phase, our objective is to ensure that the video representation $\mathbf{e}_{\mathbf{v}}$ and the category representation $\mathbf{e}_{\mathbf{c}}$ are similar when they are related and dissimilar when they are not, and the same applies to the attributes-category pairs. Given a batch of $B$ quadruples $\{\mathbf{e}_{\mathbf{vi}}, \mathbf{e}_{\mathbf{ai}}, \mathbf{e}_{\mathbf{ci}} \equiv C[y_i], y_i\}_{i=1}^B$ , where $C$ is the collection of $K$ categories indexed by $y_i \in [0, K-1]$ and $y_i$ is a label indicating the index of the category in the dataset, and $\mathbf{e}_{\mathbf{vi}}, \mathbf{e}_{\mathbf{ai}}, \mathbf{e}_{\mathbf{ci}}$ denote the $i$ -th video embedding, attributes embedding, and category embedding, respectively. We follow the common practice [21,50] to consider the bidirectional learning objective and employ symmetric cross-entropy loss to maximize the similarity between matched Video-Category pairs and minimize the similarity for other pairs:
121
+
122
+ $$
123
+ \mathcal {L} _ {V 2 C} = - \frac {1}{B} \sum_ {i} ^ {B} \frac {1}{| \mathcal {K} (i) |} \sum_ {k \in \mathcal {K} (i)} \log \frac {\exp (s (\mathbf {e} _ {\mathbf {c} i} , \mathbf {e} _ {\mathbf {v} k}) / \tau)}{\sum_ {j} ^ {B} \exp (s (\mathbf {e} _ {\mathbf {c} i} , \mathbf {e} _ {\mathbf {v} j}) / \tau)},
124
+ $$
125
+
126
+ $$
127
+ \begin{array}{l} \mathcal {L} _ {C 2 V} = - \frac {1}{B} \sum_ {i} ^ {B} \frac {1}{| \mathcal {K} (i) |} \sum_ {k \in \mathcal {K} (i)} \log \frac {\exp (s (\mathbf {e} _ {\mathbf {c} k} , \mathbf {e} _ {\mathbf {v} i}) / \tau)}{\sum_ {j} ^ {B} \exp (s (\mathbf {e} _ {\mathbf {c} j} , \mathbf {e} _ {\mathbf {v} i}) / \tau)}, \\ \mathcal {L} _ {V} = \frac {1}{2} \left(\mathcal {L} _ {V 2 C} + \mathcal {L} _ {C 2 V}\right), \tag {7} \\ \end{array}
128
+ $$
129
+
130
+ where $k \in \mathcal{K}(i) = \{k|k \in [1,B], y_k = y_i\}$ , $s(\cdot, \cdot)$ is the cosine similarity, and $\tau$ refers to the temperature hyperparameter for scaling. Similarly, the loss for Attributes branch is formulated as:
131
+
132
+ $$
133
+ \mathcal {L} _ {A 2 C} = - \frac {1}{B} \sum_ {i} ^ {B} \frac {1}{| \mathcal {K} (i) |} \sum_ {k \in \mathcal {K} (i)} \log \frac {\exp (s (\mathbf {e} _ {\mathbf {c} i} , \mathbf {e} _ {\mathbf {a} k}) / \tau)}{\sum_ {j} ^ {B} \exp (s (\mathbf {e} _ {\mathbf {c} i} , \mathbf {e} _ {\mathbf {a} j}) / \tau)},
134
+ $$
135
+
136
+ $$
137
+ \begin{array}{l} \mathcal {L} _ {C 2 A} = - \frac {1}{B} \sum_ {i} ^ {B} \frac {1}{| \mathcal {K} (i) |} \sum_ {k \in \mathcal {K} (i)} \log \frac {\exp \left(s \left(\mathbf {e} _ {\mathbf {c} k} , \mathbf {e} _ {\mathbf {a} i}\right) / \tau\right)}{\sum_ {j} ^ {B} \exp \left(s \left(\mathbf {e} _ {\mathbf {c} j} , \mathbf {e} _ {\mathbf {a} i}\right) / \tau\right)}, \\ \mathcal {L} _ {A} = \frac {1}{2} \left(\mathcal {L} _ {A 2 C} + \mathcal {L} _ {C 2 A}\right). \tag {8} \\ \end{array}
138
+ $$
139
+
140
+ The total loss $\mathcal{L}$ is the sum of $\mathcal{L}_V$ and $\mathcal{L}_A$ :
141
+
142
+ $$
143
+ \mathcal {L} = \mathcal {L} _ {V} + \mathcal {L} _ {A}. \tag {9}
144
+ $$
145
+
146
+ For inference, we simply combine the similarity scores of the two branches as in Equation 4.
147
+
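+ For completeness, the sketch below implements the symmetric objective of Equation 7 over a batch (the attributes loss of Equation 8 is identical with attribute embeddings in place of video embeddings) and the score fusion of Equation 4. The explicit L2 normalisation matches the cosine similarity $s(\cdot,\cdot)$, and the default fusion weight is an arbitrary placeholder.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def symmetric_video_category_loss(video_embs, category_embs, labels, tau=0.01):
+     """Symmetric cross-entropy between matched video/category pairs (Eq. 7).
+
+     video_embs, category_embs: (B, D); labels: (B,) class indices defining K(i).
+     """
+     v = F.normalize(video_embs, dim=-1)
+     c = F.normalize(category_embs, dim=-1)
+     sims = (c @ v.t()) / tau                                    # sims[i, k] = s(e_ci, e_vk) / tau
+     pos = (labels.unsqueeze(1) == labels.unsqueeze(0)).float()  # same-class mask K(i)
+     log_v2c = torch.log_softmax(sims, dim=1)                    # normalise over videos k
+     log_c2v = torch.log_softmax(sims, dim=0)                    # normalise over categories j
+     l_v2c = -((log_v2c * pos).sum(1) / pos.sum(1)).mean()
+     l_c2v = -((log_c2v * pos).sum(0) / pos.sum(0)).mean()
+     return 0.5 * (l_v2c + l_c2v)
+
+ def fused_score(s_video, s_attr, lam=0.5):
+     """Inference-time fusion of the two branches (Eq. 4); lam is the fusion weight."""
+     return lam * s_video + (1.0 - lam) * s_attr
+ ```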
148
+ <table><tr><td>Method</td><td>Venue</td><td>Input</td><td>Pre-training</td><td>Top-1(%)</td><td>Top-5(%)</td><td>Views</td><td>FLOPs</td><td>Param</td></tr><tr><td>NL I3D-101 [51]</td><td>CVPR&#x27;18</td><td>128×2242</td><td>ImageNet-1K</td><td>77.7</td><td>93.3</td><td>10×3</td><td>359×30</td><td>61.8</td></tr><tr><td>MVFNetEn [56]</td><td>AAAI&#x27;21</td><td>24×2242</td><td>ImageNet-1K</td><td>79.1</td><td>93.8</td><td>10×3</td><td>188×30</td><td>-</td></tr><tr><td>TimeSformer-L [2]</td><td>ICML&#x27;21</td><td>96×2242</td><td>ImageNet-21K</td><td>80.7</td><td>94.7</td><td>1×3</td><td>2380×3</td><td>121.4</td></tr><tr><td>ViViT-L/16×2 [1]</td><td>ICCV&#x27;21</td><td>32×3202</td><td>ImageNet-21K</td><td>81.3</td><td>94.7</td><td>4×3</td><td>3992×12</td><td>310.8</td></tr><tr><td>VideoSwin-L [31]</td><td>CVPR&#x27;22</td><td>32×3842</td><td>ImageNet-21K</td><td>84.9</td><td>96.7</td><td>10×5</td><td>2107×50</td><td>200.0</td></tr><tr><td colspan="9">Methods with large-scale image pre-training</td></tr><tr><td>ViViT-L/16×2 [1]</td><td>ICCV&#x27;21</td><td>32×3202</td><td>JFT-300M</td><td>83.5</td><td>95.5</td><td>4×3</td><td>3992×12</td><td>310.8</td></tr><tr><td>ViViT-H/16×2 [1]</td><td>ICCV&#x27;21</td><td>32×2242</td><td>JFT-300M</td><td>84.8</td><td>95.8</td><td>4×3</td><td>8316×12</td><td>647.5</td></tr><tr><td>TokenLearner-L/10 [41]</td><td>NeurIPS&#x27;21</td><td>32×2242</td><td>JFT-300M</td><td>85.4</td><td>96.3</td><td>4×3</td><td>4076×12</td><td>450</td></tr><tr><td>MTV-H [65]</td><td>CVPR&#x27;22</td><td>32×2242</td><td>JFT-300M</td><td>85.8</td><td>96.6</td><td>4×3</td><td>3706×12</td><td>-</td></tr><tr><td>CoVeR [70]</td><td>arXiv&#x27;21</td><td>16×4482</td><td>JFT-300M</td><td>86.3</td><td>-</td><td>1×3</td><td>-</td><td>-</td></tr><tr><td>CoVeR [70]</td><td>arXiv&#x27;21</td><td>16×4482</td><td>JFT-3B</td><td>87.2</td><td>-</td><td>1×3</td><td>-</td><td>-</td></tr><tr><td colspan="9">Methods with large-scale image-language pre-training</td></tr><tr><td>CoCa ViT-giant [67]</td><td>arXiv&#x27;22</td><td>6×2882</td><td>JFT-3B+ALIGN-1.8B</td><td>88.9</td><td>-</td><td>-</td><td>-</td><td>2100</td></tr><tr><td>VideoPrompt ViT-B/16 [21]</td><td>ECCV&#x27;22</td><td>16×2242</td><td>WIT-400M</td><td>76.9</td><td>93.5</td><td>-</td><td>-</td><td>-</td></tr><tr><td>ActionCLIP ViT-B/16 [50]</td><td>arXiv&#x27;21</td><td>32×2242</td><td>WIT-400M</td><td>83.8</td><td>96.2</td><td>10×3</td><td>563×30</td><td>141.7</td></tr><tr><td>Florence [68]</td><td>arXiv&#x27;21</td><td>32×3842</td><td>FLD-900M</td><td>86.5</td><td>97.3</td><td>4×3</td><td>-</td><td>647</td></tr><tr><td>ST-Adapter ViT-L/14 [36]</td><td>NeurIPS&#x27;22</td><td>32×2242</td><td>WIT-400M</td><td>87.2</td><td>97.6</td><td>3×1</td><td>8248</td><td>-</td></tr><tr><td>AIM ViT-L/14 [66]</td><td>ICLR&#x27;23</td><td>32×2242</td><td>WIT-400M</td><td>87.5</td><td>97.7</td><td>3×1</td><td>11208</td><td>341</td></tr><tr><td>EVL ViT-L/14 [28]</td><td>ECCV&#x27;22</td><td>32×2242</td><td>WIT-400M</td><td>87.3</td><td>-</td><td>3×1</td><td>8088</td><td>-</td></tr><tr><td>EVL ViT-L/14 [28]</td><td>ECCV&#x27;22</td><td>32×3362</td><td>WIT-400M</td><td>87.7</td><td>-</td><td>3×1</td><td>18196</td><td>-</td></tr><tr><td>X-CLIP ViT-L/14 [35]</td><td>ECCV&#x27;22</td><td>16×3362</td><td>WIT-400M</td><td>87.7</td><td>97.4</td><td>4×3</td><td>3086×12</td><td>-</td></tr><tr><td>Text4Vis ViT-L/14 
+ [60]</td><td>AAAI&#x27;23</td><td>32×3362</td><td>WIT-400M</td><td>87.8</td><td>97.6</td><td>1×3</td><td>3829×3</td><td>230.7</td></tr><tr><td></td><td></td><td>16×2242</td><td></td><td>88.1</td><td>97.9</td><td>4×3</td><td>830×12</td><td>230</td></tr><tr><td>BIKE ViT-L/14</td><td>CVPR&#x27;23</td><td>8×3362</td><td>WIT-400M</td><td>88.3</td><td>98.1</td><td>4×3</td><td>932×12</td><td>230</td></tr><tr><td></td><td></td><td>32×3362</td><td></td><td>88.6</td><td>98.3</td><td>4×3</td><td>3728×12</td><td>230</td></tr></table>
149
+
150
+ Table 1. Comparisons with state-of-the-art methods on Kinetics-400. We report the FLOPs in inference phase. "Views" indicates # temporal clip $\times$ # spatial crop. The magnitudes are Giga $(10^{9})$ and Mega $(10^{6})$ for FLOPs and Param.
151
+
152
+ # 3. Experiments
153
+
154
+ # 3.1. Setups
155
+
156
+ We conduct experiments on six widely used video benchmarks, i.e., Kinetics-400 [22] & 600 [6], ActivityNet [5], Charades [42], UCF-101 [44] and HMDB-51 [24]. See Supplementary for statistics of these datasets.
157
+
158
+ Training & Inference. In our experiments, we adopt the visual encoder of CLIP [38] as the video encoder and use the textual encoder of CLIP for both the category and attributes encoders. To avoid conflict between the two branches, we first train the video encoder and then the attributes encoder. To prepare the video input, we sparsely sample $T$ (e.g., 8, 16, 32) frames. We set the temperature hyperparameter $\tau$ to 0.01 for all training phases. See Supplementary for detailed training hyperparameters.
159
+
160
+ To trade off accuracy and speed, we consider two evaluation protocols. (1) Single View: We use only 1 clip per video and the center crop for efficient evaluation, as shown in Table 6. (2) Multiple Views: It is a common practice [7, 14, 56] to sample multiple clips per video with several spatial crops to get higher accuracy. For comparison with SOTAs, we use four clips with three crops ("4×3 Views") in Table 1.
163
+
164
+ # 3.2. Main Results
165
+
166
+ Comparison with State-of-the-arts. We present our results on Kinetics-400 in Table 1 and compare our approach with SOTAs trained under various pre-training settings. Our approach outperforms regular video recognition methods while requiring significantly less computation, as shown in the upper table. We also demonstrate superiority over methods that use web-scale image pre-training, such as JFT-300M [45] and JFT-3B [69]. Our model performs better than all JFT-300M pre-trained methods, achieving a higher accuracy $(+2.3\%)$ than CoVeR [70]. Surprisingly, our method even outperforms the JFT-3B pre-trained model $(88.6\%$ vs. $87.2\%)$ despite the latter having almost 3 billion annotated images and a data scale $7.5\times$ larger than ours. We further compare our method with others using web-scale image-language pre-training, such as CLIP [38] and Florence [68]. Despite Florence having a larger dataset $(2\times$ more data than the 400M image-text data used in CLIP), our approach still achieves a higher accuracy by $2.1\%$ . Additionally, using only 8 frames and the same CLIP pre-training, our model performs on par with the best results of other methods, such as EVL [28], X-CLIP [35], and Text4Vis [60]. When we use more frames as input, our method achieves a new state-of-the-art accuracy of $88.6\%$ under the CLIP pre-training setting.
169
+
170
+ <table><tr><td>Method</td><td>Frames</td><td>mAP</td></tr><tr><td>MultiScale TRN [73]</td><td>-</td><td>25.2</td></tr><tr><td>STM [20]</td><td>16</td><td>35.3</td></tr><tr><td>SlowFast R101 [14]</td><td>16+64</td><td>42.5</td></tr><tr><td>X3D-XL (312↑) [13]</td><td>16</td><td>43.4</td></tr><tr><td>ActionCLIP [50]</td><td>32</td><td>44.3</td></tr><tr><td>BIKE ViT-L</td><td>16</td><td>50.4</td></tr></table>
171
+
172
+ <table><tr><td>Method</td><td>Shot</td><td>HMDB</td><td>UCF</td><td>ANet</td><td>K400</td></tr><tr><td>VideoSwin [31]</td><td>2</td><td>20.9</td><td>53.3</td><td>-</td><td>-</td></tr><tr><td>VideoPrompt [21]</td><td>5</td><td>56.6</td><td>79.5</td><td>-</td><td>58.5</td></tr><tr><td>X-Florence [35]</td><td>2</td><td>51.6</td><td>84.0</td><td>-</td><td>-</td></tr><tr><td rowspan="3">BIKE ViT-L</td><td>1</td><td>72.3</td><td>95.2</td><td>86.6</td><td>73.5</td></tr><tr><td>2</td><td>73.5</td><td>96.1</td><td>88.7</td><td>75.7</td></tr><tr><td>5</td><td>77.7</td><td>96.5</td><td>90.9</td><td>78.2</td></tr></table>
173
+
174
+ Table 2. Comparisons with SOTAs on ActivityNet.
175
+ Table 3. Comparisons on Multi-label video dataset Charades.
176
+ Table 4. Comparisons on few-shot action recognition across four video datasets.
177
+
178
+ <table><tr><td>Method</td><td>UCF* / UCF</td><td>HMDB* / HMDB</td><td>ActivityNet* / ActivityNet</td><td>Kinetics-600</td></tr><tr><td>GA [34]</td><td>17.3±1.1 / -</td><td>19.3±2.1 / -</td><td>-</td><td>-</td></tr><tr><td>TS-GCN [16]</td><td>34.2±3.1 / -</td><td>23.2±3.0 / -</td><td>-</td><td>-</td></tr><tr><td>E2E [3]</td><td>44.1 / 35.3</td><td>29.8 / 24.8</td><td>26.6 / 20.0</td><td>-</td></tr><tr><td>DASZL [23]</td><td>48.9±5.8 / -</td><td>- / -</td><td>-</td><td>-</td></tr><tr><td>ER [8]</td><td>51.8±2.9 / -</td><td>35.3±4.6 / -</td><td>-</td><td>42.1±1.4</td></tr><tr><td>ResT [26]</td><td>58.7±3.3 / 46.7</td><td>41.1±3.7 / 34.4</td><td>32.5 / 26.3</td><td>-</td></tr><tr><td>BIKE ViT-L</td><td>86.6±3.4 / 80.8</td><td>61.4±3.6 / 52.8</td><td>86.2±1.0 / 80.0</td><td>68.5±1.2</td></tr></table>
179
+
180
+ Table 5. Comparisons on zero-shot video recognition. * denotes randomly selecting half of the test dataset's classes for evaluation, repeating the process ten times, and reporting the mean accuracy with standard deviation. For Kinetics-600, we adopt official code [8] to select the 220 new categories outside of Kinetics-400 for evaluation.
181
+
182
+ training, our model performs on par with the best results of other methods, such as AVL [28], X-CLIP [35], and Text4Vis [60]. When we use more frames as input, our method achieves a new state-of-the-art accuracy of $88.6\%$ under the CLIP pre-training setting.
183
+
184
+ We also evaluate our method on the untrimmed video dataset, ActivityNet-v1.3, to verify its generalizability. We fine-tune the Kinetics-400 pre-trained model with 16 frames, and report the top-1 accuracy and mean average precision (mAP) using the official evaluation metrics. Our approach significantly outperforms recent SOTAs, as shown in Table 2. Furthermore, to demonstrate its effectiveness on smaller datasets, we also evaluate our method on UCF-101 and HMDB-51, achieving top-1 accuracy of $98.8\%$ and $83.1\%$ , respectively. We include the results in the Supplementary due to space limitations.
185
+
186
+ Multi-Label Video Recognition. In addition to the single-label video recognition, we also evaluate our method on multi-label video recognition. We use the Charades dataset, which contains long-term activities with multiple actions, and utilize the Kinetics-400 pre-trained ViT-L backbone for training. The results are evaluated using the mAP metric. As shown in Table 3, our BIKE achieves the performance of $50.4\%$ mAP, demonstrating its effectiveness in multi-label video classification.
187
+
188
+ Few-Shot Video Recognition. We demonstrate the few-shot recognition capability of our method, which refers to
189
+
190
+ video recognition using only a few training samples. In this experiment, we scale the task up to categorizing all categories in the dataset with only a few samples per category for training. We use a CLIP pre-trained ViT-L/14 with 8 frames for few-shot video recognition, without further Kinetics-400 pre-training. The top-1 accuracy on four datasets is reported in Table 4. Our method shows remarkable transferability to diverse domains in a data-poor situation. On UCF-101 and HMDB-51, our method outperforms VideoSwin [31] by $42.8\%$ and $52.6\%$, respectively. Compared with image-language pre-trained methods, our method outperforms VideoPrompt [21] and X-Florence [35] by $21.1\%$ and $21.9\%$ on HMDB-51, respectively. See Supplementary for training details.
191
+
192
+ Zero-shot Video Recognition. We further evaluate our method in an open-set setting. Table 5 presents the results of zero-shot evaluation on four video datasets using our Kinetics-400 pre-trained model (i.e., ViT-L/14 with 8 frames). There are two major evaluation protocols on UCF-101, HMDB-51, and ActivityNet: half-classes evaluation (marked as *) and full-classes evaluation. For a fair comparison, we present the results under the half-classes evaluation protocol, which has been widely used in previous works [3, 8, 26, 34]. Additionally, we provide results on the entire dataset for a more challenging and realistic accuracy evaluation. See Supplementary for further details on evaluation protocols. Our method exhibits strong cross-
193
+
194
+ <table><tr><td>Video branch</td><td>g(·|φc)</td><td>Top-1(%)</td></tr><tr><td>Baseline: Mean Pool</td><td>■</td><td>76.8</td></tr><tr><td>+ Video Concept Spotting</td><td>■</td><td>78.5 (+1.7)</td></tr><tr><td>+ (Technique) Transf</td><td>■</td><td>78.7 (+1.9)</td></tr><tr><td>+ Frozen label encoder</td><td>■</td><td>78.9 (+2.1)</td></tr></table>
195
+
196
+ (a) The effectiveness of temporal saliency. The $g(\cdot |\phi_c)$ column indicates whether the category encoder is fine-tuned or frozen. Transf is the temporal transformer.
197
+
198
+ <table><tr><td>VCS Source</td><td>Recognition Source</td><td>Top-1</td></tr><tr><td>Word Emb.</td><td>Word Emb.</td><td>78.1</td></tr><tr><td>[CLS] Emb.</td><td>[CLS] Emb.</td><td>74.7</td></tr><tr><td>Word Emb.</td><td>[CLS] Emb.</td><td>78.5</td></tr></table>
201
+
202
+ (b) Different category embeddings are used for Video Concept Spotting (VCS) and recognition.
203
+
204
+ <table><tr><td>Attributes</td><td>Category</td><td>Top-1</td></tr><tr><td>X</td><td>X</td><td>46.2</td></tr><tr><td>✓</td><td>X</td><td>51.2</td></tr><tr><td>✓</td><td>✓</td><td>56.6</td></tr></table>
205
+
206
+ (c) The effects of the textual prompt in Attributes recognition branch (w/o training).
207
+
208
+ <table><tr><td>#Attributes</td><td>A</td><td>V+A</td></tr><tr><td>3</td><td>53.4</td><td>79.9</td></tr><tr><td>5</td><td>56.6</td><td>80.0</td></tr><tr><td>7</td><td>57.1</td><td>79.7</td></tr></table>
209
+
210
+ (d) Study on different number of attributes (w/o training).
211
+
212
+ <table><tr><td>Training</td><td>A</td><td>V</td><td>+Δ%</td><td>V+A</td></tr><tr><td>X</td><td>56.6</td><td>78.9</td><td>+1.1%</td><td>80.0</td></tr><tr><td>✓</td><td>69.6</td><td>78.9</td><td>+2.5%</td><td>81.4</td></tr></table>
213
+
214
+ (e) The impact of Attributes branch.
215
+ $\checkmark$ means fine-tuning the attributes encoder.
216
+
217
+ <table><tr><td></td><td>V</td><td>+Δ%</td><td>V+A</td></tr><tr><td>Baseline</td><td>76.8</td><td>+2.4%</td><td>79.2</td></tr><tr><td>Ours</td><td>78.9</td><td>+2.5%</td><td>81.4</td></tr></table>
218
+
219
+ (f) The effects of Attributes branch to complement Video branch.
220
+
221
+ <table><tr><td>Lexicon</td><td>V</td><td>+Δ%</td><td>V+A</td></tr><tr><td>IN-1K</td><td>78.9</td><td>+1.4%</td><td>80.3</td></tr><tr><td>K400</td><td>78.9</td><td>+2.5%</td><td>81.4</td></tr></table>
222
+
223
+ (g) Study on the impact of different lexicon.
224
+
225
+ <table><tr><td>Method</td><td>T</td><td>Backbone</td><td>Top-1(%)</td></tr><tr><td>VideoPrompt [21]</td><td>16</td><td>ViT-B/32</td><td>76.9</td></tr><tr><td>ActionCLIP [50]</td><td>8</td><td>ViT-B/32</td><td>78.4</td></tr><tr><td>BIKE (Ours)</td><td>8</td><td>ViT-B/32</td><td>81.4 (+3.0)</td></tr></table>
226
+
227
+ (h) Comparison with CLIP-based methods using single-view inference. $T$ is the number of frames.
228
+
229
+ <table><tr><td>Backbone</td><td colspan="3">Baseline → V → V+A</td><td>V* → V*+A</td></tr><tr><td>ViT-B/32</td><td>76.8</td><td>+2.1%</td><td>78.9</td><td>+2.5%</td></tr><tr><td>ViT-B/16</td><td>79.9</td><td>+2.2%</td><td>82.1</td><td>+1.1%</td></tr><tr><td>ViT-L/14</td><td>85.2</td><td>+0.8%</td><td>86.4</td><td>+0.1%</td></tr><tr><td></td><td></td><td></td><td></td><td>87.4</td></tr></table>
230
+
231
+ (i) Component-by-component evaluation of our approach using various backbones. Models are fed 8 frames, where * stands for multiple view inference.
232
+
233
+ Table 6. Ablation studies on Kinetics-400. Models use ViT-B/32 as the backbone and 8 frames as input, unless otherwise specified. We report top-1 accuracy (%) for a single clip input with $224 \times 224$ spatial size. The V and A abbreviations are used for the Video recognition branch and the Attributes recognition branch, respectively. We refer to ImageNet-1K and Kinetics-400 as IN-1K and K400, respectively.
234
+
235
+ dataset generalization ability and outperforms classic zero-shot video recognition methods.
236
+
237
+ # 3.3. Ablation Studies
238
+
239
+ In this section, we provide extensive ablation studies of our method; the results are summarized in Table 6.
240
+
241
+ The Effect of Temporal Saliency. We investigate the impact of our proposed Video Concept Spotting (VCS) mechanism on the performance of the Video branch, as shown in Table 6a. We start with a baseline that uses mean pooling to aggregate the features of all frames, without considering temporal saliency. We observe that equipping the baseline with VCS improves the accuracy by $+1.7\%$. We then introduce a multi-layer (e.g., 6-layer) Transformer encoder with positional embeddings for the frame sequence, as commonly used in previous methods, and find that it provides an additional $0.2\%$ boost. Moreover, freezing the category encoder not only reduces the number of trainable parameters but also slightly improves performance $(+0.2\%)$.
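+
+ For intuition, a minimal sketch of category-conditioned temporal pooling in the spirit of VCS is shown below; pooling the frame-word similarities with a mean and the softmax temperature are illustrative assumptions rather than the exact formulation:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def saliency_pooled_video_feature(frame_feats, word_embs, temperature=0.01):
+     """Weight frames by their similarity to the category's word embeddings
+     instead of uniform mean pooling.
+     frame_feats: [T, D] L2-normalized per-frame visual features.
+     word_embs:   [W, D] L2-normalized word embeddings of the category name."""
+     sim = frame_feats @ word_embs.t()                          # [T, W] frame-word similarities
+     frame_score = sim.mean(dim=1)                              # [T] relevance score per frame
+     saliency = F.softmax(frame_score / temperature, dim=0)     # temporal saliency weights
+     return (saliency.unsqueeze(1) * frame_feats).sum(dim=0)    # [D] pooled video feature
+ ```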
242
+
243
+ Exploration of Category Embedding for Temporal Saliency and Classification. As mentioned in Section 2.3, CLIP's textual encoder can generate two types of embeddings: the [CLS] embedding for the entire sentence and the word embedding for each word. Therefore, we can encode the category into these two types of embeddings. The category embedding has two roles in our method: 1) it serves as
244
+
245
+ a query to determine the temporal saliency, and 2) it calculates similarity with video representation to produce recognition results. We demonstrate the results for three different combinations in Table 6b. We find that the global [CLS] embedding performs better than the word-level embedding for final recognition, but the word-level embedding is necessary for temporal saliency.
246
+
247
+ Prompt Engineering and Number of Attributes. For both attributes and categories in the Attributes recognition branch, we manually define a prompt, i.e., "This is a video about {}". The results in Table 6c show that the prompt significantly improves accuracy, even without training the attributes encoder. Furthermore, in Table 6d, we observe that the number of attributes has little effect on the performance of the Attributes recognition and two-branch recognition.
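+
+ For reference, the prompt can be applied before text encoding as in the following sketch (using the OpenAI clip package for illustration; any CLIP-style text encoder with the same interface works):
+
+ ```python
+ import torch
+ import clip  # OpenAI CLIP package
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model, _ = clip.load("ViT-B/32", device=device)
+
+ prompt = "This is a video about {}"
+ names = ["riding a bike", "playing piano", "surfing water"]   # attribute phrases or category names
+
+ tokens = clip.tokenize([prompt.format(n) for n in names]).to(device)
+ with torch.no_grad():
+     text_feats = model.encode_text(tokens)
+     text_feats = text_feats / text_feats.norm(dim=-1, keepdim=True)  # L2-normalize for cosine similarity
+ ```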
248
+
249
+ The Impact of Attributes branch. Table 6e shows that, even without any training, the Attributes branch can be plugged into the Video branch to improve recognition performance. After training the attributes encoder, the Attributes branch further boosts the fused result by an impressive $2.5\%$. Additionally, we find that the Attributes branch also improves the baseline when fused with it, as shown in Table 6f. By combining VCS and the Attributes branch, we achieve a remarkable improvement of $4.6\%$ over the baseline.
250
+
251
+ Attributes Generation with Different Lexicons. In Sec. 2.2, we use a pre-defined lexicon to obtain attributes. In Table 6g, we explore the impact of different lexicons. Using ImageNet-1K, an image dataset that covers 1000 object categories, as the lexicon to search for potential object attributes lifts the fused accuracy by $1.4\%$ over the Video branch alone. Using the 400 categories of Kinetics-400 as the lexicon further improves the result $(+2.5\%)$.
254
+
255
+ Comparison with CLIP-Based Methods. Table 6h presents a comparison between our method and two CLIP-based approaches, VideoPrompt [21] and ActionCLIP [50], both trained with contrastive loss. Despite using fewer frames, our method achieves higher Top-1 accuracy than VideoPrompt. Moreover, using the same ViT-B/32 backbone, our approach outperforms ActionCLIP by $3.0\%$ .
256
+
257
+ More Evaluation with Different Backbones. Table 6i presents a comprehensive evaluation of the applicability of our method using larger backbones. Our observations are as follows: 1) Despite the greatly improved performance of the baseline with larger backbones, our VCS mechanism still provides consistent, additional gains. This demonstrates the continued necessity of Text-to-Video saliency knowledge for large models. 2) As the absolute accuracy of the Video branch increases, the complementing effect of the Attributes branch gradually weakens. We conjecture that with larger models, richer representations are learned, leading to reduced bias in learned representations and an increased correlation with the Attributes branch, resulting in a reduction in complementary information. 3) Multiple-view evaluation involving more video clips leads to increased performance, reducing the bias of the model itself. For models with a top-1 accuracy of $87.4\%$ , the Attributes branch is unable to provide supplementary knowledge. Therefore, the Attributes branch is not utilized in our ViT-L/14 models presented in Sec. 3.2.
258
+
259
+ # 3.4. Visualization
260
+
261
+ Figure 3 illustrates the temporal saliency generated by the Video Concept Spotting mechanism, highlighting the frame that is most relevant to the category. We also demonstrate the complementarity of the auxiliary attributes generated by our Video-Attribute Association mechanism with the Video branch. See more qualitative results in the Supplementary.
262
+
263
+ # 4. Related Works
264
+
265
+ Video Recognition. Convolutional networks have been the standard backbone architecture in video recognition for a long time. Early works focused on jointly learning spatial and temporal context through parallel branches [14, 15, 43, 49, 52, 54]. Later works developed plug-and-play temporal modules [25, 30, 32, 37, 46, 48, 56, 61, 64] for 2D CNN backbones to improve temporal modeling. Some works also designed dynamic inference mechanisms [53, 55, 57, 58, 62, 63]
266
+
267
+ ![](images/aa63b62ae675ade15e79cbc65b971c389a9fa2193e5afbf7dcf6c8a1626457ed.jpg)
268
+ Figure 3. Visualization of (Top) temporal saliency and (Bottom) attributes. Please zoom in for the best view.
269
+
270
+ for efficient video recognition. Recently, Vision Transformers [10, 18, 29] have emerged as a new trend in image recognition backbones. Transformers have also been adopted for video recognition, e.g., TimeSformer [2], ViViT [1], VideoSwin [31], and MViT [11].
271
+
272
+ Transferring CLIP Models for Video Recognition. CLIP [38] provides good practice in learning coordinated vision-language models from large-scale image-text pairs. The pre-trained model learns powerful visual representations aligned with rich linguistic semantics. Initially, some works [12, 33, 59, 72] propose to directly use CLIP for video-text retrieval. Later, a few works explore the use of CLIP models for video recognition [21, 28, 35, 36, 50, 60, 66]; they can be broadly categorized into two lines. The first line [28, 36, 66] follows the unimodal transferring paradigm, where the image encoder of CLIP is used as a strong initialization for the video encoder. The second line [21, 35, 50, 60] provides cross-modal learning baselines that directly extend CLIP to video-label matching for video recognition. However, these studies only scratch the surface of the knowledge in CLIP. In contrast, our work aims to further explore the bidirectional cross-modal knowledge in CLIP to enhance the cross-modal baseline. Our approach introduces auxiliary attributes in the Video-to-Text direction and category-dependent temporal saliency in the Text-to-Video direction, resulting in more effective and interpretable video recognition.
273
+
274
+ # 5. Conclusion
275
+
276
+ In this work, we introduce a novel two-stream framework called BIKE that leverages bidirectional cross-modal knowledge from CLIP models to enhance video recognition. Our approach involves the Attributes branch, which utilizes the Video-Attribute Association mechanism to generate attributes for auxiliary recognition, and the Video branch, which uses the Video Concept Spotting mechanism to generate temporal saliency and produce a more compact video representation. Our approach is evaluated on six video datasets, and the experimental results demonstrate its effectiveness.
277
+
278
+ # References
279
+
280
+ [1] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, and Cordelia Schmid. Vivit: A video vision transformer. In ICCV, pages 6836-6846, 2021. 5, 8
281
+ [2] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In ICML, pages 813-824. PMLR, 2021. 5, 8
282
+ [3] Biagio Brattoli, Joseph Tighe, Fedor Zhdanov, Pietro Perona, and Krzysztof Chalupka. Rethinking zero-shot video classification: End-to-end training for realistic applications. In CVPR, pages 4613-4623, 2020. 6, 12
283
+ [4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. NeurIPS, 33:1877-1901, 2020. 1
284
+ [5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In CVPR, 2015. 2, 5
285
+ [6] Joao Carreira, Eric Noland, Andras Banki-Horvath, Chloe Hillier, and Andrew Zisserman. A short note about kinetics-600. arXiv preprint arXiv:1808.01340, 2018. 2, 5
286
+ [7] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In CVPR, 2017. 5, 15
287
+ [8] Shizhe Chen and Dong Huang. Elaborative rehearsal for zero-shot action recognition. In ICCV, pages 13638-13647, 2021. 6, 13
288
+ [9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 1
289
+ [10] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In ICLR, 2021. 8
290
+ [11] Haoqi Fan, Bo Xiong, Karttikeya Mangalam, Yanghao Li, Zhicheng Yan, Jitendra Malik, and Christoph Feichtenhofer. Multiscale vision transformers. In ICCV, pages 6824-6835, 2021. 8
291
+ [12] Bo Fang, Chang Liu, Yu Zhou, Min Yang, Yuxin Song, Fu Li, Weiping Wang, Xiangyang Ji, Wanli Ouyang, et al. Uatvr: Uncertainty-adaptive text-video retrieval. arXiv preprint arXiv:2301.06309, 2023. 8
292
+ [13] Christoph Feichtenhofer. X3d: Expanding architectures for efficient video recognition. In CVPR, pages 203-213, 2020. 6
293
+ [14] Christoph Feichtenhofer, Haoqi Fan, Jitendra Malik, and Kaiming He. Slowfast networks for video recognition. In ICCV, pages 6202-6211, 2019. 5, 6, 8
294
+ [15] Christoph Feichtenhofer, Axel Pinz, and Richard Wildes. Spatiotemporal residual networks for video action recognition. In NeurIPS, 2016. 8
295
+
296
+ [16] Junyu Gao, Tianzhu Zhang, and Changsheng Xu. I know the relationships: Zero-shot action recognition via two-stream graph convolutional networks and knowledge graphs. In AAAI, volume 33, pages 8303-8311, 2019. 6
297
+ [17] Ruohan Gao, Tae-Hyun Oh, Kristen Grauman, and Lorenzo Torresani. Listen to look: Action recognition by previewing audio. In CVPR, 2020. 6
298
+ [18] Kai Han, An Xiao, Enhua Wu, Jianyuan Guo, Chunjing Xu, and Yunhe Wang. Transformer in transformer. NeurIPS, 34, 2021. 8
299
+ [19] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc Le, Yun-Hsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML, pages 4904-4916. PMLR, 2021. 1
300
+ [20] Boyuan Jiang, MengMeng Wang, Weihao Gan, Wei Wu, and Junjie Yan. Stm: Spatiotemporal and motion encoding for action recognition. In ICCV, pages 2000-2009, 2019. 6, 15
301
+ [21] Chen Ju, Tengda Han, Kunhao Zheng, Ya Zhang, and Weidi Xie. Prompting visual-language models for efficient video understanding. In ECCV, pages 105-124. Springer, 2022. 1, 2, 4, 5, 6, 7, 8
302
+ [22] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. 2, 5
303
+ [23] Tae Soo Kim, Jonathan Jones, Michael Piven, Zihao Xiao, Jin Bai, Yi Zhang, Weichao Qiu, Alan Yuille, and Gregory D Hager. Daszl: Dynamic action signatures for zero-shot learning. In AAAI, volume 35, pages 1817-1826, 2021. 6
304
+ [24] Hildegard Kuehne, Hueihan Jhuang, Estibaliz Garrote, Tomaso Poggio, and Thomas Serre. Hmdb: a large video database for human motion recognition. In ICCV, 2011. 2, 5
305
+ [25] Yan Li, Bin Ji, Xintian Shi, Jianguo Zhang, Bin Kang, and Limin Wang. Tea: Temporal excitation and aggregation for action recognition. In CVPR, pages 909–918, 2020. 8
306
+ [26] Chung-Ching Lin, Kevin Lin, Lijuan Wang, Zicheng Liu, and Linjie Li. Cross-modal representation learning for zero-shot action recognition. In CVPR, pages 19978-19988, 2022. 6
307
+ [27] Ji Lin, Chuang Gan, and Song Han. Tsm: Temporal shift module for efficient video understanding. In ICCV, 2019. 15
308
+ [28] Ziyi Lin, Shijie Geng, Renrui Zhang, Peng Gao, Gerard de Melo, Xiaogang Wang, Jifeng Dai, Yu Qiao, and Hongsheng Li. Frozen clip models are efficient video learners. In ECCV, pages 388-404. Springer, 2022. 1, 5, 6, 8
309
+ [29] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In ICCV, pages 10012-10022, 2021. 8
310
+ [30] Zhaoyang Liu, Donghao Luo, Yabiao Wang, Limin Wang, Ying Tai, Chengjie Wang, Jilin Li, Feiyue Huang, and Tong Lu. Teinet: Towards an efficient architecture for video recognition. In AAAI, pages 11669-11676, 2020. 8
311
+ [31] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swin transformer. In CVPR, pages 3202-3211, 2022. 5, 6, 8
312
+
313
+ [32] Zhaoyang Liu, Limin Wang, Wayne Wu, Chen Qian, and Tong Lu. Tam: Temporal adaptive module for video recognition. In ICCV, pages 13708-13718, 2021. 8
314
+ [33] Huaishao Luo, Lei Ji, Ming Zhong, Yang Chen, Wen Lei, Nan Duan, and Tianrui Li. Clip4clip: An empirical study of clip for end to end video clip retrieval and captioning. Neurocomputing, 508:293-304, 2022. 8
315
+ [34] Ashish Mishra, Vinay Kumar Verma, M Shiva Krishna Reddy, S Arulkumar, Piyush Rai, and Anurag Mittal. A generative approach to zero-shot and few-shot action recognition. In WACV, pages 372-380. IEEE, 2018. 6
316
+ [35] Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, and Haibin Ling. Expanding language-image pretrained models for general video recognition. In ECCV, pages 1-18. Springer, 2022. 1, 2, 5, 6, 8
317
+ [36] Junting Pan, Ziyi Lin, Xiatian Zhu, Jing Shao, and Hongsheng Li. St-adapter: Parameter-efficient image-to-video transfer learning. In NeurIPS, 2022. 1, 5, 8
318
+ [37] Zhaofan Qiu, Ting Yao, and Tao Mei. Learning spatiotemporal representation with pseudo-3d residual networks. In ICCV, 2017. 8
319
+ [38] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763. PMLR, 2021. 1, 2, 3, 4, 5, 8
320
+ [39] Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9, 2019. 1
321
+ [40] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67, 2020. 1
322
+ [41] Michael Ryoo, AJ Piergiovanni, Anurag Arnab, Mostafa Dehghani, and Anelia Angelova. Tokenlearner: Adaptive space-time tokenization for videos. NeurIPS, 34:12786-12797, 2021. 5
323
+ [42] Gunnar A Sigurdsson, Gül Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. Hollywood in homes: Crowdsourcing data collection for activity understanding. In ECCV, pages 510-526. Springer, 2016. 2, 5
324
+ [43] Karen Simonyan and Andrew Zisserman. Two-stream convolutional networks for action recognition in videos. In NeurIPS, 2014. 8
325
+ [44] Khurram Soomro, Amir Roshan Zamir, and Mubarak Shah. Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402, 2012. 2, 5
326
+ [45] Chen Sun, Abhinav Shrivastava, Saurabh Singh, and Abhinav Gupta. Revisiting unreasonable effectiveness of data in deep learning era. In ICCV, pages 843-852, 2017. 5
327
+ [46] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In CVPR, 2018. 8, 15
328
+
329
+ [47] Limin Wang, Wei Li, Wen Li, and Luc Van Gool. Appearance-and-relation networks for video classification. In CVPR, 2018. 15
330
+ [48] Limin Wang, Zhan Tong, Bin Ji, and Gangshan Wu. Tdn: Temporal difference networks for efficient action recognition. In CVPR, pages 1895-1904, 2021. 8, 15
331
+ [49] Limin Wang, Yuanjun Xiong, Zhe Wang, Yu Qiao, Dahua Lin, Xiaoou Tang, and Luc Van Gool. Temporal segment networks: Towards good practices for deep action recognition. In ECCV, 2016. 8
332
+ [50] Mengmeng Wang, Jiazheng Xing, and Yong Liu. Actionclip: A new paradigm for video action recognition. arXiv preprint arXiv:2109.08472, 2021. 1, 2, 4, 5, 6, 7, 8
333
+ [51] Xiaolong Wang, Ross Girshick, Abhinav Gupta, and Kaiming He. Non-local neural networks. In CVPR, 2018. 5
334
+ [52] Xiaohan Wang, Linchao Zhu, Heng Wang, and Yi Yang. Interactive prototype learning for egocentric action recognition. In ICCV, pages 8168-8177, 2021. 8
335
+ [53] Xiaohan Wang, Linchao Zhu, Fei Wu, and Yi Yang. A differentiable parallel sampler for efficient video classification. ACM Transactions on Multimedia Computing, Communications and Applications, 19(3):1-18, 2023. 8
336
+ [54] Xiaohan Wang, Linchao Zhu, Yu Wu, and Yi Yang. Symbiotic attention for egocentric action recognition with object-centric alignment. IEEE TPAMI, 2020. 8
337
+ [55] Yulin Wang, Zhaoxi Chen, Haojun Jiang, Shiji Song, Yizeng Han, and Gao Huang. Adaptive focus for efficient video recognition. In ICCV, pages 16249-16258, 2021. 8
338
+ [56] Wenhao Wu, Dongliang He, Tianwei Lin, Fu Li, Chuang Gan, and Errui Ding. Mvfnet: Multi-view fusion network for efficient video recognition. In AAAI, 2021. 5, 8, 15
339
+ [57] Wenhao Wu, Dongliang He, Xiao Tan, Shifeng Chen, and Shilei Wen. Multi-agent reinforcement learning based frame sampling for effective untrimmed video recognition. In ICCV, 2019. 6, 8
340
+ [58] Wenhao Wu, Dongliang He, Xiao Tan, Shifeng Chen, Yi Yang, and Shilei Wen. Dynamic inference: A new approach toward efficient video action recognition. In Proceedings of CVPR Workshops, pages 676-677, 2020. 8
341
+ [59] Wenhao Wu, Haipeng Luo, Bo Fang, Jingdong Wang, and Wanli Ouyang. Cap4video: What can auxiliary captions do for text-video retrieval? In CVPR, 2023. 8
342
+ [60] Wenhao Wu, Zhun Sun, and Wanli Ouyang. Revisiting classifier: Transferring vision-language models for video recognition. In AAAI, 2023. 1, 2, 5, 6, 8
343
+ [61] Wenhao Wu, Yuxiang Zhao, Yanwu Xu, Xiao Tan, Dongliang He, Zhikang Zou, Jin Ye, Yingying Li, Mingde Yao, Zichao Dong, et al. Dsanet: Dynamic segment aggregation network for video-level representation learning. In ACM MM, pages 1903-1911, 2021. 6, 8
344
+ [62] Boyang Xia, Zhihao Wang, Wenhao Wu, Haoran Wang, and Jungong Han. Temporal saliency query network for efficient video recognition. In ECCV, pages 741-759. Springer, 2022. 6, 8
345
+ [63] Boyang Xia, Wenhao Wu, Haoran Wang, Rui Su, Dongliang He, Haosen Yang, Xiaoran Fan, and Wanli Ouyang. Nsnet: Non-saliency suppression sampler for efficient video recognition. In ECCV, pages 705-723. Springer, 2022. 6, 8
346
+
347
+ [64] Saining Xie, Chen Sun, Jonathan Huang, Zhuowen Tu, and Kevin Murphy. Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In ECCV, 2018. 8, 15
348
+ [65] Shen Yan, Xuehan Xiong, Anurag Arnab, Zhichao Lu, Mi Zhang, Chen Sun, and Cordelia Schmid. Multiview transformers for video recognition. In CVPR, pages 3333-3343, 2022. 5
349
+ [66] Taojiannan Yang, Yi Zhu, Yusheng Xie, Aston Zhang, Chen Chen, and Mu Li. Aim: Adapting image models for efficient video understanding. In ICLR, 2023. 1, 5, 8
350
+ [67] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917, 2022. 1, 5
351
+ [68] Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, et al. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432, 2021. 1, 5
352
+ [69] Xiaohua Zhai, Alexander Kolesnikov, Neil Houlsby, and Lucas Beyer. Scaling vision transformers. In CVPR, pages 12104-12113, 2022. 5
353
+ [70] Bowen Zhang, Jiahui Yu, Christopher Fifty, Wei Han, Andrew M Dai, Ruoming Pang, and Fei Sha. Co-training transformer with videos and images improves action recognition. arXiv preprint arXiv:2112.07175, 2021. 5
354
+ [71] Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. Ernie: Enhanced language representation with informative entities. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 1441-1451, 2019. 1
355
+ [72] Shuai Zhao, Linchao Zhu, Xiaohan Wang, and Yi Yang. Centerclip: Token clustering for efficient text-video retrieval. The 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, 2022. 8
356
+ [73] Bolei Zhou, Alex Andonian, Aude Oliva, and Antonio Torralba. Temporal relational reasoning in videos. In ECCV, 2018. 6
357
+
358
+ # Bidirectional Cross-Modal Knowledge Exploration for Video Recognition with Pre-trained Vision-Language Models
359
+
360
+ Supplementary Material
361
+
362
+ Wenhao Wu $^{1,2}$ Xiaohan Wang $^{3}$ Haipeng Luo $^{4}$ Jingdong Wang $^{2}$ Yi Yang $^{3}$ Wanli Ouyang $^{5,1}$
363
+
364
+ <sup>1</sup>The University of Sydney
365
+ <sup>2</sup>Baidu Inc.
366
+ <sup>3</sup>Zhejiang University
367
+
368
+ $^{4}$ University of Chinese Academy of Sciences $^{5}$ Shanghai AI Laboratory
369
+
370
+ whwu.ucas@gmail.com
371
+
372
+ In this appendix, we provide additional details and results for our approach. Specifically, §A contains further details on the training process (§A.1), attributes branch (§A.2), zero-shot evaluation (§A.3), statistics of video datasets (§A.4), visual encoder architectures (§A.5), and Distributed InfoNCE (§A.6). In §B, we present additional results, including comparisons on UCF-101 and HMDB-51 (§B.1) and more visualizations (§B.2).
373
+
374
+ # A. Implementation Details
375
+
376
+ # A.1. Training details
377
+
378
+ Regular Video Recognition. We present our approach for regular video recognition in Table A.1, sharing the same training recipe for all video datasets, including Kinetics-400, ActivityNet, Charades, HMDB-51, and UCF-101.
379
+
380
+ Few-shot Video Recognition. We repeat the few-shot samples so that the number of iterations per epoch matches the regular setting. For instance, the regular model is trained on Kinetics-400 with around 900 iterations per epoch, so we repeat the few-shot samples until one epoch also covers roughly 900 iterations. We train few-shot models for 2 epochs on Kinetics-400 and 10 epochs on the other video datasets, i.e., ActivityNet, HMDB-51, and UCF-101, while keeping other settings the same as in Table A.1.
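+
+ A minimal sketch of this sample-repetition strategy (the helper name below is illustrative; the batch size and iteration count follow Table A.1 and the text above) is:
+
+ ```python
+ import math
+ from torch.utils.data import ConcatDataset
+
+ def repeat_for_fewshot(fewshot_dataset, regular_iters_per_epoch=900, batch_size=256):
+     """Repeat the small few-shot dataset so one epoch still runs roughly as many
+     iterations as the regular full-data setting."""
+     target_samples = regular_iters_per_epoch * batch_size
+     repeats = max(1, math.ceil(target_samples / len(fewshot_dataset)))
+     return ConcatDataset([fewshot_dataset] * repeats)
+ ```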
381
+
382
+ Zero-shot Video Recognition. We use the Kinetics-400 pre-trained models to perform cross-dataset recognition without additional training on other datasets such as ActivityNet, HMDB-51, UCF-101, and Kinetics-600.
383
+
384
+ # A.2. Attributes Branch
385
+
386
+ To improve the quality of auxiliary attributes, we pregenerate them using CLIP ViT-L/14 with 8 frames. We employ the text encoder architecture of CLIP ViT-B/32 as our attribute encoder. To integrate the Attributes branch with the Video branch, we set $\lambda$ to 0.6 for the Video branch with ViT-B and $\lambda$ to 0.8 for the Video branch with ViT-L.
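+
+ A minimal sketch of this late fusion is given below; fusing softmax probabilities rather than raw logits is an assumption here, since only the weight $\lambda$ is specified:
+
+ ```python
+ import torch
+
+ def fuse_branch_scores(video_logits, attr_logits, lam=0.6):
+     """Late fusion of the Video and Attributes branches over the same category set.
+     lam weights the Video branch (0.6 with a ViT-B video encoder, 0.8 with ViT-L)."""
+     video_prob = video_logits.softmax(dim=-1)
+     attr_prob = attr_logits.softmax(dim=-1)
+     return lam * video_prob + (1.0 - lam) * attr_prob
+ ```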
387
+
388
+ <table><tr><td>Setting</td><td>Value</td></tr><tr><td colspan="2">Training Hyperparameter</td></tr><tr><td>Batch size</td><td>256</td></tr><tr><td>Vocabulary size</td><td>49408</td></tr><tr><td>Training epochs</td><td>30 (ViT-B), 20 (ViT-L)</td></tr><tr><td>Optimizer</td><td>AdamW</td></tr><tr><td>Learning rate (Base)</td><td>5e-5, cosine</td></tr><tr><td>Learning rate (CLIP layers)</td><td>5e-6, cosine</td></tr><tr><td>Weight decay</td><td>0.2</td></tr><tr><td>Linear warm-up epochs</td><td>5</td></tr><tr><td>Adam β1,β2</td><td>0.9, 0.999</td></tr><tr><td colspan="2">Augmentation</td></tr><tr><td>Resize</td><td>RandomSizedCrop</td></tr><tr><td>Crop size</td><td>224 (Default)</td></tr><tr><td>Random Flip</td><td>0.5</td></tr><tr><td>Random Gray scale</td><td>0.2</td></tr></table>
389
+
390
+ Table A.1. Default training recipe for video recognition.
391
+
392
+ # A.3. Evaluation Protocols for Zero-shot Recognition
393
+
394
+ We employ our Kinetics-400 pre-trained models to evaluate on other datasets. For UCF-101, HMDB-51, and ActivityNet, we adopt two major evaluation protocols as described in [3]:
395
+
396
+ 1. Half-Classes Evaluation: To ensure comparability with previous works, we randomly select half of the test dataset's classes - 50 for UCF, 25 for HMDB, and 100 for ActivityNet - and evaluate on the selected subset. We repeat this process ten times and report the average over the ten runs for each test dataset. We refer to this setting as UCF*, HMDB*, and ActivityNet*. A sketch of this procedure is given after the list.
397
+ 2. Full-Classes Evaluation: This evaluation setting involves directly evaluating on the full dataset to return more realistic accuracy scores.
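+
+ A minimal sketch of the half-classes protocol in setting 1 is shown below; predict_fn is a hypothetical helper that classifies the given videos over a restricted label space and returns predicted class ids:
+
+ ```python
+ import random
+ import numpy as np
+
+ def half_classes_accuracy(predict_fn, videos, labels, num_classes, runs=10, seed=0):
+     """Evaluate on a random half of the classes, repeat `runs` times, and report
+     the mean and standard deviation of top-1 accuracy."""
+     rng = random.Random(seed)
+     accs = []
+     for _ in range(runs):
+         subset = sorted(rng.sample(range(num_classes), num_classes // 2))
+         keep = [i for i, y in enumerate(labels) if y in set(subset)]
+         preds = predict_fn([videos[i] for i in keep], subset)
+         accs.append(float(np.mean([p == labels[i] for p, i in zip(preds, keep)])))
+     return float(np.mean(accs)), float(np.std(accs))
+ ```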
398
+
399
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Embedding dimension</td><td rowspan="2">Input resolution</td><td colspan="3">Vision transformer</td><td colspan="3">Text Transformer</td></tr><tr><td>layers</td><td>width</td><td>heads</td><td>layers</td><td>width</td><td>heads</td></tr><tr><td>ViT-B/32</td><td>512</td><td>224</td><td>12</td><td>768</td><td>12</td><td>12</td><td>512</td><td>8</td></tr><tr><td>ViT-B/16</td><td>512</td><td>224</td><td>12</td><td>768</td><td>12</td><td>12</td><td>512</td><td>8</td></tr><tr><td>ViT-L/14</td><td>768</td><td>224</td><td>24</td><td>1024</td><td>16</td><td>12</td><td>768</td><td>12</td></tr><tr><td>ViT-L/14-336px</td><td>768</td><td>336</td><td>24</td><td>1024</td><td>16</td><td>12</td><td>768</td><td>12</td></tr></table>
400
+
401
+ Table A.2. CLIP-ViT hyperparameters
402
+
403
+ For Kinetics-600, we follow [8] to choose the 220 new categories outside of Kinetics-400 in Kinetics-600 for evaluation. We use the three splits provided by [8] and sample 160 categories for evaluation from the 220 categories in Kinetics-600 for each split. We report the mean accuracy of the three splits as the final accuracy.
404
+
405
+ # A.4. Statistics of Video Datasets
406
+
407
+ We describe the video datasets used in our experiments: Kinetics-400 is a large-scale video dataset that includes 240,000 training videos and 20,000 validation videos across 400 different human action categories. Each video in the dataset is a 10-second clip of an action moment, annotated from raw YouTube videos.
408
+
409
+ Kinetics-600 is an extension of Kinetics-400, consisting of approximately 480,000 videos from 600 action categories. The videos are divided into 390,000 for training, 30,000 for validation, and 60,000 for testing. In this paper, we use its test set for zero-shot evaluation.
410
+
411
+ UCF-101 is an action recognition dataset that contains 13,320 videos from 101 realistic action categories, collected from YouTube.
412
+
413
+ HMDB-51 is a collection of realistic videos from various sources, including movies and web videos. The dataset comprises 7,000 video clips from 51 action categories.
414
+
415
+ ActivityNet-v1.3 is a large-scale untrimmed video benchmark that contains 19,994 untrimmed videos of 5 to 10 minutes from 200 activity categories.
416
+
417
+ Charades is a video dataset designed for action recognition and localization tasks. It contains over 10,000 short video clips of people performing daily activities, and consists of 157 action categories.
418
+
419
+ # A.5. Encoder Architectures
420
+
421
+ In this paper, we provide the complete architecture details of the visual encoder and textual encoders. The CLIP-ViT architectures are shown in Table A.2.
422
+
423
+ # A.6. Distributed InfoNCE
424
+
425
+ Unlike Data-Parallel training (DP), which is single-process, multi-threaded, and only works on a single machine, Distributed Data-Parallel training (DDP) is a widely adopted single-program multiple-data paradigm for single- and multi-machine training. Due to GIL contention across threads, the model being replicated at every iteration, and the extra overhead of scattering inputs and gathering outputs, DP is usually slower than DDP even on a single machine. Hence, we develop Distributed InfoNCE on top of DDP to enable large batch sizes and fast training.
428
+
429
+ The core of the Distributed InfoNCE implementation is batch gathering, which enables us to calculate the $\mathrm{NM} \times \mathrm{NM}$ similarity matrix across M GPUs for the InfoNCE loss. Without batch gathering, each GPU would only compute a local $\mathrm{N} \times \mathrm{N}$ matrix, where $\mathrm{N} \ll \mathrm{NM}$. This means that the cosine similarities and the InfoNCE loss would only be calculated for pairs within a single GPU, with their gradients averaged and synchronized afterwards, which is not the objective we intend to optimize.
430
+
431
+ The batch gathering technique allows each GPU to hold N vision features and perform a matrix product with NM text features, resulting in an $\mathrm{N} \times \mathrm{NM}$ matrix. This computation is distributed (i.e., sharded) across M GPUs, and we have calculated $\mathrm{NM} \times \mathrm{NM}$ similarities across the GPUs in total. The loss we employ is symmetric, and the same process is applied w.r.t. text inputs. Algorithm 1 provides an example pseudocode to help understand the process.
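+
+ For illustration, a minimal PyTorch sketch of the batch-gathering step referenced in Algorithm 1 is shown below; the gradient-preserving trick of re-inserting the local tensor is a typical implementation choice, not a detail specified in the text:
+
+ ```python
+ import torch
+ import torch.distributed as dist
+
+ def batch_gather(local_feats: torch.Tensor) -> torch.Tensor:
+     """Gather a [N, D] feature tensor from every GPU and concatenate to [N * M, D].
+     dist.all_gather does not backpropagate into tensors received from other ranks,
+     so the local (differentiable) tensor is put back into its own slot."""
+     world_size = dist.get_world_size()
+     gathered = [torch.zeros_like(local_feats) for _ in range(world_size)]
+     dist.all_gather(gathered, local_feats)
+     gathered[dist.get_rank()] = local_feats
+     return torch.cat(gathered, dim=0)
+ ```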
432
+
433
+ # B. More Results
434
+
435
+ # B.1. Comparisons on UCF-101 and HMDB-51
436
+
437
+ In this section, we evaluate the performance of our method on the UCF-101 and HMDB-51 datasets to demonstrate its capacity for generalization to smaller datasets. We fine-tune the Kinetics-400 pre-trained ViT-L model on these two datasets and report the accuracy on split 1. We use 16 frames as input and train for 30 epochs. Table A.3 shows that our model has strong transferability, achieving a mean class accuracy of $98.8\%$ on UCF-101 and $83.1\%$ on HMDB-51.
438
+
439
+ # B.2. More Qualitative Results
440
+
441
+ We present additional visualizations of the Temporal Saliency generated by our Video Concept Spotting mechanism in Figure A.1. In Figure A.2, we also showcase more visualizations of the Generated Attributes produced by our Video-Attribute Association mechanism using two different lexicons.
442
+
443
+ ![](images/7a44b23333e3e6a0bae9efe90533571b9942eaf1bc9f2bba44067680553cf417.jpg)
444
+ Video: parasailing
445
+
446
+ ![](images/d8f781d6e492375f1426e8bdc2ddd3cca068a1c27ed5c5e6b384ef93b562fb10.jpg)
447
+ Video: long jump
448
+
449
+ ![](images/f414425a380d068050b53da2c994d8aa685b1b43687b005f16473ef0fba89ded.jpg)
450
+ Video: riding mule
451
+
452
+ ![](images/2354cebf61ca5086a98cc0142e47603629d757aacbf9db6a1c1484a97c0c8692.jpg)
453
+ Video: playing piano
454
+
455
+ ![](images/8d8a736a2a03696a9c25e51f1ebc8cffb3478c633f4f7d25516d777f7f34d909.jpg)
456
+ Video: pushing cart
457
+
458
+ ![](images/d8dbf1fffd8a8ee0e5172d706aecea57360adc561230a5644a3c12c2821ca02f.jpg)
459
+ Video: riding elephant
460
+
461
+ ![](images/ef536fb671f73d92444b3b94532d28fb3ff9a66cdee391c894b47da9934b9abf.jpg)
462
+ Video: surfing water
463
+
464
+ ![](images/34680195632ea385e10f525c5e91b50a9b5e2debd9aa87260ee16e023123928b.jpg)
465
+ Video: javelin throw
466
+
467
+ ![](images/1f82d04d4a66ff79411830cf97276284243e2c51c0eb32163e9a8106f391ec97.jpg)
468
+ Figure A.1. Visualization of temporal saliency from our Video Concept Spotting mechanism. Please zoom in for best view.
469
+ + Attributes This is a video about exercising with an exercise ball, juggling balls, dribbling basketball.
470
+ Prediction balloon blowing
471
+ exercising with an exercise ball
472
+
473
+ ![](images/7507d839f9c7991c498aceefe5f65cf7cdeb448cfdb3837e96bae73c0626173d.jpg)
474
+ Prediction water sliding
475
+ + Attributes This is a video about bobsledding, tobogganing, sled dog racing.
476
+ tobogganing
477
+
478
+ ![](images/65e563011ec25f1bed9915736dd5d5dca56cab2775b6becc31207f9961a2410d.jpg)
479
+ + Attributes This is a video about crawling baby, kissing, headbutting.
480
+ Prediction hugging
481
+ kissing
482
+
483
+ ![](images/375825a2bf2b17ccbd5e9150cb3e05f5c88ad2bbd94cb5f27bc7c3e3ab4b7753.jpg)
484
+ Prediction cooking chicken
485
+ making sushi
486
+ + Attributes This is a video about making sushi, dining, setting table.
487
+
488
+ ![](images/45948abf80dd4c144f6fdf72ce6657d36868fc0cce863039eb156596584cced2.jpg)
489
+ + Attributes This is a video about riding mountain bike, biking through snow, riding a bike.
490
+ Prediction motorcycling
491
+ riding a bike
492
+
493
+ ![](images/e5a44efe968d47cb247065d0fd92de5fd0e8c23d771adbebf39c6606700808fb.jpg)
494
+ + Attributes This is a video about headbutting, petting cat, massaging back.
495
+ Prediction massaging person's head
496
+ petting cat
497
+
498
+ ![](images/ae226403b7b12a211e7f1307f1cbae6949b47dac0ef173688e181e8d46dd1367.jpg)
499
+ (a) Generated attributes from Kinetics-400 lexicon.
500
+ Prediction pumping gas
501
+ pushing car
502
+
503
+ + Attributes This is a video about minivan, station wagon, limousine, car mirror.
504
+
505
+ ![](images/bf0fcbc96438581f1c86b6c68b5e5e44a26b970602efbb88ab92e9f203f3cf58.jpg)
506
+ Prediction Folding napkins
507
+ setting table
508
+
509
512
+
513
+ ![](images/bc785b071676ae28fc90154adbeb4d53803d267f6ee8c3f73a049c8f176072b6.jpg)
514
+ + Attributes This is a video about dining table, restaurant, plate, handkerchief.
515
+ Prediction drinking shots
516
+ + Attributes This is a video about trimaran, sailboat, catamaran, motorboat.
518
+ Prediction windsurfing
519
+ sailing
520
+ (b) Generated attributes from ImageNet-1K lexicon.
521
+ Figure A.2. Visualization of the attribute sentence generated by the Video-Attribute Association mechanism that corrected the original incorrect prediction to the correct one.
522
+
523
+ Algorithm 1 Numpy-like Pseudocode of Distributed InfoNCE for our Video branch
524
+ ```python
525
+ # categoryEncoder: encoder network for category input
526
+ # videoEncoder: encoder network for video input
527
+ # V: minibatch of video inputs
528
+ # T: minibatch of category inputs
529
+ # N: the local batch size of each GPU, e.g.,16
530
+ # M: the number of GPUs, e.g.,8
531
+ # N * M: the global batch size for multi-gpu training, e.g.,128
532
+ # extract feature representations of each modality
533
+ local_vision_features = videoEncoder(V) # shape: [N, embed_dim]
534
+ local_text_features = categoryEncoder(T) # shape: [N, embed_dim]
535
+ # normalization
536
+ local_vision_features = l2_normalize(local_vision_features, axis=1)
537
+ local_text_features = l2_normalize(local_text_features, axis=1)
538
+ # batch_gather is a function gathering and concatenating the tensors across GPUs.
539
+ all_vision_features = batch_gather(local_vision_features) # shape: [N * M, embed_dim]
540
+ all_text_features = batch_gather(local_text_features) # shape: [N * M, embed_dim]
541
+ # scaled pairwise cosine similarities
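+ # logit_scale: the temperature scaling factor (1 / tau; tau = 0.01 here)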
542
+ # shape = [N, N * M]
543
+ logits_per_vision = logit_scale * local_vision_features @ all_text_features.t()
544
+ # shape = [N, N * M]
545
+ logits_per_text = logit_scale * local_text_features @ all_vision_features.t()
546
+ # The logits are then used as inputs for N*M-way (e.g., 128-way) classification,
547
+ # resulting in a loss value corresponding to N inputs in each GPU.
548
+ # Then Distributed Data Parallel mechanism takes care of averaging these across GPUs,
549
+ # which becomes equivalent to calculating the loss over NMxNM (e.g.,128x128) similarities.
550
+ ```
551
+
552
+ <table><tr><td>Method</td><td>UCF-101</td><td>HMDB-51</td></tr><tr><td>ARTNet [47]</td><td>94.3%</td><td>70.9%</td></tr><tr><td>I3D [7]</td><td>95.6%</td><td>74.8%</td></tr><tr><td>R(2+1)D [46]</td><td>96.8%</td><td>74.5%</td></tr><tr><td>S3D-G [64]</td><td>96.8%</td><td>75.9%</td></tr><tr><td>TSM [27]</td><td>95.9%</td><td>73.5%</td></tr><tr><td>STM [20]</td><td>96.2%</td><td>72.2%</td></tr><tr><td>MVFNet [56]</td><td>96.6%</td><td>75.7%</td></tr><tr><td>TDN [48]</td><td>97.4%</td><td>76.4%</td></tr><tr><td>Ours ViT-L</td><td>98.8%</td><td>82.2%</td></tr><tr><td>Ours ViT-L (336↑)</td><td>98.6%</td><td>83.1%</td></tr></table>
553
+
554
+ Table A.3. Top-1 accuracy on UCF-101 and HMDB-51 achieved by different methods, each transferred from its Kinetics pre-trained model with the RGB modality.
2301.00xxx/2301.00182/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2fa3f94028151f0a0eb2506a8ce44fc9c25341406dc3ea37e70ba928bae1d1b1
3
+ size 1046089
2301.00xxx/2301.00182/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
2301.00xxx/2301.00184/8445fc7b-3803-4777-88c1-9a5a57def64f_content_list.json ADDED
The diff for this file is too large to render. See raw diff