SlowGuess committed
Commit 3e6a5c6 · verified · 1 Parent(s): f132767

Add Batch bdc0883f-3e58-4667-8615-f5ffef6c3cee

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/ab223230-b52b-491a-b054-0fe645339363_content_list.json +3 -0
  2. a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/ab223230-b52b-491a-b054-0fe645339363_model.json +3 -0
  3. a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/ab223230-b52b-491a-b054-0fe645339363_origin.pdf +3 -0
  4. a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/full.md +358 -0
  5. a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/images.zip +3 -0
  6. a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/layout.json +3 -0
  7. abcdarbitrarybitwisecoefficientfordequantization/74de187f-bb6a-4967-8de8-7245e6e6dc31_content_list.json +3 -0
  8. abcdarbitrarybitwisecoefficientfordequantization/74de187f-bb6a-4967-8de8-7245e6e6dc31_model.json +3 -0
  9. abcdarbitrarybitwisecoefficientfordequantization/74de187f-bb6a-4967-8de8-7245e6e6dc31_origin.pdf +3 -0
  10. abcdarbitrarybitwisecoefficientfordequantization/full.md +374 -0
  11. abcdarbitrarybitwisecoefficientfordequantization/images.zip +3 -0
  12. abcdarbitrarybitwisecoefficientfordequantization/layout.json +3 -0
  13. ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/b8595b73-1c79-4c16-b7bb-6fddbb1f1ffa_content_list.json +3 -0
  14. ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/b8595b73-1c79-4c16-b7bb-6fddbb1f1ffa_model.json +3 -0
  15. ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/b8595b73-1c79-4c16-b7bb-6fddbb1f1ffa_origin.pdf +3 -0
  16. ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/full.md +324 -0
  17. ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/images.zip +3 -0
  18. ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/layout.json +3 -0
  19. abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/0373c754-2d12-4d52-95c7-16e20ad62c3a_content_list.json +3 -0
  20. abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/0373c754-2d12-4d52-95c7-16e20ad62c3a_model.json +3 -0
  21. abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/0373c754-2d12-4d52-95c7-16e20ad62c3a_origin.pdf +3 -0
  22. abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/full.md +416 -0
  23. abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/images.zip +3 -0
  24. abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/layout.json +3 -0
  25. acapanticipationcaptioningwithcommonsenseknowledge/0e9d14f2-ed8b-47a0-933f-6fa45b08d07e_content_list.json +3 -0
  26. acapanticipationcaptioningwithcommonsenseknowledge/0e9d14f2-ed8b-47a0-933f-6fa45b08d07e_model.json +3 -0
  27. acapanticipationcaptioningwithcommonsenseknowledge/0e9d14f2-ed8b-47a0-933f-6fa45b08d07e_origin.pdf +3 -0
  28. acapanticipationcaptioningwithcommonsenseknowledge/full.md +302 -0
  29. acapanticipationcaptioningwithcommonsenseknowledge/images.zip +3 -0
  30. acapanticipationcaptioningwithcommonsenseknowledge/layout.json +3 -0
  31. acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/49024a39-e689-4fa1-87f4-7de949651fed_content_list.json +3 -0
  32. acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/49024a39-e689-4fa1-87f4-7de949651fed_model.json +3 -0
  33. acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/49024a39-e689-4fa1-87f4-7de949651fed_origin.pdf +3 -0
  34. acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/full.md +321 -0
  35. acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/images.zip +3 -0
  36. acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/layout.json +3 -0
  37. acceleratingdatasetdistillationviamodelaugmentation/38ccc529-8263-4fb5-b4f1-49f67321c951_content_list.json +3 -0
  38. acceleratingdatasetdistillationviamodelaugmentation/38ccc529-8263-4fb5-b4f1-49f67321c951_model.json +3 -0
  39. acceleratingdatasetdistillationviamodelaugmentation/38ccc529-8263-4fb5-b4f1-49f67321c951_origin.pdf +3 -0
  40. acceleratingdatasetdistillationviamodelaugmentation/full.md +347 -0
  41. acceleratingdatasetdistillationviamodelaugmentation/images.zip +3 -0
  42. acceleratingdatasetdistillationviamodelaugmentation/layout.json +3 -0
  43. acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/e514fec0-dcec-43a5-8ee8-992c5bb1bf30_content_list.json +3 -0
  44. acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/e514fec0-dcec-43a5-8ee8-992c5bb1bf30_model.json +3 -0
  45. acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/e514fec0-dcec-43a5-8ee8-992c5bb1bf30_origin.pdf +3 -0
  46. acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/full.md +277 -0
  47. acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/images.zip +3 -0
  48. acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/layout.json +3 -0
  49. accelirtaskawareimagecompressionforacceleratingneuralrestoration/3ee3bce8-b085-4d08-be6f-5f7d348c3f3f_content_list.json +3 -0
  50. accelirtaskawareimagecompressionforacceleratingneuralrestoration/3ee3bce8-b085-4d08-be6f-5f7d348c3f3f_model.json +3 -0
a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/ab223230-b52b-491a-b054-0fe645339363_content_list.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ad60018adf0a48b286afb872682d4f29af596e36f25b8044a79534e82e8a83e
3
+ size 82506
a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/ab223230-b52b-491a-b054-0fe645339363_model.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8108a99d3ae8d6b237fd3a711ecc54167f71f41e50a12a383937a2def32d3fc1
3
+ size 103153
a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/ab223230-b52b-491a-b054-0fe645339363_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a2eeeb02c5238aae755838463c10132a32d401b31edcae72614bbad5217b1a79
3
+ size 1639911
a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/full.md ADDED
@@ -0,0 +1,358 @@
1
+ # A2J-Transformer: Anchor-to-Joint Transformer Network for 3D Interacting Hand Pose Estimation from a Single RGB Image
2
+
3
+ Changlong Jiang $^{1}$ , Yang Xiao $^{1\dagger}$ , Cunlin Wu $^{1}$ , Mingyang Zhang $^{2}$ , Jinghong Zheng $^{1}$ , Zhiguo Cao $^{1}$ , and Joey Tianyi Zhou $^{3,4}$
4
+
5
+ <sup>1</sup>Key Laboratory of Image Processing and Intelligent Control, Ministry of Education, School of Artificial Intelligence and Automation, Huazhong University of Science and Technology, Wuhan 430074, China
6
+ <sup>2</sup> Alibaba Group
7
+ <sup>3</sup> Centre for Frontier AI Research, Agency for Science, Technology and Research (A*STAR), Singapore
8
+
9
+ $^{4}$ Institute of High Performance Computing, Agency for Science, Technology and Research (A*STAR), Singapore
10
+
11
+ changlongj, Yang_Xiao, cunlin_wu@hust.edu.cn, changhai.zmy@alibaba-inc.com, deepzheng, zgcao@hust.edu.cn, zhouty@cfar.a-star.edu.sg
14
+
15
+ # Abstract
16
+
17
+ 3D interacting hand pose estimation from a single RGB image is a challenging task, due to serious self-occlusion and inter-occlusion towards hands, confusing similar appearance patterns between the 2 hands, ill-posed joint position mapping from 2D to 3D, etc. To address these, we propose to extend A2J, the state-of-the-art depth-based 3D single hand pose estimation method, to the RGB domain under the interacting hand condition. Our key idea is to equip A2J with a strong local-global awareness ability to capture interacting hands' local fine details and global articulated clues among joints jointly. To this end, A2J is evolved under Transformer's non-local encoding-decoding framework to build A2J-Transformer. It holds 3 main advantages over A2J. First, self-attention across local anchor points is built to make them aware of the global spatial context, so as to better capture joints' articulation clues and resist occlusion. Secondly, each anchor point is regarded as a learnable query with adaptive feature learning for facilitating pattern fitting capacity, instead of sharing the same local representation with the others. Last but not least, anchor points locate in 3D space instead of 2D as in A2J, to leverage 3D pose prediction. Experiments on the challenging InterHand 2.6M dataset demonstrate that A2J-Transformer can achieve state-of-the-art model-free performance (3.38mm MPJPE advancement in the 2-hand case) and can also be applied to the depth domain with strong generalization. The code is available at https://github.com/ChanglongJiangGit/A2J-Transformer.
18
+
19
+ ![](images/3834c13ee28c267b81cec1851e9033adb5af131f4a4798578dbc9a9f283156a9.jpg)
20
+ Figure 1. The main idea of A2J-Transformer. 3D anchors are uniformly set and act as local regressors to predict each hand joint. Meanwhile, they are also used as queries, and the interaction among them is established to acquire global context.
21
+
22
+ # 1. Introduction
23
+
24
+ 3D interacting hand pose estimation from a single RGB image can be widely applied to the fields of virtual reality, augmented reality, human-computer interaction, etc. [32, 34, 37]. Despite the efforts paid, it still remains a challenging research task due to the main issues of serious self-occlusion and inter-occlusion towards hands [7, 12, 16, 22, 27], confusing similar appearance patterns between the 2 hands [12, 19, 27], and the ill-posed characteristics of estimating 3D hand pose via a monocular RGB image [7, 16, 28].
25
+
26
+ The existing methods can be generally categorized into model-based [1,2,21,29,30,35,39,41,48] and model-free [5,7,12,17,19,22,26,27,29,43] groups. Due to the model's strong prior knowledge on hands, the former paradigm is overall of more promising performance. However, model-based methods generally require complex personalized model calibration, which is sensitive to initialization and susceptible to being trapped in local minima [11, 12]. This is not preferred by practical applications. Accordingly, we focus on the model-free manner in a regression way. The key idea is that, for effective 3D interacting hand pose estimation, the predictor should be well aware of joints' local fine details and global articulated context simultaneously to resist the occlusion and confusing appearance pattern issues. To this end, we propose to extend the SOTA depth-based single hand 3D pose estimation method A2J [43] to the 3D interacting hand pose estimation task from a single RGB image.
29
+
30
+ Despite A2J's superiority with ensemble local regression, intuitively applying it to our task cannot ensure promising performance, since it generally suffers from 3 main defects as below. First, the local anchor points for predicting offsets between them and the joints lack interaction among each other. This leads to the fact that joints' global articulated clues cannot be well captured to resist occlusion. Secondly, the anchor points within a certain spatial range share the same single-scale local convolution feature, which essentially limits the discrimination capacity on confusing visual patterns towards the interacting hands. Last, anchor points locate within the 2D plane, which is not optimal for alleviating the ill-posed 2D to 3D lifting problem with a single RGB image. To address these, we propose to extend A2J under Transformer's non-local encoding-decoding framework to build A2J-Transformer, with anchor point-wise adaptive multi-scale feature learning and a 3D anchor point setup.
31
+
32
+ Particularly, the anchor point within A2J is evolved into a learnable query under the Transformer framework. Each query will predict its position offsets to all the joints of the 2 hands. A joint's position is finally estimated via fusing the prediction results from all queries in a linear weighting way. That is to say, a joint's position is determined by all the queries located over the whole image, from a global spatial perspective. Meanwhile, the setting query number is flexible and is not strictly constrained by the joint number as in [12]. Thanks to Transformer's non-local self-attention mechanism [40], during the feature encoding stage the queries can interact with each other to capture joints' global articulated clues, which is essentially beneficial for resisting self-occlusion and inter-occlusion. Concerning a specific query, adaptive local feature learning will be conducted to extract query-wise multi-scale convolutional features based on ResNet-50 [14]. Compared with A2J's feature sharing strategy among neighboring anchor points, our proposition can essentially facilitate the query's pattern fitting capacity both for accurate joint localization and for the joint's hand identity verification. In summary, each query will be of strong local-global spatial awareness ability to better fit interacting hand appearance patterns. Meanwhile, to facilitate the RGB-based 2D to 3D hand pose lifting problem, the queries will be set within the 3D space instead of the 2D counterpart as in A2J [43]. In this way, each query can directly predict its 3D position offsets to the joints, which cannot be acquired by A2J. Overall, A2J-Transformer's main research idea is shown in Fig. 1.
35
+
36
+ Compared with the most recently proposed model-free method [12] that also addresses 3D interacting hand pose estimation using Transformer, our proposition still holds some essential advantages. First, joint-like keypoint detection is not required. Secondly, the query number is not strictly constrained to be equal to the joint number, which facilitates pattern fitting capacity. Thirdly, our queries locate within 3D space instead of its 2D counterpart.
37
+
38
+ The experiments on the challenging InterHand2.6M [29] dataset verify that our approach can achieve the state-of-the-art model-free performance (3.38mm MPJPE advancement in the 2-hand case) for 3D interacting hand pose estimation from a single RGB image. It also significantly outperforms A2J by large margins (i.e., over 5mm on MPJPE). In addition, experiments on the HANDS2017 dataset [46] demonstrate that A2J-Transformer can also be applied to the depth domain with promising performance.
39
+
40
+ Overall, the main contributions of this paper include:
41
+
42
+ - For the first time, we extend A2J from depth domain to RGB domain to address 3D interacting hand pose estimation from a single RGB image with promising performance;
43
+ - A2J's anchor point is evolved with Transformer's non-local self-attention mechanism and adaptive local feature learning, to make it aware of joints' local fine details and global articulated context simultaneously;
44
+ - Anchor point is proposed to locate within 3D space to facilitate ill-posed 2D to 3D hand pose lifting problem based on monocular RGB information.
45
+
46
+ # 2. Related Works
47
+
48
+ Many methods have been proposed for 3D hand pose estimation from either RGB images or depth maps. These methods can also be divided into single hand pose estimation and interacting hand pose estimation methods based on the number of input hands. Here we categorize all these methods into model-based and model-free groups, and mainly analyze works that estimate interacting 3D hand poses from RGB images. Besides, we discuss the usage of Transformer architectures in the 3D hand pose estimation field as they are highly relevant to our work.
49
+
50
+ Model-based approach. Considering that model-based methods [1, 21, 29, 30, 33, 35, 39, 41, 48] can provide strong prior knowledge, model-based 3D hand pose estimation methods could achieve relatively better results by fitting hand models. Early methods [1, 30, 39] for model-based 3D hand pose estimation used complex optimization methods to fit their proposed parameter models. However, due to the lack of a unified model paradigm, the development of model-based methods was somewhat limited at that time. After the introduction of the compatible 3D
51
+
52
+ ![](images/36dc722515d420712998749ab04dc15ce81028ab60bb0fe0512945ea586986f3.jpg)
53
+ Figure 2. The main technical pipeline of A2J-Transformer. A2J-Transformer consists of 3 main models: pyramid feature extractor, anchor refinement model (containing feature enhancement module and anchor interaction module) and anchor offset-weight estimation model. The anchor interaction module aims to establish the connection (orange line) between anchors (orange dots).
54
+
55
+ hand model MANO [33], subsequent model-based methods [21, 35, 41, 48] are mostly based on it while using CNN or GCN modules. Due to the presence of a sufficiently strong prior model, model-based methods generally have good performance and are more stable than model-free methods. However, these approaches usually lose tracking when there are strong hand interactions and occlusions. At the same time, modeling the hands of different people is needed [13] in practical usage, which to a certain extent reduces the generalization ability of the model. Therefore, we turn our attention to the model-free approach, which needs no prior information and has more flexibility.
56
+
57
+ Model-free approach. Model-free approaches [2, 5, 7, 17, 19, 22, 27-29, 43] have been developed for a long time. In particular, very mature methods [2, 17, 28, 43] are available for the task of single hand pose estimation based on depth maps. However, their extension to two hands and the RGB domain is non-trivial due to the severe occlusion and similar appearance between hand joints. After Moon et al. [29] proposed the InterHand2.6M dataset, model-free approaches [5-7, 19, 22, 27] for interacting hand pose estimation have made great progress. To be better resistant to occlusion, some research [7, 22, 27] tends to separate the interacting hands and estimate the two hands separately, while some methods [5,6] perform dense modeling by using point cloud networks. However, these methods' prediction of the details of interacting hands depends heavily on the quality of the segmentation results or the point cloud generation. Some methods [12, 29] obtain the coordinates of hand joints by directly regressing the heatmap, which could be intuitive and flexible. However, the current methods are not ideal for local detail feature extraction and still have performance shortcomings for 3D interacting hand pose estimation. The method proposed by Hampali et al. [12] is
58
+
59
+ similar to ours, in that it directly regresses the keypoints of two hands, but it still suffers from poor predictions under strong occlusions. In contrast, by regarding densely distributed anchor points as local regressors and establishing interactions between them, our proposed A2J-Transformer can not only extract local detailed hand poses, but also obtain global articulated hand joints' information.
60
+
61
+ Transformer in hand pose estimation. With the rise of the self-attention mechanism and the proposal of the Transformer model [40], more and more visual fields, such as image classification, object detection and 3D mesh reconstruction, promote their development by introducing the Transformer model [18]. Since the Transformer model has a strong ability in capturing non-local features, which is surely helpful for the hand pose estimation field, there have been many works [15, 23, 24] introducing Transformer into this area. However, these architectures are all designed for single hand pose estimation. Recent methods for interacting hand pose estimation have achieved good results, but still suffer from performance shortcomings [12] or limitations in model flexibility [21].
62
+
63
+ Accordingly, our A2J-Transformer belongs to the model-free category and introduces the Transformer module. Different from previous works, we integrate A2J and Transformer into a uniform model (i.e., A2J-Transformer) with end-to-end learning capacity, to reveal our key theoretical insight on addressing the 3D interacting hand pose estimation (IHPE) task via concerning local and global visual context jointly. Meanwhile, the 2D anchor point within A2J is evolved into a 3D version adapted to A2J-Transformer, to alleviate the ill-posed 2D to 3D hand pose lifting problem using a monocular RGB image. These propositions are technically sound, achieve promising performance, and deeply concern 3D IHPE's specific characteristics.
64
+
65
+ ![](images/3f8ea3755f0f601790ddde5d42e2bdf0114c1706721b0816c45a40e2c009d84b.jpg)
66
+ Figure 3. The first encoder layer of feature enhancement module.
67
+
68
+ # 3. A2J-Transformer: Anchor-to-Joint Transformer Network
69
+
70
+ As shown in Fig. 2, A2J-Transformer consists of 3 main models: pyramid feature extractor, anchor refinement model and anchor offset-weight estimation model.
71
+
72
+ # 3.1. Pyramid feature extractor
73
+
74
+ Since multi-scale features can capture the global information of the input image while retaining enough detailed information, feature pyramids are well suited to the task of interacting hand pose estimation. Therefore, ResNet-50 [14] is used as the backbone network to extract pyramid features from the input RGB images. In particular, we obtain the pyramid features from the outputs of layers 2-4, with $8, 16, 32\times$ downsampling rates on in-plane size. Meanwhile, 3 convolution layers are used to generate the Transformer inputs for each feature, and 1 convolution layer is additionally used to extract the last feature layer to maintain more spatial information. Finally, these 4 feature maps are sent to the next anchor refinement model.
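+
+ As a concrete illustration, below is a minimal PyTorch sketch of this pyramid extraction step. It assumes torchvision's ResNet-50 and a DETR-style 1x1 projection of each stage to 256 channels; the layer choices and module names here are ours, not necessarily those of the released code.
+
+ ```python
+ import torch
+ import torch.nn as nn
+ import torchvision
+
+ class PyramidFeatureExtractor(nn.Module):
+     """Extract 1/8, 1/16, 1/32 ResNet-50 features plus one extra coarser map."""
+
+     def __init__(self, hidden_dim: int = 256):
+         super().__init__()
+         backbone = torchvision.models.resnet50(weights=None)
+         self.stem = nn.Sequential(backbone.conv1, backbone.bn1,
+                                   backbone.relu, backbone.maxpool)
+         self.layer1, self.layer2 = backbone.layer1, backbone.layer2
+         self.layer3, self.layer4 = backbone.layer3, backbone.layer4
+         # 1x1 convs project the 512/1024/2048-channel maps to a common dim.
+         self.proj = nn.ModuleList([nn.Conv2d(c, hidden_dim, kernel_size=1)
+                                    for c in (512, 1024, 2048)])
+         # One extra stride-2 conv on the last map keeps more spatial context.
+         self.extra = nn.Conv2d(2048, hidden_dim, kernel_size=3,
+                                stride=2, padding=1)
+
+     def forward(self, x: torch.Tensor):
+         x = self.stem(x)
+         c2 = self.layer1(x)
+         c3 = self.layer2(c2)   # 1/8 of the input resolution
+         c4 = self.layer3(c3)   # 1/16
+         c5 = self.layer4(c4)   # 1/32
+         feats = [proj(c) for proj, c in zip(self.proj, (c3, c4, c5))]
+         feats.append(self.extra(c5))  # 4th, coarsest feature map
+         return feats
+
+ # feats = PyramidFeatureExtractor()(torch.randn(1, 3, 256, 256))
+ ```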
75
+
76
+ # 3.2. Anchor refinement model
77
+
78
+ Anchor refinement model aims to simultaneously focus on the non-local articulated and the local fine-grained features. It contains feature enhancement module and anchor interaction module, which can enhance image features and establish the interactions between anchors respectively.
79
+
80
+ # 3.2.1 Feature enhancement module
81
+
82
+ Since multi-scale features are useful for capturing global clues and recovering local details, we integrated the self-attention module [25] to enhance multi-scale features. So we refer to this module as feature enhancement module, which consists of six encoder layers. The first encoder layer of this module is shown in Fig. 3, and the input features of the rest encoder layers are the outputs of the previous layers. All dimensions of input and output features are 256.
83
+
84
+ Technically, for the input feature pyramid, a convolution layer and a group normalization layer [42] are first used to process them to the same in-plane size. After flattening and concatenation, the generated features $F$ are added to the positional encodings $P_{xy}$ :
85
+
86
+ $$
87
+ P_{xy} = \mathrm{PE}(x, y), \tag{1}
88
+ $$
89
+
90
+ ![](images/cb1a1a03a4502ed224bf81a285db0b23aa43bfc323ec7c0ceb8890de237dd728.jpg)
91
+ Figure 4. One decoder layer of anchor interaction module.
92
+
93
+ where PE means a positional encoding that generates sinusoidal embeddings from float numbers [25], and $x, y$ represent the in-plane positions of the feature $F$ . Besides, we replace the self-attention module with the multi-scale deformable attention module (MSDAM) [49] to mitigate the issues of slow convergence and limited feature resolution.
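+
+ For reference, here is a small sketch of such a sinusoidal encoding applied to float coordinates, in the spirit of the DETR/DAB-DETR formulation; the temperature value and the 128-dimensions-per-axis split are assumptions rather than values taken from the paper.
+
+ ```python
+ import math
+ import torch
+
+ def sine_embed(coord: torch.Tensor, num_feats: int = 128,
+                temperature: float = 10000.0) -> torch.Tensor:
+     """Map float positions (normalised to [0, 1]) to sinusoidal embeddings."""
+     scale = 2 * math.pi
+     dim_t = torch.arange(num_feats, dtype=torch.float32, device=coord.device)
+     dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor")
+                             / num_feats)
+     pos = coord[..., None] * scale / dim_t
+     # Interleave sine (even dims) and cosine (odd dims), then flatten.
+     return torch.stack((pos[..., 0::2].sin(), pos[..., 1::2].cos()),
+                        dim=-1).flatten(-2)
+
+ def pe_xy(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
+     """P_xy = PE(x, y) of Eq. (1): concatenate the per-axis embeddings."""
+     return torch.cat((sine_embed(y), sine_embed(x)), dim=-1)  # (..., 256)
+ ```
+
+ The same style of encoding can plausibly be reused for the 3D anchor queries of Eq. (3) by additionally embedding the depth coordinate before the shared MLP.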
94
+
95
+ For self-attention module, the queries $Q$ , keys $K$ and values $V$ have the same content item $F$ , and the queries contains an extra position item $P_{xy}$ :
96
+
97
+ $$
98
+ Q = F + P_{xy}, \quad K = \mathrm{ref}(F), \quad V = F, \tag{2}
99
+ $$
100
+
101
+ where $ref(\cdot)$ means sample reference keys following [49]. Then, $Q, K, V$ are sent to the MSDAM to get the enhanced features for next encoder layers.
102
+
103
+ Finally, global-aware features are generated after 6 encoder layers and sent to the anchor interaction module.
104
+
105
+ # 3.2.2 Anchor interaction module
106
+
107
+ In A2J-Transformer, a uniform distribution of 3D anchor points is densely set up to perform direct estimation of hand joints through these 3D anchor points. In other words, these 3D anchor points play the role of local coordinate regressors. More details on the implementation of the 3D anchor settings are described in Sec. 3.3. Estimating hand pose through local anchor points has two advantages. First, the setting of dense local 3D anchor points can effectively capture refined local details from images, which works well for estimating the detail information of strongly interacting hands. Second, the cross-attention module can establish interaction between local anchor points to capture global clues, which is beneficial for handling occlusion.
108
+
109
+ Based on this, an anchor interaction module containing 6 decoder layers is designed to link individual anchor points, making global information available for each anchor point. One decoder layer is shown in Fig. 4; for the first decoder layer, the Decoder Embeddings are replaced by the Encoder Output. All dimensions of input and output features are 256.
110
+
111
+ <table><tr><td>Symbol</td><td>Definition</td></tr><tr><td>$J$ &amp; $j$</td><td>Joint set and joint, $j \in J$.</td></tr><tr><td>$A$ &amp; $a$</td><td>Anchor point set and anchor point, $a \in A$.</td></tr><tr><td>$T_j^i$</td><td>In-plane coordinate of joint $j$.</td></tr><tr><td>$T_j^d$</td><td>Depth coordinate of joint $j$.</td></tr><tr><td>$C^i(a)$</td><td>In-plane coordinate of anchor point $a$.</td></tr><tr><td>$C^d(a)$</td><td>Depth coordinate of anchor point $a$.</td></tr><tr><td>$W_j(a)$</td><td>Weight of anchor $a$ towards joint $j$.</td></tr><tr><td>$O_j^i(a)$</td><td>Predicted in-plane offset towards joint $j$ from anchor point $a$.</td></tr><tr><td>$O_j^d(a)$</td><td>Predicted depth offset towards joint $j$ from anchor point $a$.</td></tr></table>
112
+
113
+ Table 1. Symbol definition within A2J-Transformer.
114
+
115
+ The symbols within A2J-Transformer are defined in Table 1 for better explanation. Different from previous Transformer-based works, we follow the understanding of DAB-DETR [25] and explicitly set the coordinates of each anchor $a$ as the queries, which we call Anchor Queries. We denote $a_{q} = (x_{q},y_{q},d_{q})$ as the $q$ -th anchor query, where $x_{q},y_{q},d_{q}\in \mathbb{R}$ denote the in-plane and depth coordinates of $a$ . For $a_{q}$ , the spatial encodings $P_{q}$ are generated by:
116
+
117
+ $$
118
+ P_q = \mathrm{MLP}\left(\mathrm{PE}\left(a_q\right)\right), \tag{3}
119
+ $$
120
+
121
+ where parameters of MLP are shared across all layers.
122
+
123
+ For self-attention module, settings of queries, keys and values of decoder layers are similar to the setting in feature enhancement module:
124
+
125
+ $$
126
+ Q = D + P _ {q}, \quad K = D + P _ {q}, \quad V = D, \tag {4}
127
+ $$
128
+
129
+ where $D$ denotes the decoder embeddings.
130
+
131
+ In cross-attention module, we add the positional query embeddings $P_{q}$ to the output of self-attention module $D$ to get the context aware anchor informative queries $Q$ . Besides, anchor queries are directly set to the reference points $K$ , and $V$ is the encoder output $E$ :
132
+
133
+ $$
134
+ Q = D + P _ {q}, \quad K = a _ {q}, \quad V = E, \tag {5}
135
+ $$
136
+
137
+ and MSDAM is applied for calculating cross-attention.
138
+
139
+ # 3.3. Anchor offset-weight estimation model
140
+
141
+ As described in Section 3.2, when each anchor point is linked to the others through the Transformer module, they have both the ability to recover local details and to perceive global information. To get the final output, the anchor offset-weight estimation model is used to estimate the 3D offsets and weights of each anchor with respect to each hand joint. That is, each anchor acts as a local estimator. The offsets and weights are estimated separately for all hand joints. Finally, we fuse the estimation results of all anchors in a weighted summation way to get the final result for the joints.
142
+
143
+ The 3D anchor structure is shown in Fig. 5. The in-plane coordinates of 3D anchors are densely distributed on the input RGB image with an in-plane stride $S_{t} = 16$ . This could ensure that for each pixel in the extracted feature maps,
144
+
145
+ ![](images/4f5cfc0fe71e72e1d63514653bce7e21dd56d21839f746d26b31997344da3d1f.jpg)
146
+ Figure 5. 3D anchors in A2J-Transformer. Joints will be estimated from anchors and offsets.
147
+
148
+ there can be at least one anchor point corresponding to it while reducing the model size. On this basis, we extend the number of depth values of the anchor points. In addition to the original 0 depth value, two depth values are set at the positions of $\pm 100\mathrm{mm}$ under the world coordinate, centered on the root joint of each hand. This is due to the data processing procedure within the baseline; that is, hand joints outside the range of $\pm 200\mathrm{mm}$ from the root of the hand are considered as invalid joints. Therefore, for the input image size $256\times 256$ , there are $16\times 16\times 3$ anchors in total. This setting extends the anchor points to the 3D space, so as to better fit the depth coordinates of the predicted joints.
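+
+ A short sketch of generating this $16 \times 16 \times 3$ anchor set for a $256 \times 256$ input (stride 16 in-plane; depth layers at $-100$, $0$ and $+100$ mm around each hand's root joint). Whether the in-plane anchors sit at cell centres, and the names used, are our assumptions.
+
+ ```python
+ import torch
+
+ def build_3d_anchors(img_size: int = 256, stride: int = 16,
+                      depth_offsets=(-100.0, 0.0, 100.0)) -> torch.Tensor:
+     """Return (N, 3) anchors as (x, y, depth); N = 16 * 16 * 3 by default."""
+     # One in-plane anchor per stride-sized cell (here: at the cell centre).
+     coords = torch.arange(stride // 2, img_size, stride, dtype=torch.float32)
+     ys, xs = torch.meshgrid(coords, coords, indexing="ij")
+     plane = torch.stack((xs.reshape(-1), ys.reshape(-1)), dim=-1)  # (256, 2)
+     # Replicate every in-plane anchor at the three depth layers (in mm,
+     # relative to the root joint of each hand).
+     layers = [torch.cat((plane, torch.full((plane.shape[0], 1), d)), dim=-1)
+               for d in depth_offsets]
+     return torch.cat(layers, dim=0)  # (768, 3) anchor queries
+
+ # anchors = build_3d_anchors()  # 16 * 16 * 3 = 768 anchors
+ ```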
149
+
150
+ Essentially, anchor points are local regressors used to estimate each joint relative to themselves. As shown in Fig. 2, each anchor point regresses a 3D coordinate offset from itself to all joints in the offset estimation branch. Since different anchor points focus on different feature ranges, the contribution of each anchor point will also be different, so we predict the weight of each anchor point with the weight estimation branch. Therefore, through these two branches, the coordinates of each joint can be calculated as the weighted sum of the prediction results of all anchor points' coordinates.
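+
+ The two branches can be sketched as small MLP heads on top of each anchor query embedding; the hidden sizes, depths and the joint count of 42 (two hands) are illustrative assumptions rather than the released configuration.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class AnchorOffsetWeightHead(nn.Module):
+     """Per-anchor heads: 3D offsets to every joint, plus per-joint weights."""
+
+     def __init__(self, dim: int = 256, num_joints: int = 42):
+         super().__init__()
+         self.num_joints = num_joints
+         self.offset_head = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(),
+                                          nn.Linear(dim, num_joints * 3))
+         self.weight_head = nn.Sequential(nn.Linear(dim, dim), nn.ReLU(),
+                                          nn.Linear(dim, num_joints))
+
+     def forward(self, query_feats: torch.Tensor):
+         # query_feats: (A, dim), one decoder embedding per anchor query.
+         offsets = self.offset_head(query_feats).view(-1, self.num_joints, 3)
+         weights = self.weight_head(query_feats)  # (A, J), unnormalised W_j(a)
+         return offsets, weights
+ ```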
151
+
152
+ Technically, to get the offsets $O_{j}^{i}(a)$ , $O_{j}^{d}(a)$ and the anchor weights $W_{j}(a)$ , 2 MLP layers are added on the outputs of anchor interaction model. 3D offsets from each anchor point to each joint $O_{j}(a)$ are regressed by 1 MLP layer and then divided into $O_{j}^{i}(a)$ , $O_{j}^{d}(a)$ . Another MLP layer regresses each anchor weight $W_{j}(a)$ for each joint. Finally, the 3D coordinates of predicted joint $j$ can be expressed as:
153
+
154
+ $$
155
+ \left\{ \begin{array}{l} \hat {T} _ {j} ^ {i} = \sum_ {a \in A} \tilde {W} _ {j} (a) \left(C ^ {i} (a) + O _ {j} ^ {i} (a)\right) \\ \hat {T} _ {j} ^ {d} = \sum_ {a \in A} \tilde {W} _ {j} (a) \left(C ^ {d} (a) + O _ {j} ^ {d} (a)\right) \end{array} , \right. \tag {6}
156
+ $$
157
+
158
+ where $\hat{T}_j^i$ and $\hat{T}_j^d$ indicate the estimated in-plane and depth coordinates of target joint $j$ , and $C^i(a)$ and $C^d(a)$ denote the in-plane and depth coordinates of an anchor point $a$ . $\tilde{W}_j(a)$ is the normalized weight from anchor point $a$ towards joint $j$ , which is calculated by softmax:
159
+
160
+ $$
161
+ \tilde {W} _ {j} (a) = \frac {e ^ {W _ {j} (a)}}{\sum_ {a \in A} e ^ {W _ {j} (a)}}. \tag {7}
162
+ $$
163
+
164
+ In this way, the estimated hand joints will adaptively select those anchor points with greater contributions to themselves and give them larger weights. Finally, the joint coordinates and the anchor weights are supervised through the joint estimation loss and the anchor point surrounding loss.
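+
+ The fusion of Eqs. (6)-(7) then reduces to a softmax-weighted sum over anchors; a minimal sketch with illustrative tensor shapes:
+
+ ```python
+ import torch
+
+ def aggregate_joints(anchors: torch.Tensor,   # (A, 3): anchor (x, y, depth)
+                      offsets: torch.Tensor,   # (A, J, 3): O_j(a) per anchor
+                      weights: torch.Tensor    # (A, J): unnormalised W_j(a)
+                      ) -> torch.Tensor:
+     """Fuse per-anchor predictions into J joint coordinates (Eqs. 6-7)."""
+     w = torch.softmax(weights, dim=0)             # normalise over anchors (Eq. 7)
+     proposals = anchors[:, None, :] + offsets     # each anchor's joint estimates
+     return (w[..., None] * proposals).sum(dim=0)  # (J, 3) weighted sum (Eq. 6)
+
+ # joints = aggregate_joints(torch.rand(768, 3),
+ #                           torch.randn(768, 42, 3),
+ #                           torch.randn(768, 42))
+ ```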
165
+
166
+ # 3.4. Loss functions
167
+
168
+ For training our A2J-Transformer model, we utilize two loss functions: (1) the joint estimation loss and (2) the anchor surrounding loss following [43].
169
+
170
+ Joint estimation loss. After getting the estimated 3D joint coordinates, we use the joint estimation loss to supervise the final output, which is formulated as:
171
+
172
+ $$
173
+ \mathrm{loss}_1 = \alpha \sum_{j \in J} L_{\tau_1} \left( \hat{T}_j^i - T_j^i \right) + \sum_{j \in J} L_{\tau_2} \left( \hat{T}_j^d - T_j^d \right), \tag{8}
174
+ $$
175
+
176
+ where $\hat{T}_j^i$ and $\hat{T}_j^d$ denote the estimated in-plane and depth coordinates of joint $j$ from Eq. 6, and $T_j^i$ and $T_j^d$ are the given in-plane and depth GT coordinates of joint $j$ ; the parameter $\alpha$ defaults to 0.5 to balance the loss between the in-plane and depth offset estimation tasks. $L_{\tau}(\cdot)$ is the smooth $L_1$ -like loss function [31] given by:
177
+
178
+ $$
179
+ L_{\tau}(x) = \left\{ \begin{array}{ll} \frac{1}{2\tau} x^2, & \text{for } |x| < \tau, \\ |x| - \frac{\tau}{2}, & \text{otherwise.} \end{array} \right. \tag{9}
180
+ $$
181
+
182
+ $\tau_{1}$ and $\tau_{2}$ are set to 1 and 3, respectively, for better smoothing of the depth values.
183
+
184
+ Anchor surrounding loss. To encourage the informative anchor points to locate around the hand joints, thus facilitating the generalization ability of our model, we define the anchor surrounding loss as:
185
+
186
+ $$
187
+ \mathrm{loss}_2 = \sum_{j \in J} L_{\tau_1} \left( \sum_{a \in A} \tilde{W}_j(a)\, C^i(a) - T_j^i \right) + \sum_{j \in J} L_{\tau_2} \left( \sum_{a \in A} \tilde{W}_j(a)\, C^d(a) - T_j^d \right), \tag{10}
188
+ $$
189
+
190
+ where $\tau_{1}$ and $\tau_{2}$ are also set to 1 and 3.
191
+
192
+ Finally, the total loss function is formulated as:
193
+
194
+ $$
195
+ \mathrm{loss} = \lambda_1\, \mathrm{loss}_1 + \lambda_2\, \mathrm{loss}_2, \tag{11}
196
+ $$
197
+
198
+ where $\lambda_{1}$ and $\lambda_{2}$ are set to 3 and 1 to balance the two losses.
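+
+ Condensing Eqs. (8)-(11), here is a hedged sketch of the objective using PyTorch's SmoothL1 loss, whose beta parameter plays the role of $\tau$ in Eq. (9); the element-wise sum reduction and tensor shapes are our assumptions.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def a2j_transformer_loss(pred_joints,     # (J, 3): (x, y, depth) from Eq. (6)
+                          gt_joints,       # (J, 3): ground-truth joints
+                          anchor_centres,  # (J, 3): sum_a W~_j(a) * C(a)
+                          alpha=0.5, tau1=1.0, tau2=3.0,
+                          lambda1=3.0, lambda2=1.0):
+     """Joint estimation loss (Eq. 8) plus anchor surrounding loss (Eq. 10)."""
+     def smooth_l1(x, y, tau):
+         # Eq. (9): quadratic for |x| < tau, linear otherwise (beta = tau).
+         return F.smooth_l1_loss(x, y, beta=tau, reduction="sum")
+
+     loss1 = (alpha * smooth_l1(pred_joints[:, :2], gt_joints[:, :2], tau1)
+              + smooth_l1(pred_joints[:, 2], gt_joints[:, 2], tau2))
+     loss2 = (smooth_l1(anchor_centres[:, :2], gt_joints[:, :2], tau1)
+              + smooth_l1(anchor_centres[:, 2], gt_joints[:, 2], tau2))
+     return lambda1 * loss1 + lambda2 * loss2  # Eq. (11)
+ ```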
199
+
200
+ # 4. Experiments
201
+
202
+ # 4.1. Experimental setting
203
+
204
+ # 4.1.1 Datasets
205
+
206
+ InterHand2.6M dataset [29]. InterHand2.6M is a representative two-hand RGB image dataset with challenging hand interacting scenarios. It contains 1.36M train images and 849K test images. The ground-truth contains semiautomatically annotated 3D coordinates of 42 hand joints. For fair comparison, we choose all test frames for result evaluation following InterNet [29].
207
+
208
+ RHP dataset [50]. RHP is a synthesized dataset that contains data of two isolated hands, with 41K training and 2.7K testing samples. Since the background of this dataset is an outdoor scene, we use it to approximate the generalization ability of our model under in-the-wild conditions. We also follow InterNet [29] for fair comparison.
209
+
210
+ NYU dataset [38]. NYU is a single-hand depth image dataset which has 72K training images and 8.2K testing images with 3D annotations of 36 hand joints. Following [4,10,28,43], we pick 14 of the 36 joints for evaluation.
+
+ HANDS 2017 dataset [46]. HANDS 2017 is a single-hand depth image dataset which has 957K training images and 295K testing images, combined from BigHand2.2M [47] and First-Person Hand Action [46]. The ground-truth contains 3D coordinates of 21 hand joints.
211
+
212
+ # 4.1.2 Evaluation metrics
213
+
214
+ The Mean Per Joint Position Error (MPJPE) is used for evaluation on InterHand2.6M [29]. It is defined as the Euclidean distance (mm) between the predicted and ground-truth 3D joint locations. Following [29], this metric is computed after root joint alignment for each left and right hand separately. For the RHP dataset, the end point error (EPE) is used, which is defined as the mean Euclidean distance (mm) between the predicted and ground-truth 3D hand pose after root joint alignment. For the two depth image datasets, the average 3D distance error is used following [28, 43]. Besides, FPS is used to evaluate the inference speed, and all models are tested on a single NVIDIA RTX 2080 Ti GPU during inference.
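+
+ For clarity, a small sketch of MPJPE with root joint alignment (the root index and millimetre units are assumptions; on InterHand2.6M each hand is aligned and evaluated separately):
+
+ ```python
+ import numpy as np
+
+ def mpjpe_root_aligned(pred: np.ndarray, gt: np.ndarray,
+                        root_idx: int = 0) -> float:
+     """Mean per-joint position error (mm) after root joint alignment.
+
+     pred, gt: (J, 3) arrays of 3D joint coordinates for a single hand.
+     """
+     pred_aligned = pred - pred[root_idx]  # move predicted root to the origin
+     gt_aligned = gt - gt[root_idx]        # move ground-truth root likewise
+     return float(np.linalg.norm(pred_aligned - gt_aligned, axis=1).mean())
+ ```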
215
+
216
+ # 4.1.3 Implementation details
217
+
218
+ A2J-Transformer is implemented using PyTorch. For the InterHand2.6M and RHP datasets, we directly crop the RGB images and resize them to $256 \times 256$ resolution. The data augmentations are exactly the same as in InterNet [29]. For the NYU and HANDS 2017 datasets, we follow [28] to crop and resize the depth images to $176 \times 176$ . We train our model using the Adam optimizer [20]. The learning rate is set to $1 \times 10^{-4}$ with a weight decay of $1 \times 10^{-4}$ in all cases. We train for 42 epochs in total on the InterHand2.6M, RHP and NYU datasets and 17 epochs on the HANDS 2017 dataset.
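+
+ The optimizer settings above translate directly to PyTorch; `model` here is only a placeholder for the full network:
+
+ ```python
+ import torch
+
+ def build_optimizer(model: torch.nn.Module) -> torch.optim.Adam:
+     """Adam with lr = 1e-4 and weight decay = 1e-4, as stated above."""
+     return torch.optim.Adam(model.parameters(), lr=1e-4, weight_decay=1e-4)
+ ```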
219
+
220
+ # 4.2. Results
221
+
222
+ InterHand2.6M dataset: Comparison with the state-of-the-art methods on InterHand2.6M is listed in Table 2. It can be observed that:
223
+
224
+ - In general, A2J-Transformer outperforms other model-free methods by a large margin under all test scenarios. This proves that our method brings a significant advancement in extracting effective information from interacting hands. Compared with model-based methods, A2J-Transformer achieves a comparable result with the state-of-the-art method without using any hand prior information.
225
+
226
+ <table><tr><td rowspan="2">Methods</td><td colspan="3">MPJPE (mm)</td><td rowspan="2">FPS</td><td rowspan="2">Model Size (M)</td></tr><tr><td>Single</td><td>Two</td><td>All</td></tr><tr><td colspan="6">Model-based</td></tr><tr><td>Zhang et al. [48]</td><td>-</td><td>13.48</td><td>-</td><td>17.02</td><td>143</td></tr><tr><td>Meng et al. [27]</td><td>8.51</td><td>13.12</td><td>10.97</td><td>15.47</td><td>55</td></tr><tr><td>Li et al. [21]</td><td>-</td><td>8.79</td><td>-</td><td>18.05</td><td>39</td></tr><tr><td colspan="6">Model-free</td></tr><tr><td>Moon et al. [29]</td><td>12.16</td><td>16.02</td><td>14.22</td><td>107.08</td><td>47</td></tr><tr><td>Kim et al. [19]</td><td>-</td><td>-</td><td>12.08</td><td>-</td><td>-</td></tr><tr><td>Fan et al. [7]</td><td>11.32</td><td>15.57</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Hampali et al. [12]</td><td>10.99</td><td>14.34</td><td>12.78</td><td>19.66</td><td>48</td></tr><tr><td>Ours</td><td>8.10</td><td>10.96</td><td>9.63</td><td>25.65</td><td>42</td></tr></table>
227
+
228
+ Table 2. Comparison with state-of-the-art model-based and model-free methods on InterHand2.6M [29]. MPJPE, FPS and model size are reported.
229
+
230
+ <table><tr><td>Methods</td><td>GT S</td><td>GT H</td><td>EPE</td></tr><tr><td>Zimm. et al. [50]</td><td>✓</td><td>✓</td><td>30.42</td></tr><tr><td>Chen et al. [3]</td><td>✓</td><td>✓</td><td>24.20</td></tr><tr><td>Yang et al. [44]</td><td>✓</td><td>✓</td><td>19.95</td></tr><tr><td>Spurr et al. [36]</td><td>✓</td><td>✓</td><td>19.73</td></tr><tr><td>Spurr et al. [36]</td><td>✗</td><td>✗</td><td>19.73</td></tr><tr><td>Moon et al. [29]</td><td>✗</td><td>✗</td><td>20.89</td></tr><tr><td>A2J-Transformer (Ours)</td><td>✗</td><td>✗</td><td>17.75</td></tr></table>
231
+
232
+ Besides, A2J-Transformer has a fairly fast inference speed, just behind the baseline [29], and the smallest model size. In conclusion, our model achieves the best overall trade-off among accuracy, running speed and model size.
233
+
234
+ - Specifically, compared with the baseline [29], A2J-Transformer obtains improvements of 4.06, 5.06 and $4.59\mathrm{mm}$ under the three scenarios. Compared with the SOTA model-free method [12], the improvements of our method are 2.89, 3.38 and $3.15\mathrm{mm}$ . Compared with the SOTA model-based method [21], our method achieves comparable performance under the two-hand scenario without requiring any hand prior, which makes our model more flexible.
235
+ - For the running speed, A2J-Transformer has a fast inference speed of about 25 FPS, surpassing all methods except the baseline. Besides, A2J-Transformer also has the smallest model size with only 42M parameters. These characteristics bring our model great convenience for future extension and real-time 3D hand pose estimation.
236
+
237
+ RHP dataset: Comparison on the RHP dataset is shown in Table 3. It shows that A2J-Transformer outperforms previous methods without relying on ground-truth information during inference time. The experiments demonstrate the effectiveness on in-the-wild images and show the good generalization ability of A2J-Transformer.
238
+
239
+ NYU and HANDS 2017 dataset: Comparison with state-of-the-art depth based single hand estimation methods
240
+
241
+ Table 3. EPE comparison with previous state-of-the-art methods on RHP. Following [29], the checkmark denotes that a method uses ground-truth information during inference time. S and H denote scale and handedness, respectively.
242
+
243
+ <table><tr><td>Methods</td><td>Mean Error (mm)</td><td>FPS</td></tr><tr><td>Moon et al. [28]</td><td>9.22</td><td>35</td></tr><tr><td>Xiong et al. [43]</td><td>8.61</td><td>105.06</td></tr><tr><td>Fang et al. [8]</td><td>8.29</td><td>111.20</td></tr><tr><td>Ours</td><td>8.43</td><td>24.81</td></tr></table>
244
+
245
+ Table 4. Performance comparison on NYU dataset [38]. Our proposed A2J-Transformer can guarantee a competitive performance for the depth image input.
246
+
247
+ <table><tr><td>Methods</td><td>Mean Error (mm)</td><td>FPS</td></tr><tr><td>Ge et al. [9]</td><td>11.30</td><td>48</td></tr><tr><td>Yuan et al. [45]</td><td>9.97</td><td>-</td></tr><tr><td>Moon et al. [28]</td><td>9.95</td><td>3.5</td></tr><tr><td>Xiong et al. [43]</td><td>8.57</td><td>105.06</td></tr><tr><td>Ours</td><td>8.32</td><td>24.81</td></tr></table>
248
+
249
+ Table 5. Performance comparison on HANDS 2017 dataset [46]. Our method can get state-of-the-art performance on this dataset.
250
+
251
+ on the NYU and HANDS 2017 datasets are given in Table 4 and Table 5. Since A2J-Transformer is not specifically designed for single hand estimation on depth images, we simply change the input channels to verify the generalization ability of our model through this experiment. We can summarize that:
252
+
253
+ - Although A2J-Transformer is designed for RGB images of interacting hands, it still achieves state-of-the-art performance on the HANDS 2017 dataset and obtains a comparable result on the NYU dataset. This relies on the strong ability of A2J-Transformer to grasp articulated hand information and the fitting ability of the 3D anchor points. Compared with A2J [43], a certain performance improvement can be achieved on both datasets, which proves that A2J-Transformer has a strong generalization ability.
254
+
255
+ # 4.3. Ablation study
256
+
257
+ # 4.3.1 Component effectiveness analysis
258
+
259
+ The component effectiveness analysis within A2J-Transformer is executed on the InterHand2.6M dataset. We explore the effectiveness of four parts: (1) the Transformer-based model (anchor refinement model), (2) the A2J (anchor-to-joint) module, (3) the 3D anchor weights, (4) the MSDAM. The specific implementation details are respectively set as: (1) replacing the anchor refinement model with the convolution modules in A2J, (2) directly regressing the hand joints without using the anchor-to-joint module, (3) setting the weights of all anchors to the same value and normalizing them, (4) replacing the MSDAM with the original attention module. The results are listed in Table 6. It can be observed that:
260
+
261
+ - After removing the Transformer-based model and the A2J module, the performance of A2J-Transformer drops by about 5 mm and 6 mm respectively, proving the effectiveness of addressing the 3D interacting hand pose estimation task via concerning local and global visual context jointly.
262
+ - After removing the 3D anchor weights, the performance
263
+
264
+ ![](images/21aeaccdbafa224334184d31360da7471e3d0e37d691bb340d10a35a89eefb08.jpg)
265
+ (a) Weight visualization on right middle PIP.
266
+
267
+ ![](images/899bd6b7af92afdcf3f33e0412f097d4d88c81e86dbecc4718d549040589bab5.jpg)
268
+ (b) Weight visualization on different joints.
269
+ Figure 6. Qualitative results of A2J-Transformer. We show the input, output and weights of anchors on different depth value layers. Red dots in the three depth maps indicate the anchors set at depth positions $+100\mathrm{mm}$ , $0\mathrm{mm}$ , and $-100\mathrm{mm}$ from the root joint respectively. The shade of red dots represent the weights assigned to these anchors as described in Sec. 3.3.
270
+
271
+ <table><tr><td>Trans.</td><td>A2J</td><td>Weights</td><td>MSDAM</td><td>MPJPE (mm)</td></tr><tr><td>X</td><td>✓</td><td>✓</td><td>✓</td><td>14.44</td></tr><tr><td>✓</td><td>X</td><td>✓</td><td>✓</td><td>15.36</td></tr><tr><td>✓</td><td>✓</td><td>X</td><td>✓</td><td>14.04</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>X</td><td>10.69</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>9.63</td></tr></table>
272
+
273
+ of A2J-Transformer drops by $4.4\mathrm{mm}$ . This proves that there is a performance difference among the regression results of different 3D anchor points, so the weights predicted by the model are crucial for the prediction of hand joints.
274
+
275
+ - After replacing the MSDAM with the original attention module, the performance of A2J-Transformer drops by about $1\mathrm{mm}$ , which proves that the MSDAM is useful to our model.
276
+
277
+ # 4.3.2 Anchor setting analysis
278
+
279
+ In order to explore the impact of the anchor settings on model performance, different numbers of in-plane and depth anchor values are set for comparative experiments. The specific settings and their performance results are shown in Table 7. All depth values are uniformly selected near the hand joints, just like the selection of the 3 depth values described in Sec. 3.3. It can be noticed that, when more anchor in-plane and depth values are set, the performance of A2J-Transformer generally improves while the inference speed decreases. In order to strike a balance between accuracy and efficiency, the values of 256 and 3 are finally chosen.
280
+
281
+ # 4.4. Qualitative evaluation and limitation
282
+
283
+ We show the qualitative evaluation results in Fig. 6. We can see that A2J-Transformer could automatically enlarge the informative anchors' weights when different joint coordinates need to be predicted. The model achieves accurate results even with severe occlusions in the interacting hands. The major limitation of our method is that, when there is a large area of occlusion or missing content in the hand area, the results
284
+
285
+ Table 6. Component effectiveness analysis of A2J-Transformer. 'Trans.' means Transformer-based model (anchor refinement model) and 'Weights' means 3D anchor weights.
286
+
287
+ <table><tr><td>In-plane</td><td>Depth</td><td>MPJPE (mm)</td><td>FPS</td></tr><tr><td>256</td><td>7</td><td>9.50</td><td>19.33</td></tr><tr><td>256</td><td>5</td><td>9.61</td><td>21.21</td></tr><tr><td>256</td><td>3</td><td>9.63</td><td>25.65</td></tr><tr><td>256</td><td>1</td><td>9.75</td><td>26.06</td></tr><tr><td>64</td><td>3</td><td>12.28</td><td>25.25</td></tr><tr><td>16</td><td>3</td><td>14.07</td><td>27.39</td></tr><tr><td>4</td><td>3</td><td>15.48</td><td>27.63</td></tr></table>
288
+
289
+ Table 7. Anchor setting analysis of A2J-Transformer. 'In-plane' and 'Depth' denote the number of selected anchor values in the in-plane and depth directions, respectively.
290
+
291
+ predicted by our model will have deviations.
292
+
293
+ # 5. Conclusion
294
+
295
+ In this paper, a 3D monocular RGB interacting hand pose estimation approach termed A2J-Transformer is proposed. Equipped with Transformer's non-local encoding-decoding framework, A2J is evolved to capture interacting hands' local fine details and global articulated clues among joints simultaneously. Besides, 3D anchors are used to better fit the depth information and estimate accurate 3D coordinates. Experiments on the InterHand2.6M and RHP datasets demonstrate the effectiveness and superiority of A2J-Transformer, and extensions on the NYU and HANDS 2017 datasets show its generalization ability. In future work, we will try to represent the movement of anchor points and extend our method to the model-based regime.
296
+
297
+ # Acknowledgment
298
+
299
+ This work is jointly supported by the National Natural Science Foundation of China (Grant No. 62271221 and U1913602). Joey Tianyi Zhou is funded by the SERC (Science and Engineering Research Council) Central Research Fund (Use-Inspired Basic Research) and the Singapore Government's Research, Innovation and Enterprise 2020 Plan (Advanced Manufacturing and Engineering Domain) under programmatic Grant A18A1b0045.
300
+
301
+ # References
302
+
303
+ [1] Luca Ballan, Aparna Taneja, Jürgen Gall, Luc Van Gool, and Marc Pollefeys. Motion capture of hands in action using discriminative salient points. In European Conference on Computer Vision, pages 640-653. Springer, 2012. 1, 2
304
+ [2] Yujun Cai, Liuhao Ge, Jianfei Cai, and Junsong Yuan. Weakly-supervised 3d hand pose estimation from monocular rgb images. In Proceedings of the European Conference on Computer Vision (ECCV), pages 666-682, 2018. 1, 3
305
+ [3] Liangjian Chen, Shih-Yao Lin, Yusheng Xie, Hui Tang, Yufan Xue, Xiaohui Xie, Yen-Yu Lin, and Wei Fan. Generating realistic training images based on tonality-alignment generative adversarial networks for hand pose estimation. arXiv preprint arXiv:1811.09916, 2018. 7
306
+ [4] Xinghao Chen, Guijin Wang, Hengkai Guo, and Cairong Zhang. Pose guided structured region ensemble network for cascaded hand pose estimation. Neurocomputing, 395:138-149, 2020. 6
307
+ [5] Wencan Cheng, Jae Hyun Park, and Jong Hwan Ko. Hand-foldingnet: A 3d hand pose estimation network using multiscale-feature guided folding of a 2d hand skeleton. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11260-11269, 2021. 1, 3
308
+ [6] Xiaoming Deng, Dexin Zuo, Yinda Zhang, Zhaopeng Cui, Jian Cheng, Ping Tan, Liang Chang, Marc Pollefeys, Sean Fanello, and Hongan Wang. Recurrent 3d hand pose estimation using cascaded pose-guided 3d alignments. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(1):932-945, 2022. 3
309
+ [7] Zicong Fan, Adrian Spurr, Muhammed Kocabas, Siyu Tang, Michael J Black, and Otmar Hilliges. Learning to disambiguate strongly interacting hands via probabilistic per-pixel part segmentation. In 2021 International Conference on 3D Vision (3DV), pages 1-10. IEEE, 2021. 1, 3, 7
310
+ [8] Linpu Fang, Xingyan Liu, Li Liu, Hang Xu, and Wenxiong Kang. Jgr-p2o: Joint graph reasoning based pixel-to-offset prediction network for 3d hand pose estimation from a single depth image. In European Conference on Computer Vision, pages 120-137. Springer, 2020. 7
311
+ [9] Liuhao Ge, Yujun Cai, Junwu Weng, and Junsong Yuan. Hand pointnet: 3d hand pose estimation using point sets. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8417-8426, 2018. 7
312
+ [10] Hengkai Guo, Guijin Wang, Xinghao Chen, Cairong Zhang, Fei Qiao, and Huazhong Yang. Region ensemble network: Improving convolutional network for hand pose estimation. In 2017 IEEE International Conference on Image Processing (ICIP), pages 4512-4516. IEEE, 2017. 6
313
+ [11] Shreyas Hampali, Mahdi Rad, Markus Oberweger, and Vincent Lepetit. Honnotate: A method for 3d annotation of hand and object poses. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3196-3206, 2020. 2
314
+ [12] Shreyas Hampali, Sayan Deb Sarkar, Mahdi Rad, and Vincent Lepetit. Keypoint transformer: Solving joint identification in challenging hands and object interactions for accurate
315
+
316
+ 3d pose estimation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11090-11100, 2022. 1, 2, 3, 7
317
+ [13] Shangchen Han, Po-chen Wu, Yubo Zhang, Beibei Liu, Linguang Zhang, Zheng Wang, Weiguang Si, Peizhao Zhang, Yujun Cai, Tomas Hodan, et al. Umetrack: Unified multiview end-to-end hand tracking for vr. In SIGGRAPH Asia 2022 Conference Papers, pages 1-9, 2022. 3
318
+ [14] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 2, 4
319
+ [15] Lin Huang, Jianchao Tan, Ji Liu, and Junsong Yuan. Handtransformer: Non-autoregressive structured modeling for 3d hand pose estimation. In European Conference on Computer Vision, pages 17-33. Springer, 2020. 3
320
+ [16] Lin Huang, Boshen Zhang, Zhilin Guo, Yang Xiao, Zhiguo Cao, and Junsong Yuan. Survey on depth and rgb image-based 3d hand shape and pose estimation. *Virtual Reality & Intelligent Hardware*, 3(3):207-234, 2021. 1
321
+ [17] Umar Iqbal, Andreas Doering, Hashim Yasin, Björn Krüger, Andreas Weber, and Juergen Gall. A dual-source approach for 3d human pose estimation from single images. Computer Vision and Image Understanding, 172:37-49, 2018. 1, 3
322
+ [18] Salman Khan, Muzammal Naseer, Munawar Hayat, Syed Waqas Zamir, Fahad Shahbaz Khan, and Mubarak Shah. Transformers in vision: A survey. ACM computing surveys (CSUR), 54(10s):1-41, 2022. 3
323
+ [19] Dong Uk Kim, Kwang In Kim, and Seungryul Baek. End-to-end detection and pose estimation of two interacting hands. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11189-11198, 2021. 1, 3, 7
324
+ [20] Diederik P Kingma and Jimmy Ba. Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980, 2014. 6
325
+ [21] Mengcheng Li, Liang An, Hongwen Zhang, Lianpeng Wu, Feng Chen, Tao Yu, and Yebin Liu. Interacting attention graph for single image two-hand reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2761-2770, 2022. 1, 2, 3, 7
326
+ [22] Fanqing Lin, Connor Wilhelm, and Tony Martinez. Twohand global 3d pose estimation using monocular rgb. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2373-2381, 2021. 1, 3
327
+ [23] Kevin Lin, Lijuan Wang, and Zicheng Liu. End-to-end human pose and mesh reconstruction with transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1954-1963, 2021. 3
328
+ [24] Kevin Lin, Lijuan Wang, and Zicheng Liu. Mesh graphormer. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12939-12948, 2021. 3
329
+ [25] Shilong Liu, Feng Li, Hao Zhang, Xiao Yang, Xianbiao Qi, Hang Su, Jun Zhu, and Lei Zhang. Dab-detr: Dynamic anchor boxes are better queries for detr. arXiv preprint arXiv:2201.12329, 2022. 4, 5
330
+ [26] Yang Liu, Jie Jiang, and Jiahao Sun. Hand pose estimation from rgb images based on deep learning: A survey. In
331
+
332
+ 2021 IEEE 7th International Conference on Virtual Reality (ICVR), pages 82-89. IEEE, 2021. 1
333
+ [27] Hao Meng, Sheng Jin, Wentao Liu, Chen Qian, Mengxiang Lin, Wanli Ouyang, and Ping Luo. 3d interacting hand pose estimation by hand de-occlusion and removal. arXiv preprint arXiv:2207.11061, 2022. 1, 3, 7
334
+ [28] Gyeongsik Moon, Ju Yong Chang, and Kyoung Mu Lee. V2v-posenet: Voxel-to-voxel prediction network for accurate 3d hand and human pose estimation from a single depth map. In Proceedings of the IEEE conference on computer vision and pattern Recognition, pages 5079-5088, 2018. 1, 3, 6, 7
335
+ [29] Gyeongsik Moon, Shoou-I Yu, He Wen, Takaaki Shiratori, and Kyoung Mu Lee. Interhand2. 6m: A dataset and baseline for 3d interacting hand pose estimation from a single rgb image. In European Conference on Computer Vision, pages 548-564. Springer, 2020. 1, 2, 3, 6, 7
336
+ [30] Iasonas Oikonomidis, Nikolaos Kyriazis, and Antonis A Argyros. Tracking the articulated motion of two strongly interacting hands. In 2012 IEEE conference on computer vision and pattern recognition, pages 1862-1869. IEEE, 2012. 1, 2
337
+ [31] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems, 28, 2015. 6
338
+ [32] Javier Romero, Hedvig Kjellström, and Danica Kragic. Monocular real-time 3d articulated hand pose estimation. In 2009 9th IEEE-RAS International Conference on Humanoid Robots, pages 87-92. IEEE, 2009. 1
339
+ [33] Javier Romero, Dimitrios Tzionas, and Michael J Black. Embodied hands: Modeling and capturing hands and bodies together. arXiv preprint arXiv:2201.02610, 2022. 2, 3
340
+ [34] Jamie Shotton, Andrew Fitzgibbon, Mat Cook, Toby Sharp, Mark Finocchio, Richard Moore, Alex Kipman, and Andrew Blake. Real-time human pose recognition in parts from single depth images. In CVPR 2011, pages 1297-1304. IEEE, 2011. 1
341
+ [35] Breannan Smith, Chenglei Wu, He Wen, Patrick Peluse, Yaser Sheikh, Jessica K Hodgins, and Takaaki Shiratori. Constraining dense hand surface tracking with elasticity. ACM Transactions on Graphics (TOG), 39(6):1-14, 2020. 1, 2, 3
342
+ [36] Adrian Spurr, Jie Song, Seonwook Park, and Otmar Hilliges. Cross-modal deep variational hand pose estimation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 89-98, 2018. 7
343
+ [37] Danhang Tang, Hyung Jin Chang, Alykhan Tejani, and Tae Kyun Kim. Latent regression forest: Structured estimation of 3d articulated hand posture. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3786-3793, 2014. 1
344
+ [38] Jonathan Tompson, Murphy Stein, Yann Lecun, and Ken Perlin. Real-time continuous pose recovery of human hands using convolutional networks. ACM Transactions on Graphics (ToG), 33(5):1-10, 2014. 6, 7
345
+ [39] Dimitrios Tzionas, Luca Ballan, Abhilash Srikantha, Pablo Aponte, Marc Pollefeys, and Juergen Gall. Capturing hands
346
+
347
+ in action using discriminative salient points and physics simulation. International Journal of Computer Vision, 118(2):172-193, 2016. 1, 2
348
+ [40] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 2, 3
349
+ [41] Jiayi Wang, Franziska Mueller, Florian Bernard, Suzanne Sorli, Oleksandr Sotnychenko, Neng Qian, Miguel A Otaduy, Dan Casas, and Christian Theobalt. Rgb2hands: real-time tracking of 3d hand interactions from monocular rgb video. ACM Transactions on Graphics (ToG), 39(6):1-16, 2020. 1, 2, 3
350
+ [42] Yuxin Wu and Kaiming He. Group normalization. In Proceedings of the European conference on computer vision (ECCV), pages 3-19, 2018. 4
351
+ [43] Fu Xiong, Boshen Zhang, Yang Xiao, Zhiguo Cao, Taidong Yu, Joey Tianyi Zhou, and Junsong Yuan. A2j: Anchor-to-joint regression network for 3d articulated pose estimation from a single depth image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 793-802, 2019. 1, 2, 3, 6, 7
352
+ [44] Linlin Yang and Angela Yao. Disentangling latent hands for image synthesis and pose estimation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9877-9886, 2019. 7
353
+ [45] Shanxin Yuan, Guillermo Garcia-Hernando, Björn Stenger, Gyeongsik Moon, Ju Yong Chang, Kyoung Mu Lee, Pavlo Molchanov, Jan Kautz, Sina Honari, Liuhao Ge, et al. Depth-based 3d hand pose estimation: From current achievements to future goals. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2636-2645, 2018. 7
354
+ [46] Shanxin Yuan, Qi Ye, Guillermo Garcia-Hernando, and Tae-Kyun Kim. The 2017 hands in the million challenge on 3d hand pose estimation. arXiv preprint arXiv:1707.02237, 2017. 2, 6, 7
355
+ [47] Shanxin Yuan, Qi Ye, Bjorn Stenger, Siddhant Jain, and Tae-Kyun Kim. BigHand2.2M benchmark: Hand pose dataset and state of the art analysis. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4866-4874, 2017. 6
356
+ [48] Baowen Zhang, Yangang Wang, Xiaoming Deng, Yinda Zhang, Ping Tan, Cuixia Ma, and Hongan Wang. Interacting two-hand 3d pose and shape reconstruction from single color image. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11354-11363, 2021. 1, 2, 3, 7
357
+ [49] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable detr: Deformable transformers for end-to-end object detection. arXiv preprint arXiv:2010.04159, 2020. 4
358
+ [50] Christian Zimmermann and Thomas Brox. Learning to estimate 3d hand pose from single rgb images. In Proceedings of the IEEE international conference on computer vision, pages 4903-4911, 2017. 6, 7
a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:607d3d0b088c2e171036337e4f13e9d06224bfa899bf9a0e439adfdfd0bea016
3
+ size 418100
a2jtransformeranchortojointtransformernetworkfor3dinteractinghandposeestimationfromasinglergbimage/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f79ec21ccc8d2136ae99e6b54aba580b8007d3858d0b5645551f21f8ed4d8874
3
+ size 401714
abcdarbitrarybitwisecoefficientfordequantization/74de187f-bb6a-4967-8de8-7245e6e6dc31_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ce6a02367991ac5ab139eeb49d4f23ce5178d79448352b55531040ee2f010f1
3
+ size 85055
abcdarbitrarybitwisecoefficientfordequantization/74de187f-bb6a-4967-8de8-7245e6e6dc31_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab404f96cb55333c43752d0d7b9db73eed685b32b90472f35ad6637c31f30a13
3
+ size 102418
abcdarbitrarybitwisecoefficientfordequantization/74de187f-bb6a-4967-8de8-7245e6e6dc31_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:53ba165bc81fdaf79fa2d329af06dc809bcce631e462e57a0308b1eda454f16b
3
+ size 8492745
abcdarbitrarybitwisecoefficientfordequantization/full.md ADDED
@@ -0,0 +1,374 @@
 
 
 
 
1
+ # ABCD : Arbitrary Bitwise Coefficient for De-quantization
2
+
3
+ Woo Kyoung Han Byeonghun Lee Sang Hyun Park Kyong Hwan Jin* Daegu Gyeongbuk Institute of Science and Technology (DGIST), Korea
4
+
5
+ {cjss7894, qudgns1113, shpark13135, kyong.jin}@dgist.ac.kr
6
+
7
+ # Abstract
8
+
9
+ Modern displays and content support more than 8-bit images and video. However, bit-starving situations such as compression codecs produce low bit-depth (LBD) images (<8 bits), causing banding and blurry artifacts. Previous bit depth expansion (BDE) methods still produce unsatisfactory high bit-depth (HBD) images. To this end, we propose an implicit neural function with a bit query to recover de-quantized images from arbitrarily quantized inputs. We develop a phasor estimator to exploit the information of the nearest pixels. Our method shows superior performance against prior BDE methods on natural and animation images. We also demonstrate our model on the YouTube UGC dataset for de-banding. Our source code is available at https://github.com/WooKyoungHan/ABCD
10
+
11
+ # 1. Introduction
12
+
13
+ The bit depth of digital content is the number of binary digits used to represent pixel values. Because humans perceive a wide range of color and luminance, modern display devices and cameras support images and video beyond 8-bit depth [21, 28]. Despite these efforts, image and video codecs force HBD images to be quantized into LBD images due to bit starvation. Thus, most content is stored at 8 bits or fewer, leading to false contours and blurry artifacts. Bit depth expansion, a.k.a. de-quantization, aims to recover the missing bits caused by such quantization.
14
+
15
+ Conventional methods such as [6, 10, 19, 24, 36-38] have been proposed for the de-quantization problem. However, these methods suffer from blurry artifacts, resulting in distorted details or false contours in extreme BDE. Recently, learning-based approaches, i.e., deep neural networks, have shown remarkable performance in BDE [4, 9, 18, 26, 32, 40, 43]. Most learning-based approaches [4, 9, 32, 40, 43] reconstruct HBD images in an end-to-end manner. Recent methods [18, 26] recover residual components corresponding to missing bits from LBD images. In particular, the method called D16 [26], with the best per
16
+
17
+ ![](images/62dd7ca5aee221c646927b862290a3e9240c8d4552ed8987905b738e2826438b.jpg)
18
+ Figure 1. Overview of arbitrary bit-depth expansion (dequantization) using ABCD. Our ABCD estimates dominant phasors of images and calculates the bit-query of LBD. Then, an MLP takes the estimated phasor information and bit-wise query $(s)$ to predict the bit-wise coefficient $(\widehat{\mathbf{C}})$ of HBD images.
19
+
20
+ formance so far, performs a binary classification for each bit plane. However, D16 requires a separate deep neural network model for every bit plane.
21
+
22
+ Recently, implicit neural representations (INRs), which map coordinates to signal values [25, 29], have shown promising performance in various tasks [5, 11, 15, 22, 25, 29]. Implicit neural networks suffer from a spectral bias toward low frequencies, which makes it hard for an INR to represent high-frequency components [27]. Fortunately, several solutions have been developed to relax this spectral bias [15, 23, 30, 33, 41]. However, there is no INR approach for the bit depth expansion problem.
23
+
24
+ In this paper, we propose a novel model, the Arbitrary Bit-wise Coefficient model for De-quantization (ABCD), to recover missing bits from the randomly quantized LBD image to any HBD image. The proposed model addresses the spectral bias of INR and improves de-quantization quality through the use of an encoder to estimate the dominant phasors in the ground truth. As shown in Fig. 1, our encoder estimates the dominant phasors to mitigate the spectral bias of INR. Then, the model utilizes an INR to achieve arbitrary-bit reconstructions in the amplitude domain. Finally, a bit
25
+
26
+ ![](images/9a4de727c3bc4e5dbf456328486ca9c880249252a61edffeb474c0dd32f3d182.jpg)
27
+ Input
28
+
29
+ ![](images/17f5e6541ed9af2af112cf04cb3f6a8cdac031a51a34e7beb1646569fab4fc45.jpg)
30
+ D16 [26]
31
+ Figure 2. Visual demonstration of 3-bit to 8-bit de-quantization: input, a deep network approach (D16 [26]), and our method. The neural network method [26] reduces the severe false contours of the input, but false contours still remain. Our ABCD removes such artifacts clearly.
32
+
33
+ ![](images/487d66fd8fd6469930db83e4646912c048ba2f7a541d62295b03c04b9326e0c4.jpg)
34
+ ABCD(Ours)
35
+
36
+ decoding step converts bit coefficients into HBD images by multiplying them with the bit basis. The proposed model represents a significant advancement over previous de-quantization techniques, providing high flexibility and accuracy as it effectively recovers missing bits from randomly quantized inputs.
37
+
38
+ In summary, our main contributions are as follows:
39
+
40
+ - We propose a bit depth expansion algorithm using an implicit neural representation with a bit query in arbitrarily quantized bit levels and demonstrate our method achieves state-of-the-art performance.
41
+ - We show that the proposed phasor estimator predicts the dominant phasors of the ground truth coefficient in the Fourier domain.
42
+ - We validate our pre-trained model not only on five image datasets as de-quantization but also on the YouTube-UGC dataset as de-banding.
43
+
44
+ # 2. Related Work
45
+
46
+ Bit depth expansion There are straightforward ways for BDE, such as the zero padding (ZP) method and bit replication (BR) [36], which fill the missing bits with '0' or with the most significant bits (MSBs). Even though these algorithms are hardware friendly, each reconstructed signal depends only on its own value without considering surrounding pixels. In contrast, interpolation methods [6, 37], content-adaptive (CA) BDE, and contour region reconstruction (CRR) effectively remove false contour artifacts; however, they blur the details. The intensity potential for adaptive de-quantization (IPAD) method [19] proposed an iterative algorithm that uses an intensity potential field calculated from connected component labels. IPAD achieved higher PSNR than the algorithms above, but it still suffers from false contour artifacts in large BDE.
47
+
48
+ Meanwhile, the BDE is highly related to de-banding. The banding artifacts are staircase-like color phenomena of
49
+
50
+ images, especially in video contents. Previous works [31, 34] resolved de-banding with adaptive filter-based methods. Because the artifact is mainly derived from compression with quantization, we apply our de-quantization method to remove the banding artifact.
51
+
52
+ Learning-based Bit Recovery Neural network algorithms [4, 18, 32, 40] predict favorable HBD images with higher performance than the aforementioned expert rule systems [19, 37]. BE-CALF [18] performs BDE by recovering residuals of LBD images; however, it only supports a dedicated input bit depth. Even though BitNet [4] supports 3 to 6-bit inputs with a single model, its performance is not as good as BE-CALF. Unlike previous works, the D4 and D16 networks [26] employ bit-wise classification for each missing bit-plane. However, multiple models are required, one per predicted bitplane (e.g., 4 to 8-bit BDE requires four independent models). Furthermore, Fig. 2 shows that the learning-based approach [26] is not able to remove false contours completely. Unlike these prior works, we design a network that accepts randomly quantized LBD images with a single training.
53
+
54
+ Implicit Neural Representation (INR) Recently, various tasks [11, 22, 29] have applied a multi-layer perceptron (MLP) as an implicit neural representation. Although an INR parameterizes coordinates to continuous signals within a memory-efficient framework, it has two issues: per-scene optimization and spectral bias. To overcome per-scene optimization, prior works [5, 15] concatenate latent features from the inputs with coordinates. The spectral bias [27] induces the network to learn mainly low frequencies. Recently, in the super-resolution task, the local texture estimator (LTE) [15] overcomes this spectral bias by learning the dominant frequencies and phases of high-resolution images. Nevertheless, to the best of our knowledge, there is no INR for de-quantization. Therefore, we formulate an INR as a function of the given LBD data and amplitude coordinates calculated from the input and output bit-depths. Moreover, we develop
55
+
56
+ ![](images/cc7ed5573636c3abc3dfde20d4b9c6e6632d3d28b0f31ef546993da6adc9861b.jpg)
57
+ Figure 3. Bit-depth expansion with our proposed Arbitrary Bit-wise Coefficient De-quantizer (ABCD). ABCD-based arbitrary-bit depth expansion consists of an encoder $(E_{\varphi})$ , a decoder $(f_{\theta})$ , a phasor detector (a blue-shaded region), and a coefficient estimator (a pink-shaded region). Inputs of ABCD are as follows: LBD images $(\mathbf{I}_q)$ , input bit $(q)$ , and target bit $(N)$ . ABCD transforms the features into phasor representations and concatenates with bit-query $(s)$ to predict coefficient $\hat{\mathbf{C}}$ . At test time, ABCD multiplies $s$ with $\hat{\mathbf{C}}$ and adds LBD image $\mathbf{I}_q$ to retrieve HBD image $\mathbf{I}_N$ .
58
+
59
+ a phasor estimator so that the network avoids spectral bias.
60
+
61
+ # 3. Problem Formulation
62
+
63
+ In this section, we analyze images with modular arithmetic properties and formulate implicit neural representation as a function of amplitude. Let $\mathbf{I}_N$ and $\mathbf{I}_q$ be an HBD image and an LBD image, respectively, with bit-plane zero padding when $q < N$ . Then, the quantization equation from $\mathbf{I}_N$ to $\mathbf{I}_q$ is defined as below:
64
+
65
+ $$
66
+ \mathbf {I} _ {q} = \left\lfloor \frac {\mathbf {I} _ {N}}{2 ^ {N - q}} \right\rfloor 2 ^ {N - q}, \tag {1}
67
+ $$
68
+
69
+ where $\lfloor \cdot \rfloor$ is the floor function, which maps a value to the greatest integer not exceeding it. $\mathbf{I}_q$ contains the $q$ MSBs of $\mathbf{I}_{N}$ . Then, the HBD image is the sum of the LBD image $\mathbf{I}_q$ and the residual image $\mathbf{R}$ as below:
70
+
71
+ $$
72
+ \mathbf {I} _ {N} = \mathbf {I} _ {q} + \mathbf {R}. \tag {2}
73
+ $$
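+
+ As a quick sanity check of Eqs. (1)-(2), the quantization and residual can be reproduced in a few lines of NumPy. This is a minimal sketch under our own toy setup; the image size, bit depths, and variable names are illustrative assumptions, not the authors' code:
+
+ ```python
+ import numpy as np
+
+ def quantize(I_N, N=16, q=4):
+     """Eq. (1): keep the q MSBs and zero-pad the missing bit-planes."""
+     step = 2 ** (N - q)
+     return (I_N // step) * step
+
+ I_N = np.random.randint(0, 2 ** 16, size=(4, 4), dtype=np.uint16)  # toy 16-bit image
+ I_q = quantize(I_N, N=16, q=4)
+ R = I_N - I_q                    # Eq. (2): residual that BDE must recover
+ assert int(R.max()) < 2 ** 12    # R only occupies the N - q missing LSBs
+ ```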
74
+
75
+ Decomposition of Quantized Signal Our approach estimates the residual image $\mathbf{R}$ with a function of both encoded latent variables and a bit-wise query. Here, we define a bitwise basis and its coefficients, which normalize the quantization residuals. The binary set $\{0,1\}$ together with the exclusive-or (XOR) operation $\oplus$ and multiplication $\cdot$ composes a binary field denoted by $\mathbb{F}_2$ . It is known that the binary vector space $\{0,1\}^N$ over $\mathbb{F}_2$ has an orthonormal basis given by the one-hot encodings $e_n$ , where $n = 0,\dots ,N - 1$ is an integer [1]. We interpret an arbitrary $N$ -bit number as an element of the $N$ -dimensional binary vector space $(\{0,1\}^{N},\oplus ,\cdot)$ . To realize such a binary vector as a real number, we represent an arbitrary positive number $a$ as a power series of 2 with elements $(b_{i})$ from a binary vector:
76
+
77
+ $$
78
+ a = \sum_ {i = - \infty} ^ {\infty} b _ {i} 2 ^ {i}, \tag {3}
79
+ $$
80
+
81
+ where $b_{i} \in \{0,1\}$ and $i \in \mathbb{Z}$ . When we split above series in Eq. (3) into two terms with respect to any integer $L$ ,
82
+
83
+ $$
84
+ a = \sum_ {i = L + 1} ^ {\infty} b _ {i} 2 ^ {i} + \sum_ {j = - \infty} ^ {L} b _ {j} 2 ^ {j} = \sum_ {i = L + 1} ^ {\infty} b _ {i} 2 ^ {i} + C \cdot 2 ^ {L + 1}, \tag {4}
85
+ $$
86
+
87
+ where $C \in [0,1]$ . Note that digital images consist of non-negative integers, e.g., "uint8" values. Combining Eq. (2) with Eq. (4), we express the HBD image in digital-image form as below:
88
+
89
+ $$
90
+ \mathbf {I} _ {N} = \underbrace {\sum_ {i = N - q} ^ {N - 1} 2 ^ {i} \cdot \mathbf {B} _ {i}} _ {\mathbf {I} _ {q}} + \underbrace {2 ^ {N - q} \cdot \mathbf {C}} _ {\mathbf {R}}, \tag {5}
91
+ $$
92
+
93
+ where $\mathbf{B}_i\in \{0,1\}^{H\times W\times 3}$ denotes the binary image at the $i$ -th least significant bit-plane. Thus, the residual image $\mathbf{R}$ is the product of $2^{N - q}$ and the real-valued image $\mathbf{C}\in [0,1)^{H\times W\times 3}$ . The orthonormal basis element $e_q$ of the binary vector space is equivalent to $2^{N - q}$ in arithmetic form. From now on, we call $\mathbf{C}$ the bit-wise coefficient of the bit-wise query ( $s\coloneqq 2^{N - q}$ ), which is equivalent to $e_q$ .
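+
+ As a concrete illustration of Eq. (5) and the bit-wise query, the coefficient can be computed as below. This is a hypothetical sketch; the toy image and names are ours:
+
+ ```python
+ import numpy as np
+
+ N, q = 16, 4
+ s = 2 ** (N - q)                      # bit-wise query s := 2^(N-q), i.e. e_q in arithmetic form
+ I_N = np.random.randint(0, 2 ** N, size=(4, 4))
+ I_q = (I_N // s) * s                  # q MSBs kept, missing bit-planes zero-padded
+ C = (I_N - I_q) / s                   # bit-wise coefficient of Eq. (5), values in [0, 1)
+ assert np.array_equal(I_q + (C * s).astype(I_N.dtype), I_N)
+ ```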
94
+
95
+ Arbitrary Bitwise Coefficients Our method, ABCD, aims to predict coefficient $\mathbf{C}$ to reconstruct residual images by using the implicit neural representation (INR) as a function of latent vector and its quantizing bit-query. The INR parameterizes a continuous signal with an MLP fed by coordinates [5, 23, 29]. We design INR $(f_{\theta})$ using amplitudes as an input coordinate so that it represents bit-wise coefficients $\mathbf{C}$ as continuous signals along the amplitude axis as follows:
96
+
97
+ $$
98
+ \mathbf {C} (\mathbf {x}, s) \simeq f _ {\theta} \left(\mathbf {I} _ {q} [ \mathcal {N} (\mathbf {x}) ], s\right), \tag {6}
99
+ $$
100
+
101
+ where $\mathbf{x}$ is a 2D coordinate in the image domain and $\mathcal{N}(\mathbf{x})$ is the set of nearest pixels around the center pixel $\mathbf{x}$ . A decoder $f_{\theta}$ maps to the coefficient image $\widehat{\mathbf{C}}$ from a domain composed of a latent vector and a bit-wise query; $f_{\theta}(z,s): (\mathcal{Z},\mathcal{S}) \mapsto \mathcal{C}$ , where $z \in \mathcal{Z}$ is a latent tensor from an encoder $E_{\varphi}$ , $\mathcal{S}$
102
+
103
+ is the set of basis vectors $e_i$ , and $\mathcal{C}$ is the space of coefficient values predicted by $f_{\theta}$ .
104
+
105
+ Phasor Estimator Inspired by [15, 33], we insert a phasor estimator, obtained by modifying the local texture estimator [15], to relax the spectral bias. We discuss the difference between our ABCD and the local texture estimator in Sec. 6. The overall system is constructed as follows:
106
+
107
+ $$
108
+ \widehat {\mathbf {C}} (\mathbf {x}, \mathbf {I} _ {q}, s; \boldsymbol {\Theta}) = f _ {\theta} \left(h _ {\psi} \left(\mathbf {z} _ {\mathbf {x}}\right), s\right), \tag {7}
109
+ $$
110
+
111
+ where $\mathbf{z} = E_{\varphi}(\mathbf{I}_q)$ , $h_{\psi}(\cdot)$ denotes the phasor estimator of ABCD and $\Theta = \{\theta, \varphi, \psi\}$ is a set of trainable parameters. Our phasor estimator $(h_{\psi}(\cdot))$ consists of two elements: (1) an amplitude estimator $(h_a(\cdot): \mathbb{R}^C \mapsto \mathbb{R}^{2K})$ and (2) a phase estimator $(h_p(\cdot): \mathbb{R}^C \mapsto \mathbb{R}^{2K})$ . Thus, given a coordinate $\mathbf{x} \in \mathbb{R}^2$ , the estimating function $h_{\psi}(\cdot): (\mathbb{R}^C) \mapsto \mathbb{R}^{2K}$ is defined as
112
+
113
+ $$
114
+ h _ {\psi} \left(\mathbf {z} _ {\mathbf {x}}\right) = \left[ \begin{array}{l} \mathbf {A} _ {\mathbf {x}} ^ {1} \\ \mathbf {A} _ {\mathbf {x}} ^ {2} \end{array} \right] \odot \left[ \begin{array}{l} \cos \left(\pi \boldsymbol {\Phi} _ {\mathbf {x}} ^ {1}\right) \\ \sin \left(\pi \boldsymbol {\Phi} _ {\mathbf {x}} ^ {2}\right) \end{array} \right], \tag {8}
115
+ $$
116
+
117
+ $$
118
+ \text{where}\quad \left[ \begin{array}{l} \mathbf{A}_{\mathbf{x}}^{1} \\ \mathbf{A}_{\mathbf{x}}^{2} \end{array} \right] = h_{a}(\mathbf{z}_{\mathbf{x}}), \quad \left[ \begin{array}{l} \boldsymbol{\Phi}_{\mathbf{x}}^{1} \\ \boldsymbol{\Phi}_{\mathbf{x}}^{2} \end{array} \right] = h_{p}(\mathbf{z}_{\mathbf{x}}), \tag{9}
119
+ $$
120
+
121
+ where $\mathbf{A}_{\mathbf{x}}^{1,2} \in \mathbb{R}^K$ are amplitude vectors at $\mathbf{x}$ , $\Phi_{\mathbf{x}}^{1,2} \in \mathbb{R}^K$ denote phase vectors at $\mathbf{x}$ , and $\odot$ represents element-wise multiplication. Our interpretation is that, by observing pixels inside a receptive field (RF), ABCD with the encoder $(h_\psi \circ E_\varphi)$ estimates the dominant phasors accurately. Here, the size of the RF is determined by the encoder $(E_\varphi)$ . We visually demonstrate the estimated phasors in Fig. 6.
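+
+ A minimal PyTorch sketch of the phasor estimator in Eqs. (8)-(9) is given below. The $3\times 3$ convolutions and the $2K = 256$ output channels follow Sec. 4.1, but the module layout and names are our assumptions rather than the released implementation:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class PhasorEstimator(nn.Module):
+     """h_psi: latent feature z -> amplitudes * [cos(pi*Phi1); sin(pi*Phi2)], cf. Eqs. (8)-(9)."""
+     def __init__(self, in_channels=64, K=128):
+         super().__init__()
+         self.h_a = nn.Conv2d(in_channels, 2 * K, 3, padding=1)  # amplitude estimator h_a
+         self.h_p = nn.Conv2d(in_channels, 2 * K, 3, padding=1)  # phase estimator h_p
+
+     def forward(self, z):
+         A1, A2 = self.h_a(z).chunk(2, dim=1)      # (B, K, H, W) each
+         P1, P2 = self.h_p(z).chunk(2, dim=1)
+         return torch.cat([A1 * torch.cos(torch.pi * P1),
+                           A2 * torch.sin(torch.pi * P2)], dim=1)  # (B, 2K, H, W)
+
+ phasor = PhasorEstimator(in_channels=64, K=128)
+ feat = phasor(torch.randn(1, 64, 32, 32))        # features z from the encoder E_phi
+ ```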
122
+
123
+ Given a set $\mathcal{J}$ of training images with different quantization levels between 3 and 8, the learning problem is defined as follows:
124
+
125
+ $$
126
+ \widehat {\boldsymbol {\Theta}} = \arg \min _ {\boldsymbol {\Theta}} \sum_ {j \in \mathcal {J}} \sum_ {q \in [ 3, 8 ]} \| \mathbf {C} \left(\mathbf {I} _ {N} ^ {j}\right) - \widehat {\mathbf {C}} \left(\mathbf {I} _ {q} ^ {j}, s; \boldsymbol {\Theta}\right) \| _ {2}. \tag {10}
127
+ $$
128
+
129
+ Bit decoding To retrieve de-quantized image values as in Eq. (5), the bit-wise coefficient $(\widehat{\mathbf{C}})$ is multiplied by the basis value $s$ and added to the corresponding bitplane zero-padded input value $\mathbf{I}_q$ . The whole decoding process is written as follows:
130
+
131
+ $$
132
+ \hat {\mathbf {I}} _ {N} (\mathbf {x}) = \underbrace {\widehat {\mathbf {C}} \left(\mathbf {x} , \mathbf {I} _ {q} , s ; \widehat {\boldsymbol {\Theta}}\right) \times s} _ {\mathbf {R}} + \mathbf {I} _ {q} (\mathbf {x}) \tag {11}
133
+ $$
134
+
135
+ # 4. Method
136
+
137
+ # 4.1. Network Detail
138
+
139
+ Our ABCD-based arbitrary BDE network includes an encoder $(E_{\varphi})$ , a phasor detector (blue shaded area in Fig. 3), a decoder $(f_{\theta})$ , and a bit-wise coefficient estimator (pink shaded area in Fig. 3). This section describes a backbone structure (including encoder and decoder) and architectural details of ABCD.
140
+
141
+ Encoder $(E_{\varphi})$ and Decoder $(f_{\theta})$ We use EDSR [17], RDN [42], and SwinIR [16] as the encoder $(E_{\varphi})$ . The EDSR [17] is composed of 38 ResBlocks with 128 channels. The RDN [42] is composed of 8 RDB blocks. Since the BDE task requires the input and output to have the same size, we apply the encoders without upsampling layers. The decoder $(f_{\theta})$ is a 5-layer MLP with ReLU activations [5, 15]. The dimension of the first layer is 257, and the hidden dimensions are 256.
142
+
143
+ ABCD Our ABCD contains a phasor estimator and a bit-wise coefficient estimator. Inspired by the LTE [14, 15], we hypothesize that our ABCD learns the phasor distribution from Fourier representations. The phasor estimator contains an amplitude estimator $(h_a)$ , a phase estimator $(h_p)$ , and sinusoidal activations. These estimators consist of $3 \times 3$ convolution layers with 256 output channels. The bit-wise coefficient estimator calculates the bit-query $s$ following Eq. (5) and concatenates it with the output of the phasor estimator (Eq. (8)). The concatenated vector contains 257 channels, which matches the input dimension of the MLP. We clip the output of the network with a normalized tanh activation $(0 \leq \cdot \leq 1)$ to prevent overshoot and undershoot effects. Our network reconstructs HBD images by multiplying the coefficients $\hat{\mathbf{C}}$ with the bit-query $s$ as in Eq. (11).
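+
+ The decoder and bit-wise coefficient estimator described above admit a compact sketch. The 257-dimensional first layer, 256-dimensional hidden layers, and normalized tanh output follow the text; the exact layer arrangement and the way the scalar query is broadcast are our assumptions:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class CoefficientDecoder(nn.Module):
+     """f_theta: (phasor features, bit-wise query s) -> coefficient C_hat in [0, 1]."""
+     def __init__(self, feat_dim=256, hidden=256, n_layers=5):
+         super().__init__()
+         layers, d = [], feat_dim + 1                  # +1 input channel for the bit query s
+         for _ in range(n_layers - 1):
+             layers += [nn.Linear(d, hidden), nn.ReLU(inplace=True)]
+             d = hidden
+         layers += [nn.Linear(d, 3)]                   # RGB coefficient
+         self.mlp = nn.Sequential(*layers)
+
+     def forward(self, phasor_feat, s):
+         # phasor_feat: (P, 256) per-pixel phasor features, s: scalar 2^(N-q)
+         q = torch.full_like(phasor_feat[:, :1], float(s))
+         x = torch.cat([phasor_feat, q], dim=-1)       # 257-dimensional input
+         return 0.5 * (torch.tanh(self.mlp(x)) + 1.0)  # normalized tanh, 0 <= C_hat <= 1
+
+ dec = CoefficientDecoder()
+ C_hat = dec(torch.randn(1024, 256), s=2 ** (16 - 4))
+ ```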
144
+
145
+ # 4.2. Training Strategy
146
+
147
+ We construct each minibatch with quantization levels sampled uniformly from 3-bit to 8-bit. Note that our network shows robustness to unseen levels (2-bit or 10-bit). Let $q$ be a quantization level randomly sampled as an integer from 3 to 8. With Eq. (1), we quantize the HBD image to $q$ bits and apply bit-wise zero padding before feeding it into the network. We calculate the bit-wise coordinate $s = 2^{N - q}|_{N = 16}$ and divide $\mathbf{R}$ by $s$ . We randomly sample pixels from the ground-truth (GT) coefficient $\mathbf{C}$ .
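+
+ A toy NumPy version of this sampling strategy is sketched below; the actual data pipeline (patching, RGB handling, augmentation) may differ:
+
+ ```python
+ import numpy as np
+
+ def make_training_pair(I_N, N=16):
+     """Randomly quantize a 16-bit GT image and return the LBD input, query s, and target C."""
+     q = np.random.randint(3, 9)          # quantization level uniformly drawn from {3,...,8}
+     s = 2 ** (N - q)                     # bit-wise coordinate / query
+     I_q = (I_N // s) * s                 # LBD input with bit-plane zero padding
+     C = (I_N - I_q) / s                  # regression target in [0, 1), cf. Eq. (10)
+     return I_q, float(s), C
+
+ gt = np.random.randint(0, 2 ** 16, size=(64, 64), dtype=np.uint16)
+ inp, s, target = make_training_pair(gt)
+ ```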
148
+
149
+ # 5. Experiment
150
+
151
+ # 5.1. Training
152
+
153
+ Dataset As in [26], we use 2000 16-bit images for training, 1000 each from the Sintel dataset [7] and the MIT-Adobe FiveK dataset [3]. For evaluation, we report peak signal-to-noise ratio (PSNR) and structural similarity (SSIM) on the MIT-Adobe FiveK [3] and Sintel [7] test sets, and on the TESTIMAGES 1200 [2], Kodak [8], and ESPL v2 [13] benchmarks.
154
+
155
+ Implementation detail We use $64 \times 64$ patches as inputs to our network and optimize it with Adam [12]. We train for 1000 epochs with a batch size of 16. When we train ABCD with CNN-based encoders, such as EDSR [17] or RDN [42], the learning rate is initialized to 1e-4 and decayed by a factor of 0.5 at epochs [200, 400, 600, 800]. For a transformer encoder (SwinIR) [16], the learning rate is initialized to 1e-5 and decayed by a factor of 0.5 at epochs [500, 800, 900, 950].
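+
+ In PyTorch terms, the CNN-encoder schedule above corresponds roughly to the following sketch; `model` is only a placeholder here, not an actual ABCD network:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ model = nn.Linear(257, 3)   # placeholder for an ABCD network (encoder + phasor estimator + MLP)
+ optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
+ scheduler = torch.optim.lr_scheduler.MultiStepLR(
+     optimizer, milestones=[200, 400, 600, 800], gamma=0.5)   # halve the LR at these epochs
+
+ for epoch in range(1000):
+     # ... one epoch of L2 training on bit-wise coefficients (Eq. (10)) ...
+     scheduler.step()
+ ```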
156
+
157
+ ![](images/75223805e7642b591ba9dcd6b345d77dc999447e4c32549664c09e934755b705.jpg)
158
+ Sintel [7]
159
+
160
+ ![](images/2b6a6a01a41e88cb881478fcfd62b3c9e37b4f8908009b70bbffdbc46e9ebe47.jpg)
161
+
162
+ ![](images/dac906ea667eb9104152d1a00fffeb3daf515ffc10735653aafcdef51eac14a9.jpg)
163
+
164
+ ![](images/4e71e7405212a831c4dde1331242403efc2e00f3408dea508f156167bbb715da.jpg)
165
+
166
+ ![](images/8adecddb7cad92bfd20e99a83ca7d9c8ab00d1a2d2e6a97fe694daf86cc9b240.jpg)
167
+
168
+ ![](images/7ea8ad6e72fab406384cfb2892c0bd59e301c214cfae857782c2dab597159468.jpg)
169
+ Kodak [8]
170
+
171
+ ![](images/b113e79a01acd5604a669831f63820e91626619a4649c269d34dde1c85b13dc7.jpg)
172
+
173
+ ![](images/bf81ccb700ce455d12d9d81849494d58b8e99943b717e1867dd883572666d10d.jpg)
174
+
175
+ ![](images/8acbdc531b48f8db966e96e2933c0951e5f1b00471b02ba59733c00951f5b699.jpg)
176
+
177
+ ![](images/e866a2744ddc40ddb285b04e731d2f4070b3d98a370c080ac8f0c438bdad7778.jpg)
178
+
179
+ ![](images/6c707d1f5748c264983612935911c6b4cda671cf6f11f43457240a38256f4edf.jpg)
180
+ ESPL v2 [13]
181
+
182
+ ![](images/93d6ea5e0e93ac9554e0900d3352c8d29a7feae7b3b7dea7499735617453ef05.jpg)
183
+
184
+ ![](images/f89232da1b9140b14b12763c10af378325a4caa729386b02f76aa465fb58366a.jpg)
185
+
186
+ ![](images/e365734e4e3f2a40608184623987853ebcfb1061372f03e8cf055df7639e0508.jpg)
187
+
188
+ ![](images/a929671714d7183bdd8185966da7a0499484e6922217f93c0ff78c3da2a16257.jpg)
189
+
190
+ ![](images/93e41bbd052cd7df79987890ea268ae638db87d67a3361c9dba6729bf3eaf56a.jpg)
191
+ ESPL v2 [13]
192
+
193
+ ![](images/08be89139d67a9242da5b904cbc747b522244224e8e0547efbb5331070bf57f1.jpg)
194
+
195
+ ![](images/ee385111682d99b354763451de579fd1a8c45f6b3ac55c49b88261d5b9c9e47e.jpg)
196
+
197
+ ![](images/2a06a233c0f6580029a7368663be7c60128920a4c40aa6fbe7895ababfe349e6.jpg)
198
+
199
+ ![](images/463f792f1c09041280dff1fa28595059d7e2246242e2fcdbde3b0f57af65623f.jpg)
200
+
201
+ ![](images/47890bfbcf178c2be7653afc4c645fd619b5c4e5c21b64efe07591207ca84ef7.jpg)
202
+ TESTIMAGES1200 [2]
203
+ IPAD [19]
204
+ Figure 4. Qualitative comparison in 3-bit $\rightarrow$ 8-bit BDE. SwinIR [20] is used as an encoder for ABCD.
205
+
206
+ ![](images/091babf330b04ca0e49313942d90017bd7878b7b3cb4fb6ab0725e5bf4e16d23.jpg)
207
+ BitNet [4]
208
+
209
+ ![](images/7e33667f2b4693b792c131b52b50a235fafa0ed88d743e84b1116c644c2f640f.jpg)
210
+ D16 [26]
211
+
212
+ ![](images/6dbe9ddee4119eb22e096e36b7ee52512b9017e50e926b48295b55f950fa4da4.jpg)
213
+ ABCD (Ours)
214
+
215
+ ![](images/62150d70d257461c63713f147c2e0b4ac2a5aa6a54aa1e71f69ebf4e9cfe105a.jpg)
216
+ GT
217
+
218
+ # 5.2. Evaluation
219
+
220
+ Qualitative result We use 8-bit ground truth images since standard displays support up to 8-bit. Fig. 4 shows a qualitative comparison to other BDE methods: IPAD [19], BitNet [4], and D16 [26]. We find that D16 [26] suffers from false contours and BitNet [4] blurs details. The first and second rows demonstrate that our ABCD has the advantage of reconstructing details blurred by quantization. Also, the overall comparisons show that our ABCD is effective in removing false contour artifacts. Furthermore, in Fig. 5, we demonstrate that our ABCD restores extreme bit-planes. Note that the 2-bit inputs in Fig. 5 are unseen during training.
221
+
222
+ Quantitative result We compare the performance of our method against existing methods; IPAD [19], which is a non-learnable method, BitNet [4], BE-CALF [18], and D16 [26]. The input in Tab. 1 refers to $q$ -quantized images with
223
+
224
+ zero-padding at missing bitplanes. The number of trainable parameters is written below. The pre-trained BitNet supports 3-bit or 4-bit to 8-bit and 3 to 6-bit to 16-bit expansion. The BE-CALF [18] provides a pre-trained model for 4-bit or 8-bit to 16-bit expansion; however, the training code is not available. We directly copy the numeric result from the original papers. In Tab. 1 the test dataset is composed of randomly selected 50 images in the Sintel dataset and the last 1000 (filenames a4001 to 5000) images of the MIT-Adobe dataset enhanced by expert E. Our ABCD outperforms all methods with any encoder. The maximum gain of PSNR is 1.52dB on Sintel for $4\rightarrow 16$ BDE.
225
+
226
+ We report results on benchmarks at the bottom of Tab. 1. The TESTIMAGES dataset [2] contains 40 images with 16-bit depth. The Kodak [8] dataset contains 24 natural images with 8-bit depth. The ESPL v2 [13] contains 25 animated images with 8-bit depth. In both natural and animation images, our ABCD shows better performances than
227
+
228
+ <table><tr><td>Test</td><td colspan="6">Sintel</td><td colspan="4">MIT-Adobe FiveK</td></tr><tr><td>Method</td><td>4 → 8</td><td>4 → 12</td><td>4 → 16</td><td>6 → 12</td><td>6 → 16</td><td>8 → 16</td><td>3 → 16</td><td>4 → 16</td><td>5 → 16</td><td>6 → 16</td></tr><tr><td rowspan="2">Input</td><td>29.16</td><td>28.79</td><td>28.77</td><td>40.90</td><td>40.81</td><td>52.86</td><td>22.90</td><td>28.86</td><td>34.86</td><td>40.88</td></tr><tr><td>0.8864</td><td>0.8844</td><td>0.8843</td><td>0.9858</td><td>0.9857</td><td>0.9990</td><td>0.7381</td><td>0.8769</td><td>0.9556</td><td>0.9871</td></tr><tr><td rowspan="2">IPAD [19]</td><td>35.86</td><td>35.78</td><td>35.76</td><td>47.66</td><td>47.62</td><td>58.62</td><td>29.86</td><td>35.74</td><td>41.18</td><td>46.43</td></tr><tr><td>0.9457</td><td>0.9452</td><td>0.9451</td><td>0.9903</td><td>0.9902</td><td>0.9989</td><td>0.8624</td><td>0.9378</td><td>0.9743</td><td>0.9903</td></tr><tr><td>BitNet [4]</td><td>39.34</td><td>39.49</td><td>39.49</td><td>49.72</td><td>49.68</td><td>57.55</td><td>33.46</td><td>39.21</td><td>44.02</td><td>48.46</td></tr><tr><td>(0.94M)</td><td>0.9701</td><td>0.9719</td><td>0.9719</td><td>0.9954</td><td>0.9954</td><td>0.9989</td><td>0.9128</td><td>0.9632</td><td>0.9853</td><td>0.9943</td></tr><tr><td>BE-CALF [18]</td><td>39.91</td><td>39.98</td><td>39.98</td><td>51.14</td><td>51.14</td><td>59.51</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>(5.18M)</td><td>0.9737</td><td>0.9752</td><td>0.9752</td><td>0.9940</td><td>0.9940</td><td>0.9993</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>D16 [26]</td><td>41.19</td><td>41.51</td><td>41.51</td><td>53.47</td><td>53.48</td><td>63.51</td><td>34.11</td><td>39.95</td><td>44.94</td><td>49.72</td></tr><tr><td>(≤15.46M)</td><td>0.9794</td><td>0.9810</td><td>0.9810</td><td>0.9980</td><td>0.9979</td><td>0.9998</td><td>0.9279</td><td>0.9693</td><td>0.9876</td><td>0.9953</td></tr><tr><td>RDN-ABCD</td><td>42.31</td><td>42.84</td><td>42.84</td><td>54.07</td><td>54.10</td><td>63.75</td><td>35.14</td><td>40.94</td><td>45.68</td><td>50.08</td></tr><tr><td>(11.52M)</td><td>0.9831</td><td>0.9847</td><td>0.9847</td><td>0.9984</td><td>0.9984</td><td>0.9998</td><td>0.9392</td><td>0.9746</td><td>0.9893</td><td>0.9957</td></tr><tr><td>EDSR-ABCD</td><td>42.47</td><td>43.02</td><td>43.02</td><td>54.15</td><td>54.18</td><td>63.78</td><td>35.25</td><td>41.04</td><td>45.74</td><td>50.11</td></tr><tr><td>(12.22M)</td><td>0.9837</td><td>0.9852</td><td>0.9852</td><td>0.9984</td><td>0.9984</td><td>0.9998</td><td>0.9401</td><td>0.9748</td><td>0.9893</td><td>0.9957</td></tr><tr><td>SwinIR-ABCD</td><td>42.51</td><td>43.03</td><td>43.03</td><td>54.08</td><td>54.12</td><td>63.74</td><td>35.44</td><td>41.18</td><td>45.80</td><td>50.13</td></tr><tr><td>(12.10M)</td><td>0.9844</td><td>0.9855</td><td>0.9855</td><td>0.9984</td><td>0.9984</td><td>0.9998</td><td>0.9412</td><td>0.9751</td><td>0.9895</td><td>0.9957</td></tr><tr><td>Benchmark</td><td colspan="6">TESTIMAGES 1200</td><td colspan="2">KODAK</td><td colspan="2">ESPL v2</td></tr><tr><td>Method</td><td>4 → 8</td><td>4 → 12</td><td>4 → 16</td><td>6 → 12</td><td>6 → 16</td><td>8 → 16</td><td>3 → 8</td><td>4 → 8</td><td>3 → 8</td><td>4 → 8</td></tr><tr><td 
rowspan="2">Input</td><td>29.21</td><td>28.85</td><td>28.83</td><td>40.95</td><td>40.86</td><td>52.92</td><td>22.77</td><td>29.06</td><td>23.20</td><td>29.28</td></tr><tr><td>0.8764</td><td>0.8741</td><td>0.8739</td><td>0.9856</td><td>0.9855</td><td>0.9990</td><td>0.7671</td><td>0.8998</td><td>0.6616</td><td>0.8261</td></tr><tr><td rowspan="2">IPAD [19]</td><td>36.29</td><td>36.20</td><td>36.18</td><td>47.20</td><td>47.15</td><td>57.84</td><td>29.20</td><td>34.90</td><td>29.86</td><td>35.75</td></tr><tr><td>0.9450</td><td>0.9444</td><td>0.9443</td><td>0.9901</td><td>0.9899</td><td>0.9988</td><td>0.8515</td><td>0.9345</td><td>0.8379</td><td>0.9207</td></tr><tr><td>BitNet [4]</td><td>38.75</td><td>38.81</td><td>38.80</td><td>49.52</td><td>49.48</td><td>53.60</td><td>32.68</td><td>38.48</td><td>32.58</td><td>38.23</td></tr><tr><td>(0.94M)</td><td>0.9571</td><td>0.9589</td><td>0.9589</td><td>0.9944</td><td>0.9944</td><td>0.9970</td><td>0.9172</td><td>0.9659</td><td>0.9001</td><td>0.9399</td></tr><tr><td>BE-CALF [18]</td><td>38.45</td><td>38.50</td><td>38.50</td><td>49.85</td><td>49.84</td><td>58.11</td><td>-</td><td>38.92</td><td>-</td><td>38.43</td></tr><tr><td>(5.18M)</td><td>0.9632</td><td>0.9648</td><td>0.9649</td><td>0.9945</td><td>0.9945</td><td>0.9992</td><td>-</td><td>0.9681</td><td>-</td><td>0.9479</td></tr><tr><td>D16 [26]</td><td>40.39</td><td>40.42</td><td>40.41</td><td>52.12</td><td>52.12</td><td>61.68</td><td>33.67</td><td>39.52</td><td>33.47</td><td>39.53</td></tr><tr><td>(≤14.27M)</td><td>0.9725</td><td>0.9735</td><td>0.9735</td><td>0.9967</td><td>0.9967</td><td>0.9996</td><td>0.9337</td><td>0.9723</td><td>0.9001</td><td>0.9528</td></tr><tr><td>RDN-ABCD</td><td>40.81</td><td>41.36</td><td>41.38</td><td>52.56</td><td>52.59</td><td>61.72</td><td>34.38</td><td>40.11</td><td>34.21</td><td>40.20</td></tr><tr><td>(11.52M)</td><td>0.9745</td><td>0.9760</td><td>0.9761</td><td>0.9971</td><td>0.9971</td><td>0.9996</td><td>0.9415</td><td>0.9748</td><td>0.9093</td><td>0.9583</td></tr><tr><td>EDSR-ABCD</td><td>41.12</td><td>41.65</td><td>41.65</td><td>52.76</td><td>52.78</td><td>61.78</td><td>34.50</td><td>40.23</td><td>34.36</td><td>40.24</td></tr><tr><td>(12.22M)</td><td>0.9755</td><td>0.9770</td><td>0.9771</td><td>0.9972</td><td>0.9972</td><td>0.9996</td><td>0.9426</td><td>0.9753</td><td>0.9106</td><td>0.9580</td></tr><tr><td>SwinIR-ABCD</td><td>41.29</td><td>41.76</td><td>41.77</td><td>52.82</td><td>52.83</td><td>61.78</td><td>34.62</td><td>40.31</td><td>34.55</td><td>40.35</td></tr><tr><td>(12.10M)</td><td>0.9769</td><td>0.9779</td><td>0.9779</td><td>0.9974</td><td>0.9974</td><td>0.9997</td><td>0.9443</td><td>0.9762</td><td>0.9125</td><td>0.9584</td></tr></table>
229
+
230
+ Table 1. Quantitative comparisons (PSNR (dB)& SSIM) for arbitrary bit-depth expansion on the test set of Sintel [7] & MIT-Adobe FiveK [3](top) and benchmark set including TESTIMAGES 1200 [2], Kodak [8], ESPL v2 [13] (bottom). Red and blue colors indicate the best and the second-best performance, respectively. (-) indicates not reported. $(\mathrm{q}\rightarrow \mathrm{N})$ refers q-bit input to N-bit output BDE.
231
+
232
+ ![](images/eed7a4a25cf6eb9be89fec3284706dfab68b2edd8b8813b9be78b53801129378.jpg)
233
+ 2-bit Input
234
+
235
+ ![](images/d5209cd964947abb8a90f172d97a24f0fc9981a9dae268ea9750837412fc0baf.jpg)
236
+ IPAD [19]
237
+
238
+ ![](images/893f57b5492372408e01206882f5a2e1da8ed7f54cf159dfeb6cd463d54f73ef.jpg)
239
+ BitNet [4]
240
+ Figure 5. Qualitative comparison for 2-bit → 8-bit BDE. D16 [26] does not provide 2-bit recovery pre-trained model. SwinIR [20] is used as an encoder for ABCD.
241
+
242
+ ![](images/b421c8b10c0e7c84a0e7a410deb6206828b81026406dd951dcbeb1489a3a3690.jpg)
243
+ ABCD (Ours)
244
+
245
+ ![](images/eaf36537f709dcd193c97285b2b4722c6b4f645b26c19ba236919596bdd9d2bd.jpg)
246
+ GT
247
+
248
+ other methods. Our training ground truth consists of 16-bit depth images, so 8-bit and 12-bit target expansions are outside the training range of ABCD. However, thanks to the INR that maps bit-wise coordinates to their coefficients, ABCD still achieves the highest PSNR.
249
+
250
+ # 5.3. Ablation Study
251
+
252
+ Network components In Tab. 2, we conducted ablation studies for individual components of ABCD. ABCD contains a phasor estimator in the encoder and bit-wise coordinate concatenation in the decoder. Furthermore, our output is the bit-wise coefficient $(\hat{\mathbf{C}})$ instead of pixel values $(\hat{\mathbf{I}}_{HBD}$ or $\hat{\mathbf{R}})$ . To support this, we train EDSR [17]-based ABCD without each component and mechanism. (-P): En
253
+
254
+ coder without phasor estimator (replaced by ResBlocks), (-S): Decoder without bit-wise query $(s)$ , (+B): Mechanism that estimates residual image $(\hat{\mathbf{R}})$ , and (+L): Addition of long skip connection of LBD images $(\mathbf{I}_{LBD})$ so that the network predicts the natural image $(\hat{\mathbf{I}}_{HBD})$ .
255
+
256
+ The middle part (-P & -S) of Tab. 2 shows that the phasor estimator and the bit-wise query enhance the performance of ABCD, especially for relatively low bit-depth inputs. The phasor estimator improves performance over a wide range. Removing the bit-wise query fixes the output bit-depth to a dedicated value (16-bit) and causes performance drops. The mechanism ablations $\mathrm{ABCD(+B)}$ and $\mathrm{ABCD(+L)}$ degrade the performance of the network. When extracting the residual image $(\hat{\mathbf{R}})$ directly, arbitrary residual images
257
+
258
+ <table><tr><td rowspan="2"></td><td colspan="6">Input Bit → Output Bit</td></tr><tr><td>3 → 16</td><td>4 → 16</td><td>5 → 16</td><td>6 → 16</td><td>7 → 16</td><td>8 → 16</td></tr><tr><td>ABCD</td><td>34.57</td><td>41.65</td><td>47.67</td><td>52.78</td><td>57.24</td><td>61.78</td></tr><tr><td>ABCD(-P)</td><td>34.27</td><td>41.25</td><td>47.36</td><td>52.60</td><td>57.16</td><td>61.70</td></tr><tr><td>ABCD(-S)</td><td>34.55</td><td>41.58</td><td>47.66</td><td>52.77</td><td>57.24</td><td>61.76</td></tr><tr><td>ABCD(+B)</td><td>25.27</td><td>31.54</td><td>37.70</td><td>43.81</td><td>49.87</td><td>55.92</td></tr><tr><td>ABCD(+L)</td><td>34.26</td><td>41.11</td><td>46.87</td><td>51.75</td><td>56.08</td><td>60.52</td></tr></table>
259
+
260
+ Table 2. Quantitative ablation study of ABCD on TESTIMAGES 1200 [2] (PSNR(dB)). Definitions of -P, -S, +B, +L are shown in Sec. 5.3. EDSR [17] is used as an encoder.
261
+
262
+ <table><tr><td rowspan="2"></td><td colspan="5">Input Bit → Output Bit</td></tr><tr><td>2 → 16</td><td>4 → 16</td><td>6 → 16</td><td>8 → 16</td><td>10 → 16</td></tr><tr><td>3-8 (Proposed)</td><td>26.55</td><td>41.65</td><td>52.78</td><td>61.78</td><td>70.98</td></tr><tr><td>Only-2</td><td>26.31</td><td>34.15</td><td>45.56</td><td>57.50</td><td>69.50</td></tr><tr><td>Only-4</td><td>20.55</td><td>41.16</td><td>46.44</td><td>57.53</td><td>69.40</td></tr><tr><td>Only-6</td><td>20.34</td><td>34.44</td><td>52.76</td><td>58.80</td><td>69.74</td></tr><tr><td>Only-8&amp;10</td><td></td><td></td><td>diverged</td><td></td><td></td></tr></table>
263
+
264
+ have different amplitude ranges, which makes the network hard to train.
265
+
266
+ Fixed-bit training We evaluate the effect of training ABCD for a fixed bit depth expansion. The result is shown in Tab. 3. Note that the training range of our ABCD is 3 to 8 bits, so 2-bit and 10-bit inputs are out-of-distribution. We observe that fixed-bit training yields poor performance on out-of-distribution bit levels as well as on in-distribution bit levels. Furthermore, training on high-bit inputs (8 or 10) diverges.
267
+
268
+ # 5.4. Phasor Estimation
269
+
270
+ We demonstrate that our phasor estimator extracts dominant phasors from quantized inputs. As a sanity check, we sort the phasors of the ground truth ($\mathbf{C}$) in descending order of amplitude magnitude and select the top $K$ phasors. With the formula below, we calculate the estimated phases from Eq. (8):
271
+
272
+ $$
273
+ \angle \hat {\Phi} _ {\mathbf {x}} = \tan^ {- 1} \left(\frac {A _ {\mathbf {x}} ^ {1} \cos (\pi \Phi_ {\mathbf {x}} ^ {1}) + A _ {\mathbf {x}} ^ {2} \sin (\pi \Phi_ {\mathbf {x}} ^ {2})}{A _ {\mathbf {x}} ^ {1} \sin (\pi \Phi_ {\mathbf {x}} ^ {1}) + A _ {\mathbf {x}} ^ {2} \cos (\pi \Phi_ {\mathbf {x}} ^ {2})}\right) \tag {12}
274
+ $$
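+
+ For reference, Eq. (12) can be evaluated directly from the estimator outputs. The snippet below is our own transcription of the formula (using `arctan2`), not the authors' evaluation code:
+
+ ```python
+ import numpy as np
+
+ def estimated_phase(A1, A2, P1, P2):
+     """Angle of the estimated phasor from amplitude (A1, A2) and phase (P1, P2) maps, cf. Eq. (12)."""
+     num = A1 * np.cos(np.pi * P1) + A2 * np.sin(np.pi * P2)
+     den = A1 * np.sin(np.pi * P1) + A2 * np.cos(np.pi * P2)
+     return np.arctan2(num, den)
+
+ angles = estimated_phase(*np.random.rand(4, 16))   # four toy (K=16,) vectors
+ ```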
275
+
276
+ We compare the distributions of phasors between the GT and the prediction for low-frequency and high-frequency textures of quantized inputs. In Fig. 6, the red boxes in the first column mark the receptive field (RF) of ABCD, and the second column shows the ground truth of $\hat{\mathbf{C}}$ . The diagram in Fig. 6 represents the accumulated count for each angle. Note that the Fourier transform of an image is conjugate symmetric. We find that our phasor estimator learns the distribution of the dominant phasors of both high-frequency and low-frequency textures. For example, in the third row of Fig. 6, the density of dominant phasors accumulates near $\pi$ (rad), and our ABCD follows those phasors.
277
+
278
+ ![](images/d62623783aa18cd18cc76b6d970c6171632e16a191bce8b56f02015006523995.jpg)
279
+ Figure 6. Comparison of the dominant phasor distribution of the ABCD and GT. SwinIR [16] is used as an encoder.
280
+
281
+ ![](images/cbef50e2f99adf7d261563590904d1da9d1ac9daa9095fe5d4e5d26508f89eec.jpg)
282
+ Figure 7. Blind banding artifact detector (BBAND) reconstruction of our ABCD. The natural images (top) and BBAND images (bottom).
283
+
284
+ Table 3. Ablation study of ABCD for a dedicated bit-depth expansion. Evaluated on TESTIMAGES 1200 (PSNR(dB)). Only- $k$ refers to training the model with $k$ -bit quantized images.
285
+
286
+ <table><tr><td>resolution method</td><td>360p</td><td>480p</td><td>720p</td><td>1080p</td><td>Total</td></tr><tr><td>Original Video</td><td>0.5525</td><td>0.4500</td><td>0.4018</td><td>0.3606</td><td>0.4316</td></tr><tr><td>EDSR-ABCD</td><td>0.4760</td><td>0.4180</td><td>0.3476</td><td>0.3144</td><td>0.3809</td></tr><tr><td>Improvement</td><td>+13.85%</td><td>+7.11%</td><td>+13.49%</td><td>+12.81%</td><td>+11.75%</td></tr></table>
287
+
288
+ Table 4. Quantitative enhancement of BBAND(↓) metric with ABCD for YouTube-UGC [39] dataset.
289
+
290
+ # 5.5. Debanding
291
+
292
+ We validate the effectiveness of our ABCD for debanding on an unseen dataset. Inspired by the relation between false contour artifacts and quantization, we hypothesize that our pre-trained ABCD can resolve banding effects. We calculate the blind banding artifact detector (BBAND) score [35] for the original frames and for the frames enhanced by ABCD. As we have no information about how many bits are required for ABCD to resolve banding effects, we assume that severe artifacts demand a large bit depth expansion. We enhance the original images with $3 \rightarrow 8$ BDE results $(\hat{\mathbf{C}}_{3 \rightarrow 8})$ . The quan-
293
+
294
+ ![](images/c1e16d920e562ca638884f8a2ef5bddfb03791734e410c1b60042557537f654e.jpg)
295
+ Figure 8. False contour artifacts and analysis in bitplanes for 3-bit→8-bit bit depth expansion. Comparison with D16 [26], EDSR-ABCD and SwinIR-ABCD.
296
+
297
+ titative result in Tab. 4 shows our ABCD improves BBAND score in YouTube-UGC dataset [39]. The qualitative results are in Fig. 7.
298
+
299
+ # 6. Discussion
300
+
301
+ Phasor Estimator The dependency on the input image sets the phases of LTE [15] and ABCD apart from each other. While LTE infers phases from the scale factor $(\hat{\mathbf{c}} := (2 / r_x, 2 / r_y))$ , ABCD estimates phases from images. Since LTE is a super-resolution network, it requires local coordinates $(\delta := \mathbf{x}_{\mathrm{query}} - \mathbf{x}_{\mathrm{nearest}})$ to learn frequencies. Unlike LTE, the outputs of ABCD always have the same resolution as the input images. This implies that the local coordinates $\delta$ are $\vec{0}$ . Thus, the formulation is given as follows:
302
+
303
+ $$
304
+ \underbrace{\mathbf{A}_{\mathbf{j}} \odot \left[\begin{array}{c}\cos\left(\pi(\mathbf{F}_{\mathbf{j}} \cdot \delta + h_{p}(\hat{\mathbf{c}}))\right)\\ \sin\left(\pi(\mathbf{F}_{\mathbf{j}} \cdot \delta + h_{p}(\hat{\mathbf{c}}))\right)\end{array}\right]_{\delta = \vec{0}}}_{\text{LTE [15]}} \rightarrow \underbrace{\left[\begin{array}{c}\mathbf{A}_{\mathbf{x}}^{1}\\ \mathbf{A}_{\mathbf{x}}^{2}\end{array}\right] \odot \left[\begin{array}{c}\cos(\pi \boldsymbol{\Phi}_{\mathbf{x}}^{1})\\ \sin(\pi \boldsymbol{\Phi}_{\mathbf{x}}^{2})\end{array}\right]}_{\text{ABCD}}.
305
+ $$
306
+
307
+ Artifacts from Encoder In Fig. 8, CNN-based BDE methods show false contour artifacts when restoring a high range of bit-depth. This artifact appears when networks restore high pixel values near low-valued pixels. Red arrows indicate such artifacts. Since the contour artifacts appear on neither the GT nor the 3-bit input, they are caused by the network. We decompose each prediction into bit-planes to inspect the per-bit-plane behavior. We overcome these artifacts by using the attention-based model SwinIR [16].
308
+
309
+ FLOPs and Memory In Tab. 5 and Fig. 9, we report the number of training parameters, floating point operations (FLOPs), the memory consumption, and the average computation time. We test 4 to 8 bit-depth expansion on the TESTIMAGES1200 [2] dataset. D16 [26] requires that many parameters because of its per-bitplane processing. For a fair comparison, all methods run on Google Colab. Although BitNet [4] and BE-CALF [18] have smaller sizes and FLOPs than ours, their results are about 3dB lower than ours. We apply EDSR-baseline [17] and SwinIR-lightweight [16] from
310
+
311
+ ![](images/80150dc133d41eddd45bc58f78296b00c4a5fd0672e9492d0656cac7d3dcf131.jpg)
312
+ Figure 9. FLOPs and PSNR Comparison with other methods in TESTIMAGES1200 [2] for $4\rightarrow 16$ bit-depth expansion. FLOPs are calculated with image size $1200\times 1200$
313
+
314
+ <table><tr><td>#Eval /Query</td><td>Method</td><td># Params.</td><td>Mem. (GB)</td><td>Time (s)</td></tr><tr><td rowspan="5">1.44M (12002px)</td><td>BitNet [4]</td><td>0.77M</td><td>-</td><td>6.93</td></tr><tr><td>D16 [26]</td><td>14.31M</td><td>4.88</td><td>14.778</td></tr><tr><td>SwinIR-ABCD (ours)</td><td>12.10M</td><td>15.32</td><td>11.238</td></tr><tr><td>EDSR-ABCD (ours)</td><td>12.22M</td><td>11.21</td><td>7.147</td></tr><tr><td>RDN-ABCD (ours)</td><td>11.50M</td><td>10.84</td><td>8.099</td></tr></table>
315
+
316
+ Table 5. Memory consumption (MB) & computation time (s) comparison for an $4\rightarrow 16$ BDE task.
317
+
318
+ their official code and confirm that our framework overcomes the trade-off between computational complexity and performance.
319
+
320
+ # 7. Conclusion
321
+
322
+ We proposed an implicit neural network approach, formulated as a function of a bit-wise query, for BDE. The residual image calculated from bit-wise coefficients recovers an arbitrary depth of missing bit planes with a single training. Furthermore, we show that the proposed method effectively removes severe artifacts such as false contours and blur. Our phasor estimator produces phasor diagrams similar to those of the original image, leading to accurate predictions of the bit-wise coefficients. The results on test and benchmark datasets demonstrate that our network outperforms state-of-the-art models by up to 1.52dB.
323
+
324
+ Acknowledgement This work was partly supported by the grants of the DGIST R&D program of the Ministry of Science and ICT of KOREA (22-KUJoint-02, 21-IJRP-01), Smart HealthCare Program(www.kipot.or.kr) funded by the Korean National Police Agency(KNPA) (No. 230222M01), and Institute of Information & communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (MSIT) (No.2021-0-02068, Artificial Intelligence Innovation Hub).
325
+
326
+ # References
327
+
328
+ [1] Kenichi Arai and Hiroyuki Okazaki. N-dimensional binary vector spaces. Formalized Mathematics, 21(2):75-81, 2013. 3
329
+ [2] Nicola Asuni and Andrea Giachetti. Testimages: A large data archive for display and algorithm testing. Journal of Graphics Tools, 17(4):113-125, 2013. 4, 5, 6, 7, 8
330
+ [3] Vladimir Bychkovsky, Sylvain Paris, Eric Chan, and Frédo Durand. Learning photographic global tonal adjustment with a database of input / output image pairs. In The Twenty-Fourth IEEE Conference on Computer Vision and Pattern Recognition, 2011. 4, 6
331
+ [4] Junyoung Byun, Kyujin Shim, and Changick Kim. BitNet: Learning-based bit-depth expansion. In Asian Conference on Computer Vision, pages 67-82. Springer, 2018. 1, 2, 5, 6, 8
332
+ [5] Yinbo Chen, Sifei Liu, and Xiaolong Wang. Learning Continuous Image Representation With Local Implicit Image Function. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8628-8638, June 2021. 1, 2, 3, 4
333
+ [6] Cheuk-Hong Cheng, Oscar C Au, Chun-Hung Liu, and Ka-Yue Yip. Bit-depth expansion by contour region reconstruction. In 2009 IEEE International Symposium on Circuits and Systems, pages 944-947. IEEE, 2009. 1, 2
334
+ [7] Xiph Foundation. "xiph.org". [Online]. Available: http://www.xiph.org/. 4, 5, 6
335
+ [8] Rich Franzen. Kodak lossless true color image suite. source: http://r0k.us/graphics/kodak, 4(2), 1999. 4, 5, 6
336
+ [9] Xianxu Hou and Guoping Qiu. Image companding and inverse halftoning using deep convolutional neural networks. arXiv preprint arXiv:1707.00116, 2017. 1
337
+ [10] LIU Chun Hung, CA Oscar, PHW WONG, and MC Kung. Bit-depth expansion by adaptive filter. In Proc. of IEEE Int. Sym. on Circuits and Systems, 2008. 1
338
+ [11] Chiyu "Max" Jiang, Avneesh Sud, Ameesh Makadia, Jingwei Huang, Matthias Niessner, and Thomas Funkhouser. Local Implicit Grid Representations for 3D Scenes. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2020. 1, 2
339
+ [12] Diederik P. Kingma and Jimmy Ba. Adam: A Method for Stochastic Optimization. In Yoshua Bengio and Yann LeCun, editors, 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, 2015. 4
340
+ [13] Debarati Kundu, Lark Kwon Choi, Alan C Bovik, and Brian L Evans. Perceptual quality evaluation of synthetic pictures distorted by compression and transmission. Signal Processing: Image Communication, 61:54-72, 2018. 4, 5, 6
341
+ [14] Jaewon Lee, Kwang Pyo Choi, and Kyong Hwan Jin. Learning local implicit fourier representation for image warping. In European Conference on Computer Vision (ECCV), pages 182-200. Springer, 2022. 4
342
+ [15] Jaewon Lee and Kyong Hwan Jin. Local texture estimator for implicit representation function. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1929-1938, 2022. 1, 2, 4, 8
343
+
344
+ [16] Jingyun Liang, Jiezhang Cao, Guolei Sun, Kai Zhang, Luc Van Gool, and Radu Timofte. SwinIR: Image Restoration Using Swin Transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV) Workshops, pages 1833-1844, October 2021. 4, 7, 8
345
+ [17] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Young Mu Lee. Enhanced Deep Residual Networks for Single Image Super-Resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, July 2017. 4, 6, 7, 8
346
+ [18] Jing Liu, Wanning Sun, Yuting Su, Peiguang Jing, and Xiaokang Yang. BE-CALF: bit-depth enhancement by concatenating all level features of dnn. IEEE Transactions on Image Processing, 28(10):4926-4940, 2019. 1, 2, 5, 6, 8
347
+ [19] Jing Liu, Guangtao Zhai, Anan Liu, Xiaokang Yang, Xibin Zhao, and Chang Wen Chen. IPAD: Intensity potential for adaptive de-quantization. IEEE Transactions on Image Processing, 27(10):4860-4872, 2018. 1, 2, 5, 6
348
+ [20] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin Transformer: Hierarchical Vision Transformer Using Shifted Windows. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10012-10022, October 2021. 5, 6
349
+ [21] Rafal Mantiuk, Grzegorz Krawczyk, Karol Myszkowski, and Hans-Peter Seidel. Perception-motivated high dynamic range video encoding. ACM Transactions on Graphics (TOG), 23(3):733-741, 2004. 1
350
+ [22] Lars Mescheder, Michael Oechsle, Michael Niemeyer, Sebastian Nowozin, and Andreas Geiger. Occupancy Networks: Learning 3D Reconstruction in Function Space. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 1, 2
351
+ [23] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing Scenes as Neural Radiance Fields for View Synthesis. In Proceedings of the European Conference on Computer Vision (ECCV), August 2020. 1, 3
352
+ [24] Gaurav Mittal, Vinit Jakhetiya, Sunil Prasad Jaiswal, Oscar C Au, Anil Kumar Tiwari, and Dai Wei. Bit-depth expansion using minimum risk based classification. In VCIP, pages 1-5. Citeseer, 2012. 1
353
+ [25] Jeong Joon Park, Peter Florence, Julian Straub, Richard Newcombe, and Steven Lovegrove. DeepSDF: Learning Continuous Signed Distance Functions for Shape Representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2019. 1
354
+ [26] Abhijith Punnappurath and Michael S Brown. A little bit more: Bitplane-wise bit-depth recovery. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 1, 2, 4, 5, 6, 8
355
+ [27] Nasim Rahaman, Aristide Baratin, Devansh Arpit, Felix Draxler, Min Lin, Fred Hamprecht, Yoshua Bengio, and Aaron Courville. On the Spectral Bias of Neural Networks. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, Proceedings of the 36th International Conference on Machine Learning, volume 97 of Proceedings of Machine
356
+
357
+ Learning Research, pages 5301-5310. PMLR, 09-15 Jun 2019. 1, 2
358
+ [28] Helge Seetzen, Wolfgang Heidrich, Wolfgang Stuerzlinger, Greg Ward, Lorne Whitehead, Matthew Trentacoste, Abhijeeet Ghosh, and Andrejs Vorozcovs. High dynamic range display systems. In ACM SIGGRAPH 2004 Papers, pages 760-768. ACM New York, NY, USA, 2004. 1
359
+ [29] Vincent Sitzmann, Julien Martel, Alexander Bergman, David Lindell, and Gordon Wetzstein. Implicit Neural Representations with Periodic Activation Functions. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 7462-7473. Curran Associates, Inc., 2020. 1, 2, 3
360
+ [30] Vincent Sitzmann, Michael Zollhoefer, and Gordon Wetzstein. Scene Representation Networks: Continuous 3D-Structure-Aware Neural Scene Representations. In H. Wallach, H. Larochelle, A. Beygelzimer, F. d'Alché-Buc, E. Fox, and R. Garnett, editors, Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc., 2019. 1
361
+ [31] Qing Song, Guan-Ming Su, and Pamela C. Cosman. Efficient debanding filtering for inverse tone mapped high dynamic range videos. IEEE Transactions on Circuits and Systems for Video Technology, 30(8):2575-2589, 2020. 2
362
+ [32] Yuting Su, Wanning Sun, Jing Liu, Guangtao Zhai, and Peiguang Jing. Photo-realistic image bit-depth enhancement via residual transposed convolutional neural network. Neurocomputing, 347:200-211, 2019. 1, 2
363
+ [33] Matthew Tancik, Pratul Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan Barron, and Ren Ng. Fourier Features Let Networks Learn High Frequency Functions in Low Dimensional Domains. In H. Larochelle, M. Ranzato, R. Hadsell, M. F. Balcan, and H. Lin, editors, Advances in Neural Information Processing Systems, volume 33, pages 7537-7547. Curran Associates, Inc., 2020. 1, 4
364
+ [34] Zhengzhong Tu, Jessie Lin, Yilin Wang, Balu Adsumilli, and Alan C Bovik. Adaptive debanding filter. IEEE Signal Processing Letters, 27:1715-1719, 2020. 2
365
+ [35] Zhengzhong Tu, Jessie Lin, Yilin Wang, Balu Adsumilli, and Alan C Bovik. BBAND index: a no-reference banding artifact predictor. In ICASSP 2020-2020 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2712-2716. IEEE, 2020. 7
366
+ [36] Robert A Ulichney and Shiufun Cheung. Pixel bit-depth increase by bit replication. In Color Imaging: Device-Independent Color, Color Hardcopy, and Graphic Arts III, volume 3300, pages 232–241. SPIE, 1998. 1, 2
367
+ [37] Pengfei Wan, Oscar C Au, Ketan Tang, Yuanfang Guo, and Lu Fang. From 2d extrapolation to 1d interpolation: Content adaptive image bit-depth expansion. In 2012 IEEE International Conference on Multimedia and Expo, pages 170-175. IEEE, 2012. 1, 2
368
+ [38] Pengfei Wan, Gene Cheung, Dinei Florencio, Cha Zhang, and Oscar C Au. Image bit-depth enhancement via maximum a posteriori estimation of ac signal. IEEE Transactions on Image Processing, 25(6):2896-2909, 2016. 1
369
+
370
+ [39] Yilin Wang, Sasi Inguva, and Balu Adsumilli. YouTube UGC dataset for video compression research. In 2019 IEEE 21st International Workshop on Multimedia Signal Processing (MMSP), pages 1-5. IEEE, 2019. 7, 8
371
+ [40] Yi Xiao, Chao Pan, Yan Zheng, Xianyi Zhu, Zheng Qin, and Jin Yuan. Gradient-guided dcnn for inverse halftoning and image expanding. In Asian Conference on Computer Vision, pages 207-222. Springer, 2018. 2
372
+ [41] Gizem Yüce, Guillermo Ortiz-Jiménez, Beril Besbinar, and Pascal Frossard. A structured dictionary perspective on implicit neural representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19228-19238, 2022. 1
373
+ [42] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual Dense Network for Image SuperResolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), June 2018. 4
374
+ [43] Yang Zhao, Ronggang Wang, Wei Jia, Wangmeng Zuo, Xiaoping Liu, and Wen Gao. Deep reconstruction of least significant bits for bit-depth expansion. IEEE Transactions on Image Processing, 28(6):2847-2859, 2019. 1
abcdarbitrarybitwisecoefficientfordequantization/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0a39acd476dcd12fb2a73017733b46b4106042f9225c8cef182f709c45c4dda
3
+ size 1050245
abcdarbitrarybitwisecoefficientfordequantization/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24e5145246be67ccda77c5dc6b400ccd47988d22e0e4d127b3a8dbcfc717eae4
3
+ size 474557
ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/b8595b73-1c79-4c16-b7bb-6fddbb1f1ffa_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b07828485f02fc3cc49ec3ebd155a9ff9afbc445a54394364f167883fd90c14
3
+ size 73896
ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/b8595b73-1c79-4c16-b7bb-6fddbb1f1ffa_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d8546ec6ff32036059ded700ed49575083e86bc0c4e979267218014b6ea91355
3
+ size 89280
ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/b8595b73-1c79-4c16-b7bb-6fddbb1f1ffa_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22cc80de2692ed781f8a4ad3f3ce0f0af35a2e77a7d62b5b1f29e6eb9943e1b6
3
+ size 10073978
ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/full.md ADDED
@@ -0,0 +1,324 @@
1
+ # ABLE-NeRF: Attention-Based Rendering with Learnable Embeddings for Neural Radiance Field
2
+
3
+ Zhe Jun Tang $^{1}$ Tat-Jen Cham $^{2}$ Haiyu Zhao $^{3}$
4
+
5
+ $^{1}$ S-Lab, Nanyang Technological University $^{2}$ Nanyang Technological University $^{3}$ SenseTime Research
6
+
7
+ # Abstract
8
+
9
+ Neural Radiance Field (NeRF) is a popular method for representing 3D scenes by optimising a continuous volumetric scene function. Its great success lies in applying volumetric rendering (VR), yet VR is also its Achilles' heel when producing view-dependent effects. As a consequence, glossy and transparent surfaces often appear murky. A remedy to reduce these artefacts is to constrain the VR equation by excluding volumes with back-facing normals. While this approach has some success in rendering glossy surfaces, translucent objects are still poorly represented. In this paper, we present an alternative to the physics-based VR approach by introducing a self-attention-based framework on volumes along a ray. In addition, inspired by modern game engines which utilise Light Probes to store local lighting passing through the scene, we incorporate Learnable Embeddings to capture view-dependent effects within the scene. Our method, which we call ABLE-NeRF, significantly reduces 'blurry' glossy surfaces in rendering and produces realistic translucent surfaces that prior art lacks. On the Blender dataset, ABLE-NeRF achieves SOTA results and surpasses Ref-NeRF in all three image quality metrics: PSNR, SSIM, and LPIPS.
10
+
11
+ # 1. Introduction
12
+
13
+ Neural Radiance Field (NeRF) has become the de facto method for 3D scene representation. By representing the scene as a continuous function, NeRF is able to generate photo-realistic novel view images by marching camera rays through the scene. NeRF first samples a set of 3D points along a camera ray and outputs its outgoing radiance. The final pixel colour of a camera ray is then computed using volumetric rendering (VR) which colours are alpha-composed. This simple approach allows NeRF to generate impressive photo-realistic novel views of a complex 3D scene. However, NeRF is unable to produce accurate colours of objects with view-dependent effects. Colours of
14
+
15
+ ![](images/4a9b2ce52bd2012cfc263fa7cafd7986c1637c3c55899f03f79db2088532e9d6.jpg)
16
+ Figure 1. We illustrate two views of the Blender 'Drums' Scene. The surface of the drums appears either translucent or reflective at different angles. As shown, the Ref-NeRF model has severe difficulties interpolating between the translucent and reflective surfaces as the viewing angle changes. Our method demonstrates its superiority over NeRF rendering models by producing such accurate view-dependent effects. In addition, the specularity of the cymbals is rendered much closer to the ground truth compared to Ref-NeRF.
17
+
18
+ translucent objects often appear murky and glossy objects have blurry specular highlights. Our work aims to reduce these artefacts.
19
+
20
+ The exhibited artefacts of the NeRF rendering model are largely due to the inherent usage of VR, as features are accumulated in the colour space.
21
+
22
+ Variants of NeRF attempt to tackle this defect by altering the basis of this VR equation. For instance, Ref-NeRF first predicts the normal vector of each point on the ray. If a point has a predicted normal facing backwards from the camera, its colour is excluded from computation via regularisation. However, prediction of normals in an object's interior is ill-posed since these points are not on actual surfaces. As a consequence, Ref-NeRF achieves some success over the baseline NeRF model, albeit imperfectly.
23
+
24
+ When rendering translucent objects with additional specular effects, NeRF and its variants suffer from the same deficiency. This is due to the computation of $\sigma$, which is analogous to the 'opacity' attribute of a point used in the VR equation. It is also related to the point's transmissivity and its contribution of radiance to its ray. As per the Fresnel effect [5], this property should depend on viewing angles. Similarly, [19] describes a notion of 'alphasphere', an opacity hull of a point that stores an opacity value viewed at direction $\omega$ . Most NeRF methods disregard the viewing angle in computing $\sigma$ . In fig. 1, the surface of the rightmost drum in the Blender scene exhibits changing reflective and translucent properties at different viewing angles. Ref-NeRF and other variants, by discounting the dependency of $\sigma$ on viewing angle, may not render accurate colours of such objects.
25
+
26
+ Additionally, learning to model opacity and colour separately may be inadequate in predicting the ray's colour. Accumulating high-frequency features directly in the colour space causes the model to be sensitive to both opacity and sampling intervals of points along the ray. Therefore, we rework how volumetric rendering can be applied to view synthesis. Inspecting the VR equation reveals that this methodology is similar to a self-attention mechanism: a point's contribution to its ray colour depends on the points lying in front of it. By this principle, we designed ABLE-NeRF as an attention-based framework. To mimic the VR equation, masked attention is applied to points, preventing them from attending to others behind them.
27
+
28
+ The second stage of ABLE-NeRF takes inspiration from modern game engines in relighting objects by invoking a form of memorisation framework called 'baking'. In practice, traditional computer graphics rendering methods would capture indirect lighting by applying Monte Carlo path tracing to cache irradiance and then apply interpolation during run-time. Similarly, game engines would use lightmaps to cache global illumination for lower computational costs. For relighting dynamic objects, localised light probes are embedded in the scene to capture light passing through free space. At run-time, moving objects query from these light probes for accurate relighting. The commonality between all these approaches is the process of 'memorising' lighting information and interpolating them during
29
+
30
+ run time for accurate relighting. As such, we take inspiration from these methods by creating a memorisation network for view synthesis. Given a static scene, we incorporate Learnable Embeddings (LE), which are learnable memory tokens, to store scene information in latent space during training. Specifically, the LE attend to points sampled during ray casting via cross-attention to memorise scene information. To render accurate view-dependent effects, a directional view token, comprising the camera pose, decodes from these embeddings.
31
+
32
+ ABLE-NeRF provides high-quality rendering on novel view synthesis tasks. The memorisation network achieves significant improvements in producing precise specular effects over Ref-NeRF. Moreover, by reworking volumetric rendering as an attention framework, ABLE-NeRF renders much more accurate colours of translucent objects than prior art. On the Blender dataset, ABLE-NeRF excels both quantitatively and qualitatively relative to Ref-NeRF.
33
+
34
+ In summary, our technical contributions are:
35
+
36
+ (1) An approach demonstrating the capability and superiority of transformers modelling a physics based volumetric rendering approach.
37
+ (2) A memorisation based framework with Learnable Embeddings (LE) to capture and render detailed view-dependent effects with a cross-attention network.
38
+
39
+ # 2. Related Work
40
+
41
+ We first review techniques from computer graphics for capturing indirect lighting effects and global illumination. Following this, we discuss how NeRF and its other variants render photo-realistic images of a 3D scene from an unseen viewing angle.
42
+
43
+ Indirect Illumination in Rendering. Rendering with indirect illumination is a widely studied topic. Pioneering works using path tracing [15] or light tracing [10] cast rays from a camera until they hit a point and trace random rays at the visible surface to light sources. However, these methods require heavy computation as sampling multiple rays is a costly operation. Instead, irradiance caching [16] is applied to sparsely sample rays, and the indirect illumination is stored to speed up this process. An object's illumination is then interpolated from its nearby cached values. Other pre-computation based methods, such as radiance transfer and lightmaps [1], first calculate the surface brightness and store it in texture maps for real-time performance. Unlike lightmaps storing surface lighting information, light probes [27] bake lighting information passing through the scene. During run time, dynamic objects would query from the nearest light probes for indirect lighting information. The use of probes can similarly be extended to reflections. In game engines, reflection probes [28] are made to capture images as cubemaps within the scene. These cubemaps are then utilised by objects with
44
+
45
+ reflective materials to produce convincing reflections of the environment.
46
+
47
+ The impetus to incorporate Learnable Embeddings in our work takes inspiration from how these light or reflection probes function. Yet, our work differs from the traditional graphics pipeline in the type of information being captured. Unlike probes in game engines, these embeddings do not exist as physical entities in the 3D scene geometry. Instead, Learnable Embeddings operate within the latent space as learnable latent vectors. In this manner, the LE capture latent information of a given scene. Thus, it is crucial to optimise these LE via training. Similar to relighting dynamic objects by interpolating nearby light probes or reflection probes, new viewing angles would query from these LE to achieve accurate view dependent effects.
48
+
49
+ 3D Scene Representation for View Synthesis Numerous methods have been proposed for generating new images of a scene using only a few captured images. Light field rendering methods [13, 18] characterise the unobstructed flow of light passing through the scene as 4D function and slice this slab differently to generate new views. While these methods require a large number of light field samples to interpolate new views, recent deep learning-based methods [23] only require sparse inputs. Separately, image based rendering methods [6,7,14,26,31] balance a set of weights heuristically or learned to blend nearby input images creating novel views. Scene representation methods also extend to volumetric methods by colouring voxel grids [22] or reconstructing plenoxels [12]. Methods involving neural networks are also capable of learning volumetric scene representation through gradient-based methods [11, 17, 24, 25]
50
+
51
+ The shift towards coordinate-based methods has shown a quantum leap in concise 3D scene representation. With a few layers of MLP, NeRF [20] can map a continuous input of 3D coordinates to the scene geometry and appearance. NeRF can also be extended to dynamic scenes, avatar animations, and even scene editing. These algorithms, which model appearance, typically decompose scene materials into its BRDF properties [32]. As a result, they require strong physics based assumptions such as known lighting conditions or single type materials. On the contrary, Ref-NeRF [30] does not assume these precise physical meanings. This enables Ref-NeRF to avoid relying on such assumptions. Our work follows this school of thought. We do not assume a physics based learning approach as we replace volumetric rendering by an end to end deep learning methodology.
52
+
53
+ Transformers for View Synthesis The use of transformers for view synthesis has gained popularity lately. IBR-Net [31] applies a ray transformer to generate $\sigma$ values before using the VR equation to accumulate colours. In [26], the authors apply a two-stage transformer-based model to aggregate features along epipolar lines in each reference
54
+
55
+ views and then combine these reference views with a view transformer. SRT [21] extracts features from training images with a CNN and then applies transformers to aggregate features before using a target ray to query for a pixel colour. NeRF-in-detail [2] also uses a transformer to propose new sample points along a ray and then applies NeRF to generate a ray colour. Unlike ABLE-NeRF, none of these methods apply transformers to model a physics-based volumetric rendering approach.
56
+
57
+ # 2.1. Neural Radiance Field Overview
58
+
59
+ NeRF represents a 3D scene as a continuous volumetric scene function. It traces a pixel ray $\mathbf{r}(t) = \mathbf{o} + t\mathbf{d}$ into a scene, where $\mathbf{o}$ and $\mathbf{d}$ represent the camera origin and pose. After sampling 3D points along the ray, NeRF predicts each point's opacity using spatial MLPs. A directional MLP then determines the colour of the point. Finally, to compute the colour of a ray, alpha composition with numerical quadrature is applied to these points based on (1).
60
+
61
+ $$
62
+ \hat {\mathbf {C}} (\mathbf {r}) = \sum_ {i = 1} ^ {N} T _ {i} (1 - \exp (- \sigma_ {i} \delta_ {i})) \mathbf {c} _ {i} \tag {1}
63
+ $$
64
+
65
+ where
66
+
67
+ $$
68
+ T _ {i} = \exp \left(- \sum_ {j = 1} ^ {i - 1} \sigma_ {j} \delta_ {j}\right) \tag {2}
69
+ $$
70
+
71
+ NeRF maintains two separate sets of model parameters for the coarse and fine network. The network is optimised with a total squared error between the predicted pixel colour and the true pixel colours of both the coarse and fine network.
72
+
73
+ $$
74
+ \mathcal {L} = \sum_ {\mathbf {r} \in \mathcal {R}} \left[ \left\| \hat {C} _ {c} (\mathbf {r}) - C _ {c} (\mathbf {r}) \right\| _ {2} ^ {2} + \left\| \hat {C} _ {f} (\mathbf {r}) - C _ {f} (\mathbf {r}) \right\| _ {2} ^ {2} \right] \tag {3}
75
+ $$
76
+
77
+ In practice, only the output of the fine network is used to render the final image.
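+ To make the quadrature in Eqs. (1)-(2) concrete, the following is a minimal NumPy sketch of alpha compositing along a single ray; the function and variable names (`composite_ray`, `sigma`, `delta`, `colors`) are our own illustrative choices, not code from NeRF itself.
+
+ ```python
+ import numpy as np
+
+ def composite_ray(sigma, delta, colors):
+     """Alpha-composite N samples along one ray, following Eqs. (1)-(2).
+     sigma:  (N,) non-negative densities predicted per sample
+     delta:  (N,) distances between adjacent samples
+     colors: (N, 3) per-sample RGB radiance
+     """
+     alpha = 1.0 - np.exp(-sigma * delta)                 # opacity of each interval
+     # T_i = exp(-sum_{j<i} sigma_j * delta_j): transmittance up to sample i
+     trans = np.exp(-np.concatenate([[0.0], np.cumsum(sigma * delta)[:-1]]))
+     weights = trans * alpha                              # contribution of each sample
+     return (weights[:, None] * colors).sum(axis=0)       # final ray colour C(r)
+
+ # toy usage: three samples along one ray
+ rgb = composite_ray(np.array([0.1, 2.0, 5.0]),
+                     np.array([0.5, 0.5, 0.5]),
+                     np.random.rand(3, 3))
+ ```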
78
+
79
+ # 3. Method
80
+
81
+ As aforementioned, applying NeRF's volumetric rendering to accumulate features in the colour space causes the outgoing radiance to be highly sensitive to both the opacity $\sigma$ prediction and the point sampling intervals $\delta$ . Despite the $\delta$ intervals, the density $\sigma$ of each point acts as a differential opacity for controlling the accumulated radiance along a ray passing through space [20]. As such, NeRF has difficulty predicting the colour of a surface point exhibiting both transmissive and reflective properties at different angles, resulting in a 'murky' appearance. ABLE-NeRF addresses this issue by diverging from such a physics-based volumetric rendering equation.
82
+
83
+ ![](images/6d7ec833fd8c28ec8e9b7a899c06b6c1e572bad5778daf5a52892efd75bae27d.jpg)
84
+ Figure 2. A visualisation of ABLE-NeRF. Similar to mip-NeRF, we cast a ray and sample for N conic frustum volumes between the near and far boundary. Each volume passes through a Volume Embedding layer consisting of several layers of MLP. A ray token 'R' is appended to the sequence of points before propagating it to the Attention-Based rendering Transformer (AB Transformer) module. After the last transformer layer, the ray token is used to compute a non explicit view-dependent colour. Next, several Learnable Embedding (LE) and a view-dependent token 'V' are appended to the sequence of embedded volumes post AB Transformer module before passing to LE Transformer. Within LE Transformer, LE cross-attend to the embedded volumes to memorise static scene information. LE then processes this information with self-attention and a view-dependent token 'V' decodes from LE. The final colour is produced by a tone mapping function that takes into account both the colour and view tokens, after the MLP head.
85
+
86
+ ![](images/c9222c7184885b06663371b745c54844264362f5903b5daa5d617f706b1a0bdf.jpg)
87
+
88
+ ![](images/be8a96a0df2821281bb3b078b72741486aacb7e4f86c9788f815c4d565ae9a2c.jpg)
89
+
90
+ Instead, we formulate an attention-based network in ABLE-NeRF to determine a ray's colour. These changes give ABLE-NeRF the flexibility to selectively assign attention weights to points, compared to alpha compositing point features (2) along a ray. We constrain the attention mechanism by introducing masks where frontal points are restricted from attending to rear points. This masking strategy allows us to encode viewing-direction information implicitly. In addition, to capture view-dependent appearance caused by forms of indirect illumination, we incorporate LE as a methodology inspired by light and reflection probes from modern game engines.
91
+
92
+ # 3.1. Attention-based Volumetric Rendering
93
+
94
+ NeRF predicts both $\sigma$ value and colour of a sampled point. As a consequence, NeRF faces difficulties in predicting a surface that exhibits both translucent and reflective properties at different angles shown in fig. 1. Authors of [30] attribute NeRF's inadequacy in predicting an object's specular effects to the difficulty in interpolating between 'highly complicated functions' for glossy appearance. We further extend this argument, stating it is even more challenging to interpolate between glossy and translucent appearances of a sampled point that exudes both characteristics.
95
+
96
+ To solve this issue, we can decompose the problem into rendering translucent and reflective surfaces separately. Determining a point's $\sigma$ is equivalent to controlling a point's
97
+
98
+ opacity [20]. Therefore, points along a translucent surface should have low $\sigma$ values to describe a low radiance accumulation along a ray. Conversely, for an entirely reflective surface, the points of the reflective surface should have a high $\sigma$ value to indicate a high outgoing radiance. Thus, predicting a point's $\sigma$ is critical in describing its outgoing radiance. However, in NeRF, $\sigma$ is fixed for a point that is either translucent or reflective at different angles. In this scenario, the task of predicting a point's outgoing radiance is left to the viewing directional MLP, which is ill-equipped to do so.
99
+
100
+ Inspired by the use of volumetric rendering (2), the weight of a point depends on the weights of itself and the frontal points lying along the same ray. In our work, we apply a transformer model to generate the weights of individual points of the same ray. With this approach, we do not generate $\sigma$ values directly based on the spatial position of a sampled conic frustum of volume [3]. Instead of assigning weights based on $\sigma$ and $\delta$ as per (1), the importance of a point contributing to a ray's radiance is determined by an attention mechanism.
101
+
102
+ For a given ray, we sample $N$ conic frustums of volumes along it, encoded with the Integrated Positional Encoding (IPE) described in mip-NeRF. Each conic volume passes through a volume embedding block of four MLP layers to generate a volume embedding $\mathbf{v}^i$ , where $i$ denotes the position of the conic volume along the ray starting from the camera, with latent dimension $D$ . Similar to
103
+
104
+ ViT [9] and BERT's [class] token [8], we prepend a ray token $\mathbf{R}$ of the same dimension to the sequence of volume embeddings. We abuse the notation of sets to describe the input sequence $\mathbb{Z}_0$ to the first transformer layer, as the set comprising the ray token and the sequence of embedded conic volumes, as described in (4). The subscript notation in $\mathbb{Z}$ is used to denote the number of successive self-attention operations on the set.
105
+
106
+ In a manner similar to (2), we utilise a 'masking' technique to limit the attention of volume embeddings solely to those that lie ahead of them along the ray, thereby excluding all others. Specifically, a volume embedding can only attend to itself, the ray token, and other volume embeddings lying in front of it. This exclusion is represented in the set exclusion shown in (5), where conic volumes sequenced behind $\mathbf{v}^i$ are excluded from the standard self-attention operation. The masking is expressed by setting the scaled dot-product attention to $-\infty$ in the input of the softmax, similar to the decoder settings of Transformers, to denote illegal connections [29]. This 'masking' constraint allows us to implicitly encode view-dependent information; zero masking indicates a bi-directional ray, while masking constrains it to being uni-directional. We demonstrate in sec. 5.1 the importance of masking. No masking is applied to the ray token (6).
107
+
108
+ After the final encoder layer L, a single MLP classification head is attached to $\mathbf{R}_L$ to predict the colour of the ray (7). The equations are presented below.
109
+
110
+ $$
111
+ \mathbb {Z} _ {0} = \left\{\mathbf {R}, \mathbf {v} ^ {1}, \mathbf {v} ^ {2}, \dots , \mathbf {v} ^ {N} \right\} \tag {4}
112
+ $$
113
+
114
+ $$
115
+ \mathbf {v} _ {l} ^ {i} = \operatorname {A t t} \left(\mathbb {Z} _ {l - 1} \backslash \left\{\mathbf {v} _ {l - 1} ^ {i + 1}, \dots , \mathbf {v} _ {l - 1} ^ {N} \right\}\right) \tag {5}
116
+ $$
117
+
118
+ $$
119
+ \mathbf {R} _ {l} = \operatorname {A t t} \left(\mathbb {Z} _ {l - 1}\right) \tag {6}
120
+ $$
121
+
122
+ $$
123
+ \mathbf {y} = \operatorname {M L P} \left(\mathbf {R} _ {L}\right) \tag {7}
124
+ $$
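+ As a concrete illustration of Eqs. (4)-(6), below is a single-head PyTorch sketch of the masked attention over the sequence $[\mathbf{R}, \mathbf{v}^1, \dots, \mathbf{v}^N]$: each volume token may attend only to the ray token, itself, and the volumes in front of it, while the ray token attends to everything. The helper names and tensor shapes are illustrative assumptions, not the released implementation.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def ray_attention_mask(n_volumes: int) -> torch.Tensor:
+     """Boolean mask for the sequence [R, v^1, ..., v^N].
+     True marks an illegal connection (set to -inf before the softmax)."""
+     n = n_volumes + 1                        # +1 for the ray token R at index 0
+     mask = torch.triu(torch.ones(n, n, dtype=torch.bool), diagonal=1)
+     mask[0, :] = False                       # the ray token attends to everything (Eq. 6)
+     return mask                              # row i > 0 masks the volumes behind v^i (Eq. 5)
+
+ def masked_self_attention(z, mask):
+     """Single-head scaled dot-product attention with masking.
+     z: (n, d) sequence of [ray token, volume embeddings]."""
+     d = z.shape[-1]
+     scores = (z @ z.t()) / d ** 0.5
+     scores = scores.masked_fill(mask, float('-inf'))
+     return F.softmax(scores, dim=-1) @ z
+
+ N, D = 4, 8                                  # toy sizes
+ z = torch.randn(N + 1, D)                    # [R, v^1, ..., v^N]
+ z = masked_self_attention(z, ray_attention_mask(N))
+ ```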
125
+
126
+ # 3.2. Hierarchical Volume Sampling with Coarse-Fine Feature Propagation
127
+
128
+ We follow the general NeRF rendering strategy in creating two networks: coarse and fine. In NeRF, the coarse network uses $N_{c}$ stratified samples as inputs and then resamples $N_{f} = \frac{1}{2} N_{c}$ points. Next, the fine network uses the total $N_{c} + N_{f}$ points to produce the final colour. Unlike NeRF, mip-NeRF samples $N_{c} = N_{f}$ conic frustum volumes for each of the coarse and fine networks. The final predicted ray colour uses only $N_{f}$ samples for computation, discarding information from the coarse network. In our work, the coarse network also uses $N_{c}$ stratified samples. To generate $N_{f} = N_{c}$ samples in our fine network, we sample from the attention weights of the output coarse ray token at state $\mathbf{R}_L^C$ (after $L$ layers attending to all the coarse volume embeddings in the coarse network). Unlike mip-NeRF which discards coarse sample information entirely, we retain this information by reusing coarse ray token as the input fine ray
129
+
130
+ token $(\mathbf{R}_0^F = \mathbf{R}_L^C)$ for the fine network. Thus, we retain the ray representation from the coarse network. This approach allows us to avoid the quadratic cost of scaling up to an entire $N_{c} + N_{f}$ samples in every transformer layer of the fine network and only rely on $N_{f}$ samples.
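+ One possible reading of this coarse-to-fine hand-off, written as a short PyTorch sketch: fine sample positions are drawn in proportion to the coarse ray token's attention over the coarse volumes, and the coarse ray token is reused as the fine network's input ray token. The sampling function below is an illustrative assumption, not the paper's exact procedure.
+
+ ```python
+ import torch
+
+ def propose_fine_samples(attn_weights, bin_edges, n_fine):
+     """Draw n_fine sample positions in proportion to the coarse ray token's
+     attention over the N_c coarse volumes.
+     attn_weights: (N_c,), bin_edges: (N_c + 1,)."""
+     pdf = attn_weights / attn_weights.sum()
+     idx = torch.multinomial(pdf, n_fine, replacement=True)   # pick bins by attention mass
+     lo, hi = bin_edges[idx], bin_edges[idx + 1]
+     return lo + (hi - lo) * torch.rand(n_fine)               # jitter inside each bin
+
+ # Coarse information is retained by reusing the coarse ray token:
+ # R_fine_0 = R_coarse_L
+ ```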
131
+
132
+ # 3.3. Learnable Embeddings for View-Dependent Appearance
133
+
134
+ NeRF's rendering process strictly calculates the radiance of points along a ray. However, the directional MLP is insufficient in computing the specularities of a point. Other NeRF variants attempt to resolve this with some success by predicting a reflection direction of each point [30]. The general rendering equation [15] describes how indirect illumination should include multi-bounce lighting, where light reflects off surfaces and shines upon other surfaces. In NeRF's strict ray casting approach, only points on the ray are used for radiance computation. Consequently, NeRF's rendering model can only coarsely approximate direct and indirect illumination using a view direction. We are interested in resolving this issue by capturing the indirect illumination effects radiated by other possible sources. Hence, it is imperative to formulate a query process for external sources beyond volumes along a ray. Inspired by game engines' usage of probes, we create LE to store static scene information. These LE serve as a form of memory, which allows us to design a secondary branch of the attention mechanism, as seen in fig. 2.
135
+
136
+ Like the ViT class token, Learnable Embeddings (LE) in our work are trainable network parameters (memory tokens) used to capture static lighting information by querying from conic frustums in latent space. The iterative training process whereby LE attends to conic volumes in the scene allows the scene lighting information to be encoded as memory. During inference, conic volumes are mapped into latent space via these embeddings and then decoded with a view directional token. In our architecture, the view token is a camera pose Fourier encoded by 16 bands and mapped to the same dimension as LE via a linear layer.
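+ The LE branch can be pictured with the following PyTorch sketch: a bank of learnable memory tokens cross-attends to the embedded volumes, processes the result with self-attention, and is then read out by the view token. The layer choices, the absence of residual connections, and the class name are simplifying assumptions for illustration only.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class LEBlock(nn.Module):
+     """Illustrative sketch of the LE branch (not the paper's exact architecture)."""
+     def __init__(self, n_le=32, dim=192, heads=4):
+         super().__init__()
+         self.le = nn.Parameter(torch.randn(n_le, dim))        # Learnable Embeddings (memory tokens)
+         self.cross = nn.MultiheadAttention(dim, heads, batch_first=True)
+         self.self_att = nn.MultiheadAttention(dim, heads, batch_first=True)
+         self.decode = nn.MultiheadAttention(dim, heads, batch_first=True)
+
+     def forward(self, volumes, view_token):
+         # volumes: (1, N, dim) embedded conic volumes; view_token: (1, 1, dim)
+         le = self.le.unsqueeze(0)                             # (1, n_le, dim)
+         le, _ = self.cross(le, volumes, volumes)              # LE query the scene volumes
+         le, _ = self.self_att(le, le, le)                     # LE process the stored information
+         out, _ = self.decode(view_token, le, le)              # view token reads from LE
+         return out                                            # view-dependent feature
+ ```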
137
+
138
+ # 3.4. Tone Mapping
139
+
140
+ The attention-based rendering backbone outputs the direct illumination exuded by the conic frustums of volumes along the ray. Separately, the cross-attention branch with LE outputs the view-dependent illumination of these volumes. This separation prevents the network from overfitting. To combine both outputs, we apply a fixed mapping function to convert linear colours to sRGB, capped to [0,1], as in Ref-NeRF [30].
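+ For reference, the standard linear-RGB to sRGB transfer curve, which we assume matches the fixed mapping referred to here, can be written as the short sketch below.
+
+ ```python
+ import numpy as np
+
+ def linear_to_srgb(c):
+     """Standard linear-RGB -> sRGB curve, with values capped to [0, 1].
+     Assumed to be the fixed tone-mapping function described above."""
+     c = np.clip(c, 0.0, 1.0)
+     return np.where(c <= 0.0031308, 12.92 * c, 1.055 * np.power(c, 1.0 / 2.4) - 0.055)
+ ```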
141
+
142
+ # 4. Experiments
143
+
144
+ We implement our model on two datasets: the Blender dataset and the Shiny Blender dataset. Similar to mip-NeRF [3],
145
+
146
+ <table><tr><td>Model</td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td></tr><tr><td>PhySG</td><td>20.60</td><td>0.861</td><td>0.144</td></tr><tr><td>VolSDF</td><td>27.96</td><td>0.932</td><td>0.096</td></tr><tr><td>NSVF</td><td>31.74</td><td>0.953</td><td>0.046</td></tr><tr><td>NeRF</td><td>32.38</td><td>0.957</td><td>0.043</td></tr><tr><td>Mip-NeRF</td><td>33.09</td><td>0.961</td><td>0.043</td></tr><tr><td>Ref-NeRF</td><td>33.99</td><td>0.966</td><td>0.038</td></tr><tr><td>Ours, no LE</td><td>34.05</td><td>0.963</td><td>0.041</td></tr><tr><td>Ours</td><td>35.02</td><td>0.975</td><td>0.035</td></tr></table>
147
+
148
+ we sample conic frustums of volumes along a ray. Our model maintains two networks, coarse and fine. The number of transformer layers, $L$ , described in sec. 3.1 is 2 and 6 for the coarse and fine networks respectively. The coarse network is designed as a lighter network with fewer layers, as its purpose is to generate fine samples, similar to the mip-NeRF 360 [4] proposal MLP. We sample 192 conic frustums in total, 96 in each network, and include 32 LE (shared by the coarse and fine networks) to store view-dependent information. The volume embedding module consists of 4 MLP layers, each with 192 hidden units and ReLU activations. The dimension of each transformer is set to 192, the same dimension as the volume embedding layers, and in the feed-forward layers the FF hidden unit ratio is set to 1:3. For the Shiny Blender dataset, we set the number of LE to 16, as it contains simpler objects compared to the standard Blender dataset. Each scene is optimised for 250k iterations.
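+ For convenience, the hyperparameters reported above can be summarised in a small config object; this is only a restatement of the numbers in this section, not an official configuration file.
+
+ ```python
+ from dataclasses import dataclass
+
+ @dataclass
+ class ABLENeRFConfig:
+     coarse_layers: int = 2              # transformer layers L in the coarse network
+     fine_layers: int = 6                # transformer layers L in the fine network
+     samples_per_network: int = 96       # conic frustums per network (192 in total)
+     num_learnable_embeddings: int = 32  # 16 for the Shiny Blender dataset
+     embed_mlp_layers: int = 4           # depth of the volume embedding module
+     hidden_dim: int = 192               # MLP hidden units and transformer width
+     ff_ratio: int = 3                   # feed-forward hidden units = 3 x hidden_dim
+     train_iters: int = 250_000          # optimisation steps per scene
+ ```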
149
+
150
+ On each dataset, we evaluate ABLE-NeRF with three commonly used image quality metrics: PSNR, SSIM, and LPIPS. A full breakdown of per-scene scores is included in the supplementary materials.
151
+
152
+ # 4.1. Blender Dataset
153
+
154
+ We compare ABLE-NeRF with the latest neural-based synthesis networks on the standard Blender dataset that originated from NeRF's paper. The results in Table 1 show that our work surpasses prior art, including the top-performing NeRF-based method, which applies physics-based volumetric rendering.
155
+
156
+ ABLE-NeRF also outperforms prior art qualitatively in rendering photo-realistic appearances of surfaces. As seen in fig. 3, ABLE-NeRF renders compelling visuals of highly complex surfaces in the Blender Ship scene where the surfaces of the waves resemble the ground truth more closely compared to Ref-NeRF. In the Materials scene, ABLE-NeRF produces reflections of intra-scene objects, attributed to the use of LE, which captures multi-bounce lighting effects. The appearance of reflections of spheres off another neighbouring sphere (reflections of reflections) is clearer
157
+
158
+ ![](images/da1a318f2e415317f47f838ac94374fd2915d003164bb971b09ff552218c5edb.jpg)
159
+
160
+ ![](images/c6c9e9200ce20737680393788708517f3ef3045482c091a8c6dcaaec52cb8956.jpg)
161
+
162
+ ![](images/3fc13b945303a36c688bda14c5f143c6c88bb1fe8281c179034b730afec5ef96.jpg)
163
+
164
+ ![](images/91dce48574ed83bf7298f56c1732235292b2b50115dadfc1a7f3ac2ba634832a.jpg)
165
+
166
+ ![](images/6f0af363748e4b823f8be9dce4e8e8d0d508edbff0cad49fa1cc1cb9ac6fe58e.jpg)
167
+
168
+ ![](images/83653025384043da49d0855f8b754359ccb95b2364c2d5d6d8c48862e00f9f27.jpg)
169
+
170
+ ![](images/1d884241896324278c81d67ce8089d7877b64537f1fb2dd463d32ddefdf2149f.jpg)
171
+
172
+ ![](images/4ee55264c8755de9c39313c735d00eccf80fa2e3ce7b274743b4b36c0f7b5230.jpg)
173
+
174
+ ![](images/fb5d59c0f467eb62f23a56dd9d7239a221d3f35a8b165239aa323a0da9a810dc.jpg)
175
+
176
+ ![](images/600638417c271274976ecb27362ba0327d5677b1e964e88c6caffe2cf997f5f9.jpg)
177
+ Ref-NeRF
178
+
179
+ ![](images/67e404bacc18a007f5c3e8fd22016736b0780792864b41d1e224433f419c616c.jpg)
180
+ ABLE-NeRF
181
+
182
+ ![](images/2069d673c799b7faae47eef07f67f4dc15ae0707b53d2a88ae67e450d27e5b54.jpg)
183
+ Ground Truth
184
+ Figure 3. ABLE-NeRF significantly improves upon the visual realism of highly complex surfaces such as the waves in the Blender ship scene. Furthermore, ABLE-NeRF is able to capture intra-scene reflections of neighbouring spheres off a glossy sphere in the Blender Materials scene. Top-performing NeRF-based variants often fail in producing surfaces of complex geometries and challenging view-dependent multi-bounce lighting.
185
+
186
+ Table 1. Baseline comparisons of ABLE-NeRF and previous approaches on Blender dataset. Results extracted from [30].
187
+
188
+ <table><tr><td>Model</td><td>PSNR ↑</td><td>SSIM ↑</td><td>LPIPS ↓</td></tr><tr><td>PhySG</td><td>26.21</td><td>0.921</td><td>0.121</td></tr><tr><td>Mip-NeRF</td><td>29.21</td><td>0.942</td><td>0.092</td></tr><tr><td>Ref-NeRF (no pred. normals)</td><td>30.91</td><td>0.936</td><td>0.105</td></tr><tr><td>Ref-NeRF</td><td>35.96</td><td>0.967</td><td>0.058</td></tr><tr><td>Ours</td><td>33.88</td><td>0.969</td><td>0.075</td></tr></table>
189
+
190
+ Table 2. Baseline comparisons of ABLE-NeRF and previous approaches on Shiny Blender dataset. Results extracted from [30].
191
+
192
+ compared to standard ray-casting approaches of NeRF. This highlights the importance of maintaining LE to capture indirect lighting effects.
193
+
194
+ # 4.2. Shiny Blender Dataset
195
+
196
+ Compared to the Blender dataset by NeRF [20], the Shiny Blender dataset by Ref-NeRF [30] contains objects with more glossy surfaces. It is important to note that the
197
+
198
+ ![](images/1fa11eb572258b7560413c1077114a9cdede550f5cdb9f8cdb6a042afdca158d.jpg)
199
+ Ref-NeRF
200
+
201
+ ![](images/b716edfa8ec27c0493eeeae6ec844ef4327ee693450da3f123b4df84d32392b3.jpg)
202
+ ABLE-NeRF
203
+
204
+ ![](images/7635dc6d7b58dd6cb7152ce21eb53f5922189174c8cb174342b06c3ed700873c.jpg)
205
+ Ground Truth
206
+
207
+ ![](images/eb74824dd4e7df0017ddf8660b6ea8118a7c0e5b1b1e09be51843551f61be7ac.jpg)
208
+ Figure 4. ABLE-NeRF is able to render intra-scene surface reflections better than Ref-NeRF. As shown in the Shiny Blender Coffee scene, the reflection of the teaspoon on the side of the cup appears more apparent than in Ref-NeRF.
209
+ (a)
210
+
211
+ ![](images/64c1475f3a44bab87c95e82021356808ea8e125a382206930f18671f1afeab95.jpg)
212
+ (c)
213
+
214
+ ![](images/e27067ed039ee3f7a5a3c30aa84c85c1529af307d80c6d0425225253d87548c9.jpg)
215
+ (d)
216
+ Figure 5. Prior to tone-mapping, (a) and (c) are outputs from the AB Transformer while (b) and (d) are outputs from the LE Transformer.
217
+
218
+ Shiny Blender dataset mostly consists of singular objects with simple geometries, where the surfaces are smooth and rounded. As a result, Ref-NeRF outperforms ABLE-NeRF in terms of PSNR and LPIPS since the normals for such objects are easier to predict, compared to the complex surfaces in the standard Blender dataset of NeRF. For example, for a smooth rounded 'Shiny Ball', Ref-NeRF outperforms ABLE-NeRF due to its simpler geometry. However, for a more complex surface such as the 'Toaster', ABLE-NeRF outperforms Ref-NeRF. We display Ref-NeRF ablation study results with no normal predictions to support our case. Without normal predictions, ABLE-NeRF surpasses Ref-NeRF by a wide margin.
219
+
220
+ It is worth highlighting to readers that ABLE-NeRF excels at capturing intra-scene reflections of surfaces caused by multi-bounce lighting, which occur in highly complex scenes. In such scenarios, the reflections of objects interact with other objects within the scene. In fig. 4, we show that the reflection of the teaspoon off the cup in the 'Coffee' scene is rendered close to the ground truth. Ref-NeRF fails to capture such intra-scene reflections compared to ABLE-NeRF. The intra-scene reflection due to multi-bounce lighting is well captured, as shown in fig. 5.
221
+
222
+ # 5. Architectural Analysis
223
+
224
+ # 5.1. Masking Strategy
225
+
226
+ The masking strategy is imperative in allowing transformers to model volumetric rendering as an end to end deep learning based approach. Without masks, the model
227
+
228
+ ![](images/740287bdb4ae39a41a8371e51f4acb09ea86b72c87c128e67435db81b6aa84fa.jpg)
229
+ No Masking
230
+
231
+ ![](images/a71bd5cd5856d4eff8aee44263459a41864eec8c62faa6e7b277146890b29c15.jpg)
232
+ With Mask
233
+
234
+ ![](images/7295b7829a1b77860fa0645d099a00228f85b37747c8e3ef8d444688d43bac79.jpg)
235
+ Ground Truth
236
+
237
+ ![](images/2bd36fffb96f88606b1cdd4ce08e35c0b6c8ddae75f9144b48cdbbb84edc8449.jpg)
238
+ Figure 6. Here we show the importance of masking rear points from frontal points along a camera ray. Without a mask, a bidirectional ray is implied, causing the network to have difficulty rendering the object's surface accurately. With the masking strategy, we enable transformers to mimic a volumetric rendering strategy and also implicitly encode view-directional information.
239
+ Original
240
+ Figure 7. As we corrupt the weights of LE with additive Gaussian noise, we observe that the view-dependent surfaces of the drum scene change. As we continue to destroy the weights of LE by setting them to zero, we corrupt the specularities and transparencies of the drums. Observe that the specularity in the left cymbal of the rightmost figure is completely eradicated. Diffuse surfaces largely remain unchanged perceptually.
241
+
242
+ ![](images/d930ae70429fb6acb32a285c981cade9de2ff92e8495fc4bbf24323a8760b83c.jpg)
243
+ Gaussian Noise Addition
244
+
245
+ ![](images/65696fa1baa588063ebfc81e28837c6cc65df08d0eaa884b03e8eeaa06f964c4.jpg)
246
+ Resetting LE
247
+
248
+ would render inaccurate surfaces, as seen in fig. 6. By including masks, we implicitly encode a uni-directional ray versus a non-masked bidirectional ray, as shown in the figure. We have attempted to introduce uni-directional ray information without masks by appending a volume's position on a ray. However, this attempt is ineffective compared to our original masking strategy.
249
+
250
+ # 5.2. Learnable Embeddings
251
+
252
+ Learnable Embeddings Inclusion We validate our model design choices by performing an ablation study on the Blender dataset. In this setup, we exclude LE and compute the final ray colour using only the ray token. The results are included in Table 1. Without LE, the model performs comparably to Ref-NeRF. By including LE, ABLE-NeRF has the flexibility to attend to embeddings not constrained to the line of the ray. Evidently, we demonstrate in fig. 8 that LE allow our model to capture better view-dependent specularities.
253
+
254
+ Perturbing Learnable Embeddings We formally described LE as a form of memory for a static scene. As a form of analysis to understand what LE are effectively memorising, we perturbed this memory by adding Gaussian noise to corrupt the memory tokens.
255
+
256
+ ![](images/31f875289ac121d0569a54e3eb28b6563faad4e873c930d006346f00aba741d7.jpg)
257
+ Ground Truth
258
+
259
+ ![](images/22189eadc628bd61ee50eaeb002b78ed47d9f42c441cdc6ade8e32f6286d0120.jpg)
260
+ Complete Model
261
+ Figure 8. We visualise how ABLE-NeRF benefits from the use of transformers for volumetric rendering and also the inclusion of LE. Observe the specularities of the right and centre cymbals. Removing LE causes the model to fail in capturing specular effects effectively on the cymbals. Even without LE, the basic backbone of using transformers as a deep learning VR-based approach allows us to render the translucent portions of the drums more accurately compared to NeRF-based approaches.
262
+
263
+ ![](images/114878c18be5130aa0bf26b5171c142e440af0bb98c20c02184b70a8b8afa068.jpg)
264
+ No Learnable Embeddings
265
+
266
+ ![](images/a382b2026754627f3fa4a6e38af89ff8be65cc2210523371c058515a8f928d5b.jpg)
267
+ Ref-NeRF
268
+
269
+ ![](images/bc3c62247ec9c14ec75e771c97a67feb7c0eefc601e69f412585ca1d81b7a54c.jpg)
270
+ Figure 9. We extract the per-volume attention from the ray token. By selecting the highest attention weight to the volume, we are able to plot a depth map based on the distance traversed to that conic volume frustum along the ray from the camera origin.
271
+
272
+ ![](images/ca89da686f4abac0b7a370c41c7ebbecf56582b8b86b67a805ef82a64a42ae22.jpg)
273
+
274
+ Lastly, we wiped the memory of LE, collapsing all memory into a single entity, by setting its weights to zero. With reference to fig. 7, observe that the diffuse surfaces remain perceptually unchanged while view-dependent surfaces with specularities and translucency are affected. This analysis offers us insights into how LE could be modified to edit scenes dependent on viewing direction in future work.
275
+
276
+ # 5.3. Attention Maps as Depth Maps
277
+
278
+ As the ray token selectively assigns attention weights to conic volume frustums along the ray, the volume with the highest attention weight could imply a surface of the object in the scene. With the attention map, we plot a depth map based on the distance traversed from the camera origin to the volume with the highest attention weight. Fig. 9 illustrates the capability of ABLE-NeRF in generating depth maps from attention weights.
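+ A minimal sketch of this depth read-out, assuming the per-ray attention weights and per-sample distances are available as tensors (the shapes below are illustrative):
+
+ ```python
+ import torch
+
+ def depth_from_attention(attn_weights, sample_distances):
+     """For each ray, report the distance to the conic volume that receives the
+     highest attention from the ray token.
+     attn_weights, sample_distances: (rays, N)."""
+     idx = attn_weights.argmax(dim=-1, keepdim=True)             # most-attended volume per ray
+     return torch.gather(sample_distances, -1, idx).squeeze(-1)  # (rays,) depth values
+ ```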
279
+
280
+ # 6. Conclusion
281
+
282
+ We have highlighted the general issues of NeRF-based rendering methods that use physics-based volumetric rendering, which cause difficulties in rendering complex surfaces that exhibit both translucent and specular appearances. Our model, ABLE-NeRF, ameliorates such issues by applying a deep learning-based method using masking on transformers to learn a physics-based volumetric rendering method. With the attention weights generated by transformers, we can re-sample a 3D space effectively with visual content and output a depth map from an attention map. Lastly, we have included Learnable Embeddings as a form of memorisation framework to capture view-dependent lighting effects in latent space and allow the view angle token to query these LE beyond a ray for accurate view-dependent visuals. These contributions allow ABLE-NeRF to significantly improve upon prior art in novel view synthesis. We believe that our work paves the way forward in rendering accurate visuals of complex objects and scenes, as well as hinting at the potential for new scene editing methods by reprogramming LE. Our code is available at https://github.com/TangZJ/able-nerf.
283
+
284
+ Acknowledgements This study is supported under the RIE2020 Industry Alignment Fund - Industry Collaboration Projects (IAF-ICP) Funding Initiative, as well as cash and in-kind contribution from the industry partner(s).
285
+
286
+ # References
287
+
288
+ [1] Michael Abrash. *Quake's lighting model: Surface caching*. Graphic programming black book, 2000. 2
289
+ [2] Relja Arandjelović and Andrew Zisserman. Nerf in detail: Learning to sample for view synthesis. arXiv preprint arXiv:2106.05264, 2021. 3
290
+ [3] Jonathan T Barron, Ben Mildenhall, Matthew Tancik, Peter Hedman, Ricardo Martin-Brualla, and Pratul P Srinivasan. Mip-nerf: A multiscale representation for anti-aliasing neural radiance fields. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5855-5864, 2021. 4, 5
293
+ [4] Jonathan T Barron, Ben Mildenhall, Dor Verbin, Pratul P Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5470–5479, 2022. 6
294
+ [5] Max Born and Emil Wolf. Principles of optics: electromagnetic theory of propagation, interference and diffraction of light. Elsevier, 2013. 2
295
+ [6] Chris Buehler, Michael Bosse, Leonard McMillan, Steven Gortler, and Michael Cohen. Unstructured lumigraph rendering. In Proceedings of the 28th annual conference on Computer graphics and interactive techniques, pages 425-432, 2001. 3
296
+ [7] Paul E Debevec, Camillo J Taylor, and Jitendra Malik. Modeling and rendering architecture from photographs: A hybrid geometry-and image-based approach. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 11-20, 1996. 3
297
+ [8] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 5
298
+ [9] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 5
299
+ [10] Philip Dutre and Yves D Willems. Importance-driven monte carlo light tracing. In Photorealistic Rendering Techniques, pages 188-197. Springer, 1995. 2
300
+ [11] John Flynn, Michael Broxton, Paul Debevec, Matthew DuVall, Graham Fyffe, Ryan Overbeck, Noah Snavely, and Richard Tucker. Deepview: View synthesis with learned gradient descent. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2367-2376, 2019. 3
301
+ [12] Sara Fridovich-Keil, Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 5501–5510, 2022. 3
302
+ [13] Steven J Gortler, Radek Grzeszcuk, Richard Szeliski, and Michael F Cohen. The lumigraph. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 43-54, 1996. 3
303
+ [14] Peter Hedman, Julien Philip, True Price, Jan-Michael Frahm, George Drettakis, and Gabriel Brostow. Deep blending for free-viewpoint image-based rendering. ACM Transactions on Graphics (TOG), 37(6):1-15, 2018. 3
304
+ [15] James T Kajiya. The rendering equation. In Proceedings of the 13th annual conference on Computer graphics and interactive techniques, pages 143-150, 1986. 2, 5
305
+ [16] Jaroslav Krivanek and Pascal Gautron. Practical global illumination with irradiance caching. Synthesis lectures on computer graphics and animation, 4(1):1-148, 2009. 2
306
+
307
+ [17] Samuli Laine and Tero Karras. Efficient sparse voxel octrees. IEEE Transactions on Visualization and Computer Graphics, 17(8):1048-1059, 2010. 3
308
+ [18] Marc Levoy and Pat Hanrahan. Light field rendering. In Proceedings of the 23rd annual conference on Computer graphics and interactive techniques, pages 31-42, 1996. 3
309
+ [19] Wojciech Matusik, Hanspeter Pfister, Addy Ngan, Paul Beardsley, Remo Ziegler, and Leonard McMillan. Image-based 3d photography using opacity hulls. ACM Transactions on Graphics (TOG), 21(3):427-437, 2002. 2
310
+ [20] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 3, 4, 6
311
+ [21] Mehdi SM Sajjadi, Henning Meyer, Etienne Pot, Urs Bergmann, Klaus Greff, Noha Radwan, Suhani Vora, Mario Lucic, Daniel Duckworth, Alexey Dosovitskiy, et al. Scene representation transformer: Geometry-free novel view synthesis through set-latent scene representations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6229-6238, 2022. 3
312
+ [22] Steven M Seitz and Charles R Dyer. Photorealistic scene reconstruction by voxel coloring. International Journal of Computer Vision, 35(2):151-173, 1999. 3
313
+ [23] Vincent Sitzmann, Semon Rezchikov, Bill Freeman, Josh Tenenbaum, and Fredo Durand. Light field networks: Neural scene representations with single-evaluation rendering. Advances in Neural Information Processing Systems, 34:19313-19325, 2021. 3
314
+ [24] Vincent Sitzmann, Justus Thies, Felix Heide, Matthias Nießner, Gordon Wetzstein, and Michael Zollhofer. Deepvoxels: Learning persistent 3d feature embeddings. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2437-2446, 2019. 3
315
+ [25] Pratul P Srinivasan, Richard Tucker, Jonathan T Barron, Ravi Ramamoorthi, Ren Ng, and Noah Snavely. Pushing the boundaries of view extrapolation with multiplane images. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 175-184, 2019. 3
316
+ [26] Mohammed Suhail, Carlos Esteves, Leonid Sigal, and Ameesh Makadia. Light field neural rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8269-8279, 2022. 3
317
+ [27] Unity Technologies. Light probes. 2
318
+ [28] Unity Technologies. Reflection probe. 2
319
+ [29] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 30, 2017. 5
320
+ [30] Dor Verbin, Peter Hedman, Ben Mildenhall, Todd Zickler, Jonathan T Barron, and Pratul P Srinivasan. Ref-nerf: Structured view-dependent appearance for neural radiance fields. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 5481-5490. IEEE, 2022. 3, 4, 5, 6
321
+ [31] Qianqian Wang, Zhicheng Wang, Kyle Genova, Pratul P Srinivasan, Howard Zhou, Jonathan T Barron, Ricardo Martin-Brualla, Noah Snavely, and Thomas Funkhouser. Ibrnet: Learning multi-view image-based rendering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4690-4699, 2021. 3
324
+ [32] Xiuming Zhang, Pratul P Srinivasan, Boyang Deng, Paul Debevec, William T Freeman, and Jonathan T Barron. Ner-factor: Neural factorization of shape and reflectance under an unknown illumination. ACM Transactions on Graphics (TOG), 40(6):1-18, 2021. 3
ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6f10a1ef0784e491884cd59c400fde63b3bd823bef533782831dbc51b4a1b843
3
+ size 466884
ablenerfattentionbasedrenderingwithlearnableembeddingsforneuralradiancefield/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:202ae487e0875d754beab8293057ff91b12f9e1728aabffbdc6e44ca2f9ead0d
3
+ size 354903
abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/0373c754-2d12-4d52-95c7-16e20ad62c3a_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:272bb3176fd4e0463d9035c6b5d1661b9bb1db0ca1d90ad3e027d1fd4e74a211
3
+ size 92538
abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/0373c754-2d12-4d52-95c7-16e20ad62c3a_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee38b409696780711078c1d6741c44d006407eb685d911740cc57c8117648ff3
3
+ size 117733
abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/0373c754-2d12-4d52-95c7-16e20ad62c3a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:982fef4d4ec63d06643331eec377cb430e0bb4f35198319854985f717051b6e0
3
+ size 921319
abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/full.md ADDED
@@ -0,0 +1,416 @@
1
+ # Abstract Visual Reasoning: An Algebraic Approach for Solving Raven's Progressive Matrices
2
+
3
+ Jingyi Xu $^{1*}$ Tushar Vaidya $^{2\circ\ast}$ Yufei Wu $^{2\circ\ast}$ Saket Chandra $^{1}$ Zhangsheng Lai $^{3\circ}$ Kai Fong Ernest Chong $^{1\dagger}$ $^{1}$ Singapore University of Technology and Design
4
+ $^{2}$ Nanyang Technological University $^{3}$ Singapore Polytechnic
5
+
6
+ jingyi_xu@mymail.sutd.edu.sg tushar
7
+
8
+ yufei002@e.ntu.edu.sg
9
+
10
+ lai-zhangsheng@sp.edu.sg {saket_chandra,ernest-chong}@sutd.edu.sg
11
+
12
+ # Abstract
13
+
14
+ We introduce algebraic machine reasoning, a new reasoning framework that is well-suited for abstract reasoning. Effectively, algebraic machine reasoning reduces the difficult process of novel problem-solving to routine algebraic computation. The fundamental algebraic objects of interest are the ideals of some suitably initialized polynomial ring. We shall explain how solving Raven's Progressive Matrices (RPMs) can be realized as computational problems in algebra, which combine various well-known algebraic subroutines that include: Computing the Gröbner basis of an ideal, checking for ideal containment, etc. Crucially, the additional algebraic structure satisfied by ideals allows for more operations on ideals beyond set-theoretic operations.
15
+
16
+ Our algebraic machine reasoning framework is not only able to select the correct answer from a given answer set, but also able to generate the correct answer with only the question matrix given. Experiments on the I-RAVEN dataset yield an overall $93.2\%$ accuracy, which significantly outperforms the current state-of-the-art accuracy of $77.0\%$ and exceeds human performance at $84.4\%$ accuracy.
17
+
18
+ # 1. Introduction
19
+
20
+ When we think of machine reasoning, nothing captures our imagination more than the possibility that machines would eventually surpass humans in intelligence tests and general reasoning tasks. Even for humans, to excel in IQ tests, such as the well-known Raven's progressive matrices (RPMs) [5], is already a non-trivial feat. A typical RPM instance is composed of a question matrix and an answer set; see Fig. 1. A question matrix is a $3 \times 3$ grid of panels
21
+
22
+ ![](images/b3014e3cd65ca62e77b7c51b631a449e32dddeb9dc52d63330f92cf6f2c613c3.jpg)
23
+ Figure 1. An example of RPM instance from the I-RAVEN dataset. The correct answer is marked with a red box.
24
+
25
+ ![](images/dbe5b7a535e7edaaf8665a0ce37b844858e114746761014ff03bf919a7764cbf.jpg)
26
+
27
+ that satisfy certain hidden rules, where the first 8 panels are filled with geometric entities, and the 9-th panel is "missing". The goal is to infer the correct answer for this last panel from among the 8 panels in the given answer set.
28
+
29
+ The ability to solve RPMs is the quintessential display of what cognitive scientists call fluid intelligence. The word "fluid" alludes to the mental agility of discovering new relations and abstractions [28], especially for solving novel problems not encountered before. Thus, it is not surprising that abstract reasoning on novel problems is widely hailed as the hallmark of human intelligence [6].
30
+
31
+ Although there has been much recent progress in machine reasoning [15, 17, 30-33, 37, 38, 46, 47], a common criticism [9, 25, 26] is that existing reasoning frameworks have focused on approaches involving extensive training, even when solving well-established reasoning tests such as RPMs. Perhaps most pertinently, as [9] argues, reasoning tasks such as RPMs should not need task-specific
32
+
33
+ ![](images/5053e64a19eae16dfcc522c1cf2e3b6a3cb14b64366317a9486b8c6ef9075518.jpg)
34
+ Figure 2. An overview of our algebraic machine reasoning framework, organized into 2 stages.
35
+
36
+ performance optimization. After all, if a machine optimizes performance by training on task-specific data, then that task cannot possibly be novel to the machine.
37
+
38
+ To better emulate human reasoning, we propose what we call "algebraic machine reasoning", a new reasoning framework that is well-suited for abstract reasoning. Our framework solves RPMs without needing to optimize for performance on task-specific data, analogous to how a gifted child solves RPMs without needing practice on RPMs. Our key starting point is to define concepts as ideals of some suitably initialized polynomial ring. These ideals are treated as the "actual objects of study" in algebraic machine reasoning, which do not require any numerical values to be assigned to them. We shall elucidate how the RPM task can be realized as a computational problem in algebra involving ideals.
39
+
40
+ Our reasoning framework can be broadly divided into two stages: (1) algebraic representation, and (2) algebraic machine reasoning; see Fig. 2. In the first stage, we represent RPM panels as ideals, based on perceptual attribute values extracted from object detection models. In the second stage, we propose 4 invariance modules to extract patterns from the RPM question matrix.
41
+
42
+ To summarize, our main contributions are as follows:
43
+
44
+ - We reduce "solving the RPM task" to "solving a computational problem in algebra". Specifically, we present how the discovery of abstract patterns can be realized very concretely as algebraic computations known as primary decompositions of ideals.
45
+ - In our algebraic machine reasoning framework, we introduce 4 invariance modules for extracting patterns that are meaningful to humans.
46
+ - Our framework is not only able to select the correct answer from a given answer set, but also able to generate answers without needing any given answer set.
47
+ - Experiments conducted on RAVEN and I-RAVEN datasets demonstrate that our reasoning framework significantly outperforms state-of-the-art methods.
48
+
49
+ # 2. Related Work
50
+
51
+ RPM solvers. There has been much recent interest in solving RPMs with deep-learning-based methods [15, 23, 32, 47, 48, 51-54]. Most methods extract features from raw RPM images using neural networks, and select answers by measuring panel similarities. Several works instead focus on generating correct answers without needing the answer set [27, 34]. To evaluate the reasoning capabilities of these methods, RPM-like datasets such as PGM [32] and RAVEN [46] have been proposed. Subsequently, I-RAVEN [12] and RAVEN-FAIR [3] are introduced to overcome a shortcut flaw in the answer set generation of RAVEN.
52
+
53
+ Algebraic methods in AI. Using algebraic methods in AI is not new. Systems of polynomial equations are commonly seen in computer vision [29] and robotics [8], which are solved algebraically via Gröbner basis computations. In statistical learning theory, methods in algebraic geometry [41] and algebraic statistics [10] are used to study singularities in statistical models [22, 42, 43, 45], to analyze generalization error in hierarchical models [39, 40], to learn invariant subspaces of probability distributions [18, 20], and to model Bayesian networks [11, 36]. A common theme in these works is to study suitably defined algebraic varieties. In deep learning, algebraic methods are used to study the expressivity of neural nets [7, 16, 24, 50]. In automated theorem proving, Gröbner basis computations are used in proof-checking [35]. Recently, a matrix formulation of first-order logic was applied to the RPM task [49], where relations are approximated by matrices and reasoning is framed as a bilevel optimization task to find best-fit matrix operators. As far as we know, methods from commutative algebra have not been used in machine reasoning.
54
+
55
+ # 3. Proposed Algebraic Framework
56
+
57
+ In abstract reasoning, a key cognitive step is to "discover patterns from observations", which can be formulated
58
+
59
+ concretely as "finding invariances in observations". In this section, we describe how algebraic objects known as ideals are used to represent RPM instances, how patterns are extracted from such algebraic representations, and how RPMs can be solved, both for answer selection and answer generation, as computational problems in algebra.
60
+
61
+ # 3.1. Preliminaries
62
+
63
+ Throughout, let $R = \mathbb{R}[x_1, \ldots, x_n]$ be the ring of polynomials in variables $x_1, \ldots, x_n$ , with real coefficients. In particular, $R$ is closed under addition and multiplication of polynomials, i.e., for any $a, b \in R$ , we have $a + b, ab \in R$ .
64
+
65
+ # 3.1.1 Algebraic definitions
66
+
67
+ Ideals in polynomial rings. A subset $I \subseteq R$ is called an ideal if there exist polynomials $g_1, \ldots, g_k$ in $R$ such that
68
+
69
+ $$
70
+ I = \left\{f _ {1} g _ {1} + \dots + f _ {k} g _ {k} | f _ {1}, \dots , f _ {k} \in R \right\}
71
+ $$
72
+
73
+ contains all polynomial combinations of $g_1, \ldots, g_k$ . We say that $\mathcal{G} = \{g_1, \ldots, g_k\}$ is a generating set for $I$ , we call $g_1, \ldots, g_k$ generators, and we write either $I = \langle g_1, \ldots, g_k \rangle$ or $I = \langle \mathcal{G} \rangle$ . Note that generating sets of ideals are not unique. If $I$ has a generating set consisting only of monomials, then we say that $I$ is a monomial ideal. (Recall that a monomial is a polynomial with a single term.) Given ideals $J_1 = \langle g_1, \ldots, g_k \rangle$ and $J_2 = \langle h_1, \ldots, h_\ell \rangle$ , there are three basic operations (sums, products, intersections):
74
+
75
+ $$
76
+ J _ {1} + J _ {2} := \langle g _ {1}, \dots , g _ {k}, h _ {1}, \dots , h _ {\ell} \rangle ;
77
+ $$
78
+
79
+ $$
80
+ J _ {1} J _ {2} := \langle \left\{g _ {i} h _ {j} \mid 1 \leq i \leq k, 1 \leq j \leq \ell \right\} \rangle ;
81
+ $$
82
+
83
+ $$
84
+ J_{1} \cap J_{2} := \{ r \in R : r \in J_{1} \text{ and } r \in J_{2} \}.
85
+ $$
86
+
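+ To make these operations concrete, here is a minimal Python sketch (ours, not part of the paper) that manipulates monomial ideals directly as lists of exponent dictionaries; variable names such as `x_square` mirror the attribute variables introduced later in Section 3.2.1. For monomial ideals, the three operations reduce to unions of generators, pairwise products, and pairwise least common multiples, followed by discarding redundant generators.
+
+ ```python
+ # Sketch (not the authors' code): monomial ideals as lists of exponent dicts.
+ # For monomial ideals, J1 + J2 is generated by the union of generators,
+ # J1 * J2 by pairwise products, and J1 ∩ J2 by pairwise lcms.
+
+ def mul(g, h):        # product of two monomials
+     return {v: g.get(v, 0) + h.get(v, 0) for v in set(g) | set(h)}
+
+ def lcm(g, h):        # least common multiple of two monomials
+     return {v: max(g.get(v, 0), h.get(v, 0)) for v in set(g) | set(h)}
+
+ def divides(g, h):    # does monomial g divide monomial h?
+     return all(h.get(v, 0) >= e for v, e in g.items())
+
+ def minimize(gens):   # keep only the minimal generators
+     out = []
+     for g in gens:
+         if not any(divides(h, g) and h != g for h in gens) and g not in out:
+             out.append(g)
+     return out
+
+ def ideal_sum(J1, J2):          return minimize(J1 + J2)
+ def ideal_product(J1, J2):      return minimize([mul(g, h) for g in J1 for h in J2])
+ def ideal_intersection(J1, J2): return minimize([lcm(g, h) for g in J1 for h in J2])
+
+ # Example: J1 = <x_square>, J2 = <x_black*x_square, x_gray>
+ J1 = [{"x_square": 1}]
+ J2 = [{"x_black": 1, "x_square": 1}, {"x_gray": 1}]
+ print(ideal_sum(J1, J2))           # generators of <x_square, x_gray>
+ print(ideal_intersection(J1, J2))  # generators of <x_black*x_square, x_gray*x_square>
+ ```
+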
87
+ Most algebraic computations involving ideals, especially "advanced" operations (e.g. primary decompositions), require computing their Gröbner bases as a key initial step. More generally, Gröbner basis computation forms the backbone of most algorithms in algebra; see Appendix A.2.
88
+
89
+ Primary decompositions. In commutative algebra, primary decompositions of ideals are a far-reaching generalization of the idea of prime factorization for integers. Their importance to algebraists cannot be overstated. Informally, every ideal $J$ has a decomposition $J = J_{1} \cap \dots \cap J_{s}$ as an intersection of finitely many primary ideals. This intersection is called a primary decomposition of $J$ , and each $J_{j}$ is called a primary component of the decomposition. In the special case when $J$ is a monomial ideal, there is a **unique minimal primary decomposition** with maximal monomial primary components [2]; we denote this unique set of primary components by $\mathrm{pd}(J)$ . See Appendix A.3 for details.
90
+
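+ For the squarefree monomial ideals that arise from RPM panels, each component of this minimal decomposition is a prime ideal generated by a subset of the variables, and these subsets are exactly the inclusion-minimal "covers" that meet the support of every generator. The brute-force sketch below (our illustration, adequate for the small ideals in this setting and not the authors' implementation) computes them directly; in general, computer algebra systems compute such decompositions with Gröbner-basis-based routines.
+
+ ```python
+ from itertools import combinations
+
+ # Sketch: pd(J) for a squarefree monomial ideal J, where each generator is
+ # given as a frozenset of variable names (its support).  The components are
+ # the prime ideals <C> for every inclusion-minimal set C of variables that
+ # meets the support of every generator.
+ def primary_decomposition(gens):
+     variables = sorted(set().union(*gens))
+     covers = []
+     for r in range(1, len(variables) + 1):
+         for cand in map(frozenset, combinations(variables, r)):
+             if all(cand & g for g in gens) and not any(c <= cand for c in covers):
+                 covers.append(cand)
+     return covers
+
+ # Example: J = <x_gray*x_square, x_gray*x_circle> = <x_gray> ∩ <x_square, x_circle>
+ J = [frozenset({"x_gray", "x_square"}), frozenset({"x_gray", "x_circle"})]
+ print(primary_decomposition(J))   # the two components <x_gray> and <x_square, x_circle>
+ ```
+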
91
+ # 3.1.2 Concepts as monomial ideals
92
+
93
+ We define a concept to be a monomial ideal of $R$ . In particular, the zero ideal $\langle 0 \rangle \subseteq R$ is the concept "null", and could be interpreted as "impossible" or "nothing", while the ideal $\langle 1 \rangle = R$ is the concept "conceivable", and could be interpreted as "possible" or "everything". Given a concept
94
+
95
+ $J \subseteq R$ , a monomial in $J$ is called an instance of the concept. For example, $x_{\mathrm{black}}x_{\mathrm{square}}$ is an instance of $\langle x_{\mathrm{square}} \rangle$ (the concept "square"). For each $x_i$ , we say $\langle x_i \rangle \subseteq R$ is a primitive concept, and $x_i$ is a primitive instance.
96
+
97
+ Theorem 3.1. There are infinitely many concepts in $R$ , even though there are finitely many primitive concepts in $R$ . Furthermore, if $J \subseteq R$ is a concept, then the following hold:
98
+
99
+ (i) $J$ has infinitely many instances, unless $J = \langle 0\rangle$
100
+ (ii) $J$ has a unique minimal generating set consisting of finitely many instances, which we denote by $\mathrm{mingen}(J)$ .
101
+ (iii) If $J \neq \langle 1 \rangle$ , then $J$ has a unique set of associated concepts $\{P_1, \ldots, P_k\}$ , together with a unique minimal primary decomposition $J = J_1 \cap \dots \cap J_k$ , such that each $J_i$ is a concept contained in $P_i$ , that is maximal among all possible primary components contained in $P_i$ that are concepts.
102
+
103
+ See Appendix A.4 for a proof of Theorem 3.1 and for more details on why defining concepts as monomial ideals captures the expressiveness of concepts in human reasoning.
104
+
105
+ # 3.2. Stage 1: Algebraic representation
106
+
107
+ We shall use the RPM instance depicted in Fig. 1 as our running example, to show the entire algebraic reasoning process: (1) algebraic representation; and (2) algebraic machine reasoning. In this subsection, we focus on the first stage. Recall that every RPM instance is composed of 16 panels filled with geometric entities. For our running example, each entity can be described using 4 attributes: "color", "size", "type", and "position". We also need one additional attribute to represent the "number" of entities in the panel.
108
+
109
+ # 3.2.1 Attribute concepts
110
+
111
+ In human cognition, certain semantically similar concepts are naturally grouped to form a more general concept. For example, concepts such as "red", "green", "blue", "yellow", etc., can be grouped to form a new concept that represents "color". Intuitively, we can think of "color" as an attribute, and "red", "green", "blue", "yellow" as attribute values.
112
+
113
+ For our running example, the 5 attributes are represented by 5 concepts (monomial ideals). In general, all possible values for each attribute are encoded as generators for the concept representing that attribute. However, for ease of explanation, we shall consider only those attribute values that are involved in Fig. 1 to explain our example:
114
+
115
+ $$
116
+ \begin{array}{l} \mathcal{A}_{\text{num}} := \left\{ x_{\text{one}}, x_{\text{two}} \right\}, \\ \mathcal{A}_{\text{pos}} := \left\{ x_{\text{left}}, x_{\text{right}} \right\}, \end{array}
117
+ $$
118
+
119
+ $$
120
+ \begin{array}{l} \mathcal{A}_{\text{type}} := \left\{ x_{\text{triangle}}, x_{\text{square}}, x_{\text{pentagon}}, x_{\text{hexagon}}, x_{\text{circle}} \right\}, \\ \mathcal{A}_{\text{color}} := \left\{ x_{\text{white}}, x_{\text{gray}}, x_{\text{dgray}}, x_{\text{black}} \right\}, \\ \mathcal{A}_{\text{size}} := \left\{ x_{\text{small}}, x_{\text{avg}}, x_{\text{large}} \right\}. \end{array}
121
+ $$
122
+
123
+ Let $\mathcal{L} := \{\text{num, pos, type, color, size}\}$ be the set of attribute labels, and let $\mathcal{A}_{\text{all}} := \bigcup_{\ell \in \mathcal{L}} \mathcal{A}_{\ell}$ . Initialize the ring
124
+
125
+ $R \coloneqq \mathbb{R}[\mathcal{A}_{\mathrm{all}}]$ of all polynomials on the variables in $\mathcal{A}_{\mathrm{all}}$ with real coefficients. For each $\ell \in \mathcal{L}$ , let $J_{\ell}$ be the concept $\langle \mathcal{A}_{\ell} \rangle \subseteq R$ . These concepts, which we call attribute concepts, are task-specific. We assume humans tend to discover and organize complex patterns in terms of attributes. Thus for pattern extraction, we shall use the inductive bias that a concept representing a pattern is deemed meaningful if it is in some attribute concept.
126
+
127
+ # 3.2.2 Representation of RPM panels
128
+
129
+ In order to encode the RPM images algebraically, we first need to train perception modules to extract attribute information directly from raw images. One possible approach for perception, as used in our experiments, is to train 4 RetinaNet models (each with a ResNet-50 backbone), one for each of the 4 attributes other than "number", which can be directly inferred by counting the number of bounding boxes.
130
+
131
+ After extracting attribute values for entities, we can represent each panel as a concept. For example, the top-left panel of the RPM in Fig. 1 can be encoded as the concept $J_{1,1} = \langle x_{\mathrm{two}}x_{\mathrm{left}}x_{\mathrm{square}}x_{\mathrm{black}}x_{\mathrm{avg}}, x_{\mathrm{two}}x_{\mathrm{right}}x_{\mathrm{triangle}}x_{\mathrm{gray}}x_{\mathrm{avg}} \rangle$ in the polynomial ring $R$ . Here, $J_{1,1}$ represents a panel with two entities, a black square of average size on the left, and a gray triangle of average size on the right. The indices in $J_{1,1}$ tell us that the panel is in row 1, column 1. Similarly, we can encode the remaining 7 panels of the question matrix as concepts $J_{1,2}, J_{1,3}, \ldots, J_{3,2}$ and encode the 8 answer options as concepts $J_{\mathrm{ans1}}, \ldots, J_{\mathrm{ans8}}$ . In general, every monomial generator of each concept describes an entity in the associated panel.
132
+
133
+ The list of 8 concepts $\mathbf{J} = [J_{1,1},\dots ,J_{3,2}]$ shall be called a concept matrix; this represents the RPM question matrix with a missing 9-th panel. Let $\mathbf{J}_i\coloneqq [J_{i,1},J_{i,2},J_{i,3}]$ (for $i = 1,2$ ) represent the $i$ -th row in the question matrix.
134
+
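+ The encoding itself is mechanical once the perception output is available. The following sketch (ours; the attribute names follow the running example and are otherwise assumptions) turns detected entities into squarefree monomials, represented as frozensets of variables, and a panel into the list of its generators.
+
+ ```python
+ # Sketch: from perception output to a panel concept (one monomial per entity).
+ def entity_to_monomial(number, entity):
+     return frozenset({
+         f"x_{number}", f"x_{entity['pos']}", f"x_{entity['type']}",
+         f"x_{entity['color']}", f"x_{entity['size']}",
+     })
+
+ def panel_to_concept(entities):
+     number = {1: "one", 2: "two"}[len(entities)]   # only the running example's counts
+     return [entity_to_monomial(number, e) for e in entities]
+
+ # Top-left panel of Fig. 1: a black square on the left, a gray triangle on the right.
+ J_11 = panel_to_concept([
+     {"pos": "left",  "type": "square",   "color": "black", "size": "avg"},
+     {"pos": "right", "type": "triangle", "color": "gray",  "size": "avg"},
+ ])
+ # The concept matrix is then simply the list of the 8 question-panel concepts:
+ # question_matrix = [J_11, J_12, J_13, J_21, J_22, J_23, J_31, J_32]
+ ```
+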
135
+ # 3.3. Stage 2: algebraic machine reasoning
136
+
137
+ Previously in Section 3.2, we have already encoded the question matrix in an RPM instance as a concept matrix $\mathbf{J} = [J_{1,1},\ldots ,J_{3,2}]$ . In this subsection, we will introduce the reasoning process of our algebraic framework.
138
+
139
+ Our goal of extracting patterns for a single row of $\mathbf{J}$ can be mathematically formulated as "finding invariance" across the concepts that represent the panels in this row. (The same process can be applied to columns.) This seemingly imprecise idea of "finding invariance" can be realized very concretely via the computation of primary decompositions. Ideally, we want to extract patterns that are meaningful to humans. Hence we have designed 4 invariance modules to mimic human cognition in pattern recognition.
140
+
141
+ # 3.3.1 Prior knowledge
142
+
143
+ To use algebraic machine reasoning, we adopt:
144
+
145
+ - Inductive bias of attribute concepts (see Section 3.2.1);
146
+
147
+ - Useful binary operations on numerical values;
148
+ - Functions that map concepts to concepts.
149
+
150
+ There are numerous binary operations, such as $+, -, \times$ , $\div$ , $\min$ , $\max$ , etc., that can be applied to numerical values extracted from concepts. For the RPM task, we use $+$ , $-$ .
151
+
152
+ In algebra, the study of functions between algebraic objects is a productive strategy for understanding the underlying algebraic structure. Analogously, we shall use maps on concepts to extract complex patterns. For the RPM task, we need to cyclically order the values in $\mathcal{A}_{\ell}$ for each attribute $\ell \in \mathcal{L}$ before we can extract sequential information. To encode the idea of "next", we introduce the function $f_{\mathrm{next}}(J|\Delta)$ defined on concepts $J$ , where $\Delta$ represents the step-size. Each variable $x \in \mathcal{A}_{\ell}$ that appears in a generator of $J$ is mapped to the $\Delta$ -th variable after $x$ , w.r.t. the cyclic order on $\mathcal{A}_{\ell}$ . For example, $f_{\mathrm{next}}(\langle x_{\mathrm{square}}x_{\mathrm{gray}}x_{\mathrm{avg}}\rangle |1) = \langle x_{\mathrm{pentagon}}x_{\mathrm{dgray}}x_{\mathrm{large}}\rangle$ , and $f_{\mathrm{next}}(\langle x_{\mathrm{square}}\rangle | - 2) = \langle x_{\mathrm{circle}}\rangle$ .
153
+
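+ A direct implementation of $f_{\mathrm{next}}$ only needs the cyclic orders on the attribute value sets. The sketch below (ours; the particular orderings are assumptions chosen to reproduce the worked examples above) shifts every variable of every generator by $\Delta$ steps within its own attribute and leaves other variables untouched.
+
+ ```python
+ # Sketch of f_next(J | Delta): cyclically shift each attribute variable.
+ CYCLIC_ORDER = {
+     "type":  ["x_triangle", "x_square", "x_pentagon", "x_hexagon", "x_circle"],
+     "color": ["x_white", "x_gray", "x_dgray", "x_black"],
+     "size":  ["x_small", "x_avg", "x_large"],
+ }
+
+ def shift_variable(x, delta):
+     for values in CYCLIC_ORDER.values():
+         if x in values:
+             return values[(values.index(x) + delta) % len(values)]
+     return x                      # variables of other attributes are unchanged
+
+ def f_next(J, delta):
+     return [frozenset(shift_variable(x, delta) for x in g) for g in J]
+
+ print(f_next([frozenset({"x_square", "x_gray", "x_avg"})], 1))
+ # -> [<x_pentagon x_dgray x_large>], matching the example in the text
+ print(f_next([frozenset({"x_square"})], -2))   # -> [<x_circle>]
+ ```
+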
154
+ # 3.3.2 Reasoning via primary decompositions
155
+
156
+ Given concepts $J_{1},\ldots ,J_{k}$ that share a common "pattern", how do we extract this pattern? Abstractly, a common pattern can be treated as a concept $K$ that contains all of these concepts $J_{1},\ldots ,J_{k}$ . If there are several common patterns $K_{1},\ldots ,K_{r}$ , then each concept $J_{i}$ can be "decomposed" as $J_{i} = K_{1}\cap \dots \cap K_{r}\cap K_{i}^{\prime}$ for some ideal $K_{i}^{\prime}$ . Thus, we have the following algebraic problem: Given $J_{1},\ldots ,J_{k}$ , compute their common components $K_{1},\ldots ,K_{r}$ .
157
+
158
+ Recall that a concept $J$ has a unique minimal primary decomposition, since concepts are monomial ideals. Thus, to extract the common patterns of concepts $J_{1},\ldots ,J_{k}$ , we first have to compute $\mathrm{pd}(J_1),\dots,\mathrm{pd}(J_k)$ , then extract the common primary components. The intersection of (any subset of) these common components would yield a new concept, which can be interpreted as a common pattern of the concepts $J_{1},\ldots ,J_{k}$ . As part of our inductive bias, we are only interested in those primary components that are contained in attribute concepts. See Appendix A.3 for further details.
159
+
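+ In code, this amounts to intersecting the primary decompositions and filtering by the attribute concepts. The sketch below (ours) repeats the brute-force `primary_decomposition` routine from the sketch in Section 3.1.1 so that it is self-contained; `ATTRIBUTE_CONCEPTS` lists only two attributes for brevity.
+
+ ```python
+ from itertools import combinations
+
+ def primary_decomposition(gens):   # same brute-force routine as in Section 3.1.1
+     variables = sorted(set().union(*gens))
+     covers = []
+     for r in range(1, len(variables) + 1):
+         for cand in map(frozenset, combinations(variables, r)):
+             if all(cand & g for g in gens) and not any(c <= cand for c in covers):
+                 covers.append(cand)
+     return covers
+
+ ATTRIBUTE_CONCEPTS = {
+     "color": {"x_white", "x_gray", "x_dgray", "x_black"},
+     "type":  {"x_triangle", "x_square", "x_pentagon", "x_hexagon", "x_circle"},
+ }
+
+ def common_patterns(concepts):
+     # common primary components of all concepts ...
+     common = set(primary_decomposition(concepts[0]))
+     for J in concepts[1:]:
+         common &= set(primary_decomposition(J))
+     # ... restricted to components contained in an attribute concept
+     return {(attr, comp) for comp in common
+             for attr, vals in ATTRIBUTE_CONCEPTS.items() if comp <= vals}
+
+ # Two panels whose entities are all gray share the component <x_gray>.
+ J1 = [frozenset({"x_gray", "x_square"})]
+ J2 = [frozenset({"x_gray", "x_circle"}), frozenset({"x_gray", "x_pentagon"})]
+ print(common_patterns([J1, J2]))   # {('color', frozenset({'x_gray'}))}
+ ```
+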
160
+ # 3.3.3 Proposed invariance modules
161
+
162
+ Our 4 proposed invariance modules are: (1) intra-invariance module, (2) inter-invariance module, (3) compositional invariance module, and (4) binary-operator invariance module. Intuitively, they check for 4 general types of invariances across a sequence of concepts $J_{1}, \ldots, J_{k}$ (e.g. a row $\mathbf{J}_{i} = [J_{i,1}, J_{i,2}, J_{i,3}]$ for the RPM task). Such invariances apply not just to the RPM task, but could be applied to other RPM-like tasks, e.g. based on different prior knowledge, different grid layouts, etc. Full computational details for our running example can be found in Appendix B.3.
163
+
164
+ 1. Intra-invariance module extracts patterns where the set of values for some attribute within concept $J_{i}$ remains invariant over all $i$ . First, we define $J_{+} := J_{1} + \dots + J_{k}$ and $J_{\cap} := J_{1} \cap \dots \cap J_{k}$ ; see Section 3.1.1. Intuitively, $J_{+}$ and
165
+
166
+ $J_{\cap}$ are concepts that capture information about the entire sequence $J_{1},\ldots ,J_{k}$ in two different ways. Next, we compute the common primary components of $J_{+}$ and $J_{\cap}$ that are contained in attribute concepts. Finally, we return the attributes associated to these common primary components: $\mathcal{P}_{\mathrm{intra}}([J_1\dots J_k])\coloneqq \{\mathrm{attr}\in \mathcal{L}\mid \exists I\in \mathrm{pd}(J_+) \cap \mathrm{pd}(J_{\cap}),I\subseteq \langle \mathcal{A}_{\mathrm{attr}}\rangle \} .$
167
+
168
+ 2. Inter-invariance module extracts patterns arising from the set difference between $\mathrm{pd}(J_{\cap})$ and $\mathrm{pd}(J_{+})$ . Thereafter, we check for the invariance of these extracted patterns across multiple sequences. The extracted set of patterns is:
169
+
170
+ $$
171
+ \mathcal{P}_{\text{inter}}([J_{1},\ldots ,J_{k}]) := \left\{ (\mathrm{attr},\mathcal{I}) \,\bigg|\, \begin{array}{c} \mathcal{I}\subseteq \mathrm{pd}(J_{\cap}) - \mathrm{pd}(J_{+}), \\ \mathrm{attr}\in \mathcal{L},\ I\subseteq \langle \mathcal{A}_{\mathrm{attr}}\rangle \ \forall I\in \mathcal{I} \end{array} \right\},
172
+ $$
173
+
174
+ where $\mathcal{I}$ is a set of concepts, and “-” refers to set difference. We omit $\mathrm{pd}(J_{+})$ so that we do not overcount the patterns already extracted in the previous module. Informally, for each pair (attr, $\mathcal{I}$ ), the concepts in $\mathcal{I}$ can be interpreted as those “primary” concepts that correspond to at least one of $J_{1},\ldots ,J_{k}$ , that do not correspond to all of $J_{1},\ldots ,J_{k}$ , and that are contained in $\langle \mathcal{A}_{\mathrm{attr}}\rangle$ .
175
+ 3. Compositional invariance module extracts patterns arising from invariant attribute values in the following new sequence of concepts:
176
+
177
+ $$
178
+ [ J _ {1} ^ {\prime}, \dots , J _ {k} ^ {\prime} ] = [ f ^ {k - 1} (J _ {1}), f ^ {k - 2} (J _ {2}), \dots , f (J _ {k - 1}), J _ {k} ],
179
+ $$
180
+
181
+ where $f$ is some given function. Intuitively, for such patterns, there are some attributes whose values are invariant in $[f(J_i), J_{i+1}]$ for all $i = 1, \ldots, k-1$ . By checking the intersection of primary components of the concepts in the new sequence, the extracted set of patterns is given by:
182
+
183
+ $$
184
+ \mathcal{P}_{\mathrm{comp}}([J_{1},\ldots ,J_{k}]):= \Bigg\{(\mathrm{attr},f)\bigg| \begin{array}{c}\exists I\in \bigcap_{i = 1}^{k}\mathrm{pd}(f^{k - i}(J_{i})),\\ \mathrm{attr}\in \mathcal{L},I\subseteq \langle \mathcal{A}_{\mathrm{attr}}\rangle \end{array} \Bigg\}.
185
+ $$
186
+
187
+ The given function used for the RPM task is $f_{\mathrm{next}}(\cdot |\Delta)$ , where $\Delta$ represents the number of steps; see Section 3.3.1.
188
+
189
+ 4. Binary-operator module extracts numerical patterns, based on a given real-valued function $g$ on concepts, and a given set $\Lambda$ of binary operators (a small illustrative sketch follows this list). The extracted patterns are:
190
+
191
+ $$
192
+ \mathcal{P}_{\mathrm{binary}}(\mathbf{J}_{i}) := \left\{ \overline{\circ} \,\middle|\, \begin{array}{c} \overline{\circ} = [\, \circ_{1}, \ldots, \circ_{k-2} \,],\ \circ_{i} \in \Lambda, \\ g(J_{1}) \circ_{1} \cdots \circ_{k-2}\, g(J_{k-1}) = g(J_{k}) \end{array} \right\}.
193
+ $$
194
+
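+ To illustrate one module end-to-end, the sketch below (ours, under the paper's choice of $g =$ number of entities and $\Lambda = \{+, -\}$) enumerates all operator sequences and keeps those that reproduce $g(J_k)$ from $g(J_1), \ldots, g(J_{k-1})$, which is exactly the check performed by the binary-operator module.
+
+ ```python
+ from itertools import product
+ import operator
+
+ # Sketch of the binary-operator invariance module with g(J) = number of
+ # generators (entities) in the panel and Lambda = {+, -}.
+ LAMBDA = {"+": operator.add, "-": operator.sub}
+
+ def binary_patterns(row, g=len):
+     values = [g(J) for J in row]
+     patterns = []
+     for ops in product(LAMBDA, repeat=len(row) - 2):
+         acc = values[0]
+         for op, v in zip(ops, values[1:-1]):
+             acc = LAMBDA[op](acc, v)
+         if acc == values[-1]:
+             patterns.append(list(ops))
+     return patterns
+
+ # A row whose panels contain 1, 2 and 3 entities: "1 + 2 = 3" is the only pattern.
+ row = [["e1"], ["e1", "e2"], ["e1", "e2", "e3"]]   # dummy generator lists
+ print(binary_patterns(row))   # [['+']]
+ ```
+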
195
+ # 3.3.4 Extracting row-wise patterns
196
+
197
+ Given a concept matrix $\mathbf{J} = [J_{1,1},\dots ,J_{3,2}]$ , how do we extract the patterns from its $i$-th row? We begin by extracting the common position values among all 8 panels:
198
+
199
+ $$
200
+ \operatorname{comPos}(\mathbf{J}) := \left\{ p \in \mathcal{A}_{\text{pos}} \,\middle|\, \exists I \in \bigcap_{J \in \mathbf{J}} \mathrm{pd}(J),\ p \in I \right\}
201
+ $$
202
+
203
+ For each common position $p \in \mathrm{comPos}(\mathbf{J})$ , we generate two new concept matrices $\bar{\mathbf{J}}^{(p)}$ and $\hat{\mathbf{J}}^{(p)}$ , such that:
204
+
205
+ - Each concept $\bar{J}_{i,j}^{(p)}$ in $\bar{\mathbf{J}}^{(p)}$ is generated by the unique generator in $J_{i,j}$ that is divisible by $p$;
206
+ - Each concept $\hat{J}_{i,j}^{(p)}$ in $\hat{\mathbf{J}}^{(p)}$ is generated by all generators in $J_{i,j}$ that are not divisible by $p$ .
207
+
208
+ (Recall that generators of a concept are polynomials.)
209
+
210
+ Informally, we are splitting each panel in the RPM image into 2 panels, one that contains only the entity in the
211
+
212
+ common position $p$ , and the other that contains all remaining entities not in position $p$ . This step allows us to reason about rules that involve only a portion of the panels.
213
+
214
+ Consequently, if $\mathrm{comPos}(\mathbf{J}) = \{p_1,\dots ,p_k\}$ , then we can extend the single concept matrix into a list of concept matrices $[\mathbf{J},\bar{\mathbf{J}}^{(p_1)},\hat{\mathbf{J}}^{(p_1)},\ldots ,\bar{\mathbf{J}}^{(p_k)},\hat{\mathbf{J}}^{(p_k)}]$ .
215
+
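+ The bookkeeping in this step is simple once panels are generator lists. The sketch below (ours) uses a simplified membership test in place of the pd-based definition of $\mathrm{comPos}$: a position is treated as common when its variable appears in some entity of every panel, and a generator is "divisible by $p$" exactly when the variable $p$ appears in it.
+
+ ```python
+ # Sketch: common positions across panels, and the bar-J / hat-J split.
+ POSITIONS = {"x_left", "x_right"}
+
+ def common_positions(panels):
+     return {p for p in POSITIONS
+             if all(any(p in g for g in panel) for panel in panels)}
+
+ def split_by_position(panel, p):
+     at_p     = [g for g in panel if p in g]       # bar-J: the entity at position p
+     not_at_p = [g for g in panel if p not in g]   # hat-J: all remaining entities
+     return at_p, not_at_p
+
+ panel = [frozenset({"x_two", "x_left", "x_square", "x_black", "x_avg"}),
+          frozenset({"x_two", "x_right", "x_triangle", "x_gray", "x_avg"})]
+ print(split_by_position(panel, "x_left")[0])   # just the black-square generator
+ ```
+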
216
+ For each concept matrix $\check{\mathbf{J}}$ from the extended list, we consider its $i$-th row $\check{\mathbf{J}}_i = [\check{J}_{i,1},\check{J}_{i,2},\check{J}_{i,3}]$ (left-to-right) and extract patterns from $\check{\mathbf{J}}_i$ via the 4 modules from Section 3.3.3. Let $\mathcal{P}(\check{\mathbf{J}}_i)$ be the set of all such patterns, i.e., $\mathcal{P}(\check{\mathbf{J}}_i)\coloneqq \mathcal{P}_{\mathrm{intra}}(\check{\mathbf{J}}_i)\cup \mathcal{P}_{\mathrm{inter}}(\check{\mathbf{J}}_i)\cup \mathcal{P}_{\mathrm{comp}}(\check{\mathbf{J}}_i)\cup \mathcal{P}_{\mathrm{binary}}(\check{\mathbf{J}}_i)$.
217
+
218
+ Finally, for row $i = 1,2$ , we define
219
+
220
+ $$
221
+ \mathcal{P}_{i}^{(\text{all})}(\mathbf{J}) := \bigcup_{\check{\mathbf{J}}} \left\{ \left(K, \check{\mathbf{J}}\right) \mid K \in \mathcal{P}\left(\check{\mathbf{J}}_{i}\right) \right\}, \tag{1}
222
+ $$
223
+
224
+ where the union ranges over all concept matrices $\check{\mathbf{J}}$ in the extended list, i.e. $\check{\mathbf{J}}\in [\mathbf{J},\bar{\mathbf{J}}^{(p_1)},\hat{\mathbf{J}}^{(p_1)},\dots ,\bar{\mathbf{J}}^{(p_k)},\hat{\mathbf{J}}^{(p_k)}]$ . Note that $\mathcal{P}_i^{\mathrm{(all)}}(\mathbf{J})$ can be regarded as all the patterns extracted from the $i$-th row of the original concept matrix $\mathbf{J}$ . If instead $\mathbf{J} = [J_{1,1},\ldots ,J_{3,3}]$ is a list containing 9 concepts, then we can define $\mathcal{P}_3^{\mathrm{(all)}}(\mathbf{J})$ analogously.
225
+
226
+ # Algorithm 1 Answer selection.
227
+
228
+ Inputs: Concept matrix $\mathbf{J} = [J_{1,1}\dots J_{3,2}]$ , and associated answer set $[J_{\mathrm{ans1}},\ldots ,J_{\mathrm{ans8}}]$ .
229
+
230
+ 1: Initialize comPattern $= [0, \ldots, 0]_{1 \times 8}$.
231
+ 2: Compute $\mathcal{P}_{1,2}(\mathbf{J})\coloneqq \mathcal{P}_1^{(\mathrm{all})}(\mathbf{J})\cap \mathcal{P}_2^{(\mathrm{all})}(\mathbf{J}).$ // see (1)
232
+ 3: for $i$ from 1 to 8 do
233
+ 4: $\mathbf{J} \gets [J_{1,1}, \dots, J_{3,2}, J_{\mathrm{ansi}}]$
234
+ 5: Compute $\mathcal{P}_3^{(\mathrm{all})}(\mathbf{J})$
235
+ 6: comPattern[i] $\leftarrow \left|\mathcal{P}_{1,2}(\mathbf{J})\cap \mathcal{P}_3^{\mathrm{(all)}}(\mathbf{J})\right|$
236
+ 7: return answer index $i = \operatorname{argmax}_{i'}$ comPattern $[i']$ .
237
+
238
+ # 3.4. Solving RPMs
239
+
240
+ # 3.4.1 Answer selection
241
+
242
+ In Section 3.3.4, we described how row-wise patterns can be extracted using the 4 invariance modules. Thus, a natural approach for answer selection is to determine which answer option, when inserted in place of the missing panel, would maximize the number of patterns that are common to all three rows. Consequently, answer selection is reduced to a simple optimization problem; see Algorithm 1.
243
+
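+ Algorithm 1 is a plain argmax once row-wise pattern extraction is available. The sketch below (ours) mirrors it, taking an `extract_row_patterns` function as a stand-in for the machinery of Section 3.3; that function is assumed to return a set of hashable patterns for a row of three concepts.
+
+ ```python
+ # Sketch of Algorithm 1 (answer selection).
+ def select_answer(question, answers, extract_row_patterns):
+     # question: the 8 panel concepts J_11..J_32 in row-major order
+     p12 = (extract_row_patterns(question[0:3])
+            & extract_row_patterns(question[3:6]))   # patterns shared by rows 1 and 2
+     scores = []
+     for candidate in answers:
+         row3 = question[6:8] + [candidate]
+         scores.append(len(p12 & extract_row_patterns(row3)))
+     return max(range(len(answers)), key=lambda i: scores[i])   # argmax over options
+ ```
+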
244
+ # 3.4.2 Answer generation
245
+
246
+ Since our algebraic machine reasoning framework is able to extract common patterns that are meaningful to humans, hidden in the raw RPM images, it provides a new way to generate answers without needing a given answer set. This is similar to a gifted human who is able to solve the RPM task, by first recognizing the patterns in the first two rows, then inferring what the missing panel should be. Intuitively, we are applying "inverse" operations of the 4 invariance modules to generate the concept representing the missing panel; see Algorithm 2 for an overview.
247
+
248
+ Briefly speaking, for a given RPM concept matrix $\mathbf{J}$ , we first compute the common patterns among the first two rows via $\mathcal{P}_{1,2}(\mathbf{J}) \coloneqq \mathcal{P}_1^{(\mathrm{all})}(\mathbf{J}) \cap \mathcal{P}_2^{(\mathrm{all})}(\mathbf{J})$ ; see (1). Each element in $\mathcal{P}_{1,2}(\mathbf{J})$ is a pair $(K, \check{\mathbf{J}})$ , where $K$ is a common pattern (for rows 1 and 2) specific to one attribute, and $\check{\mathbf{J}}$ is the corresponding concept matrix. (This represents the difficult step of pattern discovery by a gifted human.) Then, we go through all common patterns to compute the attribute values for the missing 9th panel. (This represents a routine consistency check of the discovered patterns; see Appendix B.2 for full algorithmic details, and B.3 for an example.)
249
+
250
+ In general, when integrating all the attribute values for $J_{3,3}$ derived from the patterns in $\mathcal{P}_{1,2}(\mathbf{J})$ , it is possible that entities (i) have multiple possible values for a single attribute; or (ii) have missing attribute values. Case (i) occurs when there are multiple patterns extracted for a single attribute, while case (ii) occurs when there are no nonconflicting patterns for this attribute. For either case, we randomly select an attribute value from the possible values.
251
+
252
+ # Algorithm 2 Answer generation.
253
+
254
+ Inputs: Concept matrix $\mathbf{J} = [J_{1,1}\dots J_{3,2}]$
255
+
256
+ 1: for $(K,\check{\mathbf{J}})\in \mathcal{P}_1^{\mathrm{(all)}}(\mathbf{J})\cap \mathcal{P}_2^{\mathrm{(all)}}(\mathbf{J})$ do // see (1)
257
+ 2: if $[\check{J}_{3,1},\check{J}_{3,2}]$ does not conflict with pattern $K$ then
258
+ 3: Compute attribute value for $\check{J}_{3,3}$ using pattern $K$.
259
+ 4: Collect all the above attribute values for $J_{3,3}$ .
260
+ 5: while $\nexists$ unique value for some attribute of an entity do
261
+ 6: Randomly choose one valid attribute value.
262
+ 7: Generate ideal $J_{3,3} \subseteq R$ .
263
+ 8: return $J_{3,3}$ and the corresponding image.
264
+
265
+ # 4. Discussion
266
+
267
+ Algebraic machine reasoning provides a fundamentally new paradigm for machine reasoning beyond numerical computation. Abstract notions in reasoning tasks are encoded very concretely as ideals, which are computable algebraic objects. We treat ideals as "actual objects of study", and we do not require numerical values to be assigned to them. This means our framework is capable of reasoning on more qualitative or abstract notions that do not naturally have associated numerical values. Novel problem-solving, such as the discovery of new abstract patterns from observations, is realized concretely as computations on ideals (e.g. computing the primary decompositions of ideals). In particular, we are not solving a system of polynomial equations, in contrast to existing applications of algebra in AI (cf. Section 2). Variables (or primitive instances) are not assigned values. We do not evaluate polynomials at input values.
268
+
269
+ Theory-wise, our proposed approach breaks new ground. We established a new connection between machine reasoning and commutative algebra, two areas that were completely unrelated previously. There is over a century's worth
270
+
271
+ of very deep results in commutative algebra that have not been tapped. Could algebraic methods be the key to tackling the long-standing fundamental questions in machine reasoning? It was only much more recently in 2014 that Léon Bottou [4] suggested that humans should "build reasoning capabilities from the ground up", and he speculated that the missing ingredient could be an algebraic approach.
272
+
273
+ Why use ideals to represent concepts? Why not use sets? Why not use symbolic expressions, e.g. polynomials? Intuitively, we think of a concept as an "umbrella term" consisting of multiple (potentially infinitely many) instances of the concept. Treating concepts as merely sets of instances is inadequate in capturing the expressiveness of human reasoning. A set-theoretic representation system with finitely many "primitive sets" can only have finitely many possible sets in total. In contrast, we proved that we can construct infinitely many concepts from only finitely many primitive concepts (Theorem 3.1). This agrees with our intuition that humans are able to express infinitely many concepts from only finitely many primitive concepts. The main reason is that the "richer" algebraic structure of ideals allows for significantly more operations on ideals, beyond set-theoretic operations. See Appendix A.4 for further discussion.
274
+
275
+ Why is our algebraic method fundamentally different from logic-based methods, e.g. those based on logic programming? At the heart of logic-based reasoning is the idea that reasoning can be realized concretely as the resolution (or inverse resolution) of logical expressions. Inherent in this idea is the notion of satisfiability; cf. [14]. Intuitively, we have a logical expression, usually expressed in a canonical normal form, and we want to assign truth values (true or false) to literals in the logical expression, so that the entire expression is satisfied (i.e. truth value is "true"); see Appendix C.1 for more discussion. In fact, much of the exciting progress in automated theorem proving [1, 13, 19, 21, 44, 55] is based on logic-based reasoning.
276
+
277
+ In contrast, algebraic machine reasoning builds upon computational algebra and computer algebra systems. At the heart of our algebraic approach is the idea that reasoning can be realized concretely as solving computational problems in algebra. Crucially, there is no notion of satisfiability. We do not assign truth values (or numerical values) to concepts in $R = \mathbb{k}[x_1, \ldots, x_n]$ . In particular, although primitive concepts $\langle x_1 \rangle, \ldots, \langle x_n \rangle$ in $R$ correspond to the variables $x_1, \ldots, x_n$ , we do not assign values to primitive concepts. Instead, ideals are treated as the "actual objects of study", and we reduce "solving a reasoning task" to "solving non-numerical computational problems involving ideals". Moreover, our framework can discover new patterns beyond the actual rules of the RPM task; see Section 5.2.
278
+
279
+ In the RPM task, we have attribute concepts representing "position", "number", "type", "size", and "color"; these are concepts that categorize the primitive instances according
280
+
281
+ <table><tr><td></td><td>Method</td><td>Avg. Acc.</td><td>Center</td><td>2×2G</td><td>3×3G</td><td>O-IC</td><td>O-IG</td><td>L-R</td><td>U-D</td></tr><tr><td>1</td><td>LSTM [46]</td><td>18.9 / 13.1</td><td>26.2 / 13.2</td><td>16.7 / 14.1</td><td>15.1 / 13.7</td><td>21.9 / 12.2</td><td>21.1 / 13.0</td><td>14.6 / 12.8</td><td>16.5 / 12.4</td></tr><tr><td>2</td><td>WReN [32]</td><td>23.8 / 34.0</td><td>29.4 / 58.4</td><td>26.8 / 38.9</td><td>23.5 / 37.7</td><td>22.5 / 38.8</td><td>21.5 / 22.6</td><td>21.9 / 21.6</td><td>21.4 / 19.7</td></tr><tr><td>3</td><td>ResNet [46]</td><td>40.3 / 53.4</td><td>44.7 / 52.8</td><td>29.3 / 41.9</td><td>27.9 / 44.3</td><td>46.2 / 63.2</td><td>35.8 / 53.1</td><td>51.2 / 58.8</td><td>47.4 / 60.2</td></tr><tr><td>4</td><td>ResNet+DRT [46]</td><td>40.4 / 59.6</td><td>46.5 / 58.1</td><td>28.8 / 46.5</td><td>27.3 / 50.4</td><td>46.0 / 69.1</td><td>34.2 / 60.1</td><td>50.1 / 65.8</td><td>49.8 / 67.1</td></tr><tr><td>5</td><td>LEN [51]</td><td>41.4 / 72.9</td><td>56.4 / 80.2</td><td>31.7 / 57.5</td><td>29.7 / 62.1</td><td>52.1 / 84.4</td><td>31.7 / 71.5</td><td>44.2 / 73.5</td><td>44.2 / 81.2</td></tr><tr><td>6</td><td>CoPINet [47]</td><td>46.1 / 91.4</td><td>54.4 / 95.1</td><td>36.8 / 77.5</td><td>31.9 / 78.9</td><td>52.2 / 98.5</td><td>42.8 / 91.4</td><td>51.9 / 99.1</td><td>52.5 / 99.7</td></tr><tr><td>7</td><td>DCNet [54]</td><td>49.4 / 93.6</td><td>57.8 / 97.8</td><td>34.1 / 81.7</td><td>35.5 / 86.7</td><td>57.0 / 99.0</td><td>42.9 / 91.5</td><td>58.5 / 99.8</td><td>60.0 / 99.8</td></tr><tr><td>8</td><td>NCD [52]</td><td>48.2 / 37.0</td><td>60.0 / 45.5</td><td>31.2 / 35.5</td><td>30.0 / 39.5</td><td>62.4 / 40.3</td><td>39.0 / 30.0</td><td>58.9 / 34.9</td><td>57.2 / 33.4</td></tr><tr><td>9</td><td>SRAN [12]</td><td>60.8 / -</td><td>78.2 / -</td><td>50.1 / -</td><td>42.4 / -</td><td>68.2 / -</td><td>46.3 / -</td><td>70.1 / -</td><td>70.3 / -</td></tr><tr><td>10</td><td>PrAE [48]</td><td>77.0 / 65.0</td><td>90.5 / 76.5</td><td>85.4 / 78.6</td><td>45.6 / 28.6</td><td>63.5 / 48.1</td><td>60.7 / 42.6</td><td>96.3 / 90.1</td><td>97.4 / 90.9</td></tr><tr><td>11</td><td>Our Method</td><td>93.2 / 92.9</td><td>99.5 / 98.8</td><td>89.6 / 91.9</td><td>89.7 / 93.1</td><td>99.6 / 98.2</td><td>74.7 / 70.1</td><td>99.7 / 99.2</td><td>99.5 / 99.1</td></tr><tr><td></td><td>Human [46]</td><td>- / 84.4</td><td>- / 95.5</td><td>- / 81.8</td><td>- / 79.6</td><td>- / 86.4</td><td>- / 81.8</td><td>- / 86.4</td><td>- / 81.8</td></tr></table>
282
+
283
+ Table 1. Performance on I-RAVEN/RAVEN. We report mean accuracy, and the accuracies for all configurations: Center, 2x2Grid, 3x3Grid, Out-InCenter, Out-InGrid, Left-Right, and Up-Down.
284
+
285
+ to their semantics, into what humans would call attributes. Intuitively, an attribute concept combines certain primitive concepts together in a manner that is "meaningful" to the task. For example, $\langle x_{\mathrm{white}},x_{\mathrm{gray}},x_{\mathrm{black}}\rangle$ is "more meaningful" than $\langle x_{\mathrm{white}},x_{\mathrm{circle}},x_{\mathrm{large}}\rangle$ as a "simpler" or "generalized" concept, since we would treat $x_{\mathrm{white}},x_{\mathrm{gray}},x_{\mathrm{black}}$ as instances of a single broader "color" concept.
286
+
287
+ Notice that the primitive concepts correspond precisely to the prediction classes of our object detection models. Such prediction classes are already implicitly identified by the available data. Consequently, our method is limited by what our perception modules can perceive. For other tasks, e.g. where text data is available, entity extraction methods can be used to identify primitive concepts. Note also that our method requires prior knowledge, since there is no training step for the reasoning module. This limitation can be mitigated if we replace user-defined functions on concepts with trainable functions optimized via deep learning.
288
+
289
+ In general, the identification of attribute concepts is task-specific, and the resulting reasoning performance would depend heavily on these identified attribute concepts. Effectively, our choice of attribute concepts would determine the inductive bias of our reasoning framework: As we decompose a concept $J$ into "simpler" concepts (i.e. primary components in $\mathrm{pd}(J)$ ), only those "simpler" concepts contained in attribute concepts are deemed "meaningful". Concretely, let $J, J' \subsetneq R$ be concepts such that $\mathrm{pd}(J) = \{J_1, \ldots, J_k\}$ and $\mathrm{pd}(J') = \{J_1', \ldots, J_\ell'\}$ , i.e. $J, J'$ have minimal primary decompositions $J = J_1 \cap \dots \cap J_k$ and $J' = J_1' \cap \dots \cap J_\ell'$ , respectively. We can examine their primary components and extract out those primary components (between the two primary decompositions) that are contained in some common attribute concept. For example, if $A$ is an attribute concept of $R$ such that $J_1 \subseteq A$ and $J_1' \subseteq A$ , then $J$ and $J'$ share a "common pattern", represented by the attribute concept $A$ .
290
+
291
+ # 5. Experiment results
292
+
293
+ To show the effectiveness of our framework, we conducted experiments on the RAVEN [46] and I-RAVEN datasets. In both datasets, RPMs are generated according to 7 configurations. We trained our perception modules on 4200 images from I-RAVEN [12] (600 from each configuration), and used them to predict attribute values of entities. The average accuracy of our perception modules is $96.24\%$ . For both datasets, we tested on 2000 instances for each configuration. Overall, our reasoning framework is fast (7 hours for 14000 instances on a 16-core 11th Gen Intel i7 CPU). See Appendix B for full experiment details.
294
+
295
+ # 5.1. Comparison with other baselines
296
+
297
+ Table 1 compares the performance of our method with 10 other baseline methods. We use the accuracies on I-RAVEN reported in [12, 52] for methods 1-7, and the accuracies on RAVEN reported in [46, 52] for methods 1-5. All the other accuracies are obtained from the original papers. As a reference, we also include the human performance on the RAVEN dataset (i.e. not I-RAVEN) as reported in [46].
298
+
299
+ # 5.2. Ambiguous instances and new patterns
300
+
301
+ Although our method outperforms all baselines, some instances have multiple answer options that are assigned equal top scores by our framework. Most of these cases occur due to the discovery of (i) "accidental" unintended rules (e.g. Fig. 3); or (ii) new patterns beyond the actual rules in the dataset (e.g. Fig. 4). Case (i) occurs because in the design of I-RAVEN, at most one rule is assigned to each attribute.
302
+
303
+ Interestingly, case (ii) reveals that our framework is able to discover completely new patterns that are not originally designed as rules for I-RAVEN. In Fig. 4, the new pattern discovered is arguably very natural to humans.
304
+
305
+ ![](images/0b84aa89685513a620c58a315cde14c223c3ff41ad1658b19c9fb942958cce67.jpg)
306
+ Figure 3. An example of an ambiguous RPM instance. The given answer is option $\mathbf{g}$ . For I-RAVEN, the type sequence ("circle", "hexagon", "pentagon") in the first two rows follows a Progression rule with consecutively decreasing type indices, so $\mathbf{g}$ could be a correct answer. (Remaining attribute values are determined by other patterns.) However, our framework assigns equal top scores to both options $\mathbf{d}$ and $\mathbf{g}$ , as a result of another inter-invariance pattern for type (the type set {"circle", "hexagon", "pentagon"} is invariant across the rows). Thus, option $\mathbf{d}$ could also be correct.
307
+
308
+ ![](images/52b0ffa3e15143405b81d9199c6fb009f6f1682a2a5207580223a33ec97f3cf1.jpg)
309
+
310
+ ![](images/48ad3b5d1770eb49e2800290786ef6892084e693a7cab2f5fd9ebb1e575f30a2.jpg)
311
+ Figure 4. An example of an RPM instance with an unexpected new pattern. The given answer is option $\mathbf{h}$ . In each row, the number of entities in the first 2 panels sum up to the number of entities in the 3rd panel, so $\mathbf{h}$ could be correct. However, our framework assigns equal top scores to both options $\mathbf{b}$ and $\mathbf{h}$ , as a result of a new interinvariance pattern for number (informally, every panel has either 1 or 2 entities). Thus option $\mathbf{b}$ could also be correct.
312
+
313
+ ![](images/e9a320df64cba168945347857829ff94f5ee0764581328a01c0fc3d78d4f1f8f.jpg)
314
+
315
+ # 5.3. Evaluation of answer generation
316
+
317
+ Every RPM instance is assumed to have a single correct answer from the given answer set. However, there are multiple other possible images that are also acceptable as correct answers. For example, images modified from the given correct answer, via random perturbations of those attributes that are not involved in any of the rules (e.g. entity angles in the I-RAVEN dataset), are also correct. All these distinct correct answers (images) can be encoded algebraically as the same concept, based on prior knowledge of which raw perceptual attributes are relevant for the RPM task. Hence, to evaluate the answer generation process proposed in Section 3.4.2, we will directly evaluate the generated concepts.
318
+
319
+ Let $J = \langle e_1,\dots ,e_k\rangle$ and $J^{\prime} = \langle e_{1}^{\prime},\ldots ,e_{\ell}^{\prime}\rangle$ be concepts
320
+
321
+ representing the ground truth answer and our generated answer, respectively. Here, each $e_i$ (or $e_i'$ ) is a monomial of the form $x_i^{(\mathrm{pos})}x_i^{(\mathrm{type})}x_i^{(\mathrm{color})}x_i^{(\mathrm{size})}$ , and represents an entity described by 4 attributes. Motivated by the well-known idea of Intersection over Union (IoU), we propose a new similarity measure between $J$ and $J'$ . In order to define analogous notions of "intersection" and "union", we first pair $e_i$ with $e_j'$ if $x_i^{(\mathrm{pos})} = x_j'^{(\mathrm{pos})}$ (i.e. same "position" values). This pairing is well-defined, since the "position" values of the entities in any panel are uniquely determined. Hence we can group all entities in $J$ and $J'$ into 3 sets:
322
+
323
+ $$
324
+ S_{1} := \left\{ (e_{i}, e_{j}') \mid e_{i} \in J,\ e_{j}' \in J',\ x_{i}^{(\mathrm{pos})} = x_{j}'^{(\mathrm{pos})} \right\};
325
+ $$
326
+
327
+ $$
328
+ S_{2} := \left\{ e_{i} \in J \mid \nexists\, e_{j}' \in J' \text{ such that } (e_{i}, e_{j}') \in S_{1} \right\};
329
+ $$
330
+
331
+ $$
332
+ S_{3} := \left\{ e_{j}' \in J' \mid \nexists\, e_{i} \in J \text{ such that } (e_{i}, e_{j}') \in S_{1} \right\}.
333
+ $$
334
+
335
+ We can interpret $S_{1}$ and $S_{1} \cup S_{2} \cup S_{3}$ as analogous notions of the "intersection" and "union" of $J$ and $J'$ , respectively. Thus, we define our similarity measure as follows:
336
+
337
+ $$
338
+ \varphi (J, J ^ {\prime}) := \frac {\sum_ {\left(e _ {i} , e _ {j} ^ {\prime}\right) \in S _ {1}} \phi \left(e _ {i} , e _ {j} ^ {\prime}\right)}{| S _ {1} | + | S _ {2} | + | S _ {3} |}; \tag {2}
339
+ $$
340
+
341
+ $$
342
+ \phi \left(e _ {i}, e _ {j} ^ {\prime}\right) := \frac {1}{4} \sum_ {a} \mathbb {1} \left(x _ {i} ^ {(a)} = x _ {j} ^ {\prime (a)}\right); \tag {3}
343
+ $$
344
+
345
+ where in (3), $a$ ranges over the 4 attributes in {pos, type, color, size}. Here, $\phi(e_i, e_j')$ is the similarity score between $e_i$ and $e_j'$ , measured by the proportion of common variables.
346
+
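+ Equations (2) and (3) translate directly into code. In the sketch below (ours), an entity is a dict over the four attributes and a panel is a list of entities; entities are paired by their "pos" value, exactly as in the definition of $S_1$.
+
+ ```python
+ # Sketch of the similarity measure of Eqs. (2)-(3).
+ ATTRS = ("pos", "type", "color", "size")
+
+ def entity_similarity(e, e2):                      # phi in Eq. (3)
+     return sum(e[a] == e2[a] for a in ATTRS) / 4.0
+
+ def panel_similarity(J, J2):                       # varphi in Eq. (2)
+     by_pos, by_pos2 = {e["pos"]: e for e in J}, {e["pos"]: e for e in J2}
+     s1 = [(by_pos[p], by_pos2[p]) for p in by_pos if p in by_pos2]   # paired entities
+     s2 = [e for p, e in by_pos.items()  if p not in by_pos2]         # only in J
+     s3 = [e for p, e in by_pos2.items() if p not in by_pos]          # only in J'
+     return sum(entity_similarity(e, e2) for e, e2 in s1) / (len(s1) + len(s2) + len(s3))
+
+ truth = [{"pos": "left", "type": "square", "color": "black", "size": "avg"}]
+ guess = [{"pos": "left",  "type": "square",   "color": "gray", "size": "avg"},
+          {"pos": "right", "type": "triangle", "color": "gray", "size": "avg"}]
+ print(panel_similarity(truth, guess))   # (3/4) / (1 + 0 + 1) = 0.375
+ ```
+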
347
+ The overall average similarity score of the generated answers is $67.7\%$ . Note that within a panel, some attribute values such as "size", "color" and "position", may be totally random for 2x2Grid, 3x3Grid, Out-InGrid (e.g. as shown in Fig. 3). Hence, achieving high similarity scores for such cases would inherently require task-specific optimization and knowledge of how the data is generated. We assume neither. This could explain why our overall similarity score is lower than our answer selection accuracy.
348
+
349
+ For examples of generated images, see Appendix B.5.
350
+
351
+ # 6. Conclusion
352
+
353
+ Algebraic machine reasoning is a reasoning framework that is well-suited for abstract reasoning. In its current form, we have used primary decompositions as a key algebraic operation to discover abstract patterns in the RPM task, via the invariance modules that we have specially designed to mimic human reasoning. The idea that "discovering common patterns" can be realized concretely as "computing primary decompositions" is rather broad, and could potentially be applied to other inferential reasoning tasks.
354
+
355
+ More generally, our algebraic approach opens up new possibilities of tapping into the vast literature of commutative algebra and computational algebra. There are numerous algebraic operations on ideals (ideal quotients, radicals, saturation, etc.) and algebraic invariants (depth, height, etc.) that have not been explored in machine reasoning (or even in AI). Can we use them to tackle other reasoning tasks?
356
+
357
+ # References
358
+
359
+ [1] Ibrahim Abdelaziz, Maxwell Crouse, Bassem Makni, Vernon Austel, Cristina Cornelio, Shajith Ikbal, Pavan Kapanipathi, Ndivhuwo Makondo, Kavitha Srinivas, Michael Witbrock, et al. Learning to guide a saturation-based theorem prover. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2022. 6
360
+ [2] Dave Bayer, André Galligo, and Mike Stillman. Gröbner bases and extension of scalars. In Computational algebraic geometry and commutative algebra (Cortona, 1991), Sympos. Math., XXXIV, pages 198-215. Cambridge Univ. Press, Cambridge, 1993. 3
361
+ [3] Yaniv Benny, Niv Pekar, and Lior Wolf. Scale-localized abstract reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12557-12565, 2021. 2
362
+ [4] Léon Bottou. From machine learning to machine reasoning. Machine learning, 94(2):133-149, 2014. 6
363
+ [5] Patricia A Carpenter, Marcel A Just, and Peter Shell. What one intelligence test measures: a theoretical account of the processing in the Raven progressive matrices test. Psychological review, 97(3):404, 1990. 1
364
+ [6] John B Carroll et al. Human cognitive abilities: A survey of factor-analytic studies. Number 1. Cambridge University Press, 1993. 1
365
+ [7] Kai Fong Ernest Chong. A closer look at the approximation capabilities of neural networks. In International Conference on Learning Representations, 2020. 2
366
+ [8] David A Cox, John Little, and Donal O'Shea. Ideals, varieties, and algorithms: an introduction to computational algebraic geometry and commutative algebra. Springer, 2015. 2
367
+ [9] Ian Davidson and Peter B. Walker. Towards fluid machine intelligence: Can we make a gifted AI? Proceedings of the AAAI Conference on Artificial Intelligence, 33(01):9760-9764, 2019. 1
368
+ [10] Mathias Drton, Bernd Sturmfels, and Seth Sullivant. Lectures on algebraic statistics, volume 39. Springer Science & Business Media, 2008. 2
369
+ [11] Eliana Duarte, Orlando Marigliano, and Bernd Sturmfels. Discrete statistical models with rational maximum likelihood estimator. Bernoulli, 27(1):135-154, 2021. 2
370
+ [12] Sheng Hu, Yuqing Ma, Xianglong Liu, Yanlu Wei, and Shihao Bai. Stratified rule-aware network for abstract visual reasoning. In Proceedings of AAAI Conference on Artificial Intelligence (AAAI), 2021. 2, 7
371
+ [13] Geoffrey Irving, Christian Szegedy, Alexander A Alemi, Niklas Eén, François Chollet, and Josef Urban. Deepmath-deep sequence models for premise selection. In Advances in Neural Information Processing Systems, pages 2235-2243, 2016. 6
372
+ [14] Joxan Jaffar and J-L Lassez. Constraint logic programming. In Proceedings of the 14th ACM SIGACT-SIGPLAN symposium on Principles of programming languages, pages 111-119, 1987. 6
373
+
374
+ [15] Marius Jahrens and Thomas Martinetz. Solving Raven's progressive matrices with multi-layer relation networks. arXiv preprint arXiv:2003.11608, 2020. 1, 2
375
+ [16] Joe Kileel, Matthew Trager, and Joan Bruna. On the expressive power of deep polynomial neural networks. Advances in neural information processing systems, 2019. 2
376
+ [17] Youngsung Kim, Jinwoo Shin, Eunho Yang, and Sung Ju Hwang. Few-shot visual reasoning with meta-analogical contrastive learning. Neurips 2020, 2020. 1
377
+ [18] Franz J Kiraly, Paul Von Bunau, Frank C Meinecke, Duncan AJ Blythe, Klaus-Robert Müller, and Kenji Fukumizu. Algebraic geometric comparison of probability distributions. Journal of Machine Learning Research, 13(3), 2012. 2
378
+ [19] Guillaume Lample and François Charton. Deep learning for symbolic mathematics. In International Conference on Learning Representations, 2020. 6
379
+ [20] Paul Larsen and Franz Kiraly. Fano schemes of generic intersections and machine learning. International Journal of Algebra and Computation, 24(07):923-933, 2014. 2
380
+ [21] Wenda Li, Lei Yu, Yuhuai Wu, and Lawrence C Paulson. Isarstep: a benchmark for high-level mathematical reasoning. In International Conference on Learning Representations, 2021. 6
381
+ [22] Shaowei Lin. Algebraic methods for evaluating integrals in Bayesian statistics. PhD thesis, UC Berkeley, 2011. 2
382
+ [23] Muyang Lyu, Ruixuan Liu, and Junyi Wang. Solving raven's progressive matrices using rnns reasoning network. In 2022 7th International Conference on Computational Intelligence and Applications (ICCIA), pages 32-37. IEEE, 2022. 2
383
+ [24] Petros Maragos, Vasileios Charisopoulos, and Emmanouil Theodosis. Tropical geometry and machine learning. Proceedings of the IEEE, 2021. 2
384
+ [25] Melanie Mitchell. Abstraction and analogy-making in artificial intelligence. arXiv preprint arXiv:2102.10717, 2021. To appear in Annals of the New York Academy of Sciences. 1
385
+ [26] Melanie Mitchell. Why AI is harder than we think. arXiv preprint arXiv:2104.12871, 2021. 1
386
+ [27] Niv Pekar, Yaniv Benny, and Lior Wolf. Generating correct answers for progressive matrices intelligence tests. Advances in Neural Information Processing Systems, 33:7390-7400, 2020. 2
387
+ [28] Patrick Perret. Children's inductive reasoning: Developmental and educational perspectives. Journal of Cognitive Education and Psychology, 14(3):389-408, 2015. 1
388
+ [29] Sylvain Petitjean. Algebraic geometry and object representation in computer vision. In Martial Hebert, Jean Ponce, Terry Boult, and Ari Gross, editors, Object Representation in Computer Vision, pages 155-165, Berlin, Heidelberg, 1995. Springer Berlin Heidelberg. 2
389
+ [30] Meng Qu, Junkun Chen, Louis-Pascal Xhonneux, Yoshua Bengio, and Jian Tang. RNNLogic: Learning logic rules for reasoning on knowledge graphs. In International Conference on Learning Representations, 2021. 1
390
+ [31] Markus Norman Rabe, Dennis Lee, Kshitij Bansal, and Christian Szegedy. Mathematical reasoning via self-supervised skip-tree training. In International Conference on Learning Representations, 2021. 1
391
+
392
+ [32] Adam Santoro, Felix Hill, David Barrett, Ari Morcos, and Timothy Lillicrap. Measuring abstract reasoning in neural networks. In International Conference on Machine Learning, pages 4477-4486, 2018. 1, 2, 7
393
+ [33] Snejana Shegheva and Ashok Goel. The structural affinity method for solving the Raven's Progressive Matrices test for intelligence. In Proceedings of the AAAI Conference on Artificial Intelligence, 2018. 1
394
+ [34] Fan Shi, Bin Li, and Xiangyang Xue. Raven's progressive matrices completion with latent gaussian process priors. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 35, pages 9612-9620, 2021. 2
395
+ [35] Jürgen Stuber. Superposition theorem proving for commutative rings. In *Automated Deduction—A Basis for Applications*, pages 31–55. Springer, 1998. 2
396
+ [36] Seth Sullivant. Algebraic geometry of gaussian bayesian networks. Advances in Applied Mathematics, 40(4):482-513, 2008. 2
397
+ [37] Sjoerd Van Steenkiste, Francesco Locatello, Jürgen Schmidhuber, and Olivier Bachem. Are disentangled representations helpful for abstract visual reasoning? In Advances in Neural Information Processing Systems, pages 14245-14258, 2019. 1
398
+ [38] Po-Wei Wang, Priya Donti, Bryan Wilder, and Zico Kolter. Satnet: Bridging deep learning and logical reasoning using a differentiable satisfiability solver. In International Conference on Machine Learning, pages 6545–6554. PMLR, 2019. 1
399
+ [39] Sumio Watanabe. Algebraic analysis for nonidentifiable learning machines. Neural Comput., 13(4):899-933, apr 2001. 2
400
+ [40] Sumio Watanabe. Algebraic geometrical methods for hierarchical learning machines. Neural Netw., 14(8):1049-1060, oct 2001. 2
401
+ [41] Sumio Watanabe. Algebraic geometry and statistical learning theory. Number 25. Cambridge university press, 2009. 2
402
+ [42] Sumio Watanabe. A widely applicable bayesian information criterion. Journal of Machine Learning Research, 14(3):867-897, 2013. 2
403
+ [43] Sumio Watanabe and Manfred Opper. Asymptotic equivalence of bayes cross validation and widely applicable information criterion in singular learning theory. Journal of machine learning research, 11(12), 2010. 2
404
+ [44] Yuhuai Wu, Albert Jiang, Jimmy Ba, and Roger Baker Grosse. Int: An inequality benchmark for evaluating generalization in theorem proving. In International Conference on Learning Representations, 2021. 6
405
+ [45] Keisuke Yamazaki and Sumio Watanabe. Singularities in mixture models and upper bounds of stochastic complexity. Neural Networks, 16(7):1029-1038, 2003. 2
406
+ [46] Chi Zhang, Feng Gao, Baoxiong Jia, Yixin Zhu, and Song-Chun Zhu. RAVEN: A dataset for relational and analogical visual reasoning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5317-5327, 2019. 1, 2, 7
407
+
408
+ [47] Chi Zhang, Baoxiong Jia, Feng Gao, Yixin Zhu, Hongjing Lu, and Song-Chun Zhu. Learning perceptual inference by contrasting. In Advances in Neural Information Processing Systems, pages 1075–1087, 2019. 1, 2, 7
409
+ [48] Chi Zhang, Baoxiong Jia, Song-Chun Zhu, and Yixin Zhu. Abstract spatial-temporal reasoning via probabilistic abduction and execution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 7
410
+ [49] Chi Zhang, Sirui Xie, Baoxiong Jia, Ying Nian Wu, Song-Chun Zhu, and Yixin Zhu. Learning algebraic representation for systematic generalization in abstract reasoning. In Computer Vision-ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23-27, 2022, Proceedings, Part XXXIX, pages 692-709. Springer, 2022. 2
411
+ [50] Liwen Zhang, Gregory Naitzat, and Lek-Heng Lim. Tropical geometry of deep neural networks. In International Conference on Machine Learning, pages 5824-5832. PMLR, 2018. 2
412
+ [51] Kecheng Zheng, Zheng-Jun Zha, and Wei Wei. Abstract reasoning with distracting features. In Advances in Neural Information Processing Systems, volume 32, 2019. 2, 7
413
+ [52] Tao Zhuo, Qiang Huang, and Mohan Kankanhalli. Unsupervised abstract reasoning for raven's problem matrices. IEEE Transactions on Image Processing, 30:8332-8341, 2021. 2, 7
414
+ [53] Tao Zhuo and Mohan Kankanhalli. Solving raven's progressive matrices with neural networks. arXiv preprint arXiv:2002.01646, 2020. 2
415
+ [54] Tao Zhuo and Mohan Kankanhalli. Effective abstract reasoning with dual-contrast network. In International Conference on Learning Representations, 2021. 2, 7
416
+ [55] Zsolt Zombori, Josef Urban, and Chad E Brown. Prolog technology reinforcement learning prover. In International Joint Conference on Automated Reasoning, pages 489-507. Springer, 2020. 6
abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d0f09a41da2a1872e3f3be6b0ae5ed255f06dbf982db6a55bbd7c6268488bd3
3
+ size 381983
abstractvisualreasoninganalgebraicapproachforsolvingravensprogressivematrices/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cd66465f7b66167406354aa022e372951492c0e8228a92e257d5b550acb931a4
3
+ size 593265
acapanticipationcaptioningwithcommonsenseknowledge/0e9d14f2-ed8b-47a0-933f-6fa45b08d07e_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e96aa323e5824666a6eeb7d54ada472ee2324d95c3c195962f2c81ac89224c25
3
+ size 70928
acapanticipationcaptioningwithcommonsenseknowledge/0e9d14f2-ed8b-47a0-933f-6fa45b08d07e_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf76defc437d31f66dac34247bf0b38be756bc6c014f0d8fe307ca2aa2558256
3
+ size 85713
acapanticipationcaptioningwithcommonsenseknowledge/0e9d14f2-ed8b-47a0-933f-6fa45b08d07e_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2703f70ec79c6d7a03998744daee70e574a895268d40443b31b3eb534a8042af
3
+ size 4281820
acapanticipationcaptioningwithcommonsenseknowledge/full.md ADDED
@@ -0,0 +1,302 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A-CAP: Anticipation Captioning with Commonsense Knowledge
2
+
3
+ Duc Minh Vo
4
+ The University of Tokyo, Japan
5
+ vmduc@nlab.cic.i.u-tokyo.ac.jp
6
+
7
+ Quoc-An Luong
8
+ The Graduate University for Advanced Studies, Japan
9
+ lqan@nii.ac.jp
10
+
11
+ Akihiro Sugimoto
12
+ National Institute of Informatics, Japan
13
+ sugimoto@nii.ac.jp
14
+
15
+ Hideki Nakayama
16
+ The University of Tokyo, Japan
17
+ nakayama@ci.i.u-tokyo.ac.jp
18
+
19
+ # Abstract
20
+
21
+ Humans possess the capacity to reason about the future based on a sparse collection of visual cues acquired over time. In order to emulate this ability, we introduce a novel task called Anticipation Captioning, which generates a caption for an unseen oracle image using a sparsely temporally-ordered set of images. To tackle this new task, we propose a model called A-CAP, which incorporates commonsense knowledge into a pre-trained vision-language model, allowing it to anticipate the caption. Through both qualitative and quantitative evaluations on a customized visual storytelling dataset, A-CAP outperforms other image captioning methods and establishes a strong baseline for anticipation captioning. We also address the challenges inherent in this task.
22
+
23
+ # 1. Introduction
24
+
25
+ When humans observe the real world, we not only capture visual information (e.g. objects), but also forecast the future from past and current observations. For example, in Fig. 1, given some photos of an attack in a hockey game, we can predict without a doubt that "the athlete will shoot the puck toward the goalie". In fact, anticipatory ability aids us in surviving in a world of volatility. This ability necessitates a significant shift from visual to cognitive understanding, which extends far beyond the scope of tasks that primarily use visible visual data, such as object detection, action recognition, and existing image captioning. As a result, a variety of new tasks have been proposed to emulate humans' anticipatory ability, such as generating future images [12,29], and action prediction [22,37]. Despite their great success, the aforementioned tasks frequently involve densely temporal information (i.e., video), which can be difficult to acquire at times, and their outcomes are not friendly to everyone, particularly those with visual impairments.
26
+
27
+ <table><tr><td></td><td>Sparsely temporally-ordered images a)–d) (input for all tasks)</td><td>Oracle image (for reference)</td></tr><tr><td>Task</td><td>Output(s) for input images</td><td>Output for oracle image</td></tr><tr><td>Image captioning</td><td>a) A man standing with a hockey stick. b) A man standing on an ice rink. c) A man holding a hockey stick. d) A group of men playing a game of hockey.</td><td>(N/A)</td></tr><tr><td>Storytelling</td><td>a, b, c, d) This breakaway was the first threat to score. The wingman took the puck to the goal but a nice play by the goalie saved the goal. Finally, the other team gets the puck deep into red zone. He is now within 20 feet of the goal.</td><td>(N/A)</td></tr><tr><td>Anticipation captioning (Ours)</td><td>(N/A)</td><td>He shoots, he scores and the game ends one to nothing.</td></tr></table>
48
+
49
+ Figure 1. Given a set of sparsely temporally-ordered images (a, b, c, d), image captioning [38] and storytelling [35] tasks generate captions for those images, while our anticipation captioning task anticipates what happens afterward. To illustrate the potential future, we show their related oracle image. It should be noted that our task only receives the same inputs as others.
50
+
51
+ In this work, we hope to dislodge the time constraints imposed by previous tasks while also looking for a more user-friendly output format. Needless to say, textual description is a potential candidate because generating text from images has been successfully explored in a variety of ways [6, 14, 21, 33, 35, 38], showing a number of applications. Furthermore, we can easily leverage recent advances in text-to-image [28] or text-to-sound [36] as a flexible transformation that will benefit other downstream tasks,
+ allowing everyone to fully grasp our output in their own way. With this in mind, we go beyond the immediately visible content of the images, proposing a new task of image captioning problems, called anticipation captioning. Anticipation captioning is to generate a caption for an unseen image (referred to as the oracle image) that is the future of a given set of sparsely temporally-ordered images. The term "sparse" means that two consecutive images are not required to be as close in time as those in a video, allowing the scene to change freely as long as the change does not disrupt the information flow of the image sequence, as seen in Fig. 1. Our task is a new branch of the image captioning problems [6, 14, 21, 35, 38]; it is to predict only captions in the future. As an example, we depict the outputs of generic image captioning, visual storytelling, and our task in Fig. 1. The image captioning model [38] generates a caption for each individual image, whereas visual storytelling [35] connects all images in a story. Our task, on the other hand, produces a caption for the oracle image that is similar to human anticipation: "he shoots, he scores, and the game ends one to nothing". Unlike [12, 22, 29, 37], anticipation captioning does not require strictly temporal information while producing a more informative output. In theory, the greater the success of this task, the greater the deductive ability of the intelligent system. Meanwhile, other applications such as incident prevention or behavior prediction can be launched.
54
+
55
+ Additionally, we propose a baseline model, A-CAP, to solve this new task rather than simply using current image captioning models, given their failures in predicting the future. We hypothesize that under common thinking, the future can be predicted from observable concepts (e.g., objects, events) appearing in the input images, implying that the future cannot be dramatically changed to the "football scene" from the "hockey scene", for instance. As a result, we make full use of commonsense knowledge to connect all detected concepts in terms of a graph while expanding the graph toward forecasted ones, creating a knowledge graph. The term "forecasted concept" refers to a concept that is not visible in the given image but related to another concept visible in the image (we can infer the forecasted concept from the related concept using common thinking). Technically, each node in our constructed graph is either a detected concept in given inputs or a forecasted one explored using the ConceptNet [30], and nodes are connected if and only if they have corresponding ConceptNet relations. After aggregating all node information with a graph neural network, we use prompt learning [39, 40] to integrate the enriched nodes into a frozen pre-trained vision-language (VL) model, successfully generating the anticipated caption. The following are our primary contributions.
56
+
57
+ - We introduce a novel task of anticipation captioning, which predicts a caption for the future from a given set of sparsely temporally-ordered images.
58
+
59
+ - For anticipation captioning, we establish a strong baseline model A-CAP, which incorporates commonsense knowledge into a pre-trained VL model.
60
+
61
+ We evaluate the effectiveness of A-CAP in both qualitative and quantitative ways, using a customized VIST dataset [14]. Extensive experiments show that A-CAP successfully generates captions for oracle images that are more accurate, descriptive, and reasonable than those generated by other captioning methods [35, 38].
62
+
63
+ # 2. Related work
64
+
65
+ Future forecasting has long been studied in computer vision. Some attempts [12, 16, 29, 34] have been made to generate future images/frames from a given video (i.e., dense time-series images). Meanwhile, some methods [22, 37] use past observations to predict future events. These methods heavily rely on the dense temporal-structure to learn visual representations, implying that such representations are different from those for sparsely temporally-ordered images. Furthermore, generated images/frames are not always of high quality [12, 16, 29, 34], and the set of predicted future events is limited [22, 37], making them difficult to apply to downstream tasks. Our method, on the other hand, accepts only sparsely temporal information as long as we can detect objects/events. Furthermore, our method is designed to generate textual descriptions that are easier to interpret than outputs by other methods [12, 16, 22, 29, 34, 37].
66
+
67
+ In NLP, there are also several approaches to predict the future: story ending generation [7, 18], temporal order anticipation [23, 24]. Though those methods use texts as inputs while our method uses images, we can think of story ending generation as an indirect way to solve our problem because we can generate a story first and then predict its ending.
68
+
69
+ Image captioning is a long-standing problem with numerous methods developed to address various purposes. Captioning models [6, 21] in an early stage aim to generate generic descriptions for given images. They are then evolved in various directions to generate dense captions [15], novel object captions [33], controllable captions [9], or visual story telling [8, 14, 35]. Anticipation captioning belongs to the image captioning family, with the exception that we predict a caption for the future. Furthermore, our method is based on recent methods [33, 38], which use a vision-language model to generate better captions. Rather than fine-tuning or retraining the model, we use prompt learning [39, 40] to replace the object tags used in the concatenated sequence of words—object tags—ROIs of VinVL [38] with our detected and forecasted concepts.
70
+
71
+ ![](images/ed4002769e088f0d8621e0fdd71e83655e9d972972784d83f3fba543491935d9.jpg)
72
+ Figure 2. The overall pipeline of our proposed A-CAP. The preprocessing step is used to build the knowledge graph, extract image features and tokenize the input words. In the knowledge graph construction, blue nodes represent the detected concepts obtained from concept detection while brown nodes represent the forecasted concepts obtained from the ConceptNet. Our network consists of a trainable graph neural network and a frozen pre-trained VinVL [38]. The outputs of the graph neural network are the enriched nodes of the knowledge graph. During inference time, the dash-dotted red part is removed.
73
+
74
+ # 3. Our approach
75
+
76
+ # 3.1. Problem statement
77
+
78
+ Our input is a set of $k$ sparsely temporally-ordered images $I_{1}, I_{2}, \ldots, I_{k}$ . It is worth noting that $I_{i}$ and $I_{i+1}$ are not necessarily strongly temporal as illustrated in Fig. 1. We assume that an image $I_{k+1}$ is an oracle image that continues the set of $k$ images, and that a caption $C_{k+1}$ corresponds to $I_{k+1}$ which is a future of $I_{1}, I_{2}, \ldots, I_{k}$ . Obviously, the oracle image is sparsely temporally-ordered with respect to the input images as we intentionally seek to anticipate the future.
79
+
80
+ Our task is to generate caption $C_{k + 1}$ using given $k$ images. The task is formally defined as follows:
81
+
82
+ $$
83
+ C_{k+1} = \operatorname{CAPTION}\left(I_{1}, I_{2}, \dots, I_{k}\right), \tag{1}
84
+ $$
85
+
86
+ where $\mathrm{CAPTION}(\cdot)$ is a captioning system that will be discussed later. Note that we produce neither captions for each input image $I_{1},\ldots ,I_{k}$ nor oracle image $I_{k + 1}$ .
87
+
88
+ # 3.2. Proposed A-CAP
89
+
90
+ # 3.2.1 Design of A-CAP
91
+
92
+ Given the progress of vision-language models in image captioning tasks, we choose VinVL [38] as our base architecture. VinVL takes a concatenated sequence of words—concepts—ROIs as input (note that words are not used during inference time; object tags are used instead of concepts in the original paper [38]). The core idea is the usage of
+ concepts, which allows better alignment between the vision and language spaces. The above observation suggests that incorporating forecasted concepts into VinVL is critical in allowing the model to generate the anticipated caption. However, simply using VinVL is not wise because it detects only concepts appearing in images. We thus find forecasted concepts based on the detected concepts. Under normal circumstances, forecasted concepts should be related to current observable concepts. Therefore, to retrieve forecasted concepts, we use commonsense knowledge, which consists of many popular concepts and their relationships.
95
+
96
+ VinVL [38] is trained on a very large dataset, making fine-tuning or re-training difficult. To avoid this difficulty, we use the prompt learning technique to train the concept embeddings only while other parameters are fixed. In what follows, we detail our model.
97
+
98
+ # 3.2.2 Network architecture
99
+
100
+ We base A-CAP on the VinVL [38] architecture. As discussed above, we use concepts as a prompt to allow the model to generate a desired caption. We can then focus on learning the embeddings for all detected and forecasted concepts. To this end, we first retrieve the forecasted concepts using the detected ones and then construct the knowledge graph that connects all concepts. This is because the graph structure is effective for learning the interactions between concepts. We use an undirected graph for simplicity where two concepts are connected as long as their relationship exists. The concept embeddings are then enhanced using a graph neural network. Next, the enriched concept embeddings are injected into a frozen VinVL to generate the caption. Fig. 2 depicts our simple yet effective A-CAP.
101
+
102
+ # 3.2.3 Modules of A-CAP
103
+
104
+ Pre-processing. The input images are pre-processed to (i) construct the knowledge graph and (ii) extract image features. We also tokenize the ground-truth captions used to train the model during training. We obtain $N$ features (ROIs) with the size of $1 \times 2054$ each after image feature extraction using Faster-RCNN [27] trained on the COCO dataset. Each image feature is fed into VinVL's embedding layer to reduce its size to $1 \times 768$ . We then take the average of all image features $\overline{\mathbf{f}} = \frac{1}{N} \sum_{i=1}^{N} \mathbf{f}_i$ to construct a context feature $(1 \times 768)$ which will be used later. Simultaneously, we obtain $L$ word embeddings of the caption $\{\mathbf{w}_i\}_{i=1}^L$ , each of which has a size of $1 \times 768$ . For more information on image feature extraction and tokenizer, see VinVL [38].
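+ The following PyTorch fragment sketches this pre-processing step. It is an illustration rather than the released implementation: the linear layer stands in for VinVL's embedding layer, and the tensors are dummies with the dimensions stated above.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ # Dummy ROI features standing in for the Faster-RCNN outputs (N regions per sample).
+ N, ROI_DIM, HIDDEN = 100, 2054, 768
+ roi_features = torch.randn(N, ROI_DIM)
+
+ # Stand-in for VinVL's embedding layer mapping each 1x2054 ROI feature to 1x768.
+ embed = nn.Linear(ROI_DIM, HIDDEN)
+
+ f = embed(roi_features)                    # (N, 768) image features f_i
+ context = f.mean(dim=0, keepdim=True)      # (1, 768) context feature (the average of all f_i)
+ ```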
105
+
106
+ We now detail knowledge graph construction. We follow Chen et al. [8] to detect concepts for each input image. Specifically, we use clarifai [1] to obtain the top-ten concepts $\{c_i\}_{i=1}^{10}$ for each image. As a result, we detect $k \times 10$ concepts in total. Then, using ConceptNet [30], we use each
+ detected concept as a query to heuristically retrieve forecasted concepts with 2-hop neighbors of the query. Since the number of forecasted concepts is large ( $>400$ ) and many of them are unrelated to input images, we employ a filtering process to retain only the informative concepts.
109
+
110
+ Let $c_i^f$ be a forecasted concept. Using a pre-trained language model RoBERTa [20], we compute a relevance score between the forecasted concept and the image context as:
111
+
112
+ $$
113
+ \rho_{c_{i}^{f}} = f_{\mathrm{head}}(f_{\mathrm{enc}}([\bar{\mathbf{f}}; \mathbf{c}_{i}^{f}])),
114
+ $$
115
+
116
+ where $\mathbf{c}_i^f = \mathrm{BERT}(c_i^f)$ is an embedding vector of the concept $c_{i}^{f}$ extracted by a pre-trained BERT [10], $[\cdot ;\cdot ]$ denotes the concatenation operator, $f_{\mathrm{enc}}$ is the encoder part of the language model, and $f_{\mathrm{head}}$ is a softmax layer. This score indicates the probability that $\mathbf{c}_i^f$ is related to $\bar{\mathbf{f}}$.
117
+
118
+ We keep $M$ forecasted concepts having high relevance scores. In total, we have $k \times 10$ detected concepts $\{c_i\}_{i=1}^{k \times 10}$ and $M$ forecasted concepts $\{c_i^f\}_{i=1}^M$ in our knowledge graph $(k \times 10 + M$ nodes). If two concepts are related in the ConceptNet [30], an undirected edge is given to connect them. For simplicity, we do not use a specific relation (e.g., has, IsA). Furthermore, a concept in $I_i$ is connected to its related concepts in the adjacent images $I_{i-1}$ and $I_{i+1}$ to ensure information flow and the awareness of the temporal order of the images. Hereafter, we use the same notation to refer to detected and forecasted concepts $\{c_i\}_{i=1}^{k \times 10 + M}$ .
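+ A minimal sketch of this filtering and graph-building step is given below. The relevance scorer and the ConceptNet relation lookup are assumed to exist and are passed in as callables; all names are ours, not the authors'.
+
+ ```python
+ import torch
+
+ def keep_top_m(forecasted_concepts, relevance_scores, M=60):
+     """Keep the M forecasted concepts with the highest relevance scores."""
+     order = torch.argsort(torch.tensor(relevance_scores), descending=True).tolist()
+     return [forecasted_concepts[i] for i in order[:M]]
+
+ def build_edges(concepts, are_related):
+     """Undirected edges between every pair of related concepts."""
+     # `are_related(a, b)` is a placeholder for a ConceptNet relation check.
+     edges = []
+     for i in range(len(concepts)):
+         for j in range(i + 1, len(concepts)):
+             if are_related(concepts[i], concepts[j]):
+                 edges.append((i, j))
+     return edges
+ ```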
119
+
120
+ Graph neural network is used to update the node embeddings through iterative message passing between neighbors on the graph. We use a graph attention network [32] to build our graph neural network. To produce the input for the graph network, we first employ pre-trained BERT [10] to embed each concept into an embedding with the size of $1 \times 768$ . To be more specific, each node embedding is calculated as $\mathbf{e}_i = \mathrm{BERT}(c_i)$ . To strengthen the connection between concepts and image context, we concatenate the node embedding and the context feature as $\mathbf{e}_i = [\mathbf{e}_i; \overline{\mathbf{f}}]$ . For brevity, we summarize the entire computation in each graph layer:
121
+
122
+ $$
123
+ \{\tilde{\mathbf{e}}_{1}^{(l)}, \ldots, \tilde{\mathbf{e}}_{k \times 10 + M}^{(l)}\} = \mathrm{GNN}(\{\mathbf{e}_{1}^{(l-1)}, \ldots, \mathbf{e}_{k \times 10 + M}^{(l-1)}\}),
124
+ $$
125
+
126
+ where $l$ indicates the current graph layer while $l - 1$ does the previous one, $\mathrm{GNN}(\cdot)$ represents a graph layer. In detail, each node is updated by:
127
+
128
+ $$
129
+ \hat{\alpha}_{ji} = \left(\mathbf{e}_{i}^{(l-1)} \mathbf{W}_{q}\right) \left(\mathbf{e}_{j}^{(l-1)} \mathbf{W}_{k}\right)^{\top},
130
+ $$
131
+
132
+ $$
133
+ \alpha_{ji} = \operatorname{SOFTMAX}\left(\hat{\alpha}_{ji} / \sqrt{D}\right),
134
+ $$
135
+
136
+ $$
137
+ \hat{\mathbf{e}}_{i}^{(l-1)} = \sum_{j \in \mathcal{N}_{i} \cup \{i\}} \alpha_{ji} \left(\mathbf{e}_{j}^{(l-1)} \mathbf{W}_{v}\right),
138
+ $$
139
+
140
+ $$
141
+ \tilde{\mathbf{e}}_{i}^{(l)} = \operatorname{LAYERNORM}\left(\mathbf{e}_{i}^{(l-1)} + \hat{\mathbf{e}}_{i}^{(l-1)} \mathbf{W}_{o}\right),
142
+ $$
143
+
144
+ where $\mathbf{W}_q, \mathbf{W}_k, \mathbf{W}_v, \mathbf{W}_o \in \mathbb{R}^{D \times D}$ are learnable matrices, $\mathcal{N}_i$ represents the neighbors of node $i$, $D = 768 + 768 = 1536$, and SOFTMAX and LAYERNORM denote the softmax function and layer normalization, respectively. We note that $\mathbf{e}_i^{(0)}$ is the initial node embedding (i.e., $[\mathbf{e}_i;\overline{\mathbf{f}}]$).
147
+
148
+ In practice, we use 2 graph layers. After the graph attention network, we add two more fully connected layers to reduce the size of each $\tilde{\mathbf{e}}_i$ to $1\times 768$ .
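+ Under our reading of the equations above, one such graph layer can be sketched in PyTorch as follows; the class and variable names are ours, and the adjacency matrix is assumed to already contain self-loops:
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ class GraphAttentionLayer(nn.Module):
+     """One graph layer following the attention equations above (illustrative sketch)."""
+     def __init__(self, dim=1536):
+         super().__init__()
+         self.q = nn.Linear(dim, dim, bias=False)   # W_q
+         self.k = nn.Linear(dim, dim, bias=False)   # W_k
+         self.v = nn.Linear(dim, dim, bias=False)   # W_v
+         self.o = nn.Linear(dim, dim, bias=False)   # W_o
+         self.norm = nn.LayerNorm(dim)
+         self.dim = dim
+
+     def forward(self, e, adj):
+         # e: (num_nodes, dim) node embeddings; adj: (num_nodes, num_nodes) 0/1 matrix with self-loops
+         scores = self.q(e) @ self.k(e).T / self.dim ** 0.5    # unnormalized attention / sqrt(D)
+         scores = scores.masked_fill(adj == 0, float("-inf"))  # restrict attention to neighbors and self
+         alpha = scores.softmax(dim=-1)
+         e_hat = alpha @ self.v(e)                             # weighted sum of neighbor values
+         return self.norm(e + self.o(e_hat))                   # residual connection + LayerNorm
+ ```
+
+ In A-CAP, two such layers are stacked, followed by two fully connected layers that project each node back to $1 \times 768$.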
149
+
150
+ Frozen VinVL. As discussed above, the concept embeddings learned from the graph neural network are used as a prompt to generate the caption. To this end, we inject all $\{\tilde{\mathbf{e}}_i\}_{i=1}^{k \times 10 + M}$ into a frozen pre-trained VinVL [38]. As a result, the input of VinVL is changed to $\{\mathbf{w}_1, \dots, \mathbf{w}_L, [\mathrm{SEP}], \tilde{\mathbf{e}}_1, \dots, \tilde{\mathbf{e}}_{k \times 10 + M}, [\mathrm{SEP}], \mathbf{f}_1, \dots, \mathbf{f}_N\}$ . We note that [SEP] is a special token used to distinguish different types of tokens. We do not feed $\mathbf{w}_i$ to the network during inference time, but instead, create $L \times [\mathrm{MASK}]$ as pseudo words. Formally, Eq. 1 becomes
151
+
152
+ $$
153
+ C_{k+1} = \operatorname{A\text{-}CAP}\left(\tilde{\mathbf{e}}_{1}, \dots, \tilde{\mathbf{e}}_{k \times 10 + M}, [\mathrm{SEP}], \mathbf{f}_{1}, \dots, \mathbf{f}_{N}\right).
154
+ $$
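+ The prompt assembly can be illustrated as below. This is only a sketch: `frozen_vlm`, `sep_emb`, and `mask_emb` are placeholders for the frozen VinVL model and its special-token embeddings, not the actual VinVL API.
+
+ ```python
+ import torch
+
+ L, N, M, k, HIDDEN = 35, 100, 60, 4, 768
+
+ mask_emb = torch.zeros(L, HIDDEN)               # L x [MASK] pseudo words used at inference
+ sep_emb = torch.zeros(1, HIDDEN)                # [SEP] embedding (placeholder)
+ concept_emb = torch.randn(k * 10 + M, HIDDEN)   # enriched concept embeddings from the GNN
+ roi_emb = torch.randn(N, HIDDEN)                # projected ROI features
+
+ prompt = torch.cat([mask_emb, sep_emb, concept_emb, sep_emb, roi_emb], dim=0)
+ # caption = frozen_vlm.generate(prompt)         # the frozen captioner would decode C_{k+1}
+ ```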
155
+
156
+ Loss function. Following previous works, we simply use cross entropy between the generated and the ground-truth captions to train the network. We do not use CIDEr optimization because the pre-trained VinVL has been well-trained on a large text-image corpus.
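+ Concretely, only the graph network and its projection layers receive gradients, while the captioner stays frozen; the loss itself is the usual token-level cross entropy, sketched here with dummy tensors in place of the model's word logits and the tokenized caption:
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ vocab_size, L = 30522, 35                                   # BERT-style vocabulary, caption length
+ logits = torch.randn(L, vocab_size, requires_grad=True)     # per-position word scores (dummy)
+ target_ids = torch.randint(0, vocab_size, (L,))             # tokenized ground-truth caption (dummy)
+
+ loss = F.cross_entropy(logits, target_ids)
+ loss.backward()
+ ```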
157
+
158
+ # 4. Experiments
159
+
160
+ # 4.1. Dataset and training details
161
+
162
+ Dataset. We use the visual storytelling dataset (VIST) [14] with a modification to evaluate our method because there is no dataset tailored for our task. The original VIST includes 210,819 photos from 10,117 Flickr albums. Given five input temporally ordered images from the same event, the corresponding five human-annotated sentences are provided as ground-truths. There are 4,098, 4,988, and 5,050 samples for training, validation, and test sets, respectively. We use the first four images of each sample as input $(k = 4)$ and the last sentence of each sample as the ground-truth caption. We keep the last image of each sample as an oracle image for reference. The training, validation, and test sets all have the same number of samples as the original dataset.
163
+
164
+ Dataset verification. We investigate the correlation between $C_{k + 1}$ and $C_1, C_2, \ldots, C_k$ (the captions corresponding to $I_1, I_2, \ldots, I_k$ ) in two ways. First, we compute the sentence cosine similarity $\mathrm{sim}(\mathrm{S}(C_{k + 1}), \mathrm{S}(C_i))$ ( $i = 1, \ldots, k$ ) and then test whether those similarities monotonically increase (i.e., $\mathrm{sim}(\mathrm{S}(C_{k + 1}), \mathrm{S}(C_i)) < \mathrm{sim}(\mathrm{S}(C_{k + 1}), \mathrm{S}(C_{i + 1}))$ ), where $\mathrm{S}(\cdot)$ is a pre-trained SentenceTransformer model [2] that outputs an embedding vector for a given sentence. We confirm that $72.69\%$ of samples follow this monotonic increase, $10.32\%$ have only one sentence similarity that violates it, and only $4.4\%$ do not comply with the monotonicity. Second, we use a pre-trained BERT model [10] to check whether $C_{i + 1}$ is the next sentence of $C_i$.
165
+
166
+ ![](images/4df5a11d4df9b733c2031dab9f979fc9ff069f92d89e366d77470e8ab0428b18.jpg)
167
+ Figure 3. Examples of generated captions obtained by all compared methods. We show the oracle images and ground-truth captions for reference purposes. VinVL [38] generates captions that are out of context with the input images. VinVL [38] + Oracle image sometimes generates reasonable captions. AREL [35] + BART [17] tends to generate a general ending for the sequence of images. In contrast, our method A-CAP predicts more accurate, descriptive, and plausible captions than others.
168
+
169
+ ![](images/d95b71f03f663a7da679e996df480ab38e6ffda75c1873e352f21a934b1dcef9.jpg)
170
+ Figure 4. The generated images obtained by using stable diffusion model [28] to generate an image from each generated caption in Fig. 3. The order of images is the same as the order of captions in Fig. 3. The images generated using our captions are close to the ground-truth ones while those by other methods are not.
171
+
172
+ sentence of $C_i$ . We see that $77.34\%$ of the samples satisfy the next sentence condition (i.e., $C_{i+1}$ is always the next
173
+
174
+ sentence of $C_i$ for all sentences in the sequence), $17.78\%$ have only one sentence that does not meet the condition, and $0.06\%$ do not satisfy the condition (i.e., $C_{i + 1}$ is never the next sentence of $C_i$ ). The above verification shows that the VIST dataset mostly meets our assumption.
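+ The monotonicity check can be reproduced in a few lines; the SentenceTransformer checkpoint below is an arbitrary choice for illustration, as a specific model is not pinned here.
+
+ ```python
+ from sentence_transformers import SentenceTransformer, util
+
+ model = SentenceTransformer("all-MiniLM-L6-v2")  # illustrative checkpoint, our assumption
+
+ def is_monotonic(captions):
+     """captions = [C_1, ..., C_k, C_{k+1}]; test that sim(C_{k+1}, C_i) grows with i."""
+     emb = model.encode(captions, convert_to_tensor=True)
+     sims = [util.cos_sim(emb[-1], emb[i]).item() for i in range(len(captions) - 1)]
+     return all(a < b for a, b in zip(sims, sims[1:]))
+ ```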
175
+
176
+ Training details. We set the length of the word sequence $L = 35$ , the number of ROIs $N = 100$ (25 ROIs for each image), the number of forecasted concepts $M = 60$ (the number of concepts is $4 \times 10 + 60 = 100$ in total).
177
+
178
+ We build A-CAP using PyTorch, in which we use the pre-trained VinVL model published by its authors [3]. We remark that we freeze all the parameters of VinVL during training time. Given the small size of our used dataset, we train the model for only 10 epochs with a batch size of 16 and a learning rate of 3e-5. It takes four hours to train our model on a single GTX-3090 GPU.
179
+
180
+ # 4.2. Compared methods and evaluation metrics
181
+
182
+ Compared methods. We carefully design methods that can be straightforwardly applied to our task. For a fair comparison, all compared methods are fine-tuned on VIST. To avoid over-tuning, we only train the methods for a few epochs and select their best checkpoints.
183
+
184
+ VinVL [38] is a cutting-edge image captioning model. We strictly adhere to its settings, but instead of a single image, we feed it the same input as our method. Comparing our method to VinVL will demonstrate the advancement of our method over the conventional image captioning model.
187
+
188
+ VinVL [38] + Oracle image is the method where VinVL uses the ground-truth oracle image in training and testing. Since we do not successfully generate oracle images using existing methods, we may regard this method as a method that sequentially generates the oracle image and caption.
189
+
190
+ AREL [35] + BART [17] is a combination of visual storytelling (AREL [35]) and story ending generation (BART [17]). Particularly, we generate a story for the input and then generate the ending sentence for that story. We compare the ending sentence to the caption by our method.
+
+ Evaluation metrics. Since our problem is an open domain generation like dialogue generation, we follow [11] to use automatic metrics to quantitatively evaluate all the methods in two aspects: accuracy and descriptiveness. For accuracy evaluation, we report referenced metrics including BLEU [25] and CIDEr [31]. Since those metrics are sensitive to the whole sentence structure [19], we also report SPICE [5], CLIPScore, and RefCLIPScore [13] to overcome the structural dependency. For descriptiveness evaluation, we adopt a self-retrieval strategy, drawing on prior work. This strategy is based on the observation that more descriptive captions with significant details frequently lead to more precise self-retrieval, i.e., retrieving the target image from a set of similar images given the generated caption. We report the refined R@1, R@5, and R@10 scores using CLIP [26] as the retriever.
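+ A minimal sketch of the CLIP-based self-retrieval score is shown below; the construction of the retrieval pool is simplified, and the CLIP variant is an assumption on our part.
+
+ ```python
+ import torch
+ import clip                      # OpenAI CLIP (https://github.com/openai/CLIP)
+ from PIL import Image
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model, preprocess = clip.load("ViT-B/32", device=device)   # variant is our assumption
+
+ def recall_at_k(caption, image_paths, target_idx, k=5):
+     """1 if the caption retrieves its own image within the top-k of the pool, else 0."""
+     images = torch.stack([preprocess(Image.open(p)) for p in image_paths]).to(device)
+     text = clip.tokenize([caption]).to(device)
+     with torch.no_grad():
+         img_f = model.encode_image(images)
+         txt_f = model.encode_text(text)
+         img_f = img_f / img_f.norm(dim=-1, keepdim=True)
+         txt_f = txt_f / txt_f.norm(dim=-1, keepdim=True)
+         sim = (img_f @ txt_f.T).squeeze(1)                  # cosine similarity per image
+     topk = sim.topk(min(k, len(image_paths))).indices.tolist()
+     return int(target_idx in topk)
+ ```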
191
+
192
+ # 4.3. Qualitative comparisons
193
+
194
+ In Fig. 3, we show some randomly selected examples of captions generated by our method as well as others. Despite its enormous success in image captioning, VinVL [38] is unable to generate the expected captions. We can see that the captions generated by VinVL are completely out of context with the input images. This observation suggests that the current image captioning model is inadequate for our task. VinVL [38] + Oracle image generates reasonable captions to some extent when the oracle images are close enough to the input images (see first and second samples). However, if the temporal information is too sparse as in the third and fourth samples, it fails to generate captions that are linked to the inputs. These results imply that even if we can generate a high-quality unseen oracle image, the model struggles to complete the task. We notice that AREL [35] + BART [17] generates a general ending for the story (e.g., having a great time). On the contrary, our method produces more accurate and reasonable captions that reflect the inputs' future. In most cases, we can see that our method accurately predicts what is likely to happen, which is close to the ground-truth captions. When we examine the third sample in greater detail, we can see that our caption is incorrect because we failed to detect the concept "falling" in the second image. However, we believe that the generated caption is still plausible under ordinary situations.
197
+
198
+ To have a better understanding of the generated captions, we use the stable diffusion model [28] implemented on the Huggingface platform [4] with the default settings to generate an image from each generated caption, and choose the first generated image for each method as shown in Fig. 4. The images obtained from our generated captions are similar to the ground-truth ones, indicating that our method generates correct anticipated captions. Furthermore, Fig. 4 demonstrates the benefits of our task to downstream tasks, specifically future image generation in this case.
199
+
200
+ # 4.4. Quantitative comparisons
201
+
202
+ The quantitative scores are summarized in Table 1, first four rows. We first assess all methods based on their accuracy. All of the results in Table 1 support the advantage of our method over the other methods. Though our method obtains the highest scores, we notice that it does not significantly outperform the other methods on referenced metrics (BLEU and CIDEr). The reason for this observation is that those metrics are calculated using ground-truth captions. Because our task is an open-domain generation, it is difficult to generate a caption that is nearly identical to the ground-truth one. However, based on the qualitative comparison in Figs. 3 and 4, we can conclude that our method outperforms the others. SPICE and the unreferenced metrics (CLIPScore, RefCLIPScore) also justify our conclusion. We see substantial improvements in these metrics, indicating that our generated captions accurately reflect the oracle images. Notably, as shown in Fig. 3, our generated captions are, without a doubt, the future of input images.
203
+
204
+ The descriptiveness of generated captions is then assessed using R@1, R@5, and R@10 scores. In comparison to VinVL [38] and AREL [35] + BART [17], our method outperforms them significantly. This is thanks to the fact that captions generated by our method are close to the ground-truth images, whereas those obtained by the other methods are not. Our method and VinVL [38] + Oracle image achieve the same level. This is not surprising, given that VinVL [38] + Oracle image generates captions directly from oracle images.
205
+
206
+ We conclude that our method is more promising than the other methods in solving the anticipation captioning task. Furthermore, the experiments highlight the shortcomings of using image captioning and story ending models in our task.
207
+
208
+ # 4.5. Detailed analysis
209
+
210
+ Ablation study. To validate the plausibility of our model design, we investigate two ablated models: A-CAP w/o GNN and A-CAP w/o context. A-CAP w/o GNN denotes the model that does not use a graph neural network (instead, we directly feed the concept embeddings $\tilde{\mathbf{e}}_i = \mathrm{BERT}(c_i)$ to the pre-trained VinVL).
211
+
212
+ Table 1. Quantitative comparison against other methods. For accuracy evaluation, we report referenced metrics (BLEU [25] (B-1, B-4), CIDEr [31]), SPICE [5], and unreferenced metrics (CLIPScore and RefCLIPScore [13]). For descriptiveness evaluation, we report top-1, top-5 and top-10 retrieval accuracy (R@1, R@5, R@10, respectively). Our method outperforms others on all metrics. Higher scores are better. Gray background indicates results obtained by our method, and $\Delta$ indicates the improvement over compared methods.
213
+
214
+ <table><tr><td rowspan="2">Method</td><td colspan="6">Accuracy</td><td colspan="3">Descriptiveness</td></tr><tr><td>B-1</td><td>B-4</td><td>CIDEr</td><td>SPICE</td><td>CLIPScore</td><td>RefCLIPScore</td><td>R@1</td><td>R@5</td><td>R@10</td></tr><tr><td>VinVL [38]</td><td>31.7</td><td>3.1</td><td>2.6</td><td>13.8</td><td>40.7</td><td>42.8</td><td>1.3</td><td>6.5</td><td>10.8</td></tr><tr><td>VinVL [38] + Oracle image</td><td>34.9</td><td>3.8</td><td>4.3</td><td>16.9</td><td>57.9</td><td>61.3</td><td>8.1</td><td>17.2</td><td>31.1</td></tr><tr><td>AREL [35] + BART [17]</td><td>30.9</td><td>2.0</td><td>3.1</td><td>11.4</td><td>37.8</td><td>39.7</td><td>1.1</td><td>5.9</td><td>9.3</td></tr><tr><td>A-CAP</td><td>37.2</td><td>6.9</td><td>4.7</td><td>20.1</td><td>65.2</td><td>70.2</td><td>8.7</td><td>18.9</td><td>31.5</td></tr><tr><td>A-CAP w/o GNN</td><td>34.8</td><td>5.2</td><td>3.7</td><td>14.5</td><td>38.2</td><td>47.3</td><td>3.6</td><td>8.7</td><td>15.4</td></tr><tr><td>A-CAP w/o context</td><td>36.1</td><td>6.2</td><td>4.2</td><td>13.9</td><td>39.8</td><td>46.9</td><td>4.1</td><td>9.5</td><td>16.1</td></tr><tr><td>Δ</td><td>2.3↑</td><td>3.1↑</td><td>0.4↑</td><td>3.2↑</td><td>7.3↑</td><td>8.9↑</td><td>0.6↑</td><td>1.7↑</td><td>0.4↑</td></tr></table>
215
+
216
+ ![](images/c77494ea00a5b260b174b60e103f9f644e8e1350144041449bb0d00372a0e68d.jpg)
217
+ Figure 5. Examples of generated captions by two ablated models: A-CAP w/o GNN, A-CAP w/o context, and full model A-CAP. We select two inputs where the detected concepts almost overlap. A-CAP w/o GNN generates captions that most likely describe the inputs. A-CAP w/o context generates captions that are far from the inputs and similar to each other.
218
+
219
+ A-CAP w/o context is the model in which we do not concatenate the node embeddings and the context feature (we instead use only the node embeddings as graph neural network inputs). We also drop the two fully connected layers on top of the graph neural network because reducing the size of embeddings is no longer required.
220
+
221
+ The last two rows of Table 1 quantify the performance of the two ablated models. When we simplify the model, the performance scores are degraded. In the case of A-CAP w/o GNN, the concept embeddings are insufficient to guide the model to generate the expected caption. As a result, the caption most likely describes the inputs as depicted in Fig. 5. The graph neural network enriches and connects concept embeddings, making them more powerful as a prompt to the model. Similarly, A-CAP w/o context breaks the connections between concepts and the context of images in general, resulting in captions that are far from the inputs and similar to each other if the detected concepts are similar (Fig. 5).
222
+
223
+ Table 2. Impact of the number of forecasted concepts on the performance of our model. Using either a large number of concepts or no concepts drops the performance drastically.
224
+
225
+ <table><tr><td rowspan="2">Number of forecasted concepts</td><td colspan="3">Accuracy</td><td colspan="3">Descriptiveness</td></tr><tr><td>SPICE</td><td>CLIPScore</td><td>RefCLIPScore</td><td>R@1</td><td>R@5</td><td>R@10</td></tr><tr><td>M = 400</td><td>5.8</td><td>15.3</td><td>12.1</td><td>1.1</td><td>3.7</td><td>7.6</td></tr><tr><td>M = 200</td><td>5.4</td><td>16.7</td><td>13.0</td><td>0.9</td><td>4.2</td><td>7.1</td></tr><tr><td>M = 100</td><td>15.7</td><td>48.6</td><td>52.4</td><td>6.2</td><td>15.7</td><td>26.6</td></tr><tr><td>M = 60 (used model)</td><td>20.1</td><td>65.2</td><td>70.2</td><td>8.7</td><td>18.9</td><td>31.5</td></tr><tr><td>M = 0</td><td>14.2</td><td>43.1</td><td>44.7</td><td>1.9</td><td>7.3</td><td>11.2</td></tr></table>
226
+
227
+ This indicates that the context feature compensates for the concepts in order to make the correct prediction. In contrast, the full model generates plausible captions.
228
+
229
+ We do not investigate the model where all the parameters are trainable since the training collapsed despite our best efforts. The reason for this failure is that the training data is too small in comparison with the one used to train VinVL.
230
+
231
+ Impact of the number of forecasted concepts. As stated above, when we search for concepts on ConceptNet, we usually have more than 400 forecasted concepts. We empirically retain $M = 60$ forecasted concepts to eliminate irrelevant concepts and balance the number of concepts and image features. We now investigate how the number of forecasted concepts affects the captions generated. To this end, we run our method through a series of scenarios using the number of forecasted concepts at 400, 200, 100, and 0.
232
+
233
+ Table 2 shows the results of all tested scenarios on accuracy and descriptiveness. We can see that retrieving a large number of concepts ( $M = 400$ or $M = 200$ ) degrades performance. The reason is obvious because when we include a larger number of irrelevant concepts, the input becomes too noisy, preventing the model from selecting essential information. The model with $M = 100$ forecasted concepts
234
+
235
+ ![](images/b7231beab833888571015dfca2a76392bff240ba5455d9bd9cc85546a5c48321.jpg)
236
+ Figure 6. A case study of samples with low scores. Though our method generates a plausible caption, it is far from the ground-truth caption. The reason is that the oracle image changes significantly from the inputs.
237
+
238
+ Finally, we examine an extreme case where no forecasted concept is employed ( $M = 0$ ). The performance drops to the same level as that of VinVL [38] (first row in Table 1). This is due to the fact that the inputs to the two models are nearly identical. This experiment confirms that the number of forecasted concepts has an effect on our performance, implying that retrieving a sufficient number of concepts results in improved effectiveness.
239
+
240
+ A case study of samples with low scores. While our method produces promising quantitative results, we notice a relatively small number of samples with low scores when delving into each sample in detail. We thus manually check those samples, as shown in Fig. 6. Given what is happening in the inputs, our generated caption is reasonable because the next step of the wedding party is "cutting a wedding cake". The ground-truth caption, in contrast, is completely different because the scene shifts from "wedding" to "nighttime". We recall that our hypothesis is that the scene does not change significantly, but in this case, it does. Though our method fails to predict the far future, it does correctly predict the near future. We may ignore such failures because they contradict our hypothesis. In fact, when we exclude those failure samples from quantitative comparison, our outperformance becomes more significant than before.
241
+
242
+ Limitations. First, our method is heavily reliant on concept detection (here, clarifai). When we are unable to detect important concepts, our method is unable to predict the correct caption, as seen in Fig. 4, third example. Second, as shown in Table 2, the performance of our method is dependent on the number of forecasted concepts from commonsense knowledge. We use a simple filtering process in this paper, namely, computing the relevance score between concept and image context and empirically retaining $M = 60$ forecasted concepts. Our strategy is effective, but it may not be optimal. To improve this issue, it is necessary to learn how to determine a suitable number of concepts. One possible solution is to learn concept selection while training the model. This is left for our future work.
243
+
244
+ # 5. Discussions
245
+
246
+ We now discuss the potential negative societal impacts of our task. While we believe our introduced task will push
+ more applications to make our lives safer and benefit downstream tasks, we have noticed that it has the potential to be abused. One of the concerns is that it will be used to predict behavior for nefarious purposes, such as criminal activity.
249
+
250
+ Our task still has some difficulties. First, to the best of our knowledge, no suitable dataset exists to serve as a benchmark. Though our used VIST dataset [14] is useful to some extent, it is originally designed for the visual storytelling task, so it does not always meet task requirements, as already seen. As a result, a new dataset for this task is required, which should cover various scenarios such as near future, far future, abnormal thinking, and rationale. We should note that owing to the labor cost of creating a dataset, we are currently using the customized VIST to assess the performance of our method. Second, evaluating the task is difficult. Although appropriate evaluation metrics for the open domain are still unavailable, our used metrics are partially effective in our task. This is because, as we do not account for the diversity of potential futures, generating a caption close to the ground-truth (BLEU, CIDEr) is a valid indicator of the model's predictive capability. Moreover, considering the dataset that we employed, CLIP-based scores are suitable for evaluating the degree of similarity between the generated captions and the oracle images, which are presumed to represent the future of the input images. In fact, our experiments show that the current metrics cannot evaluate the task thoroughly. User study may compensate for the automatic metrics, but it is expensive and subjective, as is customary. We believe that new metrics for this task can capitalize on the advantages of the vision-language space, such as CLIP [26]. Furthermore, new metrics should emphasize the rationale, which explains the reason why the model generates that caption but not another.
251
+
252
+ # 6. Conclusion
253
+
254
+ We introduced a new task, called anticipation captioning, that generates a caption for an unseen oracle image, given a sparsely temporally-ordered set of images. For this new task, we proposed a baseline model (A-CAP), which incorporates commonsense knowledge into the off-the-shelf vision-language model VinVL. We evaluated A-CAP on a customized VIST dataset, showing that A-CAP outperforms other image captioning methods. We also addressed the potential positive and negative impacts of the task as well as its challenges, in order to encourage further research.
255
+
256
+ Acknowledgement. This work was supported by the Institute of AI and Beyond of the University of Tokyo, JSPS/MEXT KAKENHI Grant Numbers JP19H04166, JP22H05015, and 22K17947, and the commissioned research (No. 225) by the National Institute of Information and Communications Technology (NICT), Japan.
257
+
258
+ # References
259
+
260
+ [1] https://github.com/soodoku/clarifai. 3
261
+ [2] https://www.sbert.net/. 4
262
+ [3] https://github.com/microsoft/Oscar. 5
263
+ [4] https://huggingface.co/spaces/stabilityai/stable-diffusion. 6
264
+ [5] Peter Anderson, Basura Fernando, Mark Johnson, and Stephen Gould. Spice: Semantic propositional image caption evaluation. In ECCV, 2016. 6, 7
265
+ [6] Peter Anderson, Xiaodong He, Chris Buehler, Damien Teney, Mark Johnson, Stephen Gould, and Lei Zhang. Bottom-up and top-down attention for image captioning and visual question answering. In CVPR, 2018. 1, 2
266
+ [7] Gang Chen, Yang Liu, Huanbo Luan, Meng Zhang, Qun Liu, and Maosong Sun. Learning to generate explainable plots for neural story generation. IEEE/ACM Trans. Audio, Speech and Lang. Proc., 2020. 2
267
+ [8] Hong Chen, Yifei Huang, Hiroya Takamura, and Hideki Nakayama. Commonsense knowledge aware concept selection for diverse and informative visual storytelling. In AAAI, 2021. 2, 3
268
+ [9] Long Chen, Zhihong Jiang, Jun Xiao, and Wei Liu. Humanlike controllable image captioning with verb-specific semantic roles. In CVPR, 2021. 2
269
+ [10] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In NAACL, 2019. 4
270
+ [11] Sarah E. Finch and Jinho D. Choi. Towards unified dialogue system evaluation: A comprehensive analysis of current evaluation protocols. In Proceedings of the 21th Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 236-245, 1st virtual meeting, July 2020. Association for Computational Linguistics. 6
271
+ [12] Danijar Hafner, Timothy Lillicrap, Jimmy Ba, and Mohammad Norouzi. Dream to control: Learning behaviors by latent imagination. *ICLR*, 2019. 1, 2
272
+ [13] Jack Hessel, Ari Holtzman, Maxwell Forbes, Ronan Le Bras, and Yejin Choi. CLIPScore: a reference-free evaluation metric for image captioning. In EMNLP, 2021. 6, 7
273
+ [14] Ting-Hao K. Huang, Francis Ferraro, Nasrin Mostafazadeh, Ishan Misra, Jacob Devlin, Aishwarya Agrawal, Ross Girshick, Xiaodong He, Pushmeet Kohli, Dhruv Batra, et al. Visual storytelling. In NAACL, 2016. 1, 2, 4, 8
274
+ [15] Justin Johnson, Andrej Karpathy, and Li Fei-Fei. Densecap: Fully convolutional localization networks for dense captioning. In CVPR, 2016. 2
275
+ [16] Vincent Le Guen and Nicolas Thome. Disentangling physical dynamics from unknown factors for unsupervised video prediction. In CVPR, 2020. 2
276
+ [17] Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In ACL, 2020. 5, 6, 7
277
+ [18] Zhongyang Li, Xiao Ding, and Ting Liu. Story ending prediction by transferable bert. In *IJCAI*, 2019. 2
278
+
279
+ [19] Lixin Liu, Jiajun Tang, Xiaojun Wan, and Zongming Guo. Generating diverse and descriptive image captions using visual paraphrases. In ICCV, 2019. 6
280
+ [20] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized BERT pretraining approach. CoRR, 2019. 4
281
+ [21] Jiasen Lu, Jianwei Yang, Dhruv Batra, and Devi Parikh. Neural baby talk. In CVPR, 2018. 1, 2
282
+ [22] Lukáš Neumann, Andrew Zisserman, and Andrea Vedaldi. Future event prediction: If and when. In CVPRW, 2019. 1, 2
283
+ [23] Qiang Ning, Zhili Feng, Hao Wu, and Dan Roth. Joint reasoning for temporal and causal relations. In ACL, 2018. 2
284
+ [24] Qiang Ning, Hao Wu, Rujun Han, Nanyun Peng, Matt Gardner, and Dan Roth. TORQUE: A reading comprehension dataset of temporal ordering questions. In EMNLP, 2020. 2
285
+ [25] Kishore Papineni, Salim Roukos, Todd Ward, and Wei-Jing Zhu. Bleu: a method for automatic evaluation of machine translation. In ACL, 2002. 6, 7
286
+ [26] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. Learning transferable visual models from natural language supervision. In ICML, 2021. 6, 8
287
+ [27] Shaoqing Ren, Kaiming He, Ross B. Girshick, and Jian Sun. Faster r-cnn: Towards real-time object detection with region proposal networks. In NIPS, 2015. 3
288
+ [28] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, 2022. 1, 5, 6
289
+ [29] Julian Schrittwieser, Ioannis Antonoglou, Thomas Hubert, Karen Simonyan, Laurent Sifre, Simon Schmitt, Arthur Guez, Edward Lockhart, Demis Hassabis, Thore Graepel, et al. Mastering atari, go, chess and shogi by planning with a learned model. Nature, 2020. 1, 2
290
+ [30] Robyn Speer, Joshua Chin, and Catherine Havasi. Concept-net 5.5: An open multilingual graph of general knowledge. In AAAI, 2017. 2, 3, 4
291
+ [31] Ramakrishna Vedantam, C. Lawrence Zitnick, and Devi Parikh. Cider: Consensus-based image description evaluation. In CVPR, 2015. 6, 7
292
+ [32] Petar Velicković, Guillem Cucurull, Arantxa Casanova, Adriana Romero, Pietro Lio, and Yoshua Bengio. Graph attention networks. *ICLR*, 2017. 4
293
+ [33] Duc Minh Vo, Hong Chen, Akihiro Sugimoto, and Hideki Nakayama. NOC-REK: novel object captioning with retrieved vocabulary from external knowledge. In CVPR, 2022. 1, 2
294
+ [34] Carl Vondrick and Antonio Torralba. Generating the future with adversarial transformers. In CVPR, 2017. 2
295
+ [35] Xin Wang, Wenhu Chen, Yuan-Fang Wang, and William Yang Wang. No metrics are perfect: Adversarial reward learning for visual storytelling. In ACL, 2018. 1, 2, 5, 6, 7
296
+ [36] Dongchao Yang, Jianwei Yu, Helin Wang, Wen Wang, Chao Weng, Yuexian Zou, and Dong Yu. Diffsound: Discrete dif
297
+
298
+ fusion model for text-to-sound generation. arXiv e-prints, 2022. 1
299
+ [37] Kuo-Hao Zen, William B. Shen, De-An Huang, Min Sun, and Juan Carlos Niebles. Visual forecasting by imitating dynamics in natural sequences. In ICCV, 2017. 1, 2
300
+ [38] Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. Vinvl: Making visual representations matter in vision-language models. In CVPR, 2021. 1, 2, 3, 4, 5, 6, 7, 8
301
+ [39] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Conditional prompt learning for vision-language models. In CVPR, 2022. 2
302
+ [40] Kaiyang Zhou, Jingkang Yang, Chen Change Loy, and Ziwei Liu. Learning to prompt for vision-language models. IJCV, 2022. 2
acapanticipationcaptioningwithcommonsenseknowledge/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2f129c92ad08d8949f4d1e2c9230a3a9758f4c05a6b5c80ce2c777c29cd81ed9
3
+ size 526234
acapanticipationcaptioningwithcommonsenseknowledge/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a0151c59c9cf8de127b3ed4c9e3c5ebe1ab44a6f292c3021911d022846eb92d
3
+ size 335864
acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/49024a39-e689-4fa1-87f4-7de949651fed_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:629344dc16f8dbfd8d3818051173f80cec083d37edd48bdaf542059538439c42
3
+ size 80495
acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/49024a39-e689-4fa1-87f4-7de949651fed_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11f20a2cdca907c07748020f2123d232ffc3e36f0d2969095861db847f9d405b
3
+ size 102785
acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/49024a39-e689-4fa1-87f4-7de949651fed_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ef7cb4085a75e886dcfca7e625e1e7f041a968a6ff2da4088f4fb617cf3e28d0
3
+ size 3744507
acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/full.md ADDED
@@ -0,0 +1,321 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Accelerated Coordinate Encoding: Learning to Relocalize in Minutes using RGB and Poses
2
+
3
+ Eric Brachmann Niantic
4
+
5
+ Tommaso Cavallari Niantic
6
+
7
+ Victor Adrian Prisacariu
8
+ Niantic, University of Oxford
9
+
10
+ ![](images/ef29a10b08e7cbf2c654e3c7cd1bfe858ca0ab5f7bb7cec34fa651ec32a136ce.jpg)
11
+ Figure 1. Mapping Time vs. Relocalization Rate. We show the mapping time of multiple state-of-the-art relocalizers on a standard dataset, 7Scenes [53]. We measure accuracy as the percentage of frames with a pose error below $5\mathrm{cm}$ and $5^{\circ}$ . Our approach, ACE, maps a new environment two orders of magnitude faster than the baseline, DSAC* [10], while being as accurate. We also report the average map size of each approach in brackets. $\dagger$ signifies that mapping needs depth, while other methods only need RGB and poses.
12
+
13
+ ![](images/7c9eb21f22d6c5cc0ab676b23b3aa65338f909a43e654e0611d1c9af2ce26b45.jpg)
14
+
15
+ ![](images/790c47869493170bb0abd20ee9121ad88336bed920ee59e7ee9a086595bf620d.jpg)
16
+
17
+ # Abstract
18
+
19
+ Learning-based visual relocalizers exhibit leading pose accuracy, but require hours or days of training. Since training needs to happen on each new scene again, long training times make learning-based relocalization impractical for most applications, despite its promise of high accuracy. In this paper we show how such a system can actually achieve the same accuracy in less than 5 minutes. We start from the obvious: a relocalization network can be split into a scene-agnostic feature backbone, and a scene-specific prediction head. Less obvious: using an MLP prediction head allows us to optimize across thousands of view points simultaneously in each single training iteration. This leads to stable and extremely fast convergence. Furthermore, we substitute effective but slow end-to-end training using a robust pose solver with a curriculum over a reprojection loss. Our approach does not require privileged knowledge, such as depth maps or a 3D model, for speedy training. Overall, our approach is up to $300x$ faster in mapping than state-of-the-art scene coordinate regression, while keeping accuracy on par. Code is available: https://nianticlabs.github.io/ace
20
+
21
+ # 1. Introduction
22
+
23
+ Time is really the only capital that any human being has, and the only thing he can't afford to lose.
24
+
25
+ Thomas Edison
26
+
27
+ Time is relative. Time spent waiting can stretch to infinity. Imagine waiting for a visual relocalizer to finally work in a new environment. It can take hours – and feel like days – until the relocalizer has finished its pre-processing of the scene. Only then can it estimate the camera's position and orientation to support real-time applications like navigation or augmented reality (AR).
28
+
29
+ Relocalizers need that extensive pre-processing to build a map of the environment that defines the coordinate space we want to relocalize in. Visual relocalizers typically build maps from sets of images of the environment, for each of which the camera pose is known. There are two prevalent families of structure-based relocalizers that meet the high accuracy requirements of applications like AR.
30
+
31
+ Sparse feature-matching approaches [12, 25, 40, 44, 48, 49, 67] need to build an explicit 3D reconstruction of a scene using structure-from-motion (SfM) software [51, 55, 63]. Even when poses of mapping images are known, the runtime of SfM for scene triangulation varies a lot, and can lie anywhere between 10 minutes and 10 hours depending on how many mapping frames are used. When mapping succeeds, feature-based relocalizers are fast at query time and accurate [44, 49]. Less refined maps can be built in real time using SLAM, if one is willing to accept the detrimental effect on accuracy [4]. In either case, the underlying maps can consume vast amounts of storage, and can reveal private information that was present in the mapping images [16, 56].
34
+
35
+ On the other hand, scene coordinate regression [5, 7, 10, 20, 31, 53, 64] learns an implicit representation of the scene via gradient descent. The resulting maps can be as small as 4MB [10], and privacy preserving [67]. But, while scene coordinate regression is on par with feature matching in terms of accuracy and relocalization time [4], the fact that it maps an environment via hours-long training of a network makes it unattractive for most applications. The state-of-the-art scene coordinate regression pipeline, DSAC* [10], requires 15 hours to reach top accuracy on a premium GPU, see Fig. 1. We can stop training at any time and check the accuracy we get but, after 5 minutes of mapping time, DSAC* has a relocalization rate in the single digits. In fact, the corresponding data point for the plot in Fig. 1 can be found at the bottom of the previous page.
36
+
37
+ The aim of this work is summarized quickly: we take a scene coordinate regression-based relocalizer, the slowest approach in terms of mapping time, and make it one of the fastest. In particular, we present Accelerated Coordinate Encoding (ACE), a schema to train scene coordinate regression in 5 minutes to state-of-the-art accuracy.
38
+
39
+ Speeding up training time normally causes moderate interest in our community, at best. This is somewhat justified in train-once-deploy-often settings. Still, learning-based visual relocalization does not fall within that category, as training needs to happen on each new scene, again. Therefore, fast training has a range of important implications:
40
+
41
+ - Mapping delay. We reduce the time between collecting mapping data, and having a top-performing relocalizer for that environment.
42
+ - Cost. Computation time is expensive. Our approach maps a scene within minutes on a budget GPU.
43
+ - Energy consumption. Extensive computation is an environmental burden. We significantly reduce the resource footprint of learning-based relocalization.
44
+ - Reproducibility. Using ACE to map all scenes of the datasets used in this paper can be done almost five times over on a budget GPU, in the time it takes DSAC* to map a single scene on a premium GPU.
45
+
46
+ We show that a thoughtful split of a standard scene coordinate regression network allows for more efficient training. In particular, we regard scene coordinate regression as a mapping from a high-dimensional feature vector to a 3D point in scene space. We show that a multi-layer perceptron (MLP) can represent that mapping well, as opposed to the convolutional networks normally deployed [7, 10, 20]. Training a scene-specific MLP allows us to optimize over many (oftentimes all available) mapping views at once in each single training iteration. This leads to very stable gradients that allow us to operate in very aggressive, high-learning-rate regimes. We couple this with a curriculum over a reprojection loss that lets the network burn in on reliable scene structures at later stages of training. This mimics end-to-end training schemes that involve differentiating through robust pose estimation during training [10], but which are much slower than our approach. We summarize our contributions:
49
+
50
+ - Accelerated Coordinate Encoding (ACE), a scene coordinate regression system that maps a new scene in 5 minutes. Previous state-of-the-art scene coordinate regression systems require hours of mapping to achieve comparable relocalization accuracy.
51
+ - ACE compiles a scene into 4MB worth of network weights. Previous scene coordinate regression systems required 7-times more storage, or had to sacrifice accuracy for scene compression.
52
+ - Our approach requires only posed RGB images for mapping. Previous fast mapping relocalizers relied on privileged knowledge like depth maps or a scene mesh for speedy mapping.
53
+
54
+ # 2. Related Work
55
+
56
+ Visual relocalization requires some representation of the environment we want to relocalize in. We refer to these representations as "maps", and the process of creating them as "mapping". Our work is predominantly concerned with the time needed for mapping, and, secondarily, with the storage demand of the maps created.
57
+
58
+ Image Retrieval and Pose Regression. Arguably the simplest form of a map is a database of mapping images and their poses. Given a query image, we look for the most similar mapping images using image retrieval [2, 42, 57], and approximate the query pose with the top retrieved mapping pose [12, 50]. Pose regression uses neural networks to either predict the absolute pose from a query image directly, or predict the relative pose between the query image and the top retrieved mapping image. All absolute and most relative pose regression methods [11, 28, 29, 52, 58, 62, 68] train scene-specific networks which can take significant time, e.g. [28] reports multiple hours per scene for PoseNet. Some relative pose regression works report results with generalist, scene-agnostic networks that do not incur additional mapping time on top of building the retrieval index [58, 62]. Map-free relocalization [3] is an extreme variation that couples scene-agnostic relative pose regression with a single
59
+
60
+ reference frame for practically instant relocalization. Recently, some authors use neural radiance fields (NeRFs) [38] for camera pose estimation [36, 66]. In its early stage, this family of methods has yet to demonstrate its merits against the corpus of existing relocalisers and on standard benchmarks. Some of the aforementioned approaches have attractive mapping times, i.e. require only little scene-specific pre-processing. But their pose accuracy falls far behind structure-based approaches that we discuss next.
61
+
62
+ Feature Matching. Feature matching-based relocalizers [12,25,40,44,49] calculate the camera pose from correspondences between the query image and 3D scene space. They establish correspondences via discrete matching of local feature descriptors. Thus, they require a 3D point cloud of an environment where each 3D point stores one or multiple feature descriptors for matching. These point clouds can be created by running SfM software, such as COLMAP [51]. Even if poses of mapping images are known in advance, e.g. from on-device visual odometry [1,24,27,39], feature triangulation with SfM can take several hours, depending on the number of mapping frames. Also, the storage requirements can be significant, mainly due to the need for storing hundreds of thousands of descriptor vectors for matching. Strategies exist to alleviate the storage burden, such as storing fewer descriptors per 3D point [26,47,49], compressing descriptors [35,65] or removing 3D points [65]. More recently, GoMatch [67] and MeshLoc [40] removed the need to store descriptors entirely by matching against the scene geometry. None of the aforementioned strategies reduce the mapping time – on the contrary, they often incur additional post-processing costs for the SfM point clouds. To reduce mapping time, one could use only a fraction of all mapping images for SfM, or reduce the image resolution. However, this would likely also affect the pose estimation accuracy.
63
+
64
+ Scene Coordinate Regression. Relocalizers in this family regress 3D coordinates in scene space for a given 2D pixel position in the query image [53]. Robust optimization over scene-to-image correspondences yields the desired query camera pose. To regress correspondences, most works rely on random forests [6, 14, 15, 53, 61] or, more recently, convolutional neural networks [5, 7, 8, 10, 13, 20, 31]. Thus, the scene representation is implicit, and the map is encoded in the weights of the neural network. This has advantages as the implicit map is privacy-preserving [56, 67]: an explicit scene representation can only be re-generated with images of the environment. Also, scene coordinate regression has small storage requirements. DSAC* [10] achieves state-of-the-art accuracy with 28MB networks, and acceptable accuracy with 4MB networks. Relocalization in large-scale environments can be challenging, but strategies exist that rely on network ensembles [8].
65
+
66
+ The main drawback of scene coordinate regression is its long mapping time, since mapping entails training a neural network for each specific scene. DSAC++ [7] reported 6 days of training for a single scene. DSAC* reduced the training time to 15 hours - given a powerful GPU. This is still one order of magnitude slower than typical feature matching approaches need to reconstruct a scene. In our work, we show how few conceptual changes to a scene coordinate regression pipeline result in a speedup of two orders of magnitude. Thus, we pave the way for deep scene coordinate regression to be useful in practical applications.
67
+
68
+ A variety of recipes allow for fast mapping if depth, rendered or measured, is given. Indeed, the original SCoRF paper [53] reported training its random forest with RGB-D images in under 10 minutes. Cavallari et al. [15] show how to adapt a pre-trained neural scene representation in real time for a new scene, but their approach requires depth inputs for the adaptation, and for relocalization. Dong et al. [20] use very few mapping frames with depth to achieve a mapping time of 2 minutes. The architecture described in [20] consists of a scene-agnostic feature backbone and a scene-specific region classification head - very similar to our setup. However, their prediction head is convolutional, and thus misses the opportunity for highly efficient training as we will show. SANet [64] is a scene coordinate regression variant that builds on image retrieval. A scene-agnostic network interpolates the coordinate maps of the top retrieved mapping frames to yield the query scene coordinates. None of the aforementioned approaches is applicable when mapping images are RGB only. Depth channels for mapping can be rendered from a dense scene mesh [7], but mesh creation would increase the mapping time. Our work is the first to show fast scene coordinate regression mapping from RGB and poses alone.
69
+
70
+ # 3. Method
71
+
72
+ Our goal is to estimate a camera pose $\mathbf{h}$ given a single RGB image $I$ . We define the camera pose as the rigid body transformation that maps coordinates in camera space $\mathbf{e}_i$ to coordinates in scene space $\mathbf{y}_i$ , therefore $\mathbf{y}_i = \mathbf{h}\mathbf{e}_i$ . We can estimate the pose from image-to-scene correspondences:
73
+
74
+ $$
75
+ \mathbf{h} = g(\mathcal{C}), \quad \text{with} \quad \mathcal{C} = \left\{ \left( \mathbf{x}_i, \mathbf{y}_i \right) \right\}, \tag{1}
76
+ $$
77
+
78
+ where $\mathcal{C}$ is the set of correspondences between 2D pixel positions $\mathbf{x}_i$ and 3D scene coordinates $\mathbf{y}_i$ . Function $g$ denotes a robust pose solver. Usually $g$ consists of a PnP minimal solver [22] in a RANSAC [21] loop, followed by refinement. Refinement consists of iterative optimization of the reprojection error over all RANSAC inliers using Levenberg-Marquardt [30, 37]. For more details concerning pose solving we refer to [10], as our focus is on correspondence prediction. To obtain correspondences, we follow the ap
79
+
80
+ ![](images/19a2b2ecc41f4bdb7b09044314024fc8242d125bbbbbc933f1fb02fece43bf3b.jpg)
81
+ Figure 2. Standard Training Loop [10]. Previous works train a coordinate regression network with one mapping image at a time. The network predicts dense scene coordinates, and is supervised with the ground truth camera pose and a reprojection loss.
82
+
83
+ proach of scene coordinate regression [53]. We learn a function to predict 3D scene points for any 2D image location:
84
+
85
+ $$
86
+ \mathbf{y}_i = f\left(\mathbf{p}_i; \mathbf{w}\right), \quad \text{with} \quad \mathbf{p}_i = \mathcal{P}\left(\mathbf{x}_i, I\right), \tag{2}
87
+ $$
88
+
89
+ where $f$ is a neural network parameterized by learnable weights $\mathbf{w}$ , and $\mathbf{p}_i$ is an image patch extracted around pixel position $\mathbf{x}_i$ from image $I$ . Therefore, $f$ implements a mapping from patches to coordinates, $f: \mathbb{R}^{C_{\mathrm{I}} \times H_{\mathrm{P}} \times W_{\mathrm{P}}} \to \mathbb{R}^3$ . We have RGB images but usually take grayscale inputs with $C_{\mathrm{I}} = 1$ . Typical patch dimensions are $H_{\mathrm{P}} = W_{\mathrm{P}} = 81\mathrm{px}$ [7-10]. For state-of-the-art architectures there is no explicit patch extraction. A fully convolutional neural network [33] with limited receptive field slides over the input image to efficiently predict dense outputs while reusing computation of neighboring pixels. However, for our subsequent discussion, the explicit patch notation will prove useful.
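+
+ To make the robust solver $g$ of Eq. 1 concrete, the following is a minimal sketch that recovers a pose from 2D-3D correspondences with OpenCV's PnP-RANSAC. It is a stand-in for the DSAC* solver referenced above, not the actual implementation; the hypothesis count and inlier threshold mirror the values reported in Sec. 4, and the synthetic test data is purely illustrative.
+
+ ```python
+ # Minimal sketch of Eq. 1: estimate the camera pose h from correspondences
+ # C = {(x_i, y_i)}. ACE uses the DSAC* solver (PnP + RANSAC + refinement);
+ # OpenCV's solvePnPRansac serves here as an illustrative stand-in.
+ import cv2
+ import numpy as np
+
+ def solve_pose(pix_xy, scene_xyz, K):
+     """pix_xy: (N,2) pixel positions x_i, scene_xyz: (N,3) scene coordinates y_i."""
+     ok, rvec, tvec, inliers = cv2.solvePnPRansac(
+         scene_xyz.astype(np.float64), pix_xy.astype(np.float64),
+         K.astype(np.float64), None,
+         iterationsCount=64,          # number of pose hypotheses (Sec. 4 uses 64)
+         reprojectionError=10.0,      # inlier threshold in pixels (Sec. 4 uses 10px)
+         flags=cv2.SOLVEPNP_P3P)
+     if not ok:
+         return None, None
+     R, _ = cv2.Rodrigues(rvec)       # solvePnP returns the scene-to-camera transform
+     h = np.eye(4)                    # invert it to obtain the camera pose h (camera -> scene)
+     h[:3, :3] = R.T
+     h[:3, 3] = (-R.T @ tvec).ravel()
+     return h, inliers
+
+ # Tiny synthetic check: project random scene points with a known (identity) pose and recover it.
+ K = np.array([[500.0, 0, 320], [0, 500.0, 240], [0, 0, 1]])
+ y = np.random.uniform(-1, 1, (100, 3)) + np.array([0, 0, 5.0])   # points in front of the camera
+ x_hom = (K @ y.T).T
+ x = x_hom[:, :2] / x_hom[:, 2:]
+ h, inliers = solve_pose(x.astype(np.float32), y.astype(np.float32), K)
+ print(np.allclose(h, np.eye(4), atol=1e-3), len(inliers))
+ ```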
90
+
91
+ We learn the function $f$ by optimizing over all mapping images $\mathcal{I}_{\mathrm{M}}$ with their ground truth poses $\mathbf{h}_i^*$ as supervision:
92
+
93
+ $$
94
+ \underset{\mathbf{w}}{\operatorname{argmin}} \sum_{I \in \mathcal{I}_{\mathrm{M}}} \sum_{i} \ell_{\pi} \left[ \mathbf{x}_i, \overbrace{f(\mathbf{p}_i; \mathbf{w})}^{\mathbf{y}_i}, \mathbf{h}_i^{*} \right], \tag{3}
95
+ $$
96
+
97
+ where $\ell_{\pi}$ is a reprojection loss that we discuss in Sec. 3.2. We optimize Eq. 3 using minibatch stochastic gradient descent. The network predicts dense scene coordinates from one mapping image at a time, and all predictions are supervised using the ground truth mapping pose, see Fig. 2.
98
+
99
+ # 3.1. Efficient Training by Gradient Decorrelation
100
+
101
+ With standard training, we optimize over predictions for thousands of patches in each training iteration – but they all come from the same image. Hence, their losses and their gradients will be highly correlated. A prediction $\mathbf{y}_i$ and the prediction for the neighboring pixel will be very similar, and so will their pixel losses and gradients.
102
+
103
+ Our key idea is to randomize patches over the entire training set, and construct training batches from many different mapping views. This decorrelates gradients within a batch and leads to a very stable training signal, robustness to high learning rates, and, ultimately, fast convergence.
104
+
105
+ A naive implementation of this idea would be slow if it resorted to explicit patch extraction [5]. The expressive power of convolutional layers, and their efficient computation using fully convolutional architectures is key for state-of-the-art scene coordinate regression. Therefore, we propose to split the regression network into a convolutional backbone, and a multi-layer perceptron (MLP) head:
106
+
107
+ $$
108
+ f\left(\mathbf{p}_i; \mathbf{w}\right) = f_{\mathrm{H}}\left(\mathbf{f}_i; \mathbf{w}_{\mathrm{H}}\right), \quad \text{with} \quad \mathbf{f}_i = f_{\mathrm{B}}\left(\mathbf{p}_i; \mathbf{w}_{\mathrm{B}}\right), \tag{4}
109
+ $$
110
+
111
+ where $f_{\mathrm{B}}$ is the backbone that predicts a high-dimensional feature $\mathbf{f}_i$ with dimensionality $C_{\mathbf{f}}$ , and $f_{\mathrm{H}}$ is the regression head that predicts scene coordinates:
112
+
113
+ $$
114
+ f_{\mathrm{B}}: \mathbb{R}^{C_{\mathrm{I}} \times H_{\mathrm{P}} \times W_{\mathrm{P}}} \rightarrow \mathbb{R}^{C_{\mathbf{f}}} \quad \text{and} \quad f_{\mathrm{H}}: \mathbb{R}^{C_{\mathbf{f}}} \rightarrow \mathbb{R}^{3}. \tag{5}
115
+ $$
116
+
117
+ Similar to [20], we argue that $f_{\mathrm{B}}$ can be implemented using a scene-agnostic convolutional network - a generic feature extractor. Going beyond [20], we argue that $f_{\mathrm{H}}$ can be implemented using an MLP instead of another convolutional network. Fig. 2 indicates our network split: convolution layers with $3 \times 3$ kernels are blue, and $1 \times 1$ convolutions are green. The latter are MLPs with shared weights. This standard network design is used in pipelines like DSAC* [10].
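+
+ The claim that $1 \times 1$ convolutions are MLP layers with weights shared across pixel positions can be verified directly. The small PyTorch check below (with an assumed feature dimensionality of 512) copies the weights of a $1 \times 1$ convolution into a linear layer and confirms that both produce the same per-pixel outputs.
+
+ ```python
+ # Tiny check that a 1x1 convolution applies the same linear map at every pixel,
+ # i.e. it is an MLP layer with weights shared across positions.
+ import torch
+ import torch.nn as nn
+
+ C_f = 512                                     # backbone feature dimensionality (assumption)
+ conv = nn.Conv2d(C_f, 3, kernel_size=1)       # 1x1 convolutional "head" layer
+ lin = nn.Linear(C_f, 3)                       # the equivalent per-pixel linear layer
+ lin.weight.data = conv.weight.data.view(3, C_f).clone()
+ lin.bias.data = conv.bias.data.clone()
+
+ feat_map = torch.randn(1, C_f, 60, 80)                      # B x C x H x W feature map
+ out_conv = conv(feat_map)                                   # dense prediction
+ out_lin = lin(feat_map.permute(0, 2, 3, 1))                 # same features, applied pixel-wise
+ assert torch.allclose(out_conv.permute(0, 2, 3, 1), out_lin, atol=1e-4)
+ ```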
118
+
119
+ Note how function $f_{\mathrm{H}}$ needs no spatial context, i.e., unlike the backbone, $f_{\mathrm{H}}$ does not need access to neighbouring pixels for its computation. Therefore, we can easily construct training batches for $f_{\mathrm{H}}$ with random samples across all mapping images. Specifically, we construct a fixed-size training buffer by running the pre-trained backbone $f_{\mathrm{B}}$ over the mapping images. This buffer contains millions of features $\mathbf{f}_i$ with their associated pixel positions $\mathbf{x}_i$ , camera intrinsics $\mathbf{K}_i$ and ground truth mapping poses $\mathbf{h}_i^*$ . We generate this buffer once, in the first minute of training. Afterwards, we start the main training loop that iterates over the buffer. At the beginning of each epoch, we shuffle the buffer to mix features (essentially patches) across all mapping data. In each training step, we construct batches of several thousand features, potentially computing a parameter update over thousands of mapping views at once. Not only is the gradient computation extremely efficient for our MLP regression head, but the gradients are also decorrelated, which allows us to use high learning rates for fast convergence. Fig. 3 shows our training procedure.
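+
+ The condensed PyTorch sketch below illustrates this two-stage procedure. The backbone, the toy mapping frames, the layer sizes and the simplified reprojection error are illustrative stand-ins rather than the actual ACE implementation; only the overall structure (buffer generation, shuffling, large decorrelated batches) follows the description above.
+
+ ```python
+ import torch
+ import torch.nn as nn
+
+ torch.manual_seed(0)
+ C_f = 512                                              # backbone feature dimensionality (assumption)
+
+ # Stand-ins for the scene-agnostic backbone f_B and the mapping data; in ACE the
+ # backbone is the pre-trained convolutional encoder and the images are real.
+ backbone = nn.Sequential(nn.Conv2d(1, C_f, 7, stride=8, padding=3), nn.ReLU()).eval()
+ head = nn.Sequential(nn.Linear(C_f, 512), nn.ReLU(),
+                      nn.Linear(512, 512), nn.ReLU(),
+                      nn.Linear(512, 3))                # scene-specific MLP head f_H
+
+ def make_frame():
+     """Hypothetical mapping frame: grayscale image, camera-to-world pose, intrinsics."""
+     return torch.rand(1, 1, 64, 80), torch.eye(4), torch.tensor([[500., 0, 40], [0, 500., 32], [0, 0, 1]])
+
+ # Stage 1: fill the training buffer with backbone features from all mapping images.
+ feats, pix, poses, Ks = [], [], [], []
+ with torch.no_grad():
+     for _ in range(8):                                 # a handful of toy mapping frames
+         img, pose, K = make_frame()
+         fmap = backbone(img)                           # 1 x C_f x H' x W' feature map
+         H, W = fmap.shape[-2:]
+         ys, xs = torch.meshgrid(torch.arange(H), torch.arange(W), indexing="ij")
+         feats.append(fmap.flatten(2)[0].t())           # (H'*W') x C_f features f_i
+         pix.append(torch.stack([xs, ys], -1).reshape(-1, 2).float() * 8 + 4)  # pixel centres x_i
+         poses.append(pose.expand(H * W, 4, 4))
+         Ks.append(K.expand(H * W, 3, 3))
+ feats, pix, poses, Ks = [torch.cat(t) for t in (feats, pix, poses, Ks)]
+
+ def reproj_error(y, x, pose, K):
+     """Pixel reprojection error of scene coordinates y under the ground-truth pose."""
+     R, t = pose[:, :3, :3], pose[:, :3, 3]
+     cam = torch.einsum("nij,nj->ni", R.transpose(1, 2), y - t)     # scene -> camera space
+     proj = torch.einsum("nij,nj->ni", K, cam)
+     return (proj[:, :2] / proj[:, 2:].clamp(min=0.1) - x).norm(dim=1)
+
+ # Stage 2: main loop over features shuffled across *all* mapping views.
+ opt = torch.optim.AdamW(head.parameters(), lr=5e-4)
+ for epoch in range(2):                                 # ACE does 16 passes over an 8M-feature buffer
+     for idx in torch.randperm(len(feats)).split(512):  # ACE uses batches of 5120 features
+         err = reproj_error(head(feats[idx]), pix[idx], poses[idx], Ks[idx])
+         loss = err.mean()                              # the curriculum loss of Sec. 3.2 goes here
+         opt.zero_grad(); loss.backward(); opt.step()
+ ```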
120
+
121
+ # 3.2. Curriculum Training
122
+
123
+ Previous state-of-the-art scene coordinate regression pipelines use a multi-stage training process. Firstly, they optimize a pixel-level reprojection loss. Secondly, they do end-to-end training, where they propagate a pose error back through a differentiable pose solver [5,7]. End-to-end training lets the network focus on reliable scene structures while ignoring outlier predictions. However, end-to-end training is extremely costly. For example, in [10], end-to-end training incurs half of the training time for $10\%$ of the parameter
124
+
125
+ ![](images/799f57794571ece1ca2fb8c38e6be1ac3c694723b4a16af0977273df59e96e39.jpg)
126
+ Figure 3. ACE Training Loop. Training consists of two stages: Buffer generation (left) and the main training loop (right). To create a training buffer, we pass mapping images through a scene-agnostic backbone that extracts high-dimensional feature vectors. Each colored box in the buffer represents one such feature, and features with the same color came from the same mapping image. In the main loop, we train a scene-specific MLP that predicts scene coordinates from backbone features. We assemble training batches from random features and their associated mapping poses. Thus, we supervise the scene-specific MLP with many, diverse mapping views in each training iteration.
127
+
128
+ updates. To mimic the effects of end-to-end training, we construct a curriculum over a much simpler pixel-wise reprojection loss. We use a moving inlier threshold throughout the training process that starts loose, and gets more restrictive as training progresses. Therefore, the network can focus on predictions that are already good, and neglect less precise predictions that would be filtered by RANSAC during pose estimation. Our training loss is based on the pixelwise reprojection loss of DSAC* [10]:
129
+
130
+ $$
131
+ \ell_{\pi}\left[\mathbf{x}_i, \mathbf{y}_i, \mathbf{h}_i^{*}\right] = \left\{ \begin{array}{ll} \hat{e}_{\pi}\left(\mathbf{x}_i, \mathbf{y}_i, \mathbf{h}_i^{*}\right) & \text{if } \mathbf{y}_i \in \mathcal{V} \\ \left\| \mathbf{y}_i - \bar{\mathbf{y}}_i \right\|_1 & \text{otherwise.} \end{array} \right. \tag{6}
132
+ $$
133
+
134
+ This loss optimizes a robust reprojection error $\hat{e}_{\pi}$ for all valid coordinate predictions $\mathcal{V}$ . Valid predictions are between $10\mathrm{cm}$ and $1000\mathrm{m}$ in front of the image plane, and have a reprojection error below $1000\mathrm{px}$ . For invalid predictions, the loss optimizes the distance to a dummy scene coordinate $\bar{\mathbf{y}}_i$ that is calculated from the ground truth camera pose assuming a fixed image depth of $10\mathrm{m}$ . The main difference between DSAC* and our approach is in the definition of the robust reprojection error $\hat{e}_{\pi}$ . DSAC* uses the reprojection error $e_{\pi}$ up to a threshold $\tau$ , and the square root of the reprojection error beyond. Instead, we use tanh clamping of the reprojection error:
135
+
136
+ $$
137
+ \hat{e}_{\pi}\left(\mathbf{x}_i, \mathbf{y}_i, \mathbf{h}_i^{*}\right) = \tau(t) \tanh\left(\frac{e_{\pi}\left(\mathbf{x}_i, \mathbf{y}_i, \mathbf{h}_i^{*}\right)}{\tau(t)}\right) \tag{7}
138
+ $$
139
+
140
+ We dynamically re-scale the tanh according to a threshold $\tau$ that varies throughout training:
141
+
142
+ $$
143
+ \tau(t) = w(t)\,\tau_{\max} + \tau_{\min}, \quad \text{with} \quad w(t) = \sqrt{1 - t^{2}}, \tag{8}
144
+ $$
145
+
146
+ where $t \in (0,1)$ denotes the relative training progress. This curriculum implements a circular schedule of threshold $\tau$ , which remains close to $\tau_{\mathrm{max}}$ in the beginning of training, and declines towards $\tau_{\mathrm{min}}$ at the end of training.
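+
+ A small sketch of Eqs. 7-8, using the $\tau_{\mathrm{min}} = 1\mathrm{px}$ and $\tau_{\mathrm{max}} = 50\mathrm{px}$ values reported in Sec. 4, shows how the soft clamping behaves early and late in training.
+
+ ```python
+ # Dynamic clamping of Eqs. 7-8: tau(t) follows a circular schedule from ~tau_max
+ # down to tau_min, and reprojection errors are softly clamped with a scaled tanh
+ # so that large residuals stop dominating the gradient as training progresses.
+ import torch
+
+ def tau(t, tau_min=1.0, tau_max=50.0):
+     """Threshold schedule; t in (0, 1) is the relative training progress."""
+     return (1.0 - t ** 2) ** 0.5 * tau_max + tau_min
+
+ def soft_clamped_error(reproj_err_px, t):
+     """Eq. 7: tanh clamping of the per-pixel reprojection error (in pixels)."""
+     scale = tau(t)
+     return scale * torch.tanh(reproj_err_px / scale)
+
+ errs = torch.tensor([0.5, 5.0, 80.0, 500.0])
+ print(soft_clamped_error(errs, t=0.05))   # early: threshold ~51px, large errors still contribute
+ print(soft_clamped_error(errs, t=0.95))   # late: threshold ~16.6px, outliers are saturated
+ ```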
147
+
148
+ # 3.3. Backbone Training
149
+
150
+ As backbone, we can use any dense feature description network [19, 32, 43, 59]. However, existing solutions are often optimized towards sparse feature matching. Their descriptors are meant to be informative at key points. In contrast, we need descriptors that are distinctive for any position in the input image. Thus, we present a simple way to train a feature description network tailored towards scene coordinate regression. We adhere to the network architecture of DSAC* [10]. We use the early convolutional layers as our backbone, and split off the subsequent MLP as our scene-specific regression head. To train the backbone, we resort to the image-level training of DSAC* [10] (cf. Fig. 2) but couple it with our training curriculum of Eq. 6.
151
+
152
+ Instead of training the backbone with one regression head for a single scene, we train it with $N$ regression heads for $N$ scenes, in parallel. This bottleneck architecture forces the backbone to predict features that are useful for a wide range of scenes. We train the backbone on 100 scenes from ScanNet [17] for 1 week, resulting in 11MB of weights that can be used to extract dense descriptors on any new scene. See the Supplement for more details on the training process.
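+
+ A minimal sketch of this bottleneck design is a single shared backbone feeding one head per training scene; the layer sizes below are placeholders, not the actual pre-training code.
+
+ ```python
+ # Shared backbone, N scene-specific heads: each training image updates only the
+ # head of its own scene plus the shared backbone. Architectures are placeholders.
+ import torch
+ import torch.nn as nn
+
+ N_SCENES, C_f = 100, 512                       # 100 ScanNet scenes, assumed feature dimensionality
+ backbone = nn.Sequential(nn.Conv2d(1, C_f, 7, stride=8, padding=3), nn.ReLU())
+ heads = nn.ModuleList(nn.Conv2d(C_f, 3, 1) for _ in range(N_SCENES))   # one head per scene
+
+ def predict(image, scene_id):
+     """Dense scene coordinates of `image` under the head of scene `scene_id`."""
+     return heads[scene_id](backbone(image))
+
+ coords = predict(torch.rand(2, 1, 96, 128), scene_id=7)   # 2 x 3 x 12 x 16 scene-coordinate map
+ ```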
153
+
154
+ # 3.4. Further Improvements
155
+
156
+ We train the entire network with half-precision floating-point weights. This gives us an additional speed boost, especially on budget GPUs. We also store our networks with float16 precision. This allows us to increase the depth of our regression heads while maintaining 4MB maps. On top of our loss curriculum (see Sec. 3.2), we use a one-cycle learning rate schedule [54], i.e. we increase the learning rate in the middle of training, and reduce it towards the end. We found a small but consistent advantage in overparameterizing the scene coordinate representation: we predict homogeneous coordinates $\mathbf{y}' = (x,y,z,w)^\top$ and apply a w-clip, enforcing $w$ to be positive via a softplus operation.
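+
+ One plausible reading of this overparameterization is sketched below: the head predicts $(x, y, z, w)$, $w$ is passed through a softplus to keep it positive, and the 3D scene coordinate is obtained by dehomogenization. The exact clamping used by ACE may differ in detail.
+
+ ```python
+ # Homogeneous output with a softplus w-clip (an illustrative reading, not the
+ # verbatim ACE code): keep the homogeneous scale positive, then dehomogenize.
+ import torch
+ import torch.nn.functional as F
+
+ def homogeneous_to_scene(y_hom):
+     xyz, w = y_hom[..., :3], y_hom[..., 3:]
+     w = F.softplus(w)                      # w-clip: enforce a positive homogeneous scale
+     return xyz / w
+
+ print(homogeneous_to_scene(torch.tensor([2.0, -4.0, 6.0, 0.0])))  # softplus(0)=ln 2, so ~(2.89, -5.77, 8.66)
+ ```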
157
+
158
+ <table><tr><td rowspan="2" colspan="2"></td><td rowspan="2">Mapping w/ Mesh/Depth</td><td rowspan="2">Mapping Time</td><td rowspan="2">Map Size</td><td colspan="2">7 Scenes</td><td colspan="2">12 Scenes</td></tr><tr><td>SfM poses</td><td>D-SLAM poses</td><td>SfM poses</td><td>D-SLAM poses</td></tr><tr><td rowspan="4">FM</td><td>AS (SIFT) [49]</td><td>No</td><td rowspan="4">~1.5h</td><td>~200MB</td><td>98.5%</td><td>68.7%</td><td>99.8%</td><td>99.6%</td></tr><tr><td>D.VLAD+R2D2 [25]</td><td>No</td><td>~1GB</td><td>95.7%</td><td>77.6%</td><td>99.9%</td><td>99.7%</td></tr><tr><td>hLoc (SP+SG) [44,45]</td><td>No</td><td>~2GB</td><td>95.7%</td><td>76.8%</td><td>100%</td><td>99.8%</td></tr><tr><td>pixLoc [46]</td><td>No</td><td>~1GB</td><td>N/A</td><td>75.7%</td><td>N/A</td><td>N/A</td></tr><tr><td rowspan="4">SCR (w/Depth)</td><td>DSAC* (Full) [10]</td><td>Yes</td><td>15h</td><td>28MB</td><td>98.2%</td><td>84.0%</td><td>99.8%</td><td>99.2%</td></tr><tr><td>DSAC* (Tiny) [10]</td><td>Yes</td><td>11h</td><td>4MB</td><td>85.6%</td><td>70.0%</td><td>84.4%</td><td>83.1%</td></tr><tr><td>SANet [64]</td><td>Yes</td><td>~2.3 min</td><td>~550MB</td><td>N/A</td><td>68.2%</td><td>N/A</td><td>N/A</td></tr><tr><td>SRC [20]</td><td>Yes</td><td>2 min†</td><td>40MB</td><td>81.1%</td><td>55.2%</td><td>N/A</td><td>N/A</td></tr><tr><td rowspan="3">SCR</td><td>DSAC* (Full) [10]</td><td>No</td><td>15h</td><td>28MB</td><td>96.0%</td><td>81.1%</td><td>99.6%</td><td>98.8%</td></tr><tr><td>DSAC* (Tiny) [10]</td><td>No</td><td>11h</td><td>4MB</td><td>84.3%</td><td>69.1%</td><td>81.9%</td><td>81.6%</td></tr><tr><td>ACE (ours)</td><td>No</td><td>5 min</td><td>4MB</td><td>97.1%</td><td>80.8%</td><td>99.9%</td><td>99.6%</td></tr></table>
159
+
160
+ Table 1. Indoor Relocalization Results. We report the percentage of frames below a $5\mathrm{cm},5^{\circ}$ pose error. Best results in bold for the "SCR" group, second best results underlined. We list the time needed for mapping, the map size, and whether depth (rendered or measured) is needed for mapping. See the main text and Supp. for details on these numbers. $\dagger$ does not include time needed to pre-cluster the scene.
161
+
162
+ # 4. Experiments
163
+
164
+ We implement our approach in PyTorch [41], based on the public code of DSAC* [10]. We list our main parameter choices here, and refer to the Supplement for more details. We create a training buffer of 8M backbone features in 1 minute, randomly sampled from mapping images. In the remaining 4 minutes of training, we do 16 complete passes over the training buffer with a batch size of 5120. Our training curriculum starts with a soft threshold of $\tau_{\mathrm{max}} = 50\mathrm{px}$ and ends with $\tau_{\mathrm{min}} = 1\mathrm{px}$. We optimize using AdamW [34] with a learning rate between $5 \cdot 10^{-4}$ and $5 \cdot 10^{-3}$ and a one-cycle schedule [54]. We reuse the DSAC* robust pose estimator with 64 RANSAC hypotheses and a 10px inlier threshold.
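+
+ The optimizer and schedule described above can be set up in PyTorch roughly as follows; the head definition and the derived step count are placeholders used only to make the snippet self-contained.
+
+ ```python
+ # Sketch of the optimizer setup: AdamW with a one-cycle learning rate schedule
+ # peaking at 5e-3, over 16 passes of an 8M-feature buffer in batches of 5120.
+ import torch
+ import torch.nn as nn
+
+ head = nn.Sequential(nn.Linear(512, 512), nn.ReLU(), nn.Linear(512, 3))   # placeholder head
+ buffer_size, batch_size, epochs = 8_000_000, 5120, 16
+ steps = (buffer_size // batch_size) * epochs
+
+ opt = torch.optim.AdamW(head.parameters(), lr=5e-4)
+ sched = torch.optim.lr_scheduler.OneCycleLR(opt, max_lr=5e-3, total_steps=steps)
+ # inside the training loop, after each optimizer step:
+ #   opt.step(); sched.step()
+ ```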
165
+
166
+ # 4.1. Indoor Relocalization
167
+
168
+ We conducted experiments on 7Scenes [53] and 12Scenes [60], two indoor relocalization datasets. They provide mapping and query images for several small-scale indoor rooms. In these environments, speedy mapping is particularly desirable. If mapping takes hours or days, the room might have changed, or the user might have wandered off. Two sets of ground truth poses are available [4], one from running depth-based SLAM [18, 27], one from running SfM [51]. We report results on both because relocalizer performance can be biased towards one or the other [4].
169
+
170
+ Our main comparison is to DSAC* [10], our baseline. We include other top-performing methods as well, such as feature matching (FM) approaches, and scene coordinate regression (SCR) pipelines that use depth during mapping. We list results in Table 1. Multiple approaches achieve high accuracy, but require vastly different resources. ACE is the only approach that 1) achieves top accuracy 2) in less than 10 minutes per scene 3) with less than 10MB per scene. At the same time, it does not need depth - either measured or rendered from a mesh - for mapping.
171
+
172
+ Note: Comparing mapping times and map sizes is a difficult endeavour. The exact numbers will depend on hardware, implementation details and hyper-parameters. For our main baseline, DSAC*, we made sure that the numbers are accurate by re-running their public code on our hardware. For the other approaches, which we mainly list for context, we resort to numbers given in their respective papers, or estimated numbers based on publicly available statistics. We detail our reasoning for each method in the Supplement. As such, these numbers should be taken with a grain of salt. But we are confident that the order of magnitude is correct, as confirmed by cross-referencing with other publications.
173
+
174
+ # 4.2. Outdoor Relocalization
175
+
176
+ Cambridge Landmarks [29]. This dataset collects mapping and query images of buildings across the old town of Cambridge. Ground truth poses stem from reconstructing mapping and query images jointly using SfM [63]. We report our results in Table 2. Feature-matching approaches do very well on this dataset, presumably due to their similarity to the SfM reference algorithm [4]. ACE performs reasonably well compared to DSAC* considering the spatial extent of each scene and our 4MB memory footprint. We provide a variant of ACE, denoted *Poker*, where we split a scene into four ACE models, and return the pose estimate with the largest inlier count at query time (see Supp. for details). *Poker* outperforms DSAC* on average while still being comparatively lean w.r.t. mapping time and storage.
177
+
178
+ **Wayspots.** The previous datasets are not ideal for fully showcasing the advantages of our method. Their mapping poses either stem from D-SLAM, where depth would be available, or SfM reconstruction, where 3D point clouds would be available. However, RGB-based visual odometry runs on millions of phones [1, 24] providing mapping images and poses, but neither depth nor full 3D point clouds
179
+
180
+ <table><tr><td rowspan="2" colspan="2"></td><td rowspan="2">Mapping w/ Mesh/Depth</td><td rowspan="2">Mapping Time</td><td rowspan="2">Map Size</td><td colspan="5">Cambridge Landmarks</td><td rowspan="2">Average (cm / °)</td></tr><tr><td>Court</td><td>King&#x27;s</td><td>Hospital</td><td>Shop</td><td>St. Mary&#x27;s</td></tr><tr><td rowspan="5">FM</td><td>AS (SIFT) [49]</td><td>No</td><td rowspan="5">~35min</td><td>~200MB</td><td>24/0.1</td><td>13/0.2</td><td>20/0.4</td><td>4/0.2</td><td>8/0.3</td><td>14/0.2</td></tr><tr><td>hLoc (SP+SG) [44,45]</td><td>No</td><td>~800MB</td><td>16/0.1</td><td>12/0.2</td><td>15/0.3</td><td>4/0.2</td><td>7/0.2</td><td>11/0.2</td></tr><tr><td>pixLoc [46]</td><td>No</td><td>~600MB</td><td>30/0.1</td><td>14/0.2</td><td>16/0.3</td><td>5/0.2</td><td>10/0.3</td><td>15/0.2</td></tr><tr><td>GoMatch [67]</td><td>No</td><td>~12MB</td><td>N/A</td><td>25/0.6</td><td>283/8.1</td><td>48/4.8</td><td>335/9.9</td><td>N/A</td></tr><tr><td>HybridSC [12]</td><td>No</td><td>~1MB</td><td>N/A</td><td>81/0.6</td><td>75/1.0</td><td>19/0.5</td><td>50/0.5</td><td>N/A</td></tr><tr><td rowspan="2">APR</td><td>PoseNet17 [28]</td><td>No</td><td>4-24h</td><td>50MB</td><td>683/3.5</td><td>88/1.0</td><td>320/3.3</td><td>88/3.8</td><td>157/3.3</td><td>267/3.0</td></tr><tr><td>MS-Transformer [52]</td><td>No</td><td>~7h</td><td>~18MB</td><td>N/A</td><td>83/1.5</td><td>181/2.4</td><td>86/3.1</td><td>162/4.0</td><td>N/A</td></tr><tr><td rowspan="3">SCR w/ Depth</td><td>DSAC* (Full) [10]</td><td>Yes</td><td>15h</td><td>28MB</td><td>49/0.3</td><td>15/0.3</td><td>21/0.4</td><td>5/0.3</td><td>13/0.4</td><td>21/0.3</td></tr><tr><td>SANet [64]</td><td>Yes</td><td>~1min</td><td>~260MB</td><td>328/2.0</td><td>32/0.5</td><td>32/0.5</td><td>10/0.5</td><td>16/0.6</td><td>84/0.8</td></tr><tr><td>SRC [20]</td><td>Yes</td><td>2 min†</td><td>40MB</td><td>81/0.5</td><td>39/0.7</td><td>38/0.5</td><td>19/1.0</td><td>31/1.0</td><td>42/0.7</td></tr><tr><td rowspan="4">SCR</td><td>DSAC* (Full) [10]</td><td>No</td><td>15h</td><td>28MB</td><td>34/0.2</td><td>18/0.3</td><td>21/0.4</td><td>5/0.3</td><td>15/0.6</td><td>19/0.4</td></tr><tr><td>DSAC* (Tiny) [10]</td><td>No</td><td>11h</td><td>4MB</td><td>98/0.5</td><td>27/0.4</td><td>33/0.6</td><td>11/0.5</td><td>56/1.8</td><td>45/0.8</td></tr><tr><td>ACE (ours)</td><td>No</td><td>5 min</td><td>4MB</td><td>43/0.2</td><td>28/0.4</td><td>31/0.6</td><td>5/0.3</td><td>18/0.6</td><td>25/0.4</td></tr><tr><td>Poker (Quad ACE Ensemble)</td><td>No</td><td>20 min</td><td>16MB</td><td>28/0.1</td><td>18/0.3</td><td>25/0.5</td><td>5/0.3</td><td>9/0.3</td><td>17/0.3</td></tr></table>
181
+
182
+ with feature descriptors. Therefore, we curate a new relocalization dataset, denoted Wayspots, from a publicly available corpus of phone scans. In particular, we select 10 consecutive scenes from the training split of the MapFree dataset [3]. Each scene depicts a small outdoor place and comes with two full, independent scans. We use one for mapping and one for query. Arnold et al. [3] reconstructed ground truth poses of both scans using SfM [51]. We register the original phone trajectories to the SfM poses. In our experiments, we used the original phone trajectories for mapping, and SfM poses solely for evaluation. We refer to the Supplement for details on the dataset and its curation. We show an overview of all scenes, and our main results in Fig. 4. We outperform DSAC* on average while being two orders of magnitude faster in mapping. In Fig. 5, we show a qualitative comparison of ACE and DSAC* on the "Rock" scene, including a variant of DSAC* that was stopped after 5 minutes mapping.
183
+
184
+ # 4.3. Analysis
185
+
186
+ Mapping Time. The speed of training a network depends on the GPU model. In Table 3, we compare mapping times of ACE and DSAC* on a premium GPU (NVIDIA V100) with a more affordable model (NVIDIA T4)<sup>1</sup>. For DSAC*, mapping takes twice as long on the cheaper GPU. The difference is smaller for the "Tiny" version of DSAC* that uses a leaner network, but mapping still takes hours. Conversely, ACE experiences only a $10\%$ slowdown on the T4 GPU, since it only has to train the regression head, in half precision.
187
+
188
+ Table 2. Cambridge Landmarks [29] Results. We report median rotation and position errors. Best results in bold for the "SCR" group, second best results underlined. Methods using depth for mapping rely on a dense multi-view stereo mesh that took hours to compute [7,29]. See the main text and Supp. for details about mapping times and storage demands. $\dagger$ does not include time needed to pre-cluster the scene.
189
+
190
+ <table><tr><td>GPU</td><td>Method</td><td>Mapping Time</td><td>ACE Speed-up</td></tr><tr><td rowspan="3">NVIDIA V100</td><td>ACE</td><td>291s</td><td>-</td></tr><tr><td>DSAC* (Tiny)</td><td>11h</td><td>130x</td></tr><tr><td>DSAC* (Full)</td><td>15h</td><td>180x</td></tr><tr><td rowspan="3">NVIDIA T4</td><td>ACE</td><td>327s</td><td>-</td></tr><tr><td>DSAC* (Tiny)</td><td>14h</td><td>150x</td></tr><tr><td>DSAC* (Full)</td><td>28h</td><td>310x</td></tr></table>
191
+
192
+ Table 3. Mapping Times. Comparison of methods on different GPUs. T4 GPUs offer lower compute capability but are considerably cheaper. DSAC* (Full) uses a 28MB network. DSAC* (Tiny) uses a smaller, 4MB network.
193
+
194
+ Map Size. The network architecture of our regression head determines the size of our maps. In particular, we can vary the number of layers in our MLP; see Fig. 6 for an analysis. Smaller maps of 2.5MB achieve slightly lower accuracy, but still yield comfortable relocalization rates above $95\%$ on 7Scenes. Using larger maps of 5.5MB does not pay off: because of their increased size, these networks undergo fewer passes over the training buffer in 5 minutes. Note how ACE achieves $\sim 80\%$ accuracy after a single epoch (75s).
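+
+ Since the networks are stored in float16 (Sec. 3.4), the map size follows directly from the head's parameter count at 2 bytes per weight. The sketch below uses placeholder layer widths to illustrate how MLP depth translates into megabytes; it is not the exact ACE head architecture.
+
+ ```python
+ # Back-of-the-envelope map size: 2 bytes per float16 head parameter.
+ import torch.nn as nn
+
+ def head_mb(num_hidden_layers, width=512):
+     layers = [nn.Linear(width, width) for _ in range(num_hidden_layers)] + [nn.Linear(width, 3)]
+     n_params = sum(p.numel() for layer in layers for p in layer.parameters())
+     return 2 * n_params / 1e6                 # MB at 2 bytes per float16 weight
+
+ for depth in (4, 6, 9):
+     print(depth, f"{head_mb(depth):.1f} MB")  # roughly 2.1, 3.2, 4.7 MB with these widths
+ ```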
195
+
196
+ More Experiments in the Supplement. We demonstrate the positive impact of gradient de-correlation, predicting homogeneous scene coordinates and our loss curriculum. Furthermore, we substitute the ACE feature backbone with various off-the-shelf networks, including SuperPoint [19] and DISK [59], to find them less suited for our task. We also vary the dimensionality of ACE backbone features to find more dimensions working better. See supplement for details.
197
+
198
+ <table><tr><td>Scene</td><td>DSAC* (Full)</td><td>DSAC* (Tiny)</td><td>ACE (Ours)</td></tr><tr><td>Cubes</td><td>83.8%</td><td>68.7%</td><td>97.0%</td></tr><tr><td>Bears</td><td>82.6%</td><td>73.1%</td><td>80.7%</td></tr><tr><td>Winter Sign</td><td>0.2%</td><td>0.3%</td><td>1.0%</td></tr><tr><td>Inscription</td><td>54.1%</td><td>41.3%</td><td>49.0%</td></tr><tr><td>The Rock</td><td>100%</td><td>99.8%</td><td>100%</td></tr><tr><td>Tendrils</td><td>25.1%</td><td>19.6%</td><td>34.9%</td></tr><tr><td>Map</td><td>56.7%</td><td>53.3%</td><td>56.5%</td></tr><tr><td>Square Bench</td><td>69.5%</td><td>60.3%</td><td>66.7%</td></tr><tr><td>Statue</td><td>0.0%</td><td>0.0%</td><td>0.0%</td></tr><tr><td>Lawn</td><td>34.7%</td><td>20.0%</td><td>35.8%</td></tr><tr><td>Average</td><td>50.7%</td><td>43.6%</td><td>52.2%</td></tr></table>
199
+
200
+ ![](images/3fd46a36fd3a06e159db867c84367b2572fdc1cbe1e61bb22a3d9d11dced4774.jpg)
201
+ Cubes
202
+
203
+ ![](images/12f25611bcc95ef8d2fb2a6b759fc59dd77c5b3aa391ce4f683dcd8ec77abd35.jpg)
204
+ Bears
205
+
206
+ ![](images/0fa4e12e86ed730a0b066053e4acb78f0b7fdb03a9d037c17dfdd82751177d8d.jpg)
207
+ Winter Sign
208
+
209
+ ![](images/a6057bef6d30d69f4acec8eb5d2e2dcc76f406f5ffa17611776625140a17903e.jpg)
210
+ Inscription
211
+
212
+ ![](images/4fe988e98f7c95db2fc2cb80625acda824b56dcc3044384ed549f7ae72ed02e4.jpg)
213
+ The Rock
214
+
215
+ ![](images/222e103d1c40b304e4041d3e6f89f4e2277250eba40ef3c5f47e0f088ac9c45b.jpg)
216
+ Tendrils
217
+
218
+ ![](images/3f75d38cbf78efd07ae78a85d94c0d0b5f42c65625f705737196a6c9fc6379e2.jpg)
219
+ Map
220
+
221
+ ![](images/f4f3836a62825942a1b33a2b94408e8f24622f3a4f0674df28d77cfee8d6859f.jpg)
222
+ Square Bench
223
+
224
+ ![](images/76c4cf460ed55affb48114b29e2a036b4099879d45008d0405900cd72a10572d.jpg)
225
+ Statue
226
+
227
+ ![](images/9984c97d6d55a6ccfdc5ba8bc72abefb6087da1a31aa9284dcd8be8189b06cee.jpg)
228
+ Lawn
229
+
230
+ ![](images/3c9535e0e15b9ccf4635d5968261d5a7ec4baf54bf4998a5a6bbcd30ad792868.jpg)
231
+
232
+ ![](images/32e3ee6ab39f0a906076270404eb8da6c30012206cdefaae4caf8a979a31c435.jpg)
233
+
234
+ ![](images/617e04636449e494fa293b40c632b16daefa08afe31407b8c9e6a0cdef45841e.jpg)
235
+ Figure 4. The Wayspots Dataset. We curate a new relocalization dataset from a public corpus of phone scans [3]. (left) We show accuracy as the percentage of frames below $10\mathrm{cm},5^{\circ}$ pose error. Best results in bold. (right) One mapping image of each scene, and a visualisation of ground truth poses and ACE estimates for one scene. The dataset does not provide 3D point clouds, so we show the ACE map instead. See the Supplementary for details of the process we use to render 3D point clouds from the trained ACE maps.
236
+
237
+ ![](images/0bef90c69f513a0b3b96c0b6c32069a0fc752bbebe3759925ddb728212bb63a3.jpg)
238
+ Figure 5. Qualitative results. We compare qualitative results of ACE and DSAC* on one scene of the Wayspots dataset. Full training of DSAC* takes 15 hours. When stopped after 5 minutes, relocalization accuracy is poor. We show the learned map for each method.
239
+ Figure 6. Map Size. We vary the map size of ACE by using more or fewer MLP layers. 4MB maps correspond to our standard settings. Results on 7Scenes with SfM pseudo ground truth poses.
240
+
241
+ # 5. Conclusion and Future Work
242
+
243
+ We have presented ACE, a relocalizer able to map new environments in 5 minutes. ACE reduces cost and energy consumption of mapping by two orders of magnitude compared to previous RGB-based scene coordinate regression approaches, making this family of algorithms practical.
244
+
245
+ The changes from previous state-of-the-art relocalizers that we propose in this paper are mainly conceptual, leveraging decorrelation of gradients by patch-level training. We see further potential for speedups by clever engineering, such as interleaving buffer creation and training in separate threads, or early stopping for easy scenes.
246
+
247
+ Acknowledgements: We thank Torsten Sattler for helpful comments about the resource footprint of feature-based relocalizers, Mohamed Sayed for help with the 3D visualisation of ACE, and Clément Godard for early contributions to the code base of ACE.
248
+
249
+ # References
250
+
251
+ [1] Apple. ARKit. Accessed: 11 November 2022. 3, 6
252
+ [2] Relja Arandjelovic, Petr Gronat, Akihiko Torii, Tomas Pajdla, and Josef Sivic. NetVLAD: CNN architecture for weakly supervised place recognition. In CVPR, 2016. 2
253
+ [3] Eduardo Arnold, Jamie Wynn, Sara Vicente, Guillermo Garcia-Hernando, Aron Monszpart, Victor Adrian Prisacariu, Daniyar Turmukhambetov, and Eric Brachmann. Map-free visual relocalization: Metric pose relative to a single image. In ECCV, 2022. 2, 7, 8
254
+ [4] Eric Brachmann, Martin Humenberger, Carsten Rother, and Torsten Sattler. On the limits of pseudo ground truth in visual camera re-localisation. In ICCV, 2021. 2, 6
255
+ [5] Eric Brachmann, Alexander Krull, Sebastian Nowozin, Jamie Shotton, Frank Michel, Stefan Gumhold, and Carsten Rother. DSAC-differentiable ransac for camera localization. In CVPR, 2017. 2, 3, 4
256
+ [6] Eric Brachmann, Frank Michel, Alexander Krull, Michael Y. Yang, Stefan Gumhold, and Carsten Rother. Uncertainty-driven 6D pose estimation of objects and scenes from a single RGB image. In CVPR, 2016. 3
257
+ [7] Eric Brachmann and Carsten Rother. Learning Less is More - 6D Camera Localization via 3D Surface Regression. In CVPR, 2018. 2, 3, 4, 7
258
+ [8] Eric Brachmann and Carsten Rother. Expert sample consensus applied to camera re-localization. In ICCV, 2019. 3, 4
259
+ [9] Eric Brachmann and Carsten Rother. Neural-guided RANSAC: Learning where to sample model hypotheses. In ICCV, 2019. 4
260
+ [10] Eric Brachmann and Carsten Rother. Visual camera relocalization from RGB and RGB-D images using DSAC. TPAMI, 2021. 1, 2, 3, 4, 5, 6, 7
261
+ [11] Samarth Brahmbhatt, Jinwei Gu, Kihwan Kim, James Hays, and Jan Kautz. Geometry-aware learning of maps for camera localization. In CVPR, 2018. 2
262
+ [12] Federico Camposeco, Andrea Cohen, Marc Pollefeys, and Torsten Sattler. Hybrid scene compression for visual localization. In CVPR, 2019. 1, 2, 3, 7
263
+ [13] Tommaso Cavallari, Luca Bertinetto, Jishnu Mukhoti, Philip Torr, and Stuart Golodetz. Let's take this online: Adapting scene coordinate regression network predictions for online rgb-d camera relocalisation. In 3DV, 2019. 3
264
+ [14] Tommaso Cavallari, Stuart Golodetz, Nicholas A Lord, Julien Valentin, Luigi Di Stefano, and Philip HS Torr. On-the-fly adaptation of regression forests for online camera relocalisation. In CVPR, 2017. 3
265
+ [15] Tommaso Cavallari, Stuart Golodetz, Nicholas A. Lord, Julien Valentin, Victor A. Prisacariu, Luigi Di Stefano, and Philip H. S. Torr. Real-time rgb-d camera pose estimation in novel scenes using a relocalisation cascade. TPAMI, 2019. 3
266
+ [16] Kunal Chelani, Fredrik Kahl, and Torsten Sattler. How privacy-preserving are line clouds? recovering scene details from 3d lines. In CVPR, 2021. 2
267
+ [17] Angela Dai, Angel X Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. ScanNet: Richly-annotated 3D reconstructions of indoor scenes. In CVPR, 2017. 5
268
+
269
+ [18] Angela Dai, Matthias Nießner, Michael Zollhöfer, Shahram Izadi, and Christian Theobalt. Bundlefusion: Real-time globally consistent 3D reconstruction using on-the-fly surface reintegration. TOG, 2017. 6
270
+ [19] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. In CVPRW, 2018. 5, 7
271
+ [20] Siyan Dong, Shuzhe Wang, Yixin Zhuang, Juho Kannala, Marc Pollefeys, and Baoquan Chen. Visual localization via few-shot scene region classification. In 3DV, 2022. 2, 3, 4, 6, 7
272
+ [21] Martin A Fischler and Robert C Bolles. Random sample consensus: a paradigm for model fitting with applications to image analysis and automated cartography. Communications of the ACM, 1981. 3
273
+ [22] Xiao-Shan Gao, Xiao-Rong Hou, Jianliang Tang, and Hang-Fei Cheng. Complete solution classification for the perspective-three-point problem. TPAMI, 2003. 3
274
+ [23] Google. Google Compute Engine GPU Pricing. Accessed: 11 November 2022. 7
275
+ [24] Google. ARCore. Accessed: 11 November 2022. 3, 6
276
+ [25] Martin Humenberger, Yohann Cabon, Nicolas Guerin, Julien Morat, Jérôme Revaud, Philippe Rerole, Noé Pion, Cesar de Souza, Vincent Leroy, and Gabriela Csurka. Robust image retrieval-based visual localization using Kapture, 2020. 1, 3, 6
277
+ [26] Arnold Irschara, Christopher Zach, Jan-Michael Frahm, and Horst Bischof. From structure-from-motion point clouds to fast location recognition. In CVPR, 2009. 3
278
+ [27] Shahram Izadi, David Kim, Otmar Hilliges, David Molyneaux, Richard Newcombe, Pushmeet Kohli, Jamie Shotton, Steve Hodges, Dustin Freeman, Andrew Davison, et al. Kinectfusion: real-time 3D reconstruction and interaction using a moving depth camera. In UIST, 2011. 3, 6
279
+ [28] Alex Kendall and Roberto Cipolla. Geometric loss functions for camera pose regression with deep learning. CVPR, 2017. 2, 7
280
+ [29] Alex Kendall, Matthew Grimes, and Roberto Cipolla. Posenet: A convolutional network for real-time 6-DOF camera relocalization. In CVPR, 2015. 2, 6, 7
281
+ [30] K. Levenberg. A method for the solution of certain problems in least squares. Quarterly Journal on Applied Mathematics, 1944. 3
282
+ [31] Xiaotian Li, Shuzhe Wang, Yi Zhao, Jakob Verbeek, and Juho Kannala. Hierarchical scene coordinate classification and regression for visual localization. In CVPR, 2020. 2, 3
283
+ [32] Ce Liu, Jenny Yuen, and Antonio Torralba. SIFT flow: Dense correspondence across scenes and its applications. TPAMI, 2011. 5
284
+ [33] J. Long, E. Shelhamer, and T. Darrell. Fully convolutional networks for semantic segmentation. In CVPR, 2015. 4
285
+ [34] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. In ICLR, 2019. 6
286
+ [35] Simon Lynen, Bernhard Zeisl, Dror Aiger, Michael Bosse, Joel Hesch, Marc Pollefeys, Roland Siegwart, and Torsten Sattler. Large-scale, real-time visual-inertial localization revisited. Intl. Journal of Robotics Research, 2020. 3
287
+
288
+ [36] Dominic Maggio, Marcus Abate, Jingnan Shi, Courtney Mario, and Luca Carlone. Loc-NeRF: Monte carlo localization using neural radiance fields, 2022. 3
289
+ [37] Donald W. Marquardt. An algorithm for least-squares estimation of nonlinear parameters. Journal of the Society for Industrial and Applied Mathematics, 1963. 3
290
+ [38] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 3
291
+ [39] Richard A Newcombe, Shahram Izadi, Otmar Hilliges, David Molyneaux, David Kim, Andrew J Davison, Pushmeet Kohi, Jamie Shotton, Steve Hodges, and Andrew Fitzgibbon. KinectFusion: Real-time dense surface mapping and tracking. In ISMAR, 2011. 3
292
+ [40] Vojtech Panek, Zuzana Kukelova, and Torsten Sattler. MeshLoc: Mesh-Based Visual Localization. In ECCV, 2022. 1, 3
293
+ [41] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in PyTorch. In NIPS-W, 2017. 6
294
+ [42] Jerome Revaud, Jon Almazan, Rafael Rezende, and Cesar De Souza. Learning with average precision: Training image retrieval with a listwise loss. In ICCV, 2019. 2
295
+ [43] Jerome Revaud, Philippe Weinzaepfel, César Roberto de Souza, and Martin Humenberger. R2D2: repeatable and reliable detector and descriptor. In NeurIPS, 2019. 5
296
+ [44] Paul-Edouard Sarlin, Cesar Cadena, Roland Siegwart, and Marcin Dymczyk. From coarse to fine: Robust hierarchical localization at large scale. In CVPR, 2019. 1, 2, 3, 6, 7
297
+ [45] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. SuperGlue: Learning feature matching with graph neural networks. In CVPR, 2020. 6, 7
298
+ [46] Paul-Edouard Sarlin, Ajaykumar Unagar, Måns Larsson, Hugo Germain, Carl Toft, Victor Larsson, Marc Pollefeys, Vincent Lepetit, Lars Hammarstrand, Fredrik Kahl, and Torsten Sattler. Back to the Feature: Learning Robust Camera Localization from Pixels to Pose. In CVPR, 2021. 6, 7
299
+ [47] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Fast image-based localization using direct 2D-to-3D matching. In ICCV, 2011. 3
300
+ [48] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Improving image-based localization by active correspondence search. In ECCV, 2012. 1
301
+ [49] Torsten Sattler, Bastian Leibe, and Leif Kobbelt. Efficient & Effective Prioritized Matching for Large-Scale Image-Based Localization. TPAMI, 2017. 1, 2, 3, 6, 7
302
+ [50] Torsten Sattler, Qunjie Zhou, Marc Pollefeys, and Laura Leal-Taixe. Understanding the limitations of cnn-based absolute camera pose regression. In CVPR, 2019. 2
303
+ [51] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In CVPR, 2016. 1, 3, 6, 7
304
+
305
+ [52] Yoli Shavit, Ron Ferens, and Yoshi Keller. Learning multiscene absolute pose regression with transformers. In ICCV, 2021. 2, 7
306
+ [53] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in RGB-D images. In CVPR, 2013. 1, 2, 3, 4, 6
307
+ [54] Leslie N. Smith and Nicholay Topin. Super-convergence: very fast training of neural networks using large learning rates. In Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications, 2019. 5, 6
308
+ [55] Noah Snavely, Steven M. Seitz, and Richard Szeliski. Photo tourism: Exploring photo collections in 3d. In SIGGRAPH, 2006. 1
309
+ [56] Pablo Speciale, Johannes L. Schonberger, Sing Bing Kang, Sudipta N. Sinha, and Marc Pollefeys. Privacy preserving image-based localization. In CVPR, June 2019. 2, 3
310
+ [57] Akihiko Torii, Relja Arandjelovic, Josef Sivic, Masatoshi Okutomi, and Tomas Pajdla. 24/7 place recognition by view synthesis. In CVPR, 2015. 2
311
+ [58] Mehmet Özgür Türkoğlu, Eric Brachmann, Konrad Schindler, Gabriel Brostow, and Aron Monszpart. Visual Camera Re-Localization Using Graph Neural Networks and Relative Pose Supervision. In 3DV, 2021. 2
312
+ [59] Michał Tyszkiewicz, Pascal Fua, and Eduard Trulls. DISK: Learning local features with policy gradient. In NeurIPS, 2020. 5, 7
313
+ [60] Julien Valentin, Angela Dai, Matthias Nießner, Pushmeet Kohli, Philip Torr, Shahram Izadi, and Cem Keskin. Learning to navigate the energy landscape. In 3DV, 2016. 6
314
+ [61] Julien Valentin, Matthias Nießner, Jamie Shotton, Andrew Fitzgibbon, Shahram Izadi, and Philip H. S. Torr. Exploiting uncertainty in regression forests for accurate camera relocalization. In CVPR, 2015. 3
315
+ [62] Dominik Winkelbauer, Maximilian Denninger, and Rudolph Triebel. Learning to localize in new environments from synthetic training data. In ICRA, 2021. 2
316
+ [63] Changchang Wu. VisualSFM: A visual structure from motion system, 2011. 1, 6
317
+ [64] Luwei Yang, Ziqian Bai, Chengzhou Tang, Honghua Li, Yasutaka Furukawa, and Ping Tan. SANet: Scene agnostic network for camera localization. In ICCV, 2019. 2, 3, 6, 7
318
+ [65] Luwei Yang, Rakesh Shrestha, Wenbo Li, Shuaicheng Liu, Guofeng Zhang, Zhaopeng Cui, and Ping Tan. Scenesqueezeer: Learning to compress scene for camera relocalization. In CVPR, 2022. 3
319
+ [66] Lin Yen-Chen, Pete Florence, Jonathan T. Barron, Alberto Rodriguez, Phillip Isola, and Tsung-Yi Lin. iNeRF: Inverting neural radiance fields for pose estimation. In IROS, 2021. 3
320
+ [67] Qunjie Zhou, Sérgio Agostinho, Aljoša Osep, and Laura Leal-Taixe. Is geometry enough for matching in visual localization? In ECCV, 2022. 1, 2, 3, 7
321
+ [68] Qunjie Zhou, Torsten Sattler, Marc Pollefeys, and Laura Leal-Taixe. To learn or not to learn: Visual localization from essential matrices. In ICRA, 2020. 2
acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:505b2cc2e411f7b0f86f1e30c989468d9d78c76c03c5e4c88c81fe67d074c245
3
+ size 659465
acceleratedcoordinateencodinglearningtorelocalizeinminutesusingrgbandposes/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c8bfd756804267f7244e2678b5b0abe73a8ac3553c54b64e0c6f509aafc70f3c
3
+ size 401528
acceleratingdatasetdistillationviamodelaugmentation/38ccc529-8263-4fb5-b4f1-49f67321c951_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d02e4c450f59b75ee81925280baa2be8035f71e582571be13cd796be17d8005d
3
+ size 77603
acceleratingdatasetdistillationviamodelaugmentation/38ccc529-8263-4fb5-b4f1-49f67321c951_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3a84aa535d10068747d40fd60cff72409374296e40fe9eba602721334064e8d5
3
+ size 98393
acceleratingdatasetdistillationviamodelaugmentation/38ccc529-8263-4fb5-b4f1-49f67321c951_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c47bbc64733620f84101d7b2cac50323713fb538aac2612fa81a97d4ded0f29b
3
+ size 569625
acceleratingdatasetdistillationviamodelaugmentation/full.md ADDED
@@ -0,0 +1,347 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Accelerating Dataset Distillation via Model Augmentation
2
+
3
+ Lei Zhang $^{1*}$ Jie Zhang $^{1*}$ Bowen Lei $^{2}$ Subhabrata Mukherjee $^{3}$
4
+
5
+ Xiang Pan $^{4}$ Bo Zhao $^{5}$ Caiwen Ding $^{6}$ Yao Li $^{7}$ Dongkuan Xu $^{8\dagger}$
6
+
7
+ $^{1}$ Zhejiang University $^{2}$ Texas A&M University $^{3}$ Microsoft Research
8
+
9
+ $^{4}$ New York University $^{5}$ Beijing Academy of Artificial Intelligence $^{6}$ University of Connecticut
10
+
11
+ <sup>7</sup>University of North Carolina, Chapel Hill <sup>8</sup>North Carolina State University
12
+
13
+ {z1.leizhang, zj_zhangjie}@zju.edu.cn dxu27@ncsu.edu
14
+
15
+ # Abstract
16
+
17
+ Dataset Distillation (DD), a newly emerging field, aims at generating much smaller but efficient synthetic training datasets from large ones. Existing DD methods based on gradient matching achieve leading performance; however, they are extremely computationally intensive as they require continuously optimizing a dataset among thousands of randomly initialized models. In this paper, we assume that training the synthetic data with diverse models leads to better generalization performance. Thus we propose two model augmentation techniques, i.e. using early-stage models and parameter perturbation, to learn an informative synthetic set with significantly reduced training cost. Extensive experiments demonstrate that our method achieves up to $20 \times$ speedup with performance on par with state-of-the-art methods.
18
+
19
+ # 1. Introduction
20
+
21
+ Dataset Distillation (DD) [3, 48] or Dataset Condensation [55, 56] aims to reduce the training cost by generating a small but informative synthetic set of training examples, such that the performance of a model trained on the small synthetic set is similar to that of a model trained on the original, large-scale dataset. Recently, DD has become an increasingly popular research topic, and has been explored in a variety of contexts, including federated learning [17, 42], continual learning [33, 40], neural architecture search [43, 57], medical computing [25, 26] and graph neural networks [21, 30].
22
+
23
+ DD has been typically cast as a meta-learning problem [16] involving bilevel optimization. For instance, Wang et al. [48] formulate the network parameters as a function of the learnable synthetic set in the inner-loop
24
+
25
+ ![](images/5e81284d37fc97e3312caa31bad6d91ed0f2df185495247229ccb548ffe31433.jpg)
26
+ Figure 1. Performance of condensed datasets for training ConvNet-3 vs. GPU hours to learn the 10-images-per-class condensed CIFAR-10 dataset with a single RTX-2080 GPU. Ours$_{5}$, Ours$_{10}$, and Ours$_{20}$ accelerate the training of the state-of-the-art method IDC [22] by $5 \times$, $10 \times$, and $20 \times$, respectively.
27
+
28
+ optimization, and then optimize the synthetic set by minimizing the classification loss on the real data in the outer loop. This recursive computation hinders its application to real-world large-scale model training, which involves thousands to millions of gradient descent steps. Several methods have been proposed to improve DD by introducing a ridge regression loss [2, 36], a trajectory matching loss [3], etc. To avoid unrolling the recursive computation graph, Zhao et al. [57] propose to learn the synthetic set by matching the gradients generated by real and synthetic data when training deep networks. Based on this surrogate goal, several methods have been proposed to improve the informativeness or compatibility of synthetic datasets from other perspectives, ranging from data augmentation [55] and contrastive signaling [24] to resolution reduction [22] and bit encoding [41].
29
+
30
+ Although model training on a small synthetic set is fast, the dataset distillation process is typically expensive. For instance, the state-of-the-art method IDC [22] takes approximately 30 hours to condense 50,000 CIFAR-10
31
+
32
+ images into 500 synthetic images with a single RTX-2080 GPU, which is equivalent to the time it takes to train 60 ConvNet-3 models on the original dataset. Furthermore, the distillation time cost increases rapidly for large-scale datasets, e.g., ImageNet-1K, which prevents its application in computation-limited environments like end-user devices. Prior work [56] on reducing the distillation cost results in a significant regression from state-of-the-art performance. In this paper, we aim to speed up the dataset distillation process while preserving or even improving the testing performance over state-of-the-art methods.
33
+
34
+ Prior works are computationally expensive because they focus on generalization ability, i.e., the learned synthetic set should be useful for training many different networks rather than a single targeted network. This requires optimizing the synthetic set over thousands of differently initialized networks. For example, IDC [22] learns the synthetic set over 2000 randomly initialized models, while the trajectory matching method (TM) [3] optimizes the synthetic set for 10000 distillation steps with 200 pre-trained expert models. Dataset distillation, which learns synthetic data that generalizes to unseen models, can be considered an orthogonal counterpart to model training, which learns model parameters that generalize to unseen data. Similarly, training the synthetic data with diverse models leads to better generalization performance. This intuitive idea leads to the following research questions:
35
+
36
+ Question 1. How to design the candidate pool of models to learn synthetic data, for instance, consisting of randomly initialized, early-stage or well-trained models?
37
+
38
+ Prior works [3, 22, 48, 57] use models from all training stages. The underlying assumption is that models from all training stages have similar importance. Zhao et al. [56] show that synthetic sets with similar generalization performance can be learned with different model parameter distributions, given an objective function in the form of feature distribution matching between real and synthetic data. In this paper, we take a closer look at this problem and show that learning synthetic data on early-stage models is more efficient for gradient/parameter matching based dataset distillation methods.
39
+
40
+ Question 2. Can we learn a good synthetic set using only a few models?
41
+
42
+ Our goal is to learn a synthetic set with a small number of (pre-trained) models to minimize the computational cost. However, using fewer models leads to poor generalization ability of the synthetic set. Therefore, we propose to apply parameter perturbation on selected early-stage models to incorporate model diversity and improve the generalization ability of the learned synthetic set.
43
+
44
+ In a nutshell, we propose two model augmentation techniques to accelerate the training speed of dataset distillation, namely using early-stage models and parameter
45
+
46
+ perturbation, to learn an informative synthetic set with significantly less training cost. As illustrated in Fig. 1, our method achieves up to $20 \times$ speedup with performance on par with state-of-the-art DD methods.
47
+
48
+ # 2. Related Work
49
+
50
+ # 2.1. Dataset Distillation
51
+
52
+ Recent advances in deep learning [6, 7, 13, 14, 53, 54] rely on massive amounts of training data, which not only consume substantial computational resources but also make training these models time-consuming. Dataset Distillation (DD) was introduced by Wang et al. [48], in which network parameters are modeled as functions of the synthetic data and learned by gradient-based hyperparameter optimization [32]. Subsequently, various works significantly improve the performance by learning on soft labels [2, 44], optimizing via the infinite-width kernel limit [36, 37], matching in gradient space [19, 57], model parameter space [3], and distribution space [47, 56], amplifying contrastive signals [24], adopting data augmentations [55], and exploring the regularity of the dataset [22]. DD has been applied to various scenarios including continual learning [33, 38, 40], privacy [8], federated learning [11, 17, 52], graph neural networks [20, 21], and neural architecture search [43], for images [4], text [29], and medical imaging data [27]. In addition to the efforts made to improve performance and expand applications, few studies have focused on the efficiency of DD, which is a critical and practical problem closely tied to the real-world application of DD.
53
+
54
+ # 2.2. Efficient Dataset Distillation
55
+
56
+ In this work, we focus on the efficiency of dataset distillation algorithms, which is under-explored in previous work. Zhao et al. [56] improve efficiency via distribution matching in random embedding spaces, which replaces the expensive bi-level optimization of common methods [22, 57]. However, the speed-up in their work comes with a significant drop in performance, leaving a large gap to other SOTA DD methods [22]. Cazenavette et al. [4] improve efficiency via parameter matching in pre-trained networks. However, they need to pre-train 100 networks from scratch on real data, which substantially increases the required computational resources. In this work, we seek to significantly reduce training time and computational cost while maintaining comparable performance.
57
+
58
+ # 3. Preliminary
59
+
60
+ The goal of dataset distillation is to generate a synthetic dataset $\mathcal{S}$ from the original training dataset $\mathcal{T}$ such that an arbitrary model trained on $\mathcal{S}$ is similar to the
61
+
62
+ one trained on $\mathcal{T}$. Among various dataset distillation approaches [3, 22, 37, 56], gradient-matching methods have achieved state-of-the-art performance. However, they require a large amount of training time and expensive computational resources. In this paper, we build on gradient matching and aim to reduce its computational requirements while maintaining similar performance.
63
+
64
+ Gradient Matching. The gradient-matching dataset distillation approach [57] matches the network gradients on the synthetic dataset $\mathcal{S}$ to those on the real dataset $\mathcal{T}$. The overall training objective can be formulated as:
65
+
66
+ $$
67
+ \underset{\mathcal{S}}{\mathrm{maximize}}\; \sum_{t = 0}^{T} \operatorname{Cos}\left(\nabla_{\theta}\ell\left(\theta_{t}; \mathcal{S}\right), \nabla_{\theta}\ell\left(\theta_{t}; \mathcal{T}\right)\right) \tag{1}
68
+ $$
69
+
70
+ $$
71
+ \text{w.r.t.}\quad \theta_{t + 1} = \theta_{t} - \eta\, \nabla_{\theta}\ell(\theta_{t}; \mathcal{S})
72
+ $$
73
+
74
+ where $\theta_{t}$ denotes the network weights at the $t^{\mathrm{th}}$ training step, obtained from randomly initialized weights $\theta_0$ by training on $\mathcal{S}$, $\ell(\theta; \mathcal{S})$ denotes the training loss of weights $\theta$ on dataset $\mathcal{S}$, and $\operatorname{Cos}(\cdot, \cdot)$ denotes the channel-wise cosine similarity.
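+ As a concrete illustration of Eq. (1), a channel-wise cosine gradient-matching loss might be sketched as follows. This is a minimal PyTorch sketch under our reading of the objective, not the official DC/IDC implementation; the function name `gradient_match_loss` and its batch arguments are placeholders.
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def gradient_match_loss(net, loss_fn, syn_batch, real_batch):
+     """Sum of (1 - channel-wise cosine similarity) between the gradients
+     induced by a synthetic batch and a real batch (cf. Eq. 1)."""
+     x_s, y_s = syn_batch
+     x_r, y_r = real_batch
+     # keep the graph for the synthetic gradients so the loss can be
+     # backpropagated to the synthetic images themselves
+     g_syn = torch.autograd.grad(loss_fn(net(x_s), y_s),
+                                 net.parameters(), create_graph=True)
+     g_real = torch.autograd.grad(loss_fn(net(x_r), y_r), net.parameters())
+     loss = 0.0
+     for gs, gr in zip(g_syn, g_real):
+         gs = gs.flatten(start_dim=1) if gs.dim() > 1 else gs.unsqueeze(0)
+         gr = gr.flatten(start_dim=1) if gr.dim() > 1 else gr.unsqueeze(0)
+         loss = loss + (1.0 - F.cosine_similarity(gs, gr.detach(), dim=1)).sum()
+     return loss
+ ```
+
+ Minimizing this per-channel distance is equivalent to maximizing the cosine similarity in Eq. (1).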
75
+
76
+ In addition, recent works have made various efforts to enhance the performance of gradient matching from the perspective of data diversity. Zhao et al. [55] utilize differentiable siamese augmentation to synthesize more informative images. Kim et al. [22] explore the regularity of the dataset to strengthen the representation ability of condensed datasets.
77
+
78
+ Discussion on Efficiency. Current works [22, 55, 57] use a large number of randomly initialized networks (e.g., 2000) to improve the generalization performance of the condensed dataset. This huge number of models makes the DD process time-consuming and computationally expensive. For instance, condensing CIFAR-10 into a synthetic dataset with 1 image per class using the state-of-the-art method IDC [22] consumes $200\mathrm{k}$ epochs of network updates in addition to $2,000\mathrm{k}$ epochs of updates to $\mathcal{S}$, which requires over 22.2 hours on a single RTX-2080 GPU. While Zhao et al. [56] make efforts to address the computation challenge by using distribution matching instead of gradient matching, reducing the number of updates from $200\mathrm{k}$ to $20\mathrm{k}$ and the training time from 22.2 hours to 0.83 hours, the accuracy of the condensed data also degrades dramatically from $50.6\%$ to $26.0\%$. This potentially results from redundant learning on randomly initialized networks.
79
+
80
+ # 4. Method
81
+
82
+ # 4.1. Overview
83
+
84
+ We illustrate the framework of our proposed efficient dataset distillation method in Fig. 2. Our method consists of three stages: 1) Early-stage Pre-training, 2) Parameter Perturbation, and 3) Distillation via gradient-matching. In stage 1, we utilize pre-trained networks at the early stage
85
+
86
+ ![](images/24a9cd1938e189b5464b8767533ab6fca8a2357cf75656df5d85e536396f160c.jpg)
87
+ Figure 2. The illustration of our proposed fast dataset distillation method. We perform early-stage pretraining and parameter perturbation on models in dataset distillation.
88
+
89
+ as an informative parameter space for dataset distillation. In stage 2, we conduct parameter perturbation on models selected from stage 1 to further augment the diversity of the model parameter distribution. In stage 3, the synthetic dataset is optimized with a gradient-matching strategy on these augmented early-stage models.
90
+
91
+ # 4.2. Early-Stage Models: Initializing with Informative Parameter Space
92
+
93
+ Existing gradient-matching methods [22, 55, 57] train synthetic data on a large number of randomly initialized networks so that the condensed set generalizes to unseen initializations. Furthermore, the initialized networks are updated for many SGD steps in the inner loop to learn better synthetic data, which requires considerable computational resources.
94
+
95
+ Data augmentation is frequently used to prevent overfitting and improve generalization when optimizing deep networks [49, 51]. Similarly, we propose to use model augmentation to improve generalization when learning condensed datasets. Inspired by ModelSoups [31, 50], a practical method for improving the performance of model ensembles, we pre-train a set of networks with different hyper-parameters, including the learning rate, random seed, and data augmentation, so as to construct a parameter space with rich diversity. Instead of leveraging randomly initialized networks in each outer loop as in traditional methods, we sample these early-stage networks as initializations, which are more informative for gradient matching.
96
+
97
+ Compared with well-trained networks, early-stage networks have two benefits. First, early-stage networks require less training cost. Second, the early-stage
98
+
99
+ networks have rich diversity [1, 12, 39] and provide large gradients [10], which leads to better gradient matching. More discussion can be found in the supplementary.
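+ To make the pre-training stage concrete, the sketch below builds the pool of $N$ early-stage checkpoints by training each network for only $P$ epochs with varied seeds, learning rates, and augmentations. It is a hypothetical sketch: `make_net`, `train_one_epoch`, and the candidate learning rates are assumptions, not the authors' released code.
+
+ ```python
+ import copy
+ import random
+ import torch
+
+ def build_early_stage_pool(make_net, train_one_epoch, loader,
+                            n_models=5, p_epochs=2):
+     """Pre-train N networks for only P epochs each (Sec. 4.2); diversity
+     comes from different random seeds, learning rates, and augmentations."""
+     pool = []
+     for n in range(n_models):
+         torch.manual_seed(n)                      # vary the random seed per model
+         net = make_net()
+         lr = random.choice([0.01, 0.05, 0.1])     # vary the learning rate (assumed values)
+         opt = torch.optim.SGD(net.parameters(), lr=lr, momentum=0.9)
+         for _ in range(p_epochs):                 # early stage only, far from convergence
+             train_one_epoch(net, opt, loader)
+         pool.append(copy.deepcopy(net.state_dict()))
+     return pool
+ ```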
100
+
101
+ # 4.3. Parameter Perturbation: Diversifying Parameter Space
102
+
103
+ Motivated by data perturbation, which is widely used to diversify training data for better knowledge distillation [34, 35], we propose model perturbation in dataset distillation to further diversify the parameter space. We apply the perturbation after sampling the network parameters from the early-stage parameter space in each outer loop.
104
+
105
+ We formulate our fast dataset distillation as gradient matching between real and synthetic data on parameter-perturbed early-stage models:
106
+
107
+ $$
108
+ \min_{\mathcal{S}} D\left(\nabla_{\theta}\ell(\hat{\theta}; \mathcal{S}), \nabla_{\theta}\ell(\hat{\theta}; \mathcal{T})\right) \tag{2}
109
+ $$
110
+
111
+ $$
112
+ \text{w.r.t.}\quad \hat{\theta} \leftarrow \theta^{\mathcal{T}} + \alpha \cdot \mathbf{d},
113
+ $$
114
+
115
+ where $\theta^{\mathcal{T}}$ represents the network weights trained on the real data $\mathcal{T}$, $D$ denotes a distance-based matching objective, and $\alpha$ is the magnitude of the parameter perturbation. $\mathbf{d}$ is sampled from a Gaussian distribution $\mathcal{N}(0,\mathbf{I})$ with dimensions matching the network parameters and is filter-normalized by
116
+
117
+ $$
118
+ \mathbf{d}_{l,j} \leftarrow \frac{\mathbf{d}_{l,j}}{\|\mathbf{d}_{l,j}\|_{F} + \epsilon}\, \|\theta_{l,j}\|_{F} \tag{3}
119
+ $$
120
+
121
+ to eliminate the scaling invariance of neural networks [28], where $\mathbf{d}_{l,j}$ is the $j$ -th filter at the $l$ -th layer of $\mathbf{d}$ and $\| \cdot \|_F$ denotes the Frobenius norm. $\epsilon$ is a small positive constant.
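+ The filter-normalized perturbation of Eqs. (2) and (3) might be implemented as sketched below. This is a minimal PyTorch sketch under the assumption that multi-dimensional weights are normalized per output filter and one-dimensional parameters per tensor; `perturb_parameters` is a placeholder name, not the authors' code.
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def perturb_parameters(net, alpha=1.0, eps=1e-12):
+     """theta <- theta + alpha * d, with d ~ N(0, I) filter-normalized (Eq. 3)."""
+     for theta in net.parameters():
+         d = torch.randn_like(theta)
+         if theta.dim() > 1:
+             # normalize each output filter of d to match the norm of the
+             # corresponding filter of theta
+             d2 = d.flatten(start_dim=1)
+             t2 = theta.flatten(start_dim=1)
+             scale = t2.norm(dim=1, keepdim=True) / (d2.norm(dim=1, keepdim=True) + eps)
+             d = (d2 * scale).view_as(theta)
+         else:
+             d = d * theta.norm() / (d.norm() + eps)
+         theta.add_(alpha * d)
+ ```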
122
+
123
+ # 4.4. Training Algorithm
124
+
125
+ We depict our method in Algorithm 1. We build our training algorithm on the state-of-the-art method IDC [22]. Before dataset distillation, we pre-train $N$ models on real data for only a few epochs, which is significantly cheaper than existing methods that train many networks to convergence. We then train the condensed dataset $\mathcal{S}$ for $T$ outer loops and $M$ inner loops. In each outer loop, we randomly select a model from the $N$ early-stage models as the initialization and apply parameter perturbation to it. In each inner loop, we optimize the synthetic samples $\mathcal{S}$ by minimizing the gradient-matching loss between the sampled real batch $\mathcal{T}_c$ and synthetic batch $\mathcal{S}_c$ of the same class $c$. The network $\theta_m$ is then updated on real data. Please refer to [22] for more details. The number of pre-training epochs $P$ and the number of outer loops $T$ are relatively small: in experiments, we set $P = 2$, compared with 300 epochs for a well-trained network, and $T = 400$, compared with 2000 in the SOTA DD method IDC [22]. Note that our method can also be easily applied to other dataset distillation methods to reduce their training time, which we explore in Sec. 5.3.
126
+
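+ As a rough sanity check using only numbers already reported in this paper, reducing the number of outer loops from 2000 to 400 suggests on the order of
+
+ $$
+ \frac{2000}{400} = 5\times
+ $$
+
+ fewer distillation iterations, which is consistent with the measured wall-clock speedup of $21.7\,\mathrm{h} / 4.44\,\mathrm{h} \approx 4.9\times$ for Ours$_{5}$ on CIFAR-10 with 1 image per class (Tab. 1).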
127
+ Algorithm 1: Efficient Dataset Distillation
+
+ Input: training data $\mathcal{T}$, loss function $\ell$, number of classes $C$, number of models $N$, perturbation magnitude $\alpha$, augmentation function $\mathcal{A}$, multi-formation function $f$, deep neural network $\psi_{\theta}$ parameterized by $\theta$
+
+ Output: condensed dataset $\mathcal{S}$
+
+ Definition: $D(B, B^{\prime}; \theta) = \| \nabla_{\theta}\ell(\theta; B) - \nabla_{\theta}\ell(\theta; B^{\prime}) \|$
+
+ /* Early-Stage Pre-training */
+ 1: Randomly initialize $N$ networks $\{\tau_{1}, \dots, \tau_{N}\}$
+ 2: for $n \gets 1$ to $N$ do
+ 3: for $p \gets 1$ to $P$ do (update network $\tau_{n}$ on real data $\mathcal{T}$)
+ 4: $\tau_{n, p+1} \gets \tau_{n, p} - \eta \nabla_{\tau_{n,p}} \ell(\tau_{n,p}; \mathcal{A}(\mathcal{T}))$
+ 5: end
+ 6: end
+ /* Dataset Distillation */
+ 7: Initialize the condensed dataset $\mathcal{S}$
+ 8: for $t \gets 0$ to $T$ do
+ 9: Randomly load one checkpoint from $\{\tau_{1}, \dots, \tau_{N}\}$ to initialize $\psi_{\theta}$
+ /* Parameter Perturbation */
+ 10: Sample a vector $\mathbf{d}$ from a Gaussian distribution and filter-normalize it (Eq. 3)
+ 11: Perturb the parameters of $\psi_{\theta}$: $\theta \gets \theta + \alpha \cdot \mathbf{d}$
+ 12: for $m \gets 0$ to $M$ do
+ 13: for $c \gets 0$ to $C$ do
+ 14: Sample an intra-class mini-batch $T_{c} \sim \mathcal{T}$, $S_{c} \sim \mathcal{S}$
+ 15: Update the synthetic data: $S_{c} \gets S_{c} - \lambda \nabla_{S_{c}} D(\mathcal{A}(f(S_{c})), \mathcal{A}(T_{c}); \theta_{m})$
+ 16: end
+ 17: Sample a mini-batch $T \sim \mathcal{T}$
+ 18: Update the network $\psi_{\theta}$ w.r.t. the classification loss: $\theta_{m+1} \gets \theta_{m} - \eta \nabla_{\theta} \ell(\theta_{m}; \mathcal{A}(T))$
+ 19: end
+ 20: end
149
+
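+ Putting the pieces together, the outer/inner loops of Algorithm 1 might look as follows. This hedged sketch reuses the helper functions sketched earlier (`build_early_stage_pool`, `perturb_parameters`, `gradient_match_loss`), treats `real_loaders` as a list of per-class data loaders, and omits IDC-specific details such as the multi-formation function $f$ and the differentiable augmentation $\mathcal{A}$; all names and default values are illustrative assumptions.
+
+ ```python
+ import random
+ import torch
+
+ def distill(make_net, pool, syn_images, syn_labels, real_loaders, loss_fn,
+             T=400, M=100, alpha=1.0, lr_syn=0.01, lr_net=0.01):
+     """Sketch of Algorithm 1: sample an early-stage checkpoint, perturb it,
+     then alternate synthetic-data updates (gradient matching) and network updates."""
+     opt_syn = torch.optim.SGD([syn_images], lr=lr_syn, momentum=0.5)
+     for t in range(T):                               # outer loop
+         net = make_net()
+         net.load_state_dict(random.choice(pool))     # random early-stage checkpoint
+         perturb_parameters(net, alpha=alpha)         # parameter perturbation (Eqs. 2-3)
+         opt_net = torch.optim.SGD(net.parameters(), lr=lr_net, momentum=0.9)
+         for m in range(M):                           # inner loop
+             for c, loader_c in enumerate(real_loaders):      # per-class batches
+                 x_r, y_r = next(iter(loader_c))
+                 mask = (syn_labels == c)
+                 loss = gradient_match_loss(net, loss_fn,
+                                            (syn_images[mask], syn_labels[mask]),
+                                            (x_r, y_r))
+                 opt_syn.zero_grad(); loss.backward(); opt_syn.step()
+             # update the network on real data with the classification loss
+             x_r, y_r = next(iter(random.choice(real_loaders)))
+             opt_net.zero_grad()
+             loss_fn(net(x_r), y_r).backward()
+             opt_net.step()
+     return syn_images.detach()
+ ```
+
+ Here `syn_images` is assumed to be a leaf tensor with `requires_grad=True`, so the gradient-matching loss updates the condensed images directly.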
150
+ # 5. Experiments
151
+
152
+ In this section, we first evaluate our method on various datasets against state-of-the-art baselines. Next, we examine the proposed method in depth with ablation analysis.
153
+
154
+ # 5.1. Experimental Setups
155
+
156
+ Datasets. We evaluate performance of neural networks trained on condensed datasets generated by several methods as baselines. Following previous works [4, 22, 57], we conduct experiments on both low- and high-resolution datasets including CIFAR-10, CIFAR-100 [23], and ImageNet [5].
157
+
158
+ Network Architectures. Following previous works [22, 56], we use a depth-3 ConvNet [39] on CIFAR-10 and CIFAR-100. For ImageNet subsets, we follow IDC [22] and adopt ResNetAP-10 for dataset distillation, a modified
159
+
160
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td colspan="4">Img/Cls</td><td rowspan="2">Speed Up</td><td rowspan="2">Acc. Gain</td></tr><tr><td>1</td><td>10</td><td>50</td><td></td></tr><tr><td rowspan="8">CIFAR-10</td><td>Full Dataset</td><td>88.1</td><td>88.1</td><td>88.1</td><td>-</td><td>-</td><td>-</td></tr><tr><td>IDC [22]</td><td>50.6 (21.7h)</td><td>67.5 (22.2h)</td><td>74.5 (29.4h)</td><td>1.00×</td><td>1.00×</td><td></td></tr><tr><td>CAFE [47]</td><td>30.3</td><td>46.3</td><td>55.5</td><td>-</td><td>0.54×</td><td></td></tr><tr><td>DSA [55]</td><td>28.2 (0.09h)</td><td>52.1 (1.94h)</td><td>60.6 (11.1h)</td><td>85.0×</td><td>0.71×</td><td></td></tr><tr><td>DM [56]</td><td>26.0 (0.25h)</td><td>48.9 (0.26h)</td><td>63.0 (0.31h)</td><td>89.0×</td><td>0.69×</td><td></td></tr><tr><td>TM [3]</td><td>46.3 (6.35h)</td><td>65.3 (6.69h)</td><td>71.6 (7.39h)</td><td>3.57×</td><td>0.94×</td><td></td></tr><tr><td>Ours5</td><td>49.2 (4.44h)</td><td>67.1 (4.45h)</td><td>73.8 (6.11h)</td><td>4.90×</td><td>0.99×</td><td></td></tr><tr><td>Ours10</td><td>48.5 (2.22h)</td><td>66.5 (2.23h)</td><td>73.1 (3.05h)</td><td>9.77×</td><td>0.97×</td><td></td></tr><tr><td rowspan="9">CIFAR-100</td><td>Full Dataset</td><td>56.2</td><td>56.2</td><td>56.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>IDC [22]</td><td>25.1 (125h)</td><td>45.1 (127h)</td><td>-</td><td>1.00×</td><td>1.00×</td><td></td></tr><tr><td>CAFE [47]</td><td>12.9</td><td>27.8</td><td>37.9</td><td>-</td><td>0.56×</td><td></td></tr><tr><td>DSA [55]</td><td>13.9 (0.83h)</td><td>32.3 (17.5h)</td><td>42.8 (221.1h)</td><td>78.9×</td><td>0.63×</td><td></td></tr><tr><td>DM [56]</td><td>11.4 (1.67h)</td><td>29.7 (2.64h)</td><td>43.6 (2.78h)</td><td>61.4×</td><td>0.55×</td><td></td></tr><tr><td>TM [3]</td><td>24.3 (7.74h)</td><td>40.1 (9.47h)</td><td>47.7 (-)</td><td>14.7×</td><td>0.92×</td><td></td></tr><tr><td>Ours5</td><td>29.8 (25.1h)</td><td>45.6 (25.6h)</td><td>52.6 (42.00h)</td><td>4.97×</td><td>1.10×</td><td></td></tr><tr><td>Ours10</td><td>29.4 (12.5h)</td><td>45.2 (12.8h)</td><td>52.2 (21.00h)</td><td>9.96×</td><td>1.09×</td><td></td></tr><tr><td>Ours20</td><td>29.1 (6.27h)</td><td>44.1 (6.40h)</td><td>52.1 (10.50h)</td><td>19.9×</td><td>1.07×</td><td></td></tr></table>
161
+
162
+ Table 1. Comparing efficiency and performance of dataset distillation methods on CIFAR-10 and CIFAR-100. Speed up represents the average acceleration amount of training time on a single RTX-2080 GPU with the same batch size 64. Acc. Gain represents the average improvement in test accuracy of network trained on the condensed dataset over IDC [22]. Training time is not reported for CAFE [47] that does not provide official implementation and IDC [22] that requires more than one GPU on CIFAR-100 for Img/Cls=50.
163
+
164
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td colspan="2">Img/Cls</td><td rowspan="2">Speed Up</td><td rowspan="2">Acc. Gain</td></tr><tr><td>10</td><td>20</td></tr><tr><td rowspan="5">ImageNet-10</td><td>Full Dataset</td><td>90.8</td><td>90.8</td><td>-</td><td>-</td></tr><tr><td>IDC [22]</td><td>72.8 (70.14h)</td><td>76.6 (92.78h)</td><td>1.00×</td><td>1.00×</td></tr><tr><td>DSA [55]</td><td>52.7 (26.95h)</td><td>57.4 (51.39h)</td><td>2.20×</td><td>0.73×</td></tr><tr><td>DM [56]</td><td>52.3 (1.39h)</td><td>59.3 (3.61h)</td><td>38.1×</td><td>0.74×</td></tr><tr><td>Ours5</td><td>74.6 (15.52h)</td><td>76.3 (20.05h)</td><td>4.57×</td><td>1.01×</td></tr><tr><td rowspan="5">ImageNet-100</td><td>Full Dataset</td><td>82.0</td><td>82.0</td><td>-</td><td>-</td></tr><tr><td>IDC [22]</td><td>46.7 (141h)</td><td>53.7 (185h)</td><td>1.00×</td><td>1.00×</td></tr><tr><td>DSA [55]</td><td>21.8 (9.72h)</td><td>30.7 (23.9h)</td><td>14.1×</td><td>0.51×</td></tr><tr><td>DM [56]</td><td>22.3 (2.78h)</td><td>30.4 (2.81h)</td><td>58.2×</td><td>0.52×</td></tr><tr><td>Ours5</td><td>48.4 (29.8h)</td><td>56.0 (38.6h)</td><td>4.76×</td><td>1.04×</td></tr></table>
165
+
166
+ Table 2. Comparing efficiency and performance of dataset distillation methods on ImageNet-10 and ImageNet-100. We measure the training time on a single RTX-A6000 GPU with the same training hyperparameters. For ImageNet-100, we follow IDC [22] to split the whole dataset into five tasks with 20 classes each for faster optimization. The training time reported in ImageNet-100 is for one task.
167
+
168
+ ResNet-10 [15] in which strided convolutions are replaced with average pooling for downsampling.
169
+
170
+ Evaluation Metrics. We study several methods in terms of performance and efficiency. The performance is measured by the testing accuracy of networks trained on condensed datasets. The efficiency is measured by GPU hours required by the dataset distillation process [9]. For a fair comparison, all GPU hours are measured on a single GPU. The training
171
+
172
+ time for condensing CIFAR-10/CIFAR-100 and the ImageNet subsets is measured on an RTX-2080 GPU and an RTX-A6000 GPU, respectively. We also adopt FLOPs as a metric of computational efficiency.
173
+
174
+ Baselines. We compare our method with several prominent dataset condensation methods: (1) gradient-matching methods, including DSA [55] and IDC [22]; (2) distribution-matching methods, including DM [56] and CAFE [47]; and (3) parameter-
175
+
176
+ ![](images/b14fdc18589b40821bd06ec977756ed859754a888b05d1f5cfe3d5abc5f19f8a.jpg)
177
+ (a) CIFAR10 (Img/Cls=10)
178
+
179
+ ![](images/5a64a9ec01456fd102d7372c1f54f9d66de44e99c25bfd127bf584c910c3195f.jpg)
180
+ (b) CIFAR10 (Img/Cls=50)
181
+
182
+ ![](images/087e64256dc7592e26873d7dcc07608e72477992c66bb69935787eef1ea639c1.jpg)
183
+ (c) CIFAR100 (Img/Cls=10)
184
+ Figure 3. Performance comparison across a varying number of training steps.
185
+
186
+ ![](images/bf78df9f4430a1c80379bed32908965e883dbe5bb64d829305fe7f45d2bfa2ce.jpg)
187
+ (d) ImageNet-10 (Img/Cls=10)
188
+
189
+ ![](images/a8b8f3ac8711614af8ce3f6dda694eabad7c73e8f783a69de8f1ed233e7c491a.jpg)
190
+ (a) CIFAR-100 (Img/Cls=1).
191
+ Figure 4. Performance comparison across varying training time and FLOPs.
192
+
193
+ ![](images/5149c01545690541f6b8d753fe7107b5aab707c47d9752548743d0df051a5cad.jpg)
194
+ (b) CIFAR-10 (Img/Cls=10)
195
+
196
+ ![](images/6af7b47eebb6d47432d6b544acf85ea8c0f507e624130fa4110a19488ba935ac.jpg)
197
+ (c) CIFAR100 (Img/Cls=10)
198
+
199
+ ![](images/b3d216a57fab58a7d865afb4746c14f8dd0c1236729c7eb5eae6fb5dc98ebad2.jpg)
200
+ (d) ImageNet-10 (Img/Cls=10)
201
+
202
+ matching methods, including TM [3]. We use the state-of-the-art dataset distillation method IDC as the strongest baseline and report the performance and efficiency gaps of the other methods relative to it.
203
+
204
+ Training Details. We adopt IDC, the state-of-the-art gradient-matching dataset distillation method, as the backbone of our method. The number of outer loops and the learning rate of the condensed data are 400/100 and 0.01/0.1 for CIFAR-10/100 and the ImageNet subsets, respectively. We employ 5/10 pre-trained models for CIFAR-10/100 and ImageNet. The number of pre-training epochs is 2/5/10 for CIFAR-10/100, ImageNet-10, and ImageNet-100. The other hyperparameters follow IDC [22], including the number of inner loops, batch size, and augmentation strategy.
205
+
206
+ # 5.2. Condensed Data Evaluation
207
+
208
+ CIFAR-10 & CIFAR-100. Our method achieves a better trade-off in task performance vs. the amount of training time and computation compared to other state-of-the-art baselines on CIFAR-10 and CIFAR-100. For instance, as shown in Tab. 1, our method is comparable to IDC while achieving $5 \times$ and $10 \times$ speed ups on CIFAR-10. Our method shows $10\%$ , $9\%$ , and $7\%$ performance improvements over IDC on CIFAR-100 while achieving $5 \times$ , $10 \times$ , and $20 \times$ acceleration, respectively.
209
+
210
+ To further demonstrate the advantages of our method, we report the evaluation results across a varying amount
211
+
212
+ of computational resources, in the form of the number of training steps in Fig. 3, and of training time and FLOPs in Fig. 4. We observe that our method consistently outperforms all the baselines across different training steps, training times, and FLOPs. This demonstrates the effectiveness of our distillation method in capturing informative features from early-stage training and the enhanced diversity of the models for better generalizability. Interestingly, our method obtains larger gains in performance and efficiency over state-of-the-art baselines on CIFAR-100 than on CIFAR-10, which demonstrates the effectiveness and scalability of our method on larger datasets and makes it more appealing for practical purposes.
213
+
214
+ ImageNet. Apart from CIFAR-10/100, we further investigate the performance and efficiency of our method on the high-resolution dataset ImageNet. Following previous baselines [22, 46], we evaluate our method on ImageNet-subset consisting of 10 and 100 classes.
215
+
216
+ We observe that dataset distillation methods on ImageNet suffer from severe efficiency challenges. As shown in Tab. 2, the dataset distillation method IDC [22] achieves high performance while requiring almost 4 days on ImageNet-10, whereas DSA [55] and DM [56] are more efficient in training time but yield significantly worse performance. The accuracy of networks trained on condensed data generated by our method outperforms all existing state-of-the-art baselines with the least training time. For instance, our method
217
+
218
+ requires less than 1 day to condense ImageNet-10, which leads to $5 \times$ speedup over SOTA methods.
219
+
220
+ As shown in Fig. 3 and Fig. 4, we conduct extensive experiments with various training budgets. The results demonstrate that our method requires significantly fewer training steps, less time, and fewer computational resources to reach the same performance as the SOTA method IDC, and achieves higher performance under the same training budgets. This indicates that utilizing early-stage models as initialization guides dataset distillation to focus on distinguishing features from the beginning of distillation, while the exploration of diversity expands the parameter space and reduces the time spent learning repeated and redundant features.
221
+
222
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td colspan="3">Evaluation model</td></tr><tr><td>ConvNet-3</td><td>ResNet-10</td><td>DenseNet-121</td></tr><tr><td rowspan="2">CIFAR-100</td><td>IDC [22]</td><td>45.1</td><td>38.9</td><td>39.5</td></tr><tr><td>Ours5</td><td>46.5</td><td>38.4</td><td>39.6</td></tr></table>
223
+
224
+ (a) The performance of condensed CIFAR-100 dataset (10 images per class) trained on ConvNet-3 on different network architectures.
225
+
226
+ <table><tr><td rowspan="2">Dataset</td><td rowspan="2">Method</td><td colspan="3">Evaluation model</td></tr><tr><td>ResNetAP-10</td><td>ResNet-18</td><td>EfficientNet-B0</td></tr><tr><td rowspan="2">ImageNet-10</td><td>IDC [22]</td><td>74.0</td><td>73.1</td><td>74.3</td></tr><tr><td>Ours5</td><td>74.6</td><td>74.5</td><td>75.4</td></tr></table>
227
+
228
+ (b) The performance of condensed ImageNet-10 dataset (10 images per class) trained on ResNetAP-10 on different network architectures.
229
+
230
+ Table 3. Performance of synthetic data learned on the CIFAR-100 and ImageNet-10 datasets with different architectures. The networks are trained on the condensed dataset and evaluated on the test set.
231
+
232
+ Cross-Architecture Generalization. We also evaluate the performance of our condensed data on architectures different from the one used to distill it, on CIFAR-100 (1 and 10 images per class) and ImageNet-10 (10 images per class). In Tab. 3, we show the performance of data condensed with ConvNet-3 and ResNetAP-10 when evaluated on ResNet-10/ResNet-18 [15], DenseNet-121 [18], and EfficientNet-B0 [45].
233
+
234
+ For IDC [22], we use the condensed data provided by the official implementation to evaluate their method. Our method obtains the best performance on all the transfer models except for ResNet-10 on CIFAR-100 (10 images per class), where we lie within one standard deviation of IDC, demonstrating the robustness of our method to changes in network architecture.
235
+
236
+ # 5.3. Analysis
237
+
238
+ We perform ablation studies on our efficient dataset distillation method described in Sec. 4. Specifically, we measure the impact of (1) the number of epochs of pre-training on real data, (2) the magnitude of parameter perturbation, (3) the number of early-stage models, and (4) the acceleration of training.
239
+
240
+ Epochs of Pre-training. We study the effect of pre-training
241
+
242
+ epochs on the networks used in our method in terms of test accuracy on CIFAR-10 (10 images per class) and report the results in Fig. 5a. We observe that early-stage networks pre-trained for 2 epochs perform significantly better than randomly initialized networks and well-trained networks pre-trained for 300 epochs. The results indicate that early-stage networks provide a more informative parameter space than randomly initialized networks, thereby helping the condensed datasets capture features more efficiently. While well-trained networks are generally expected to provide stronger guidance, they tend to get stuck in local optima and lack diversity in parameter space. On the other hand, early-stage models provide flexible and informative guidance for dataset distillation.
243
+
244
+ ![](images/a65a6d598df8535ed174768967af5e24e2932a8782f782ff883c85998b29ada7.jpg)
245
+ (a) Effect of pre-train epochs
246
+
247
+ ![](images/d1d38dc250390564adf2cd0f8ff0135969ee800c7dbbb6219bd121850a7439fc.jpg)
248
+ (b) Effect of perturbation magnitude
249
+ Figure 5. Condensation performance from networks pre-trained for different epochs and varying magnitudes of parameter perturbation. The networks are trained with same hyper-parameters except for training epochs and perturbation magnitudes, respectively. Evaluation is performed on CIFAR-10 (10 images per class).
250
+
251
+ Magnitude of Parameter Perturbation. We study the effect of the magnitude $\alpha$ of the parameter perturbation in terms of test accuracy on CIFAR-10 (10 images per class) and report the results in Fig. 5b. We observe that the condensed dataset achieves better accuracy and efficiency when the magnitude $\alpha$ is carefully set. When the magnitude is large, e.g., 10, the perturbed networks diverge from the original space; the perturbed parameter space then contains less relevant and inconsistent information, which harms both performance and efficiency. When the magnitude is small, e.g., when parameter perturbation is not employed at all, the parameter space lacks diversity compared to a well-designed perturbed parameter space. Experimental results show that $\alpha = 1$ is optimal for CIFAR in our setting and works consistently better across all training steps. A well-chosen magnitude keeps the perturbed networks concentrated around the original network, thereby augmenting the parameter space with diversified yet relevant information.
252
+
253
+ Number of Early-Stage Models. We study the effect of the number of early-stage models in our experiment and show the results in Fig. 6. It is observed that the number of
254
+
255
+ early-stage models $N$ has little impact on the test accuracy of the condensed dataset. We argue that parameter perturbation in our method plays the key role in exploring the diversity of early-stage models, so the coverage of the parameter space depends on how the models are perturbed rather than on the number of models. In our method, a few models, e.g., 5, can achieve performance comparable to the SOTA [22], with two significant advantages. The first is a shorter training time, as the number of outer loops in DD is closely related to the number of models $N$. The second is reduced computation for network pre-training. TM [4] also utilizes network pre-training in DD; however, the number of models in their method is relatively large, e.g., 50, which is $10 \times$ more than ours. Parameter perturbation in our method augments the diversity of the models and improves efficiency with only a small number of models.
256
+
257
+ ![](images/863f066414e9a3f731882a776129fb6556aee2d54a26fe1ed59ea7160f4202b8.jpg)
258
+ Figure 6. Condensation performance from a varying number of early-stage models. Performances with a varying number of models are similar, which demonstrates that our method is not sensitive to the number of models to achieve high performance.
259
+
260
+ ![](images/9b0699799e5b4d82b583c72c682b6cbc92bdedd113763f2fb9aa67b3d47e0929.jpg)
261
+ Figure 7. Performance of our method applied to different dataset distillation methods on CIFAR-10 dataset (10 images per class). Our results are reported with $5 \times$ training acceleration.
262
+
263
+ Acceleration of Training. We study the effect of training acceleration on existing DD methods [22, 55, 57] and on our method. As shown in Tab. 4, our method retains similar performance with only minor regression as the training acceleration / speed-up increases, while the performance of existing methods drops dramatically. Our method achieves
264
+
265
+ <table><tr><td>Speed up</td><td>DC [57]</td><td>DSA [55]</td><td>IDC [22]</td><td>Ours</td></tr><tr><td>1×</td><td>44.9</td><td>52.1</td><td>67.5</td><td>-</td></tr><tr><td>5×</td><td>41.6 (-3.3)</td><td>47.0 (-5.1)</td><td>66.2 (-1.3)</td><td>67.1</td></tr><tr><td>10×</td><td>39.2 (-5.7)</td><td>46.2 (-5.9)</td><td>65.0 (-2.5)</td><td>66.5 (-0.6)</td></tr><tr><td>20×</td><td>37.8 (-7.1)</td><td>44.8 (-7.3)</td><td>63.7 (-3.8)</td><td>65.2 (-1.9)</td></tr></table>
266
+
267
+ (a) CIFAR-10 (Img/Cls=10)
268
+
269
+ <table><tr><td>Speed up</td><td>DC [57]</td><td>DSA [55]</td><td>IDC [22]</td><td>Ours</td></tr><tr><td>1×</td><td>53.9</td><td>60.6</td><td>74.5</td><td>-</td></tr><tr><td>5×</td><td>50.3 (-3.6)</td><td>56.5 (-4.1)</td><td>73.3 (-1.2)</td><td>73.8</td></tr><tr><td>10×</td><td>47.3 (-6.6)</td><td>55.7 (-4.9)</td><td>72.0 (-2.5)</td><td>73.1 (-0.7)</td></tr><tr><td>20×</td><td>42.0 (-11.9)</td><td>54.1 (-6.5)</td><td>71.1 (-3.4)</td><td>71.7 (-2.1)</td></tr></table>
270
+
271
+ (b) CIFAR-10 (Img/Cls=50)
272
+
273
+ <table><tr><td>Speed up</td><td>DC [57]</td><td>DSA [55]</td><td>IDC [22]</td><td>Ours</td></tr><tr><td>1×</td><td>29.5</td><td>32.3</td><td>45.1</td><td>-</td></tr><tr><td>5×</td><td>23.1 (-6.4)</td><td>29.3 (-3.0)</td><td>43.4 (-1.9)</td><td>46.2</td></tr><tr><td>10×</td><td>21.1 (-8.4)</td><td>28.7 (-3.6)</td><td>41.6 (-3.5)</td><td>45.6 (-0.6)</td></tr><tr><td>20×</td><td>18.6 (-10.9)</td><td>27.9 (-4.4)</td><td>40.5 (-4.6)</td><td>45.0 (-1.2)</td></tr></table>
274
+
275
+ (c) CIFAR-100 (Img/Cls=10)
276
+
277
+ Table 4. Condensation performance under different acceleration / speed-up levels compared with state-of-the-art dataset distillation approaches. The performance drop relative to each method's lowest speed-up setting is shown in brackets. Our method achieves higher performance than the baseline methods at all speed-up levels and shows only minor regression as the speed-up increases.
278
+
279
+ better performance than the baselines at all levels of speed-up. This demonstrates the informativeness of our parameter space in terms of diversity and reduced redundancy, such that the condensed dataset does not learn similar information repeatedly and captures sufficient features efficiently. It is worth noting that our method performs better, with less regression, at higher speed-up levels on the more complex dataset, e.g., CIFAR-100. We also demonstrate in Fig. 7 that our method can be orthogonally applied to other dataset distillation methods: we apply parameter perturbation to other DD methods to accelerate their training by $5 \times$. This indicates better scalability and improved efficiency of our method in condensing large-scale datasets.
280
+
281
+ # 6. Conclusion
282
+
283
+ In this work, we introduce a novel method for improving the efficiency of gradient-matching based dataset distillation approaches. We leverage model augmentation strategies, namely early-stage training and parameter perturbation, to increase the diversity of the parameter space while massively reducing the computational resources required for dataset distillation. Our method is able to achieve $10 \times$ acceleration on CIFAR and $5 \times$ acceleration on ImageNet. As the first attempt to improve the efficiency of gradient-matching based dataset distillation, the proposed method successfully crafts a condensed dataset of ImageNet in 18 hours, making dataset distillation more applicable in real-world settings.
284
+
285
+ # References
286
+
287
+ [1] Alessandro Achille, Matteo Rovere, and Stefano Soatto. Critical learning periods in deep neural networks. CoRR, abs/1711.08856, 2017. 4
288
+ [2] Ondrej Bohdal, Yongxin Yang, and Timothy M. Hospedales. Flexible dataset distillation: Learn labels instead of images. CoRR, abs/2006.08572, 2020. 1, 2
289
+ [3] George Cazenavette, Tongzhou Wang, Antonio Torralba, Alexei A. Efros, and Jun-Yan Zhu. Dataset distillation by matching training trajectories. In CVPR, pages 10708-10717, 2022. 1, 2, 3, 5, 6
290
+ [4] George Cazenavette, Tongzhou Wang, Antonio Torralba, Alexei A. Efros, and Jun-Yan Zhu. Wearable imagenet: Synthesizing tileable textures via dataset distillation. In CVPR Workshops, pages 2277-2281, 2022. 2, 4, 8
291
+ [5] Jia Deng, Wei Dong, Richard Socher, Li-Jia Li, Kai Li, and Li Fei-Fei. Imagenet: A large-scale hierarchical image database. In CVPR, pages 248–255, 2009. 4
292
+ [6] Jiahua Dong, Yang Cong, Gan Sun, Bineng Zhong, and Xiaowei Xu. What can be transferred: Unsupervised domain adaptation for endoscopic lesions segmentation. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4022-4031, June 2020. 2
293
+ [7] Jiahua Dong, Lixu Wang, Zhen Fang, Gan Sun, Shichao Xu, Xiao Wang, and Qi Zhu. Federated class-incremental learning. In IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2022. 2
294
+ [8] Tian Dong, Bo Zhao, and Lingjuan Lyu. Privacy for free: How does dataset condensation help privacy? In ICML, volume 162, pages 5378-5396, 2022. 2
295
+ [9] Gongfan Fang, Kanya Mo, Xinchao Wang, Jie Song, Shitao Bei, Haofei Zhang, and Mingli Song. Up to 100x faster data-free knowledge distillation. In AAAI, pages 6597-6604, 2022. 5
296
+ [10] Jonathan Frankle, David J. Schwab, and Ari S. Morcos. The early phase of neural network training. In ICLR, 2020. 4
297
+ [11] Jack Goetz and Ambuj Tewari. Federated learning via synthetic data. CoRR, abs/2008.04489, 2020. 2
298
+ [12] Guy Gur-Ari, Daniel A. Roberts, and Ethan Dyer. Gradient descent happens in a tiny subspace. CoRR, abs/1812.04754, 2018. 4
299
+ [13] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 2
300
+ [14] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9729-9738, 2020. 2
301
+ [15] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016. 5, 7
302
+ [16] Timothy Hospedales, Antreas Antoniou, Paul Micaelli, and Amos Storkey. Meta-learning in neural networks: A survey.
303
+
304
+ IEEE transactions on pattern analysis and machine intelligence, 44(9):5149-5169, 2021. 1
305
+ [17] Shengyuan Hu, Jack Goetz, Kshitiz Malik, Hongyuan Zhan, Zhe Liu, and Yue Liu. Fedsynth: Gradient compression via synthetic data in federated learning. CoRR, abs/2204.01273, 2022. 1, 2
306
+ [18] Gao Huang, Zhuang Liu, Laurens van der Maaten, and Kilian Q. Weinberger. Densely connected convolutional networks. In CVPR, pages 2261-2269, 2017. 7
307
+ [19] Zixuan Jiang, Jiaqi Gu, Mingjie Liu, and David Z. Pan. Delving into effective gradient matching for dataset condensation. CoRR, abs/2208.00311, 2022. 2
308
+ [20] Wei Jin, Xianfeng Tang, Haoming Jiang, Zheng Li, Danqing Zhang, Jiliang Tang, and Bing Yin. Condensing graphs via one-step gradient matching. In KDD, pages 720-730, 2022. 2
309
+ [21] Wei Jin, Lingxiao Zhao, Shichang Zhang, Yozen Liu, Jiliang Tang, and Neil Shah. Graph condensation for graph neural networks. In ICLR, 2022. 1, 2
310
+ [22] Jang-Hyun Kim, Jinuk Kim, Seong Joon Oh, Sangdoo Yun, Hwanjun Song, Joonhyun Jeong, Jung-Woo Ha, and Hyun Oh Song. Dataset condensation via efficient synthetic-data parameterization. In ICML, volume 162, pages 11102-11118, 2022. 1, 2, 3, 4, 5, 6, 7, 8
311
+ [23] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 4
312
+ [24] Saehyung Lee, Sanghyuk Chun, Sangwon Jung, Sangdoo Yun, and Sungroh Yoon. Dataset condensation with contrastive signals. In ICML, volume 162, pages 12352-12364, 2022. 1, 2
313
+ [25] Guang Li, Ren Togo, Takahiro Ogawa, and Miki Haseyama. Soft-label anonymous gastric x-ray image distillation. In ICIP, pages 305-309, 2020. 1
314
+ [26] Guang Li, Ren Togo, Takahiro Ogawa, and Miki Haseyama. Compressed gastric image generation based on soft-label dataset distillation for medical data sharing. Computer Methods and Programs in Biomedicine, page 107189, 2022. 1
315
+ [27] Guang Li, Ren Togo, Takahiro Ogawa, and Miki Haseyama. Dataset distillation for medical dataset sharing. CoRR, abs/2209.14603, 2022. 2
316
+ [28] Hao Li, Zheng Xu, Gavin Taylor, Christoph Studer, and Tom Goldstein. Visualizing the loss landscape of neural nets. In NIPS, pages 6391-6401, 2018. 4
317
+ [29] Yongqi Li and Wenjie Li. Data distillation for text classification. CoRR, abs/2104.08448, 2021. 2
318
+ [30] Mengyang Liu, Shanchuan Li, Xinshi Chen, and Le Song. Graph condensation via receptive field distribution matching. CoRR, abs/2206.13697, 2022. 1
319
+ [31] Raphael Gontijo Lopes, Yann Dauphin, and Ekin Dogus Cubuk. No one representation to rule them all: Overlapping features of training methods. In ICLR, 2022. 3
320
+ [32] Dougal Maclaurin, David Duvenaud, and Ryan P. Adams. Gradient-based hyperparameter optimization through reversible learning. In ICML, volume 37, pages 2113-2122, 2015. 2
321
+ [33] Wojciech Masarczyk and Ivona Tautkute. Reducing catastrophic forgetting with learning on synthetic data. In CVPR Workshops, pages 1019-1024, 2020. 1, 2
322
+
323
+ [34] Giung Nam, Hyungi Lee, Byeongho Heo, and Juho Lee. Improving ensemble distillation with weight averaging and diversifying perturbation. In ICML, volume 162, pages 16353-16367, 2022. 4
324
+ [35] Giung Nam, Jongmin Yoon, Yoonho Lee, and Juho Lee. Diversity matters when learning from ensembles. In NIPS, pages 8367-8377, 2021. 4
325
+ [36] Timothy Nguyen, Zhourong Chen, and Jaehoon Lee. Dataset meta-learning from kernel ridge-regression. In ICLR, 2021. 1, 2
326
+ [37] Timothy Nguyen, Roman Novak, Lechao Xiao, and Jaehoon Lee. Dataset distillation with infinitely wide convolutional networks. In NIPS, pages 5186-5198, 2021. 2, 3
327
+ [38] Andrea Rosasco, Antonio Carta, Andrea Cossu, Vincenzo Lomonaco, and Davide Bacciu. Distilled replay: Overcoming forgetting through synthetic samples. CoRR, abs/2103.15851, 2021. 2
328
+ [39] Levent Sagun, Utku Evci, V. Ugur Güney, Yann N. Dauphin, and Léon Bottou. Empirical analysis of the hessian of overparametrized neural networks. In ICLR Workshop, 2018. 4
329
+ [40] Mattia Sangermano, Antonio Carta, Andrea Cossu, and Davide Bacciu. Sample condensation in online continual learning. In IJCNN, pages 1-8, 2022. 1, 2
330
+ [41] Robin Tibor Schirrmeister, Rosanne Liu, Sara Hooker, and Tonio Ball. When less is more: Simplifying inputs aids neural network understanding. arXiv preprint arXiv:2201.05610, 2022. 1
331
+ [42] Rui Song, Dai Liu, Dave Zhenyu Chen, Andreas Festag, Carsten Trinitis, Martin Schulz, and Alois C. Knoll. Federated learning via decentralized dataset distillation in resource-constrained edge environments. CoRR, abs/2208.11311, 2022. 1
332
+ [43] Felipe Petroski Such, Aditya Rawal, Joel Lehman, Kenneth O. Stanley, and Jeffrey Clune. Generative teaching networks: Accelerating neural architecture search by learning to generate synthetic training data. In ICML, volume 119, pages 9206-9216, 2020. 1, 2
333
+ [44] Ilia Sucholutsky and Matthias Schonlau. Soft-label dataset distillation and text dataset distillation. In IJCNN, pages 1-8, 2021. 2
334
+ [45] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In ICML, volume 97, pages 6105-6114, 2019. 7
335
+ [46] Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. In ECCV, volume 12356, pages 776-794, 2020. 6
336
+ [47] Kai Wang, Bo Zhao, Xiangyu Peng, Zheng Zhu, Shuo Yang, Shuo Wang, Guan Huang, Hakan Bilen, Xinchao Wang, and Yang You. CAFE: learning to condense dataset by aligning features. In CVPR, pages 12186-12195, 2022. 2, 5
337
+ [48] Tongzhou Wang, Jun-Yan Zhu, Antonio Torralba, and Alexei A. Efros. Dataset distillation. CoRR, abs/1811.10959, 2018. 1, 2
338
+ [49] Qingsong Wen, Liang Sun, Fan Yang, Xiaomin Song, Jingkun Gao, Xue Wang, and Huan Xu. Time series data augmentation for deep learning: A survey. In IJCAI, pages 4653-4660, 2021. 3
339
+
340
+ [50] Mitchell Wortsman, Gabriel Ilharco, Samir Ya Gadre, Rebecca Roelofs, Raphael Gontijo Lopes, Ari S. Morcos, Hongseok Namkoong, Ali Farhadi, Yair Carmon, Simon Kornblith, and Ludwig Schmidt. Model soups: averaging weights of multiple fine-tuned models improves accuracy without increasing inference time. In ICML, volume 162, pages 23965-23998, 2022. 3
341
+ [51] Sen Wu, Hongyang R. Zhang, Gregory Valiant, and Christopher Ré. On the generalization effects of linear transformations in data augmentation. In ICML, volume 119, pages 10410–10420, 2020. 3
342
+ [52] Yuanhao Xiong, Ruochen Wang, Minhao Cheng, Felix Yu, and Cho-Jui Hsieh. Feddm: Iterative distribution matching for communication-efficient federated learning. CoRR, abs/2207.09653, 2022. 2
343
+ [53] Jie Zhang, Bo Li, Chen Chen, Lingjuan Lyu, Shuang Wu, Shouhong Ding, and Chao Wu. Delving into the adversarial robustness of federated learning. arXiv preprint arXiv:2302.09479, 2023. 2
344
+ [54] Jie Zhang, Bo Li, Jianghe Xu, Shuang Wu, Shouhong Ding, Lei Zhang, and Chao Wu. Towards efficient data free blackbox adversarial attack. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15115-15125, 2022. 2
345
+ [55] Bo Zhao and Hakan Bilen. Dataset condensation with differentiable siamese augmentation. In ICML, volume 139, pages 12674-12685, 2021. 1, 2, 3, 5, 6, 8
346
+ [56] Bo Zhao and Hakan Bilen. Dataset condensation with distribution matching. In WACV, pages 6503-6512, 2023. 1, 2, 3, 4, 5
347
+ [57] Bo Zhao, Konda Reddy Mopuri, and Hakan Bilen. Dataset condensation with gradient matching. In ICLR, 2021. 1, 2, 3, 4, 6, 8
acceleratingdatasetdistillationviamodelaugmentation/images.zip ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d07838fff0e42acf7a710c0a0fed96658225a2712a25641d59a6017ce42a8d8c
3
+ size 517109
acceleratingdatasetdistillationviamodelaugmentation/layout.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4d20c705f080dd5faefaa370cd16e62da8dbf250bc4532f8e0864cc54a737a21
3
+ size 439092
acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/e514fec0-dcec-43a5-8ee8-992c5bb1bf30_content_list.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3b98d144516537f1bd3acba2eb329aceb996390ddaa6eea0146d5aff15f60ed0
3
+ size 81762
acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/e514fec0-dcec-43a5-8ee8-992c5bb1bf30_model.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a19b3aac570ded4042f14b3fd8116a39943fb4156bd482f8986fb8424acb4bc6
3
+ size 97870
acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/e514fec0-dcec-43a5-8ee8-992c5bb1bf30_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f825d8107d5aeec7ce6bdefaa1dab67bf018b623722aa1fa160088f6a468f277
3
+ size 828642
acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/full.md ADDED
@@ -0,0 +1,277 @@
1
+ # Accelerating Vision-Language Pretraining with Free Language Modeling
2
+
3
+ Teng Wang $^{1,2\dagger}$ , Yixiao Ge $^{3}$ , Feng Zheng $^{1,5*}$ , Ran Cheng $^{1}$ , Ying Shan $^{3}$ , Xiaohu Qie $^{4}$ , Ping Luo $^{2,6}$ $^{1}$ Southern University of Science and Technology $^{2}$ The University of Hong Kong
4
+ $^{3}$ ARC Lab, $^{4}$ Tencent PCG $^{5}$ Peng Cheng Laboratory $^{6}$ Shanghai AI Laboratory
5
+
6
+ tengwang@connect.hku.hk {yixiaoge, yingsshan, tigerqie}@tencent.com
7
+
8
+ f.zheng@ieee.org ranchengcn@gmail.com pluo@cs.hku.hk
9
+
10
+ # Abstract
11
+
12
+ The state of the art in vision-language pretraining (VLP) achieves exemplary performance but suffers from high training costs resulting from slow convergence and long training time, especially on large-scale web datasets. An essential obstacle to training efficiency lies in the entangled prediction rate (percentage of tokens for reconstruction) and corruption rate (percentage of corrupted tokens) in masked language modeling (MLM); that is, a proper corruption rate is achieved at the cost of excluding a large portion of output tokens from the prediction loss. To accelerate the convergence of VLP, we propose a new pretraining task, namely, free language modeling (FLM), that enables a $100\%$ prediction rate with arbitrary corruption rates. FLM frees the prediction rate from its tie to the corruption rate while allowing the corruption spans to be customized for each token to be predicted. FLM-trained models are encouraged to learn better and faster given the same GPU time by exploiting bidirectional contexts more flexibly. Extensive experiments show that FLM achieves an impressive $2.5 \times$ reduction in pretraining time compared to MLM-based methods, while keeping competitive performance on both vision-language understanding and generation tasks. Code will be public at https://github.com/TencentARC/FLM.
13
+
14
+ # 1. Introduction
15
+
16
+ Vision-language pretraining (VLP) has recently demonstrated impressive performance on a handful of vision-language tasks [7,10,14,18,19,22], e.g., visual question answering, cross-modal retrieval, and image captioning. Several factors are responsible for this success: the availability of large-scale image-text datasets collected from the web [30], high-capacity model architectures like the
17
+
18
+ ![](images/aa39c2b68a146bc75f2f35361edbc7342fcfa9bb534820879ab6b96ddcdb5ca4.jpg)
19
+
20
+ ![](images/56fbe94a130a75037830f264ec9df20cb8ec8c7e2675c1cae1739e62105457e2.jpg)
21
+ Figure 1. (a) Large prediction rate accelerates training. Given a fixed corruption rate, we vary the prediction rate by randomly selecting a subset of output tokens for prediction loss. The learning rate schedule follows METER [10]. (b) The proposed FLM achieves competitive performance compared with MLM meanwhile significantly accelerating the pretraining stage. The downstream performance on $\mathrm{NLVR}^2$ [32] is reported. We show accuracy curves before convergence for better visualization.
22
+
23
+ Transformer [34], and effective pretraining objectives for cross-modal learning.
24
+
25
+ One of the dominant pretraining objectives is masked language modeling (MLM), which was first introduced in natural language processing [9] and has been applied to vision-language areas in recent years [19]. MLM is a generative pretraining task designed to reconstruct a few (usually $40\%$ for VLP) masked text tokens via reasoning
26
+
27
+ among the context of the remaining texts and the paired image. While effective in capturing cross-modal interactions, MLM-based methods [7,15,21] suffer from slow convergence and long training time, especially for large-scale models and noisy web data.
28
+
29
+ We argue that the limited prediction rate in MLM impedes the convergence speed of pretraining, since, for a given corruption rate, a large portion of output tokens is excluded from the prediction loss. As shown in Fig. 1 (top), under the same corruption rate, a larger prediction rate for MLM results in faster convergence of the validation loss and downstream performance. It is intuitive to set the prediction rate to $100\%$ to fully exploit the text tokens. However, a paradox emerges: a large prediction rate can only be achieved with a greater corruption rate in MLM, but an extremely large corruption rate leads to an extremely tough pretraining task that may cause training collapse.
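+ To make the entanglement concrete, the toy PyTorch sketch below (an illustration only, not this paper's implementation) shows a standard MLM corruption step in which only the masked positions contribute to the loss, so the prediction rate necessarily equals the corruption rate; FLM instead decouples the two and predicts $100\%$ of the tokens.
+
+ ```python
+ import torch
+
+ def mlm_corrupt(tokens, mask_id, corruption_rate=0.4):
+     """Standard MLM corruption: mask a fraction of tokens, and only those
+     positions are reconstructed, so prediction rate == corruption rate."""
+     corrupted = tokens.clone()
+     is_masked = torch.rand(tokens.shape) < corruption_rate
+     corrupted[is_masked] = mask_id
+     labels = tokens.clone()
+     labels[~is_masked] = -100   # ignored by cross-entropy -> excluded from the loss
+     return corrupted, labels
+
+ # prediction rate = fraction of positions with a valid label
+ tokens = torch.randint(5, 1000, (2, 16))
+ corrupted, labels = mlm_corrupt(tokens, mask_id=0)
+ pred_rate = (labels != -100).float().mean()   # roughly equal to corruption_rate for MLM
+ ```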
30
+
31
+ Autoregressive language modeling (AR) provides a workable solution that enables a $100\%$ prediction rate. It predicts the next token given the observation of previous tokens. As shown in Fig. 1 (bottom), AR performs favorably against MLM in training efficiency, i.e., a $6.1 \times$ speed-up in convergence. However, the converged performance of AR is, unfortunately, much inferior to that of MLM. This is probably caused by the sub-optimal unidirectional corruption pattern, which is insufficient for downstream understanding tasks that usually rely on bidirectional contexts.
32
+
33
+ A natural question arises: can we accelerate the convergence of VLP by predicting $100\%$ of the tokens, as AR does, while achieving competitive performance with MLM? Towards this end, we introduce a new pretraining task, dubbed free language modeling (FLM), for VLP, which enjoys an extreme $100\%$ prediction rate and flexible bidirectional contextualized representations. We for the first time break up the entanglement between the corruption and prediction rates, allowing the two factors to be determined freely. Furthermore, for each output token to be predicted, we allow an independent, arbitrary-length span (from one token to $100\%$ of the tokens) as the corrupted context. Rather than the suffix-like corruption pattern of AR (as well as PrefixLM [37]), the corruption span of FLM is primarily distributed in the middle of the sequence, establishing a flexible perception of bidirectional contexts for better adaptation to VL understanding tasks. The comparison between different pretraining objectives is illustrated in Fig. 2.
34
+
35
+ To perform VLP with FLM, we propose an encode-corrupt-predict framework, which performs feature encoding once and reconstructs several corrupted versions of the text sequence in parallel. In the encoding step, bidirectional representations are achieved by learning forward and reverse unidirectional representations, respectively, the order of which is manipulated by (reverse) causal masks in the same text Transformer. Subsequently, we ensure a
36
+
37
+ $100\%$ prediction rate by customizing corruption-prediction tasks for predicting each input token. In each corruption-prediction task, a span of corruption is randomly sampled and attached to the encoded sequence, followed by a reconstructor to solve the prediction task by reasoning among the remaining contexts. Unlike previous works (e.g., MLM, AR) that adopt pre-encoding corruption, we inject corruptions after one-time feature encoding, encouraging flexible corruption patterns and efficient parallel prediction.
38
+
39
+ Our contributions are three-fold. (1) A novel pretraining objective for VLP, namely, free language modeling (FLM), is proposed to free the prediction rate from the constraints of corruption rate, enabling an appealing $100\%$ prediction rate for accelerating convergence speed during pretraining. (2) An encode-corrupt-predict framework built upon FLM objective is proposed, allowing efficient and effective learning of a set of prediction tasks by merely conducting feature encoding once. (3) Extensive experiments on VQA, NLVR², image captioning, and image-text retrieval demonstrate the effectiveness of our FLM, where comparable performances to MLM are achieved with less than $50\%$ pretraining time.
40
+
41
+ # 2. Related Work
42
+
43
+ Vision-Language Pretraining. Vision-language pretraining tasks can be divided into two categories: (i) discriminative tasks, e.g., image-text contrastive (ITC) and image-text matching (ITM), and (ii) generative tasks, e.g., masked language modeling (MLM) and autoregressive language modeling (AR). Discriminative tasks consider the image-text pairs as multi-modal views of the same semantics. Contrastive or multi-view learning is adopted for learning the alignment between multiple modalities. For example, CLIP [29], ALIGN [14], and following works [18, 20, 40] utilize cross-modal contrastive learning by projecting the image and language information into a joint (structured) semantic space. Generative tasks aim to reconstruct the corrupted text (image) with the assistance of the visual (text) modality. The main body of representative works [2, 7, 19, 22, 27, 35, 42, 43] employs MLM-like objectives, where input text (image) tokens are partially masked and then interact with visual (text) tokens to reconstruct the corrupted part. SimVLM [37] introduces a single prefix language modeling (PrefixLM) objective for exploiting large-scale weak supervision in VLP. CoCa [41] further verifies the representation ability of autoregressive language modeling (AR) in the vision-language domain. While most existing methods combine discriminative and generative tasks for better representation learning, BEiT-3 [36] shows that a single generative language modeling objective (e.g., MLM) can handle vision-language interactions and alignments well with a mixture-of-experts transformer. Although superior performance has been attained, most existing methods based on MLM suffer from low utilization of output tokens and lead to a slow
44
+
45
+ ![](images/7ced1407434c225344286a28577146662186fdff0ff4bbbf2b1a7cf07c023d36.jpg)
46
+ Figure 2. Dependency matrix of different language modeling methods in vision-language pretraining. $r_{\mathrm{pred}}$ represents the proportion of output tokens for reconstruction. $r_{\mathrm{corr}}$ represents the proportion of corrupted inputs for each output token. $\overline{r}_{\mathrm{corr}}$ is the mean corruption rate of all reconstruction tokens. FLM has distinct advantages compared with others: 1) Different from MLM and PrefixLM that bind $r_{\mathrm{pred}}$ and $\overline{r}_{\mathrm{corr}}$ together by $r$ , the unbound prediction rate in FLM could achieve $100\%$ for accelerating training as much as possible. 2) Without relying on a position-aware unidirectional corruption in AR/PrefixLM or fixed corruption across all positions in MLM (see the right-side line graph), the corrupted span in FLM for each output token could be different, and the corrupted rate is independent of the position of the output token, enabling a more flexible corruption pattern for better exploiting the bidirectional context information.
47
+
48
+ convergence rate. This paper proposes a new generative language modeling method targeting pretraining acceleration.
49
+
50
+ Efficient Pretraining. While early VLP methods [7,22,33,43] rely on time-consuming pretrained object detectors for visual representation, PixelBERT [13] and ViLT [15] directly apply grid/patch-level visual features to reduce the computational complexity of the object-level visual encoder. Beyond the design of efficient model architectures, a few studies focus on data-efficient training. Bitton et al. [3] propose an alternative masking strategy that better focuses on visually-related physical words to improve VLP in low-resource settings. DeCLIP [23] enhances CLIP by exploring more supervision signals, such as self-supervision within a single modality or multi-view supervision across different modalities. The most relevant work to this paper is GRIT-VLP [5], which assigns a larger mask rate for MLM and performs grouped in-batch negative sampling for ITC to accelerate convergence. However, only half of the output tokens are assigned to the reconstruction task, and the under-used output tokens impede a further speed-up of pretraining. Our method decouples the corruption and reconstruction rates, making them freely chosen for a better trade-off between performance and efficiency.
51
+
52
+ Language Modeling. In NLP, MLM [9, 12, 25] and AR [4, 8] have been the two most popular generative pretraining objectives. AR estimates the probability distribution of a given text sequence using the product rule with an autoregressive model. However, unidirectional encoding may not be suitable for language understanding tasks that prefer bidirectional context information. MLM enables bidirectional contexts for language understanding tasks but cannot be directly adopted for language generation tasks. Some works [11, 39] unify MLM and AR for better performance on both language understanding and generation tasks. Wettig et al. [38] study the choice of the mask ratio
53
+
54
+ in MLM from the perspective of both corruption and prediction. However, little attention among previous methods has been devoted to the issue of training efficiency. We target accelerating vision-language pretraining while keeping decent performance on vision-language understanding and generation tasks.
55
+
56
+ # 3. Method
57
+
58
+ In this section, we first recap the representative language modeling methods for VLP from a corruption-prediction view in Sec. 3.1. Then we propose the new language modeling method FLM to decouple the prediction rate from the corruption rate in Sec. 3.2. Finally, we introduce FLM into VLP and propose a novel encode-corrupt-predict framework for accelerating VLP in Sec. 3.3.
59
+
60
+ # 3.1. Language Modeling as Corruption-Prediction
61
+
62
+ Given an input sequence $\mathbf{x} = \{x_{1},\dots,x_{L}\}$, MLM aims to learn a deep bidirectional representation by randomly replacing part of the input tokens with a special mask token, and then maximizing the probability of reconstructing those masked tokens, $P(\mathbf{x}_{\mathbf{m}}|\mathbf{x}_{\backslash \mathbf{m}})$, where $\mathbf{x}_{\mathbf{m}}$ represents the corrupted tokens. AR uses a left-to-right autoregressive factorization to model the density of the sequence, $\sum_{i = 1:L}\log P(x_i|x_{< i})$. PrefixLM enables bidirectional perception among prefix tokens and uses a left-to-right autoregressive factorization to model the density of the remaining sequence, $\sum_{i = L_p:L}\log P(x_i|x_{[L_p,i)},x_{< L_p})$, where $L_{p}$ denotes the prefix length.
63
+
64
+ Note that all the above methods could be interpreted as a corruption-prediction problem because each prediction token only has a partial observation of the input data, i.e., the interactions between output tokens and some input tokens are corrupted. Therefore, their pretraining objectives could
65
+
66
+ be unified as maximizing the reconstruction probability:
67
+
68
+ $$
69
+ \mathbb{E}_{\mathbf{M}\sim B(r)}\sum_{i = 1:L}\mathbb{1}_{m_{ii} = 0}\log P\left(x_i \mid \{x_j \mid m_{ij} = 1\}\right), \tag{1}
70
+ $$
71
+
72
+ where $\mathbf{M} = [m_{ij}]_{1\leq i\leq L,1\leq j\leq L}$ represents the dependency matrix between the input and the prediction target, and $m_{ij} = 1/0$ indicates that $x_{j}$ is visible/invisible when predicting $x_{i}$. $B(r)$ represents a distribution parameterized by $r$, which is customized by specific models. The dependency matrices $\mathbf{M}$ of different language modeling methods are as follows (also illustrated in Fig. 2; see the code sketch after this list):
73
+
74
+ - For MLM, $\mathbf{m}_{1,:} = \dots = \mathbf{m}_{L,:} = \mathbf{p} \sim \mathrm{Binomial}(r_{\mathrm{mask}})$ . The corruption for predicting all $x_{i}$ is the same and it is sampled from a Binomial distribution.
75
+ - For AR, $m_{ij} = \mathbb{1}_{j < i}$. The corruption for predicting $x_{i}$ depends on the position $i$: the corrupted suffix gets shorter as $i$ grows.
76
+ - For PrefixLM, $m_{ij} = \mathbb{1}_{j < \max(i, L_p)}$ , where prefix length $L_p = (1 - r_{\mathrm{span}}) \cdot L$ and $r_{\mathrm{span}} \sim \mathrm{Uniform}(0, 1)$ represents the length ratio of the corrupted span.
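+
+ The sketch below is a minimal NumPy illustration of the bullets above; the function names and the way $r_{\mathrm{mask}}$ and $L_p$ are drawn are our assumptions rather than the paper's implementation. It builds the dependency matrix $\mathbf{M}$ of Eq. 1 for each baseline objective:
+
+ ```python
+ # m[i, j] = 1 means token j is visible when predicting token i.
+ import numpy as np
+
+ def mlm_matrix(L, r_mask=0.4, rng=np.random):
+     # Every row shares the same corruption: masked positions are invisible.
+     corrupted = rng.random(L) < r_mask
+     return np.tile((~corrupted).astype(int), (L, 1))
+
+ def ar_matrix(L):
+     # Token i only sees strictly earlier tokens j < i.
+     return np.tril(np.ones((L, L), dtype=int), k=-1)
+
+ def prefixlm_matrix(L, rng=np.random):
+     # A prefix of length L_p is fully visible; the suffix is autoregressive.
+     L_p = int((1 - rng.random()) * L)       # r_span ~ Uniform(0, 1)
+     i = np.arange(L)[:, None]
+     j = np.arange(L)[None, :]
+     return (j < np.maximum(i, L_p)).astype(int)
+
+ # Eq. 1 only sums the loss over rows i with m[i, i] == 0, i.e., tokens whose
+ # own position is corrupted; this is what ties r_pred to r_corr for MLM.
+ ```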
77
+
78
+ # 3.2. Free Language Modeling (FLM)
79
+
80
+ From the above analysis, the representative MLM, AR, and PrefixLM methods have limited freedom in the dependency matrix, which leads to the following issues: 1) the tie-up between the prediction and corruption rates in MLM may lead to a low convergence speed during training; 2) inflexible and non-customizable contexts for each prediction target result in sub-optimal context learning. For example, the suffix-like corruption in AR disables the bidirectional context modeling that is essential for downstream understanding tasks. Moreover, the autoregressive prior in AR results in an uneven distribution of corruption rates: later tokens are always assigned a smaller corruption rate and are thus easier to predict than earlier ones. The position-related distribution of the corruption rate may cause a sub-optimal difficulty level for pretraining tasks.
81
+
82
+ The goal of FLM is to disentangle the prediction and corruption rates to fully utilize training signals and accelerate VLP. After disentanglement, the model has a more flexible corruption pattern that benefits bidirectional contextual representation learning. Following the unified formulation of the corruption-prediction problem in Eqn. 1, we introduce the dependency matrix of FLM:
83
+
84
+ $$
85
+ m_{ij} = 1 \text{ if } j \notin \mathrm{span}_i, \text{ otherwise } 0. \tag{2}
86
+ $$
87
+
88
+ where $\mathrm{span}_i$ is a random corruption span with length $L_{\mathrm{span}}^i$ that satisfies $i \in \mathrm{span}_i$. The starting position and length of $\mathrm{span}_i$ could be customized or randomly sampled from a distribution. In our implementation, we sample $L_{\mathrm{span}}^i \sim \mathrm{Binomial}(L, r_{\mathrm{corr}})$ for each $i$, where $r_{\mathrm{corr}}$ is the hyperparameter indicating the expected corruption rate.
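+
+ A minimal sketch of this sampling procedure is given below; it is one concrete instantiation under our own assumption about how the span start is drawn (the paper leaves both the start and the length customizable):
+
+ ```python
+ # Per-token span corruption for FLM: each prediction target i gets its own
+ # corrupted span containing i, so all L tokens can be predicted (100%
+ # prediction rate) while the corruption rate stays a free hyperparameter.
+ import numpy as np
+
+ def flm_matrix(L, r_corr, rng=np.random.default_rng(0)):
+     M = np.ones((L, L), dtype=int)
+     for i in range(L):
+         # Span length ~ Binomial(L, r_corr), clipped so that at least token i
+         # itself is corrupted (the minimal corruption rate of 1/L).
+         span_len = max(1, min(L, rng.binomial(L, r_corr)))
+         # Random start position such that i falls inside the span.
+         lo = rng.integers(max(0, i - span_len + 1), min(i, L - span_len) + 1)
+         M[i, lo:lo + span_len] = 0          # corrupted positions are invisible
+     return M
+
+ M = flm_matrix(L=8, r_corr=0.3)
+ assert (np.diag(M) == 0).all()              # every token is hidden from itself
+ ```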
89
+
90
+ ![](images/b26cf1bd15179466ce31c69db79504e6d55823a705ab0d3650f7ce426f0ad4f4.jpg)
91
+ Figure 3. Overview of the proposed VLP framework with free language modeling (FLM). First, the image is patched and encoded by a vision transformer into a sequence of vision tokens. Then, the text transformer performs uni-modal text feature encoding in the bottom layers and multimodal fusion between visual and text features in the top layers. Bidirectional multimodal representations are achieved by learning forward and reverse unidirectional representations, respectively, the order of which is manipulated by (reverse) causal masks in the same text transformer. After feature encoding, we construct a set of independent corruption-prediction tasks. For each task, we inject a random span corruption into the multimodal representation and then introduce a reconstruction query that gathers informative contexts from the corrupted features for reconstructing a single target token. Benefiting from the flexibility of post-encoding corruption, $100\%$ text tokens could be efficiently reconstructed in parallel.
92
+
93
+ Note that the corrupted span in FLM could differ for different predictions, hopefully increasing the flexibility of bidirectional contextualized interactions and helping optimization. Since the choice of $\mathrm{span}_i$ does not interfere with each other for different $i$ , the prediction rate could increase to $100\%$ which allows all input tokens to be reconstructed. As for the corruption rate, we note that the minimal corruption rate is $1 / L$ , since at least one token should be corrupted to reconstruct itself to avoid information leakage.
94
+
95
+ # 3.3. Vision-Language Pretraining with FLM
96
+
97
+ Built upon FLM, a new encode-corrupt-predict pretraining framework is proposed for efficient pretraining with decoupled prediction and corruption rates. Given the input sequence $\mathbf{x} = \{x_{1},\dots,x_{L}\}$ , we formulate the reconstruction of all input tokens as $L$ independent corruption-prediction tasks. For $i$ -th task, the learning objective is to maximize $P(x_{i}|\{x_{j}|m_{ij} = 1\})$ by reasoning upon uncorrupted bidirectional tokens. Fig. 3 depicts the pipeline of the model.
98
+
99
+ Decomposed Bidirectional Encoding. Since FLM establishes a customized corruption span for each prediction task, a naive solution of repeating the MLM-style feature encoding (transformer with fully-visible attention mask) for
100
+
101
+ each task becomes time-consuming. Instead, we propose to share the intermediate features across tasks for efficient inference by decomposing the token representation into two complementary views: left-to-right and right-to-left features. The former adopts a text transformer with a causal mask, enforcing later tokens to attend only to previous tokens. The latter adopts the same transformer with a reverse causal mask for a right-to-left context flow.
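+
+ The two views only differ in their attention masks; the sketch below uses an assumed convention (1 = query i may attend to key j) and is not the released implementation:
+
+ ```python
+ # Left-to-right and right-to-left views share the same text transformer
+ # weights and differ only in the (reverse) causal attention mask.
+ import numpy as np
+
+ def causal_mask(L):          # l2r view: token i attends to j <= i
+     return np.tril(np.ones((L, L), dtype=int))
+
+ def reverse_causal_mask(L):  # r2l view: token i attends to j >= i
+     return np.triu(np.ones((L, L), dtype=int))
+
+ # e_l2r = text_transformer(tokens, attn_mask=causal_mask(L))
+ # e_r2l = text_transformer(tokens, attn_mask=reverse_causal_mask(L))
+ # (cf. Table 3b: sharing the transformer across the two views helps.)
+ ```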
102
+
103
+ Specifically, we first encode image features with a CLIP image transformer. For the text input, the $N_{\mathrm{bottom}}$ bottom layers of the text transformer perform the decomposed bidirectional encoding on the language input only, while the $N_{\mathrm{top}}$ top layers further receive image features and fuse the multimodal features via cross-attention layers. After encoding, we obtain the bidirectional representation in the text transformer, denoted as $E^n = \{e_1^n,\dots ,e_L^n\}$, where $e_i^n = \{e_i^{\mathrm{l2r},n},e_i^{\mathrm{r2l},n}\}$ is the token representation for $x_{i}$, comprised of features from the forward and reverse flows at the $n$-th layer.
104
+
105
+ Reconstructor. Following the description in Sec. 3.2, we sample a dependency matrix $\mathbf{M}$ to construct several corruption-prediction tasks. As a consequence of the span corruption, some elements in $E^n$ that rely on corrupted inputs need to be neglected to avoid information leakage. To reconstruct $x_{i}$ , we gather context from uncorrupted features in $E$ by cross-attention:
106
+
107
+ $$
108
+ q _ {i} ^ {n + 1} = \text {C r o s s A t t e n t i o n} \left(q _ {i} ^ {n}, E _ {i} ^ {n}\right)
109
+ $$
110
+
111
+ $$
112
+ E _ {i} ^ {n} = \left\{e _ {j} ^ {\mathrm {l 2 r}, n} \mid m _ {i j} = 1, j < i \right\} \cup \left\{e _ {j} ^ {\mathrm {r 2 l}, n} \mid m _ {i j} = 1, j > i \right\}, \tag {3}
113
+ $$
114
+
115
+ where $E_{i}^{n}$ represents all uncorrupted elements in $E^n$ given $\mathbf{M}$. $q_{i}$ is a learnable reconstruction query, which is initialized as the $i$-th positional embedding in the first layer. The selection from $E^n$ to $E_{i}^{n}$ is implemented as a specific attention mask in the cross-attention layers, as illustrated in Fig. 3. By forwarding through stacked cross-attention layers, $q_{i}^{n}$ aggregates deep bidirectional contexts for effective reconstruction of $x_{i}$. The output features of the last layer of the reconstructor are fed into an MLP for the final prediction.
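+
+ The selection in Eq. 3 can be realized as a single cross-attention mask over the concatenated forward and reverse features. The sketch below reflects our reading of the equation with an assumed $[E^{\mathrm{l2r}}; E^{\mathrm{r2l}}]$ memory layout:
+
+ ```python
+ # Build the (L, 2L) cross-attention mask used by the reconstruction queries.
+ import numpy as np
+
+ def reconstructor_attention_mask(M):
+     """M: (L, L) FLM dependency matrix. Entry (i, k) = 1 lets query q_i read
+     memory slot k; the first L slots hold l2r features, the last L slots r2l."""
+     L = M.shape[0]
+     i = np.arange(L)[:, None]
+     j = np.arange(L)[None, :]
+     vis_l2r = (M == 1) & (j < i)     # uncorrupted left context, forward flow
+     vis_r2l = (M == 1) & (j > i)     # uncorrupted right context, reverse flow
+     return np.concatenate([vis_l2r, vis_r2l], axis=1).astype(int)
+
+ # q_i starts from the i-th positional embedding, is refined by stacked
+ # cross-attention layers restricted by this mask, and is finally mapped by an
+ # MLP to the prediction of token x_i.
+ ```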
116
+
117
+ Note that the queries $q_{i}^{n}$ work independently of each other, allowing flexible addition or removal of tasks. By sharing feature encoding, all reconstruction tasks run in parallel with low computational cost.
118
+
119
+ Pretraining Objectives. The reconstruction objective is to minimize the negative log-likelihood of predicted tokens:
120
+
121
+ $$
122
+ \mathcal{L}_{\mathrm{R}} = -\mathbb{E}_{\mathbf{M}\sim B(r)}\sum_{i = 1:L}\log P\left(x_i \mid \{x_j \mid m_{ij} = 1\}\right), \tag{4}
123
+ $$
124
+
125
+ To further enhance the model's representation ability, we introduce an intermediate prediction loss upon $E^{N}$ that improves the local temporal dependency between words, where $N$ denotes the last layer of the text transformer. We supervise the forward/reverse sequences $e_i^{\mathrm{l2r},N} / e_i^{\mathrm{r2l},N}$ with their next/previous tokens, respectively. The intermediate loss
126
+
127
+ is the summation of two unidirectional prediction losses: $\mathcal{L}_{\mathrm{inter}} = \mathcal{L}_{\mathrm{l2r}} + \mathcal{L}_{\mathrm{r2l}} = -\sum_{i = 1:L}\log P(x_i|x_{< i}) - \sum_{i = 1:L}\log P(x_i|x_{>i})$. The overall pretraining objective of FLM is $\mathcal{L}_{\mathrm{FLM}} = \mathcal{L}_{\mathrm{R}} + \mathcal{L}_{\mathrm{inter}}$.
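+
+ Put together, the overall objective can be sketched as token-level cross-entropy terms; the tensor shapes below are assumptions for illustration, and the snippet is not the paper's training code:
+
+ ```python
+ # L_FLM = L_R + L_l2r + L_r2l, all as cross-entropy over text tokens.
+ import torch
+ import torch.nn.functional as F
+
+ def flm_loss(recon_logits, l2r_logits, r2l_logits, tokens):
+     """recon_logits / l2r_logits / r2l_logits: (B, L, V); tokens: (B, L)."""
+     # L_R: every position is a reconstruction target (100% prediction rate).
+     l_r = F.cross_entropy(recon_logits.flatten(0, 1), tokens.flatten())
+     # L_l2r: position i of the forward stream predicts the next token x_{i+1}.
+     l_l2r = F.cross_entropy(l2r_logits[:, :-1].flatten(0, 1),
+                             tokens[:, 1:].flatten())
+     # L_r2l: position i of the reverse stream predicts the previous token.
+     l_r2l = F.cross_entropy(r2l_logits[:, 1:].flatten(0, 1),
+                             tokens[:, :-1].flatten())
+     return l_r + l_l2r + l_r2l
+ ```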
128
+
129
+ # 4. Experiments
130
+
131
+ # 4.1. Experimental Setting
132
+
133
+ Pretraining Data. Following previous work [10, 18], the pretraining data comes from four commonly used datasets, including COCO [24], Visual Genome [16], SBU Captions [28], and Conceptual Captions 3M [30], totaling 4M images. An enlarged version of $\sim 13\mathrm{M}$ images is further used to boost performance by including Conceptual Captions 12M [6]$^{1}$.
134
+
135
+ Downstream Tasks. During finetuning, we append a special [CLS] token to the reconstructor and use its output features as the global cross-modal representation. We follow [7] to adapt the pre-trained model to four downstream vision-language understanding tasks: visual question answering (VQA) [1], natural language for visual reasoning ($\mathrm{NLVR}^2$) [32], image-text retrieval (TR), and text-image retrieval (IR). We also test performance on a vision-language generation task, i.e., image captioning [24]. For image captioning, we drop the reconstructor and use the text transformer with a causal mask for sequence generation. More details can be found in the supplementary materials.
136
+
137
+ Pretraining Details. Following ALBEF [18] and METER [10], the visual transformer is initialized by CLIP-ViT [29] pretrained on 400M noisy image-text pairs. The visual transformer with ViT-B/32 is used as our base architecture for the ablation study, and the one with ViT-L/14 is used for scaling up to compare with other methods. We denote models with the ViT-B image encoder as Ours and with ViT-L as OursLARGE. The bottom six layers of the text transformer are initialized by the bottom six layers of RoBERTaBASE [25]. The reconstructor is implemented by a 12-layer $(N_{\mathrm{bottom}} = N_{\mathrm{top}} = 6)$ transformer decoder (self-attention layers removed, keeping only cross-attention layers) with a hidden size of 768 and a head number of 12. The default corruption rate of FLM is $1 / L$, i.e., in each corruption-prediction task, only a single token is corrupted and then reconstructed from its context. While minimal corruption achieves decent performance, we further explore the choice of corruption rates in Sec. 4.3.
138
+
139
+ We pretrain the model for a maximum of 30k steps, with a total batch size of 4096 on 16 TITAN V100 GPUs by AdamW [26] optimizer and gradient accumulation. Mixed-precision training is used to reduce memory consumption and accelerate training. We use a $5\%$ warm-up schedule with a maximum learning rate of 4e-4. Following [10], we
140
+
141
+ <table><tr><td rowspan="2">Method</td><td rowspan="2">rcorr</td><td rowspan="2">rpred</td><td rowspan="2">VQATest-dev</td><td rowspan="2">NLVR2dev</td><td rowspan="2">test</td><td rowspan="2">Retrieval (Flickr30K)IR@1</td><td rowspan="2">TR@1</td><td colspan="3">COCO Captioning</td><td rowspan="2">GPU Days (speed-up)</td></tr><tr><td>BLEU</td><td>METER</td><td>CIDEr</td></tr><tr><td>AR</td><td>50%</td><td>100%</td><td>72.85</td><td>75.79</td><td>76.29</td><td>66.59</td><td>84.10</td><td>35.70</td><td>28.86</td><td>120.6</td><td>9.6 (6.1×)</td></tr><tr><td>PrefixLM</td><td>25%</td><td>50%</td><td>72.64</td><td>75.73</td><td>76.17</td><td>66.21</td><td>82.70</td><td>35.50</td><td>28.79</td><td>119.4</td><td>10.0 (5.9×)</td></tr><tr><td>MLM</td><td>15%</td><td>15%</td><td>73.52</td><td>77.46</td><td>78.28</td><td>71.33</td><td>88.40</td><td>34.90</td><td>28.50</td><td>117.5</td><td>58.7 (1×)</td></tr><tr><td>MLM</td><td>40%</td><td>40%</td><td>73.95</td><td>77.62</td><td>78.60</td><td>73.41</td><td>89.20</td><td>35.50</td><td>28.79</td><td>120.3</td><td>58.7 (1×)</td></tr><tr><td>FLM (Ours)</td><td>1/L</td><td>100%</td><td>73.85</td><td>77.99</td><td>78.63</td><td>72.81</td><td>87.40</td><td>36.68</td><td>29.17</td><td>123.0</td><td>22.7 (2.5×)</td></tr></table>
142
+
143
+ Table 1. Performance Comparison between different language modeling methods. $r_{\mathrm{corr}}$ and $r_{\mathrm{pred}}$ refer to the corruption and prediction rates. All models are based on CLIP-B/32 image encoder and a text transformer initialized by RoBERTa. Note that the default $r_{\mathrm{corr}}$ of FLM is set to $1 / L$ for better efficiency, while FLM's performance will be further improved by an optimal $r_{\mathrm{corr}}$ , as indicated in Table 3d.
144
+
145
+ <table><tr><td>Method</td><td>VQA test-dev</td><td>NLVR2 dev</td><td>NLVR2 test</td><td>Captioning CIDEr</td><td>GPU Days</td></tr><tr><td colspan="6">CLIP-B/32 on 13M data</td></tr><tr><td>AR</td><td>73.46</td><td>76.60</td><td>77.21</td><td>121.5</td><td>21.3 (5.4×)</td></tr><tr><td>MLM</td><td>74.25</td><td>78.63</td><td>79.19</td><td>122.6</td><td>116.0 (1×)</td></tr><tr><td>FLM</td><td>74.28</td><td>78.73</td><td>79.52</td><td>122.6</td><td>32.0 (3.6×)</td></tr><tr><td colspan="6">CLIP-B/16 on 4M data</td></tr><tr><td>AR</td><td>75.05</td><td>77.38</td><td>78.79</td><td>126.0</td><td>12.3 (5.0×)</td></tr><tr><td>MLM</td><td>75.76</td><td>79.93</td><td>79.83</td><td>125.4</td><td>61.4 (1×)</td></tr><tr><td>FLM</td><td>75.95</td><td>79.02</td><td>80.03</td><td>126.5</td><td>16.1 (3.8×)</td></tr></table>
146
+
147
+ Table 2. Performance comparison of different pretraining objectives with a larger data scale (from 4M to 13M) or a larger number of patches (patch size from 32 to 16). For 13M data, we extend the training iteration of MLM to 200k.
148
+
149
+ assign a lower learning rate of 8e-5 for all pretrained layers. The text sequence length is limited to 50 subwords. More details are in the supplementary materials.
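+
+ For reference, a rough sketch of the optimizer setup described above is shown below; the parameter-group split, module names, weight decay, and the post-warmup decay shape are our assumptions (the actual schedule follows METER [10]):
+
+ ```python
+ # AdamW with a lower learning rate for pretrained layers and a 5% linear warmup.
+ import torch
+
+ def build_optimizer(model, max_steps=30_000, lr_new=4e-4, lr_pretrained=8e-5):
+     pretrained, fresh = [], []
+     for name, p in model.named_parameters():
+         # The prefix-based split below is an assumption for illustration.
+         (pretrained if name.startswith(("vision_encoder", "text_bottom"))
+          else fresh).append(p)
+     optimizer = torch.optim.AdamW(
+         [{"params": fresh, "lr": lr_new},
+          {"params": pretrained, "lr": lr_pretrained}],
+         weight_decay=0.01)                  # assumed value
+     warmup = int(0.05 * max_steps)
+     def lr_lambda(step):                    # 5% warmup, then linear decay
+         if step < warmup:
+             return step / max(1, warmup)
+         return max(0.0, (max_steps - step) / max(1, max_steps - warmup))
+     scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
+     return optimizer, scheduler
+ ```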
150
+
151
+ Baselines. We also pretrain some generative language modeling methods for comparison, including MLM, AR, and PrefixLM [37]. Specifically, we directly input the (corrupted) text sequence into the text transformer and then build an MLP layer upon the last layer of the text transformer for reconstructing the original input. For AR and PrefixLM, we follow the same learning rate schedule as FLM. For MLM, we follow [10] to train the model for 100k iterations with a maximum learning rate of 5e-5 and a warm-up rate of $10\%$ . To compare the convergence speed of different methods, we report the GPU days when reaching the best validation performance (i.e., reconstruction accuracy on the COCO validation set).
152
+
153
+ # 4.2. Comparison with Language Modeling Methods
154
+
155
+ As shown in Table 1, compared with MLM, the proposed FLM achieves a $2.5 \times$ speed-up while keeping comparable performance on VL understanding tasks and superior performance on VL generation tasks.
156
+
157
+ AR achieves decent performance on image captioning but inferior performance on VL understanding tasks, due to its inability to capture bidirectional interactions within sequences. On the other hand, AR has a faster convergence rate and high training efficiency.
158
+
159
+ Although PrefixLM enables bidirectional interactions between partial inputs, which is beneficial for VL classification tasks, its performance is similar to AR. A possible reason is that the reconstruction targets mainly fall on the right side of the sequence, and this uneven distribution may push the learned representation towards an unsatisfactory language prior. For MLM, we found that a corruption rate of $40\%$ achieves the best VQA performance among $\{10\%, 20\%, \dots, 80\%\}$, indicating that an appropriate corruption rate is essential to control the task difficulty. However, the convergence of MLM is slow, making more training steps necessary to achieve decent performance. Our FLM surpasses MLM on $\mathrm{NLVR}^2$ and image captioning, while also showing an impressive $2.5\times$ speed-up of training time.
160
+
161
+ In Table 2, we show that the superiority of the proposed FLM consistently holds with a larger data scale or with more powerful visual features. The reason may be that FLM learns bidirectional context patterns by encoding the text sequence once and densely predicting $100\%$ of the input tokens in parallel, while MLM usually needs more pretraining steps to see such diverse patterns. Therefore, FLM is a friendly pretext task for accelerating training under low-resource scenarios, which, to some extent, enjoys the high efficiency of AR/PrefixLM and the high performance of MLM.
162
+
163
+ As for the inferior retrieval performance compared with MLM, we conjecture that FLM with span corruptions has fewer corruption variants than MLM with random corruptions but focuses more on local semantics, which favors fine-grained tasks like VQA/captioning over retrieval.
164
+
165
+ # 4.3. Ablation Studies
166
+
167
+ FLM Loss. The ablation study for the loss terms in FLM is shown in Table 3a. With merely the reconstruction loss $\mathcal{L}_R$, our model achieves better performance (73.04 on VQA) compared with AR (72.85) or PrefixLM (72.64). When further introducing the left-to-right or right-to-left intermediate caption loss, the model gains consistent improvements on the two downstream tasks. Note that the left-to-right loss shows non-trivial superiority over the right-to-left one, verifying the effectiveness of the causal relationships between words. By combining the bidirectional caption losses, the model achieves 0.81/1.45 absolute gains over the model with only the
168
+
169
+ <table><tr><td>Loss</td><td>VQA</td><td>NLVR2</td></tr><tr><td>LR</td><td>73.04</td><td>77.18</td></tr><tr><td>LR+Lr2l</td><td>73.38</td><td>77.59</td></tr><tr><td>LR+Ll2r</td><td>73.67</td><td>78.00</td></tr><tr><td>LR+Linter</td><td>73.85</td><td>78.63</td></tr></table>
170
+
171
+ (a) Loss term. Intermediate losses are effective and complementary to FLM loss.
172
+
173
+ <table><tr><td>Text Encoder</td><td>VQA</td><td>NLVR2</td></tr><tr><td>Unshared text encoder</td><td>73.46</td><td>77.51</td></tr><tr><td>Shared text encoder</td><td>73.85</td><td>78.63</td></tr></table>
174
+
175
+ (b) Parameter sharing. Sharing two unidirectional encoders is effective and efficient.
176
+
177
+ <table><tr><td>Prediction rate</td><td>VQA</td><td>NLVR2</td></tr><tr><td>50%</td><td>73.74</td><td>77.47</td></tr><tr><td>75%</td><td>73.89</td><td>77.65</td></tr><tr><td>90%</td><td>74.00</td><td>78.17</td></tr><tr><td>100%</td><td>73.85</td><td>78.63</td></tr></table>
178
+
179
+ (c) Prediction rate. FLM with a larger prediction rate improves performance.
180
+
181
+ <table><tr><td>Corruption</td><td>VQA</td><td>NLVR2</td></tr><tr><td>span corruption (1/L)</td><td>73.85</td><td>78.63</td></tr><tr><td>span corruption (30%)</td><td>73.96</td><td>78.83</td></tr><tr><td>span corruption (40%)</td><td>74.04</td><td>78.82</td></tr><tr><td>span corruption (50%)</td><td>74.01</td><td>77.84</td></tr><tr><td>random corruption (15%)</td><td>73.93</td><td>78.38</td></tr><tr><td>random corruption (30%)</td><td>73.69</td><td>77.74</td></tr></table>
182
+
183
+ (d) Corruption Rate. FLM enables a flexible choice of the corruption rate.
184
+
185
+ <table><tr><td>Bottom</td><td>Top</td><td>VQA</td><td>NLVR2</td></tr><tr><td>×</td><td>1</td><td>73.46</td><td>77.49</td></tr><tr><td>×</td><td>3</td><td>73.62</td><td>78.20</td></tr><tr><td>×</td><td>6</td><td>73.74</td><td>78.14</td></tr><tr><td>3</td><td>6</td><td>73.69</td><td>78.20</td></tr><tr><td>6</td><td>6</td><td>73.85</td><td>78.63</td></tr></table>
186
+
187
+ (e) Number of reconstruction layers. FLM benefits from a deeper reconstructor.
188
+
189
+ Table 3. FLM ablation experiments with ViT-B/32 pretrained on 4M data. We report the finetuned accuracy (%) on the VQA test-dev and NLVR $^2$ test set. Default settings are marked in gray.
190
+
191
+ reconstruction loss.
192
+
193
+ Parameter Sharing. During decomposed bidirectional encoding, parameter sharing is used in the text transformer for two unimodal encodings with different attention masks. Table 3b shows that the shared text transformer clearly surpasses the unshared one, indicating that the two unidirectional representations could implicitly benefit each other by sharing the same feature space.
194
+
195
+ Number of Reconstruction Layers. The reconstructor aims to construct several corrupted sequences upon the high-level representations and reconstruct the corrupted information in parallel. Table 3e shows that a deep structure of the reconstructor helps the downstream tasks. The multi-layer reconstructor gathers text and multimodal features from low to high levels, promising to enhance the representation ability.
196
+
197
+ Prediction Rate. We test the converged performance of the pretrained model with different prediction rates. To this end, we randomly exclude a subset of output tokens from the loss calculation. As shown in Table 3c, a lower prediction rate tends to yield worse performance on both VQA and NLVR$^2$, suggesting that a prediction loss covering a larger number of tokens helps the optimization.
198
+
199
+ Corruption Rate. The corruption rate determines how much context should be used for predicting the corrupted tokens. It controls the difficulty of the reconstruction problem and closely affects model performance. We study the influence of corruption strategies in FLM. As shown in Table 3d, we first test the length of the span corruption. With the growth of the span length, the VQA and $\mathrm{NLVR}^2$ performance steadily reach their maximum values at a $30\% \sim 40\%$ corruption rate. Our method keeps a $100\%$ prediction rate while allowing a customized corruption rate, which could hopefully serve
200
+
201
+ as a replacement for the widely-used MLM to improve convergence speed.
202
+
203
+ Besides the span corruption, which occurs after feature encoding, we also test the influence of pre-encoding corruption. We assign random corruptions to each token of the input sequence and then perform FLM to reconstruct all input tokens. With a $15\%$ corruption rate, random corruption slightly increases the VQA score. Unfortunately, $\mathrm{NLVR}^2$ performance degrades with a larger corruption rate. We found that the optimal corruption rate may differ across corruption methods. Effectively fusing different types of corruption may be a promising direction for further increasing the diversity of contexts.
204
+
205
+ # 4.4. Comparison with State-of-the-Arts
206
+
207
+ The comparisons on VQA, $\mathrm{NLVR}^2$, and image captioning are shown in Table 4. Without using complicated pretraining tasks like ITM and ITC, our method achieves competitive performance by merely using FLM as the pretraining task. Compared with prior arts, our method has appealing advantages regarding pretraining time: First, the proposed FLM improves the convergence speed by enabling $100\%$ token prediction. Second, we leverage FLM as the single pretraining objective, without relying on additional time-consuming pretraining objectives like ITM. Third, we use patch-level image features instead of the heavy object detectors used in [22, 43].
208
+
209
+ The performance on cross-modal retrieval is shown in Table 5. Our FLM-trained model performs poorly if directly fine-tuned on the target downstream datasets. Note that retrieval heavily relies on cross-modal alignment learning (e.g., ITM or ITC) on large-scale datasets, since negative samples are essential for learning discriminative features.
210
+
211
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Pretrain. Task</td><td rowspan="2">Pretrain. Time (GPU Days)</td><td colspan="2">VQAv2</td><td colspan="2">NLVR2</td><td rowspan="2">BLEU4</td><td colspan="3">COCO Captioning</td></tr><tr><td>test-dev</td><td>test-standard</td><td>dev</td><td>test</td><td>METEOR</td><td>CIDEr</td><td>SPICE</td></tr><tr><td colspan="11">Pre-trained with &lt;10M images</td></tr><tr><td>UNITERLARGE [7]</td><td>MLM, ITM, MVM, WRA</td><td>152 (V100)</td><td>73.82</td><td>74.02</td><td>79.12</td><td>79.98</td><td>-</td><td>-</td><td></td><td></td></tr><tr><td>UNIMOLARGE [21]</td><td>MLM, MVM, ITC</td><td>640 (V100)</td><td>75.06</td><td>75.27</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td><td></td></tr><tr><td>OSCAR</td><td>MLM, ITM</td><td>220 (V100)</td><td>73.61</td><td>73.82</td><td>79.12</td><td>80.37</td><td>37.4</td><td>30.7</td><td>127.8</td><td>23.5</td></tr><tr><td>VinVLBASE [43]</td><td>MLM, ITM</td><td>320 (V100)</td><td>75.95</td><td>76.12</td><td>82.05</td><td>83.08</td><td>38.2</td><td>30.3</td><td>129.3</td><td>23.6</td></tr><tr><td>VinVL LARGE [43]</td><td>MLM, ITM</td><td>320 (V100)</td><td>76.52</td><td>76.60</td><td>82.67</td><td>83.98</td><td>38.5</td><td>30.4</td><td>130.8</td><td>23.4</td></tr><tr><td>PixelBERT [13]</td><td>MLM, ITM</td><td>-</td><td>74.45</td><td>74.55</td><td>76.5</td><td>77.2</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>CLIP-ViL [31]</td><td>MLM, ITM, VQA</td><td>40 (A100)</td><td>76.48</td><td>76.70</td><td>-</td><td>-</td><td>40.2*</td><td>29.7*</td><td>134.2*</td><td>23.8*</td></tr><tr><td>ViLT [43]</td><td>MLM, ITM, WRA</td><td>192 (V100)</td><td>71.26</td><td>-</td><td>75.70</td><td>76.13</td><td>-</td><td>-</td><td></td><td></td></tr><tr><td>ALBEF (4M) [18]</td><td>MLM, ITM</td><td>28 (A100)</td><td>71.40</td><td>-</td><td>-</td><td>77.51</td><td>-</td><td>-</td><td></td><td></td></tr><tr><td>ALBEF (4M) [18]</td><td>MLM, ITM, ITC</td><td>28 (A100)</td><td>74.54</td><td>74.70</td><td>80.24</td><td>80.50</td><td>-</td><td>-</td><td></td><td></td></tr><tr><td>METERBASE [10]</td><td>MLM, ITM</td><td>64 (A100)</td><td>77.68</td><td>77.64</td><td>82.33</td><td>83.05</td><td>38.8</td><td>30.0</td><td>128.2</td><td>23.0</td></tr><tr><td>OursLARGE (4M)</td><td>FLM</td><td>18 (V100)</td><td>77.80</td><td>77.84</td><td>81.77</td><td>81.83</td><td>38.3</td><td>30.2</td><td>130.9</td><td>-</td></tr><tr><td colspan="11">Pre-trained with 10M~100M images</td></tr><tr><td>ALBEF (14M) [18]</td><td>MLM, ITM, ITC</td><td>140 (A100)</td><td>75.84</td><td>76.04</td><td>82.55</td><td>83.14</td><td>-</td><td>-</td><td></td><td></td></tr><tr><td>BLIP (14M) [17]</td><td>AR, ITM, ITC</td><td>112 (A100)</td><td>77.54</td><td>77.62</td><td>82.67</td><td>82.30</td><td>38.6</td><td>-</td><td>129.7</td><td>-</td></tr><tr><td>OursLARGE (13M)</td><td>FLM</td><td>75 (V100)</td><td>78.18</td><td>78.24</td><td>82.90</td><td>83.86</td><td>39.1</td><td>30.3</td><td>132.7</td><td>-</td></tr><tr><td colspan="11">Pre-trained with &gt;100M images</td></tr><tr><td>SimVLMBASE (1.8B) [37]</td><td>PrefixLM</td><td>-</td><td>77.87</td><td>78.14</td><td>81.72</td><td>81.77</td><td>39.0</td><td>32.9</td><td>134.8</td><td>24.0</td></tr><tr><td>SimVLMHUGE (1.8B) [37]</td><td>PrefixLM</td><td>-</td><td>80.03</td><td>80.34</td><td>84.53</td><td>85.15</td><td>40.6</td><td>33.7</td><td>143.3</td><td>25.4</td></tr><tr><td>LEMON 
(400M)</td><td>MLM</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>40.3</td><td>30.2</td><td>133.3</td><td>23.3</td></tr></table>
212
+
213
+ Table 4. Comparisons with models on visual question answering, visual reasoning, and image captioning tasks. The best scores are in bold, and the second-best scores are underlined. MVM, ITC, ITM, and WRA represent masked vision modeling, image-text contrast, image-text matching, and word-region alignment, respectively. OursLARGE is trained with $30\mathrm{k} / 100\mathrm{k}$ steps on 4M/13M data, respectively.
214
+
215
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Pretrain. Task</td><td rowspan="2">Pretrain. Time (GPU Days)</td><td colspan="6">Flickr30k</td><td colspan="6">COCO</td></tr><tr><td>IR@1</td><td>IR@5</td><td>IR@10</td><td>TR@1</td><td>TR@5</td><td>TR@10</td><td>IR@1</td><td>IR@5</td><td>IR@10</td><td>TR@1</td><td>TR@5</td><td>TR@10</td></tr><tr><td colspan="15">Pre-trained with &lt;10M images</td></tr><tr><td>UNITERLARGE [7]</td><td>MLM, ITM, MVM, WRA</td><td>152 (V100)</td><td>75.56</td><td>94.08</td><td>96.76</td><td>87.30</td><td>98.00</td><td>99.20</td><td>52.93</td><td>79.93</td><td>87.95</td><td>65.68</td><td>88.56</td><td>93.76</td></tr><tr><td>UNIMOLARGE [21]</td><td>MLM, MVM, ITC</td><td>640 (V100)</td><td>78.04</td><td>94.24</td><td>97.12</td><td>89.40</td><td>98.90</td><td>99.80</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VinVLARGE [43]</td><td>MLM, ITM</td><td>320 (V100)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>58.8</td><td>83.5</td><td>90.3</td><td>75.4</td><td>92.9</td><td>96.2</td></tr><tr><td>PixelBERT [13]</td><td>MLM, ITM</td><td>-</td><td>71.5</td><td>92.1</td><td>95.8</td><td>87.0</td><td>98.9</td><td>99.5</td><td>50.1</td><td>77.6</td><td>86.2</td><td>63.6</td><td>87.5</td><td>93.6</td></tr><tr><td>ViLT [43]</td><td>MLM, ITM, WRA</td><td>192 (V100)</td><td>64.4</td><td>88.7</td><td>93.8</td><td>83.5</td><td>96.7</td><td>98.6</td><td>42.7</td><td>72.9</td><td>83.1</td><td>61.5</td><td>86.3</td><td>92.7</td></tr><tr><td>ALBEF (4M) [18]</td><td>MLM, ITM, ITC</td><td>28 (A100)</td><td>82.8</td><td>96.7</td><td>98.4</td><td>94.3</td><td>99.4</td><td>99.8</td><td>56.8</td><td>81.5</td><td>89.2</td><td>73.1</td><td>91.4</td><td>96.0</td></tr><tr><td>METERBASE [10]</td><td>MLM, ITM</td><td>64 (A100)</td><td>82.22</td><td>96.34</td><td>98.36</td><td>94.30</td><td>99.60</td><td>99.90</td><td>57.08</td><td>82.66</td><td>90.07</td><td>76.16</td><td>93.16</td><td>96.82</td></tr><tr><td>OursLARGE (4M)</td><td>FLM</td><td>18 (V100)</td><td>74.53</td><td>93.96</td><td>97.26</td><td>88.10</td><td>98.30</td><td>99.60</td><td>46.46</td><td>75.43</td><td>85.09</td><td>62.84</td><td>86.64</td><td>93.00</td></tr><tr><td>OursLARGE (4M)</td><td>FLM, ITM</td><td>57 (V100)</td><td>83.40</td><td>97.04</td><td>98.72</td><td>95.00</td><td>99.50</td><td>99.90</td><td>56.55</td><td>82.02</td><td>89.63</td><td>73.52</td><td>91.95</td><td>95.97</td></tr><tr><td colspan="15">Pre-trained with &gt;10M images</td></tr><tr><td>ALBEF (14M) [18]</td><td>MLM, ITM, ITC</td><td>60 (A100)</td><td>85.6</td><td>97.5</td><td>98.9</td><td>95.9</td><td>99.8</td><td>100.0</td><td>60.7</td><td>84.3</td><td>90.5</td><td>77.6</td><td>94.3</td><td>97.2</td></tr><tr><td>BLIP (14M)</td><td>AR, ITM, ITC</td><td>112 (A100)</td><td>87.2</td><td>97.5</td><td>98.8</td><td>96.6</td><td>99.8</td><td>100.0</td><td>63.1</td><td>85.3</td><td>91.1</td><td>80.6</td><td>95.2</td><td>97.6</td></tr></table>
216
+
217
+ Table 5. Performance comparisons with pre-trained models on the Flickr30k and COCO image retrieval (IR) and text retrieval (TR) tasks in the finetuning setting. The best scores are in bold, and the second-best scores are underlined.
218
+
219
+ Therefore, we jointly use ITM and FLM to conduct pretraining to facilitate cross-modal alignments. By doing so, we obtain considerable performance gain and reach superior performance on Flickr30K and competitive performance on COCO over prior arts, suggesting the complementarity of FLM and other alignment-oriented objectives.
220
+
221
+ # 5. Conclusion
222
+
223
+ In this paper, we propose free language modeling (FLM), a new pretraining objective for accelerating vision-language pretraining. Different from previous language modeling methods, such as MLM and AR, FLM seamlessly disentangles the prediction rate from the tie-up with the corruption
224
+
225
+ rate, while allowing a flexible corruption pattern for each prediction target. Experiments verify the effectiveness of the proposed FLM in both accuracy and efficiency. Our model converges faster with a substantial reduction of training time compared to MLM, while achieving comparable performance on multiple multimodal downstream tasks.
226
+
227
+ Acknowledgement. This paper is partially supported by the National Key R&D Program of China No. 2022ZD0161000, the General Research Fund of HK No.17200622, the National Natural Science Foundation of China under Grant No. 62122035 and 61972188. We thank Chengyue Wu for his technical assistance, and Jianhui Xu, Zhengxiao Du, and Zhichao Lu for their helpful comments.
228
+
229
+ # References
230
+
231
+ [1] Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C Lawrence Zitnick, and Devi Parikh. VQA: Visual question answering. In International Conference on Computer Vision (ICCV), 2015. 5
232
+ [2] Hangbo Bao, Wenhui Wang, Li Dong, and Furu Wei. Vlbeit: Generative vision-language pretraining. arXiv preprint arXiv:2206.01127, 2022. 2
233
+ [3] Yonatan Bitton, Gabriel Stanovsky, Michael Elhadad, and Roy Schwartz. Data efficient masked language modeling for vision and language. arXiv preprint arXiv:2109.02040, 2021. 3
234
+ [4] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 3
235
+ [5] Jaeseok Byun, Taebaek Hwang, Jianlong Fu, and Taesup Moon. Grit-vlp: Grouped mini-batch sampling for efficient vision and language pre-training. arXiv preprint arXiv:2208.04060, 2022.3
236
+ [6] Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. Conceptual 12m: Pushing web-scale image-text pretraining to recognize long-tail visual concepts. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 5
237
+ [7] Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. UNITER: Universal image-text representation learning. In European Conference on Computer Vision (ECCV), 2020. 1, 2, 3, 5, 8
238
+ [8] Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311, 2022. 3
239
+ [9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Conference of the North American Chapter of the Association for Computational Linguistics (NAACL), 2019. 1, 3
240
+ [10] Zi-Yi Dou, Yichong Xu, Zhe Gan, Jianfeng Wang, Shuohang Wang, Lijuan Wang, Chenguang Zhu, Pengchuan Zhang, Lu Yuan, Nanyun Peng, et al. An empirical study of training end-to-end vision-and-language transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18166-18176, 2022. 1, 5, 6, 8
241
+ [11] Zhengxiao Du, Yujie Qian, Xiao Liu, Ming Ding, Jiezhong Qiu, Zhilin Yang, and Jie Tang. Glm: General language model pretraining with autoregressive blank infilling. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 320-335, 2022. 3
242
+ [12] Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. DeBERTa: Decoding-enhanced bert with disentangled attention. arXiv preprint, 2020. 3
243
+
244
+ [13] Zhicheng Huang, Zhaoyang Zeng, Bei Liu, Dongmei Fu, and Jianlong Fu. Pixel-BERT: Aligning image pixels with text by deep multi-modal transformers. arXiv preprint, 2020. 3, 8
245
+ [14] Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V Le, Yunhsuan Sung, Zhen Li, and Tom Duerig. Scaling up visual and vision-language representation learning with noisy text supervision. arXiv preprint, 2021. 1, 2
246
+ [15] Wonjae Kim, Bokyung Son, and Ildoo Kim. ViLT: Vision-and-language transformer without convolution or region supervision. In International Conference on Machine Learning (ICML), 2021. 2, 3
247
+ [16] Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A Shamma, et al. Visual Genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision (IJCV), 2017. 5
248
+ [17] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. arXiv preprint arXiv:2201.12086, 2022. 8
249
+ [18] Junnan Li, Ramprasaath R Selvaraju, Akhilesh Deepak Gotmare, Shafiq Joty, Caiming Xiong, and Steven Hoi. Align before fuse: Vision and language representation learning with momentum distillation. In Conference on Neural Information Processing Systems (NeurIPS), 2021. 1, 2, 5, 8
250
+ [19] Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint, 2019. 1, 2
251
+ [20] Liunian Harold Li, Pengchuan Zhang, Haotian Zhang, Jianwei Yang, Chunyuan Li, Yiwu Zhong, Lijuan Wang, Lu Yuan, Lei Zhang, Jenq-Neng Hwang, et al. Grounded language-image pre-training. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10965-10975, 2022. 2
252
+ [21] Wei Li, Can Gao, Guocheng Niu, Xinyan Xiao, Hao Liu, Jiachen Liu, Hua Wu, and Haifeng Wang. Unimo: Towards unified-modal understanding and generation via cross-modal contrastive learning. In Annual Meeting of the Association for Computational Linguistics (ACL), 2021. 2, 8
253
+ [22] Xiujun Li, Xi Yin, Chunyuan Li, Pengchuan Zhang, Xiaowei Hu, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, et al. Oscar: Object-semantics aligned pre-training for vision-language tasks. In European Conference on Computer Vision (ECCV), 2020. 1, 2, 3, 7
254
+ [23] Yangguang Li, Feng Liang, Lichen Zhao, Yufeng Cui, Wanli Ouyang, Jing Shao, Fengwei Yu, and Junjie Yan. Supervision exists everywhere: A data efficient contrastive language-image pre-training paradigm. arXiv preprint arXiv:2110.05208, 2021. 3
255
+ [24] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision (ECCV), 2014. 5
256
+ [25] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke
257
+
258
+ Zettlemoyer, and Veselin Stoyanov. RoBERTa: A robustly optimized bert pretraining approach. arXiv preprint, 2019. 3, 5
259
+ [26] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 5
260
+ [27] Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In Conference on Neural Information Processing Systems (NeurIPS), 2019. 2
261
+ [28] Vicente Ordonez, Girish Kulkarni, and Tamara Berg. Im2text: Describing images using 1 million captioned photographs. In Conference on Neural Information Processing Systems (NeurIPS), 2011. 5
262
+ [29] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International Conference on Machine Learning (ICML), 2021. 2, 5
263
+ [30] Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. Conceptual captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In Annual Meeting of the Association for Computational Linguistics (ACL), 2018. 1, 5
264
+ [31] Sheng Shen, Liunian Harold Li, Hao Tan, Mohit Bansal, Anna Rohrbach, Kai-Wei Chang, Zhewei Yao, and Kurt Keutzer. How much can clip benefit vision-and-language tasks? arXiv preprint, 2021. 8
265
+ [32] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. In Annual Meeting of the Association for Computational Linguistics (ACL), 2019. 1, 5
266
+ [33] Hao Tan and Mohit Bansal. LXMERT: Learning cross-modality encoder representations from transformers. In Conference on Empirical Methods in Natural Language Processing (EMNLP), 2019. 3
267
+ [34] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Conference on Neural Information Processing Systems (NeurIPS), 2017. 1
268
+ [35] Teng Wang, Wenhao Jiang, Zhichao Lu, Feng Zheng, Ran Cheng, Chengguo Yin, and Ping Luo. Vlmixer: Unpaired vision-language pre-training via cross-modal cutmix. In International Conference on Machine Learning, pages 22680-22690. PMLR, 2022. 2
269
+ [36] Wenhui Wang, Hangbo Bao, Li Dong, Johan Bjorck, Zhiliang Peng, Qiang Liu, Kriti Aggarwal, Owais Khan Mohammed, Saksham Singhal, Subhojit Som, et al. Image as a foreign language: Beit pretraining for all vision and vision-language tasks. arXiv preprint arXiv:2208.10442, 2022. 2
270
+ [37] Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. Simvlm: Simple visual language model pretraining with weak supervision. arXiv preprint, 2021. 2, 6, 8
271
+ [38] Alexander Wettig, Tianyu Gao, Zexuan Zhong, and Danqi Chen. Should you mask $15\%$ in masked language modeling? arXiv preprint arXiv:2202.08005, 2022. 3
272
+
273
+ [39] Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. Xlnet: Generalized autoregressive pretraining for language understanding. Advances in neural information processing systems, 32, 2019. 3
274
+ [40] Lewei Yao, Runhui Huang, Lu Hou, Guansong Lu, Minzhe Niu, Hang Xu, Xiaodan Liang, Zhenguo Li, Xin Jiang, and Chunjing Xu. Filip: Fine-grained interactive language-image pre-training. arXiv preprint arXiv:2111.07783, 2021. 2
275
+ [41] Jiahui Yu, Zirui Wang, Vijay Vasudevan, Legg Yeung, Mojtaba Seyedhosseini, and Yonghui Wu. Coca: Contrastive captioners are image-text foundation models. arXiv preprint arXiv:2205.01917, 2022. 2
276
+ [42] Yan Zeng, Xinsong Zhang, and Hang Li. Multi-grained vision language pre-training: Aligning texts with visual concepts. arXiv preprint arXiv:2111.08276, 2021. 2
277
+ [43] Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. VinVL: Revisiting visual representations in vision-language models. In Conference on Computer Vision and Pattern Recognition (CVPR), 2021. 2, 3, 7, 8
acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17ef5ffeb6b9df779054e283b5bae26622f22149371265df65a5fc9ddfda8ae1
3
+ size 621803
acceleratingvisionlanguagepretrainingwithfreelanguagemodeling/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88eaf3b405ddd9fcdad0725765915afcc3047df87d062b5fd36e7c394d26ebaf
3
+ size 392074
accelirtaskawareimagecompressionforacceleratingneuralrestoration/3ee3bce8-b085-4d08-be6f-5f7d348c3f3f_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0abdaa36644f3e150b9dfd1803e20b83cd47db8d9269935409c074aa5d45c4e9
3
+ size 88321
accelirtaskawareimagecompressionforacceleratingneuralrestoration/3ee3bce8-b085-4d08-be6f-5f7d348c3f3f_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3e11a564a20391dc87c82c65b2dfc9042239188175173eb2740479dd9a4be719
3
+ size 114942