Add Batch d61897a0-e7f7-474b-a46b-fe4c084a3df2
This view is limited to 50 files because it contains too many changes. See raw diff
- acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/e9e9dcd2-0a7f-4d71-bdd3-86b688a22d12_content_list.json +3 -0
- acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/e9e9dcd2-0a7f-4d71-bdd3-86b688a22d12_model.json +3 -0
- acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/e9e9dcd2-0a7f-4d71-bdd3-86b688a22d12_origin.pdf +3 -0
- acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/full.md +398 -0
- acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/images.zip +3 -0
- acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/layout.json +3 -0
- achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/ec9a1135-4936-4dd0-8f02-af317fa4bca3_content_list.json +3 -0
- achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/ec9a1135-4936-4dd0-8f02-af317fa4bca3_model.json +3 -0
- achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/ec9a1135-4936-4dd0-8f02-af317fa4bca3_origin.pdf +3 -0
- achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/full.md +375 -0
- achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/images.zip +3 -0
- achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/layout.json +3 -0
- actionconditioned3dhumanmotionsynthesiswithtransformervae/46ebd66c-652f-4e2e-9f0b-a7a5e111b5b3_content_list.json +3 -0
- actionconditioned3dhumanmotionsynthesiswithtransformervae/46ebd66c-652f-4e2e-9f0b-a7a5e111b5b3_model.json +3 -0
- actionconditioned3dhumanmotionsynthesiswithtransformervae/46ebd66c-652f-4e2e-9f0b-a7a5e111b5b3_origin.pdf +3 -0
- actionconditioned3dhumanmotionsynthesiswithtransformervae/full.md +282 -0
- actionconditioned3dhumanmotionsynthesiswithtransformervae/images.zip +3 -0
- actionconditioned3dhumanmotionsynthesiswithtransformervae/layout.json +3 -0
- activedomainadaptationviaclusteringuncertaintyweightedembeddings/b04281d8-c6f4-4c50-afb3-6b770196404a_content_list.json +3 -0
- activedomainadaptationviaclusteringuncertaintyweightedembeddings/b04281d8-c6f4-4c50-afb3-6b770196404a_model.json +3 -0
- activedomainadaptationviaclusteringuncertaintyweightedembeddings/b04281d8-c6f4-4c50-afb3-6b770196404a_origin.pdf +3 -0
- activedomainadaptationviaclusteringuncertaintyweightedembeddings/full.md +329 -0
- activedomainadaptationviaclusteringuncertaintyweightedembeddings/images.zip +3 -0
- activedomainadaptationviaclusteringuncertaintyweightedembeddings/layout.json +3 -0
- activelearningfordeepobjectdetectionviaprobabilisticmodeling/ff739fee-deb8-48c9-98ba-2c7874439ba7_content_list.json +3 -0
- activelearningfordeepobjectdetectionviaprobabilisticmodeling/ff739fee-deb8-48c9-98ba-2c7874439ba7_model.json +3 -0
- activelearningfordeepobjectdetectionviaprobabilisticmodeling/ff739fee-deb8-48c9-98ba-2c7874439ba7_origin.pdf +3 -0
- activelearningfordeepobjectdetectionviaprobabilisticmodeling/full.md +320 -0
- activelearningfordeepobjectdetectionviaprobabilisticmodeling/images.zip +3 -0
- activelearningfordeepobjectdetectionviaprobabilisticmodeling/layout.json +3 -0
- activelearningforlanedetectionaknowledgedistillationapproach/782e7183-c722-4ff4-9021-42918924e6a7_content_list.json +3 -0
- activelearningforlanedetectionaknowledgedistillationapproach/782e7183-c722-4ff4-9021-42918924e6a7_model.json +3 -0
- activelearningforlanedetectionaknowledgedistillationapproach/782e7183-c722-4ff4-9021-42918924e6a7_origin.pdf +3 -0
- activelearningforlanedetectionaknowledgedistillationapproach/full.md +337 -0
- activelearningforlanedetectionaknowledgedistillationapproach/images.zip +3 -0
- activelearningforlanedetectionaknowledgedistillationapproach/layout.json +3 -0
- activeuniversaldomainadaptation/7b2c6630-9b03-41fe-9663-1b4d1ef9d4bd_content_list.json +3 -0
- activeuniversaldomainadaptation/7b2c6630-9b03-41fe-9663-1b4d1ef9d4bd_model.json +3 -0
- activeuniversaldomainadaptation/7b2c6630-9b03-41fe-9663-1b4d1ef9d4bd_origin.pdf +3 -0
- activeuniversaldomainadaptation/full.md +331 -0
- activeuniversaldomainadaptation/images.zip +3 -0
- activeuniversaldomainadaptation/layout.json +3 -0
- actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/43c3af89-ebd3-440b-870b-e655eced5ada_content_list.json +3 -0
- actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/43c3af89-ebd3-440b-870b-e655eced5ada_model.json +3 -0
- actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/43c3af89-ebd3-440b-870b-e655eced5ada_origin.pdf +3 -0
- actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/full.md +289 -0
- actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/images.zip +3 -0
- actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/layout.json +3 -0
- adaattnrevisitattentionmechanisminarbitraryneuralstyletransfer/63d5ce94-5551-487f-8d44-d134ec17d02b_content_list.json +3 -0
- adaattnrevisitattentionmechanisminarbitraryneuralstyletransfer/63d5ce94-5551-487f-8d44-d134ec17d02b_model.json +3 -0
acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/e9e9dcd2-0a7f-4d71-bdd3-86b688a22d12_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39c613cc51f3f9a28c559aa43315c541a3abb0568112ef7aceaba4202d19027c
+size 79246
acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/e9e9dcd2-0a7f-4d71-bdd3-86b688a22d12_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:941c2b92041cecd72f159eb345db8153e70600172ecbf9191385600599ff45ba
+size 98046
acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/e9e9dcd2-0a7f-4d71-bdd3-86b688a22d12_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:65e2ae3cac8ad7861a388aa19b401569ea9219d8552f24a380352fdbb6c88234
+size 9505440
acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/full.md
ADDED
@@ -0,0 +1,398 @@
# Accelerating Atmospheric Turbulence Simulation via Learned Phase-to-Space Transform

Zhiyuan Mao, Nicholas Chimitt, Stanley H. Chan

School of Electrical and Computer Engineering, Purdue University, West Lafayette, Indiana, USA

{mao114, nchimitt, stanch}@purdue.edu

# Abstract

Fast and accurate simulation of imaging through atmospheric turbulence is essential for developing turbulence mitigation algorithms. Recognizing the limitations of previous approaches, we introduce a new concept known as the phase-to-space (P2S) transform to significantly speed up the simulation. P2S is built upon three ideas: (1) reformulating the spatially varying convolution as a set of invariant convolutions with basis functions, (2) learning the basis functions via the known turbulence statistics models, (3) implementing the P2S transform via a light-weight network that directly converts the phase representation to the spatial representation. The new simulator offers a $300\times$ to $1000\times$ speed-up compared to the mainstream split-step simulators while preserving the essential turbulence statistics.

# 1. Introduction

Despite several decades of research, imaging through atmospheric turbulence remains an open problem in optics and image processing. The challenge is not only in reconstructing images from a stack of distorted frames but also in a less known image formation model that can be used to formulate and evaluate image reconstruction algorithms such as deep neural networks. Simulating images distorted by atmospheric turbulence has received considerable attention in the optics community [29, 3, 11, 24], but using these simulators to develop deep learning image reconstruction algorithms remains a challenge, as there is no physically justifiable approach to synthesize large-scale datasets at a low computational cost for training and testing.

Recognizing the demand for a fast, accurate, and open-source simulator, we present a new method to generate a dense-grid image distorted by turbulence with theoretically verifiable statistics. The simulator consists of mostly optics/signal processing steps and a lightweight shallow neural network that performs a new concept called the Phase-to-Space (P2S) transform. By parallelizing the computation across the pixels, the simulator offers a $1000\times$ speed-up compared to the mainstream approach, as shown in Figure 1. When using the new simulator to synthesize training data to train a deep neural network image reconstruction model, the resulting network outperforms the same architecture trained with data synthesized by a less sophisticated simulator, as illustrated in Figure 2.


(a) Hardie et al. [11]: 24.36 sec / frame (GPU). (b) Ours: 0.026 sec / frame (GPU).
Figure 1. This paper presents a new turbulence simulator that is substantially $(1000\times)$ faster than the prior art, while preserving the essential turbulence statistics.


(a) Input (real). (b) [17]+U-Net. (c) Ours+U-Net.
Figure 2. Using our simulator to synthesize the training set for an image reconstruction network (U-Net [28]) offers a considerable improvement in image quality. The network is identical for both (b) and (c); only the simulator used to synthesize the training data is different.

An overview of the proposed simulator is illustrated in Figure 3. Our proposed approach is based on linking the following two ideas:

- Convolution via basis functions (Section 3.1). While conventional approaches model the turbulence distortion as a spatially varying convolution, we reformulate the problem by modeling the distortion as a sum of spatially invariant convolutions. The idea is to utilize a basis representation of the point spread functions (PSFs). This concept is similar to the prior work of [23], but in a different context.

- Learning the basis functions (Section 3.2). To enable the previous idea, we need to have the basis functions. This is done by utilizing [5] to draw Zernike samples for all high-order aberrations. Then, principal component analysis is used to construct the basis functions as proposed by Mao et al. [21]. This is also reminiscent of the dictionary approach proposed by Hunt et al. [13].


Figure 3. This paper introduces three ideas to significantly speed up the simulation: (Section 3.1) approximating the spatially varying convolution by invariant convolutions, (Section 3.2) learning the basis representation via known turbulence statistics, and (Section 3.3) implementing the Phase-to-Space transform network.

The missing piece between these two ideas is the relationship between the basis coefficients in the phase and spatial domains. This is an open problem, and there is no known analytic solution. We circumvent this difficulty by introducing a new concept known as the Phase-to-Space transform (Section 3.3). To do so, we construct a lightweight shallow neural network to transform from the phase domain to the spatial domain. Integrating this network into the two aforementioned ideas, our overall simulator adheres to the physics while offering significant speed-up and additional reconstruction utility.

# 2. Background

In this section we provide a brief summary of the turbulence physics and prior work in turbulence simulation. The theory of imaging through atmospheric turbulence can be traced back to the work of Kolmogorov [14] and Tatarski [32], followed by a series of major breakthroughs by Fried [6, 7, 8] and Noll [22]. Readers are encouraged to consult [27, 10] for an introduction.


Figure 4. Split-step propagation [11] models the turbulence as a discrete set of phase screens, where the wavefront distortion is caused by cropping regions of the phase screen at every pixel location. The key operations are Fresnel propagation and Kolmogorov phase imparting. The end result of a sequence of these operations is a PSF for one pixel. The overlaps of the phase screens create the spatial correlations. See [11] for a detailed description.

# 2.1. Split-step simulation

The image formation process through turbulence is best described in the phase domain. In free space, an emitted wave propagates spherically outward and, at a sufficiently long distance, arrives at the aperture approximately flat. If the medium contains random fluctuations, the phase of the wavefront will be distorted along the path of propagation. We can imagine the wave leading and lagging in phase relative to its unperturbed counterpart as a result of spatially varying indices of refraction.

The most widely used simulation approach to the above process is split-step propagation [29, 3, 11]. The idea is to discretize the wave propagation path as illustrated in Figure 4. Split-step simulation propagates every point in the object plane through a discrete set of phase screens, alternating between free-space propagation, given by Fresnel diffraction, and phase imparting. The statistical behavior of the phase screens is defined through their power spectral density (PSD) [11, 29], many of which are related to the Kolmogorov PSD. This sequence of operations is best described by the chain

Fresnel $\rightarrow$ Kolmogorov $\rightarrow \ldots \rightarrow$ Fresnel $\rightarrow$ Kolmogorov.

After passing through a turbulent medium, the point spread functions (PSFs) will be spatially varying, as illustrated in Figure 5.

The benefit of split-step is twofold: (1) it is interpretable, as it mirrors the physical process; (2) spatial correlations are obtained with minimal effort, as neighboring point sources share overlapping cropped phase screens. The drawback of split-step propagation is its computational requirements: each Fresnel propagation requires a pair of Fourier transforms. This is repeated for every point and every step along the path. Moreover, performing the spatially varying convolution adds another layer of computational cost [11, 29].


Figure 5. If we image a grid of point sources through turbulence, we will observe a set of spatially varying point spread functions (PSFs). The shape and orientation of the PSFs are determined by the phase structure of the turbulence.

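To make this cost concrete, the sketch below implements one free-space propagation step with the standard paraxial (Fresnel) transfer function, showing the pair of FFTs incurred at every step. This is a minimal illustration of the generic split-step building block, not the exact implementation of [11]; the grid size, sampling interval, and parameter names are our own, and a constant phase factor is omitted.

```python
import numpy as np

def fresnel_step(u, dx, wavelength, dz):
    """One free-space step of a split-step simulation.

    u  : (P, P) complex field at the current plane
    dx : grid spacing [m]; wavelength [m]; dz : step length [m]
    Each call costs one forward and one inverse FFT.
    """
    fx = np.fft.fftfreq(u.shape[0], d=dx)
    FX, FY = np.meshgrid(fx, fx)
    # Paraxial Fresnel transfer function in the frequency domain
    H = np.exp(-1j * np.pi * wavelength * dz * (FX**2 + FY**2))
    return np.fft.ifft2(np.fft.fft2(u) * H)
```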
# 2.2. Phase-over-aperture simulation

Our proposed simulator is inspired by the work of Chimitt and Chan [5]. The idea is to collapse the split-step propagation into the resultant phase across the aperture. Compared to split-step, which uses global phase screens, the collapsed model generates the local phase realization directly, as we illustrate in Figure 6.


Figure 6. The collapsed phase-over-aperture model [5] replaces the global phase screens and Fresnel diffraction with local phase screens per pixel. This translates the wave propagation into a spatially varying convolution with PSFs that are characterized by the tilts and aberrations per pixel. While the phase cropping and propagation of the split-step method are eliminated, Fourier transforms at every pixel location are still needed.

In the collapsed model, the local per-pixel phase is generated using Noll's idea [22] that the phase $\phi(\pmb{\rho})$ (defined over the aperture of diameter $D$, with $\pmb{\rho}$ being the coordinate) can be represented via the Zernike basis functions

$$
\phi(\boldsymbol{\rho}) = \sum_{j=1}^{K} \alpha_{j} Z_{j}(\boldsymbol{\rho}), \tag{1}
$$

where $Z_{j}(\pmb{\rho})$ is the Zernike basis and $\alpha_{j}$ are the Zernike coefficients, $[\alpha_{1}, \alpha_{2}, \dots, \alpha_{K}] = \alpha \sim \mathcal{N}(\mathbf{0}, R_{Z})$, with [22] providing the expression for $R_Z$. The resultant incoherent PSF is formed via

$$
\boldsymbol{h} = \left| \mathcal{F}\left\{ W(\boldsymbol{\rho})\, e^{-j\phi(\boldsymbol{\rho})} \right\} \right|^{2}, \tag{2}
$$

omitting a few constants for brevity, with $W(\pmb{\rho})$ as the pupil function of the aperture.

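As a concrete reading of (1)-(2), the following sketch forms one PSF from a vector of Zernike coefficients. It assumes the Zernike modes $Z_j$ have been precomputed on the pupil grid; the variable names and the unit-energy normalization are ours, and constants are omitted as in the text.

```python
import numpy as np

def zernike_to_psf(alpha, zernike_modes, pupil):
    """Eq. (1)-(2): Zernike coefficients -> incoherent PSF.

    alpha         : (K,) Zernike coefficients at one pixel
    zernike_modes : (K, P, P) precomputed basis Z_j(rho) on the pupil grid
    pupil         : (P, P) aperture mask W(rho)
    """
    phase = np.tensordot(alpha, zernike_modes, axes=1)     # Eq. (1)
    field = pupil * np.exp(-1j * phase)
    psf = np.abs(np.fft.fftshift(np.fft.fft2(field)))**2   # Eq. (2)
    return psf / psf.sum()
```

This per-pixel FFT is exactly the bottleneck that the P2S transform of Section 3.3 is designed to avoid.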
The Zernike representation offers a natural grouping of terms, as suggested in Figure 6: tilt and higher-order aberrations. The terms $\alpha_{2}$ and $\alpha_{3}$ correspond to the horizontal and vertical tilt of the plane of best fit to the phase distortion $\phi$. The terms $\alpha_{4}, \alpha_{5}, \ldots$ correspond to the higher-order aberrations and account for the complicated distortions the phase of the wave exhibits. Computationally, these two groups can be separated by generating the high-order aberrations, applying the resultant PSFs to the image, and then locally shifting the image according to its tilt statistics.

A technical challenge of the collapsed model is ensuring that the Zernike coefficients are also spatially correlated. In [5], this correlation is enabled through the invention of a multi-aperture approximation in which the correlations can be described analytically by leveraging several classic works [2, 4, 31]. With the correlation matrix defined, the spatially correlated tilts can be generated. For the higher-order terms, it was suggested in [5] that one can define a grid of PSFs and spatially interpolate between them.

# 2.3. Limitations of phase-over-aperture

As reported in [5], the collapsed model is significantly faster than the standard split-step propagation. However, evaluating the simulator reveals several limiting factors:

- The collapsed model exclusively draws Zernike coefficients to create the distortion. However, even with all Zernike coefficients available, one still needs to convert them to PSFs through (2) at every pixel. This is the biggest bottleneck.
- It was suggested that, to reduce the number of Fourier transforms, one can construct the PSFs for a grid of points and then interpolate between them spatially. However, this is mathematically incorrect, as superposition in the spatial domain is not the same as superposition in the phase domain.
- Even if we could resolve the above two problems, to finally simulate a distorted image we would still need to perform the spatially varying convolution. This involves storing the PSFs and executing the convolution, both of which are resource demanding.

# 2.4. Other simulators

Ray tracing. An alternative to the split-step simulation is ray tracing [24, 15], which requires tracing each point source through the propagation medium. There are also ray tracing techniques developed in computer graphics [30]. However, the lack of quantitative evaluation based on turbulence statistics makes it difficult to assess these methods.

Warp-and-blur. For faster simulations, one can compromise accuracy by simulating only the pixel shifts, commonly referred to as tilts, and assuming a spatially invariant blur [26, 19]. These simulations and models are widely used in the image processing literature [36, 16, 1, 20], where the goal is to provide quick evaluations of the reconstruction algorithms. However, these methods fail to match the known statistical behavior of the distortions.

# 3. Method

The paper includes two key building blocks: (1) reformulating the spatially varying convolution via a set of spatially invariant convolutions, (2) constructing the invariant convolutions by learning the basis functions. The major invention here is the linkage between the two, for which we introduce the P2S transform to convert the Zernike coefficients to the PSF coefficients.

# 3.1. Idea 1: Convolution via basis functions

The turbulent distortions can be modeled as a spatially varying convolution at each pixel. Denoting $\pmb{x} \in \mathbb{R}^N$ as the source image and $\pmb{y} \in \mathbb{R}^N$ as the pupil image, the spatially varying convolution says that $\pmb{y}$ is formed by

$$
\boldsymbol{y} = \left[ \begin{array}{c} y_{1} \\ \vdots \\ y_{N} \end{array} \right] = \boldsymbol{H}\boldsymbol{x} = \left[ \begin{array}{c} \boldsymbol{h}_{1}^{T}\boldsymbol{x} \\ \vdots \\ \boldsymbol{h}_{N}^{T}\boldsymbol{x} \end{array} \right], \tag{3}
$$

where $\{\boldsymbol{h}_n \,|\, n = 1, \ldots, N\}$ are the $N$ spatially varying PSFs stored as rows of the linear operator $\boldsymbol{H} \in \mathbb{R}^{N \times N}$.

The first key idea of the paper is to write $\pmb{h}_n$ as

$$
\boldsymbol{h}_{n} = \sum_{m=1}^{M} \beta_{m,n}\, \varphi_{m}, \tag{4}
$$

for some basis functions $\varphi_{m}$ of the PSFs (to be discussed), and coefficients $\beta_{m,n}$ of the $m^{\mathrm{th}}$ basis at the $n^{\mathrm{th}}$ pixel. Then, each pixel $y_{n}$ in (3) can be written as

$$
y_{n} = \sum_{m=1}^{M} \beta_{m,n}\, \varphi_{m}^{T}\boldsymbol{x}, \quad n = 1, \dots, N. \tag{5}
$$

Since convolution is linear, this turns the $N$ spatially varying convolutions $\{\pmb{h}_n^T\pmb{x}\}_{n=1}^N$ in (3) into $M$ spatially invariant convolutions $\{\pmb{\varphi}_m^T\pmb{x}\}_{m=1}^M$ in (5). If $M \ll N$, the computational cost of (5) can be much lower.

To enable the convolution using the basis functions, there are two quantities we need to learn from the data: the basis functions $\varphi_{m}$ and the coefficients $\beta_{m,n}$. If we are able to find both, the image can be formed by a simple multiply-add between the basis-convolved images $\varphi_m^T\pmb{x}$ and the representation coefficients $\beta_{m,n}$, as illustrated in Figure 3.

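In code, (5) amounts to $M$ invariant convolutions followed by a per-pixel multiply-add. The sketch below is a minimal NumPy/SciPy rendering under assumed array shapes, meant only to show the structure:

```python
import numpy as np
from scipy.signal import fftconvolve

def render_eq5(x, beta, basis):
    """Eq. (5): replace N varying convolutions by M invariant ones.

    x     : (H, W) source image
    beta  : (M, H, W) coefficients beta_{m,n} at every pixel
    basis : (M, h, w) basis PSFs varphi_m
    """
    y = np.zeros_like(x, dtype=float)
    for m in range(basis.shape[0]):
        # one spatially invariant convolution per basis function,
        # weighted per pixel by its coefficient
        y += beta[m] * fftconvolve(x, basis[m], mode="same")
    return y
```

With $M = 100$ and a $512 \times 512$ image, this is 100 FFT-based convolutions instead of 262,144 per-pixel ones.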
# 3.2. Idea 2: Learning the basis functions

To generate the basis functions $\varphi_{m}$, we consider the process described in [5] of forming a zero-mean Gaussian vector with a covariance matrix $R_Z$ from [22]. The strength of correlation is dictated by the optical parameters as well as the ratio $D/r_0$, where $D$ is the aperture diameter and $r_0$ is the Fried parameter [7]. Figure 7 (the upper half) illustrates the generation of the tilts; removing these does not change the shape of the PSF, but instead centers it. We then seek a basis representation of the resulting centered PSFs, which we show in the lower half of Figure 7.


Figure 7. The basis representation is generated in two different ways. For the tilts, we follow the work of [5] to draw spatially correlated tilts by multiplying an i.i.d. Gaussian vector with the tilt correlation matrix. For the high-order aberration terms, we consider the multi-aperture concept of [5] and the analytic solution derived in [31]. Principal component analysis is conducted to extract the spatial basis functions.

To generate the basis functions $\{\varphi_m\}_{m=1}^M$, we use the above procedure to construct a dataset containing 50,000 PSFs from weak to strong turbulence levels. (See the supplementary material for details.) Given the dataset, we perform a principal component analysis. For the numerical experiments reported in this paper, a total of $M = 100$ basis functions were used. The basis functions are then combined with the tilts and sent to the phase-to-space (P2S) transform to determine the basis coefficients $\{\beta_{m,n}\}$.

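A minimal sketch of this basis-learning step, assuming `psfs` is the 50,000-sample stack of centered PSFs described above:

```python
import numpy as np

def learn_basis(psfs, M=100):
    """PCA over centered PSFs; returns the M leading basis functions."""
    n, h, w = psfs.shape
    X = psfs.reshape(n, h * w)
    X = X - X.mean(axis=0)          # remove the mean PSF
    # Leading right singular vectors = principal components of the PSFs
    _, _, Vt = np.linalg.svd(X, full_matrices=False)
    return Vt[:M].reshape(M, h, w)
```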
# 3.3. Idea 3: Phase-to-Space (P2S) transform

The third idea, and the most important one, is the phase-to-space transform. The goal is to define a nonlinear mapping that converts the per-pixel Zernike coefficients $\alpha = [\alpha_{1}, \dots, \alpha_{K}]$ to their associated PSF basis coefficients $\beta = [\beta_{1}, \dots, \beta_{M}]$, where we have dropped the pixel index subscript $n$ for notational clarity.

At first glance, since the basis functions $\{\varphi_m\}_{m=1}^M$ are already found, a straightforward approach is to project the PSF $h$ (which is defined at each pixel location) onto $\{\varphi_m\}_{m=1}^M$. However, doing so would defeat the purpose of skipping the retrieval of $h$ from the Zernike coefficients, as this is the computational bottleneck. One may also consider analytically describing the PSF in terms of $\varphi_m$ and the Zernike coefficients,

$$
\boldsymbol{h} = \left| \mathcal{F}\left\{ W(\boldsymbol{\rho})\, e^{-j\phi(\boldsymbol{\rho})} \right\} \right|^{2} \stackrel{?}{=} \sum_{m=1}^{M} \beta_{m}\, \varphi_{m}. \tag{6}
$$

However, doing so (i.e., establishing the equality in (6) by writing an equation for $\beta_{m}$) is an open problem. Even if we focus on a special case with just a single Zernike coefficient, the calculation of the basis functions will involve non-trivial integration over the circular aperture [9].

To bypass the complication arising from (6), we introduce a computational technique. The idea is to build a shallow neural network to perform the conversion from $\alpha \in \mathbb{R}^K$ to $\beta \in \mathbb{R}^M$. We refer to the process as the phase-to-space transform and the network as the P2S network, as the input-output relationship is from the phase domain to the spatial (PSF) domain.


Figure 8. Illustration of the Phase-to-Space transform. We bypass the computationally expensive PSF formation process with a learned mapping between the Zernike and spatial domains. We also note the sizes of the P2S layers here.

A schematic diagram of the P2S transform is shown in Figure 8. Given the two Zernike coefficients representing the tilts and the other Zernike coefficients representing the higher-order aberrations, the P2S transform uses the first two Zernike coefficients to displace the pixels, and uses the network to convert the remaining $K - 2$ Zernike coefficients to $M$ basis representations.

The architecture of the P2S transform network consists of three fully connected layers, as summarized in Figure 8. In terms of training, we re-use the 50,000 PSFs generated for Idea 2 to train the P2S network. The training loss is defined as the $\ell_2$ distance between the predicted basis coefficients and the true coefficients (found offline by projecting the PSF onto the learned basis functions). Note that this network is light-weight because the P2S transform is performed per pixel. For an image with a large field of view, the P2S network can be executed in parallel. Therefore, even with a $512 \times 512$ image, the entire transformation is done in a single pass.

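A sketch of the P2S network in PyTorch. The text specifies three fully connected layers mapping the $K-2$ high-order Zernike coefficients to $M = 100$ basis coefficients; the exact layer widths are only given in Figure 8, so the sizes below (including `k_in` and `hidden`) are placeholders:

```python
import torch.nn as nn

class P2SNet(nn.Module):
    """Three fully connected layers: (K-2) Zernike -> M PSF coefficients."""
    def __init__(self, k_in=34, hidden=128, m_out=100):  # assumed sizes
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(k_in, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.ReLU(),
            nn.Linear(hidden, m_out),
        )

    def forward(self, alpha):
        # alpha: (num_pixels, K-2); all pixels are processed in one batch,
        # which is what makes the transform a single parallel pass
        return self.net(alpha)
```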
# 3.4. Interpolation across the grid

We now address the computational difficulty of generating a dense set of Zernike coefficients $\alpha \in \mathbb{R}^K$ for a high-resolution image. To accomplish this goal, we partition the image into a user-defined grid of anchor points, for example, a $64 \times 64$ grid. This grid corresponds to a correlation matrix of size $64^2 \times 64^2 = 4096 \times 4096$, which can be precomputed. Following Figure 7, 4096 sets of Zernike coefficients are drawn from the correlation matrix. To go from the grid of $64 \times 64$ anchor points to the full image, we interpolate the Zernike coefficients using bilinear interpolation.

For the generation of the anchor points, we implement the angle-of-arrival statistics according to [31], in conjunction with [2, 4]. The process is mathematically tedious but conceptually simple: one just needs to rewrite the entries of the correlation matrix in [5] with the formula provided by [31]. The output of the new correlation matrix is a set of spatially correlated Zernike coefficients.

It is important to emphasize the difference between the way we interpolate and the interpolation used in [5]. In [5], the interpolation is performed in the spatial domain, where two PSFs are superimposed to generate a new PSF. In our simulator, we interpolate the Zernike coefficients to superimpose two phase functions. If the phase $\phi$ and the PSF $h$ are related by the P2S transform, $\phi \stackrel{\mathrm{P2S}}{\longleftrightarrow} h$, it is important to note that for any $0 \leq \lambda \leq 1$,

$$
\lambda \phi_{1} + (1 - \lambda)\phi_{2} \;\overset{\mathrm{P2S}}{\nleftrightarrow}\; \lambda \boldsymbol{h}_{1} + (1 - \lambda)\boldsymbol{h}_{2}.
$$

Therefore, the interpolation used in [5] is less justifiable. In Figure 9 we illustrate the two interpolation schemes. We have selected a realistic and easily observable case for illustration, in which interpolation in the Zernike space generates a near-diffraction-limited PSF (the lucky effect [8]) that is missed by interpolation in the spatial domain.

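Because the interpolation must happen in the phase (Zernike) domain, upsampling the anchor grid is a plain bilinear resize of each coefficient channel. A minimal sketch under assumed shapes:

```python
import numpy as np
from scipy.ndimage import zoom

def upsample_zernike(alpha_grid, H, W):
    """Bilinear interpolation of Zernike coefficients (Section 3.4).

    alpha_grid : (K, G, G) coefficients on the anchor grid (e.g., G = 64)
    Returns    : (K, H, W) per-pixel coefficients for the full image
    """
    K, g1, g2 = alpha_grid.shape
    # order=1 -> bilinear; the interpolation stays in the phase domain,
    # never on the PSFs themselves (cf. the non-equivalence above)
    return np.stack([zoom(alpha_grid[k], (H / g1, W / g2), order=1)
                     for k in range(K)])
```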
# 3.5. Extension to color images

Most deep neural networks today are designed to handle color images. To ensure that our simulator is compatible with these networks, we extend it to handle color.

In principle, the spectral response of the turbulent medium is wavelength dependent, and the distortion must be simulated for a dense set of wavelengths. However, if the turbulence level is moderate, the wavelength-dependent behavior of the Fried parameter is less significant for the visible spectrum (roughly $400\mathrm{nm}$ to $700\mathrm{nm}$) when compared to other factors of the turbulence.


Figure 9. Comparison between the spatial interpolation scheme from [5] and our interpolation in the phase domain. For both cases, we show the PSFs and example resultant images. [Top] Spatial interpolation of two PSFs is performed via $\lambda h_1 + (1 - \lambda)h_2$, which is a superposition of the two PSFs. [Bottom] Phase interpolation is performed via $\lambda \phi_1 + (1 - \lambda)\phi_2$. In this example, the superposition of the two phase functions leads to a PSF with very mild phase distortion, known as a lucky observation [8]. This lucky observation is absent in the spatial domain interpolation.

To illustrate this observation, we show in Figure 10 the individual PSFs for several wavelengths from $400\mathrm{nm}$ (blue) to $700\mathrm{nm}$ (red). It is evident that the shape of the PSFs barely changes from one wavelength to another. In the same figure, we simulate two color images. The first image is simulated by using a single PSF $(525\mathrm{nm})$ for all color channels (and displayed as an RGB image). The second image is simulated by considering 3 PSFs with wavelengths $450\mathrm{nm}$, $540\mathrm{nm}$, and $570\mathrm{nm}$. We note that (c) is a more realistic simulation but requires $3\times$ the computation. However, the similarity of the PSFs across the color channels makes the difference visually indistinguishable, as seen in (d). The small gap demonstrated in Figure 10 suggests that we can simulate the RGB channels identically in such conditions.

   
Figure 10. (a) PSFs across the visible spectrum. (b) The same distortion applied to three channels using the center wavelength of the visible spectrum. (c) Wavelength-dependent distortions applied to three channels. (d) Error map between (b) and (c).

# 4. Experimental Evaluation

Our experimental results consist of four parts: (i) quantitative evaluation based on known turbulence statistics, (ii) visual comparison with real turbulence data, (iii) impact on deep neural network image reconstruction methods, and (iv) run-time comparison. Additionally, videos are included in the supplementary materials.

# 4.1. Quantitative evaluation

Evaluation schemes. In the turbulence simulation literature, there are two standard ways to quantitatively evaluate a simulator: (i) the Z-tilt and the differential-tilt statistics, and (ii) the short- and long-exposure statistics. For a simulator to be valid, it is necessary that the simulated data match the theoretical curves.

Turbulence conditions. To conduct this evaluation, we follow a similar setting as [5] and [11]. The parameters of the turbulence are listed in the supplementary material.

Evaluation 1: Tilt statistics. We first report the Z-tilt and the differential-tilt statistics, which measure the tilt correlation across angles of arrival. For example, the Z-tilt correlation should drop as the angle of arrival increases, because two pixels that are far apart should have less (but non-zero) correlation. The results of the Z-tilt and the differential tilt are shown in Figure 11. It is evident that the tilt statistics of the proposed simulator match well with the theoretical predictions.


Figure 11. The Z-tilt and differential-tilt statistics produced by our simulator match with the theoretical values.

Evaluation 2: Long and short exposure. We also analyze the long- and short-exposure (LE and SE, respectively) behavior of the generated PSFs. The LE PSF is a standard temporal average over the PSF realizations, while the SE is a temporal average over the centered PSFs. Since the LE includes pixel shifts, the spread of the LE PSF is larger than its SE counterpart. Furthermore, the SE is a valuable metric, as it quantifies the blur the system experiences regardless of its shift behavior. We present these results in Figure 12, where we again see a match between the simulated and theoretical behavior.

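For reference, a small sketch of how the two averages differ, given a stack of simulated PSF realizations (array shapes assumed):

```python
import numpy as np
from scipy.ndimage import center_of_mass, shift

def le_se(psf_stack):
    """Long-exposure vs. short-exposure PSFs (Section 4.1).

    psf_stack : (T, P, P) PSF realizations over time
    """
    le = psf_stack.mean(axis=0)                 # LE: plain temporal average
    center = (np.array(psf_stack.shape[1:]) - 1) / 2.0
    se = np.mean([shift(p, center - np.array(center_of_mass(p)))
                  for p in psf_stack], axis=0)  # SE: recenter, then average
    return le, se
```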
 
(a) Short exposure. (b) Long exposure.
Figure 12. The short- and long-exposure PSFs produced by our simulator match with the theoretical PSFs.

# 4.2. Visual comparison with real data

We emphasize that the results of the previous quantitative discussion are statistically significant. However, visual comparisons with real data, while subjective, are an important consideration and serve as a useful reality check. In the following discussion, we present data simulated at the same optical parameters as those provided with the real data, and show their real counterparts for visual comparison.

NATO dataset. The carefully recorded NATO RTG-40 dataset [25, 33] contains both optical and estimated turbulence parameters. For these particular sets of images, the target is $1\mathrm{km}$ from the imaging system, which images with passive visible light. The measured turbulence parameters indicate the turbulence level at the time the images were taken; we use these parameters in our simulation, with comparisons shown in Figure 13.

In comparing the simulated images against their real counterparts, we can see a match in blur and shifting effects. At higher turbulence levels, there are some small observable differences, though we argue this is inherent to modeling only the phase in this type of problem, as well as differences in illumination (e.g., a digital representation of the target pattern vs. illumination by the sun).

Datasets used in [12] and [1]. In addition to the NATO dataset, there are also the datasets used in [12] and [1]. The images in Figure 14 show a method of collecting turbulence data that uses a stream of gas in front of the camera to produce images at different turbulence levels. While this is a different scenario than the typical long-distance imaging sequences, this data serves as a decent proxy and is useful, as it is easier to collect and can provide ground truth by simply turning the gas system off. We present the results for visual comparison in Figure 14 and note the similarity between the random draws and the observations.


(a) real. (b) simulated. (c) tilt map.
Figure 13. Contrast-balanced NATO RTG-40 dataset reported by [25, 33]. The optical parameters are listed in the supplementary materials.


(a) ground truth. (b) real frame. (c) sim. frame.
Figure 14. Visual comparison of simulated and real turbulence data. Comparing individual frames, we can see similar blurring and warping effects.

# 4.3. Impact on training deep networks

We conduct an experiment to demonstrate the impact of the proposed simulator on a multi-frame turbulence image reconstruction task. The goal of this experiment is to show that a deep neural network trained with the data synthesized by the proposed simulator outperforms the same network trained with data generated by simulators that are less physically justified.

To demonstrate the impact of the simulator, we do not use any sophisticated network structure or training strategy. Our network has a simple U-Net architecture [28] with 50 input channels and is trained with an MSE loss for 200 epochs. The network is trained with 5000 simulated sequences, where each sequence contains 50 degraded frames. The ground truth images used for simulation are obtained from the Places dataset [35]. The sequences are simulated with a turbulence level $D/r_0$ uniformly sampled from [1, 8].

For comparison, we train the same network using the simulation technique proposed by Lau et al. [17]. This simulator has been used in several recent works [18, 34]. To ensure a fair comparison, we perform a uniform sweep over the Gaussian blur ($\sigma^2$ sampled from [1, 3]) and the tilt strength (sampled from [0.1, 0.4]). As a reference, we also report the results of a deterministic (non-learning-based) state-of-the-art reconstruction method by Mao et al. [21].


(a) Input (real). (b) Temp. Avg. (c) Mao et al. [21]. (d) [17]+U-Net. (e) Ours+U-Net.
Figure 15. Image reconstruction using real data (so ground truth is not available). For (d) and (e), we train a U-Net using data synthesized by [17] and our simulator, respectively. Notice the artifacts in (d).

Two qualitative reconstruction results are shown in Figure 15. It can be seen that the network trained with the proposed simulator has performance close to the state of the art, while visible artifacts are generated by the network trained with [17]. We also include a quantitative evaluation, where a split-step simulator [11] is used to generate 30 testing sequences under low, medium, and high turbulence levels $(D/r_0 = 1.5, 3,$ and $4.5)$. PSNR values are reported in Table 1. It is worth noting that the network trained with the data synthesized by our simulator achieves a performance comparable to the state of the art.

Table 1. PSNR values of the reconstruction results, averaged over 30 testing sequences. The testing data is synthesized by the split-step propagation method [11].

| $D/r_0$ | Mao et al. [21] | Ours+U-Net | [17]+U-Net |
| --- | --- | --- | --- |
| 1.5 | 27.33 dB | 27.18 dB | 26.59 dB |
| 3.0 | 27.04 dB | 26.98 dB | 26.11 dB |
| 4.5 | 25.85 dB | 26.01 dB | 25.40 dB |

# 4.4. Run time

Finally, we compare the run time of the proposed method with several existing methods [5, 11, 17]. The simulators are run on a computing cluster node with Intel Xeon "Sky Lake" processors (16 cores) and a Tesla V100 GPU. We use a $16 \times 16$ PSF grid for [5], which is comparable to our initial PSF grid. The for-loop in [17] is executed 1000 times as suggested by the authors. The run time of [11] is reported by the authors. The time required to process a $256 \times 256$ frame is reported in Table 2. The proposed method offers a $300\times$ to $1000\times$ speed-up compared to Hardie et al. [11].

Table 2. Average run time for each method to process a $256 \times 256$ frame. Units are in seconds.

| Reference | Method | CPU (s) | GPU (s) |
| --- | --- | --- | --- |
| Hardie et al. [11] | split-step | 119.63 | 24.36 |
| Chimitt-Chan [5] | collapsed | 5.88 | N/A |
| Lau et al. [17] | subsampling | 3.13 | N/A |
| Ours | P2S | 0.35 | 0.026 |

# 5. Conclusion

The simulation approach for imaging through atmospheric turbulence presented in this work has desirable advantages over existing methods. The key innovation, the P2S transform network, allows for a significant speed-up and additional reconstruction utility. With respect to deep-learning-based reconstruction, the outlined approach allows for the generation of amounts of training data not previously feasible. Additionally, the ability to use the simulator as a differentiable module in a neural network suggests further benefits for reconstruction. Finally, we expect that the ability to produce statistically accurate data far more efficiently will enable further statistical analysis of turbulent imaging properties through numerical methods not previously possible.

# Acknowledgement

The work is supported, in part, by the National Science Foundation under grants CCF-1763896 and ECCS-2030570.

# References

[1] Nantheera Anantrasirichai, Alin Achim, Nick G. Kingsbury, and David R. Bull. Atmospheric turbulence mitigation using complex wavelet-based fusion. IEEE Transactions on Image Processing, 22(6):2398-2408, June 2013.
[2] Santasri Basu, Jack E. McCrae, and Steven T. Fiorino. Estimation of the path averaged atmospheric refractive index structure constant from time lapse imagery. In Proc. SPIE 9465, Laser Radar Technology and Applications XX; and Atmospheric Propagation XII, pages 1-9, May 2015.
[3] Jeremy P. Bos and Michael C. Roggemann. Technique for simulating anisoplanatic image formation over long horizontal paths. Optical Engineering, 51(10):1-9, 2012.
[4] Gary A. Chanan. Calculation of wave-front tilt correlations associated with atmospheric turbulence. Journal of the Optical Society of America A, 9(2):298-301, Feb. 1992.
[5] Nicholas Chimitt and Stanley H. Chan. Simulating anisoplanatic turbulence by sampling intermodal and spatially correlated Zernike coefficients. Optical Engineering, 59(8):1-26, 2020.
[6] David L. Fried. Statistics of a geometric representation of wavefront distortion. Journal of the Optical Society of America, 55(11):1427-1435, Nov. 1965.
[7] David L. Fried. Optical resolution through a randomly inhomogeneous medium for very long and very short exposures. Journal of the Optical Society of America, 56(10):1372-1379, 1966.
[8] David L. Fried. Probability of getting a lucky short-exposure image through turbulence. Journal of the Optical Society of America, 68(12):1651-1658, Dec. 1978.
[9] Joseph W. Goodman. Introduction to Fourier Optics. Roberts and Company, Englewood, Colorado, 3rd edition, 2005.
[10] Joseph W. Goodman. Statistical Optics. John Wiley and Sons Inc., Hoboken, New Jersey, 2nd edition, 2015.
[11] Russell C. Hardie, Jonathan D. Power, Daniel A. LeMaster, Douglas R. Droege, Szymon Gladysz, and Santasri Bose-Pillai. Simulation of anisoplanatic imaging through optical turbulence using numerical wave propagation with new validation analysis. Optical Engineering, 56(7):1-16, 2017.
[12] Michael Hirsch, Suvrit Sra, Bernhard Schölkopf, and Stefan Harmeling. Efficient filter flow for space-variant multiframe blind deconvolution. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 607-614, June 2010.
[13] Bobby R. Hunt, Amber L. Iler, Christopher A. Bailey, and Michael A. Rucci. Synthesis of atmospheric turbulence point spread functions by sparse and redundant representations. Optical Engineering, 57(2):1-11, Feb. 2018.
[14] Andrei N. Kolmogorov. The local structure of turbulence in incompressible viscous fluid for very large Reynolds numbers. Akademiia Nauk SSSR Doklady, 30:301-305, 1941.
[15] Svetlana L. Lachinova, Mikhail A. Vorontsov, Grigorii A. Filimonov, Daniel A. LeMaster, and Matthew E. Trippel. Comparative analysis of numerical simulation techniques for incoherent imaging of extended objects through atmospheric turbulence. Optical Engineering, 56(7):071509, 2017.
[16] Chun Pong Lau, Yu Hin Lai, and Lok Ming Lui. Restoration of atmospheric turbulence-distorted images via RPCA and quasiconformal maps. Inverse Problems, Mar. 2019.
[17] Chun Pong Lau and Lok Ming Lui. Subsampled turbulence removal network. Mathematics, Computation and Geometry of Data, 1(1):1-33, 2021.
[18] Chun Pong Lau, Hossein Souri, and Rama Chellappa. ATFaceGAN: Single face semantic aware image restoration and recognition from atmospheric turbulence. IEEE Transactions on Biometrics, Behavior, and Identity Science, 2021.
[19] Kevin R. Leonard, Jonathan Howe, and David E. Oxford. Simulation of atmospheric turbulence effects and mitigation algorithms on stand-off automatic facial recognition. In Proc. SPIE 8546, Optics and Photonics for Counterterrorism, Crime Fighting, and Defence VIII, pages 1-18, Oct. 2012.
[20] Yifei Lou, Sung Ha Kang, Stefano Soatto, and Andrea Bertozzi. Video stabilization of atmospheric turbulence distortion. Inverse Problems and Imaging, 7(3):839-861, Aug. 2013.
[21] Zhiyuan Mao, Nicholas Chimitt, and Stanley H. Chan. Image reconstruction of static and dynamic scenes through anisoplanatic turbulence. IEEE Transactions on Computational Imaging, 6:1415-1428, 2020.
[22] Robert J. Noll. Zernike polynomials and atmospheric turbulence. Journal of the Optical Society of America, 66(3):207-211, Mar. 1976.
[23] Timothy Popkin, Andrea Cavallaro, and David Hands. Accurate and efficient method for smoothly space-variant Gaussian blurring. IEEE Transactions on Image Processing, 19(5):1362-1370, 2010.
[24] Guy Potvin, Luc Forand, and Denis Dion. A simple physical model for simulating turbulent imaging. In Proceedings of SPIE, volume 8014, pages 80140Y:1-13, 2011.
[25] Endre Repasi and Robert Weiss. Analysis of image distortions by atmospheric turbulence and computer simulation of turbulence effects. In Proc. SPIE 6941, May 2008.
[26] Endre Repasi and Robert Weiss. Computer simulation of image degradations by atmospheric turbulence for horizontal views. In Proc. SPIE 8014, Defense, Security, and Sensing, May 2011.
[27] Michael C. Roggemann and Byron M. Welsh. Imaging through Atmospheric Turbulence. Laser & Optical Science & Technology. Taylor & Francis, 1996.
[28] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention (MICCAI) 2015, pages 234-241, Cham, 2015. Springer International Publishing.
[29] Jason D. Schmidt. Numerical Simulation of Optical Wave Propagation: With Examples in MATLAB. SPIE Press, Jan. 2010.
[30] Armin Schwartzman, Marina Alterman, Rotem Zamir, and Yoav Y. Schechner. Turbulence-induced 2D correlated image distortion. In Proc. International Conference on Computational Photography, pages 1-12, 2017.
[31] Naruhisa Takato and Ichirou Yamaguchi. Spatial correlation of Zernike phase-expansion coefficients for atmospheric turbulence with finite outer scale. Journal of the Optical Society of America A, 12(5):958-963, May 1995.
[32] Valeryan I. Tatarski. Wave Propagation in a Turbulent Medium. Dover Publications, New York, 1961.
[33] David Tofsted, Sean O'Brien, Jimmy Yarbrough, David Quintis, and Manuel Bustillos. Characterization of atmospheric turbulence during the NATO RTG-40 land field trials. In Atmospheric Propagation IV, volume 6551, pages 199-208. SPIE, 2007.
[34] Rajeev Yasarla and Vishal M. Patel. Learning to restore a single face image degraded by atmospheric turbulence using CNNs. arXiv, abs/2007.08404, 2020.
[35] Bolei Zhou, Agata Lapedriza, Aditya Khosla, Aude Oliva, and Antonio Torralba. Places: A 10 million image database for scene recognition. IEEE Transactions on Pattern Analysis and Machine Intelligence, 40(6):1452-1464, 2018.
[36] Xiang Zhu and Peyman Milanfar. Removing atmospheric turbulence via space-invariant deconvolution. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(1):157-170, Jan. 2013.
acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0cdda5cf776895acdede1a6dce8cec439a50dc423d33b1edd78bcbf668a621b1
+size 612138
acceleratingatmosphericturbulencesimulationvialearnedphasetospacetransform/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:68714ce1210fbede703cb11bf4191ef595a7151fa248dd9f415a7ff128e65da7
+size 449674
achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/ec9a1135-4936-4dd0-8f02-af317fa4bca3_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:94f16ffcb3b6bbdba2b136793238e1901da590e5d155a9c1b9c10a44ce7b8728
+size 88056
achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/ec9a1135-4936-4dd0-8f02-af317fa4bca3_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3171cc8698c7a891ed98d542597171f8aa34ba3bc9c231ea526d045b98500f52
+size 116122
achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/ec9a1135-4936-4dd0-8f02-af317fa4bca3_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3498bde289ed8bde894aa6897afa1a02c5fa3a438f62a0d8926f311daf8412a4
+size 3282117
achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/full.md
ADDED
@@ -0,0 +1,375 @@
| 1 |
+
# Achieving on-Mobile Real-Time Super-Resolution with Neural Architecture and Pruning Search

Zheng Zhan\*, Yifan Gong\*, Pu Zhao\*, Geng Yuan\*, Wei Niu\*, Yushu Wu\*, Tianyun Zhang\*, Malith Jayaweera\*, David Kaeli\*, Bin Ren\*, Xue Lin\*, Yanzhi Wang\*

$^{1}$ Northeastern University, $^{2}$ College of William & Mary, $^{3}$ Cleveland State University

{zhan.zhe, gong.yifa, zhao.pu, xue.lin, yanz.wang}@northeastern.edu, t.zhang85@csuohio.edu, kaeli@ece.neu.edu, bren@cs.wm.edu

# Abstract

Though recent years have witnessed remarkable progress in single image super-resolution (SISR) tasks with the prosperous development of deep neural networks (DNNs), deep learning methods are confronted with computation and memory consumption issues in practice, especially on resource-limited platforms such as mobile devices. To overcome this challenge and facilitate the real-time deployment of SISR tasks on mobile, we combine neural architecture search with pruning search and propose an automatic search framework that derives sparse super-resolution (SR) models with high image quality while satisfying the real-time inference requirement. To decrease the search cost, we leverage the weight sharing strategy by introducing a supernet and decouple the search problem into three stages, including supernet construction, compiler-aware architecture and pruning search, and compiler-aware pruning ratio search. With the proposed framework, we are the first to achieve real-time SR inference (with only tens of milliseconds per frame) for implementing 720p resolution with competitive image quality (in terms of PSNR and SSIM) on mobile platforms (Samsung Galaxy S20).

# 1. Introduction

In recent years, people have an ever-increasing demand for image processing at higher resolutions, driving the rapid development of SR. In general, the SR principle is to convert low-resolution images to high-resolution images with clearer details and more information. It has been adopted in various applications such as crime scene analysis, to identify unnoticeable evidence, or medical image processing, for more accurate diagnosis.

With the fast growth of live streaming and video recording, video content enjoys high popularity. However, videos often have a lower resolution than the display, due either to limited communication bandwidth or to the increasingly high resolution of displays. Besides, live streaming usually has a real-time requirement that the latency of each frame should not exceed a threshold. Thus, it is desirable to achieve real-time SR for video locally.

Compared with classic interpolation algorithms for improving image or video resolution, deep learning-based SR can deliver higher visual quality by learning the mappings from low-resolution to high-resolution images from external datasets. Despite its superior visual performance, deep learning-based SR is usually far more expensive, with a large amount of computation and huge power consumption (typically hundreds of watts on powerful GPUs) [19, 17, 53], leading to difficulties for real-time implementation. Moreover, as SR is often deployed on edge devices such as mobile phones for live streaming or video capturing, the limited memory and computing resources on edge devices make it even harder to achieve real-time SR.

Weight pruning [60, 22, 26] is often adopted to remove the redundancy in DNNs to reduce the resource requirement and accelerate inference. There are various pruning schemes, including unstructured pruning [23, 22, 20, 44], coarse-grained structured pruning [50, 71, 70, 47, 42], and fine-grained structured pruning [45, 18, 21]. Unstructured pruning removes arbitrary weights, leading to irregular pruned weight matrices and limited hardware parallelism. Structured pruning maintains a full matrix format of the remaining weights such that the pruned model is compatible with GPU acceleration for inference. Recently, fine-grained structured pruning schemes, including pattern-based pruning and block-based pruning, have been proposed to provide a finer pruning granularity for higher accuracy while exhibiting certain regularities that can be exploited by compilers to improve hardware parallelism. To achieve inference acceleration of SR models, we focus on conventional structured pruning and fine-grained structured pruning.

Prior works usually use a fixed pruning scheme for the whole model. As different pruning schemes achieve different SR and acceleration performance, a new optimization dimension is introduced: finding the most suitable pruning configuration for each layer instead of for the whole model. Besides, as the performance of pruning depends on the original unpruned model, it is also essential to search for an unpruned starting model with high SR performance.

In this paper, to facilitate real-time SR deployment on edge devices, we propose a framework incorporating architecture and pruning search to find the most suitable cell-wise SR block configurations and layer-wise pruning configurations. Our implementation can achieve real-time SR inference with competitive SR performance on mobile devices. We summarize our contributions as follows.

- We propose an architecture and pruning search framework to automatically find the best configuration of the SR block in each cell and the pruning scheme for each layer, achieving real-time SR implementation on mobile devices with high image quality.
- We train a supernet to provide a well-trained unpruned model for all possible combinations of the SR block in each supernet cell before the architecture and pruning search. Thus there is no need to train a separate unpruned model for each combination over multiple epochs, saving tremendous training effort.
- Different from previous works with a fixed pruning scheme for all layers or fixed SR blocks for all cells, we automatically search the best-suited SR block for each cell and pruning scheme for each layer. To reduce the complexity, we decouple the pruning ratio search and employ Bayesian optimization (BO) to accelerate the SR block and pruning scheme search.
- With the proposed method, we are the first to achieve real-time SR inference (with only tens of milliseconds per frame) for implementing 720p resolution with competitive image quality (in terms of PSNR and SSIM) on mobile platforms (Samsung Galaxy S20). Our achievements facilitate various practical applications with real-time requirements such as live streaming or video communication.

# 2. Background and Related Works

# 2.1. Preliminaries on Deep Learning-based SR

SISR aims to generate a high-resolution image from its low-resolution version. The use of DNNs for the SR task was first proposed in SRCNN [16], and later works try to improve the upscaling characteristics and image quality with larger networks [33, 41, 68, 67, 14]. However, SR models are resource-intensive since they maintain or upscale the spatial dimensions of the feature map at each layer. Therefore, the number of multiply-accumulate (MAC) operations is typically on the order of billions, leading to high inference latency (seconds per image) even on a powerful GPU.

Several attempts have been made to design lightweight SR models for practical applications, including placing the upsampling operator at the end of the network [17, 53], adopting channel splitting [31], and using wider activation [64]. Specifically, work [64] proposed the WDSR-A and WDSR-B blocks, two of the state-of-the-art SR building blocks with high image quality. Besides, inspired by the success of neural architecture search (NAS), recent SR works try to establish more efficient and lightweight SR models by leveraging NAS approaches [12, 54, 37, 13]. But the proposed models are still too large, with tremendous resource demands. Furthermore, they do not consider practical mobile deployment with limited hardware resources. For mobile deployment, the winner of the PIRM challenge [57] and MobiSR [38] are among the few works that make progress on SR inference on mobile devices. But their latency is still far from real time, requiring nearly one second per frame.

# 2.2. DNN Model Pruning

Weight pruning reduces the redundancy in DNNs for less storage and computation. Existing pruning schemes can be divided into unstructured pruning, coarse-grained structured pruning, and fine-grained structured pruning.

Unstructured pruning allows weights at arbitrary locations to be removed [22, 20, 15], as shown in Figure 1 (a). Despite the high accuracy, its irregular weight matrices are not compatible with GPU acceleration. Coarse-grained structured pruning [60, 27, 26, 65, 28] keeps the structural regularity of the remaining weights; for example, channel pruning prunes entire channels, as in Figure 1 (b). The key advantage is that a full matrix format is maintained, thus facilitating hardware acceleration. However, coarse-grained structured pruning often leads to non-negligible accuracy degradation [59].

Fine-grained structured pruning includes block-based pruning [18] and pattern-based pruning [51, 45, 21]. They incorporate the benefits of fine-grained pruning while maintaining structures that can be exploited for hardware acceleration with the help of compilers. Block-based pruning divides the weight matrix of a DNN layer into multiple equal-sized blocks and applies structured pruning independently to each block, as shown in Figure 1 (c). Pattern-based pruning is a combination of kernel pattern pruning and connectivity pruning, as illustrated in Figure 1 (d). Kernel pattern pruning removes weights by forcing the remaining weights in a kernel to form a specific kernel pattern.


(c) Fine-grained structured pruning (block-based pruning)


(d) Fine-grained structured pruning (pattern-based pruning)

Figure 1. (a) Unstructured pruning; (b) coarse-grained structured pruning (channel); (c) fine-grained structured pruning (block-based); and (d) fine-grained structured pruning (pattern-based).

Connectivity pruning removes entire redundant kernels and is complementary to kernel pattern pruning for a higher compression rate. With an appropriate degree of pruning regularity, compiler-level code generation can be exploited to achieve high hardware parallelism.

# 2.3. DNN Acceleration Frameworks on Mobile

On-mobile DNN inference has attracted great interest from both industry and academia [35, 36, 61, 32, 63, 25]. Representative DNN acceleration frameworks, including TensorFlow-Lite [1], Alibaba MNN [2], PyTorch Mobile [3], and TVM [10], are designed to support inference acceleration on mobile. Several graph optimization techniques are used in these frameworks, including layer fusion, constant folding, and runtime optimizations on both mobile CPU and GPU. But the missing piece is that sparse (pruned) models for further speedup are not supported. Recently, some efforts have been made to accelerate pattern-based pruned models on mobile with compiler-based optimizations [51, 45]. But they suffer difficulties when generalized to DNN layers other than $3 \times 3$ convolutional (CONV) layers.

# 2.4. Motivation

State-of-the-art SR methods leverage huge DNNs to pursue high image quality, incurring extremely high computation cost. Thus, it is difficult to achieve real-time SR even on powerful GPUs, not to mention mobile devices with limited resources. But given the wide adoption of mobile phones and the popularity of video communication and live streaming applications with high resolution requirements, it is desirable to implement on-mobile real-time SR with high image quality.

SR models usually consist of several cascaded SR blocks. Different blocks have different latency performance, while different combinations form various SR models with different image quality. Meanwhile, with weight pruning for acceleration, each layer may prefer a different pruning scheme, resulting in different accuracy and acceleration performance. For instance, Figure 2 illustrates the acceleration curves of different pruning schemes on a given $3 \times 3$ CONV layer. Hence, it is desirable to find the best-suited combination of SR blocks and per-layer pruning scheme and ratio to achieve high image quality while satisfying the real-time execution requirement.


Figure 2. Inference acceleration rate vs. pruning ratio of different pruning schemes. Results are measured on a Samsung Galaxy S20 smartphone, using a typical $3 \times 3$ CONV layer in a WDSR block with 24/48 input/output channels and $320 \times 180$ feature size.

Finding a satisfactory network architecture and pruning configuration is too complex to be solved manually. Thus, an automatic architecture and pruning search method [58] is desired. However, it is expensive to search directly in a large space covering block number (depth), block type, per-layer pruning scheme, and per-layer pruning ratio. Hence, we decouple the search into several stages and solve them separately.

# 3. Framework Overview

The objective is to combine architecture search with pruning search to find sparse SR models facilitating various practical applications such as live streaming or video communication. The sparse SR models should satisfy the real-time inference requirement (with only tens of milliseconds per frame) for high upscaling resolutions such as 720p $(1280\times 720)$ on mobile devices, with image quality competitive with state-of-the-art methods.

The search problem involves determining the number of stacked cells, the type of block selected in each cell, and the pruning scheme and pruning ratio for each layer of the SR network. Direct search in such a high-dimensional space is computationally expensive. To reduce the search cost in terms of time and computation, we leverage the weight sharing strategy by introducing a supernet and decouple the search problem into three stages: 1) supernet construction, 2) compiler-aware architecture and pruning search, and 3) compiler-aware pruning ratio determination. Supernet construction includes supernet initialization, which determines the number of stacked cells, and supernet training, which provides a good starting point for the following two steps. Then, a combination of block determination and pruning scheme selection for each layer is performed. The goal is to find a desirable structure that maximizes the image quality while satisfying the target latency $t$ with the aid of compiler optimizations. Specifically, when $t \leq 50\mathrm{ms}$, the target latency meets the real-time requirement. The final step is automatic pruning ratio determination with the reweighted dynamic regularization method. We show the overall framework in Figure 3.

# 4. Supernet Construction

In architecture and pruning search, the accuracy of a model (architecture) after pruning largely depends on the accuracy of the unpruned starting model. To obtain well-trained starting models with satisfactory SR performance for various architectures, the straightforward method is to perform training for each new architecture, which usually costs a huge training effort. Instead of training separate models, we train a supernet such that, for any new model, we can activate the corresponding path in the supernet to derive the well-trained unpruned model immediately, without further effort to train each new model from scratch. Thus, the supernet can significantly reduce the training time for the unpruned models, thereby accelerating the search.

The architecture of the supernet is illustrated in Figure 3 (a). We encode the architecture search space $\mathcal{A}$ with a supernet, denoted as $\mathcal{S}(\mathcal{A}, W)$, where $W$ represents the weight collection. The supernet consists of $N$ stacked cells, and each cell contains $K$ SR block choices. In our work, we adopt WDSR-A and WDSR-B, two highly efficient SR blocks with high image quality, as the block choices. Note that our framework is not restricted to the WDSR blocks and can be generalized to different kinds of SR residual blocks. The output of each SR block $k$ in cell $n$ connects with all of the SR blocks in the next cell $n + 1$. We define the choice of one SR block (WDSR-A or WDSR-B) for each supernet cell as one path segment, and all of the possible combinations of the $N$ path segments form the architecture search space $\mathcal{A}$ with a size of $K^N$. One path is then the collection of $N$ path segments for all cells, denoting one SR candidate model. During supernet computation, only one path is activated while the other, unselected SR blocks do not participate in the computation.

To construct a supernet, there are two necessary steps: 1) determine the number of stacked cells of the supernet and initialize the supernet, and 2) fully train the supernet to provide a good starting point with high image quality and low overhead for the following SR candidate net search.

# 4.1. Determine Cell Number with Latency Models

The number of stacked cells $N$ of the supernet should be determined beforehand to guarantee that the SR candidate models have the potential to satisfy the target latency $t$ on mobile devices. Several widely used techniques in SR, such as pixel shuffling (a.k.a. sub-pixel convolution) and the global residual path, are often hard to optimize and accelerate, resulting in a fixed latency overhead. Moreover, the skip (identity) connection structure in a block of a cell leads to a certain execution overhead that is difficult to reduce and accumulates with the number of stacked blocks.

To determine the number of stacked cells, we build a latency model enabling fast and precise estimation of the overall model inference latency on the target device (e.g., a Samsung S20 smartphone). The latency model contains look-up tables of inference latency for the different types of layers used in SR models (e.g., $1 \times 1$ CONV, $3 \times 3$ CONV, $5 \times 5$ CONV, skip connection, and sub-pixel convolution). For each layer type, several different settings are considered, including the number of filters and the input and output feature map size. Our latency model is compiler-aware, built by measuring real-world inference latency on the target device with compiler optimizations incorporated. More details about our compiler optimization techniques are given in Appendix A. The latency model building time can be ignored since no training process is involved, and the building process can be conducted in parallel with the supernet training. We only build the latency model once for a specific device. Moreover, we also include the sparse inference latency for different types of layers under different pruning schemes and pruning ratios in our latency model, which will be used in the pruning search stage (more discussion in Section 5.1).

Therefore, the overall inference latency on the target device can be estimated by accumulating the per-layer latencies queried from our latency model. With a target latency $t$ for the SR candidate models, the suitable number of stacked cells can be determined. Furthermore, decoupling the supernet depth determination from the search space of the candidate SR models greatly reduces the search complexity.
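
To make the look-up-table mechanics concrete, below is a minimal sketch of how such a latency model could accumulate per-layer latency and bound the cell count. This is not the authors' code; the table entries, key layout, and function names are all illustrative assumptions.

```python
# Minimal sketch of a look-up-table latency model (all values illustrative).
# Per-layer latencies are measured once on the target device and stored;
# a model's overall latency is estimated by summing table entries.

LATENCY_LUT = {
    # (layer_type, in_ch, out_ch, feat_h, feat_w) -> measured latency in ms
    ("conv3x3", 24, 48, 320, 180): 4.1,
    ("conv1x1", 48, 24, 320, 180): 1.2,
    ("skip", 24, 24, 320, 180): 0.6,
    ("subpixel", 24, 3, 320, 180): 2.3,
}

def estimate_latency(layers):
    """Accumulate per-layer latency (ms) for a list of layer configs."""
    return sum(LATENCY_LUT[cfg] for cfg in layers)

def max_cells(per_cell_layers, fixed_overhead_ms, target_ms):
    """Largest cell count N whose estimated latency stays under target t."""
    cell_ms = estimate_latency(per_cell_layers)
    return int((target_ms - fixed_overhead_ms) // cell_ms)
```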

# 4.2. Supernet Training

After the supernet is initialized, the next step is to train its weights $\mathcal{W}$ to minimize the loss function $\mathcal{L}(\mathcal{A},\mathcal{W})$. The well-trained supernet provides a good starting point for the following network architecture and pruning search, as a candidate net architecture $a$ directly inherits weights from the path $\mathcal{W}(a)$ in the supernet. Note that the weights $\mathcal{W}$ of the supernet should be optimized in a way that all the candidate architectures $a\in \mathcal{A}$ with weights $\mathcal{W}(a)$ are optimized simultaneously. However, jointly optimizing the architecture parameters $a$ and model parameters $\mathcal{W}(a)$ often introduces extra complexity. Furthermore, it may lead to a situation where some nodes in the graph are well trained while others are poorly trained, incurring unfair comparisons between paths at different levels of maturity in the supernet.


Figure 3. Framework overview. The framework is composed of three stages to reduce the search cost: (a) stage 1: supernet construction, (b) stage 2: architecture and pruning search, and (c) stage 3: pruning ratio determination.

To mitigate this problem, we adopt a single-path sampling & training strategy to accelerate the convergence of supernet training. Specifically, for each training batch, we only activate and train one random path while the other, unselected SR blocks are skipped. In this way, architecture selection and model weight updating are decoupled. This strategy is hyper-parameter free, and each path is an SR model providing a well-trained unpruned starting point for the following architecture and pruning search.
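
As a concrete illustration of the single-path sampling & training strategy, here is a minimal PyTorch-style sketch. Names such as `supernet.cells` and the `forward(lr_img, path)` signature are assumptions for illustration, not the authors' implementation.

```python
import random
import torch

# Minimal sketch of single-path supernet training (names illustrative).
# supernet.cells is assumed to be a list of cells, each holding K candidate
# SR blocks; per batch, one random path is activated and trained.

def train_one_batch(supernet, batch, optimizer, loss_fn):
    lr_img, hr_img = batch
    # Sample one path segment (block choice) per cell.
    path = [random.randrange(len(cell.blocks)) for cell in supernet.cells]
    sr_img = supernet(lr_img, path)   # unselected blocks are skipped
    loss = loss_fn(sr_img, hr_img)
    optimizer.zero_grad()
    loss.backward()                   # gradients flow only through the path
    optimizer.step()
    return loss.item()
```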

# 5. Architecture and Pruning Search

We define each architecture and pruning candidate as a configuration that selects one SR block for each supernet cell together with a pruning scheme for each layer. The architecture and pruning search aims to find the best cell-wise SR block selection and layer-wise pruning scheme configuration, i.e., the candidate with the highest image quality satisfying the target latency $t$. The search consists of two main steps: 1) candidate generation and 2) candidate evaluation. In each iteration, candidate generation samples architecture and pruning candidates, which are further evaluated in the candidate evaluation process. To improve search efficiency, we adopt evolutionary-based candidate updating in candidate generation and BO in candidate evaluation to obtain the best candidate.

# 5.1. Candidate Generation

# 5.1.1 Candidate Sampling

Candidate generation samples architecture and pruning candidates from the search space. Each candidate $g$ is a directed acyclic graph denoting the cell-wise SR block selection and layer-wise pruning scheme selection. For the SR block in each supernet cell, we can choose either the WDSR-A block or the WDSR-B block. For the pruning scheme, we can choose channel pruning [60], pattern-based pruning [45], or block-based pruning [18] for each layer. Different from previous works with fixed pruning schemes for all layers, we can choose different pruning schemes for different layers, which is also supported by our compiler code generation. Note that the difference between the candidate $g$ and the candidate network architecture $a$ is that $g$ includes the per-layer pruning scheme selection.

We encode each candidate with a binary vector by assigning a binary feature to each possible cell-wise block choice and layer-wise pruning scheme selection, denoting whether the block or pruning scheme is adopted or not.

Decoupling pruning ratio search. To prune the model, we also need to configure the layer-wise pruning ratio corresponding to each pruning scheme. As it is expensive to search the continuous pruning ratio values for each layer, at this step we simply set the layer-wise pruning ratio to the minimal value satisfying the target latency $t$. Therefore, we can focus on the pruning scheme search first. To determine the minimal pruning ratio, we estimate the latency of the unpruned model $t'$ and, given the target latency $t$, obtain the minimal speedup required for the whole model, which is $t'/t$. To satisfy the overall speedup, we simply require each layer to achieve this minimal speedup $t'/t$. Then, according to the latency model (detailed in Section 4.1) and the layer-wise speedup, we can obtain the layer-wise minimal pruning ratio corresponding to each pruning scheme.
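
A minimal sketch of this decoupled initialization follows, assuming a per-layer sparse-latency table like the one described in Section 4.1; the data layout and names are illustrative, not the authors' code.

```python
# Minimal sketch of the decoupled pruning-ratio initialization (illustrative).
# Given unpruned latency t' and target t, require each layer to reach the
# whole-model speedup t'/t, then invert the latency model to a pruning ratio.

def minimal_pruning_ratios(layers, t_unpruned, t_target):
    """layers: list of (dense_ms, sparse_curve), where sparse_curve is a
    list of (pruning_ratio, measured_latency_ms) pairs sorted by ratio,
    taken from the compiler-aware latency model."""
    speedup = t_unpruned / t_target
    ratios = []
    for dense_ms, sparse_curve in layers:
        budget_ms = dense_ms / speedup
        # Smallest measured ratio whose sparse latency meets the budget;
        # assumes the curve reaches the budget at some ratio.
        ratio = next(r for r, ms in sparse_curve if ms <= budget_ms)
        ratios.append(ratio)
    return ratios
```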

# 5.1.2 Candidate Updating

In each iteration, we need to generate a pool of new candidates. To make candidate updating more efficient, an evolutionary-based candidate updating method is adopted. We keep a record of all evaluated candidates with their evaluation performance. To generate new candidates, we mutate the candidates with the best evaluation performance in the records by randomly changing one SR block of one random cell or one pruning scheme of one random layer. Specifically, we first select the $H$ candidates with the highest evaluation performance, and mutate each of them iteratively until $C$ new proposals are derived.
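
The sketch below illustrates this mutation-based updating under the stated search space (WDSR-A/WDSR-B blocks; channel, pattern-based, or block-based pruning). The candidate encoding and record structure are assumptions for illustration.

```python
import copy
import random

# Minimal sketch of evolutionary candidate updating (names illustrative).
# A candidate holds per-cell block choices and per-layer pruning schemes;
# mutation flips one random cell's block or one random layer's scheme.

BLOCKS = ["WDSR-A", "WDSR-B"]
SCHEMES = ["channel", "pattern", "block"]

def mutate(candidate):
    child = copy.deepcopy(candidate)
    if random.random() < 0.5:
        i = random.randrange(len(child["blocks"]))
        child["blocks"][i] = random.choice(
            [b for b in BLOCKS if b != child["blocks"][i]])
    else:
        j = random.randrange(len(child["schemes"]))
        child["schemes"][j] = random.choice(
            [s for s in SCHEMES if s != child["schemes"][j]])
    return child

def propose(records, h, c):
    """Mutate the h best evaluated candidates until c new proposals exist."""
    parents = sorted(records, key=lambda r: r["reward"], reverse=True)[:h]
    proposals = []
    while len(proposals) < c:
        proposals.append(mutate(random.choice(parents)["candidate"]))
    return proposals
```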

# Algorithm 1 Evaluation with BO

Input: Observation data $\mathcal{D}$, BO batch size $B$, BO acquisition function $\phi(\cdot)$
Output: The best candidate $g^{*}$
for steps do:
- Generate a pool of candidates $\mathcal{G}_c$;
- Train an ensemble of neural predictors with $\mathcal{D}$;
- Select $\{\hat{g}^i\}_{i=1}^B = \arg\max_{g\in \mathcal{G}_c}\phi(g)$;
- Evaluate the selected candidates and obtain rewards $\{r^i\}_{i=1}^B$ for $\{\hat{g}^i\}_{i=1}^B$;
- $\mathcal{D}\gets \mathcal{D}\cup (\{\hat{g}^i\}_{i=1}^B,\{r^i\}_{i=1}^B)$;

end for

# 5.2. Candidate Evaluation

As it incurs a high time cost to prune and retrain the model for each candidate, BO [11] is adopted to expedite candidate evaluation. From the generated $C$ candidates, we first use BO to select $B$ ($B < C$) candidates with potentially better performance. Then the selected candidates are evaluated to obtain their accurate SR performance, while the unselected candidates are not evaluated. The number of actually evaluated candidates is reduced in this way.

BO includes two main components, i.e., training an ensemble of neural predictors and selecting candidates based on acquisition function values enabled by the predictor ensemble. To make use of BO, the ensemble of neural predictors provides an average SR prediction with a corresponding uncertainty estimate for each unseen candidate. BO is then able to choose the candidate which maximizes the acquisition function. We show the full algorithm in Algorithm 1 and specify BO in the following.

# 5.2.1 Bayesian Optimization with Neural Predictors

Neural predictor. The neural predictor is a neural network repeatedly trained on the current set of evaluated candidates with their evaluation performance to predict the reward of unseen candidates. It is a neural network with 8 sequential fully-connected layers of width 40, trained by the Adam optimizer with a learning rate of 0.01. For the loss function to train the neural predictors, the mean absolute percentage error (MAPE) is adopted, as it gives a higher weight to candidates with higher evaluation performance:

$$
\mathcal{L}\left(m_{\text{pred}}, m_{\text{true}}\right) = \frac{1}{n}\sum_{i=1}^{n}\left|\frac{m_{\text{pred}}^{(i)} - m_{\mathrm{UB}}}{m_{\text{true}}^{(i)} - m_{\mathrm{UB}}} - 1\right|, \tag{1}
$$

where $m_{\mathrm{pred}}^{(i)}$ and $m_{\mathrm{true}}^{(i)}$ are the predicted and true values, respectively, of the reward for the $i$-th candidate in a batch, and $m_{\mathrm{UB}}$ is a global upper bound on the maximum true reward. Note that training the predictors does not cost much effort due to their simple architectures.
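
A minimal PyTorch sketch of this predictor and the MAPE-style loss of Eq. (1) is shown below; layer count, width, and optimizer settings follow the text, while the class and variable names are illustrative.

```python
import torch
import torch.nn as nn

# Minimal sketch of the neural predictor and Eq. (1)-style loss
# (illustrative): 8 fully-connected layers of width 40, Adam at lr 0.01.

class Predictor(nn.Module):
    def __init__(self, in_dim, width=40, depth=8):
        super().__init__()
        layers, d = [], in_dim
        for _ in range(depth - 1):
            layers += [nn.Linear(d, width), nn.ReLU()]
            d = width
        layers.append(nn.Linear(d, 1))  # 8 linear layers in total
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x).squeeze(-1)

def mape_loss(m_pred, m_true, m_ub):
    # m_ub: global upper bound on the true reward; weights candidates
    # with rewards closer to the bound more heavily, as in Eq. (1).
    return ((m_pred - m_ub) / (m_true - m_ub) - 1).abs().mean()

# Usage sketch:
# predictor = Predictor(in_dim=num_binary_features)
# opt = torch.optim.Adam(predictor.parameters(), lr=0.01)
```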

Ensemble of neural predictors. To incorporate BO, an uncertainty estimate for the prediction is also needed, so we adopt an ensemble of neural predictors to provide it. More specifically, we train $P$ neural predictors using different random weight initializations and training data orders. Then, for any candidate $g$, we can obtain the mean and standard deviation of these $P$ predictions. Formally, we train an ensemble of $P$ predictors, $\{f_p\}_{p=1}^P$, where $f_p(g)$ provides a predicted reward for a candidate $g$. The mean prediction and its deviation are given by

$$
\hat{f}(g) = \frac{1}{P}\sum_{p=1}^{P} f_{p}(g), \quad \text{and} \quad \hat{\sigma}(g) = \sqrt{\frac{\sum_{p=1}^{P}\left(f_{p}(g) - \hat{f}(g)\right)^{2}}{P - 1}}. \tag{2}
$$

Selection with acquisition function. After training an ensemble of neural predictors, we can obtain the acquisition function value for candidates in the pool and select a small portion of candidates with the largest acquisition function values. We choose the upper confidence bound (UCB) [55] as the acquisition function, shown below:

$$
\phi_{\mathrm{UCB}}(g) = \hat{f}(g) + \beta \hat{\sigma}(g), \tag{3}
$$

where the tradeoff parameter $\beta$ is set to 0.5.
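
The following sketch ties Eqs. (2) and (3) together as a batched selection step over the predictor ensemble; the tensor layout and function name are illustrative assumptions.

```python
import torch

# Minimal sketch of UCB-based candidate selection over the predictor
# ensemble (illustrative), following Eqs. (2) and (3) with beta = 0.5.

def select_top_b(ensemble, pool_features, b, beta=0.5):
    """pool_features: (C, d) binary encodings of the candidate pool."""
    with torch.no_grad():
        preds = torch.stack([f(pool_features) for f in ensemble])  # (P, C)
    mean = preds.mean(dim=0)                  # f_hat(g), Eq. (2)
    std = preds.std(dim=0, unbiased=True)     # sigma_hat(g), divides by P - 1
    ucb = mean + beta * std                   # Eq. (3)
    return torch.topk(ucb, b).indices         # indices of B candidates
```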

# 5.2.2 Evaluation with Magnitude-based Pruning

After selecting the candidates from the pool, we need to measure the performance of each selected candidate $g$ to update the neural predictors. For faster evaluation, the magnitude-based pruning framework [23] (with two steps, pruning and retraining) is adopted to perform the actual pruning for candidate $g$ and obtain its evaluation performance. Note that multiple candidates can be evaluated in parallel. Once the evaluation finishes, their actual performances are recorded as a reference such that candidate generation can sample better candidates.
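
For illustration, here is a one-shot magnitude-pruning sketch in the spirit of [23]. It shows the simple unstructured variant for brevity; the paper's channel, pattern, and block schemes would instead group weights before thresholding. All names are illustrative.

```python
import torch

# Minimal sketch of one-shot magnitude-based pruning (illustrative):
# zero out the smallest-magnitude weights at the given ratio, then
# briefly retrain the survivors before measuring PSNR as the reward.

def magnitude_prune_(conv_weight, ratio):
    """In-place unstructured variant for brevity; structured schemes
    would rank groups (channels / patterns / blocks) instead."""
    flat = conv_weight.abs().flatten()
    k = int(ratio * flat.numel())
    if k == 0:
        return torch.ones_like(conv_weight, dtype=torch.bool)
    threshold = flat.kthvalue(k).values
    mask = conv_weight.abs() > threshold
    conv_weight.mul_(mask)
    return mask  # reapply after each retraining step to keep sparsity
```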

# 6. Pruning Ratio Determination

After finding the best SR block configuration for each cell and the pruning scheme for each layer, we adopt a pruning ratio determination process to derive the suitable layer-wise pruning ratio. Unlike prior works (e.g., group Lasso regularization [60, 27, 43] or Alternating Direction Method of Multipliers (ADMM) [66, 52, 39]) that suffer from significant accuracy loss or complicated pruning ratio tuning, we adopt the reweighted group Lasso [9, 46] method to determine the layer-wise pruning ratio automatically.

The basic idea is to assign a penalty to each weight or pruning pattern, and to dynamically reweight the penalties. More specifically, during the training (pruning) process, the reweighted method reduces the penalties on weights with larger magnitudes, thus preserving the more critical weights, and increases the penalties on weights with smaller magnitudes, thus suppressing negligible weights. After convergence, the desired pruning ratio for each layer is determined automatically. The reweighted method can be adopted for different pruning schemes and layer types. We show the detailed reweighted pruning algorithm in Appendix B.
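
A minimal sketch of reweighted group-Lasso regularization in the spirit of [9, 46] follows; the grouping, update schedule, and names are illustrative assumptions, and the authors' full algorithm is in their Appendix B.

```python
import torch

# Minimal sketch of reweighted group-Lasso regularization (illustrative):
# penalties shrink for large-magnitude groups and grow for small ones,
# so per-layer pruning ratios emerge on their own after convergence.

def reweighted_penalty(weight_groups, alphas):
    """weight_groups: list of tensors (e.g., channels, patterns, or blocks);
    alphas: current per-group penalty coefficients."""
    return sum(a * g.norm(p=2) for a, g in zip(alphas, weight_groups))

def update_alphas(weight_groups, eps=1e-3):
    # Larger groups -> smaller penalty (preserved); negligible groups ->
    # larger penalty (driven toward zero and eventually pruned).
    return [1.0 / (g.norm(p=2) + eps) for g in weight_groups]

# Training-loop sketch: every few epochs recompute the weights,
#   alphas = update_alphas(groups)
#   loss = task_loss + lam * reweighted_penalty(groups, alphas)
```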

# 7. Experiments

# 7.1. Methodology

Datasets: All SR models were trained on the training set of the DIV2K [4] dataset with 800 training images. For evaluation, four benchmark datasets, Set5 [6], Set14 [62], B100 [48], and Urban100 [29], are employed as test sets, and the PSNR and SSIM indices are calculated on the luminance channel (a.k.a. the Y channel) of the YCbCr color space.

Evaluation Platforms and Running Configurations: The training code is implemented with the PyTorch API, and 8 Nvidia TITAN RTX GPUs are used to conduct the architecture and pruning search. We train an ensemble of 20 predictors, and 8 models are evaluated in parallel at each step. Since we start from a well-trained supernet, we retrain each one-shot pruned candidate model for only 2 epochs for fast evaluation. The search process takes 6 GPU days. The latency is measured on the GPU of an off-the-shelf Samsung Galaxy S20 smartphone, which has the Qualcomm Snapdragon 865 mobile platform with a Qualcomm Kryo 585 Octa-core CPU and a Qualcomm Adreno 650 GPU. Each test takes 50 runs on different inputs, with 8 threads on the CPU and all pipelines on the GPU. As different runs do not vary greatly, only the average time is reported for readability.

# 7.2. Comparison with State-of-the-Art

The comparison of our SR models obtained through the proposed framework with state-of-the-art methods is shown in Table 1. Some extremely large models [68, 67, 14] can take several seconds to upscale a single image even on a large GPU, so those results are not included in Table 1. PSNR and SSIM are adopted as metrics to evaluate the image quality by convention. The evaluations are conducted on tasks with different scales, including $\times 2$, $\times 3$, and $\times 4$. For a fair comparison, we start from different low-resolution inputs, but the outputs have the same high resolution (720p, $1280\times 720$).

To make a comprehensive study, we set the target latency $t$ to different values for each scale. In particular, as real-time execution typically requires at least 20 frames per second (FPS), we adopt $t = 50\mathrm{ms}$ for the $\times 2$ and $\times 3$ upscaling tasks and $t = 40\mathrm{ms}$ for the $\times 4$ upscaling task to obtain models that satisfy the real-time inference requirement. As shown in Table 1, with a target latency $t = 450\mathrm{ms}$, our model outperforms CARN-M and FALSR-C with higher PSNR/SSIM using much fewer MACs for $\times 2$ upscaling. With $t = 150\mathrm{ms}$, our model has better PSNR/SSIM than FSRCNN, MOREMNAS-C, and TPSR-NOGAN with similar or even fewer MACs. Furthermore, both of our models for the two different target latency cases achieve higher PSNR/SSIM with fewer MACs compared with SRCNN and LapSRN. Compared with ESRN-V, EDSR, and WDSR, our model greatly saves MACs while still maintaining high PSNR/SSIM. Notably, by setting $t = 50\mathrm{ms}$ we even obtain an extremely lightweight model that meets the real-time requirement while still maintaining satisfactory PSNR/SSIM. As for the $\times 4$ scaling task, our model obtained with a target latency $t = 120\mathrm{ms}$ outperforms SRCNN, FSRCNN, and FEQE-P in MACs, PSNR, and SSIM on the four datasets. With a target latency $t = 170\mathrm{ms}$, our model outperforms DI-BASED and CARN-M in PSNR/SSIM with similar or even much fewer MACs. Moreover, with $t = 40\mathrm{ms}$, our model attains real-time inference while keeping competitive PSNR/SSIM.


Figure 4. Visual comparison with other SR models on the $\times 4$ scale (img 302008 from B100 and img 074 from Urban100): HR, Bicubic, FSRCNN (12 k, 4.6 G), FEQE-P (96 k, 5.64 G), CARN-M (412 k, 32.5 G), WDSR (1203 k, 69.3 G), and Ours (67.3 k, 3.9 G and 125 k, 7.1 G). Model parameters and MACs are listed under each model name. More results can be found in Appendix D.

# 7.3. Searched Results for Real-Time SR on Mobile

We further examine the real-time performance of our SR models assisted with the compiler-based optimizations. As shown in Figure 5, with the same SR model derived by the proposed method, our method with compiler optimizations achieves the highest FPS for various scales compared with implementations on other acceleration frameworks, including MNN [2] and PyTorch Mobile [3]. The models are obtained by setting $t = 50\mathrm{ms}$ for $\times 2$ and $\times 3$, and $t = 40\mathrm{ms}$ for $\times 4$. We can observe from Figure 5 that our proposed method satisfies the real-time requirement with an FPS higher than 20 for $\times 2$ and $\times 3$, and higher than 25 for $\times 4$.


Figure 5. On-mobile inference comparisons with state-of-the-art mobile acceleration frameworks.

MobiSR and FEQE-P also conduct SR inference on mobile devices. They achieve 2792 ms and 912 ms inference latency on a mobile GPU, respectively, which is far from the real-time requirement. We highlight that we are the first to achieve real-time SR inference (higher than 20 FPS for $\times 2$ and $\times 3$, and 25 FPS for $\times 4$) for 720p resolution upscaling with competitive image quality (in terms of PSNR) on mobile platforms (Samsung Galaxy S20).

| Scale | Model | Params (K) | Multi-Adds (G) | Set5 (PSNR/SSIM) | Set14 (PSNR/SSIM) | B100 (PSNR/SSIM) | Urban100 (PSNR/SSIM) |
|---|---|---|---|---|---|---|---|
| ×2 | SRCNN [16] | 57 | 52.7 | 36.66/0.9542 | 32.42/0.9063 | 31.36/0.8879 | 29.50/0.8946 |
| ×2 | FSRCNN [17] | 12 | 6.0 | 37.00/0.9558 | 32.63/0.9088 | 31.53/0.8920 | 29.88/0.9020 |
| ×2 | MOREMNAS-C [13] | 25 | 5.5 | 37.06/0.9561 | 32.75/0.9094 | 31.50/0.8904 | 29.92/0.9023 |
| ×2 | TPSR-NOGAN [37] | 60 | 14.0 | 37.38/0.9583 | 33.00/0.9123 | 31.75/0.8942 | 30.61/0.9119 |
| ×2 | LAPSRN [34] | 813 | 29.9 | 37.52/0.9590 | 33.08/0.9130 | 31.80/0.8950 | 30.41/0.9100 |
| ×2 | CARN-M [31] | 412 | 91.2 | 37.53/0.9583 | 33.26/0.9141 | 31.92/0.8960 | 31.23/0.9193 |
| ×2 | FALSR-C [12] | 408 | 93.7 | 37.66/0.9586 | 33.26/0.9140 | 31.96/0.8965 | 31.24/0.9187 |
| ×2 | ESRN-V [54] | 324 | 73.4 | 37.85/0.9600 | 33.42/0.9161 | 32.10/0.8987 | 31.79/0.9248 |
| ×2 | EDSR [41] | 1518 | 458.0 | 37.99/0.9604 | 33.57/0.9175 | 32.16/0.8994 | 31.98/0.9272 |
| ×2 | WDSR [64] | 1203 | 274.1 | 38.10/0.9608 | 33.72/0.9182 | 32.25/0.9004 | 32.37/0.9302 |
| ×2 | Ours (t=450ms) | 106 | 24.3 | 37.81/0.9599 | 33.37/0.9153 | 32.07/0.8980 | 31.58/0.9225 |
| ×2 | Ours (t=150ms) | 52 | 11.7 | 37.52/0.9582 | 33.24/0.9140 | 31.88/0.8953 | 31.18/0.9180 |
| ×2 | Ours (t=50ms, real-time) | 14 | 3.1 | 37.32/0.9549 | 33.17/0.9071 | 31.67/0.8885 | 30.35/0.8986 |
| ×4 | SRCNN [16] | 57 | 52.7 | 30.48/0.8628 | 27.49/0.7503 | 26.90/0.7101 | 24.52/0.7221 |
| ×4 | FSRCNN [17] | 12 | 4.6 | 30.71/0.8657 | 27.59/0.7535 | 26.98/0.7150 | 24.62/0.7280 |
| ×4 | TPSR-NOGAN [37] | 61 | 3.6 | 31.10/0.8779 | 27.95/0.7663 | 27.15/0.7214 | 24.97/0.7456 |
| ×4 | FEQE-P [57] | 96 | 5.6 | 31.53/0.8824 | 28.21/0.7714 | 27.32/0.7273 | 25.32/0.7583 |
| ×4 | DI-BASED [28] | 92 | 7.0 | 31.84/0.889 | 28.38/0.775 | 27.40/0.730 | 25.51/0.765 |
| ×4 | CARN-M [31] | 412 | 32.5 | 31.92/0.8903 | 28.42/0.7762 | 27.44/0.7304 | 25.62/0.7694 |
| ×4 | ESRN-V [54] | 324 | 20.7 | 31.99/0.8919 | 28.49/0.7779 | 27.50/0.7331 | 25.87/0.7782 |
| ×4 | EDSR [41] | 1518 | 114.5 | 32.09/0.8938 | 28.58/0.7813 | 27.57/0.7357 | 26.04/0.7849 |
| ×4 | DHP-20 [40] | 790 | 34.1 | 31.94/- | 28.42/- | 27.47/- | 25.69/- |
| ×4 | IMDN [30] | 715 | - | 32.21/0.8948 | 28.58/0.7811 | 27.56/0.7353 | 26.04/0.7838 |
| ×4 | WDSR [64] | 1203 | 69.3 | 32.27/0.8963 | 28.67/0.7838 | 27.64/0.7383 | 26.26/0.7911 |
| ×4 | Ours (t=170ms) | 125 | 7.1 | 31.93/0.8906 | 28.42/0.7763 | 27.44/0.7307 | 25.66/0.7715 |
| ×4 | Ours (t=120ms) | 67 | 3.9 | 31.77/0.8886 | 28.34/0.7730 | 27.33/0.7280 | 25.41/0.7615 |
| ×4 | Ours (t=40ms, real-time) | 12 | 0.7 | 30.74/0.8671 | 27.68/0.7562 | 26.98/0.7156 | 24.65/0.7299 |

$\dagger$ Results on the $\times 3$ scaling task are shown in Appendix C.

Table 1. Comparison of searched results with state-of-the-art efficient SR models.

# 7.4. Ablation Study

We investigate the influence of architecture search and pruning search separately. For $\times 2$ upscaling, architecture search alone achieves a 37.84 PSNR on Set5, slightly higher than ours. But as the computations are not reduced by pruning, it suffers from low inference speed (1.82 FPS). Starting from WDSR blocks, pruning search alone with $t = 150\mathrm{ms}$ achieves 6.8 FPS with a lower PSNR (37.40 on Set5). Thus, we can see that pruning search significantly improves the speed performance, while architecture search helps mitigate the SR performance loss due to pruning.

To promote reproducibility and evaluate speedup using the same framework, we also implement our derived models and other baseline models, including CARN-M [31] and FSRCNN [17], with the open-source MNN framework. We compare their PSNR and FPS performance and observe that we achieve higher FPS and PSNR than the baselines. More details are given in Appendix E.

# 8. Conclusion

We combine architecture search with pruning search and propose an automatic search framework that derives sparse SR models satisfying the real-time execution requirement on mobile devices with competitive image quality.

# Acknowledgment

The work is partly supported by the Army Research Office/Army Research Laboratory via grant W911NF-20-1-0167 (YIP) to Northeastern University, NSF CCF-2047516 (CAREER), and a Jeffress Memorial Trust Award in Interdisciplinary Research to William & Mary.

# References

[1] https://www.tensorflow.org/mobile/tfflite/. 3, 12
[2] https://github.com/alibaba/MNN. 3, 7, 12
[3] https://pytorch.org/mobile/home. 3, 7, 12
[4] Eirikur Agustsson and Radu Timofte. NTIRE 2017 challenge on single image super-resolution: Dataset and study. In The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, July 2017. 7
[5] Arash Ashari, Shirish Tatikonda, Matthias Boehm, Berthold Reinwald, Keith Campbell, John Keenleyside, and P Sadayappan. On optimizing machine learning workloads via kernel fusion. ACM SIGPLAN Notices, 50(8):173-182, 2015. 12
[6] Marco Bevilacqua, Aline Roumy, Christine Guillemot, and Marie Line Alberi-Morel. Low-complexity single-image super-resolution based on nonnegative neighbor embedding. 2012. 7
[7] Jeff Bezanson, Alan Edelman, Stefan Karpinski, and Viral B Shah. Julia: A fresh approach to numerical computing. SIAM Review, 59(1):65-98, 2017. 12
[8] Matthias Boehm, Berthold Reinwald, Dylan Hutchison, Alexandre V Evfimievski, and Prithviraj Sen. On optimizing operator fusion plans for large-scale machine learning in SystemML. arXiv preprint arXiv:1801.00829, 2018. 12
[9] Emmanuel J Candes, Michael B Wakin, and Stephen P Boyd. Enhancing sparsity by reweighted $\ell_1$ minimization. Journal of Fourier Analysis and Applications, 14(5-6):877-905, 2008. 6, 13
[10] Tianqi Chen, Thierry Moreau, et al. TVM: An automated end-to-end optimizing compiler for deep learning. In USENIX, pages 578-594, 2018. 3, 12
[11] Yutian Chen, Aja Huang, et al. Bayesian optimization in AlphaGo. arXiv:1812.06855, 2018. 6
[12] Xiangxiang Chu, Bo Zhang, Hailong Ma, Ruijun Xu, and Qingyuan Li. Fast, accurate and lightweight super-resolution with neural architecture search. arXiv preprint arXiv:1901.07261, 2019. 2, 8
[13] Xiangxiang Chu, Bo Zhang, and Ruijun Xu. Multi-objective reinforced evolution in mobile neural architecture search. In European Conference on Computer Vision (ECCV) Workshops, pages 99-113. Springer, 2020. 2, 8
[14] Tao Dai, Jianrui Cai, Yongbing Zhang, Shu-Tao Xia, and Lei Zhang. Second-order attention network for single image super-resolution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11065-11074, 2019. 2, 7
[15] Xiaoliang Dai, Hongxu Yin, and Niraj Jha. NeST: A neural network synthesis tool based on a grow-and-prune paradigm. IEEE Transactions on Computers, 2019. 2
[16] Chao Dong, Chen Change Loy, Kaiming He, and Xiaoou Tang. Learning a deep convolutional network for image super-resolution. In European Conference on Computer Vision, pages 184-199, 2014. 2, 8, 15
[17] Chao Dong, Chen Change Loy, and Xiaoou Tang. Accelerating the super-resolution convolutional neural network. In European Conference on Computer Vision, pages 391-407. Springer, 2016. 1, 2, 8, 14, 15
[18] Peiyan Dong, Siyue Wang, et al. RTMobile: Beyond real-time mobile acceleration of RNNs for speech recognition. arXiv:2002.11474, 2020. 1, 2, 5
[19] Xin Dong, Shangyu Chen, and Sinno Pan. Learning to prune deep neural networks via layer-wise optimal brain surgeon. In Advances in Neural Information Processing Systems, pages 4860-4874, 2017. 1
[20] Jonathan Frankle and Michael Carbin. The lottery ticket hypothesis: Finding sparse, trainable neural networks. ICLR, 2018. 1, 2
[21] Yifan Gong, Zheng Zhan, Zhengang Li, Wei Niu, Xiaolong Ma, Wenhao Wang, Bin Ren, Caiwen Ding, Xue Lin, Xiaolin Xu, et al. A privacy-preserving-oriented DNN pruning and mobile acceleration framework. In Proceedings of the 2020 on Great Lakes Symposium on VLSI, pages 119-124, 2020. 1, 2
[22] Yiwen Guo, Anbang Yao, and Yurong Chen. Dynamic network surgery for efficient DNNs. In NeurIPS, pages 1379-1387, 2016. 1, 2
[23] Song Han, Jeff Pool, et al. Learning both weights and connections for efficient neural network. In NeurIPS, pages 1135-1143, 2015. 1, 6
[24] Song Han, Jeff Pool, et al. Learning both weights and connections for efficient neural network. In NeurIPS, pages 1135-1143, 2015. 15
[25] Seungyeop Han, Haichen Shen, Matthai Philipose, Sharad Agarwal, Alec Wolman, and Arvind Krishnamurthy. MCDNN: An approximation-based execution framework for deep stream processing under resource constraints. In Proceedings of the 14th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys), pages 123-136. ACM, 2016. 3
[26] Yang He, Ping Liu, et al. Filter pruning via geometric median for deep convolutional neural networks acceleration. In CVPR, 2019. 1, 2
[27] Yihui He, Xiangyu Zhang, and Jian Sun. Channel pruning for accelerating very deep neural networks. In ICCV, pages 1389-1397, 2017. 2, 6, 13
[28] Zejiang Hou and Sun-Yuan Kung. Efficient image super resolution via channel discriminative deep neural network pruning. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 3647-3651. IEEE, 2020. 2, 8
[29] Jia-Bin Huang, Abhishek Singh, and Narendra Ahuja. Single image super-resolution from transformed self-exemplars. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5197-5206, 2015. 7
[30] Zheng Hui, Xinbo Gao, Yunchu Yang, and Xiumei Wang. Lightweight image super-resolution with information multi-distillation network. In Proceedings of the 27th ACM International Conference on Multimedia, pages 2024-2032, 2019. 8
[31] Zheng Hui, Xiumei Wang, and Xinbo Gao. Fast and accurate single image super-resolution via information distillation network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 723-731, 2018. 2, 8, 14, 15
[32] Loc N Huynh, Youngki Lee, and Rajesh Krishna Balan. DeepMon: Mobile GPU-based deep learning framework for continuous vision applications. In Proceedings of the 15th Annual International Conference on Mobile Systems, Applications, and Services (MobiSys), pages 82-95. ACM, 2017. 3
[33] Jiwon Kim, Jung Kwon Lee, and Kyoung Mu Lee. Accurate image super-resolution using very deep convolutional networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1646-1654, 2016. 2
[34] Wei-Sheng Lai, Jia-Bin Huang, Narendra Ahuja, and Ming-Hsuan Yang. Deep Laplacian pyramid networks for fast and accurate super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 624-632, 2017. 8
[35] Nicholas D Lane, Sourav Bhattacharya, Petko Georgiev, Claudio Forlivesi, Lei Jiao, Lorena Qendro, and Fahim Kawsar. DeepX: A software accelerator for low-power deep learning inference on mobile devices. In Proceedings of the 15th International Conference on Information Processing in Sensor Networks, page 23. IEEE Press, 2016. 3
[36] Nicholas D Lane, Petko Georgiev, and Lorena Qendro. DeepEar: Robust smartphone audio sensing in unconstrained acoustic environments using deep learning. In Proceedings of the 2015 ACM International Joint Conference on Pervasive and Ubiquitous Computing, pages 283-294. ACM, 2015. 3
[37] Royson Lee, Łukasz Dudziak, Mohamed Abdelfattah, Stylianos I Venieris, Hyeji Kim, Hongkai Wen, and Nicholas D Lane. Journey towards tiny perceptual super-resolution. In European Conference on Computer Vision (ECCV), pages 85-102. Springer, 2020. 2, 8
[38] Royson Lee, Stylianos I Venieris, Lukasz Dudziak, Sourav Bhattacharya, and Nicholas D Lane. MobiSR: Efficient on-device super-resolution through heterogeneous mobile processors. In The 25th Annual International Conference on Mobile Computing and Networking, pages 1-16, 2019. 2
[39] Tuanhui Li, Baoyuan Wu, Yujiu Yang, Yanbo Fan, Yong Zhang, and Wei Liu. Compressing convolutional neural networks via factorized convolutional filters. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 3977-3986, 2019. 6, 13
[40] Yawei Li, Shuhang Gu, Kai Zhang, Luc Van Gool, and Radu Timofte. DHP: Differentiable meta pruning via hypernetworks. In Computer Vision - ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part VIII, pages 608-624. Springer, 2020. 8
[41] Bee Lim, Sanghyun Son, Heewon Kim, Seungjun Nah, and Kyoung Mu Lee. Enhanced deep residual networks for single image super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 136-144, 2017. 2, 8, 15
[42] Ning Liu, Xiaolong Ma, et al. AutoCompress: An automatic DNN structured pruning framework for ultra-high compression rates. In AAAI, 2020. 1
[43] Zhuang Liu, Jianguo Li, Zhiqiang Shen, Gao Huang, Shoumeng Yan, and Changshui Zhang. Learning efficient convolutional networks through network slimming. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 2736-2744, 2017. 6, 13
[44] Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. Rethinking the value of network pruning. arXiv preprint arXiv:1810.05270, 2018. 1
[45] Xiaolong Ma et al. PCONV: The missing but desirable sparsity in DNN weight pruning for real-time execution on mobile devices. In AAAI, 2020. 1, 2, 3, 5, 12
[46] Xiaolong Ma, Zhengang Li, Yifan Gong, Tianyun Zhang, Wei Niu, Zheng Zhan, Pu Zhao, Jian Tang, Xue Lin, Bin Ren, et al. BLK-REW: A unified block-based DNN pruning framework using reweighted regularization method. arXiv preprint arXiv:2001.08357, 2020. 6
[47] Xiaolong Ma, Geng Yuan, Sheng Lin, Caiwen Ding, Fuxun Yu, Tao Liu, Wujie Wen, Xiang Chen, and Yanzhi Wang. Tiny but accurate: A pruned, quantized and optimized memristor crossbar framework for ultra efficient DNN implementation. In ASP-DAC, 2020. 1
[48] David Martin, Charless Fowlkes, Doron Tal, and Jitendra Malik. A database of human segmented natural images and its application to evaluating segmentation algorithms and measuring ecological statistics. In Proceedings of the Eighth IEEE International Conference on Computer Vision (ICCV 2001), volume 2, pages 416-423. IEEE, 2001. 7
[49] Robert B. Miller. Response time in man-computer conversational transactions. AFIPS 1968 (Fall, part I). 1
[50] Chuhan Min, Aosen Wang, Yiran Chen, Wenyao Xu, and Xin Chen. 2PFPCE: Two-phase filter pruning based on conditional entropy. arXiv preprint arXiv:1809.02220, 2018. 1
[51] Wei Niu et al. PatDNN: Achieving real-time DNN execution on mobile devices with pattern-based weight pruning. arXiv:2001.00138, 2020. 2, 3, 12, 13
[52] Ao Ren, Tianyun Zhang, Shaokai Ye, Jiayu Li, Wenyao Xu, Xuehai Qian, Xue Lin, and Yanzhi Wang. ADMM-NN: An algorithm-hardware co-design framework of DNNs using alternating direction methods of multipliers. In Proceedings of the Twenty-Fourth International Conference on Architectural Support for Programming Languages and Operating Systems, pages 925-938. ACM, 2019. 6, 13, 15
[53] Wenzhe Shi, Jose Caballero, Ferenc Huszar, et al. Real-time single image and video super-resolution using an efficient sub-pixel convolutional neural network. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1874-1883, 2016. 1, 2
[54] Dehua Song, Chang Xu, Xu Jia, Yiyi Chen, Chunjing Xu, and Yunhe Wang. Efficient residual dense block search for image super-resolution. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 12007-12014, 2020. 2, 8, 15
[55] Niranjan Srinivas, Andreas Krause, et al. Gaussian process optimization in the bandit setting: No regret and experimental design. In ICML, 2010. 6
[56] Mingxing Tan, Bo Chen, Ruoming Pang, Vijay Vasudevan, Mark Sandler, Andrew Howard, and Quoc V Le. MnasNet: Platform-aware neural architecture search for mobile. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 2820-2828, 2019. 15
[57] Thang Vu, Cao Van Nguyen, Trung X Pham, Tung M Luu, and Chang D Yoo. Fast and efficient image quality enhancement via desubpixel convolutional neural networks. In Proceedings of the European Conference on Computer Vision (ECCV) Workshops, 2018. 2, 8
[58] Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Hanrui Wang, Yujun Lin, and Song Han. APQ: Joint search for network architecture, pruning and quantization policy. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 2078-2087, 2020. 3, 15
[59] Yanzhi Wang, Shaokai Ye, Zhezhi He, Xiaolong Ma, Linfeng Zhang, Sheng Lin, Geng Yuan, Sia Huat Tan, Zhengang Li, Deliang Fan, et al. Non-structured DNN weight pruning considered harmful. arXiv preprint arXiv:1907.02124, 2019. 2
[60] Wei Wen, Chunpeng Wu, et al. Learning structured sparsity in deep neural networks. In NeurIPS, pages 2074-2082, 2016. 1, 2, 5, 6, 13
[61] Mengwei Xu, Mengze Zhu, Yunxin Liu, Felix Xiaozhu Lin, and Xuanzhe Liu. DeepCache: Principled cache for mobile deep vision. In Proceedings of the 24th Annual International Conference on Mobile Computing and Networking, pages 129-144. ACM, 2018. 3
[62] Jianchao Yang, John Wright, Thomas S Huang, and Yi Ma. Image super-resolution via sparse representation. IEEE Transactions on Image Processing, 19(11):2861-2873, 2010. 7
[63] Shuochao Yao, Shaohan Hu, Yiran Zhao, Aston Zhang, and Tarek Abdelzaher. DeepSense: A unified deep learning framework for time-series mobile sensing data processing. In Proceedings of the 26th International Conference on World Wide Web, pages 351-360, 2017. 3
[64] Jiahui Yu, Yuchen Fan, Jianchao Yang, Ning Xu, Zhaowen Wang, Xinchao Wang, and Thomas Huang. Wide activation for efficient and accurate image super-resolution. arXiv preprint arXiv:1808.08718, 2018. 2, 8, 15
[65] Ruichi Yu, Ang Li, et al. NISP: Pruning networks using neuron importance score propagation. In CVPR, pages 9194-9203, 2018. 2
[66] Tianyun Zhang, Shaokai Ye, et al. Systematic weight pruning of DNNs using alternating direction method of multipliers. ECCV, 2018. 6, 13, 15
[67] Yulun Zhang, Kunpeng Li, Kai Li, Lichen Wang, Bineng Zhong, and Yun Fu. Image super-resolution using very deep residual channel attention networks. In Proceedings of the European Conference on Computer Vision (ECCV), pages 286-301, 2018. 2, 7
[68] Yulun Zhang, Yapeng Tian, Yu Kong, Bineng Zhong, and Yun Fu. Residual dense network for image super-resolution. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2472-2481, 2018. 2, 7
|
| 372 |
+
[69] Zhao Zhong, Junjie Yan, Wei Wu, Jing Shao, and Cheng-Lin Liu. Practical block-wise neural network architecture generation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2423-2432, 2018. 15
|
| 373 |
+
|
| 374 |
+
[70] Xiaotian Zhu, Wengang Zhou, and Houqiang Li. Improving deep neural network sparsity through decorrelation regularization. In IJCAI, 2018. 1
|
| 375 |
+
[71] Zhuangwei Zhuang, Mingkui Tan, et al. Discrimination-aware channel pruning for deep neural networks. In NeurIPS, pages 875-886, 2018. 1
|
achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e21255a0ef1d2fa426cc9229c5ab44b52361889100c27ce9c4ec0e724001c79a
size 439203

achievingonmobilerealtimesuperresolutionwithneuralarchitectureandpruningsearch/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0e9e4b12be56dccfec666c1aac8169a11ce258635fafe9db7d12b62ecc3954ac
size 490867

actionconditioned3dhumanmotionsynthesiswithtransformervae/46ebd66c-652f-4e2e-9f0b-a7a5e111b5b3_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4ab3b4910fd392e992b4a0797e7bc22ba00cf068d64dfb81be6d7cbfd49bff92
size 83170

actionconditioned3dhumanmotionsynthesiswithtransformervae/46ebd66c-652f-4e2e-9f0b-a7a5e111b5b3_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0ed14ef4a9d2cffe79a593f1659de3358d2287263b3dbfc6df49df01ebd62af9
size 106196

actionconditioned3dhumanmotionsynthesiswithtransformervae/46ebd66c-652f-4e2e-9f0b-a7a5e111b5b3_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5aa0dda430840c58efdd0bfb4d024843f9bccc98272473b38531e1afb1ded6a9
size 5028960

actionconditioned3dhumanmotionsynthesiswithtransformervae/full.md
ADDED
@@ -0,0 +1,282 @@
# Action-Conditioned 3D Human Motion Synthesis with Transformer VAE
Mathis Petrovich<sup>1</sup> Michael J. Black<sup>2</sup> Gül Varol<sup>1</sup>

<sup>1</sup> LIGM, École des Ponts, Univ Gustave Eiffel, CNRS, France

<sup>2</sup> Max Planck Institute for Intelligent Systems, Tübingen, Germany

{mathis.petrovich,gul.varol}@enpc.fr, black@tue.mpg.de

https://imagine.enpc.fr/~petrovim/actor

# Abstract

We tackle the problem of action-conditioned generation of realistic and diverse human motion sequences. In contrast to methods that complete, or extend, motion sequences, this task does not require an initial pose or sequence. Here we learn an action-aware latent representation for human motions by training a generative variational autoencoder (VAE). By sampling from this latent space and querying a certain duration through a series of positional encodings, we synthesize variable-length motion sequences conditioned on a categorical action. Specifically, we design a Transformer-based architecture, ACTOR, for encoding and decoding a sequence of parametric SMPL human body models estimated from action recognition datasets. We evaluate our approach on the NTU RGB+D, HumanAct12 and UESTC datasets and show improvements over the state of the art. Furthermore, we present two use cases: improving action recognition through adding our synthesized data to training, and motion denoising. Code and models are available on our project page [53].

# 1. Introduction

Despite decades of research on modeling human motions [4, 5], synthesizing realistic and controllable sequences remains extremely challenging. In this work, our goal is to take a semantic action label like "Throw" and generate an infinite number of realistic 3D human motion sequences, of varying length, that look like natural throwing (Figure 1). A significant amount of prior work has focused on taking one pose, or a sequence of poses, and then predicting future motions [3, 6, 21, 67, 70]. This is an overly constrained scenario because it assumes that one already has a motion sequence and just needs more of it. On the other hand, many applications such as virtual reality and character control [26, 57] require generating motions of a given type (semantic action label) with a specified duration.

![](images/4e1419e191779d9a71a7e6015e8a0784deac4e1427a21f868c5a1d741b0d26eb.jpg)

Figure 1: Goal: Action-Conditioned Transformer VAE (ACTOR) learns to synthesize human motion sequences conditioned on a categorical action and a duration, $T$. Sequences are generated by sampling from a single motion representation latent vector, $z$, as opposed to the frame-level embedding space in prior work.

We address this problem by training an action-conditioned generative model with 3D human motion data that has corresponding action labels. In particular, we construct a Transformer-based encoder-decoder architecture and train it with the VAE objective. We parameterize the human body using SMPL [43] as it can output joint locations or the body surface. This paves the way for better modeling of interaction with the environment, as the surface is necessary to model contact. Moreover, such a representation allows the use of several reconstruction losses: constraining part rotations in the kinematic tree, joint locations, or surface points. The literature [37] and our results suggest that a combination of losses gives the most realistic generated motions.

The key challenge of motion synthesis is to generate sequences that are perceptually realistic while being diverse. Many approaches for motion generation have taken an autoregressive approach such as LSTMs [15] and GRUs [46]. However, these methods typically regress to the mean pose after some time [46] and are subject to drift. The key novelty in our Transformer model is to provide positional encodings to the decoder and to output the full sequence at once. Positional encoding has been popularized by recent work on neural radiance fields [47]; we have not seen it used for motion generation as we do. This allows the generation of variable-length sequences without the problem of the motions regressing to the mean pose. Moreover, our approach is, to our knowledge, the first to create an action-conditioned sequence-level embedding. The closest work is Action2Motion [20], which, in contrast, presents an autoregressive approach where the latent representation is at the frame level. Obtaining a sequence-level embedding requires pooling the time dimension: we introduce a new way of combining Transformers and VAEs for this purpose, which also significantly improves performance over baselines.

A challenge specific to our action-conditioned generation problem is that there exists limited motion capture (MoCap) data paired with distinct action labels, typically on the order of 10 categories [29, 59]. We instead rely on monocular motion estimation methods [35] to obtain 3D sequences for actions and present promising results on 40 fine-grained categories of the UESTC action recognition dataset [30]. In contrast to [20], we do not require multi-view cameras to process monocular trajectory estimates, which makes our model potentially applicable at larger scales. Despite being noisy, monocular estimates prove sufficient for training and, as a side benefit of our model, we are able to denoise the estimated sequences by encoding-decoding them through our learned motion representation.

An action-conditioned generative model can augment existing MoCap datasets, which are expensive and limited in size [45, 59]. Recent work, which renders synthetic human action videos for training action recognition models [61], shows the importance of motion diversity and large amounts of data per action. Such approaches can benefit from an infinite source of action-conditioned motion synthesis. We explore this through our experiments on action recognition. We observe that, despite a domain gap, the generated motions can serve as additional training data, especially in low-data regimes. Finally, a compact action-aware latent space for human motions can be used as a prior in other tasks such as human motion estimation from videos.

Our contributions are fourfold: (i) we introduce ACTOR, a novel Transformer-based conditional VAE, and train it to generate action-conditioned human motions by sampling from a sequence-level latent vector; (ii) we demonstrate that it is possible to learn to generate realistic 3D human motions using noisy 3D body poses estimated from monocular video; (iii) we present a comprehensive ablation study of the architecture and loss components, obtaining state-of-the-art performance on multiple datasets; (iv) we illustrate two use cases for our model in action recognition and MoCap denoising. The code is available on our project page [53].
# 2. Related Work
We briefly review relevant literature on motion prediction, motion synthesis, and monocular motion estimation, as well as Transformers in the context of VAEs.

Future human motion prediction. Research on human motion analysis has a long history dating back to the 1980s [5, 16, 18, 49]. Given past motion or an initial pose, predicting future frames has been referred to as motion prediction. Statistical models were employed in earlier studies [7, 17]. Recently, several works have shown promising results following progress in generative models with neural networks, such as GANs [19] or VAEs [34]. Examples include HP-GAN [6] and recurrent VAEs [21] for future motion prediction. Most work treats the body as a skeleton, though recent work exploits full 3D body shape models [3, 70]. Similar to [70], we also go beyond sparse joints and incorporate vertices on the body surface. DLow [67] focuses on diversifying the sampling of future motions from a pretrained model. [10] performs conditional future prediction using contextual cues about the object interaction. Very recently, [39] presented a Transformer-based method for dance generation conditioned on music and past motion. Duan et al. [13] use Transformers for motion completion. There is a related line of work on motion "in-betweening" that takes both past and future poses and "inpaints" plausible motions between them; see [22] for more. In contrast to this prior work, our goal is to synthesize motions without any past observations.

Human motion synthesis. While there is a vast literature on future prediction, synthesis from scratch has received relatively less attention. Very early work used PCA [48] and GPLVMs [60] to learn statistical models of cyclic motions like walking and running. Conditioning synthesis on multiple, varied, actions is much harder. DVGANs [40] train a generative model conditioned on a short text representing actions in MoCap datasets such as Human3.6M [28, 29] and CMU [59]. Text2Action [1] and Language2Pose [2] similarly explore conditioning the motion generation on textual descriptions. Music-to-Dance [36] and [38] study music-conditioned generation. QuaterNet [52] focuses on generating locomotion actions such as walking and running given a ground trajectory and average speed. [65] presents a convolution-based generative model for realistic, but unconstrained motions without specifying an action. Similarly, [69] synthesizes arbitrary sequences, focusing on motions unbounded in time.

Methods for unconstrained motion synthesis are often dominated by actions such as walking and running. In contrast, our model is able to sample from more general, acyclic, pre-defined action categories, compatible with action recognition datasets. In this direction, [71] introduces a Bayesian approach, where Hidden semi-Markov Models are used for jointly training generative and discriminative models. Similar to us, [71] shows that their generated motions can serve as additional training data for action recognition. However, their generated sequences are pseudo-labelled with actions according to the discriminator classification results. On the other hand, our conditional model can synthesize motions in a controlled way, e.g., with a balanced training set. Most similar to our work is Action2Motion [20], a per-frame VAE on actions, using a GRU-based architecture. Our sequence-level VAE latent space, in conjunction with the Transformer-based design, provides significant advantages, as shown in our experiments.

![](images/f61b8558988b3a1c8ba0d748766c1a66858b4d4db62a7d92208e18e7f28b360e.jpg)

Figure 2: Method overview: We illustrate the encoder (left) and the decoder (right) of our Transformer-based VAE model that generates action-conditioned motions. Given a sequence of body poses $P_{1}, \ldots, P_{T}$ and an action label $a$, the encoder outputs distribution parameters on which we define a KL loss $(\mathcal{L}_{KL})$. We use extra learnable tokens per action ($\mu_{a}^{token}$ and $\Sigma_{a}^{token}$) as a way to obtain $\mu$ and $\Sigma$ from the Transformer encoder. Using $\mu$ and $\Sigma$, we sample the motion latent representation $z \in \mathbf{M}$. The decoder takes the latent vector $z$, an action label $a$, and a duration $T$ as input. The action determines the learnable additive token $b_{a}^{token}$, and the duration determines the number of positional encodings (PE) input to the decoder. The decoder outputs the whole sequence $\widehat{P}_1, \ldots, \widehat{P}_T$, against which the reconstruction loss $\mathcal{L}_P$ is computed. In addition, we compute vertices with a differentiable SMPL layer to define a vertex loss $(\mathcal{L}_V)$. For training, $z$ is obtained as the output of the encoder; for generation, it is randomly sampled from a Gaussian distribution.

Other recent works [23, 68] use normalizing flows to address human motion estimation and generation problems. Several works [27, 33, 63] learn a motion manifold and use it for motion denoising, which is one of our use cases.

There is also a significant graphics literature on the topic, which tends to focus on animator control. See, for example, [25] on learned motion matching and [37] on character animation. Most relevant here are the phase-functioned neural networks [26] and neural state machines [57]. Both exploit the notion of actions being driven by the phase of a sinusoidal function. This is related to the idea of positional encoding, but unlike our approach, their methods require manual labor to segment actions and build these phase functions.

Monocular human motion estimation. Motion estimation from videos [32, 35, 44] has recently made significant progress but is beyond our scope. In this work, we adopt VIBE [35] to obtain training motion sequences from action-labelled video datasets.

Transformer VAEs. Recent successes of Transformers in language tasks have increased interest in attention-based neural network models. Several works use Transformers in conjunction with generative VAE training. Particular examples include story generation [14], sentiment analysis [9], response generation [41], and music generation [31]. The work of [31] learns latent embeddings per timeframe, while [9] averages the hidden states to obtain a single latent code. On the other hand, [14] performs attention averaging to pool over time. In contrast to these works, we adopt learnable tokens as in [11, 12] to summarize the input into a sequence-level embedding.
# 3. Action-Conditioned Motion Generation
Problem definition. Actions defined by body motions can be characterized by the rotations of body parts, independent of identity-specific body shape. To be able to generate motions with actors of different morphology, it is desirable to disentangle the pose and the shape. Consequently, without loss of generality, we employ the SMPL body model [43], which is a disentangled body representation (similar to recent models [50, 51, 54, 64]). Ignoring shape, our goal is then to generate a sequence of pose parameters. More formally, given an action label $a$ (from a set of predefined action categories $a \in A$) and a duration $T$, we generate a sequence of body poses $R_1, \ldots, R_T$ and a sequence of translations of the root joint represented as displacements, $D_1, \ldots, D_T$ (with $D_t \in \mathbb{R}^3$, $\forall t \in \{1, \ldots, T\}$).

Motion representation. SMPL pose parameters represent, per frame, 23 joint rotations in the kinematic tree and one global rotation. We adopt the continuous 6D rotation representation for training [72], making $R_{t} \in \mathbb{R}^{24 \times 6}$. Let $P_{t}$ be the combination of $R_{t}$ and $D_{t}$, representing the pose and location of the body in a single frame, $t$. The full motion is the sequence $P_{1},\ldots ,P_{T}$. Given a generator output pose $P_{t}$ and any shape parameter, we can obtain body mesh vertices $(V_{t})$ and body joint coordinates $(J_{t})$ differentiably using [43].
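To make the 6D representation concrete, below is a minimal PyTorch sketch (our own illustration, not the authors' code) of the standard mapping from a 6D vector to a rotation matrix via Gram-Schmidt orthogonalization, following Zhou et al. [72]; the function name and row/column convention are our choices.

```python
import torch
import torch.nn.functional as F

def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor:
    """Map (..., 6) continuous 6D rotations to (..., 3, 3) matrices [72].

    The 6D vector stores two 3D vectors; Gram-Schmidt recovers an
    orthonormal pair, and their cross product completes the frame.
    """
    a1, a2 = d6[..., :3], d6[..., 3:]
    b1 = F.normalize(a1, dim=-1)
    # Remove the component of a2 along b1, then normalize.
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)
    b3 = torch.cross(b1, b2, dim=-1)
    return torch.stack((b1, b2, b3), dim=-2)

# One frame of a pose: 24 rotations (23 joints + global), each in 6D.
R_t = rotation_6d_to_matrix(torch.randn(24, 6))  # -> (24, 3, 3)
```

The representation is continuous (unlike Euler angles or quaternions under this parameterization), which is the property [72] argues makes it easier to regress with neural networks.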
# 3.1. Conditional Transformer VAE for Motions
We employ a conditional variational autoencoder (CVAE) model [56] and input the action category information to both the encoder and the decoder. More specifically, our model is an action-conditioned Transformer VAE (ACTOR), whose encoder and decoder consist of Transformer layers (see Figure 2 for an overview).

Encoder. The encoder takes an arbitrary-length sequence of poses and an action label $a$ as input, and outputs distribution parameters $\mu$ and $\Sigma$ of the motion latent space. Using the reparameterization trick [34], we sample from this distribution a latent vector $z \in \mathbf{M}$ with $\mathbf{M} \subset \mathbb{R}^d$. All the input pose parameters $(R)$ and translations $(D)$ are first linearly embedded into an $\mathbb{R}^d$ space. As we embed arbitrary-length sequences into one latent space (a sequence-level embedding), we need to pool the temporal dimension. In other domains, a [class] token has been introduced for pooling purposes, e.g., in NLP with BERT [11] and more recently in computer vision with ViT [12]. Inspired by this approach, we similarly prepend the inputs with learnable tokens and only use the corresponding encoder outputs as a way to pool the time dimension. To this end, we include two extra learnable parameters per action, $\mu_a^{token}$ and $\Sigma_a^{token}$, which we call "distribution parameter tokens". We append the embedded pose sequence to these tokens. The resulting Transformer encoder input is then summed with positional encodings in the form of sinusoidal functions. We obtain the distribution parameters $\mu$ and $\Sigma$ by taking the first two outputs of the encoder, corresponding to the distribution parameter tokens (i.e., discarding the rest).
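To make the token-based pooling concrete, here is a minimal PyTorch sketch of such an encoder, written under stated assumptions: the dimensions, head/layer counts, and names (`ActorStyleEncoder`, `sinusoidal_pe`) are illustrative rather than the authors' exact configuration, and treating the $\Sigma$ output as a log-variance is our assumption.

```python
import math
import torch
import torch.nn as nn

def sinusoidal_pe(length: int, d: int) -> torch.Tensor:
    """Standard sinusoidal positional encodings, shape (length, d)."""
    pos = torch.arange(length, dtype=torch.float32).unsqueeze(1)
    div = torch.exp(torch.arange(0, d, 2, dtype=torch.float32) * (-math.log(10000.0) / d))
    pe = torch.zeros(length, d)
    pe[:, 0::2] = torch.sin(pos * div)
    pe[:, 1::2] = torch.cos(pos * div)
    return pe

class ActorStyleEncoder(nn.Module):
    """Transformer encoder pooled through per-action distribution tokens (a sketch)."""

    def __init__(self, num_actions: int, pose_dim: int = 24 * 6 + 3, d: int = 256):
        super().__init__()
        self.embed = nn.Linear(pose_dim, d)  # linear embedding of (R_t, D_t)
        # One mu token and one Sigma token per action ("distribution parameter tokens").
        self.mu_token = nn.Parameter(torch.randn(num_actions, d))
        self.sigma_token = nn.Parameter(torch.randn(num_actions, d))
        layer = nn.TransformerEncoderLayer(d_model=d, nhead=4, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=8)
        self.d = d

    def forward(self, poses: torch.Tensor, action: torch.Tensor):
        # poses: (batch, T, pose_dim); action: (batch,) integer labels.
        x = self.embed(poses)
        tokens = torch.stack((self.mu_token[action], self.sigma_token[action]), dim=1)
        x = torch.cat((tokens, x), dim=1)                 # prepend the two tokens
        x = x + sinusoidal_pe(x.size(1), self.d).to(x.device)
        out = self.encoder(x)
        mu, logvar = out[:, 0], out[:, 1]                 # keep only the token outputs
        z = mu + torch.exp(0.5 * logvar) * torch.randn_like(mu)  # reparameterization
        return z, mu, logvar
```

The design choice worth noting is that the pooling is action-dependent: since the tokens are indexed by the label, the same pose sequence is summarized differently under different action hypotheses.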
Decoder. Given a single latent vector $z$ and an action label $a$, the decoder generates a realistic human motion of a given duration in one shot (i.e., not autoregressively). We use a Transformer decoder model where we feed the time information as the query (in the form of $T$ sinusoidal positional encodings), and the latent vector, combined with the action information, as the key and value. To incorporate the action information, we simply add a learnable bias $b_{a}^{token}$ to shift the latent representation to an action-dependent space. The Transformer decoder outputs a sequence of $T$ vectors in $\mathbb{R}^d$, from which we obtain the final poses $\widehat{P}_1, \ldots, \widehat{P}_T$ through a linear projection. A differentiable SMPL layer is used to obtain vertices and joints given the pose parameters output by the decoder.
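A matching sketch of the one-shot decoder, under the same assumptions as the encoder sketch above (it reuses the `sinusoidal_pe` helper defined there; all names and sizes are illustrative):

```python
import torch
import torch.nn as nn

class ActorStyleDecoder(nn.Module):
    """One-shot Transformer decoder queried by positional encodings (a sketch)."""

    def __init__(self, num_actions: int, pose_dim: int = 24 * 6 + 3, d: int = 256):
        super().__init__()
        self.action_bias = nn.Parameter(torch.randn(num_actions, d))  # b_a^token
        layer = nn.TransformerDecoderLayer(d_model=d, nhead=4, batch_first=True)
        self.decoder = nn.TransformerDecoder(layer, num_layers=8)
        self.project = nn.Linear(d, pose_dim)  # back to SMPL pose + root displacement
        self.d = d

    def forward(self, z: torch.Tensor, action: torch.Tensor, T: int) -> torch.Tensor:
        # z: (batch, d); action: (batch,); T: requested number of frames.
        memory = (z + self.action_bias[action]).unsqueeze(1)   # key/value of length 1
        queries = sinusoidal_pe(T, self.d).to(z.device)        # time information
        queries = queries.unsqueeze(0).expand(z.size(0), -1, -1)
        out = self.decoder(tgt=queries, memory=memory)         # full sequence at once
        return self.project(out)                                # (batch, T, pose_dim)
```

Because the only time-dependent input is the stack of positional encodings, the duration is controlled purely by how many encodings are queried, with no recurrence and hence no drift toward a mean pose.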
# 3.2. Training
We define several loss terms to train our model and present an ablation study in Section 4.2.

Reconstruction loss on pose parameters $(\mathcal{L}_P)$. We use an L2 loss between the ground-truth poses $P_{1},\ldots ,P_{T}$ and our predictions $\widehat{P}_1,\ldots ,\widehat{P}_T$: $\mathcal{L}_P = \sum_{t = 1}^T \| P_t - \widehat{P}_t \|_2^2$. Note that this loss covers both the SMPL rotations and the root translations. When we experiment with discarding the translations, we break this term into two: $\mathcal{L}_R$ and $\mathcal{L}_D$, for rotations and translations, respectively.

Reconstruction loss on vertex coordinates $(\mathcal{L}_V)$. We feed the SMPL poses $P_{t}$ and $\widehat{P}_{t}$ to a differentiable SMPL layer (without learnable parameters) with a mean shape (i.e., $\beta = \vec{0}$) to obtain the root-centered mesh vertices $V_{t}$ and $\widehat{V}_{t}$, and define the L2 loss $\mathcal{L}_V = \sum_{t=1}^{T} \| V_t - \widehat{V}_t \|^2_2$. We further experiment with a loss $\mathcal{L}_J$ on a sparser set of points, namely the joint locations $\widehat{J}_t$ obtained through the SMPL joint regressor. However, as shown in Section 4.2, we do not include this term in the final model.

KL loss $(\mathcal{L}_{KL})$. As in a standard VAE, we regularize the latent space by encouraging it to be similar to a Gaussian distribution with $\mu$ the null vector and $\Sigma$ the identity matrix. We minimize the Kullback-Leibler (KL) divergence between the encoder distribution and this target distribution.

The resulting total loss is the sum of the different terms: $\mathcal{L} = \mathcal{L}_P + \mathcal{L}_V + \lambda_{KL}\mathcal{L}_{KL}$. We empirically show the importance of the weighting $\lambda_{KL}$ (equivalent to the $\beta$ term in $\beta$-VAE [24]) for obtaining a good trade-off between diversity and realism (see Section A.1 of the appendix). The remaining loss terms are simply weighted equally; further improvements are potentially possible with tuning. We use the AdamW optimizer with a fixed learning rate of 0.0001. The minibatch size is set to 20, and we found that performance is sensitive to this hyperparameter (see Section A.2 of the appendix). We train our model for 2000, 5000 and 1000 epochs on the NTU-13, HumanAct12 and UESTC datasets, respectively. Overall, more epochs produce improved performance, but we stop training to retain a low computational cost. Note that, to allow faster iterations, for the ablations on losses and architecture we train our models for 1000 epochs on NTU-13 and 500 epochs on UESTC. The remaining implementation details can be found in Section C of the appendix.
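For reference, the KL term has the usual closed form for a diagonal Gaussian posterior against $\mathcal{N}(0, I)$; a minimal sketch of the total objective (our own illustration, with the $\lambda_{KL}$ value as a placeholder, not the paper's reported setting) could look like:

```python
import torch

def vae_loss(P, P_hat, V, V_hat, mu, logvar, lambda_kl: float = 1e-5):
    """Total objective L = L_P + L_V + lambda_KL * L_KL (a sketch).

    P, P_hat: (batch, T, pose_dim) ground-truth and predicted pose parameters.
    V, V_hat: (batch, T, num_vertices, 3) SMPL vertices for the mean shape.
    mu, logvar: (batch, d) encoder outputs; the closed-form KL to N(0, I) is
    -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2).
    """
    l_p = ((P - P_hat) ** 2).sum()
    l_v = ((V - V_hat) ** 2).sum()
    l_kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum()
    return l_p + l_v + lambda_kl * l_kl
```

A small $\lambda_{KL}$ trades latent-space regularity against reconstruction fidelity, which is exactly the diversity/realism trade-off studied in the appendix.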
# 4. Experiments
We first introduce the datasets and performance measures used in our experiments (Section 4.1). Next, we present an ablation study (Section 4.2) and compare to previous work (Section 4.3). Then, we illustrate use cases in action recognition (Section 4.4). Finally, we provide qualitative results and discuss limitations (Section 4.5).

# 4.1. Datasets and evaluation metrics

We use three datasets originally proposed for action recognition, mainly for skeleton-based inputs. Each dataset is temporally trimmed around one action per sequence. Next, we briefly describe them.
<table><tr><td rowspan="2">Loss</td><td colspan="5">UESTC</td><td colspan="4">NTU-13</td></tr><tr><td>FID<sub>tr</sub>↓</td><td>FID<sub>test</sub>↓</td><td>Acc.↑</td><td>Div.→</td><td>Multimod.→</td><td>FID<sub>tr</sub>↓</td><td>Acc.↑</td><td>Div.→</td><td>Multimod.→</td></tr><tr><td>Real</td><td>2.93±0.26</td><td>2.79±0.29</td><td>98.8±0.1</td><td>33.34±0.32</td><td>14.16±0.06</td><td>0.02±0.00</td><td>99.8±0.0</td><td>7.07±0.02</td><td>2.27±0.01</td></tr><tr><td>$\mathcal{L}_J$</td><td>3M*</td><td>3M*</td><td>3.3±0.2</td><td>267.68±346.06</td><td>153.62±50.62</td><td>0.49±0.00</td><td>93.6±0.2</td><td>7.04±0.04</td><td>2.12±0.01</td></tr><tr><td>$\mathcal{L}_R$</td><td>292.54±113.35</td><td>316.29±26.05</td><td>42.4±1.7</td><td>23.16±0.47</td><td>14.37±0.08</td><td>0.23±0.00</td><td>95.4±0.2</td><td>7.08±0.04</td><td>2.18±0.02</td></tr><tr><td>$\mathcal{L}_V$</td><td>4M*</td><td>4M*</td><td>2.7±0.2</td><td>314.66±476.18</td><td>169.49±27.90</td><td>0.25±0.00</td><td>95.8±0.3</td><td>7.08±0.04</td><td>2.07±0.01</td></tr><tr><td>$\mathcal{L}_R+\mathcal{L}_V$</td><td>20.49±2.31</td><td>23.43±2.20</td><td>91.1±0.3</td><td>31.96±0.36</td><td>14.66±0.03</td><td>0.19±0.00</td><td>96.2±0.2</td><td>7.09±0.04</td><td>2.08±0.01</td></tr></table>

Table 1: Reconstruction loss: We define the loss on the SMPL pose parameters, which represent the rotations in the kinematic tree $(\mathcal{L}_R)$, on the joint coordinates $(\mathcal{L}_J)$, and on the vertex coordinates $(\mathcal{L}_V)$. We show that constraining both rotations and vertex coordinates is critical to obtain smooth motions. In particular, coordinate-based losses alone do not converge to a meaningful solution on UESTC (*). → means motions are better when the metric is closer to that of the real data.

NTU RGB+D dataset [42, 55]. To be able to compare to the work of [20], we use their subset of 13 action categories. [20] provides SMPL parameters obtained through VIBE estimations. Their 3D root translations, obtained through multi-view constraints, are not publicly available; therefore, we use their approximately origin-centered version. We refer to this data as NTU-13 and use it for training.

HumanAct12 dataset [20]. Similarly, we use this data for state-of-the-art comparison. HumanAct12 is adapted from the PHSPD dataset [73], which releases SMPL pose parameters and root translations in camera coordinates for 1191 videos. HumanAct12 temporally trims the videos, annotates them into 12 action categories, and only provides their joint coordinates in a canonical frame. We also process the SMPL poses to align them to the frontal view.

UESTC dataset [30]. This recent dataset consists of 25K sequences across 40 action categories (mostly exercises, some of which represent cyclic movements). To obtain SMPL sequences, we apply VIBE on each video and, in case there are multiple people, select the person track that corresponds best to the Kinect skeleton provided. We use all 8 static viewpoints (we discard the rotating camera) and canonicalize all bodies to the frontal view. We use the official cross-subject protocol to separate train and test splits, instead of the cross-view protocols, since generating different viewpoints is trivial for our model. This results in 10650 training sequences that we use for learning the generative model as well as the recognition model; the effective diversity of this set can be seen as 33 sequences per action on average (10K divided by 8 views and 40 actions). The remaining 13350 sequences are used for testing. Since the protocols on NTU-13 and HumanAct12 do not provide test splits, we rely on UESTC for recognition experiments.

Evaluation metrics. We follow the performance measures employed in [20] for quantitative evaluations. We measure FID, action recognition accuracy, overall diversity, and per-action diversity (referred to as multimodality in [20]). All of these metrics use a pretrained action recognition model, either to extract motion features for computing FID, diversity, and multimodality, or directly for recognition accuracy. For experiments on NTU-13 and HumanAct12, we directly use the provided recognition models of [20], which operate on joint coordinates. For UESTC, we train our own recognition model based on pose parameters expressed as 6D rotations (we observed that the joint-based models of [20] are sensitive to global viewpoint changes). We generate sets of sequences 20 times with different random seeds and report the average together with the confidence interval at $95\%$. We refer to [20] for further details. One difference in our evaluation is the use of the average shape parameter $(\beta = \vec{0})$ when obtaining joint coordinates from the mesh for both real and generated sequences. Note also that [20] only reports the FID score computed against the training split $(\mathrm{FID_{tr}})$, since the NTU-13 and HumanAct12 datasets do not provide test splits. On UESTC, we additionally provide an FID score on the test split, $\mathrm{FID}_{test}$, which we rely on most to draw conclusions.
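As a reminder of how FID is computed from the recognition features, here is the standard Fréchet-distance formula as a sketch (a generic implementation, not the paper's evaluation code; the feature arrays are hypothetical inputs):

```python
import numpy as np
from scipy import linalg

def fid(feats_real: np.ndarray, feats_gen: np.ndarray) -> float:
    """Fréchet distance between Gaussians fitted to two sets of features.

    feats_real, feats_gen: (num_sequences, feature_dim) activations of a
    pretrained action recognition model on real and generated motions.
    """
    mu_r, mu_g = feats_real.mean(axis=0), feats_gen.mean(axis=0)
    cov_r = np.cov(feats_real, rowvar=False)
    cov_g = np.cov(feats_gen, rowvar=False)
    covmean = linalg.sqrtm(cov_r @ cov_g).real  # matrix square root
    return float(((mu_r - mu_g) ** 2).sum() + np.trace(cov_r + cov_g - 2 * covmean))
```

Lower is better for FID, while diversity and multimodality are best when close to the values measured on real data, hence the → notation in the tables.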
# 4.2. Ablation study

We first ablate several components of our approach in a controlled setup, studying the loss and the architecture.

Loss study. Here, we investigate the influence of the reconstruction loss formulation when using the parametric SMPL body model in our VAE. We experiment with using (i) only the rotation parameters $\mathcal{L}_R$, (ii) only the joint coordinates $\mathcal{L}_J$, (iii) only the vertex coordinates $\mathcal{L}_V$, and (iv) the combination $\mathcal{L}_R + \mathcal{L}_V$. Here, we initially discard the root translation to only assess the pose representation. Note that for representing the rotation parameters, we use the 6D representation from [72] (further studies on losses with different rotation representations can be found in Section A.4 of the appendix). In Table 1, we observe that a single loss is not sufficient to constrain the problem; in particular, losses on the coordinates alone do not converge to a meaningful solution on UESTC. On NTU-13, qualitatively, we also observe invalid body shapes, since joint locations alone do not fully constrain the rotations along limb axes. We provide examples in our qualitative analysis. We conclude that using a combined loss significantly improves the results, constraining the pose space more effectively. We further provide an experiment on the influence of the weight parameter $\lambda_{KL}$ controlling the KL divergence loss term $\mathcal{L}_{KL}$ in Section A.1 of the appendix and note its importance for obtaining high diversity performance.

Root translation. Since we estimate the 3D human body motion from a monocular camera, obtaining the 3D trajectory of the root joint is not trivial for real training sequences and is subject to depth ambiguity. We assume a fixed focal length and approximate the distance from the camera based on the ratio between the 3D body height and the 2D projected height. Similar to [61], we observe reliable translation in the $xy$ image plane, but considerable noise in the $z$ depth. Nevertheless, we still train with this type of data and visualize generated examples in Figure 3 with and without the loss on translation $\mathcal{L}_D$. Certain actions are defined by their trajectory (e.g., 'Left Stretching'), and we are able to generate the semantically relevant translations despite noisy data. Compared to the real sequences, we observe much less noise in our generated sequences (see the supplemental video at [53]).

<table><tr><td rowspan="2">Architecture</td><td colspan="5">UESTC</td><td colspan="4">NTU-13</td></tr><tr><td>FID<sub>tr</sub>↓</td><td>FID<sub>test</sub>↓</td><td>Acc.↑</td><td>Div.→</td><td>Multimod.→</td><td>FID<sub>tr</sub>↓</td><td>Acc.↑</td><td>Div.→</td><td>Multimod.→</td></tr><tr><td>Real</td><td>2.93±0.26</td><td>2.79±0.29</td><td>98.8±0.1</td><td>33.34±0.32</td><td>14.16±0.06</td><td>0.02±0.00</td><td>99.8±0.0</td><td>7.07±0.02</td><td>2.27±0.01</td></tr><tr><td>Fully connected</td><td>562.09±48.12</td><td>548.13±38.34</td><td>10.5±0.5</td><td>12.96±0.11</td><td>10.87±0.05</td><td>0.47±0.00</td><td>88.7±0.6</td><td>6.93±0.03</td><td>3.05±0.01</td></tr><tr><td>GRU</td><td>25.96±3.02</td><td>27.08±2.98</td><td>87.3±0.4</td><td>30.66±0.33</td><td>15.24±0.08</td><td>0.28±0.00</td><td>94.8±0.2</td><td>7.08±0.04</td><td>2.20±0.01</td></tr><tr><td>Transformer</td><td>20.49±2.31</td><td>23.43±2.20</td><td>91.1±0.3</td><td>31.96±0.36</td><td>14.66±0.03</td><td>0.19±0.00</td><td>96.2±0.2</td><td>7.09±0.04</td><td>2.08±0.01</td></tr><tr><td>a) w/ autoreg. decoder</td><td>55.75±2.62</td><td>60.10±4.87</td><td>88.4±0.6</td><td>33.46±0.69</td><td>10.62±0.10</td><td>2.62±0.01</td><td>88.0±0.5</td><td>6.80±0.03</td><td>1.76±0.01</td></tr><tr><td>b) w/out $\mu_a^{token}$, $\Sigma_a^{token}$</td><td>27.46±3.43</td><td>31.37±3.04</td><td>86.2±0.4</td><td>31.82±0.38</td><td>15.71±0.12</td><td>0.26±0.00</td><td>94.7±0.2</td><td>7.09±0.03</td><td>2.15±0.01</td></tr><tr><td>c) w/out $b_a^{token}$</td><td>24.38±2.37</td><td>28.52±2.55</td><td>89.4±0.7</td><td>32.11±0.33</td><td>14.52±0.09</td><td>0.16±0.00</td><td>96.2±0.2</td><td>7.08±0.04</td><td>2.19±0.02</td></tr></table>

Table 2: Architecture: We compare various architectural designs, such as the encoder and the decoder of the VAE, and different components of the Transformer model, on both the NTU-13 and UESTC datasets.

![](images/0e1d4035d9576e78a521ffb242a27db88a30eaaca5ed2f26ee4e350ff20e7602.jpg)

Figure 3: Generating the 3D root translation: Despite our model learning from noisy 3D trajectories, we show that our generations are smooth and capture the semantics of the action. Examples are provided from the UESTC dataset for translations in $x$ ('Left Stretching'), $y$ ('Rope Skipping'), and $z$ ('Forward Lunging'), with and without the loss on the root displacement $\mathcal{L}_D$.

Architecture design. Next, we ablate several architectural choices. The first question is whether an attention-based design (i.e., a Transformer) has advantages over more widely used alternatives such as a simple fully-connected autoencoder or a GRU-based recurrent neural network. In Table 2, we see that our Transformer model outperforms both fully-connected and GRU encoder-decoder architectures on two datasets by a large margin. In contrast to the fully-connected architecture, we are also able to handle variable-length sequences. We further note that our sequence-level decoding strategy is key to obtaining an improvement with Transformers, as opposed to an autoregressive Transformer decoder as in [62] (Table 2, a). At training time, the autoregressive model uses teacher forcing, i.e., using the ground-truth pose of the previous frame. This creates a gap with test time, where we observed poor autoencoding reconstructions, such as decoding a left-hand-waving encoding into right-hand waving.

We also provide a controlled experiment by changing certain blocks of our Transformer VAE. Specifically, we remove the $\mu_{a}^{token}$ and $\Sigma_{a}^{token}$ distribution parameter tokens and instead obtain $\mu$ and $\Sigma$ by averaging the outputs of the encoder, followed by two linear layers (Table 2, b). This results in a considerable drop in performance. Moreover, we investigate the additive $b_{a}^{token}$ token and replace it with a one-hot encoding of the action label concatenated to the latent vector, followed by a linear projection (Table 2, c). Although this slightly improves the results on the NTU-13 dataset, we observe a large decrease in performance on the UESTC dataset, which has a larger number of action classes.

Based on an architectural ablation of the number of Transformer layers (see Section A.3 of the appendix), we set this parameter to 8.

![](images/1f0932dd663adf20401bbda161f2823a544b63cb2010827d9ed5131dfcfdcb89.jpg)

Figure 4: Generating variable-length sequences: We evaluate the capability of the models trained on UESTC with (left) fixed-size 60 frames and (right) variable sizes between [60, 100] frames on generating various durations. We report accuracy and FID metrics. For the fixed model, we observe that the best performance is at the seen duration of 60, but over $85\%$ accuracy is retained even at ranges between [40, 120] frames. The performance is overall improved when the model has previously seen duration variations in training; there is a smaller drop in performance beyond the seen range (denoted with dashed lines).
Training with sequences of variable durations. A key advantage of sequence modeling with architectures such as Transformers is the ability to handle variable-length motions. At generation time, we control how long a motion the model should synthesize by specifying the number of positional encodings given to the decoder. We can trivially generate more diversity by synthesizing sequences of different durations. However, so far we have trained our models with fixed-size inputs, i.e., 60 frames. Here, we first analyze whether a fixed-size trained model can directly generate variable sizes. This is presented in Figure 4 (left). We plot the performance over several sets of generations of different lengths between 40 and 120 frames (with a step size of 5). Since our recognition model used for evaluation metrics is trained on fixed-size 60-frame inputs, we naturally observe a performance decrease outside of this length. However, the accuracy remains high, which indicates that our model is already capable of generating diverse durations.

Next, we train our generative model with variable-length inputs by randomly sampling a sequence length between 60 and 100 frames. However, simply training this way from random weight initialization converges to a poor solution, leading all generated motions to be frozen in time. We address this by pretraining at the 60-frame fixed size and finetuning at variable sizes. We see in Figure 4 (right) that the performance is greatly improved with this model.

Furthermore, we investigate how generations longer or shorter than the average duration behave. We observe qualitatively that shorter generations produce partial actions, e.g., picking up without reaching the floor, and longer generations slow down somewhat non-uniformly in time. We refer to the supplemental video [53] for qualitative results.
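Since the duration is controlled purely by the number of positional encodings queried, generating several lengths from the same latent is a one-liner with the decoder sketched in Section 3.1 (hypothetical names; the action index is illustrative):

```python
import torch

decoder = ActorStyleDecoder(num_actions=40)   # from the Section 3.1 sketch
z = torch.randn(1, 256)                       # one latent sampled from N(0, I)
action = torch.tensor([7])                    # some action index (illustrative)

# The same z decoded at three durations; only the number of PE queries changes.
short, standard, long = (decoder(z, action, T=t) for t in (40, 60, 100))
```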
# 4.3. Comparison to the state of the art
Action2Motion [20] is the only prior work that generates action-conditioned motions. We compare to it in Table 3 on their NTU-13 and HumanAct12 datasets. On both datasets, we obtain significant improvements over this prior work, which uses autoregressive GRU blocks, as well as over other baselines implemented by [20] by adapting other works [8, 58]. The improvements over [20] can be explained mainly by removing autoregression and adding the proposed learnable tokens (Table 2). Note that our GRU implementation obtains similar performance to [20], while using the same hyperparameters as the Transformer. In addition to the quantitative performance improvement, measured with recognition models based on joint coordinates, our model can directly output human meshes, which can be further diversified by varying the shape parameters. [20] instead applies an optimization step to fit SMPL models to their generated joint coordinates, which is typically substantially slower than a neural network forward pass.

<table><tr><td rowspan="2">Method</td><td colspan="4">NTU-13</td><td colspan="4">HumanAct12</td></tr><tr><td>FID<sub>tr</sub>↓</td><td>Acc.↑</td><td>Div.→</td><td>Multimod.→</td><td>FID<sub>tr</sub>↓</td><td>Acc.↑</td><td>Div.→</td><td>Multimod.→</td></tr><tr><td>Real [20]</td><td>0.03±0.00</td><td>99.9±0.1</td><td>7.11±0.05</td><td>2.19±0.03</td><td>0.09±0.01</td><td>99.7±0.1</td><td>6.85±0.05</td><td>2.45±0.04</td></tr><tr><td>Real*</td><td>0.02±0.00</td><td>99.8±0.0</td><td>7.07±0.02</td><td>2.25±0.01</td><td>0.02±0.00</td><td>99.4±0.0</td><td>6.86±0.03</td><td>2.60±0.01</td></tr><tr><td>CondGRU ([20]†)</td><td>28.31±0.14</td><td>7.8±0.1</td><td>3.66±0.02</td><td>3.58±0.03</td><td>40.61±0.14</td><td>8.0±0.2</td><td>2.38±0.02</td><td>2.34±0.04</td></tr><tr><td>Two-stage GAN [8] ([20]†)</td><td>13.86±0.09</td><td>20.2±0.3</td><td>5.33±0.04</td><td>3.49±0.03</td><td>10.48±0.09</td><td>42.1±0.6</td><td>5.96±0.05</td><td>2.81±0.04</td></tr><tr><td>Act-MoCoGAN [58] ([20]†)</td><td>2.72±0.02</td><td>99.7±0.1</td><td>6.92±0.06</td><td>0.91±0.01</td><td>5.61±0.11</td><td>79.3±0.4</td><td>6.75±0.07</td><td>1.06±0.02</td></tr><tr><td>Action2Motion [20]</td><td>0.33±0.01</td><td>94.9±0.1</td><td>7.07±0.04</td><td>2.05±0.03</td><td>2.46±0.08</td><td>92.3±0.2</td><td>7.03±0.04</td><td>2.87±0.04</td></tr><tr><td>ACTOR (ours)</td><td>0.11±0.00</td><td>97.1±0.2</td><td>7.08±0.04</td><td>2.08±0.01</td><td>0.12±0.00</td><td>95.5±0.8</td><td>6.84±0.03</td><td>2.53±0.02</td></tr></table>

Table 3: State-of-the-art comparison: We compare to the recent work of [20] on the NTU-13 and HumanAct12 datasets. Note that due to differences in implementation (e.g., random sampling, using the zero shape parameter), our metrics for the ground-truth real data (Real*) differ slightly from the ones reported in their paper. The performance improvement with our Transformer-based model shows a clear gap from Action2Motion. † Baselines implemented by [20].

<table><tr><td rowspan="2">Training data</td><td colspan="2">Test accuracy (%)</td></tr><tr><td>Real<sub>orig</sub></td><td>Real<sub>denoised</sub></td></tr><tr><td>Real<sub>orig</sub></td><td>91.8</td><td>93.2</td></tr><tr><td>Real<sub>denoised</sub></td><td>83.8</td><td>97.0</td></tr><tr><td>Real<sub>interpolated</sub></td><td>77.6</td><td>93.9</td></tr><tr><td>Generated</td><td>80.7</td><td>97.0</td></tr><tr><td>Real<sub>orig</sub> + Generated</td><td>91.9</td><td>98.3</td></tr></table>

Table 4: Action recognition: We employ a standard architecture (ST-GCN [66]) and perform action recognition experiments using several sets of training data on the UESTC cross-subject protocol [30]. Training only with generated samples obtains $80\%$ accuracy on the real test set, which is another indication that our action conditioning performs well. Nevertheless, we observe a domain gap between generated and real samples, mainly due to the noise present in the real data. We show that simply by encoding-decoding the test sequences, we observe a denoising effect, which in turn shows better performance. However, one should note that the last-column experiments are not meant to improve performance on the benchmark, since they use the action label information.
# 4.4. Use cases in action recognition

In this section, we test the limits of our approach by illustrating the benefits of our generative model and our learned latent representation for the skeleton-based action recognition task. We adopt a standard architecture, ST-GCN [66], which employs spatio-temporal graph convolutions to classify actions. We show that we can use our latent encoding to denoise motion estimates, and our generated sequences as data augmentation for action recognition models.

Use case I: Human motion denoising. When our motion data source relies on monocular motion estimation such as [35], the training motions remain noisy. We observe that by simply encoding-decoding the real motions through our learned embedding space, we obtain much cleaner motions. Since it is difficult to show motion quality on static figures, we refer to our supplemental video at [53] to see this effect. We measure the denoising capability of our model through an action recognition experiment in Table 4, where we replace both the training and test set motions with their encoded-decoded versions. We show improved performance when training and testing on Real<sub>denoised</sub> motions (97.0%) compared to Real<sub>orig</sub> (91.8%). Note that this result on its own is not sufficient for this claim, but is only an indication, since our decoder might produce less diversity than real motions. Moreover, the action label is given at denoising time. We believe that such denoising can be beneficial in certain scenarios where the action is known, e.g., occlusion or missing markers during MoCap collection.
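A sketch of this encode-decode round trip, built on the hypothetical encoder/decoder classes from the Section 3.1 sketches (using the posterior mean rather than a sample is our choice here):

```python
import torch

# Assumed to be defined and trained as in the Section 3.1 sketches.
encoder = ActorStyleEncoder(num_actions=40)
decoder = ActorStyleDecoder(num_actions=40)

@torch.no_grad()
def denoise(noisy_poses: torch.Tensor, action: torch.Tensor) -> torch.Tensor:
    """Project a noisy monocular estimate through the learned latent space.

    Passing the motion through the sequence-level bottleneck discards
    frame-level jitter; note that the action label must be known.
    """
    _, mu, _ = encoder(noisy_poses, action)           # use the posterior mean
    return decoder(mu, action, T=noisy_poses.size(1))
```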
Use case II: Augmentation for action recognition. Next, we augment the real training data (Real<sub>orig</sub>) by adding generated motions to the training. We first measure the action recognition performance without using real sequences. We consider interpolating existing Real<sub>orig</sub> motions that fall within the same action category in our embedding space to create intra-class variations (Real<sub>interpolated</sub>). We then synthesize motions by sampling noise vectors conditioned on each action category (Generated). Table 4 summarizes the results. Training only on synthetic data reaches $80.7\%$ on the real test set, which is promising. However, there is a domain gap between the noisy real motions and our smooth generations. Consequently, adding generated motions to real training only marginally improves the performance. In Figure 6, we investigate whether the augmented training helps in low-data regimes by training on several fractions of the data; in each minibatch, we sample real and generated motions equally, although in theory we have access to infinitely many generations. We see that the improvement is more visible in the low-data regime.

![](images/c64e2a5ffa36334a7eba4ce15d9ba56d0a42b1c2bbae37d68b0b44c2e6c49ea4.jpg)

Figure 6: Data augmentation: We show the benefit of augmenting the real data with our generative model (real+gen), especially in the low-data regime. We have limited gains when the real data is sufficiently large.
# 4.5. Qualitative results

![](images/0b783a1c2e4ee2b20716a8c8f20b981bd5f235dae7c215e5854d59c3b7ed00ed.jpg)

Figure 5: Qualitative results: We illustrate the diversity of our generations on the 'Throw' action from NTU-13 by showing 3 sequences. The horizontal axis represents time; 20 equally spaced frames are visualized out of 60-frame generations. We demonstrate that our model is capable of generating different ways of performing a given action. More results can be found in Section B of the appendix and the supplemental video at [53].

In Figure 5, we visualize several examples of our generations. We observe great diversity in the way a given action is performed. For example, the 'Throw' action is performed with the left or right hand. We notice that the model keeps the essence of the action semantics while changing nuances (angles, speed, phase) or action-irrelevant body parts. We refer to the supplemental video at [53] and Section B of the appendix for further qualitative analyses.

One limitation of our model is that the maximum duration it can generate depends on computational resources, since we output the whole sequence at once. Moreover, the actions are from a predefined set. Future work will explore open-vocabulary actions, which might become possible with further progress in 3D motion estimation from unconstrained videos.

# 5. Conclusions

We presented a new Transformer-based VAE model to synthesize action-conditioned human motions. We provided a detailed analysis to assess different components of our proposed approach. We obtained state-of-the-art performance on action-conditioned motion generation, significantly improving over prior work. Furthermore, we explored various use cases in motion denoising and action recognition. One especially attractive property of our method is that it operates on a sequence-level latent space. Future work can therefore exploit our model to impose priors on motion estimation or action recognition problems.

Acknowledgements. This work was granted access to the HPC resources of IDRIS under the allocation 2021-101535 made by GENCI. The authors would like to thank Mathieu Aubry and David Picard for helpful feedback, and Chuan Guo and Shihao Zou for their help with Action2Motion details.

Disclosure: MJB has received research funds from Adobe, Intel, Nvidia, Facebook, and Amazon. While MJB is a part-time employee of Amazon, his research was performed solely at, and funded solely by, Max Planck. MJB has financial interests in Amazon, Datagen Technologies, and Meshcapade GmbH.
# References
|
| 199 |
+
|
| 200 |
+
[1] Hyemin Ahn, Timothy Ha, Yunho Choi, Hwiyeon Yoo, and Songhwai Oh. Text2Action: Generative adversarial synthesis from language to action. In International Conference on Robotics and Automation (ICRA), pages 5915-5920, 2018. 2

[2] Chaitanya Ahuja and Louis-Philippe Morency. Language2Pose: Natural language grounded pose forecasting. In International Conference on 3D Vision (3DV), pages 719-728, 2019. 2

[3] Emre Aksan, Manuel Kaufmann, and Otmar Hilliges. Structured prediction helps 3D human motion modelling. In International Conference on Computer Vision (ICCV), pages 7143-7152, 2019. 1, 2

[4] Norman Badler. Temporal Scene Analysis: Conceptual Descriptions of Object Movements. PhD thesis, University of Toronto, 1975. 1

[5] Norman I. Badler, Cary B. Phillips, and Bonnie Lynn Webber. Simulating Humans: Computer Graphics Animation and Control. Oxford University Press, Inc., New York, NY, USA, 1993. 1, 2

[6] Emad Barsoum, John Kender, and Zicheng Liu. HP-GAN: Probabilistic 3D human motion prediction via GAN. In Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), pages 1499-149909, 2018. 1, 2

[7] Richard Bowden. Learning statistical models of human motion. In Conference on Computer Vision and Pattern Recognition (CVPR), Workshop on Human Modeling, Analysis and Synthesis, 2000. 2

[8] Haoye Cai, Chunyan Bai, Yu-Wing Tai, and Chi-Keung Tang. Deep video generation, prediction and completion of human action sequences. In European Conference on Computer Vision (ECCV), pages 374-390, 2018. 7

[9] Xingyi Cheng, Weidi Xu, Taifeng Wang, Wei Chu, Weipeng Huang, Kunlong Chen, and Junfeng Hu. Variational semi-supervised aspect-term sentiment analysis via transformer. In Computational Natural Language Learning (CoNLL), pages 961-969, 2019. 3

[10] Enric Corona, Albert Pumarola, Guillem Alenyà, and Francesc Moreno-Noguer. Context-aware human motion prediction. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 6990-6999, 2020. 2

[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In North American Chapter of the Association for Computational Linguistics (NAACL), pages 4171-4186, 2019. 3, 4

[12] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations (ICLR), 2021. 3, 4

[13] Yinglin Duan, Tianyang Shi, Zhengxia Zou, Yenan Lin, Zhehui Qian, Bohan Zhang, and Yi Yuan. Single-shot motion completion with transformer. arXiv:2103.00776, 2021. 2

[14] Le Fang, Tao Zeng, Chaochun Liu, Liefeng Bo, Wen Dong, and Changyou Chen. Transformer-based conditional variational autoencoder for controllable story generation. arXiv:2101.00828, 2021. 3

[15] Katerina Fragkiadaki, Sergey Levine, Panna Felsen, and Jitendra Malik. Recurrent network models for human dynamics. In International Conference on Computer Vision (ICCV), pages 4346-4354, 2015. 1

[16] Robert P. Futrelle and Glen C. Speckert. Extraction of motion data by interactive processing. In Conference on Pattern Recognition and Image Processing, pages 405-408, 1978. 2

[17] Aphrodite Galata, Neil Johnson, and David Hogg. Learning variable length Markov models of behaviour. Computer Vision and Image Understanding (CVIU), 81:398-413, 2001.

[18] Dariu M. Gavrila. The visual analysis of human movement: A survey. Computer Vision and Image Understanding (CVIU), 73:82-98, 1999. 2

[19] Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. Generative adversarial nets. In Advances in Neural Information Processing Systems (NeurIPS), volume 27, 2014. 2

[20] Chuan Guo, Xinxin Zuo, Sen Wang, Shihao Zou, Qingyao Sun, Annan Deng, Minglun Gong, and Li Cheng. Action2Motion: Conditioned generation of 3D human motions. In ACM International Conference on Multimedia (ACMMM), pages 2021-2029, 2020. 2, 3, 5, 7

[21] Ikhsanul Habibie, Daniel Holden, Jonathan Schwarz, Joe Yearsley, and Taku Komura. A recurrent variational autoencoder for human motion synthesis. In British Machine Vision Conference (BMVC), pages 119.1-119.12, 2017. 1, 2

[22] Félix G. Harvey, Mike Yurick, Derek Nowrouzezahrai, and C. Pal. Robust motion in-betweening. ACM Transactions on Graphics (TOG), 39:60:1-60:12, 2020. 2

[23] Gustav Eje Henter, Simon Alexanderson, and Jonas Beskow. MoGlow: Probabilistic and controllable motion synthesis using normalising flows. ACM Transactions on Graphics (TOG), 39(6), 2020. 3

[24] Irina Higgins, Loic Matthey, Arka Pal, Christopher Burgess, Xavier Glorot, Matt Botvinick, Shakir Mohamed, and Alexander Lerchner. beta-VAE: Learning basic visual concepts with a constrained variational framework. In International Conference on Learning Representations (ICLR), 2017. 4

[25] Daniel Holden, Oussama Kanoun, Maksym Perepichka, and Tiberiu Popa. Learned motion matching. ACM Transactions on Graphics (TOG), 39:53:1-53:12, 2020. 3

[26] Daniel Holden, Taku Komura, and Jun Saito. Phase-functioned neural networks for character control. ACM Transactions on Graphics (TOG), 36(4), 2017. 1, 3

[27] Daniel Holden, Jun Saito, and Taku Komura. A deep learning framework for character motion synthesis and editing. ACM Transactions on Graphics (TOG), 35(4), 2016. 3

[28] Catalin Ionescu, Fuxin Li, and Cristian Sminchisescu. Latent structured models for human pose estimation. In International Conference on Computer Vision (ICCV), pages 2220-2227, 2011. 2

[29] Catalin Ionescu, Dragos Papava, Vlad Olaru, and Cristian Sminchisescu. Human3.6M: Large scale datasets and predictive methods for 3D human sensing in natural environments. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 36(7):1325-1339, 2014. 2

[30] Yanli Ji, Feixiang Xu, Yang Yang, Fumin Shen, Heng Tao Shen, and Wei-Shi Zheng. A large-scale RGB-D database for arbitrary-view human action recognition. In ACM International Conference on Multimedia (ACMMM), pages 1510-1518, 2018. 2, 5, 7

[31] Junyan Jiang, Gus G. Xia, Dave B. Carlton, Chris N. Anderson, and Ryan H. Miyakawa. Transformer VAE: A hierarchical model for structure-aware and interpretable music representation learning. In IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 516-520, 2020. 3

[32] Angjoo Kanazawa, Jason Y. Zhang, Panna Felsen, and Jitendra Malik. Learning 3D human dynamics from video. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 5607-5616, 2019. 3

[33] Seong Uk Kim, Hanyoung Jang, and Jongmin Kim. Human motion denoising using attention-based bidirectional recurrent neural network. In SIGGRAPH Asia, 2019. 3

[34] Diederik P. Kingma and Max Welling. Auto-encoding variational Bayes. In International Conference on Learning Representations (ICLR), 2014. 2, 4

[35] Muhammed Kocabas, Nikos Athanasiou, and Michael J. Black. VIBE: Video inference for human body pose and shape estimation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 5252-5262, 2020. 2, 3, 7

[36] Hsin-Ying Lee, Xiaodong Yang, Ming-Yu Liu, Ting-Chun Wang, Yu-Ding Lu, Ming-Hsuan Yang, and Jan Kautz. Dancing to music. In Advances in Neural Information Processing Systems (NeurIPS), 2019. 2

[37] Kyungho Lee, Seyoung Lee, and Jehee Lee. Interactive character animation by learning multi-objective control. ACM Transactions on Graphics (TOG), 2018. 1, 3

[38] Jiaman Li, Yihang Yin, Hang Chu, Yi Zhou, Tingwu Wang, Sanja Fidler, and Hao Li. Learning to generate diverse dance motions with transformer. arXiv:2008.08171, 2020. 2

[39] Ruilong Li, Shan Yang, David A. Ross, and Angjoo Kanazawa. AI Choreographer: Music conditioned 3D dance generation with AIST++, 2021. 2

[40] X. Lin and M. Amer. Human motion modeling using DV-GANs. arXiv:1804.10652, 2018. 2

[41] Zhaojiang Lin, Genta Indra Winata, Peng Xu, Zihan Liu, and Pascale Fung. Variational transformers for diverse response generation. arXiv:2003.12738, 2020. 3

[42] Jun Liu, Amir Shahroudy, Mauricio Perez, Gang Wang, Ling-Yu Duan, and Alex C. Kot. NTU RGB+D 120: A large-scale benchmark for 3D human activity understanding. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), pages 1-18, 2019. 5

[43] Matthew Loper, Naureen Mahmood, Javier Romero, Gerard Pons-Moll, and Michael J. Black. SMPL: A skinned multi-person linear model. ACM Transactions on Graphics (TOG), 34(6), Oct. 2015. 1, 3, 4

[44] Zhengyi Luo, S. Alireza Golestaneh, and Kris M. Kitani. 3D human motion estimation via motion compression and refinement. In Asian Conference on Computer Vision (ACCV), 2020. 3

[45] Naureen Mahmood, Nima Ghorbani, Nikolaus F. Troje, Gerard Pons-Moll, and Michael J. Black. AMASS: Archive of motion capture as surface shapes. In International Conference on Computer Vision (ICCV), pages 5441-5450, 2019. 2

[46] Julieta Martinez, Michael J. Black, and Javier Romero. On human motion prediction using recurrent neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 4674-4683, 2017. 1, 2

[47] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. NeRF: Representing scenes as neural radiance fields for view synthesis. In European Conference on Computer Vision (ECCV), pages 405-421, 2020. 2

[48] Dirk Ormoneit, Michael J. Black, Trevor Hastie, and Hedvig Kjellström. Representing cyclic human motion using functional analysis. Image and Vision Computing, 23(14):1264-1276, 2005. 2

[49] Joseph O'Rourke and Norman I. Badler. Model-based image analysis of human motion using constraint propagation. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), PAMI-2(6):522-536, 1980. 2

[50] Ahmed A. A. Osman, Timo Bolkart, and Michael J. Black. STAR: Sparse trained articulated human body regressor. In European Conference on Computer Vision (ECCV), pages 598-613, 2020. 3

[51] Georgios Pavlakos, Vasileios Choutas, Nima Ghorbani, Timo Bolkart, Ahmed A. Osman, Dimitrios Tzionas, and Michael J. Black. Expressive body capture: 3D hands, face, and body from a single image. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 10967-10977, 2019. 3

[52] Dario Pavllo, David Grangier, and Michael Auli. QuaterNet: A quaternion-based recurrent model for human motion. In British Machine Vision Conference (BMVC), 2018. 2

[53] Mathis Petrovich, Michael J. Black, and Gül Varol. ACTOR project page: Action-conditioned 3D human motion synthesis with Transformer VAE. https://imagine.enpc.fr/~petrovim/actor. 1, 2, 6, 7, 8

[54] Javier Romero, Dimitrios Tzionas, and Michael J. Black. Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (TOG), 36(6), 2017. 3

[55] Amir Shahroudy, Jun Liu, Tian-Tsong Ng, and Gang Wang. NTU RGB+D: A large scale dataset for 3D human activity analysis. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1010-1019, 2016. 5

[56] Kihyuk Sohn, Honglak Lee, and Xinchen Yan. Learning structured output representation using deep conditional generative models. In Advances in Neural Information Processing Systems (NeurIPS), volume 28, 2015. 4

[57] Sebastian Starke, He Zhang, Taku Komura, and Jun Saito. Neural state machine for character-scene interactions. ACM Transactions on Graphics (TOG), 38(6), 2019. 1, 3

[58] Sergey Tulyakov, Ming-Yu Liu, Xiaodong Yang, and Jan Kautz. MoCoGAN: Decomposing motion and content for video generation. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 1526-1535, 2018. 7

[59] Carnegie Mellon University. CMU graphics lab motion capture database. http://mocap.cs.cmu.edu/. 2

[60] Raquel Urtasun, David J. Fleet, and Neil D. Lawrence. Modeling human locomotion with topologically constrained latent variable models. In Ahmed Elgammal, Bodo Rosenhahn, and Reinhard Klette, editors, Human Motion - Understanding, Modeling, Capture and Animation, pages 104-118, 2007. 2

[61] Gül Varol, Ivan Laptev, Cordelia Schmid, and Andrew Zisserman. Synthetic humans for action recognition from unseen viewpoints. International Journal of Computer Vision (IJCV), pages 2264-2287, 2021. 2, 6

[62] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. In Advances in Neural Information Processing Systems (NeurIPS), volume 30, 2017. 6

[63] He Wang, Edmond S. L. Ho, Hubert P. H. Shum, and Zhanxing Zhu. Spatio-temporal manifold learning for human motions via long-horizon modeling. IEEE Transactions on Visualization and Computer Graphics (TVCG), 27:216-227, 2021. 3

[64] Hongyi Xu, Eduard Gabriel Bazavan, Andrei Zanfir, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. GHUM and GHUML: Generative 3D human shape and articulated pose models. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 6183-6192, 2020. 3

[65] Sijie Yan, Zhizhong Li, Yuanjun Xiong, Huahan Yan, and Dahua Lin. Convolutional sequence generation for skeleton-based action synthesis. In International Conference on Computer Vision (ICCV), pages 4393-4401, 2019. 2

[66] Sijie Yan, Yuanjun Xiong, and Dahua Lin. Spatial temporal graph convolutional networks for skeleton-based action recognition. In AAAI Conference on Artificial Intelligence, 2018. 7

[67] Ye Yuan and Kris Kitani. DLow: Diversifying latent flows for diverse human motion prediction. In Andrea Vedaldi, Horst Bischof, Thomas Brox, and Jan-Michael Frahm, editors, European Conference on Computer Vision (ECCV), pages 346-364, 2020. 1, 2

[68] Andrei Zanfir, Eduard Gabriel Bazavan, Hongyi Xu, William T. Freeman, Rahul Sukthankar, and Cristian Sminchisescu. Weakly supervised 3D human pose and shape reconstruction with normalizing flows. In European Conference on Computer Vision (ECCV), pages 465-481, 2020. 3

[69] Yan Zhang, Michael J. Black, and Siyu Tang. Perpetual motion: Generating unbounded human motion. arXiv:2007.13886, 2020. 2

[70] Yan Zhang, Michael J. Black, and Siyu Tang. We are more than our joints: Predicting how 3D bodies move. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 3372-3382, 2021. 1, 2

[71] Rui Zhao, Hui Su, and Qiang Ji. Bayesian adversarial human motion synthesis. In Conference on Computer Vision and Pattern Recognition (CVPR), pages 6224-6233, 2020. 2

[72] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 3, 5

[73] Shihao Zou, Xinxin Zuo, Yiming Qian, Sen Wang, Chi Xu, Minglun Gong, and Li Cheng. 3D human shape reconstruction from a polarization image. In European Conference on Computer Vision (ECCV), pages 351-368, 2020. 5

actionconditioned3dhumanmotionsynthesiswithtransformervae/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c085a5dd77fe561e9e894a395cd158901bf9b8ecd5d60244a6e3a84784b55d5f
size 422156

actionconditioned3dhumanmotionsynthesiswithtransformervae/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:3efd1c8c36ce3bb4905600ee928392f4e67463da7685a4cf635e1df7dd067691
size 416831

activedomainadaptationviaclusteringuncertaintyweightedembeddings/b04281d8-c6f4-4c50-afb3-6b770196404a_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c01350958d6c26d9191b0e53ef95e9fb6f0d615ef94e2aaa221ba8efe1105437
size 82896

activedomainadaptationviaclusteringuncertaintyweightedembeddings/b04281d8-c6f4-4c50-afb3-6b770196404a_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66d1d75309754d19484a47b7ce290cdd25f451b49fbe66128f8afea12d80d4d1
size 105578

activedomainadaptationviaclusteringuncertaintyweightedembeddings/b04281d8-c6f4-4c50-afb3-6b770196404a_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1b2798c124156d91ff4fb86cd2c731dc6bb20e1e2fa6288cd1f61b321fa144cd
size 2953556

activedomainadaptationviaclusteringuncertaintyweightedembeddings/full.md
ADDED
@@ -0,0 +1,329 @@
# Active Domain Adaptation via Clustering Uncertainty-weighted Embeddings

Viraj Prabhu<sup>1</sup>, Arjun Chandrasekaran<sup>\*2</sup>, Kate Saenko<sup>3</sup>, Judy Hoffman<sup>1</sup>

<sup>1</sup>Georgia Tech, <sup>2</sup>Max Planck Institute for Intelligent Systems, Tübingen, <sup>3</sup>Boston University

{virajp, judy}@gatech.edu, achandrasekaran@tue.mpg.de, saenko@bu.edu

# Abstract

Generalizing deep neural networks to new target domains is critical to their real-world utility. In practice, it may be feasible to get some target data labeled, but to be cost-effective it is desirable to select a maximally-informative subset via active learning (AL). We study the problem of AL under a domain shift, called Active Domain Adaptation (Active DA). We demonstrate how existing AL approaches based solely on model uncertainty or diversity sampling are less effective for Active DA. We propose Clustering Uncertainty-weighted Embeddings (CLUE), a novel label acquisition strategy for Active DA that performs uncertainty-weighted clustering to identify target instances for labeling that are both uncertain under the model and diverse in feature space. CLUE consistently outperforms competing label acquisition strategies for Active DA and AL across learning settings on 6 diverse domain shifts for image classification. Our code is available at https://github.com/virajprabhu/CLUE.

# 1. Introduction

Deep neural networks excel at learning from large labeled datasets but struggle to generalize this knowledge to new target domains [32, 42]. This limits their real-world utility, as it is impractical to collect a large new dataset for every new deployment domain. Further, all target instances are usually not equally informative, and it is far more cost-effective to identify maximally informative target instances for labeling. While Active Learning [2, 6, 8, 9, 35, 36] has extensively studied the problem of identifying informative instances for labeling, it typically focuses on learning a model from scratch and does not operate under a domain shift. In many practical scenarios, models are trained in a source domain and deployed in a different target domain, often with additional domain adaptation [10, 17, 32, 44]. In this work, we study the problem of active learning under such a domain shift, called Active Domain Adaptation [29] (Active DA).

Concretely, given i) labeled data in a source domain, ii) unlabeled data in a target domain, and iii) the ability to obtain labels for a fixed budget of target instances, the goal of Active DA is to select target instances for labeling and learn a model with high accuracy on the target test set. Active DA has widespread utility as a means of cost-effective adaptation from cheaper to more expensive sources of labels (e.g. synthetic to real data), as well as when the quantity (e.g. autonomous driving) or cost (e.g. medical diagnosis) of labeling in the target domain is prohibitive. Despite its practical utility, it is a challenging task that has seen limited follow-up work since its introduction over ten years ago [5, 29, 40].

Figure 1: The goal of Active Domain Adaptation [29] (Active DA) is to adapt a source model to an unlabeled target domain by acquiring labels for selected target instances via an oracle. Existing active learning (AL) methods based solely on uncertainty [9, 30, 45] or diversity-sampling [13, 35] are less effective for Active DA (Rows 1 and 2). We propose CLUE, an AL method designed for Active DA that selects instances that are both uncertain (thus informative to the model) and diverse in feature space (thus minimizing redundancy, Row 3), and leads to more cost-effective adaptation than competing AL and Active DA methods (Sec. 4.4).

The traditional AL setting typically focuses on techniques to select samples to efficiently learn a model from scratch, rather than adapting under a domain shift [36]. As a result, existing state-of-the-art AL methods based on either uncertainty or diversity sampling are less effective for Active DA. Uncertainty sampling selects instances that are highly uncertain under the model's beliefs [8, 9, 20, 41]. Under a domain shift, uncertainty estimates on the target domain may be miscalibrated [39] and lead to sampling uninformative, outlier, or redundant instances (Fig. 1, top). A parallel line of work in AL based on diversity sampling instead selects instances dissimilar to one another in a learned embedding space [13, 35, 38]. In Active DA, this can lead to sampling uninformative instances from regions of the feature space that are already well-aligned across domains (Fig. 1, middle). As a result, solely using uncertainty or diversity sampling is suboptimal for Active DA, as we demonstrate in Sec. 4.4.

Recent work in AL and Active DA has sought to combine uncertainty and diversity sampling. AADA [40], the state-of-the-art Active DA method, combines uncertainty with diversity measured by 'targetness' under a learned domain discriminator. However, targetness does not ensure that the selected instances are representative of the entire target data distribution (i.e. not outliers), or dissimilar to one another. Ash et al. [2] instead propose performing clustering in a hallucinated "gradient embedding" space. However, they rely on distance-based clustering in high-dimensional spaces, which often leads to suboptimal results.

In this work, we propose a novel label acquisition strategy for Active DA that combines uncertainty and diversity sampling in a principled manner without the need for complex gradient or domain discriminator-based diversity measures. Our approach, Clustering Uncertainty-weighted Embeddings (CLUE), identifies informative and representative target instances from dense regions of the feature space. To do so, CLUE clusters deep embeddings of target instances weighted by the corresponding uncertainty of the target model. Our weighting scheme effectively increases the density of instances proportional to their uncertainty. To construct nonredundant batches, CLUE then selects nearest neighbors to the inferred cluster centroids for labeling. Our algorithm then leverages the acquired target labels and, optionally, the labeled source and unlabeled target data, to update the model, consistently leading to more cost-effective domain alignment than competing (and frequently more complex) alternatives.

# Contributions:

1. We benchmark the performance of state-of-the-art methods for active learning on challenging domain shifts, and find that methods based purely on uncertainty or diversity sampling are not effective for Active DA.
2. We present CLUE, a novel and easy-to-implement label acquisition strategy for Active DA that uses uncertainty-weighted clustering to identify instances that are both uncertain under the model and diverse in feature space.
3. We present results on 6 diverse domain shifts from the DomainNet [27], Office [32], and DIGITS [22, 26] benchmarks for image classification. Our method CLUE improves upon both the previous state-of-the-art in Active DA across shifts (by as much as $9\%$ in some cases), as well as state-of-the-art methods for active learning, across multiple learning strategies.

# 2. Related Work

Active Learning (AL) for CNNs. AL for CNNs has focused on the batch-mode setting due to the instability associated with single-instance updates. The two most successful paradigms in AL have been uncertainty sampling and diversity sampling [2]. Uncertainty-based methods select instances with the highest uncertainty under the current model [8, 9, 34, 41], using measures such as entropy [45], classification margins [30], or confidence. Diversity-based methods select instances that are representative of the entire dataset, and optimize for diversity in a learned embedding space via clustering or core-set selection [12, 13, 35, 38].

Some approaches combine these two paradigms [2, 3, 18, 49]. Active Learning by Learning [18] formulates this as a multi-armed bandit problem of selecting between coreset and uncertainty sampling at each step. Zhdanov et al. [49] use K-Means clustering [15] to increase batch diversity after prefiltering based on uncertainty. More recently, BADGE [2] runs KMeans++ on hallucinated "gradient embeddings". We propose CLUE, an AL method for sampling under a domain shift, that uses uncertainty-weighted clustering to select diverse and informative target instances.

Domain Adaptation. The task of transferring models trained on a labeled source domain to an unlabeled [10, 17, 32, 44] or partially-labeled [7, 33, 47] target domain has been studied extensively. Initial approaches aligned feature spaces by optimizing discrepancy statistics between the source and target [23, 44], while in recent years adversarial learning of a feature space encoder alongside a domain discriminator has become a popular alignment strategy [10, 11, 43].  In this work, we propose a label acquisition strategy for active learning under a domain shift that generalizes across multiple domain adaptation strategies.

Active Domain Adaptation (Active DA). Unlike semi-supervised domain adaptation, which assumes labels for a random subset of target instances, Active DA focuses on selecting target instances to label for domain adaptation. Rai et al. [29] first studied the task of Active DA applied to sentiment classification from text data. They propose ALDA, which samples instances based on model uncertainty and a learned domain separator. Chattopadhyay et al. [5] select target instances and learn importance weights for source points by solving a convex optimization problem of minimizing MMD between features. More recently, Su et al. [40] study Active DA in the context of deep CNNs and propose AADA, wherein target instances are selected based on predictive entropy and targetness measured by an adversarially trained domain discriminator, followed by adversarial domain adaptation via DANN [11]. We propose CLUE, a novel label acquisition strategy for Active DA that identifies uncertain and diverse instances for labeling, and outperforms prior work on diverse shifts across multiple learning strategies.

Figure 2: We propose CLustering Uncertainty-weighted Embeddings (CLUE), a novel label acquisition strategy for Active DA that identifies a diverse set of target instances that are informative and representative (Eq. 4). First, deep embeddings of target instances are reweighted based on model entropy to emphasize uncertain regions of feature space (left). Next, to select diverse instances, these uncertainty-weighted embeddings are clustered, and the instance closest to each cluster centroid is acquired for labeling (middle). Finally, the acquired target labels (and optionally, the labeled source and unlabeled target data) are used to update the model, leading to well-classified target data (right).

# 3. Approach

We address active domain adaptation (Active DA), where the goal is to generalize a model trained on a source domain to an unlabeled target domain, with the option to query an oracle for labels for a subset of target instances. While the individual facets of this task (adapting to a new domain and selectively acquiring labels) have been well-studied as the problems of Domain Adaptation (DA) and Active Learning (AL) respectively, Active DA presents the new challenge of identifying target instances that will, once labeled, result in the most sample-efficient domain alignment. Further, the answer to this question may vary based on the properties of the specific domain shift. In this section, we present CLUE, a novel label acquisition strategy for Active DA which performs consistently well across diverse domain shifts.

# 3.1. Notation & Preliminaries

In Active DA, the learning algorithm has access to labeled instances from the source domain $(X_S, Y_S)$ (solid pink in Fig. 2), unlabeled instances from the target domain $X_{\mathcal{U}\mathcal{T}}$ (blue outline in Fig. 2), and a budget $B$ ($= 3$ in Fig. 2) which is much smaller than the amount of unlabeled target data. The learning algorithm may query an oracle to obtain labels for at most $B$ instances from $X_{\mathcal{U}\mathcal{T}}$, and add them to the set of labeled target instances $X_{\mathcal{L}\mathcal{T}}$. The entire target domain data is $X_{\mathcal{T}} = X_{\mathcal{L}\mathcal{T}} \cup X_{\mathcal{U}\mathcal{T}}$. The task is to learn a function $h: X \to Y$ (a convolutional neural network (CNN) parameterized by $\Theta$) that achieves good predictive performance on the target. In this work, we consider Active DA in the context of $C$-way image classification: the samples $\mathbf{x}_S \in X_S$, $\mathbf{x}_{\mathcal{T}} \in X_{\mathcal{T}}$ are images, and labels $y_S \in Y_S$, $y_{\mathcal{T}} \in Y_{\mathcal{T}}$ are categorical variables $y \in \{1, 2, \dots, C\}$.

Active Learning. The goal of active learning (AL) is to identify target instances that, once labeled and used for training the model, minimize its expected future loss. In practice, prior works in AL identify such instances based primarily on two proxy measures, uncertainty and diversity (see Sec. 2). We first revisit these terms in the context of Active DA.

Uncertainty. Prior work in AL has proposed using several measures of model uncertainty as a proxy for informativeness (see Sec. 2). However, in the context of Active DA, using model uncertainty to select informative samples presents a conundrum. On the one hand, models benefit from initialization on a related source domain rather than learning from scratch. On the other hand, under a strong distribution shift, model uncertainty may often be miscalibrated [39]. Unfortunately, without access to target labels, it is impossible to evaluate the reliability of model uncertainty!

Diversity. Acquiring labels solely based on uncertainty often leads to sampling batches of similar instances with high redundancy, or to sampling outliers. A parallel line of work in active learning instead proposes sampling diverse instances that are representative of the unlabeled pool of data. Several definitions of "diverse" exist in the literature: some works define diversity as coverage in feature [35] or "gradient embedding" space [2], while prior work in Active DA measures diversity by how "target-like" an instance is [40]. In Active DA, training on a related source domain (optionally followed by unsupervised domain alignment) results in some classes being better aligned across domains than others. Thus, in order to be cost-efficient it is important to avoid sampling from already well-learned regions of the feature space. However, purely diversity-based AL methods are unable to account for this, and lead to sampling redundant instances.

While sampling instances that are either uncertain or diverse may be useful to learning, an optimal label acquisition strategy for Active DA would ideally capture both jointly. We now introduce CLUE, a label acquisition strategy for Active DA that captures both uncertainty and diversity.

# 3.2. Clustering Uncertainty-weighted Embeddings

To measure informativeness we use predictive entropy $\mathcal{H}(Y|\mathbf{x};\Theta)$ [45] ($\mathcal{H}(Y|\mathbf{x})$ for brevity), which, for $C$-way classification, is defined as:

$$
\mathcal{H}(Y|\mathbf{x}) = -\sum_{c=1}^{C} p_{\Theta}(Y=c|\mathbf{x}) \log p_{\Theta}(Y=c|\mathbf{x}) \tag{1}
$$

Under a domain shift, entropy can be viewed as capturing both uncertainty and domainness. Rather than training an explicit domain discriminator [10, 40], we consider an implicit domain classifier $d(\mathbf{x})$ [33] based on entropy thresholding:

$$
d(\mathbf{x}) = \begin{cases} 1, & \text{if } \mathcal{H}(Y|\mathbf{x}) \geq \gamma \\ 0, & \text{otherwise} \end{cases} \tag{2}
$$

where 1 and 0 denote target and source domain labels, and $\gamma$ is a threshold value. The probability of an instance belonging to the target domain is thus given by:

$$
p(d(\mathbf{x}) = 1) = \frac{\mathcal{H}(Y|\mathbf{x})}{\log(C)} \propto \mathcal{H}(Y|\mathbf{x}) \quad [C \text{ is constant}] \tag{3}
$$

where $\log(C)$ is the maximum possible entropy of a $C$-way distribution.

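As a concrete illustration, the following is a minimal NumPy sketch of Eqs. 1-3, assuming an (N, C) array of softmax probabilities from the model; the function names and array layout are ours, for illustration only:

```python
import numpy as np

def predictive_entropy(probs):
    """Eq. 1: H(Y|x) = -sum_c p(Y=c|x) log p(Y=c|x), computed row-wise
    over an (N, C) array of softmax probabilities."""
    return -np.sum(probs * np.log(probs + 1e-12), axis=1)  # eps guards log(0)

def implicit_domainness(probs, gamma):
    """Eqs. 2-3: hard domain label d(x) by thresholding entropy at gamma,
    and normalized entropy H(Y|x)/log(C) as p(d(x) = 1)."""
    entropy = predictive_entropy(probs)
    d = (entropy >= gamma).astype(int)           # Eq. 2: 1 = target, 0 = source
    p_target = entropy / np.log(probs.shape[1])  # Eq. 3, lies in [0, 1]
    return d, p_target
```
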
Next, we measure diversity based on feature-space coverage. Let $\phi(\mathbf{x})$ denote feature embeddings extracted from model $h$. We identify diverse instances by partitioning $X_{\mathcal{T}}$ into $K$ diverse sets via a partition function $\mathcal{S}: X_{\mathcal{T}} \to \{X_1, X_2, \dots, X_K\}$. Let $\{\mu_1, \mu_2, \dots, \mu_K\}$ denote the corresponding centroid of each set. Each set $X_k$ should have a small variance $\sigma^2(X_k)$. Expressed in terms of pairs of samples, $\sigma^2(X_k) = \frac{1}{2|X_k|^2}\sum_{\mathbf{x_i},\mathbf{x_j}\in X_k}||\phi(\mathbf{x_i}) - \phi(\mathbf{x_j})||^2$ [48]. The goal is to group target instances that are similar in the CNN's feature space into a set $X_k$. However, while $\sigma^2(X_k)$ is a function of the target data distribution and feature space $\phi(.)$, it does not account for uncertainty.

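The pairwise form above is the standard identity relating mean squared pairwise distances to the variance about the centroid; a quick numerical check on synthetic embeddings (a sketch with made-up data, not part of the method):

```python
import numpy as np

rng = np.random.default_rng(0)
X_k = rng.normal(size=(100, 512))  # a synthetic cluster of 512-d embeddings

# Variance about the centroid: (1/|X_k|) sum_i ||phi(x_i) - mu||^2
var_centroid = np.mean(np.sum((X_k - X_k.mean(axis=0)) ** 2, axis=1))

# Pairwise form from the text: (1 / 2|X_k|^2) sum_{i,j} ||phi(x_i) - phi(x_j)||^2
sq_dists = np.sum((X_k[:, None, :] - X_k[None, :, :]) ** 2, axis=2)
var_pairwise = sq_dists.sum() / (2 * len(X_k) ** 2)

assert np.isclose(var_centroid, var_pairwise)  # the two forms agree
```
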
To jointly capture both diversity and uncertainty, we propose weighting samples based on their uncertainty (given by Eq. 1), and compute the weighted population variance [28]. The overall set-partitioning objective is:

$$
\underset{\mathcal{S}, \mu}{\operatorname{argmin}} \sum_{k=1}^{K} \frac{1}{Z_k} \sum_{\mathbf{x} \in X_k} \mathcal{H}(Y|\mathbf{x}) \left|\left| \phi(\mathbf{x}) - \mu_{\mathbf{k}} \right|\right|^2 \tag{4}
$$

where the normalization is $Z_k = \sum_{\mathbf{x}\in X_k}\mathcal{H}(Y|\mathbf{x})$.

Algorithm 1 CLUE: Our proposed Active DA method, which uses Clustering Uncertainty-weighted Embeddings (CLUE) to select instances for labeling followed by a model update via semi-supervised domain adaptation.

1: Require: Neural network $h = f(\phi(.))$, parameterized by $\Theta$, labeled source data $(X_S, Y_S)$, unlabeled target data $X_{\mathcal{T}}$, per-round budget $B$, total rounds $R$.
2: Define: Labeled target set $X_{\mathcal{L}\mathcal{T}} = \emptyset$
3: Train source model $\Theta^1$ on $(X_S, Y_S)$.
4: Adapt model to unlabeled target domain (optional).
5: for $\rho = 1$ to $R$ do
6: CLUE: For all instances $\mathbf{x} \in X_{\mathcal{T}} \setminus X_{\mathcal{L}\mathcal{T}}$:

1. Compute deep embedding $\phi(\mathbf{x})$
2. Run Weighted K-Means until convergence (Eq. 4):

(a) Init. $K (= B)$ centroids $\{\mu_{\mathrm{i}}\}_{i=1}^{B}$ (KMeans++)
(b) Assign:

$$
X_k \leftarrow \{\mathbf{x} \,|\, k = \operatorname*{argmin}_{i=1,\dots,K} \left|\left| \phi(\mathbf{x}) - \mu_{\mathbf{i}} \right|\right|^2 \}_{\forall \mathbf{x}}
$$

(c) Update: $\mu_{\mathbf{k}} \gets \frac{\sum_{\mathbf{x}\in X_k}\mathcal{H}(Y|\mathbf{x})\,\phi(\mathbf{x})}{\sum_{\mathbf{x}\in X_k}\mathcal{H}(Y|\mathbf{x})} \;\forall k$

3. Acquire labels for the nearest neighbor to each centroid: $X_{\mathcal{LT}}^{\rho} \gets \{\mathbf{NN}(\mu_{\mathrm{i}})\}_{i=1}^{B}$
4. $X_{\mathcal{LT}} = X_{\mathcal{LT}} \cup X_{\mathcal{LT}}^{\rho}$

7: Semi-supervised DA: Update model $\Theta^{\rho+1}$
8: Return: Final model parameters $\Theta^{R+1}$.

Our weighted set partitioning can also be viewed as standard set partitioning in an alternate feature space, where the density of instances is artificially increased proportional to their predictive entropy. Intuitively, this emphasizes representative sampling from uncertain regions of the feature space.

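For concreteness, steps (b) and (c) of Algorithm 1 translate directly into NumPy. The following is a minimal sketch (the function and variable names are ours); iterating it until the assignments stop changing approximates Eq. 4, and it assumes strictly positive entropy weights:

```python
import numpy as np

def weighted_kmeans_step(phi, entropy, mu):
    """One iteration of the Weighted K-Means inner loop of Algorithm 1.
    phi: (N, D) target embeddings, entropy: (N,) H(Y|x) weights, mu: (K, D).
    Step (b): hard-assign each embedding to its nearest centroid.
    Step (c): recompute each centroid as the entropy-weighted mean."""
    dists = ((phi[:, None, :] - mu[None, :, :]) ** 2).sum(axis=2)  # (N, K)
    assign = dists.argmin(axis=1)                                  # step (b)
    new_mu = mu.copy()
    for k in range(mu.shape[0]):                                   # step (c)
        members = assign == k
        if members.any():
            w = entropy[members]
            new_mu[k] = (w[:, None] * phi[members]).sum(axis=0) / w.sum()
    return assign, new_mu
```
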
Since the objective in Eq. 4 is NP-hard, we approximate it using a Weighted K-Means algorithm [19] (see Algorithm 1; uncertainty-weighting is used in the update step). We set $K = B$ (budget), and use activations from the penultimate CNN layer as $\phi(\mathbf{x})$. After clustering, to select representative instances (i.e. non-outliers), we acquire labels for the nearest neighbor to the weighted mean $\mu_{\mathbf{k}}$ of each set in Eq. 4. Note that Eq. 4 equivalently maximizes the sum of squared deviations between instances in different sets [21], ensuring that the constructed batch of instances has minimum redundancy.

Trading-off uncertainty and diversity. CLUE captures an implicit tradeoff between model uncertainty (via entropy-weighting) and feature-space coverage (via clustering). Consider the predictive probability distribution for instance $\mathbf{x}$:

$$
p_{\Theta}(Y|\mathbf{x}) = \sigma\left(\frac{h(\mathbf{x})}{T}\right) \tag{5}
$$

where $\sigma$ denotes the softmax function and $T$ denotes its temperature. We observe that by modulating $T$, we can control the uncertainty-diversity tradeoff. For example, by increasing $T$ we obtain more diffuse softmax distributions for all points, leading to similar uncertainty estimates across points; correspondingly, we expect diversity to play a bigger role. Similarly, at lower values of $T$ we expect uncertainty to have greater influence. A sketch of the full acquisition step, including this temperature scaling, follows below.

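The sketch strings Eqs. 1, 4, and 5 together for one acquisition round, assuming precomputed logits and penultimate-layer embeddings for the unlabeled target pool. It leans on scikit-learn's KMeans, whose `sample_weight` option performs the same entropy-weighted centroid update as step (c) of Algorithm 1; the function name and interface are ours, for illustration:

```python
import numpy as np
from scipy.special import softmax
from sklearn.cluster import KMeans

def clue_acquire(logits, embeddings, budget, T=1.0):
    """One CLUE acquisition round (a sketch). logits: (N, C) model outputs
    on unlabeled target data; embeddings: (N, D) penultimate features."""
    probs = softmax(logits / T, axis=1)                       # Eq. 5
    entropy = -np.sum(probs * np.log(probs + 1e-12), axis=1)  # Eq. 1

    # Uncertainty-weighted K-Means (Eq. 4): sample_weight reproduces the
    # entropy-weighted centroid update of Algorithm 1.
    km = KMeans(n_clusters=budget, init="k-means++", n_init=10)
    km.fit(embeddings, sample_weight=entropy)

    # Select the nearest real instance to each weighted centroid (may return
    # fewer than `budget` indices if two centroids share a nearest neighbor).
    dists = ((embeddings[:, None, :] - km.cluster_centers_[None, :, :]) ** 2).sum(axis=2)
    return np.unique(dists.argmin(axis=0))
```

The nearest-neighbor step is what keeps acquired instances away from outliers: the centroids of an entropy-weighted clustering need not coincide with any real sample, so labels are requested for the closest actual target instance instead.
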
Our full label acquisition approach, Clustering Uncertainty-weighted Embeddings (CLUE), thus identifies instances that are both uncertain and diverse (see Fig. 2).

Domain adaptation. After acquiring labels via CLUE, we proceed to the next step of active adaptation: we update the model using the acquired target labels and, optionally, the labeled source and unlabeled target data (see Fig. 2, right). In our main experiments (Sec. 4.4), we experiment with 3 learning strategies: i) finetuning on target labels, ii) domain-adversarial learning via DANN [10] with an additional target cross-entropy loss, and iii) semi-supervised adaptation via minimax entropy (MME [33]). In Sec. 4.6 we also combine CLUE with additional DA methods from the literature.

Algorithm 1 describes our full approach when using CLUE in combination with semi-supervised domain adaptation. Given a model trained on labeled source instances, we align its representations with unlabeled target instances via unsupervised domain adaptation. For $R$ rounds with per-round budget $B$, we iteratively i) acquire labels for $B$ target instances that are identified via our proposed sampling approach (CLUE), and ii) update the model using a semi-supervised domain alignment strategy.

# 4. Experiments

We begin by describing our datasets and metrics, implementation details, and baselines (Sec. 4.1-4.3). Next, we benchmark the performance of CLUE across 6 domain shifts of varying difficulty against state-of-the-art methods for Active DA and AL, across different learning settings (Sec. 4.4). We then ablate our method, analyze its sensitivity to various hyperparameters, and visualize its behavior (Sec. 4.5). Finally, we combine our method with various DA strategies, and study its effectiveness in learning from scratch (Sec. 4.6). We follow the standard batch active learning setting [4], in which we perform multiple rounds of batch active sampling, label acquisition, and model updates.

# 4.1. Datasets and Metrics

DomainNet. DomainNet [27] is a large domain adaptation benchmark for image classification, containing 0.6 million images from 6 distinct domains spanning 345 categories. We study four shifts of increasing difficulty as measured by source→target transfer accuracy (TA): Real→Clipart (easy, TA=40.6%), Clipart→Sketch (moderate, TA=34.7%), Sketch→Painting (hard, TA=30.3%), and Clipart→Quickdraw (very hard, TA=11.9%).

DIGITS and Office. We also report performance on the SVHN [26] $\rightarrow$ MNIST [22] and DSLR $\rightarrow$ Amazon [32] shifts.

Metric. We compute model accuracy on the target test split versus the number of labels used from the target train split at each round. We run each experiment 3 times and report mean accuracies. For clarity, we report performance at 3 randomly chosen intermediate budgets in the main paper and include full plots (mean accuracies and 1 standard deviation over all rounds) in the supplementary.

# 4.2. Implementation details

DomainNet. We use a ResNet34 [16] CNN, and perform 10 rounds of Active DA with a per-round budget of $B = 500$ instances (total of 5000 labels). On DomainNet, we use the Clipart $\rightarrow$ Sketch shift as a validation shift and use a small target validation set to select a softmax temperature of $T = 0.1$, which we use for all other DomainNet shifts (details in supplementary). We include a sensitivity analysis over $T$ and $B$ in Sec. 4.5.

DIGITS. We match the experimental setting to Su et al. [40]: we use a modified LeNet architecture [17], and perform 30 rounds of Active DA with $B = 10$.

Office. We use a ResNet34 CNN and perform 10 rounds of Active DA with $B = 30$. On DIGITS and Office, we use the default value of $T = 1.0$. Across datasets, we use penultimate-layer embeddings for CLUE and implement weighted K-Means with $K = B$. All models are first trained on the labeled source domain. When adapting via semi-supervised domain adaptation, we additionally employ unsupervised feature alignment to the target domain at round 0. For additional details see the supplementary.

# 4.3. Baselines

We compare CLUE against several state-of-the-art methods for Active DA and Active Learning.

1) AADA: Active Adversarial Domain Adaptation [40] (AADA) is a state-of-the-art Active DA method which performs alternate rounds of active sampling and adversarial domain adaptation via DANN [11]. It samples points with high predictive entropy and high probability of belonging to the target domain as predicted by the domain discriminator. Further, we also benchmark the performance of 4 diverse AL strategies from prior work in the Active DA setting.
2) entropy [45]: Selects instances for which the model has the highest predictive entropy.
3) margin [30]: Selects instances for which the score difference between the model's top-2 predictions is the smallest.
4) coreset [35]: Core-set formulates active sampling as a set-cover problem and solves the K-Center [46] problem. We use the greedy version proposed in Sener et al. [35].
5) BADGE [2]: BADGE is a recently proposed state-of-the-art active learning strategy that constructs diverse batches by running KMeans++ [1] on "gradient embeddings" that incorporate model uncertainty and diversity.

Methods (2) and (3) are uncertainty-based, (4) is diversity-based, and (1) and (5) are hybrid approaches.

<table><tr><td rowspan="2">DA method</td><td rowspan="2">AL method</td><td rowspan="2">AL Type</td><td colspan="3">R → C (easy)</td><td colspan="3">C → S (moderate)</td><td colspan="3">S → P (hard)</td><td colspan="3">C → Q (very hard)</td><td>AVG 2k</td><td>5k</td><td></td></tr><tr><td>1k</td><td>2k</td><td>5k</td><td>1k</td><td>2k</td><td>5k</td><td>1k</td><td>2k</td><td>5k</td><td>1k</td><td>2k</td><td>5k</td><td>1k</td><td>2k</td><td></td></tr><tr><td rowspan="5">ft from source</td><td>uniform</td><td>-</td><td>51.5</td><td>55.3</td><td>60.6</td><td>42.1</td><td>44.4</td><td>47.0</td><td>41.1</td><td>43.8</td><td>47.2</td><td>23.3</td><td>28.1</td><td>35.3</td><td>39.5</td><td>42.9</td><td>47.5</td></tr><tr><td>entropy [45]</td><td>U</td><td>48.1</td><td>52.1</td><td>58.6</td><td>41.1</td><td>42.7</td><td>45.7</td><td>41.2</td><td>43.8</td><td>47.2</td><td>21.9</td><td>26.4</td><td>34.0</td><td>38.1</td><td>41.3</td><td>46.4</td></tr><tr><td>margin [30]</td><td>U</td><td>51.0</td><td>54.8</td><td>60.7</td><td>42.3</td><td>44.3</td><td>47.0</td><td>41.4</td><td>44.0</td><td>47.1</td><td>23.6</td><td>28.4</td><td>35.8</td><td>39.6</td><td>42.9</td><td>47.7</td></tr><tr><td>coreset [35]</td><td>D</td><td>50.0</td><td>54.0</td><td>59.6</td><td>41.2</td><td>42.8</td><td>44.9</td><td>40.1</td><td>42.2</td><td>45.4</td><td>22.4</td><td>26.0</td><td>32.4</td><td>38.4</td><td>41.3</td><td>45.6</td></tr><tr><td>BADGE [2]</td><td>H</td><td>52.4</td><td>56.1</td><td>61.7</td><td>42.8</td><td>45.2</td><td>48.1</td><td>41.7</td><td>44.9</td><td>47.9</td><td>23.1</td><td>28.2</td><td>35.5</td><td>39.8</td><td>43.6</td><td>48.3</td></tr><tr><td></td><td>CLUE (Ours)</td><td>H</td><td>52.9</td><td>57.1</td><td>62.0</td><td>43.3</td><td>45.8</td><td>48.6</td><td>42.4</td><td>45.3</td><td>48.3</td><td>24.3</td><td>28.8</td><td>35.5</td><td>40.7</td><td>44.3</td><td>48.6</td></tr><tr><td rowspan="5">MME [33] from source</td><td>uniform</td><td>-</td><td>55.2</td><td>59.3</td><td>63.5</td><td>45.7</td><td>47.8</td><td>49.7</td><td>42.9</td><td>45.3</td><td>47.8</td><td>24.5</td><td>30.3</td><td>38.1</td><td>42.1</td><td>45.7</td><td>49.8</td></tr><tr><td>entropy [45]</td><td>U</td><td>53.8</td><td>58.6</td><td>64.4</td><td>44.2</td><td>45.7</td><td>48.5</td><td>41.6</td><td>43.9</td><td>47.2</td><td>21.9</td><td>25.7</td><td>32.8</td><td>40.4</td><td>43.5</td><td>48.2</td></tr><tr><td>margin [30]</td><td>U</td><td>55.6</td><td>60.7</td><td>65.7</td><td>46.0</td><td>48.1</td><td>50.8</td><td>42.2</td><td>44.8</td><td>48.2</td><td>23.1</td><td>28.3</td><td>36.6</td><td>41.7</td><td>45.5</td><td>50.3</td></tr><tr><td>coreset [35]</td><td>D</td><td>54.3</td><td>59.1</td><td>64.6</td><td>45.1</td><td>46.7</td><td>48.9</td><td>42.4</td><td>44.2</td><td>47.1</td><td>23.9</td><td>27.8</td><td>34.3</td><td>41.4</td><td>44.5</td><td>48.7</td></tr><tr><td>BADGE [2]</td><td>H</td><td>56.2</td><td>60.6</td><td>65.7</td><td>45.8</td><td>48.2</td><td>50.7</td><td>43.1</td><td>45.7</td><td>48.7</td><td>24.3</td><td>29.6</td><td>38.3</td><td>42.4</td><td>46.0</td><td>50.9</td></tr><tr><td></td><td>CLUE (Ours)</td><td>H</td><td>56.3</td><td>60.7</td><td>65.3</td><td>46.8</td><td>49.0</td><td>51.4</td><td>43.7</td><td>46.5</td><td>49.4</td><td>25.6</td><td>31.1</td><td>38.9</td><td>43.1</td><td>46.8</td><td>51.3</td></tr><tr><td>DANN [10]</td><td>AADA 
[40]</td><td>H</td><td>53.2</td><td>57.4</td><td>62.8</td><td>44.8</td><td>46.5</td><td>49.2</td><td>41.3</td><td>43.5</td><td>46.1</td><td>21.9</td><td>25.8</td><td>32.4</td><td>40.3</td><td>43.3</td><td>47.6</td></tr><tr><td>from source</td><td>CLUE (Ours)</td><td>H</td><td>54.6</td><td>58.9</td><td>63.8</td><td>45.3</td><td>47.9</td><td>50.8</td><td>43.2</td><td>45.5</td><td>48.3</td><td>24.4</td><td>29.2</td><td>35.4</td><td>41.9</td><td>45.4</td><td>49.6</td></tr></table>

Table 1: Accuracies on target test set for 4 DomainNet shifts of increasing difficulty spanning 5 domains: Real (R), Clipart (C), Sketch (S), Painting (P) and Quickdraw (Q). We perform 10 rounds of Active DA with $B = 500$ and report results at 3 intermediate rounds (full plots in supplementary), as well as the 4-shift average (AVG). We compare CLUE against state-of-the-art methods for AL (entropy [45], margin [30], coreset [35], BADGE [2]) and Active DA (AADA), spanning different AL paradigms: uncertainty sampling (U), diversity sampling (D), and hybrid (H) combinations of the two. We use multiple learning strategies: finetuning (ft), MME [33] (state-of-the-art semi-supervised DA method), and semi-supervised DA via DANN [10]. Best performance is in bold, gray rows are our method.

<table><tr><td rowspan="2">DA method</td><td rowspan="2">AL method</td><td colspan="2">SVHN →</td><td>MNIST</td><td colspan="2">DSLR →</td><td>Amazon</td></tr><tr><td>30</td><td>60</td><td>150</td><td>30</td><td>60</td><td>150</td></tr><tr><td rowspan="6">ft from source</td><td>uniform</td><td>77.7</td><td>88.2</td><td>95.2</td><td>54.3</td><td>58.0</td><td>67.5</td></tr><tr><td>entropy [45]</td><td>65.8</td><td>75.6</td><td>92.9</td><td>51.2</td><td>52.4</td><td>59.1</td></tr><tr><td>margin [30]</td><td>82.0</td><td>89.3</td><td>95.5</td><td>52.4</td><td>54.4</td><td>65.5</td></tr><tr><td>coreset [35]</td><td>71.6</td><td>76.5</td><td>87.9</td><td>53.9</td><td>55.8</td><td>67.2</td></tr><tr><td>BADGE [2]</td><td>78.7</td><td>88.2</td><td>95.2</td><td>55.8</td><td>59.2</td><td>71.0</td></tr><tr><td>CLUE (Ours)</td><td>83.9</td><td>89.4</td><td>94.5</td><td>56.4</td><td>60.5</td><td>70.5</td></tr><tr><td rowspan="6">MME [33] from source</td><td>uniform</td><td>85.5</td><td>91.2</td><td>95.0</td><td>58.3</td><td>61.7</td><td>70.0</td></tr><tr><td>entropy [45]</td><td>81.3</td><td>85.7</td><td>93.9</td><td>54.9</td><td>56.5</td><td>66.2</td></tr><tr><td>margin [30]</td><td>88.4</td><td>91.5</td><td>96.6</td><td>54.7</td><td>58.5</td><td>70.6</td></tr><tr><td>coreset [35]</td><td>85.8</td><td>89.1</td><td>94.6</td><td>57.7</td><td>61.0</td><td>70.5</td></tr><tr><td>BADGE [2]</td><td>89.9</td><td>93.1</td><td>96.4</td><td>58.2</td><td>61.6</td><td>71.3</td></tr><tr><td>CLUE (Ours)</td><td>91.1</td><td>93.9</td><td>96.2</td><td>60.2</td><td>65.6</td><td>72.7</td></tr><tr><td>DANN [10]</td><td>AADA [40]</td><td>88.8</td><td>90.7</td><td>95.4</td><td>54.2</td><td>56.6</td><td>65.4</td></tr><tr><td>from source</td><td>CLUE (Ours)</td><td>90.9</td><td>93.1</td><td>95.3</td><td>59.1</td><td>64.5</td><td>72.1</td></tr></table>
|
| 193 |
+
|
| 194 |
+
Table 2: Active DA accuracies on target test set at 3 intermediate budgets (30, 60, 150) for: Middle: 30 rounds with $B = 10$ from SVHN→MNIST (DIGITS). Right: 10 rounds with $B = 30$ from DSLR→Amazon (Office). Best performance is in bold, gray rows are our method. For full plots see supplementary.
|
| 195 |
+
|
| 196 |
+
# 4.4. Results
|
| 197 |
+
|
| 198 |
+
We evaluate all methods across three ways of learning in the presence of a domain shift with the acquired labels:
|
| 199 |
+
|
| 200 |
+
1) FT from source: Finetuning a model trained on the source domain with acquired target labels.
|
| 201 |
+
2) MME [33] from source: Minimax entropy [33] (MME) is a state-of-the-art semi-supervised DA method that starts from a source model and minimizes an adversarial entropy loss for unsupervised domain alignment in addition to finetuning on labeled source and target data.
|
| 202 |
+
|
| 203 |
+
Tables 1 and 2 demonstrate our results on DomainNet, DIGITS, and Office. We make the following observations:
|
| 204 |
+
|
| 205 |
+
$\triangleright$ Uncertainty and diversity sampling are less effective for Active DA, frequently underperforming even random sampling. Approaches solely based on uncertainty (e.g. margin [30]) work well on relatively easier shifts $(\mathbb{R}\to \mathbb{C}$ with MME, SVHN $\rightarrow$ MNIST), but overall we find uncertainty-based (margin, entropy), and diversity-based (coreset) approaches generalize poorly to challenging shifts (e.g. S $\rightarrow$ P, C $\rightarrow$ Q), frequently underperforming even random sampling! On the other hand, hybrid approaches (CLUE and BADGE) that combine uncertainty and diversity are versatile across shift difficulties.
|
| 206 |
+
|
| 207 |
+
$\triangleright$ CLUE outperforms prior AL methods in the Active DA setting. Across learning strategies, shifts, benchmarks, and most rounds, CLUE consistently performs best. Averaged over 4 DomainNet shifts, CLUE outperforms margin-based uncertainty sampling and coreset-based diversity sampling at $B = 2k$ by $1.4\%$ and $3\%$ when finetuning, and $1.3\%$ and $2.3\%$ when adapting via MME (Tab. 1). Similarly, CLUE outperforms the next best-performing method (BADGE) by $0.7\%$ at $B = 2k$ when finetuning and $0.8\%$ with MME (4 shift average). While BADGE [2] is also a hybrid AL method that combines uncertainty and diversity sampling, it does so by clustering in a high-dimensional "gradient-embedding" space ( $\sim$ 176k dimensions on $\mathbf{C} \rightarrow \mathbf{S}$ with a ResNet34, versus 512-dimensional embeddings used by CLUE, details in Tab. 3), in which distance-based diversity measures may be less meaningful due to the curse of dimensionality. We note here that DomainNet is a complex benchmark with 345 categories and significant label noise, which often leads to relatively small absolute margins of improvement; however, our results demonstrate CLUE's versatility in generalizing
|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
Figure 3: SVHN $\rightarrow$ MNIST: We visualize the logits of a subset of incorrect (large, opaque circles) and correct (partly transparent circles) model predictions on the target domain after round 0, along with examples sampled by different methods. entropy [45] (left) acquires redundant samples, whereas coreset [35] (middle) does not account for areas of the feature space that are already well-aligned across domains. CLUE (right) constructs batches of dissimilar samples from dense regions with high uncertainty.
|
| 211 |
+
|
| 212 |
+
across diverse shifts without shift-specific tuning.
|
| 213 |
+
|
| 214 |
+
On DIGITS and Office, CLUE's gains are even more significant (Tab. 2). For instance at $B = 30$ on SVHN $\rightarrow$ MNIST, CLUE improves upon margin, coreset, and BADGE when finetuning by $1.9\%$ , $12.3\%$ , and $5.2\%$ , and $4\%$ , $2.5\%$ and $0.6\%$ on DSLR $\rightarrow$ Amazon.
|
| 215 |
+
|
| 216 |
+
Additional unsupervised adaptation helps in the Active DA setting. Across AL methods, we observe adaptation with MME to consistently outperform finetuning (e.g. by 2.4-2.7% accuracy on DomainNet).
|
| 217 |
+
|
| 218 |
+
$\triangleright$ CLUE significantly outperforms the state-of-the-art Active DA method AADA. AADA [40] acquires labels by using a domain classifier learned via DANN [10]. Thus, it is undefined in the FT and MME settings. For an apples-to-apples comparison, we report performance of CLUE +DANN in the last 2 rows of Tabs. 1 and 2. As seen, CLUE +DANN consistently outperforms AADA, the state-of-the-art Active DA method, e.g. by $0.4\% - 2\%$ on DomainNet. Further, we find the performance gap between our method and AADA [40] increases with increasing shift difficulty, as predictive uncertainty becomes increasingly unreliable ( $3.4\%$ gain at $B = 2k$ on the very hard $\mathrm{C} \rightarrow \mathrm{Q}$ shift). We observe similar improvements over AADA on the DIGITS and Office (Tab. 2) benchmarks, e.g. $2.4\%$ and $7.9\%$ at $B = 60$ . Further, our best performing CLUE + MME strategy improves the gains still further to as much as $3.5\%$ at $B = 2k$ on DomainNet and $9\%$ at $B = 60$ on Office!
As discussed in Sec. 3, the optimal label acquisition criterion may vary across shifts and stages of training as the model's uncertainty estimates and feature space evolve, and it is challenging for a single approach to work well consistently. Despite this, CLUE effectively trades off uncertainty and diversity to generalize reliably across shifts.
# 4.5. Analyzing and Ablating CLUE
Visualizing CLUE via t-SNE. We provide an illustrative comparison of sampling strategies using t-SNE [25]. Fig. 3 shows an initial feature landscape together with points selected by entropy-based uncertainty sampling, diversity-based coreset sampling, and CLUE at Round 0 on the SVHN $\rightarrow$ MNIST shift. We find that entropy [45] (left) samples uncertain but redundant points, coreset [35] samples diverse but not necessarily uncertain points, while our method, CLUE, samples points that are both diverse and uncertain. In the supplementary, we include visualizations over several rounds and find that CLUE consistently selects diverse target instances from dense, uncertain regions of the feature space.

Varying uncertainty measure in CLUE. In Fig. 4a, we consider alternative uncertainty measures for CLUE on the $\mathrm{C}\to \mathrm{S}$ shift. Our proposed use of sample entropy significantly outperforms a uniform sample weight and narrowly outperforms an alternative uncertainty measure, the sample margin score (the difference between the scores of the two most likely classes). This illustrates the importance of uncertainty weighting in biasing CLUE towards informative samples. We also experimented (not shown) with last-layer embeddings (instead of penultimate) for CLUE, and observed near-identical performance across multiple shifts, suggesting that CLUE is not sensitive to this choice.
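
To make the selection mechanism concrete, here is a minimal sketch of uncertainty-weighted clustering in the spirit of CLUE. The function name, array shapes, and the use of scikit-learn's KMeans (whose fit method accepts per-sample weights) are our assumptions for illustration, not the authors' released implementation:

```python
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.cluster import KMeans

def clue_select(embeddings, probs, budget, T=1.0):
    """Select `budget` diverse, uncertain target instances (CLUE-style sketch).

    embeddings: (N, D) penultimate-layer features of unlabeled target data.
    probs:      (N, C) softmax predictions of the current model.
    """
    # Temperature-scale the predictive distribution; lower T sharpens it.
    logits = np.log(probs + 1e-12) / T
    p = np.exp(logits - logits.max(axis=1, keepdims=True))
    p /= p.sum(axis=1, keepdims=True)

    # Per-sample weight: predictive entropy (high = more informative).
    weights = -(p * np.log(p + 1e-12)).sum(axis=1)

    # Uncertainty-weighted k-means, one cluster per label to acquire.
    km = KMeans(n_clusters=budget, n_init=10).fit(embeddings, sample_weight=weights)

    # Query the instance nearest to each weighted centroid.
    return np.unique(cdist(km.cluster_centers_, embeddings).argmin(axis=1))
```

Entropy weighting pulls the centroids toward dense, uncertain regions, so the nearest-neighbor queries are both informative and mutually dissimilar.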
Sensitivity to parameters. In Fig. 4 we measure CLUE's sensitivity to two parameters: the softmax temperature hyperparameter $T$ and the per-round labeling budget $B$.
i) Sensitivity to softmax temperature $T$. Recall from Sec. 3 that by tuning the softmax temperature in CLUE, we can vary the trade-off between uncertainty and diversity. In Fig. 4b we run a sweep over temperature values used for CLUE on $\mathrm{C} \rightarrow \mathrm{S}$. Lower values of temperature (which emphasize the role of uncertainty) improve performance, particularly at later rounds when uncertainty estimates are more reliable. We note that $T$ is an optional hyperparameter that may be tuned if a small target validation set is available, but CLUE obtains strong state-of-the-art results across DIGITS, Office, and DomainNet even with the default value of $T = 1.0$. On DomainNet, we further improve performance by selecting $T = 0.1$ via grid search on a single $\mathrm{C} \to \mathrm{S}$ shift and find that it generalizes to other DomainNet shifts.
(a) Varying uncertainty measure in CLUE.
(b) Measuring CLUE's sensitivity to temperature T.
Figure 4: (a), (b), (c): Ablating and analyzing CLUE on $\mathbf{C} \rightarrow \mathbf{S}$ . (d): Combining CLUE with different DA strategies on $\mathbf{C} \rightarrow \mathbf{S}$ . Best viewed in color. We perform 10 rounds of Active DA with $B = 500$ , and report accuracy mean and 1 standard deviation (via shading) over 3 runs.
(c) Measuring CLUE's sensitivity to budget B.
(d) Varying DA method while sampling via CLUE.
ii) Sensitivity to budget $B$. We now vary the per-round budget (and consequently the total number of active adaptation rounds) and report performance on the Clipart $\rightarrow$ Sketch shift. As seen in Fig. 4c, CLUE performs well across budget values of 100, 500, 1k, and 2.5k. We also observe consistent performance with a different budget ($B = 30$) on the SVHN $\rightarrow$ MNIST shift (details in supplementary).
Time complexity. Table 3 shows the average-case complexity and AL querying time-per-round on SVHN $\rightarrow$ MNIST and C $\rightarrow$ S. CLUE and BADGE, which achieve the best accuracy, are slower to run due to a (CPU) clustering step. CLUE can be optimized further via GPU acceleration, by using last (instead of penultimate) layer embeddings, or by pre-filtering data before clustering.
<table><tr><td></td><td>AL Strategy</td><td>Query Complexity</td><td>Query Time (DIGITS, C→S)</td></tr><tr><td rowspan="3">fwd + cluster</td><td>CLUE (Ours)</td><td>O(tNBD)</td><td>(60s, 16.2m)</td></tr><tr><td>BADGE [2]</td><td>O(NBDC)</td><td>(103s, 16.3m)</td></tr><tr><td>coreset [35]</td><td>O(CNB)</td><td>(52s, 2.8m)</td></tr><tr><td rowspan="3">fwd + rank</td><td>AADA [40]</td><td>O(NC)</td><td>(3.7s, 139s)</td></tr><tr><td>entropy [45]</td><td>O(NC)</td><td>(3.5s, 45s)</td></tr><tr><td>margin [30]</td><td>O(NC)</td><td>(3.2s, 45s)</td></tr></table>
Table 3: Query complexity and time-per-round for CLUE and prior AL strategies. $C$ and $N$ denote the number of classes and instances, $D$ denotes embedding dimensionality, $B$ denotes budget, $t$ is the number of clustering iterations, and fwd stands for forward pass.
# 4.6. CLUE across learning strategies
CLUE with different DA strategies. We now study CLUE's compatibility with a few additional domain adaptation strategies from the literature. Beyond the finetuning, DANN [10], and MME [33] strategies already studied in Sec. 4.4, we fix our sampling strategy to CLUE and vary the learning strategy to: i) MMD [24], a discrepancy-statistic based DA method; ii) VADA [37], a domain-classifier based method that uses virtual adversarial training; and iii) ENT [14], a variant of the MME method that uses standard rather than adversarial entropy minimization. Initial performance varies across methods since we employ unsupervised DA at Round 0.
In Fig. 4d, we observe that domain alignment with MME significantly outperforms all alternative methods. With all DA methods except VADA, we observe improvements over finetuning; however, MME clearly performs best. The improvements over DANN and VADA are consistent with Saito et al. [33], who find that domain-classifier based methods are less effective when some target labels are available.
How well does CLUE learn from scratch? While CLUE is designed for active learning under a domain shift, for completeness we also evaluate its performance against prior work when learning from "scratch" as is conventional in AL. We find that it outperforms prior work when finetuning using ImageNet [31] initialization on $\mathrm{C} \rightarrow \mathrm{S}$ , and performs on par with competing methods when finetuning from scratch on SVHN [26] (details in supplementary).
# 5. Conclusion
We address active domain adaptation, where the goal is to select target instances for labeling so as to generalize a trained source model to a new target domain. We show how existing active learning strategies based solely on uncertainty or diversity sampling are not effective for Active DA. We present CLUE, a novel label acquisition strategy for active sampling under a domain shift, that performs uncertainty-weighted clustering to select diverse, informative target instances for labeling from dense regions of the feature space. We demonstrate CLUE's effectiveness over competing active learning and Active DA methods across learning settings and domain shifts, and comprehensively analyze its behavior.
Acknowledgements. This work was supported by the DARPA LwLL program. We would like to thank Devi Parikh for guidance, and Prithvijit Chattopadhyay, Cornelia Kohler, and Shruti Venkatram for feedback on the draft.
# References
[1] David Arthur and Sergei Vassilvitskii. k-means++: The advantages of careful seeding. Technical report, Stanford, 2006.

[2] Jordan T Ash, Chicheng Zhang, Akshay Krishnamurthy, John Langford, and Alekh Agarwal. Deep batch active learning by diverse, uncertain gradient lower bounds. In International Conference on Learning Representations, 2020.

[3] Yoram Baram, Ran El-Yaniv, and Kobi Luz. Online choice of active learning algorithms. Journal of Machine Learning Research, 5(Mar):255-291, 2004.

[4] Klaus Brinker. Incorporating diversity in active learning with support vector machines. In Proceedings of the 20th International Conference on Machine Learning (ICML-03), pages 59-66, 2003.

[5] Rita Chattopadhyay, Wei Fan, Ian Davidson, Sethuraman Panchanathan, and Jieping Ye. Joint transfer and batch-mode active learning. In International Conference on Machine Learning, pages 253-261, 2013.

[6] David Cohn, Les Atlas, and Richard Ladner. Improving generalization with active learning. Machine Learning, 15(2):201-221, 1994.

[7] Jeff Donahue, Judy Hoffman, Erik Rodner, Kate Saenko, and Trevor Darrell. Semi-supervised domain adaptation with instance constraints. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 668-675, 2013.

[8] Melanie Ducoffe and Frederic Precioso. Adversarial active learning for deep networks: a margin based approach. arXiv preprint arXiv:1802.09841, 2018.

[9] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep Bayesian active learning with image data. In Proceedings of the 34th International Conference on Machine Learning - Volume 70, pages 1183-1192. JMLR.org, 2017.

[10] Yaroslav Ganin and Victor Lempitsky. Unsupervised domain adaptation by backpropagation. In International Conference on Machine Learning, pages 1180-1189, 2015.

[11] Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Laviolette, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. The Journal of Machine Learning Research, 17(1):2096-2030, 2016.

[12] Yonatan Geifman and Ran El-Yaniv. Deep active learning over the long tail. arXiv preprint arXiv:1711.00941, 2017.

[13] Daniel Gissin and Shai Shalev-Shwartz. Discriminative active learning. arXiv preprint arXiv:1907.06347, 2019.

[14] Yves Grandvalet, Yoshua Bengio, et al. Semi-supervised learning by entropy minimization. In CAP, pages 281-296, 2005.

[15] John A Hartigan and Manchek A Wong. Algorithm AS 136: A k-means clustering algorithm. Journal of the Royal Statistical Society, Series C (Applied Statistics), 28(1):100-108, 1979.

[16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 770-778, 2016.

[17] Judy Hoffman, Eric Tzeng, Taesung Park, Jun-Yan Zhu, Phillip Isola, Kate Saenko, Alexei Efros, and Trevor Darrell. CyCADA: Cycle-consistent adversarial domain adaptation. In International Conference on Machine Learning, pages 1989-1998, 2018.

[18] Wei-Ning Hsu and Hsuan-Tien Lin. Active learning by learning. In Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015.

[19] Joshua Zhexue Huang, Michael K Ng, Hongqiang Rong, and Zichen Li. Automated variable weighting in k-means type clustering. IEEE Transactions on Pattern Analysis and Machine Intelligence, 27(5):657-668, 2005.

[20] Andreas Kirsch, Joost van Amersfoort, and Yarin Gal. BatchBALD: Efficient and diverse batch acquisition for deep Bayesian active learning. In Advances in Neural Information Processing Systems, pages 7024-7035, 2019.

[21] Hans-Peter Kriegel, Erich Schubert, and Arthur Zimek. The (black) art of runtime evaluation: Are we comparing algorithms or implementations? Knowledge and Information Systems, 52(2):341-378, 2017.

[22] Yann LeCun, Léon Bottou, Yoshua Bengio, and Patrick Haffner. Gradient-based learning applied to document recognition. Proceedings of the IEEE, 86(11):2278-2324, 1998.

[23] Mingsheng Long, Yue Cao, Jianmin Wang, and Michael Jordan. Learning transferable features with deep adaptation networks. In International Conference on Machine Learning, pages 97-105, 2015.

[24] Mingsheng Long, Jianmin Wang, Guiguang Ding, Jiaguang Sun, and Philip S Yu. Transfer feature learning with joint distribution adaptation. In Proceedings of the IEEE International Conference on Computer Vision, pages 2200-2207, 2013.

[25] Laurens van der Maaten and Geoffrey Hinton. Visualizing data using t-SNE. Journal of Machine Learning Research, 9(Nov):2579-2605, 2008.

[26] Yuval Netzer, Tao Wang, Adam Coates, Alessandro Bissacco, Bo Wu, and Andrew Y Ng. Reading digits in natural images with unsupervised feature learning. In Neural Information Processing Systems (NeurIPS), 2011.

[27] Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In Proceedings of the IEEE International Conference on Computer Vision, pages 1406-1415, 2019.

[28] George R Price. Extension of covariance selection mathematics. Annals of Human Genetics, 35(4):485-490, 1972.

[29] Piyush Rai, Avishek Saha, Hal Daumé III, and Suresh Venkatasubramanian. Domain adaptation meets active learning. In Proceedings of the NAACL HLT 2010 Workshop on Active Learning for Natural Language Processing, pages 27-32. Association for Computational Linguistics, 2010.

[30] Dan Roth and Kevin Small. Margin-based active learning for structured output spaces. In European Conference on Machine Learning, pages 413-424. Springer, 2006.

[31] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, et al. ImageNet large scale visual recognition challenge. International Journal of Computer Vision, 115(3):211-252, 2015.

[32] Kate Saenko, Brian Kulis, Mario Fritz, and Trevor Darrell. Adapting visual category models to new domains. In European Conference on Computer Vision, pages 213-226. Springer, 2010.

[33] Kuniaki Saito, Donghyun Kim, Stan Sclaroff, Trevor Darrell, and Kate Saenko. Semi-supervised domain adaptation via minimax entropy. In Proceedings of the IEEE International Conference on Computer Vision, pages 8050-8058, 2019.

[34] Greg Schohn and David Cohn. Less is more: Active learning with support vector machines. In ICML, volume 2, page 6. Citeseer, 2000.

[35] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A core-set approach. In International Conference on Learning Representations, 2018.

[36] Burr Settles. Active learning literature survey. Technical report, University of Wisconsin-Madison Department of Computer Sciences, 2009.

[37] Rui Shu, Hung H Bui, Hirokazu Narui, and Stefano Ermon. A DIRT-T approach to unsupervised domain adaptation. In Proc. 6th International Conference on Learning Representations, 2018.

[38] Samarth Sinha, Sayna Ebrahimi, and Trevor Darrell. Variational adversarial active learning. In Proceedings of the IEEE International Conference on Computer Vision, pages 5972-5981, 2019.

[39] Jasper Snoek, Yaniv Ovadia, Emily Fertig, Balaji Lakshminarayanan, Sebastian Nowozin, D Sculley, Joshua Dillon, Jie Ren, and Zachary Nado. Can you trust your model's uncertainty? Evaluating predictive uncertainty under dataset shift. In Advances in Neural Information Processing Systems, pages 13969-13980, 2019.

[40] Jong-Chyi Su, Yi-Hsuan Tsai, Kihyuk Sohn, Buyu Liu, Subhransu Maji, and Manmohan Chandraker. Active adversarial domain adaptation. In The IEEE Winter Conference on Applications of Computer Vision, pages 739-748, 2020.

[41] Simon Tong and Daphne Koller. Support vector machine active learning with applications to text classification. Journal of Machine Learning Research, 2(Nov):45-66, 2001.

[42] Antonio Torralba and Alexei A Efros. Unbiased look at dataset bias. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1521-1528. IEEE, 2011.

[43] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7167-7176, 2017.

[44] Eric Tzeng, Judy Hoffman, Ning Zhang, Kate Saenko, and Trevor Darrell. Deep domain confusion: Maximizing for domain invariance. arXiv preprint arXiv:1412.3474, 2014.

[45] Dan Wang and Yi Shang. A new active labeling method for deep learning. In 2014 International Joint Conference on Neural Networks (IJCNN), pages 112-119. IEEE, 2014.

[46] Gert W Wolf. Facility location: concepts, models, algorithms and case studies. Series: Contributions to Management Science, edited by Zanjirani Farahani, Reza and Hekmatfar, Masoud. Heidelberg, Germany: Physica-Verlag, 2009. 2011.

[47] Ting Yao, Yingwei Pan, Chong-Wah Ngo, Houqiang Li, and Tao Mei. Semi-supervised domain adaptation with subspace learning for visual recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2142-2150, 2015.

[48] Yuli Zhang, Huaiyu Wu, and Lei Cheng. Some new deformation formulas about variance and covariance. In 2012 Proceedings of International Conference on Modelling, Identification and Control, pages 987-992. IEEE, 2012.

[49] Fedor Zhdanov. Diverse mini-batch active learning. arXiv preprint arXiv:1901.05954, 2019.
activedomainadaptationviaclusteringuncertaintyweightedembeddings/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:390e38ee85a6aff8e1e3bb63dc763aa59b3aad958c7d61d1bb395283c0a475bf
size 586415
activedomainadaptationviaclusteringuncertaintyweightedembeddings/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b70c18bd1573074dd94a047d79d0576b669b4d3f3a25299f8adb6d2f39d6e10
size 450721
activelearningfordeepobjectdetectionviaprobabilisticmodeling/ff739fee-deb8-48c9-98ba-2c7874439ba7_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bdd1fa223cf258c164027065f3483dc66afd03f7ef80b7e3ee63fa15ea17aa16
size 79624
activelearningfordeepobjectdetectionviaprobabilisticmodeling/ff739fee-deb8-48c9-98ba-2c7874439ba7_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d8f5c3fb1eae8334e2d614d8a86455a6d2686d09a4124ecc82ad25f01d5a91bd
size 98629
activelearningfordeepobjectdetectionviaprobabilisticmodeling/ff739fee-deb8-48c9-98ba-2c7874439ba7_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9b1f6e69184af9d2612e319a90c0fb0794eabf73d4ad1328ce4f4460afefa86b
size 2465522
activelearningfordeepobjectdetectionviaprobabilisticmodeling/full.md
ADDED
@@ -0,0 +1,320 @@
# Active Learning for Deep Object Detection via Probabilistic Modeling
Jiwoong Choi $^{1,3}$ , Ismail Elezi $^{2,3}$ , Hyuk-Jae Lee $^{1}$ , Clement Farabet $^{3}$ , and Jose M. Alvarez $^{3}$ $^{1}$ Seoul National University, $^{2}$ Technical University of Munich, $^{3}$ NVIDIA
{jwchoi, hjlee}@capp.snu.ac.kr, ismail.elezi@tum.de, {cfarabet, josea}@nvidia.com
# Abstract
Active learning aims to reduce labeling costs by selecting only the most informative samples in a dataset. Few existing works have addressed active learning for object detection. Most of these methods are based on multiple models or are straightforward extensions of classification methods, and hence estimate an image's informativeness using only the classification head. In this paper, we propose a novel deep active learning approach for object detection. Our approach relies on mixture density networks that estimate a probabilistic distribution for each localization and classification head's output. We explicitly estimate the aleatoric and epistemic uncertainty in a single forward pass of a single model. Our method uses a scoring function that aggregates these two types of uncertainties for both heads to obtain every image's informativeness score. We demonstrate the efficacy of our approach on the PASCAL VOC and MS-COCO datasets. Our approach outperforms single-model based methods and performs on par with multi-model based methods at a fraction of the computing cost. Code is available at https://github.com/NVlabs/AL-MDN.
# 1. Introduction
The performance of deep detection networks is dependent on the size of the labeled data [31, 32]. Motivated by this, researchers have explored smart strategies to select the most informative samples in the dataset for labeling, known as active learning [35]. Typically, this is done by devising a scoring function that computes the network's uncertainty, selecting to label the samples for which the network is least confident with regard to its predictions [2, 4, 40].
In general, the predictive uncertainty is decomposed into aleatoric and epistemic uncertainty [15, 20]. The former refers to the inherent noise in the data, such as sensor noise, and can be attributed to occlusions or lack of visual features [10, 24]. The latter refers to the uncertainty caused by the lack of knowledge of the model and is inversely proportional to the density of training data [38]. Modeling and distinguishing these two types of uncertainty is very important in active learning, as it allows deep learning models to know about their limitations [21, 38], i.e., to recognize suspicious predictions in a sample (aleatoric uncertainty) and to recognize samples that do not resemble the training set (epistemic uncertainty). To compute these types of uncertainty, researchers use multi-model based approaches, such as ensembles [2] or Monte Carlo (MC) dropout [13]. These methods reach good results but come with several limitations [11, 16]. In particular, being multi-model, they require a much higher computing cost, and in the case of ensembles, they also increase the number of the network's parameters [2]. Additionally, they rely only on classification uncertainty, totally ignoring the localization uncertainty.
Figure 1: Our approach predicts aleatoric and epistemic uncertainties for both the localization and classification heads in a single forward pass of a single model. We propose a scoring function that aggregates epistemic and aleatoric uncertainties from both heads into a single value. Then, the data points with the top-$K$ scores are sent for labeling.
In this paper, we propose a novel active learning approach for deep object detection. Our approach uses a single model with a single forward pass, significantly reducing the computing cost compared to multiple model-based methods, while still reaching high accuracy. To achieve this, our method utilizes both localization- and classification-based aleatoric and epistemic uncertainties. As shown in Fig. 1, we base our method on a mixture density network [3] that learns a Gaussian mixture model (GMM) for each of the network's outputs, i.e., localization and classification, to compute both aleatoric and epistemic uncertainties. To efficiently train the network, we propose a loss function that serves as a regularizer for inconsistent data, leading to more robust models. Our method estimates every image's informativeness score by aggregating all of the localization- and classification-based uncertainties for every object contained in the image. We empirically show that leveraging both types of uncertainty coming from the classification and localization heads is a critical factor for improving accuracy. We demonstrate the benefits of our approach on PASCAL VOC [9] and MS-COCO [30] in a single-stage architecture such as SSD [31], and show generalized performance in a two-stage architecture such as Faster-RCNN [32]. Our approach consistently outperforms single-model based methods, and compared to methods using multiple models, yields similar accuracy while significantly reducing the computing cost.
In summary, our contributions are the following:
- We propose a novel deep active learning method for object detection that leverages the aleatoric and epistemic uncertainties, by considering both the localization and classification information. Our method is efficient and uses a single forward pass in a single model.
- We propose a novel loss to train the GMM-based object detection network that leads to overall performance improvements in the network.
- We demonstrate the effectiveness of our approach using different models on two different datasets.
# 2. Related Work
Deep active learning for object detection has recently acquired interest. The work of [16] trains an ensemble [2] of neural networks and then selects the samples with the highest score defined by some acquisition function, i.e., entropy [36] or mutual information [5]. Concurrent work [11] explores similar directions, but approximates the uncertainty via MC-dropout [12, 26]. The work of [1] presents a method of calculating pixel scores and using them to select informative samples. Another approach [33] proposes a query-by-committee paradigm to choose the set of images to be queried. The work of [34] uses the feature space to select representative samples in the dataset, reaching good performance in object detection [40]. A different solution was given by [23], where the authors define two different scores: localization tightness, the overlapping ratio between the region proposal and the final prediction; and localization stability, based on the variation of predicted object locations when input images are corrupted by noise. In all cases, images with the highest scores are chosen to be labeled. The state-of-the-art (SOTA) method of [40] offers a heuristic but elegant solution while outperforming the other single-model based methods. During training, the method learns to predict the target loss for each sample. During the active learning stage, it chooses to label the samples with the highest predicted loss.
Most of the above-mentioned methods [11, 16, 23] require multiple models or multiple forward passes to calculate the image's informativeness score, resulting in a high computational cost. In addition, all those studies, despite focusing on active learning for object detection, either rely on heuristic methods to estimate localization uncertainty [23, 40], or cannot estimate it at all [1, 11, 16, 33, 34]. Therefore, while pointing in promising directions, they are less than satisfactory in terms of accuracy and computing cost. In contrast to those methods, our approach estimates and leverages both the localization and classification uncertainties to reach high accuracy, while using a single forward pass of a single model, significantly reducing the computational cost.
Mixture density networks have recently been used for several deep learning tasks. The approach of [8] focuses on the regression task for the steering angle. The works of [18, 39] attempt to solve a multimodal regression task. The work of [41] focuses on density estimation, while the work of [7] explores the supervised learning problem with corrupted data. However, these studies do not consider the classification task, which is an essential part of object detection [8, 18, 39], and none of them takes into account both types of uncertainty coming from the bounding box regression and classification tasks [7, 8, 18, 39, 41]. Moreover, none of those studies addresses the problem of active learning for object detection. In contrast, our approach estimates and leverages both the aleatoric and epistemic uncertainties for both tasks in the context of active learning for object detection.
# 3. Active Learning for Object Detection
The key novelty of our approach is designing the output layers of the neural network to predict a probability distribution, instead of predicting a single value for each output of the network (see Fig. 2a). To this end, we propose to make use of a mixture density network where the output of the network consists of the parameters of a GMM: the mean $\mu^k$ , the variance $\Sigma^k$ , and the mixture weight $\pi^k$ for the $k$ -th component of the GMM. Given these parameters, we can estimate the aleatoric $u_{al}$ and epistemic $u_{ep}$ uncertainties [8]:
$$
u_{al} = \sum_{k=1}^{K} \pi^{k} \Sigma^{k}, \qquad u_{ep} = \sum_{k=1}^{K} \pi^{k} \left\| \mu^{k} - \sum_{i=1}^{K} \pi^{i} \mu^{i} \right\|^{2}, \tag{1}
$$
where $K$ is the number of components in the GMM.
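
As a concrete illustration of Eq. 1, the following numpy sketch (function and array names are ours) computes both uncertainties for a single scalar output; the paper's $\|\cdot\|^2$ covers the vector-valued case:

```python
import numpy as np

def gmm_uncertainties(pi, mu, sigma):
    """Aleatoric and epistemic uncertainty of a GMM output (Eq. 1).

    pi:    (K,) mixture weights, summing to 1.
    mu:    (K,) component means.
    sigma: (K,) component variances.
    """
    u_al = np.sum(pi * sigma)             # weighted average of component variances
    mean = np.sum(pi * mu)                # overall mixture mean
    u_ep = np.sum(pi * (mu - mean) ** 2)  # disagreement between component means
    return u_al, u_ep

# Components that roughly agree -> low epistemic uncertainty:
print(gmm_uncertainties(np.array([0.5, 0.3, 0.2]),
                        np.array([1.0, 1.1, 0.9]),
                        np.array([0.2, 0.3, 0.1])))  # approx. (0.21, 0.0049)
```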
Figure 2: An overview of the proposed object detection network. The main difference with conventional object detectors [31, 32] is in the localization and classification heads (branches). a) Instead of having deterministic outputs, our approach learns the parameters of $K$ -components GMM for each of the outputs: coordinates of the bounding box in the localization head and the class density distribution in the classification (confidence) head (see Section 3.1). b) A classification head that improves the efficiency by eliminating variance parameters from GMM's classification head (see Section 3.2).
# 3.1. Object detection with probabilistic modeling
To introduce our approach, we first focus on the localization task and then extend it to the classification task. As we will show later in our experiments, our method is applicable to both single-stage and two-stage object detectors.
Localization: In object detection, a bounding box $b$ is defined by its coordinates for the center $(x$ and $y)$ , its width $(w)$ , and its height $(h)$ . In our work, instead of predicting a deterministic value, our mixture model predicts 3 groups of parameters for each bounding box: the mean $(\hat{\mu}_x, \hat{\mu}_y, \hat{\mu}_w,$ and $\hat{\mu}_h)$ , the variance $(\hat{\Sigma}_x, \hat{\Sigma}_y, \hat{\Sigma}_w,$ and $\hat{\Sigma}_h)$ , and the weights of the mixture $(\hat{\pi}_x, \hat{\pi}_y, \hat{\pi}_w,$ and $\hat{\pi}_h)$ .
Let $\{\hat{\pi}_b^k, \hat{\mu}_b^k, \hat{\Sigma}_b^k\}_{k = 1}^K$, $b \in \{x, y, w, h\}$, be the bounding box outputs obtained using our network. The parameters of a GMM with $K$ components for each coordinate of the bounding box are obtained as follows:
$$
\pi_{b}^{k} = \frac{e^{\hat{\pi}_{b}^{k}}}{\sum_{j=1}^{K} e^{\hat{\pi}_{b}^{j}}}, \qquad \mu_{b}^{k} = \hat{\mu}_{b}^{k}, \qquad \Sigma_{b}^{k} = \sigma(\hat{\Sigma}_{b}^{k}), \tag{2}
$$
where $\pi$ is the mixture weight for each component, $\mu$ is the predicted value for each bounding box coordinate, and $\Sigma$ is the variance for each coordinate, representing its aleatoric uncertainty. As suggested in [8], we use a softmax function to keep $\pi$ in probability space and a sigmoid function to satisfy the positivity constraint on the variance, $\Sigma_b^k \geq 0$.
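
A small numpy sketch of this transformation (Eq. 2); the function name and shapes are our assumptions:

```python
import numpy as np

def gmm_params(pi_raw, mu_raw, sigma_raw):
    """Map raw head outputs (each of shape (K,)) to valid GMM parameters (Eq. 2)."""
    pi = np.exp(pi_raw - pi_raw.max())
    pi /= pi.sum()                            # softmax: weights lie on the simplex
    mu = mu_raw                               # means are used as-is
    sigma = 1.0 / (1.0 + np.exp(-sigma_raw))  # sigmoid: variance constrained to (0, 1)
    return pi, mu, sigma
```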
Localization loss: The conventional bounding box regression loss, the smooth L1 loss [14], only considers the coordinates of the predicted bounding box and ground-truth (GT) box. Therefore, it cannot take into account the ambiguity (aleatoric uncertainty) of the bounding box. For training the mixture density network for localization, we propose a localization loss based on the negative log-likelihood loss.
Our loss regresses the parameters of the GMM to the offsets of the center $(x, y)$ , width $(w)$ , and height $(h)$ of the anchor (default) box $(d)$ for positive matches:
$$
\begin{aligned}
L_{loc}(\lambda, l, g) &= -\sum_{i \in Pos}^{N} \sum_{b} \lambda_{G}^{ij} \log\left( \sum_{k=1}^{K} \pi_{b}^{ik} \, \mathcal{N}\left( \hat{g}_{b}^{j} \mid \mu_{b}^{ik}, \Sigma_{b}^{ik} \right) + \varepsilon \right), \\
\lambda_{G}^{ij} &= \begin{cases} 1, & \text{if IoU} > 0.5, \\ 0, & \text{otherwise}, \end{cases} \qquad \hat{g}_{x}^{j} = \frac{g_{x}^{j} - d_{x}^{i}}{d_{w}^{i}}, \qquad \hat{g}_{y}^{j} = \frac{g_{y}^{j} - d_{y}^{i}}{d_{h}^{i}}, \\
\hat{g}_{w}^{j} &= \log\left( \frac{g_{w}^{j}}{d_{w}^{i}} \right), \qquad \hat{g}_{h}^{j} = \log\left( \frac{g_{h}^{j}}{d_{h}^{i}} \right), \tag{3}
\end{aligned}
$$
where $l$ denotes the GMM parameters of the bounding box $(\pi_b^{ik}, \mu_b^{ik},$ and $\Sigma_{b}^{ik})$, $N$ is the number of matched anchor boxes (called positive matches), $K$ is the number of mixtures, $\lambda_G^{ij}$ is an indicator for matching the $i$-th anchor box $d_b^i$ to the $j$-th GT box of category $G$, and $\hat{g}_b^j$ is the $j$-th GT box. In experiments, we set $\varepsilon = 10^{-9}$ for the numerical stability of the logarithm function.
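
The following PyTorch sketch illustrates the mixture negative log-likelihood of Eq. 3 for a single coordinate. Tensor names and shapes are our assumptions, and for stability we fold the computation into a log-sum-exp rather than adding $\varepsilon$ inside the sum as written in the paper:

```python
import math
import torch

def mixture_nll(pi, mu, sigma, target, eps=1e-9):
    """Mixture negative log-likelihood for one box coordinate (Eq. 3, sketch).

    pi, mu, sigma: (N, K) weights, means, and variances for N positive anchors.
    target:        (N,)   encoded GT offsets (the g-hat terms of Eq. 3).
    """
    var = sigma.clamp_min(eps)
    # log N(g | mu_k, Sigma_k) for every mixture component.
    log_prob = -0.5 * ((target.unsqueeze(1) - mu) ** 2 / var
                       + torch.log(2 * math.pi * var))
    # -log( sum_k pi_k N(.) ), evaluated stably in log space.
    return -torch.logsumexp(torch.log(pi + eps) + log_prob, dim=1).sum()
```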
Classification: We now focus on the classification head of the object detector. We model the output of every class as a GMM (see Fig. 2a). Our approach estimates the mean $\hat{\mu}_p^k$ and variance $\hat{\Sigma}_p^k$ for each class, and the weights of the mixture $\hat{\pi}^k$ for each component of the GMM. We process the parameters of the GMM following Eq. 2, and obtain the class probability distribution for the $k$-th mixture by using the reparameterization trick [25], applying Gaussian noise with variance $\Sigma_p^k$ to $\mu_p^k$ [24]:
$$
\hat{c}_{p}^{k} = \mu_{p}^{k} + \sqrt{\Sigma_{p}^{k}} \, \gamma, \qquad \gamma \sim \mathcal{N}(0, 1), \tag{4}
$$
where $\gamma$ is the auxiliary noise variable and has the same size as $\mu_p^k$ and $\Sigma_p^k$ .
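
In code, Eq. 4 is a one-liner (a PyTorch sketch; names are ours):

```python
import torch

def reparameterized_scores(mu, sigma):
    """Sample class scores via the reparameterization trick (Eq. 4, sketch)."""
    gamma = torch.randn_like(mu)      # gamma ~ N(0, 1), same shape as mu
    return mu + sigma.sqrt() * gamma  # c = mu + sqrt(Sigma) * gamma
```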
Classification loss: For training the mixture density network for classification, we propose a loss function that takes into account the IoU of the anchor box with respect to the GT box and incorporates hard negative mining. More precisely, we formulate the classification loss as a combination of two terms, $L_{cl}^{Pos}$ and $L_{cl}^{Neg}$, representing the contributions of positive and negative matches:
$$
L_{cl}^{Pos}(\lambda, c) = -\sum_{i \in Pos}^{N} \lambda_{G}^{ij} \sum_{k=1}^{K} \pi^{ik} \left( \hat{c}_{G}^{j} - \log \sum_{p=0}^{C} e^{\hat{c}_{p}^{ik}} \right) \tag{5}
$$

$$
L_{cl}^{Neg}(c) = -\sum_{i \in Neg}^{M \times N} \sum_{k=1}^{K} \pi^{ik} \left( \hat{c}_{0}^{i} - \log \sum_{p=0}^{C} e^{\hat{c}_{p}^{ik}} \right),
$$
where $N$ is the number of positive matches, $K$ is the number of mixtures, $C$ is the number of classes, with 0 representing the background class $\hat{c}_0^i$, $\hat{c}_G^j$ is the GT class for the $j$-th GT box, $\hat{c}_p^{ik}$ is the result calculated by Eq. 4, $\lambda_G^{ij}$ is the same as in Eq. 3, and $M$ is the hard negative mining ratio. Instead of using all the negative matches, we sort them using the proposed mixture classification loss and choose the top $M \times N$ as the final negative matches for training. In experiments, we set $M$ to 3 as suggested in [31].
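
A hedged PyTorch sketch of Eq. 5, including the hard negative mining step; the shapes, names, and the assumption that negative anchors carry label 0 are ours:

```python
import torch

def mixture_cls_loss(pi, scores, labels, pos_mask, M=3):
    """Mixture classification loss with hard negative mining (Eq. 5, sketch).

    pi:       (A, K)    mixture weights per anchor.
    scores:   (A, K, C) reparameterized class scores from Eq. 4; class 0 = background.
    labels:   (A,)      matched GT class per anchor (0 for negatives).
    pos_mask: (A,)      boolean, True for positive matches (IoU > 0.5).
    """
    K = scores.size(1)
    log_z = torch.logsumexp(scores, dim=2)              # log sum_p exp(c_p), (A, K)
    idx = labels.view(-1, 1, 1).expand(-1, K, 1)
    c_label = scores.gather(2, idx).squeeze(2)          # score of the matched class
    per_anchor = -(pi * (c_label - log_z)).sum(dim=1)   # (A,)

    pos_loss = per_anchor[pos_mask].sum()

    # Hard negative mining: keep only the M*N highest-loss negatives.
    n_pos = int(pos_mask.sum())
    neg = per_anchor[~pos_mask]
    k = min(M * n_pos, neg.numel())
    neg_loss = neg.topk(k).values.sum() if k > 0 else scores.new_zeros(())
    return pos_loss + neg_loss
```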
Final loss: We define the overall loss to train the object detector using mixture density network as:
$$
L = \begin{cases} \frac{1}{N} \left( L_{loc}(\lambda, l, g) / \eta + L_{cl}^{Pos}(\lambda, c) + L_{cl}^{Neg}(c) \right), & \text{if } N > 0, \\ 0, & \text{otherwise}, \end{cases} \tag{6}
$$
where $N$ is the number of positive matches. In experiments, we set $\eta$ to 2 as suggested in [6].
At inference, we can compute the coordinates of the bounding box $R_{b}$ and the confidence score for each class $P_{i}$ by summing the components of the mixture model as follows:
$$
\text{Localization: } R_{b} = \sum_{k=1}^{K} \pi_{b}^{k} \mu_{b}^{k}, \tag{7}
$$
Classification: $P_{i} = \sum_{k = 1}^{K}\pi^{k}\frac{e^{\mu_{i}^{k}}}{\sum_{j = 0}^{C}e^{\mu_{j}^{k}}}$
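
A numpy sketch of this inference step (Eq. 7); shapes and names are our assumptions:

```python
import numpy as np

def mixture_inference(pi_b, mu_b, pi_c, mu_c):
    """Collapse the mixtures into point predictions at inference (Eq. 7, sketch).

    pi_b, mu_b: (4, K) box mixture weights and means for (x, y, w, h).
    pi_c:       (K,)   classification mixture weights.
    mu_c:       (K, C) per-component class scores.
    """
    box = (pi_b * mu_b).sum(axis=1)             # R_b: weighted mean per coordinate
    e = np.exp(mu_c - mu_c.max(axis=1, keepdims=True))
    softmax = e / e.sum(axis=1, keepdims=True)  # per-component class distribution
    scores = (pi_c[:, None] * softmax).sum(axis=0)  # P_i: mixture of softmaxes
    return box, scores
```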
# 3.2. Improving parameter efficiency
In order to predict a probability distribution of the output values, our approach involves modifying the last layer of the network and therefore incurs an increase in the number of parameters, especially in the classification head. More precisely, for an output feature map of size $F \times F$ , with $C$ classes, $D$ anchor boxes, and each bounding box defined using 4 coordinates, the number of parameters in the new layer added to estimate a $K$ -component GMM with 3 parameters is $F \times F \times D \times (4 \times 3 \times K)$ for the localization and $F \times F \times D \times (C \times 2 \times K + K)$ for classification. We see that the number of parameters in the classification head is proportional to the number of classes.
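
Plugging illustrative values into these formulas (the numbers below are our assumptions, loosely an SSD-style output layer, not figures from the paper) shows how the classification head dominates the added parameters:

```python
# Feature map side F, anchors D, classes C (incl. background), mixtures K.
F, D, C, K = 38, 4, 21, 4  # illustrative values only

loc_params = F * F * D * (4 * 3 * K)      # 4 coords x {pi, mu, Sigma} x K
cls_full   = F * F * D * (C * 2 * K + K)  # {mu, Sigma} per class + pi per component
cls_eff    = F * F * D * (C * K + K)      # Sec. 3.2: per-class variances dropped

print(loc_params, cls_full, cls_eff)      # 277248 993472 508288
```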
In this section, we focus on improving the efficiency of the algorithm by reducing the number of parameters in the classification head. To this end, as shown in Fig. 2b, we relax the problem of estimating the variance $\Sigma_{p}$, reducing the number of parameters to $F\times F\times D\times (C\times K + K)$. Instead, we obtain class probabilities as $\hat{c}_p^k = \mathrm{Softmax}(\mu_p^k)$, and use them to estimate the aleatoric uncertainty as follows:
$$
u_{al} = \sum_{k=1}^{K} \pi^{k} \left( \mathrm{diag}\left( \hat{c}_{p}^{k} \right) - \left( \hat{c}_{p}^{k} \right)^{\otimes 2} \right), \tag{8}
$$
where $\mathrm{diag}(q)$ is a diagonal matrix with the elements of the vector $q$ and $q^{\otimes 2} = qq^{T}$. In this case, $u_{al}$ is a $C \times C$ matrix where the value of each diagonal element can be interpreted as a class-specific aleatoric uncertainty [27].
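
A numpy sketch of Eq. 8 (names are ours):

```python
import numpy as np

def class_specific_aleatoric(pi, probs):
    """Per-class aleatoric uncertainty from softmax outputs (Eq. 8, sketch).

    pi:    (K,)   mixture weights.
    probs: (K, C) softmax class probabilities per component.
    """
    C = probs.shape[1]
    u_al = np.zeros((C, C))
    for k, c in enumerate(probs):
        u_al += pi[k] * (np.diag(c) - np.outer(c, c))  # diag(c) - c c^T
    return np.diag(u_al)  # diagonal entries: class-specific uncertainties
```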
Finally, we modify the classification loss for training the model with improved parameter efficiency as follows:
$$
L_{cl}^{Pos}(\lambda, c) = -\sum_{i \in Pos}^{N} \lambda_{G}^{ij} \sum_{k=1}^{K} \pi^{ik} \left( \hat{c}_{G}^{j} - \log \sum_{p=0}^{C} e^{\hat{\mu}_{p}^{ik}} \right) \tag{9}
$$

$$
L_{cl}^{Neg}(c) = -\sum_{i \in Neg}^{M \times N} \sum_{k=1}^{K} \pi^{ik} \left( \hat{c}_{0}^{i} - \log \sum_{p=0}^{C} e^{\hat{\mu}_{p}^{ik}} \right),
$$
where all parameters are the same as in Eq. 5, except that the class probabilities are computed from $\hat{\mu}_p^{ik}$.
# 3.3. Scoring function
The scoring function in active learning provides a single value per image indicating its informativeness. Our scoring function estimates the informativeness of an image by aggregating all the aleatoric and epistemic uncertainty values for each detected object in the image.
Specifically, let $U = \{u^{ij}\}$ be the set of uncertainty values (aleatoric or epistemic) of a group of images, where $u^{ij}$ is the uncertainty for the $j$-th object in the $i$-th image. For localization, $u^{ij}$ is the maximum value over the 4 bounding box outputs. We first normalize these values using z-score normalization $(\tilde{u}^{ij} = (u^{ij} - \mu_U) / \sigma_U)$ to compensate for the fact that the values for the coordinates of the bounding box are unbounded and each uncertainty of an image might have a different range of values. We then assign to each image the maximum uncertainty over its detected objects, $u^i = \max_j\tilde{u}^{ij}$. We empirically find that taking the maximum over the coordinates and the objects performs better than taking the average.
Using the algorithm described above, we obtain four different normalized uncertainty values for each image: epistemic and aleatoric for classification and localization, $\mathbf{u} = \{u_{ep_c}^i, u_{al_c}^i, u_{ep_b}^i, u_{al_b}^i\}$, respectively. The remaining step is to aggregate these scores into a single one. We explore different scoring functions that aggregate these uncertainties, including summing them or taking their maximum, as in other active learning studies [16, 33]. As we will show in our experiments, taking the maximum over them achieves the highest results.
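
A numpy sketch of the full scoring pipeline described above; the dictionary layout and names are our assumptions, and it assumes every image has at least one detection:

```python
import numpy as np

def image_scores(uncertainties):
    """Aggregate per-object uncertainties into one score per image (Sec. 3.3, sketch).

    uncertainties: dict mapping the four uncertainty types (e.g. 'ep_c', 'al_c',
        'ep_b', 'al_b') to lists of per-image arrays of per-object values.
    """
    per_type = []
    for per_image in uncertainties.values():
        pool = np.concatenate(per_image)
        mu, sd = pool.mean(), pool.std()       # z-score over the unlabeled pool
        per_type.append([((u - mu) / sd).max() for u in per_image])  # max over objects
    return np.max(np.array(per_type), axis=0)  # final score: max over the four types

# The B images with the highest scores are sent for labeling:
# query_idx = np.argsort(-image_scores(u))[:B]
```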
<table><tr><td></td><td></td><td colspan="2">(a) VOC07</td><td colspan="2">(b) MS-COCO</td></tr><tr><td>Method</td><td>Head</td><td>IoU>0.5</td><td>IoU>0.75</td><td>IoU>0.5</td><td>IoU>0.75</td></tr><tr><td>SSD</td><td>-</td><td>69.29±0.51</td><td>43.36±1.24</td><td>25.63±0.40</td><td>11.93±0.60</td></tr><tr><td>SGM</td><td>Loc</td><td>70.20±0.27</td><td>45.39±0.23</td><td>27.20±0.08</td><td>12.70±0.16</td></tr><tr><td>MDN</td><td>Loc</td><td>70.09±0.22</td><td>46.01±0.27</td><td>27.67±0.12</td><td>13.53±0.05</td></tr><tr><td>SGM</td><td>Cl</td><td>69.95±0.41</td><td>44.25±0.26</td><td>27.23±0.12</td><td>12.50±0.08</td></tr><tr><td>MDN</td><td>Cl</td><td>70.47±0.17</td><td>44.47±0.06</td><td>27.33±0.09</td><td>12.67±0.09</td></tr><tr><td>\( Ours_{gmm} \)</td><td>Loc+Cl</td><td>70.19±0.36</td><td>46.11±0.38</td><td>27.70±0.08</td><td>13.57±0.19</td></tr><tr><td>\( Ours_{eff} \)</td><td>Loc+Cl</td><td>70.45±0.06</td><td>46.18±0.26</td><td>27.33±0.04</td><td>13.33±0.12</td></tr></table>
Table 1: mAP (in %) of different instances of our approach compared to the original SSD network. SGM and MDN refer to single and multiple Gaussian models, and we apply those to localization (Loc), classification (Cl), and their combination (Loc+Cl).
# 4. Experiments
In this section, we demonstrate the benefits of our approach. We first study the impact of using probabilistic modeling for the object detector and then analyze the proposed scoring function and relevant SOTA approaches in the context of active learning.
Datasets: We use the PASCAL VOC [9] and MS-COCO [30] datasets. For PASCAL VOC, which contains 20 object categories, we use VOC07 (VOC2007) trainval and VOC07+12 trainval (union of VOC2007 and VOC2012) for training and evaluate our results on VOC07 test. For MS-COCO, which contains 80 object categories, we use MS-COCO train2014 for training and evaluate our results on val2017.
Experimental settings: We employ Single Shot MultiBox Detector (SSD) [31], which is widely used in active learning studies [33, 40], with a VGG-16 backbone [37]. We train our models for 120,000 iterations using SGD with a batch size of 32 and a maximum learning rate of 0.001. We use a learning rate warm-up strategy for the first 1,000 iterations and divide the learning rate by 10 after 80,000 and 100,000 iterations. We set the number of Gaussian mixtures to 4, and in the supplementary materials, we provide an ablation study on the number of mixtures. Unless specified otherwise, we report the performance using the average and standard deviation of mAP for three independent trials.
# 4.1. Object detection with probabilistic modeling
We first analyze the impact of using our proposed probabilistic modeling for object detection on PASCAL VOC and MS-COCO. For MS-COCO, we use a random subset of 5,000 training images from train2014. We compare the accuracy of our GMM $Ours_{gmm}$ and the model with improved parameter efficiency $Ours_{eff}$ to the SSD [31] and several network configurations either using single or multiple Gaussians for the classification or localization heads.

Figure 3: Examples of aleatoric and epistemic uncertainties for inaccurate detections; see more examples in the supplementary material. Starting from the top-left image and going clockwise: Person is a false positive; Person bounding box is not correct; A sheep is misclassified as a bird; A sheep is misclassified as a cow.
In Tab. 1a and Tab. 1b, we summarize the results of this experiment performed on VOC07 and MS-COCO, respectively. As shown, all networks that include probabilistic modeling outperform the SSD on both datasets. This is because of the regularization effect of the proposed loss function, which attenuates the loss for samples with high aleatoric uncertainty [6]. As a result, we obtain models that are robust to noisy data. Considering both the normal (IoU>0.5) and the strict metric (IoU>0.75), $\text{Ours}_{gmm}$ and $\text{Ours}_{eff}$ outperform all other variations on VOC07. On MS-COCO, $\text{Ours}_{gmm}$ outperforms all other instances and the baseline, while $\text{Ours}_{eff}$ reaches competitive results. We expect the amount of noisy data in MS-COCO to be larger than in PASCAL VOC because MS-COCO has more diverse data. As shown in Eq. 9, there is no aleatoric uncertainty in $\text{Ours}_{eff}$'s classification loss and therefore, we argue that the regularization by aleatoric uncertainty has a greater effect on MS-COCO.
In Fig. 3, we present representative examples of uncertainty scores for several images where the detector fails to detect the object. As shown, each uncertainty value (bold numbers in Fig. 3) provides a different insight into some particular failure. Localization uncertainties are related to the accuracy of the bounding box prediction, whereas classification uncertainties are related to the accuracy of the category prediction. Interestingly, in these examples, even if the predictions are wrong, the uncertainty values seem to be uncorrelated, suggesting each uncertainty could predict inaccurate results independently. From these results, we can conclude that the proposed approach not only computes uncertainty in a single forward pass of a single model but also boosts the performance of the detection network. As shown in the next experiment, combining these values will improve the data selection process during active learning.
<table><tr><td rowspan="2">Aggregation function</td><td colspan="3">mAP in % (# images)</td></tr><tr><td>1st (2k)</td><td>2nd (3k)</td><td>3rd (4k)</td></tr><tr><td>random sampling</td><td>62.43±0.10</td><td>66.36±0.13</td><td>68.47±0.09</td></tr><tr><td>u_alb</td><td>62.43±0.10</td><td>67.06±0.18</td><td>68.84±0.18</td></tr><tr><td>u_epb</td><td>62.43±0.10</td><td>66.75±0.26</td><td>69.01±0.17</td></tr><tr><td>u_alc</td><td>62.43±0.10</td><td>67.09±0.09</td><td>68.75±0.08</td></tr><tr><td>u_epc</td><td>62.43±0.10</td><td>66.51±0.12</td><td>68.95±0.13</td></tr><tr><td>∑j∈{alb,epb}uj</td><td>62.43±0.10</td><td>67.01±0.10</td><td>68.58±0.29</td></tr><tr><td>∑j∈{alc,epc}uj</td><td>62.43±0.10</td><td>67.07±0.27</td><td>69.03±0.20</td></tr><tr><td>∑j∈{alb,alc}uj</td><td>62.43±0.10</td><td>66.96±0.08</td><td>68.92±0.23</td></tr><tr><td>∑j∈{epb,epc}uj</td><td>62.43±0.10</td><td>66.49±0.14</td><td>68.62±0.24</td></tr><tr><td>∑j∈{alb,epb,alc,epc}uj</td><td>62.43±0.10</td><td>67.04±0.28</td><td>69.09±0.30</td></tr><tr><td>maxj∈{alb,epb}uj</td><td>62.43±0.10</td><td>66.82±0.21</td><td>68.95±0.22</td></tr><tr><td>maxj∈{alc,epc}uj</td><td>62.43±0.10</td><td>66.87±0.14</td><td>68.99±0.31</td></tr><tr><td>maxj∈{alb,alc}uj</td><td>62.43±0.10</td><td>67.18±0.10</td><td>69.06±0.25</td></tr><tr><td>maxj∈{epb,epc}uj</td><td>62.43±0.10</td><td>66.72±0.10</td><td>68.99±0.21</td></tr><tr><td>maxj∈{alb,epb,alc,epc}uj</td><td>62.43±0.10</td><td>67.32±0.12</td><td>69.43±0.11</td></tr></table>

Table 2: VOC07: Comparison of scoring aggregation functions for active learning based on the aleatoric uncertainty, epistemic uncertainty, and their combination for each task.
# 4.2. Active learning evaluation
We now focus on evaluating the performance of our active learning approach on the PASCAL VOC and MS-COCO datasets. We use an initial set of 2,000 samples for VOC07, 1,000 for VOC07+12 as suggested by [40], and 5,000 for MS-COCO as suggested by [23]. Then, during the active learning stage, for each unlabeled image, we apply non-maximum suppression and compute the uncertainties for each of the surviving objects. The scoring function aggregates these uncertainties using the maximum or sum to provide the final informativeness score for the image. We score the set of unlabeled images and select the 1,000 images [40] with the highest scores. Then, we add them to the labeled training set and repeat this process for several active learning cycles. For every active learning iteration, we retrain the model from ImageNet pretrained weights.
Scoring aggregation function: We compare the active learning performance obtained using different functions to aggregate the aleatoric and epistemic uncertainties of both the classification and localization heads. In particular, we compare seven different instances of our approach with random sampling: 1) only the aleatoric or epistemic uncertainty on each task; 2) the sum of aleatoric and epistemic uncertainty on the localization or classification head; 3) the sum of aleatoric or epistemic uncertainty on the localization and classification; 4) the sum of aleatoric and epistemic uncertainties for both localization and classification; 5) the maximum of aleatoric and epistemic uncertainty on the localization or classification head; 6) the maximum of aleatoric or epistemic uncertainty on the localization and classification; and 7) the maximum value of these four uncertainties. The results for this comparison are shown in Tab. 2. Our approach using the maximum value of the aleatoric and epistemic uncertainties of both localization and classification tasks consistently outperforms all the other aggregation functions in each active learning iteration. More concretely, the maximum over all uncertainties for both tasks selects better data during active learning than the alternatives. Based on these results, we use the maximum value of all uncertainties as the scoring function when comparing with other active learning studies.
<table><tr><td></td><td colspan="2">Localization</td><td colspan="2">Classification</td></tr><tr><td></td><td>Aleatoric \( u_{al_b} \)</td><td>Epistemic \( u_{ep_b} \)</td><td>Aleatoric \( u_{al_c} \)</td><td>Epistemic \( u_{ep_c} \)</td></tr><tr><td>\( u_{al_b} \)</td><td>100</td><td>48</td><td>6</td><td>11</td></tr><tr><td>\( u_{ep_b} \)</td><td>48</td><td>100</td><td>7</td><td>14</td></tr><tr><td>\( u_{al_c} \)</td><td>6</td><td>7</td><td>100</td><td>33</td></tr><tr><td>\( u_{ep_c} \)</td><td>11</td><td>14</td><td>33</td><td>100</td></tr></table>
Table 3: Overlapping ratio (in %) of selected images as a function of the type of uncertainty used.
<table><tr><td rowspan="2"></td><td colspan="3">mAP in % (# images)</td><td rowspan="2">Number of para. (×10^6)</td><td rowspan="2">Forward time (sec)</td></tr><tr><td>1st (2k)</td><td>2nd (3k)</td><td>3rd (4k)</td></tr><tr><td>Random [31]</td><td>62.43±0.10</td><td>66.36±0.13</td><td>68.47±0.09</td><td>52.35</td><td>0.031</td></tr><tr><td>Entropy [33]</td><td>62.43±0.10</td><td>66.85±0.12</td><td>68.70±0.18</td><td>52.35</td><td>0.031</td></tr><tr><td>Core-set [34]</td><td>62.43±0.10</td><td>66.57±0.20</td><td>68.57±0.26</td><td>52.35</td><td>0.031</td></tr><tr><td>LLAL [40]</td><td>62.47±0.16</td><td>67.02±0.11</td><td>68.90±0.15</td><td>52.71</td><td>0.036</td></tr><tr><td>MC-dropout [11]</td><td>62.43±0.19</td><td>67.10±0.07</td><td>69.39±0.09</td><td>52.35</td><td>0.689</td></tr><tr><td>Ensemble [16]</td><td>62.43±0.10</td><td>67.11±0.26</td><td>69.26±0.14</td><td>157.05</td><td>0.093</td></tr><tr><td>\( Ours_{gmm} \)</td><td>62.43±0.10</td><td>67.32±0.12</td><td>69.43±0.11</td><td>52.35</td><td>0.031</td></tr><tr><td>\( Ours_{eff} \)</td><td>62.91±0.16</td><td>67.61±0.17</td><td>69.66±0.17</td><td>41.12</td><td>0.029</td></tr></table>
Table 4: VOC07: Comparison of mAP and computing cost of active learning with most relevant approaches. Para. and sec refer to parameters and seconds, respectively.
In Tab. 3, we summarize the overlap in the selection as a function of the uncertainty measure. The overlapping ratio between the two uncertainty types is $48\%$ for localization and $33\%$ for classification. More importantly, when we consider the uncertainties of localization and classification jointly, the overlapping ratio decreases to at most $14\%$. This suggests that the uncertainty measures obtained for localization and classification capture different information, and their combination improves the image selection process.
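For instance, the entries of Tab. 3 can be computed directly from the selected image sets; a minimal sketch:

```python
def overlap_ratio(selection_a, selection_b):
    """Percentage of images shared by two equally sized selections."""
    a, b = set(selection_a), set(selection_b)
    return 100.0 * len(a & b) / len(a)  # assumes |a| == |b|

print(overlap_ratio(["i1", "i2", "i3"], ["i2", "i3", "i4"]))  # -> ~66.67
```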
Comparison to SOTA on VOC07: In Tab. 4, we summarize the active learning results and the computing cost of our method compared to the most relevant active learning approaches in the literature. To compare the computing cost, we provide the number of parameters and the forward time of each method. In general, a fast forward (and backward) step and a small model size lead to a lower training cost and a shorter data sampling time during active learning [19, 22].
To focus solely on active learning, we reproduce all numbers by applying each sampling method to the proposed GMM architecture, where the output is a mixture distribution (i.e., the same model as $\text{Ours}_{\text{gmm}}$). For ensembles, we follow [2], building an ensemble of three independent models. For MC-dropout, we add dropout layers with $p = 0.1$ to the six convolutional layers composing the extra feature layers in SSD, and compute the image scores using 25 forward passes [2]. For these two methods and the entropy-based method, we follow the most common approach in the literature and estimate the final image score as the average entropy of the classification head [16]. For core-set [34], we follow [40], using the features of fully connected layer-7 in VGG-16. For LLAL [40], we implement the learning loss prediction module on the proposed GMM architecture. As a baseline, we use random sampling on our GMM architecture. Note that we train all methods with exactly the same hyperparameters described in the experimental settings. As shown in Tab. 4, both instances of our approach consistently outperform all the other single-model based methods [31, 33, 34, 40] in every active learning iteration. Compared to multi-model based methods [11, 16], our proposed methods show higher accuracy while requiring a significantly lower computing cost. These results demonstrate that, despite having a lower computational cost, our proposed method improves the active learning sampling performance compared to previous works.

Figure 4: VOC07+12: a) Comparison to published works using a single model for scoring. Numbers are taken from [40]; b) Comparison to multiple model-based methods, ensemble and MC-dropout. Details of the numbers to reproduce the plot are in the supplementary material.

Figure 5: VOC07+12: Computing cost comparison to the baseline and methods using multiple models; model parameters in millions (M) and forward time in seconds (sec). See the supplementary material for numerical details.
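For reference, a sketch of the MC-dropout scoring described above, assuming a PyTorch detector whose forward pass returns per-box class probabilities; `enable_dropout` and the output shape are our assumptions, not the authors' code:

```python
import torch

def enable_dropout(model: torch.nn.Module) -> None:
    """Switch only the dropout layers to train mode so they stay stochastic."""
    for m in model.modules():
        if isinstance(m, (torch.nn.Dropout, torch.nn.Dropout2d)):
            m.train()

def mc_dropout_score(model, image, passes=25):
    """Average classification entropy over `passes` stochastic forward passes.

    Assumes `model(x)` returns per-box class probabilities of shape
    (num_boxes, num_classes); this interface is illustrative only.
    """
    model.eval()
    enable_dropout(model)
    entropies = []
    with torch.no_grad():
        for _ in range(passes):
            probs = model(image.unsqueeze(0))
            ent = -(probs * torch.log(probs + 1e-12)).sum(dim=-1)  # per box
            entropies.append(ent.mean())  # average over boxes
    return torch.stack(entropies).mean().item()
```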
Comparison to SOTA on VOC07+12: We now compare our approach to existing single-model based approaches on VOC07+12. Here, we consider the SOTA results reported in [40], including LLAL [40] and core-set [34], in addition to entropy [33] and random sampling. For a fair comparison, we use the same open-source code and settings as [40]. To focus solely on active learning, we compare the performance based on the same baseline as [40], i.e., SSD. To do this, we train the SSD using a dataset sampled by our proposed scoring function, both with the proposed GMM architecture and with the architecture with improved parameter efficiency. To verify the influence of the initial training set, we run 5 independent trials with different seeds for the initial choice of the labeled set. We obtain an average mAP of 0.5246 with a standard deviation of 0.003, which suggests little variation when experiments use a different initial subset of images. As shown in Fig. 4a, our method outperforms all the other single-model based methods. In the last active learning iteration, our approach achieves 0.7598 mAP, which is 2.6 percentage points higher than the score achieved by LLAL [40] (0.7338 mAP), a substantial improvement for active learning based on a single model.
Finally, we compare our approach with methods using multiple models, i.e., ensembles [16] and MC-dropout [11]. For ensembles and MC-dropout, we follow the same design as in Tab. 4 and apply it to the SSD. In Fig. 4b and Fig. 5, we present the accuracy and computational cost comparison of these methods. As shown in Fig. 4b, in terms of accuracy, our approach performs on par with MC-dropout and ensembles. However, our method uses a single forward pass of a single model to estimate the uncertainties, which is more efficient than ensemble and MC-dropout based methods. With respect to the number of parameters, MC-dropout has the same number of parameters as SSD since dropout layers do not add any new parameters, but it requires multiple forward passes. Our approach adds extra parameters for the estimation of the two types of uncertainty to the last layer of each head; therefore, its number of parameters is larger than that of SSD. In ensemble-based methods, the number of parameters is proportional to the number of SSD models in the ensemble [28]. As shown in Fig. 5, our method requires significantly less computing cost than MC-dropout and ensemble-based methods. In summary, our method provides the best trade-off between accuracy and computing cost for active learning.

<table><tr><td rowspan="2"></td><td colspan="3">mAP in % (# images)</td><td rowspan="2">Number of para. (×10^6)</td><td rowspan="2">Forward time (sec)</td></tr><tr><td>1st (5k)</td><td>2nd (6k)</td><td>3rd (7k)</td></tr><tr><td>Random [31]</td><td>27.70±0.08</td><td>28.70±0.13</td><td>29.83±0.04</td><td>116.51</td><td>0.152</td></tr><tr><td>Entropy [33]</td><td>27.70±0.08</td><td>28.93±0.11</td><td>29.89±0.09</td><td>116.51</td><td>0.152</td></tr><tr><td>Core-set [34]</td><td>27.70±0.08</td><td>28.99±0.01</td><td>29.93±0.06</td><td>116.51</td><td>0.152</td></tr><tr><td>LLAL [40]</td><td>27.71±0.03</td><td>28.71±0.06</td><td>29.53±0.15</td><td>116.87</td><td>0.194</td></tr><tr><td>MC-dropout [11]</td><td>27.70±0.10</td><td>29.20±0.09</td><td>30.30±0.08</td><td>116.51</td><td>3.718</td></tr><tr><td>Ensemble [16]</td><td>27.70±0.08</td><td>29.03±0.07</td><td>30.02±0.06</td><td>349.53</td><td>0.456</td></tr><tr><td>Ours<sub>gmm</sub></td><td>27.70±0.08</td><td>29.28±0.05</td><td>30.51±0.12</td><td>116.51</td><td>0.152</td></tr><tr><td>Ours<sub>eff</sub></td><td>27.33±0.04</td><td>29.06±0.08</td><td>30.02±0.05</td><td>73.20</td><td>0.141</td></tr></table>

Table 5: MS-COCO: Comparison of mAP and computing cost of active learning with the most relevant methods. Para. and sec refer to parameters and seconds, respectively.
Comparison to SOTA on MS-COCO: In Tab. 5, we summarize the active learning performance and the computing cost of our approaches compared to active learning methods in the literature. To focus solely on active learning, we reproduce all numbers by applying each sampling method to the proposed GMM architecture (i.e., the same model as $\text{Ours}_{\text{gmm}}$). For all methods, we follow the same settings as in Tab. 4. As shown, both instances of our approach consistently outperform all the other single-model based methods [31, 33, 34, 40] in each active learning cycle. In particular, LLAL [40] shows accuracy similar to random sampling on MS-COCO because it does not take into account the large diversity of the data and the large number of classes in the dataset. In contrast, our approach maintains high accuracy on MS-COCO. Compared to multiple model-based methods [11, 16], both instances of our approach require much less computing cost, while $\text{Ours}_{\text{gmm}}$ outperforms those methods and $\text{Ours}_{\text{eff}}$ shows competitive results at a much lower computational cost. These results suggest that our approach generalizes to larger datasets with a larger number of classes.
# 4.3. Scalability and dataset transferability
Our method is not limited to single-stage detectors. Here, in a first experiment, we show how it can be applied to a two-stage detector such as Faster-RCNN [32] with FPN [29]. For this experiment, we use the same PASCAL VOC dataset as in Tab. 1a. In Tab. 6, we summarize the accuracy and computing cost of our mixture models based on Faster-RCNN and the original Faster-RCNN as a baseline. As shown, both versions of our approach outperform the original model, with up to 1.13 mAP improvement. Importantly, in this case, our approach is applied to the output layer of the detection network after the region proposal stage in Faster-RCNN; the increase in computing cost and latency is therefore negligible because the computation does not involve the large number of anchor boxes.

<table><tr><td colspan="2"></td><td>Baseline [32]</td><td>Ours<sub>gmm</sub></td><td>Ours<sub>eff</sub></td></tr><tr><td rowspan="2">mAP (%)</td><td>IoU>0.5</td><td>75.31±0.22</td><td>75.90±0.09</td><td>75.80±0.15</td></tr><tr><td>IoU>0.75</td><td>48.70±0.11</td><td>49.36±0.07</td><td>49.83±0.30</td></tr><tr><td colspan="2"># of parameters (M)</td><td>41.17</td><td>42.23</td><td>41.61</td></tr><tr><td colspan="2">Forward time (sec)</td><td>0.059</td><td>0.062</td><td>0.060</td></tr></table>

Table 6: VOC07: Performance comparison of our mixture models based on Faster-RCNN and the original Faster-RCNN as a baseline [32].

<table><tr><td rowspan="2">Model</td><td rowspan="2">Backbone</td><td colspan="3">mAP in %</td></tr><tr><td>Random selection</td><td>Ours<sub>gmm</sub> selection</td><td>Ours<sub>eff</sub> selection</td></tr><tr><td rowspan="3">SSD [31]</td><td>VGG-16 [37]</td><td>67.77±0.12</td><td>68.71±0.18</td><td>68.48±0.31</td></tr><tr><td>Resnet-34 [17]</td><td>65.53±0.17</td><td>67.00±0.14</td><td>67.20±0.13</td></tr><tr><td>Resnet-50 [17]</td><td>64.28±0.39</td><td>65.73±0.32</td><td>65.81±0.21</td></tr><tr><td>Faster-RCNN [32]</td><td>Resnet-50-FPN [29]</td><td>72.93±0.41</td><td>73.60±0.18</td><td>75.45±0.30</td></tr></table>

Table 7: VOC07: Transferability of a dataset created using the proposed scoring function and the mixture-based density models. As shown, datasets acquired using our method not only boost the performance of models using a different backbone but also that of a two-stage detector such as Faster-RCNN.
Finally, we study the transferability of actively acquired datasets. We compare the performance of SSD with different backbones (Resnet-34 and Resnet-50 [17]) and of the Faster-RCNN [32] detector, trained using our actively sampled dataset. We perform the experiments on the dataset actively sampled in the last active learning cycle of Tab. 4. For completeness, we also report the accuracy obtained using random sampling. We summarize the results of this experiment in Tab. 7. As shown, networks trained using the actively sampled dataset outperform those trained using random sampling, with up to 2.52 mAP improvement. In conclusion, our method not only scales to other object detection networks, but datasets actively acquired using our approach can also be used to train other architectures.
# 5. Conclusions
We have proposed a novel deep active learning approach for object detection. Our approach relies on mixture density networks to estimate, in a single forward pass of a single model, two types of uncertainty for both the localization and classification tasks, and leverages them in the scoring function. Our proposed probabilistic modeling and scoring function yield substantial gains in both accuracy and computing cost. We present a wide range of experiments on two publicly available datasets, PASCAL VOC and MS-COCO. Moreover, our results suggest that our approach scales to new models with different architectures.
# References
[1] Hamed Habibi Aghdam, Abel Gonzalez-Garcia, Antonio M. López, and Joost van de Weijer. Active learning for deep detection neural networks. In International Conference on Computer Vision (ICCV), 2019.
[2] William H. Beluch, Tim Genewein, Andreas Nurnberger, and Jan M. Kohler. The power of ensembles for active learning in image classification. In Conference on Computer Vision and Pattern Recognition (CVPR), 2018.
[3] Christopher M. Bishop. Mixture density networks. Technical report, Aston University, 1994.
[4] Kashyap Chitta, Jose M. Alvarez, Elmar Haussmann, and Clement Farabet. Less is more: An exploration of data redundancy with active dataset subsampling. arXiv preprint arXiv:1811.03542, 2019.
[5] Kashyap Chitta, Jose M. Alvarez, and Adam Lesnikowski. Large-scale visual active learning with deep probabilistic ensembles. arXiv preprint arXiv:1811.03575, 2018.
[6] Jiwoong Choi, Dayoung Chun, Hyun Kim, and Hyuk-Jae Lee. Gaussian YOLOv3: An accurate and fast object detector using localization uncertainty for autonomous driving. In International Conference on Computer Vision (ICCV), 2019.
[7] Sungjoon Choi, Sanghoon Hong, Kyungjae Lee, and Sungbin Lim. Task agnostic robust learning on corrupt outputs by correlation-guided mixture density networks. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
[8] Sungjoon Choi, Kyungjae Lee, Sungbin Lim, and Songhwai Oh. Uncertainty-aware learning from demonstration using mixture density networks with sampling-free variance modeling. In International Conference on Robotics and Automation (ICRA), 2018.
[9] Mark Everingham, Luc Van Gool, Christopher K. I. Williams, John M. Winn, and Andrew Zisserman. The PASCAL visual object classes (VOC) challenge. International Journal of Computer Vision (IJCV), 88(2):303-338, 2010.
[10] Di Feng, Lars Rosenbaum, and Klaus Dietmayer. Towards safe autonomous driving: Capture uncertainty in the deep neural network for lidar 3D vehicle detection. In International Conference on Intelligent Transportation Systems (ITSC), 2018.
[11] Di Feng, Xiao Wei, Lars Rosenbaum, Atsuto Maki, and Klaus Dietmayer. Deep active learning for efficient training of a lidar 3D object detector. In IEEE Intelligent Vehicles Symposium (IV), 2019.
[12] Yarin Gal and Zoubin Ghahramani. Dropout as a Bayesian approximation: Representing model uncertainty in deep learning. In International Conference on Machine Learning (ICML), 2016.
[13] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep Bayesian active learning with image data. In International Conference on Machine Learning (ICML), 2017.
[14] Ross Girshick. Fast R-CNN. In International Conference on Computer Vision (ICCV), 2015.
[15] Ali Harakeh, Michael Smart, and Steven L. Waslander. BayesOD: A Bayesian approach for uncertainty estimation in deep object detectors. In International Conference on Robotics and Automation (ICRA), 2020.
[16] Elmar Haussmann, Michele Fenzi, Kashyap Chitta, Jan Ivanecky, Hanson Xu, Donna Roy, Akshitta Mittel, Nicolas Koumchatzky, Clement Farabet, and Jose M. Alvarez. Scalable active learning for object detection. In IEEE Intelligent Vehicles Symposium (IV), 2020.
[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016.
[18] Yihui He and Jianren Wang. Deep multivariate mixture of Gaussians for object detection under occlusion. arXiv preprint arXiv:1911.10614, 2019.
[19] Tyler Highlander and Andres Rodriguez. Very efficient training of convolutional neural networks using fast Fourier transform and overlap-and-add. arXiv preprint arXiv:1601.06815, 2016.
[20] Stephen C. Hora. Aleatory and epistemic uncertainty in probability elicitation with an example from hazardous waste management. Reliability Engineering and System Safety, 54:217-223, 1996.
[21] Eyke Hüllermeier and Willem Waegeman. Aleatoric and epistemic uncertainty in machine learning: An introduction to concepts and methods. arXiv preprint arXiv:1910.09457, 2019.
[22] Forrest N. Iandola, Song Han, Matthew W. Moskewicz, Khalid Ashraf, William J. Dally, and Kurt Keutzer. SqueezeNet: AlexNet-level accuracy with $50\times$ fewer parameters and $<0.5$ MB model size. arXiv preprint arXiv:1602.07360, 2016.
[23] Chieh-Chi Kao, Teng-Yok Lee, Pradeep Sen, and Ming-Yu Liu. Localization-aware active learning for object detection. In Asian Conference on Computer Vision (ACCV), 2018.
[24] Alex Kendall and Yarin Gal. What uncertainties do we need in Bayesian deep learning for computer vision? In Advances in Neural Information Processing Systems (NeurIPS), 2017.
[25] Diederik P. Kingma and Max Welling. Auto-encoding variational Bayes. arXiv preprint arXiv:1312.6114, 2013.
[26] Andreas Kirsch, Joost van Amersfoort, and Yarin Gal. BatchBALD: Efficient and diverse batch acquisition for deep Bayesian active learning. In Advances in Neural Information Processing Systems (NeurIPS), 2019.
[27] Yongchan Kwon, Joong-Ho Won, Beom Joon Kim, and Myunghee Cho Paik. Uncertainty quantification using Bayesian neural networks in classification: Application to biomedical image segmentation. Computational Statistics & Data Analysis, 142:106816, 2020.
[28] Balaji Lakshminarayanan, Alexander Pritzel, and Charles Blundell. Simple and scalable predictive uncertainty estimation using deep ensembles. In Advances in Neural Information Processing Systems (NeurIPS), 2017.
[29] Tsung-Yi Lin, Piotr Dollár, Ross Girshick, Kaiming He, Bharath Hariharan, and Serge Belongie. Feature pyramid networks for object detection. In Conference on Computer Vision and Pattern Recognition (CVPR), 2017.
[30] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C. Lawrence Zitnick. Microsoft COCO: Common objects in context. In European Conference on Computer Vision (ECCV), 2014.
[31] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, Cheng-Yang Fu, and Alexander C. Berg. SSD: Single shot multibox detector. In European Conference on Computer Vision (ECCV), 2016.
[32] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. In Advances in Neural Information Processing Systems (NeurIPS), 2015.
[33] Soumya Roy, Asim Unmesh, and Vinay P. Namboodiri. Deep active learning for object detection. In British Machine Vision Conference (BMVC), 2018.
[34] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A core-set approach. In International Conference on Learning Representations (ICLR), 2018.
[35] Burr Settles. Active Learning. Synthesis Lectures on Artificial Intelligence and Machine Learning. 2012.
[36] Claude E. Shannon. A mathematical theory of communication. Mobile Computing and Communications Review, 5(1):3-55, 2001.
[37] K. Simonyan and A. Zisserman. Very deep convolutional networks for large-scale image recognition. In International Conference on Learning Representations (ICLR), 2015.
[38] Natasa Tagasovska and David Lopez-Paz. Single-model uncertainties for deep learning. In Advances in Neural Information Processing Systems (NeurIPS), 2019.
[39] Ali Varamesh and Tinne Tuytelaars. Mixture dense regression for object detection and human pose estimation. In Conference on Computer Vision and Pattern Recognition (CVPR), 2020.
[40] Donggeun Yoo and In So Kweon. Learning loss for active learning. In Conference on Computer Vision and Pattern Recognition (CVPR), 2019.
[41] Jaeyoung Yoo, Geonseok Seo, and Nojun Kwak. Mixture-model-based bounding box density estimation for object detection. arXiv preprint arXiv:1911.12721, 2019.
activelearningfordeepobjectdetectionviaprobabilisticmodeling/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cab86c2f18389e3c8c0ccce9e3c20189edc21d400d85d14c530caa24a0f2fe6a
size 612003
activelearningfordeepobjectdetectionviaprobabilisticmodeling/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ba24375f5268a285daaea59626f3413866107f95f0a1e126a82baa38d9b96a08
size 420523
activelearningforlanedetectionaknowledgedistillationapproach/782e7183-c722-4ff4-9021-42918924e6a7_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:19a4450b0b0c66a54aa4a3d9aff2c33180c35fd49169054abeaf97974dcf5ace
size 72348
activelearningforlanedetectionaknowledgedistillationapproach/782e7183-c722-4ff4-9021-42918924e6a7_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1088ce2aa8d50bc359a3a6c24685dc6159862ac9a5a7fcec494e8938317d2514
size 97361
activelearningforlanedetectionaknowledgedistillationapproach/782e7183-c722-4ff4-9021-42918924e6a7_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e5bf983042b8294d8a9157b39316d38087f327ba2b87d32938b1c9c6edd01b67
size 1733674
activelearningforlanedetectionaknowledgedistillationapproach/full.md
ADDED
@@ -0,0 +1,337 @@
# Active Learning for Lane Detection: A Knowledge Distillation Approach
Fengchao Peng, Chao Wang, Jianzhuang Liu, Zhen Yang
Noah's Ark Lab, Huawei Technologies

{pengfengchao, wangchao165, liu.jianzhuang, yang.zhen}@huawei.com

# Abstract

Lane detection is a key task for autonomous driving vehicles. Currently, lane detection relies on a huge amount of annotated images, which is a heavy annotation burden. Active learning has been proposed to reduce annotation in many computer vision tasks, but no effort has been made for lane detection. Through experiments, we find that existing active learning methods perform poorly for lane detection, for two reasons. On one hand, most methods evaluate data uncertainty based on entropy, which is undesirable in lane detection because it encourages selecting images with very few lanes or even no lane at all. On the other hand, existing methods are not aware of the noise in lane annotations, which is caused by heavy occlusion and unclear lane marks. In this paper, we build a novel knowledge distillation framework and evaluate the uncertainty of images based on the knowledge learnt by the student model. We show that the proposed uncertainty metric overcomes the above two problems. To reduce data redundancy, we explore the influence sets of image samples and propose a new diversity metric for data selection. Finally, we incorporate the uncertainty and diversity metrics and develop a greedy algorithm for data selection. Experiments show that our method achieves a new state of the art on the lane detection benchmarks. In addition, we extend this method to common 2D object detection, and the results show that it is also effective there.
# 1. Introduction
Lane detection is a crucial task for autonomous driving. Recently, deep learning has brought great advances in lane detection performance [33, 31, 8]. However, a deep model requires a huge amount of training data to yield satisfying results. Due to the large aspect ratio and the special shape of lanes, annotating a sufficiently large dataset is highly expensive and cumbersome.

Active learning is a well-known technique for reducing the annotation cost [34, 42, 25]. It selects the most informative data items from the unlabeled dataset according to some policy. The selected data items are then annotated manually and added to the training set. Various selection policies have been proposed. Compared to random selection, these policies reduce the annotation cost by a large margin while achieving competitive, or even better, training performance.

Figure 1: Examples of noisy annotations. All the lanes except the rightmost one are invisible (occluded or unclear) in the original image. Their locations are annotated by guessing and can be misleading to a lane detection model.
However, though active learning has been fruitful in image classification [3, 42, 39], object detection [2, 6, 19], semantic segmentation [38, 41], and other non-computer-vision areas [30, 15, 32], we find that the existing methods are not so effective for the lane detection task. The reasons are twofold. On one hand, entropy is widely used to estimate the uncertainty of images, and the images with the highest entropy values are considered informative. In practice, however, we observe that entropy-based methods are prone to selecting images with very few lanes. These images provide less useful information than normal ones, and therefore a model trained using them does not perform well. On the other hand, lane annotations are often noisy. For example, on the CULane dataset [29], many annotations are made in regions where there are no visible lane marks at all; an example image is shown in Fig. 1. In these regions, annotators decide the location of an invisible lane just by guessing. The guessed annotations are often incorrect and can introduce heavy noise. Existing methods do not model the noisy lane annotations and are therefore easily disturbed.

Figure 2: Framework of our proposed method. We build three models, the large teacher model, the small student model, and the student model distilled by the teacher (Student-KD in the figure). The prediction gaps of the three models on each unlabeled image are used to estimate the uncertainty. The diversity score of an image is estimated based on its influence set, extracted from the outcome of the student model. The uncertainty and diversity scores are combined as the final score for data selection.
In this paper, we propose the first active learning method for lane detection, as shown in Fig. 2. It is able to solve the two above-mentioned problems (i.e., the unsuitable entropy metric and label noise). To avoid estimating entropy, we propose to use Knowledge Distillation (KD) to explore uncertain samples. We regard the lane detection model to be deployed as the student (denoted as Student-KD in Fig. 2), and train it together with a large teacher model. We use their prediction gap as the basic estimation of uncertainty.
In addition, we also use KD to solve the label noise problem. We find that useful knowledge can be transferred from the teacher to the student, but label noise is difficult to transfer. On images with noisy labels, the prediction gaps between the teacher and the student are generally larger than those on normal images. However, a large prediction gap between the teacher and the student does not necessarily indicate high label noise: there can be knowledge, i.e., labels with no noise, that is naturally difficult for the student to learn. To distinguish noise from hard-to-learn knowledge, we train another student model (denoted as Student in Fig. 2) that has the same structure as Student-KD. The difference is that we train it independently, without knowledge distillation from the teacher. We also measure the prediction gap between the two students. Since label noise is random, on a noisy image the prediction gap between any pair of the three models is likely to be large. On the contrary, a small prediction gap between any pair of the models indicates that the label noise is likely to be low. Based on these observations, we propose a novel uncertainty metric that is able to capture both the knowledge and the noise; images with more knowledge and less noise are selected.
Uncertainty is not the only factor that decides the informativeness of a sample: data redundancy can waste annotation cost [37]. Therefore, diversity is also a key factor for efficient data selection. We present a new diversity metric that uses influence sets [23] to estimate the diversity of a selected set. In the data selection phase, we calculate the similarity between unlabeled images based on their feature maps. Given the pair-wise similarities, we build the influence set of each image based on its reverse nearest neighbors and estimate its diversity score. Then, the uncertainty score and diversity score are combined as the final score for data selection (see Fig. 2). We formulate the data selection as a set cover problem and use a greedy algorithm to solve it.

We perform extensive experiments on the most widely used benchmarks [29, 4]. The results show that our method achieves state-of-the-art performance on all the datasets. In addition, we adapt our method to 2D object detection and test it on a benchmark. The results show that our method outperforms a recent active learning method specifically designed for 2D object detection, and is therefore extendable to other visual recognition tasks.
Our contributions are summarized as follows:

1. We propose the first active learning method for lane detection. A knowledge distillation framework is built to solve the two problems specific to lane detection: the unsuitable entropy metric and label noise. We are also the first to explore knowledge distillation in the data selection phase of active learning.
2. We propose a novel uncertainty metric that captures both knowledge and noise. Besides, we present a diversity metric based on reverse nearest neighbors to solve the data redundancy problem. The combination of the two metrics is not only effective but also extendable to other visual recognition tasks.
3. Experiments on two widely used lane detection benchmarks show that our method outperforms recent active learning methods. We also demonstrate the effectiveness of our method for object detection.
# 2. Related Work
Lane Detection. The lane detection problem has been studied for decades. Early methods relied on hand-crafted features, and their capability was limited to detecting lanes in easy cases [40, 5]. In recent years, deep learning has improved lane detection performance by a large margin. Pan et al. [29] proposed a spatial CNN model to pass information across rows and columns inside a neural layer, and used a segmentation head to predict lanes. PINet [21] first predicted point clouds in the lane regions and then performed clustering in post-processing. PointLaneNet [8] and FastDraw [31] detected lanes in an object-detection manner: both enumerated anchors on feature maps and built multi-task headers to perform classification and regression, respectively. UFLD [33] built row anchors on an image and formulated lane detection as selecting certain pixels in each anchor. A very recent work [26] used a transformer to predict the shape parameters of lanes. Different from these methods, we focus on active learning for lane detection, and our method is model agnostic.
Active Learning. Active learning aims at selecting the most informative data items to form the training set and improve training performance at a very low annotation cost. The informativeness of a data item is studied from two perspectives: uncertainty and diversity. A variety of methods have been proposed to estimate the uncertainty, such as cross entropy [16, 30], best-vs-second-best [18, 36], and expected model change [7, 9, 22]. Gal et al. [20, 10, 24] proposed a series of Bayesian methods, drawing samples from the dropout distribution of a stochastic neural network and evaluating the uncertainty as the mutual information of the sample outputs. Yoo et al. [42] proposed to directly estimate the uncertainty using a header of the model. Gao et al. [12] and Zhou et al. [45] added augmentations to input images and evaluated the uncertainty as the consistency of the model predictions. Instead of solving general problems, Liu et al. [25] incorporated spatial information into active learning for human pose estimation. A similar idea was used by Aghdam et al. [2] in 2D object detection.
A common drawback of the above methods is that they ignore data redundancy. To deal with this problem, Nguyen et al. [28] extracted clusters from the unlabeled dataset and prevented the model from repeatedly selecting samples from the same cluster. Sener et al. [37] defined the problem as a core-set selection problem and proposed a k-center greedy algorithm to solve it. Agarwal et al. [1] combined this idea with a contextual diversity measurement, encoding spatial context variations in sample selection. Sinha et al. [39] directly searched for the most representative samples using an adversarial learning framework.
None of the previous methods is designed specifically for lane detection. They are either inapplicable to lane detection or able to solve the problem only partially. In comparison, we propose a framework that works very well for this task and is extendable to other visual recognition tasks.
Knowledge Distillation. Knowledge Distillation (KD) [13] was first proposed to transfer knowledge from a large model to a small model for model compression. Recently, researchers have started to exploit the potential of KD in semi-supervised learning and active learning. Gao et al. [11] used KD in semi-supervised learning to improve the tolerance to data noise. Yun et al. [44] were the first to employ KD in active learning. However, they used KD only in the training phase, while we also use it in the data selection phase (see Section 3.2).
# 3. Proposed Method
In this section, we introduce our method in detail. We first describe the knowledge distillation framework, which is used to train the models and perform prediction on the unlabeled dataset. Then we present the calculation of prediction gaps, and propose a novel uncertainty metric that estimates valuable knowledge as well as label noise. After that, we design a diversity metric to reduce data redundancy. Finally, we combine the uncertainty and diversity metrics and develop an algorithm to select the most informative samples.
# 3.1. Knowledge Distillation
We first build three models: a teacher ($M_T$) and two students ($M_S$ and $M_{S-KD}$). The teacher model uses a larger backbone network, while the two students share the same structure. In the training phase, we train $M_S$ and $M_T$ independently on an initial training set; in this step, no information is passed between them.
We choose PointLaneNet (PLN) [8] as the primary model for its simplicity; other models are also applicable. PLN has a backbone network to extract visual features, and builds two headers on the feature map. One header predicts the class of each feature pixel; the other predicts the coordinates of all the points of the lane passing through this pixel. The detection loss of PLN consists of a classification loss and two regression losses, defined as:

$$
L_{det} = \sum_{i=1}^{w} \sum_{j=1}^{h} \left( \lambda\, CE_{ij} + \mu\, \mathbb{1}_{ij} L_{ij}^{loc} + \nu\, \mathbb{1}_{ij} L_{ij}^{pos} \right), \tag{1}
$$

where $CE$ denotes the cross entropy, $L^{loc}$ and $L^{pos}$ are two L2 losses for regression, $w$ and $h$ are respectively the width and height of the feature map, $\lambda$, $\mu$, and $\nu$ are weights, and $\mathbb{1}_{ij}$ is the indicator function, which is 1 if the pixel $(i,j)$ is selected and 0 otherwise. These losses are defined with respect to the model predictions and the ground truth; for more details about Eq. (1), refer to [8].
After the initial independent training, we obtain the trained student model $M_S$ and teacher model $M_T$. Then, we train the other student $M_{S-KD}$ by distilling the knowledge from $M_T$ using the same initial training set. Following [8], we design a distillation loss $L_{dis}$ that also consists of one classification loss and two regression losses. These losses are similar to those in [8]; the only difference is that the ground truth in [8] is replaced by the soft prediction of the teacher $M_T$. Thus, the training loss of $M_{S-KD}$ (i.e., KD Loss in Fig. 2) is defined as:

$$
Loss = L_{det} + \alpha L_{dis}, \tag{2}
$$

where $\alpha$ is a weighting factor. The difference between $M_{S-KD}$ and $M_S$ is that $M_{S-KD}$ is trained to learn from both the ground truth and the teacher; it is usually stronger than $M_S$. Here we also call $M_S$ a student because it has the same structure as the real student $M_{S-KD}$.

Figure 3: An example of case 4. (a) The original image. (b) The ground truth. (c) The prediction of the teacher. (d) The prediction of the distilled student. (e) The prediction of the student without distillation. The difference $D_{ST}$ between (c) and (d) is large, and $D_{SS}$ between (d) and (e) is also large. The reason is that, as shown in (a), most lane marks in this image are occluded and it is difficult for the models to detect the lanes. However, though this image presents a difficult case, we do not want to select it, because in such cases annotators are prone to guessing the locations of the occluded lanes. As shown in (b), the middle and right lanes are guessed by the annotator. Guessed annotations are often incorrect and thus mislead the models. Therefore, our uncertainty metric penalizes this case by the ratio between $D_{SS}$ and $D_{ST}$ in Eq. (5).
# 3.2. Uncertainty
Uncertainty is a natural criterion for data selection. Cross entropy is a commonly used uncertainty measurement. However, we will show in the experiments that this method is prone to selecting images with few lanes, which therefore provide less information for training. Label noise also causes uncertainty. Fitting to noisy labels reduces the model performance. In this subsection, we propose a novel uncertainty metric to solve these two problems.
We first define the prediction gap between two models. Given an image $p$ and two models $M_1$ and $M_2$, denote the sets of their predicted lanes as $M_1(p)$ and $M_2(p)$, respectively. For each lane $l_1 \in M_1(p)$, we find its closest lane in $M_2(p)$ with:

$$
l_2 = \underset{l \in M_2(p)}{\arg\min}\; Dist(l_1, l). \tag{3}
$$

The distance $Dist(\cdot)$ between two lanes is calculated as the segment-wise Euclidean distance. Then the prediction gap between $M_1$ and $M_2$ is defined as:

$$
D_{12}(p) = \max_{l_1 \in M_1(p)} Dist(l_1, l_2). \tag{4}
$$

To simplify the notation, we use $D_{12}(p)$ and $D_{12}$ interchangeably in the following.
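A minimal sketch of Eqs. (3) and (4), assuming each lane is an `(n, 2)` array of points sampled at the same vertical positions and that the segment-wise distance is averaged over points (both assumptions, since the paper leaves these details open):

```python
import numpy as np

def lane_dist(l1, l2):
    """Segment-wise Euclidean distance between two lanes, each an (n, 2)
    array of points sampled at the same n vertical positions (assumed)."""
    return np.linalg.norm(l1 - l2, axis=1).mean()

def prediction_gap(lanes_1, lanes_2):
    """D_12(p) of Eq. (4): for every lane of model 1, match its closest lane
    from model 2 (Eq. (3)) and return the largest matched distance."""
    if not lanes_1 or not lanes_2:
        return 0.0  # assumption: empty prediction sets give a zero gap
    return max(min(lane_dist(l1, l2) for l2 in lanes_2) for l1 in lanes_1)
```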
We now have three trained models, the student $M_S$ , the distilled student $M_{S-KD}$ , and the teacher $M_T$ . To model the uncertainty, we calculate the gap $D_{SS}$ between $M_S$ and $M_{S-KD}$ , and the gap $D_{ST}$ between $M_{S-KD}$ and $M_T$ . $D_{SS}$ captures the knowledge learned by the distilled student from the teacher. $D_{ST}$ captures the knowledge that is difficult for the student $M_{S-KD}$ to learn. Based on $D_{SS}$ and $D_{ST}$ , we divide uncertain samples into four typical cases:
1. Small $D_{SS}$ and small $D_{ST}$. The model predictions are stable and consistent, so the image is likely to be an easy sample. There is no need to annotate it.
2. Small $D_{SS}$ and large $D_{ST}$. There is a large gap between the teacher and the distilled student, which means the student $M_{S-KD}$ cannot learn well from the teacher on this image; this can be caused either by difficult knowledge or by noise. The small $D_{SS}$ indicates that the image is unlikely to carry label noise, because an image prone to wrong annotations usually has unclear lane marks, leading two different models to guess randomly, with a large gap between their predictions. Therefore, this image likely contains useful knowledge and is valuable to annotate.
3. Large $D_{SS}$ and small $D_{ST}$. A small gap between the teacher and the distilled student indicates the knowledge is easy to learn. However, a large $D_{SS}$ means there is a risk in trusting the teacher: the teacher may transfer incorrect knowledge to the student. Therefore, this image is also valuable to annotate.
4. Large $D_{SS}$ and large $D_{ST}$. The three models cannot provide consistent predictions on this image. Noisy images usually cause this problem, and we do not want to select them. Even if this is not due to noise, the image is not an easy sample for any of the three models. For these two reasons, though this image can be valuable to annotate, we do not treat it as the highest priority.
An example of case 4 is shown in Fig. 3. Other cases are given in the appendix.

Figure 4: Comparison of the distance-based [25] and our RNN-based strategies. We randomly select five video segments from the CULane dataset, and in each segment we randomly choose two frames. The two strategies are then used to select five of these ten images. The distance-based method chooses images 3, 5, 6, 9, and 10, while ours selects images 2, 3, 6, 8, and 9. Our selection naturally covers all five segments, whereas the distance-based method ignores the first and fourth segments. This indicates that the average distance is not an appropriate metric to evaluate the influence among images.
Combining the above four cases, we propose a simple yet effective uncertainty metric for image $p$:

$$
Uncr(p) = \left( D_{SS} + D_{ST} \right) \cdot \max\left\{ \frac{D_{ST}}{D_{SS}}, \frac{D_{SS}}{D_{ST}} \right\}. \tag{5}
$$
This metric encourages a large difference between $D_{SS}$ and $D_{ST}$, so that images of cases 2 and 3 get large scores. For easy samples, $D_{SS}$ and $D_{ST}$ are both small, and the metric is small. If $D_{SS}$ and $D_{ST}$ are both large (case 4), $(D_{SS} + D_{ST})$ is also large, but it is penalized by $\max \left\{ \frac{D_{ST}}{D_{SS}}, \frac{D_{SS}}{D_{ST}} \right\}$. In this way, images with potential noise do not get very large uncertainty scores. During data selection, we simply sort the images in decreasing order of their uncertainty values and select the top ones.
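A direct transcription of Eq. (5); the `eps` guard against zero gaps is our own implementation detail, not stated in the paper:

```python
def uncertainty(d_ss, d_st, eps=1e-6):
    """Uncr(p) of Eq. (5), given the two prediction gaps of an image."""
    d_ss, d_st = max(d_ss, eps), max(d_st, eps)  # avoid division by zero
    return (d_ss + d_st) * max(d_st / d_ss, d_ss / d_st)

# Cases 2 and 3 (one gap small, one large) score highest:
print(uncertainty(0.1, 2.0))  # 42.0
print(uncertainty(2.0, 2.0))  # 4.0  (case 4 is penalized)
print(uncertainty(0.1, 0.1))  # 0.2  (easy sample)
```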
# 3.3. Diversity
In addition to uncertainty, diversity is another important factor in selecting informative samples. It encourages the selected samples to be representative of, or in other words, to be able to influence, a variety of other unlabeled samples.
A recent method [25] evaluates the diversity of a sample by its average feature distance to the other unlabeled samples: the image with the minimum average distance to all other unlabeled images is considered the most influential sample. However, for lane detection, we can show that this average distance is not an appropriate metric of influence (diversity). In a lane dataset, images are sampled from video segments, so there are many similar images from each scene. A natural way to find an influential subset is to select one image from every video segment and then annotate them for training. However, the distance-based metric [25] cannot achieve this, as shown in the simplified experiment in Fig. 4. The reason is that the distance between two images calculated in a high-dimensional feature space is often not equivalent to their perceptual dissimilarity. Another recent work [3] uses the K-Means++ method for data selection, but its effectiveness on lane detection has not been validated. In fact, our experiments (Section 4.1 and Section 4.2) show that the diversity metric proposed below is superior.
In this work, we explore the influence set of each image, and define the diversity of a selected subset as the number of unlabeled samples it can influence. The influence set is extracted based on reverse nearest neighbors (RNNs). If a sample $p$ is the nearest neighbor of a sample $q$, then reversely, $q$ is called a reverse nearest neighbor of $p$. Given a sample $p$, a dataset $S$, a distance function $d(\cdot)$, and an integer $k$, the reverse $k$ nearest neighbors of $p$ are defined as:

$$
RNN_k(p) = \{ q \in S - \{p\} \mid p \in NN_k(q) \}, \tag{6}
$$

where $NN_k(q)$ denotes the $k$ nearest neighbors of $q$. In other words, $p$ is closer to every sample in $RNN_k(p)$ than most of the other samples in the entire dataset $S$ are. Therefore, $p$ is likely to be the most influential sample for all the samples in $RNN_k(p)$.
Given the unlabeled dataset $S_U$, the current subset of selected samples $V \subset S_U$, and an image $p \in S_U$, we define the diversity of $p$ as the number of its reverse $k$ nearest neighbors in $S_U$ that are not yet selected:

$$
Div(p \mid V, S_U) = \left| RNN_k(p) - V \right|. \tag{7}
$$
Note that, different from nearest neighbors, the sizes of $RNN_k(p)$ can vary greatly across different $p$'s and can even be much larger than $k$. In this work, $k$ is treated as a hyperparameter, so it is dropped on the left side of Eq. (7). The experiments in Fig. 4 and Section 4 show that for lane detection, our RNN-based strategy performs better than the previous diversity-based methods.
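A brute-force sketch of Eqs. (6) and (7) over an `(n, d)` feature matrix; for a large unlabeled pool, a real implementation would replace the $O(n^2)$ distance matrix with an approximate nearest-neighbor index:

```python
import numpy as np

def reverse_knn_sets(features, k=3):
    """Build RNN_k(p) (Eq. (6)) for every sample from pairwise L2 distances."""
    d = np.linalg.norm(features[:, None] - features[None, :], axis=-1)
    np.fill_diagonal(d, np.inf)                 # exclude self-matches
    knn = np.argsort(d, axis=1)[:, :k]          # NN_k(q) for every q
    rnn = [set() for _ in range(len(features))]
    for q, neighbors in enumerate(knn):
        for p in neighbors:                     # q is a reverse NN of p
            rnn[p].add(q)
    return rnn

def diversity(p, selected, rnn):
    """Div(p | V, S_U) of Eq. (7): reverse neighbors not yet covered by V."""
    return len(rnn[p] - selected)
```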
# 3.4. Active Learning Algorithm
We combine the uncertainty and diversity metrics, and define the data selection problem as follows:

$$
\max_{V \subset S_U} \; \sum_{p \in V} \left( Uncr(p) + \beta\, Div(p \mid V, S_U) \right), \qquad \text{s.t.}\; |V| = b, \tag{8}
$$
where $\beta$ is a weighting factor and $b$ is the annotation budget (the number of selected samples). The goal is to select a subset of samples that have high uncertainty and, at the same time, can influence a large subset of the samples in the remaining unlabeled dataset.
This optimization is a set cover problem [14]. Though it is NP-hard, the objective function is non-decreasing and submodular, so an $O(N^2)$ greedy algorithm guarantees a $(1 - \frac{1}{e})$-approximation to the optimal solution [27].
Algorithm 1 Active Learning with Knowledge Distillation

Input: Labeled dataset $S_L$, unlabeled dataset $S_U$, number of rounds $r$, budget $b$ per round;
Output: Selected dataset $V \subset S_U$, with annotations;

1: $M_S, M_T \gets \text{Train}(S_L)$;
2: $M_{S\text{-}KD} \gets \text{Train}_{KD}(S_L)$;
3: $V \gets \emptyset$;
4: while $|V| < r \cdot b$ do
5:   for $p \in S_U$ do
6:     $P_S, P_{S\text{-}KD}, P_T \gets \text{Predict}(M_S, M_{S\text{-}KD}, M_T, p)$;
7:     Compute $D_{SS}$ and $D_{ST}$ from $P_S, P_{S\text{-}KD}, P_T$;
8:     $uncr \gets (D_{SS} + D_{ST}) \cdot \max\{\frac{D_{ST}}{D_{SS}}, \frac{D_{SS}}{D_{ST}}\}$;
9:     $div \gets Div(p \mid V, S_U)$;
10:    $S_{score}(p) \gets uncr + \beta \cdot div$;
11:  end for
12:  $Q \gets \text{Greedy}(S_U, S_{score}, b)$;
13:  $S_U \gets S_U - Q$;
14:  $Q \gets \text{Annotation}(Q)$;
15:  $V \gets V \cup Q$;
16:  $M_S, M_T \gets \text{Train}(S_L \cup V)$;
17:  $M_{S\text{-}KD} \gets \text{Train}_{KD}(S_L \cup V)$;
18: end while
The complete active learning algorithm is shown in Alg. 1. We start by training the student and teacher models in the knowledge distillation framework (lines 1 and 2). Then, given the label budget $b$ of each round, we iteratively calculate the uncertainty and diversity scores of the samples (lines 5-11), select $b$ images from the unlabeled dataset using the greedy algorithm (line 12), annotate them (line 14), add them to the selected set (line 15), and train the models on the updated training set (lines 16 and 17). The iteration (lines 4-18) repeats until $|V| \geq r \cdot b$.
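A sketch of the greedy selection step (line 12 of Alg. 1), under the natural reading that the diversity term is the marginal set-cover gain of a candidate; the authors' tie-breaking and score-caching details are not specified in the paper:

```python
def greedy_select(pool, uncr, rnn, beta, budget):
    """Greedy (1 - 1/e)-approximation for Eq. (8).

    `uncr` maps a sample index to Uncr(p); `rnn` maps it to its
    reverse-kNN influence set (see the earlier diversity sketch).
    """
    selected, covered = [], set()
    candidates = set(pool)
    for _ in range(budget):
        # Marginal gain: uncertainty plus newly covered reverse neighbors.
        best = max(candidates,
                   key=lambda p: uncr[p] + beta * len(rnn[p] - covered))
        selected.append(best)
        covered |= rnn[best]
        candidates.remove(best)
    return selected
```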
# 3.5. Extension to 2D Object Detection
Though our method is initially designed for lane detection, it is easy to extend it to other active learning tasks. In this paper, we explore 2D object detection.
We use the same KD framework and the same definitions of uncertainty and diversity; we only slightly change the definition of the prediction gap between two models. Given an image $p$ and two models $M_1$ and $M_2$, we first match the two sets of bounding boxes detected by $M_1$ and $M_2$: for each predicted box $b_1 \in M_1(p)$, we find the $b_2 \in M_2(p)$ with the largest $IoU(b_1, b_2)$. Then the prediction gap is defined as:

$$
D_{12}(p) = \sum_{\{b_1, b_2\}} \left( 1 - IoU(b_1, b_2) \right) \cdot \left( 1 + \gamma\, \mathbb{1}(c_1 \neq c_2) \right), \tag{9}
$$
where $\gamma > 0$ is a hyper-parameter, and $c_1$ and $c_2$ are the predicted classes of $b_1$ and $b_2$, respectively.
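A sketch of Eq. (9) with per-box best-IoU matching; the paper does not specify how one-to-many matches are resolved, so this pairing rule (and non-empty prediction sets) is an assumption:

```python
def detection_gap(boxes_1, boxes_2, gamma=1.0):
    """D_12(p) of Eq. (9). Each detection is a (box, cls) pair with the box
    given as (x1, y1, x2, y2); assumes both detection sets are non-empty."""
    def iou(a, b):
        x1, y1 = max(a[0], b[0]), max(a[1], b[1])
        x2, y2 = min(a[2], b[2]), min(a[3], b[3])
        inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
        area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
        return inter / (area(a) + area(b) - inter + 1e-12)

    gap = 0.0
    for b1, c1 in boxes_1:
        # Match b1 to the box of model 2 with the largest IoU.
        b2, c2 = max(boxes_2, key=lambda bc: iou(b1, bc[0]))
        gap += (1 - iou(b1, b2)) * (1 + gamma * (c1 != c2))
    return gap
```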
# 4. Experiments
Datasets. We perform experiments on the two most popular datasets, CULane [29] and LLAMAS [4]. CULane contains 88880 training images and 34680 test images, annotated by humans. The LLAMAS dataset is annotated automatically using Lidar maps; it contains 58269 training images and 20844 validation images.
Models. We test our method using two different lane detection models, PointLaneNet (PLN) [8] and UFLD [33]. For PLN, we use a pruned ResNet-122 as the backbone network for the student model and SENet-154 [17] for the teacher model. SGD is used to train the models with a batch size of 32, and the learning rate is set to 0.02. The parameters $\lambda$, $\mu$, and $\nu$ in Eq. (1) are set to 1, 0.01, and 0.1, respectively; $\alpha$ in Eq. (2) is set to 1; $k$ in Eq. (7) is set to 3; $\beta$ in Eq. (8) is set to 5. We use the output of the last convolutional layer of the backbone network as the feature of an image. Either the students or the teacher can be used to extract features, with almost no difference in effect; here we use the student model without distillation ($M_S$). On each dataset, the models are first trained for 30 epochs on a randomly selected training set containing about $5\%$ of the samples of the entire training set. Then we iteratively select a subset of the unlabeled images for annotation and fine-tune the models for 10 epochs, repeating until the budget is used up. For a fair comparison with other methods, we use $M_S$ to perform the evaluation, though we would prefer to deploy $M_{S-KD}$ in practice. Each experiment is repeated five times, and the curve of the mean evaluation results is reported. For UFLD, we use ResNet-18 for the student model and ResNet-101 for the teacher model, with the parameter values recommended by the authors of UFLD. All experiments are performed on a GPU server with 8 NVIDIA Tesla V100 cards.
Evaluation metrics. We use F1-Score for evaluation. Given a fixed lane width, a predicted lane and an annotated lane are considered matched if their IoU is greater than 0.5. Then the F1-Score is defined as: $F_{1} = \frac{2 \times P \times R}{P + R}$ , where $P$ is the precision and $R$ is the recall.
Compared methods. We compare our work with the following six methods:

1. Random (Rand). The baseline random selection.
2. Entropy (Ent). This method selects samples based on the cross entropy.
3. Ensemble (Ens). We use the student and teacher models, $M_S$ and $M_T$, to predict lanes on each unlabeled image, and select the images with the largest prediction gaps between $M_S$ and $M_T$.
4. ACD [2]. This method is designed specifically for object detection. It incorporates spatial information to estimate the entropy.
5. LLoss [42]. This method adds a header to the network to estimate the loss of each sample. Samples with the largest predicted losses are selected.
6. BADGE [3]. This method combines an uncertainty metric (gradient norm) and a diversity metric (K-Means++) to select samples.

Methods 2-5 consider only uncertainty.
# 4.1. Results
The results are shown in Fig. 5. Figures (a) and (b) show the results with PLN. We find that our method achieves the best performance on the two datasets. Given the same label budget, the training sets selected by our method are the most informative, making the model trained with these sets yield the highest F1-Score on the test datasets. The BADGE method achieves the second best performance. The reason is that it also takes both uncertainty and diversity into consideration in the data selection process. The other methods only focus on uncertainty, and therefore perform worse.
In particular, the CULane dataset contains a variety of scenes such as crowded roads, night, and shadow. On this dataset, the uncertainty-only methods achieve only a mild improvement over the baseline Rand. One important reason is that they are prone to selecting samples with few lanes, as shown in Fig. 6. In the first iteration, the samples selected by Ent contain only 1.5 lanes per image, while the entire dataset averages 3.0 lanes per image. The other uncertainty-only methods, i.e., Ens, LLoss, and ACD, share this problem. The selected samples provide too few positive annotations, so the model is not trained sufficiently. By comparison, the samples selected by our method average 3.1 lanes per image. This does not mean, however, that we should simply select the images with the most lanes; we tested this idea and the result is worse than Rand.

Figure 5: Results of the seven methods on the CULane and LLAMAS datasets. (a) and (b) show the results with PointLaneNet. (c) and (d) show the results with UFLD.


Figure 6: (a) Average number of lanes per image in the dataset selected by each method. The last bar (Entire) denotes the average number of lanes per image in the entire dataset. (b) An example image with no lanes, of the kind likely to be selected by the uncertainty-only methods.

ACD extends the calculation of entropy by taking the spatial neighborhood of each pixel into consideration. This idea is useful for object detection because it provides a more accurate estimation of uncertainty for each pixel. However, it is less effective for lane detection: lanes are thin, so a foreground pixel does not have as many foreground neighbors as in object detection. The pixel-level uncertainty estimation therefore cannot be enhanced, and ACD does not obtain the best performance. The LLoss method performs even worse than Rand on CULane because, in the first 4 or 5 iterations, it focuses on selecting night images: $70\%$ of its selected images are night scenes. This leads to high redundancy in the selected training set. Although LLoss starts to focus on other scenes in later iterations, this cannot resolve the redundancy caused by the many night images already selected, so its performance is the worst.
In the LLAMAS dataset, the average number of lanes per image is 5.2, and most images contain at least two lanes. The uncertainty-only methods are therefore unlikely to select images with few lanes, as they do on CULane, so although they perform worse than ours, they outperform Rand by a large margin. Moreover, most LLAMAS images are daytime scenes without crossings or heavy shadow, so LLoss does not suffer from selecting too many night images, and its performance is much better than on CULane.
In Fig. 5, (c) and (d) show the results with UFLD. As with PLN, our method achieves the best performance on both datasets, and the methods considering both uncertainty and diversity outperform those using only uncertainty. On CULane, LLoss still focuses on selecting night images and therefore performs the worst. Ent is prone to selecting images with few lanes in the beginning, so on CULane its performance is worse than Rand in the first two iterations. On LLAMAS, all methods manage to outperform Rand.


Figure 7: Results of the ablation study.
These comparisons show that our method is effective in reducing the annotation cost of lane detection. PLN and UFLD formulate the lane detection problem differently, yet our method achieves good performance with both. This indicates that our method is model-agnostic and suitable for a variety of lane detection models.
# 4.2. Ablation Study
We now validate the effectiveness of our uncertainty and diversity metrics. We first build a KD-only version of our method, which performs data selection based only on our uncertainty metric. Then, to validate the diversity metric, we combine the KD-only version with the widely used K-Means++ method [3], denoted $\mathrm{KD + KM}$ . Due to limited space, we only run experiments on CULane with PointLaneNet. The results are shown in Fig. 7. Even when only our uncertainty metric is used for data selection, performance is better than Ens, the overall best of the four uncertainty-only methods on CULane. The $\mathrm{KD + KM}$ method performs better still, but remains worse than combining KD-only with our diversity metric based on reverse nearest neighbors (Ours).
These results show that both our uncertainty metric and our diversity metric are effective for data selection, and their combination obtains the best performance.
# 4.3. Extension to Object Detection
In this subsection, we extend our method to 2D object detection. We use Faster R-CNN [35] as the base model, with ResNet-18 as the backbone for the student model and ResNet-101 for the teacher model. The experiments are conducted on the BDD100K dataset [43], which contains 70,000 training images and 10,000 validation images. Our method is compared with three methods: Rand, Ent, and ACD [2]. ACD is a recent active learning method specifically designed for 2D object detection. The pipeline of the experiments is similar to that of lane detection. Each method starts by training the models on an initial training set of 5,000 images randomly sampled from the original training set. Then, in each iteration, each method selects 5,000 images, adds them to the training set, and fine-tunes the models. The initial training takes 12 epochs and the fine-tuning takes 8 epochs, with SGD optimization and a learning rate of 0.02. The batch size is set to 32; $\gamma$ in Eq. (9) is set to 5; $\alpha$ in Eq. (2) is set to 10. The iterations repeat until the annotation budget is used up. The experiment is conducted five times, and we use the average mAP over the five runs to evaluate performance.


Figure 8: Results of 2D object detection.
The results are shown in Fig. 8. Ent obtains a good improvement over Rand in the first three iterations, but its improvement decreases significantly in the later iterations. ACD outperforms Ent by a large margin because it considers spatial information when calculating the entropy, providing a more accurate pixel-level estimation of uncertainty. Our method achieves an improvement even larger than that of ACD. These results show that our method extends well to 2D object detection.
# 5. Conclusion
In this paper, we present the first active learning method for lane detection. We find that two problems restrict the performance of existing methods, namely the unsuitable entropy and label noise. To solve these problems, we propose to employ knowledge distillation to evaluate both the data uncertainty and the potential label noise. We also propose a diversity metric based on reverse nearest neighbors, which helps reduce the redundancy of the selected dataset. The experiments show that both metrics improve lane detection performance, and their combination achieves the best results on two popular benchmarks. In addition, our method is extendable to other visual recognition tasks, and we demonstrate its effectiveness on 2D object detection. In the future, we will extend this active learning framework to a wider range of recognition tasks to further examine its capability.
# References
[1] Sharat Agarwal, Himanshu Arora, Saket Anand, and Chetan Arora. Contextual diversity for active learning. In ECCV, 2020.
[2] Hamed H. Aghdam, Abel Gonzalez-Garcia, Joost van de Weijer, and Antonio M. López. Active learning for deep detection neural networks. In ICCV, 2019.
[3] Jordan T. Ash, Chicheng Zhang, Akshay Krishnamurthy, John Langford, and Alekh Agarwal. Deep batch active learning by diverse, uncertain gradient lower bounds. In ICLR, 2020.
[4] Karsten Behrendt and Ryan Soussan. Unsupervised labeled lane marker dataset generation using maps. In ICCV, 2019.
[5] Amol Borkar, Monson Hayes, and Mark T. Smith. A novel lane detection system with efficient ground truth generation. IEEE Transactions on Intelligent Transportation Systems, 2011.
[6] Clemens-Alexander Brust, Christoph Käding, and Joachim Denzler. Active learning for deep object detection. arXiv preprint arXiv:1809.09875, 2018.
[7] Wenbin Cai, Ya Zhang, and Jun Zhou. Maximizing expected model change for active learning in regression. In ICDM, 2013.
[8] Zhenpeng Chen, Qianfei Liu, and Chenfan Lian. PointLaneNet: Efficient end-to-end CNNs for accurate real-time lane detection. In IEEE Intelligent Vehicles Symposium, 2019.
[9] Alexander Freytag, Erik Rodner, and Joachim Denzler. Selecting influential examples: Active learning with expected model output changes. In ECCV, 2014.
[10] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep Bayesian active learning with image data. arXiv preprint arXiv:1703.02910, 2017.
[11] Jiyang Gao, Jiang Wang, Shengyang Dai, Li-Jia Li, and Ram Nevatia. NOTE-RCNN: Noise tolerant ensemble RCNN for semi-supervised object detection. In ICCV, 2019.
[12] Mingfei Gao, Zizhao Zhang, Guo Yu, Sercan Ö. Arik, Larry S. Davis, and Tomas Pfister. Consistency-based semi-supervised active learning: Towards minimizing labeling cost. In ECCV, 2020.
[13] Jianping Gou, Baosheng Yu, Stephen John Maybank, and Dacheng Tao. Knowledge distillation: A survey. arXiv preprint arXiv:2006.05525, 2020.
[14] Andrew Guillory and Jeff Bilmes. Interactive submodular set cover. arXiv preprint arXiv:1002.3345, 2010.
[15] Dilek Hakkani-Tür, Giuseppe Riccardi, and Allen Gorin. Active learning for automatic speech recognition. In ICASSP, 2002.
[16] Alex Holub, Pietro Perona, and Michael C. Burl. Entropy-based active learning for object recognition. In CVPR Workshops, 2008.
[17] Jie Hu, Li Shen, and Gang Sun. Squeeze-and-excitation networks. In CVPR, 2018.
[18] Ajay J. Joshi, Fatih Porikli, and Nikolaos Papanikolopoulos. Multi-class active learning for image classification. In CVPR, 2009.
[19] Chieh-Chi Kao, Teng-Yok Lee, Pradeep Sen, and Ming-Yu Liu. Localization-aware active learning for object detection. In ACCV, 2018.
[20] Alex Kendall and Yarin Gal. What uncertainties do we need in Bayesian deep learning for computer vision? In NeurIPS, 2017.
[21] Yeongmin Ko, Jiwon Jun, Donghwuy Ko, and Moongu Jeon. Key points estimation and point instance segmentation approach for lane detection. arXiv preprint arXiv:2002.06604, 2020.
[22] Ksenia Konyushkova, Raphael Sznitman, and Pascal Fua. Learning active learning from data. In NeurIPS, 2017.
[23] Flip Korn and S. Muthukrishnan. Influence sets based on reverse nearest neighbor queries. ACM SIGMOD Record, 2000.
[24] Yingzhen Li and Yarin Gal. Dropout inference in Bayesian neural networks with alpha-divergences. arXiv preprint arXiv:1703.02914, 2017.
[25] Buyu Liu and Vittorio Ferrari. Active learning for human pose estimation. In ICCV, 2017.
[26] Ruijin Liu, Zejian Yuan, Tie Liu, and Zhiliang Xiong. End-to-end lane shape prediction with transformers. In WACV, 2021.
[27] George L. Nemhauser, Laurence A. Wolsey, and Marshall L. Fisher. An analysis of approximations for maximizing submodular set functions - I. Mathematical Programming, 1978.
[28] Hieu T. Nguyen and Arnold Smeulders. Active learning using pre-clustering. In ICML, 2004.
[29] Xingang Pan, Jianping Shi, Ping Luo, Xiaogang Wang, and Xiaoou Tang. Spatial as deep: Spatial CNN for traffic scene understanding. In AAAI, 2018.
[30] Fengchao Peng, Qiong Luo, and Lionel M. Ni. ACTS: An active learning method for time series classification. In ICDE, 2017.
[31] Jonah Philion. FastDraw: Addressing the long tail of lane detection by adapting a sequential prediction network. In CVPR, 2019.
[32] Robert Pinsler, Jonathan Gordon, Eric Nalisnick, and José Miguel Hernández-Lobato. Bayesian batch active learning as sparse subset approximation. In NeurIPS, 2019.
[33] Zequn Qin, Huanyu Wang, and Xi Li. Ultra fast structure-aware deep lane detection. arXiv preprint arXiv:2004.11757, 2020.
[34] Pengzhen Ren, Yun Xiao, Xiaojun Chang, Po-Yao Huang, Zhihui Li, Xiaojiang Chen, and Xin Wang. A survey of deep active learning. arXiv preprint arXiv:2009.00236, 2020.
[35] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. TPAMI, 2016.
[36] Dan Roth and Kevin Small. Margin-based active learning for structured output spaces. In ECML, 2006.
[37] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A core-set approach. arXiv preprint arXiv:1708.00489, 2017.
[38] Yawar Siddiqui, Julien Valentin, and Matthias Nießner. ViewAL: Active learning with viewpoint entropy for semantic segmentation. In CVPR, 2020.
[39] Samarth Sinha, Sayna Ebrahimi, and Trevor Darrell. Variational adversarial active learning. In ICCV, 2019.
[40] Luo-Wei Tsai, Jun-Wei Hsieh, Chi-Hung Chuang, and Kuo-Chin Fan. Lane detection using directional random walks. In IEEE Intelligent Vehicles Symposium, 2008.
[41] Alexander Vezhnevets, Joachim M. Buhmann, and Vittorio Ferrari. Active learning for semantic segmentation with expected change. In CVPR, 2012.
[42] Donggeun Yoo and In So Kweon. Learning loss for active learning. In CVPR, 2019.
[43] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. BDD100K: A diverse driving dataset for heterogeneous multitask learning. In CVPR, 2020.
[44] Juseung Yun, Byungjoo Kim, and Junmo Kim. Weight decay scheduling and knowledge distillation for active learning. In ECCV, 2020.
[45] Zongwei Zhou, Jae Shin, Lei Zhang, Suryakanth Gurudu, Michael Gotway, and Jianming Liang. Fine-tuning convolutional neural networks for biomedical image analysis: Actively and incrementally. In CVPR, 2017.
activelearningforlanedetectionaknowledgedistillationapproach/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a5b3586eccf15167b584d9a3e8e5bc3dae6854531f584dd17e846e2d1f2349af
size 392203

activelearningforlanedetectionaknowledgedistillationapproach/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:61ec0ee71436cd6db46726390a231399bb1559a21b7597b745e6e7da2a1feeb2
size 485147

activeuniversaldomainadaptation/7b2c6630-9b03-41fe-9663-1b4d1ef9d4bd_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:daa56b68c61a24141ba95af8c640f229398b5f741257d81ac59892c399d838d7
size 96317

activeuniversaldomainadaptation/7b2c6630-9b03-41fe-9663-1b4d1ef9d4bd_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2faf33d7a5e82d7e0e75422729d57f3d23776d11e9a7776937a06fdf5ea1f43a
size 120508

activeuniversaldomainadaptation/7b2c6630-9b03-41fe-9663-1b4d1ef9d4bd_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bbd61fd1306f4505c501d5d801008b9a1a7809cd9860dda8e8f6cbdd34181280
size 1949304

activeuniversaldomainadaptation/full.md
ADDED
@@ -0,0 +1,331 @@
# Active Universal Domain Adaptation

Xinhong Ma$^{1,2}$, Junyu Gao$^{1,2}$ and Changsheng Xu$^{1,2,3}$

$^{1}$ National Lab of Pattern Recognition (NLPR), Institute of Automation, Chinese Academy of Sciences (CASIA)

$^{2}$ School of Artificial Intelligence, University of Chinese Academy of Sciences (UCAS)

$^{3}$ Peng Cheng Laboratory, Shenzhen, China

{xinhong.ma, junyu.gao, csxu}@nlpr.ia.ac.cn
# Abstract

Most unsupervised domain adaptation methods rely on rich prior knowledge about the source-target label set relationship, and they cannot recognize categories beyond the source classes, which limits their applicability in practical scenarios. This paper proposes a new paradigm for unsupervised domain adaptation, termed Active Universal Domain Adaptation (AUDA), which removes all label set assumptions and aims not only to recognize target samples from source classes but also to infer those from target-private classes, by using active learning to annotate a small budget of target data. For AUDA, it is challenging to jointly adapt the model to the target domain and select informative target samples for annotation under a large domain gap and significant semantic shift. To address these problems, we propose an Active Universal Adaptation Network (AUAN). Specifically, we first introduce Adversarial and Diverse Curriculum Learning (ADCL), which progressively aligns the source and target domains to classify whether target samples are from source classes. Then, we propose a Clustering Non-transferable Gradient Embedding (CNTGE) strategy, which utilizes the clues of transferability, diversity, and uncertainty to annotate informative target samples, making it possible to infer labels for target samples of target-private classes. Finally, we propose to jointly train ADCL and CNTGE with target supervision to promote domain adaptation and target-private class recognition. Extensive experiments demonstrate that the proposed AUDA model equipped with ADCL and CNTGE achieves significant results on four popular benchmarks.
# 1. Introduction

Figure 1. Comparison between Active Universal Domain Adaptation and representative domain adaptation settings with respect to classification tasks and assumptions on the target label set. AUDA removes all label set assumptions and aims not only to recognize target samples belonging to the shared common label set but also to infer labels for those belonging to the target-private label set by using active learning to annotate a small budget of target data.

Recent advances in deep neural networks have convincingly demonstrated the high capability of learning effective models on large datasets. These impressive achievements rely heavily on large quantities of labeled training instances, which requires expensive and time-consuming collection and annotation work. A reasonable question is why not directly recycle off-the-shelf knowledge or models from a source domain to new domains. As data from different domains are sampled from different distributions, there is probably a large domain gap [60] which may degrade model performance in the target domain [35, 40]. An appealing way to address this issue is Unsupervised Domain Adaptation (UDA) [44], which aims to learn a classification model with labeled source data and unlabeled target data such that the learned model performs well in the target domain.
Most unsupervised domain adaptation methods can be divided into four categories, namely, closed set domain adaptation [25, 42, 47, 52, 57, 23], partial domain adaptation [4, 5, 6], open set domain adaptation [36, 46, 65, 28], and universal domain adaptation [61, 11, 45], as shown in the top of Figure 1. Specifically, closed set domain adaptation [16, 30, 33] supposes that the source and target domains share the same label set. Partial domain adaptation [4, 64, 6] assumes that the source label set contains the target label set. Open set domain adaptation assumes that the common classes between the two domains are known [36] or that the source label set is a subset of the target label set [46]. Recently, Universal Domain Adaptation [61, 11, 45] removes all assumptions about the source-target label set relationship, and classifies target samples as labels contained in the source label set or marks them as "unknown", similar to open set domain adaptation. However, the "unknown" category remains unknown, which is inapplicable for practical applications, e.g., new product recommendation or rare animal/plant recognition. Therefore, it is necessary for practical domain adaptation algorithms to infer actual labels for samples belonging to the "unknown" category.
To achieve this goal, we propose a new paradigm for unsupervised domain adaptation, referred to as Active Universal Domain Adaptation (AUDA). As shown in the bottom of Figure 1, a labeled source domain and a target domain without any explicit restrictions on the classes are provided for model training. Classes are defined as "known" if they belong to the source label set; otherwise, they are defined as "unknown". Since target samples of "unknown" classes are much more difficult to recognize than those of "known" classes, AUDA algorithms need to draw knowledge from the source domain to first assign a "known"/"unknown" label to each test sample from the target domain. Then, actual class labels should be inferred for both "known" and "unknown" samples. However, it is nearly impossible to infer labels for the "unknown" samples without any labeled training data. Since practical applications offer the possibility of annotating a small budget of target instances, termed Active Learning (AL), we are motivated to acquire labels for a subset of target data from an oracle, especially labels of target "unknown" samples, to assist the inference of unknown categories.
To design algorithms for active universal domain adaptation, we face two technical challenges. (1) Without any prior knowledge of the source-target label set relationship, AUDA suffers from a large domain gap and a significant semantic shift. Specifically, source and target data are sampled from different distributions, and the domain gap makes it hard to recognize "known"/"unknown" instances in the target domain. Moreover, the unexpected semantic shift means that many unknown classes are contained in the target domain, making it extremely difficult to reduce the domain gap between the shared classes. If the domain gap and semantic shift cannot be well reduced, it is challenging for active learning to annotate instances informative enough to infer target "unknown" instances. (2) During active learning, the most informative target instances should be annotated and used to learn to infer target "unknown" instances. Most existing AL approaches prefer to annotate instances that are highly uncertain [10, 12, 24, 54] or diverse [49, 15]. As these approaches perform active learning without considering the domain gap and semantic shift, uncertainty and diversity may be wrongly estimated [34]. Therefore, directly applying traditional AL approaches easily leads to selecting outliers, redundant instances, or uninformative instances for annotation, which is detrimental to further reducing the domain gap and semantic shift, and damages the performance of inferring target "unknown" samples. Although prior work on active domain adaptation [53] tries to deal with the domain gap, it does not consider the semantic shift problem, making it inapplicable for AUDA. As a result, it is advisable to design active learning strategies that annotate the most informative target instances with joint consideration of the domain gap and semantic shift.
Motivated by the above observations, we propose an Active Universal Adaptation Network, which simultaneously adapts the model from the source domain to the target domain and performs active learning on informative target instances for unknown category inference. Specifically, we first propose Adversarial and Diverse Curriculum Learning (ADCL), which designs an adversarial curriculum loss and a diverse curriculum loss to align the source and target domains and to learn to recognize target "known"/"unknown" instances<sup>1</sup>. Thus, the negative effects of domain gap and semantic shift in active learning can be alleviated, which helps to select more informative instances for annotation. Then, we propose an active learning strategy named Clustering Non-transferable Gradient Embedding (CNTGE), which utilizes the clues of transferability, diversity, and uncertainty to annotate target samples of target-private classes and assign pseudo labels to highly confident target "known" instances. The labeled and pseudo labeled target instances provide better supervision for ADCL, which helps to learn better curriculums. Finally, joint training with ADCL and CNTGE further reinforces the adaptation process and learns to infer actual labels for target "unknown" instances.
The main contributions of this paper are: (1) We introduce a more practical unsupervised domain adaptation paradigm, Active Universal Domain Adaptation, which requires no assumptions about the target label set and aims not only to recognize target samples belonging to the shared label set but also to infer those of target-private classes via active learning. (2) To address the AUDA task, we propose the Active Universal Adaptation Network, an end-to-end model which performs adversarial and diverse curriculum learning and clustering non-transferable gradient embedding to cooperatively promote domain adaptation and active learning. (3) Extensive experiments demonstrate that the proposed AUDA model equipped with ADCL and CNTGE achieves significant classification results.
# 2. Related Work
Domain Adaptation. According to the assumptions on the source-target label set, most domain adaptation approaches can be categorized into closed set domain adaptation, partial domain adaptation, open set domain adaptation, and universal domain adaptation. Closed Set Domain Adaptation assumes that the source and target domains share the same label set and focuses on mitigating the impact of the domain gap between the two domains; solutions mainly fall into feature adaptation [17, 31, 63, 26, 9] and generative models [13, 21, 22, 29, 55, 59, 32]. Partial Domain Adaptation assumes that the label set of the source domain is large enough to contain the target label set [4, 5, 6, 8]. Open Set Domain Adaptation assumes that the classes shared by the two domains are known [36] or that the source label set is a subset of the target label set [46], and classifies target samples as source classes or as an "unknown" class. Although a knowledge graph has been leveraged to further infer actual labels for "unknown" samples [65], that work still follows the open set domain adaptation assumptions. Universal Domain Adaptation [61, 11, 45] adopts a more general setting, which can classify target samples as any class in the source label set or mark them as "unknown" without any prior knowledge of the target label set. Unfortunately, the existing paradigms of unsupervised domain adaptation can only classify samples as source classes; all other samples can only be marked as an "unknown" class. Different from the existing DA settings, we are motivated to infer actual classes for all target instances without any assumptions about the source-target label set relationship, via the cooperation between domain adaptation and active learning.
Active Learning. Active learning aims to develop label-efficient algorithms by sampling the most representative queries to be labeled by an oracle [50]. Current approaches can mainly be divided into two categories: uncertainty and diversity. The first aims to annotate samples for which the model has uncertain predictions [12, 10, 54, 48, 58, 43, 18, 3]. The second focuses on picking a set of instances that are representative and diverse for the entire dataset [49, 15, 51, 14]. Several approaches also propose a trade-off between uncertainty and diversity [20, 2]. Recently, active learning with domain adaptation, termed Active Domain Adaptation, has become of great practical interest, but only a little previous work addresses the problem. The pioneering work [41] studies the task of active adaptation applied to sentiment classification for text data. Rita et al. [7] select target samples to learn importance weights for source instances by solving a convex optimization problem that minimizes the maximum mean discrepancy (MMD). However, those strategies do not fit model adaptation with deep nets. More recently, Su et al. [53] study this task in the context of deep convolutional nets, where instances are selected based on their uncertainty and "targetness". However, these label acquisition strategies are designed under the assumption that the source and target domains share the same label set. In our work, we design a novel active learning strategy under the challenges of domain gap and semantic shift, which does not rely on any assumptions about the source-target label set relationship.
# 3. Our Approach
# 3.1. Problem Setting
In active universal domain adaptation, the learning algorithm has access to a labeled source domain $\mathcal{D}_S = \{(\mathbf{x}_i^s,\mathbf{y}_i^s)\}_{i = 1}^{n_s}$ and an unlabeled target domain $\mathcal{D}_{UT} = \{(\mathbf{x}_i^t)\}_{i = 1}^{n_t}$ , which are respectively sampled from different distributions $p_{s}$ and $p_t$ . At each active learning round, the learning algorithm may query an oracle to obtain labels of $n_r$ instances from $\mathcal{D}_{UT}$ . After $R$ rounds of active learning, $n_b$ labeled target instances are added to the budget $\mathcal{D}_{LT} = \{(\mathbf{x}_i^t,\mathbf{y}_i^t)\}_{i = 1}^{n_b}$ , where $n_b = R\cdot n_r$ . We use $\mathcal{C}_s$ to represent the label set of the source domain, while the label set of the target domain is denoted $\mathcal{C}_t$ . $\mathcal{C}_c = \mathcal{C}_s\cap \mathcal{C}_t$ is the common label set shared by both domains. $\tilde{\mathcal{C}}_s = \mathcal{C}_s\backslash \mathcal{C}_c$ and $\tilde{\mathcal{C}}_t = \mathcal{C}_t\backslash \mathcal{C}_c$ respectively represent the source-private and target-private label sets. Note that the target label set $\mathcal{C}_t$ is inaccessible during training. The task of AUDA is to infer actual labels for all target instances, whether they come from $\mathcal{C}_c$ or $\tilde{\mathcal{C}}_t$ .
# 3.2. Active Universal Adaptation Network
We propose an Active Universal Adaptation Network (AUAN) to address the AUDA task. The AUAN consists of a feature extractor $G_{f}$ , a classifier $G_{c}$ , a domain discriminator $G_{d}$ , and prototype classifiers $G_{p}$ , parameterized by $\theta_{f}$ , $\theta_{c}$ , $\theta_{d}$ and $\theta_{p}$ , respectively. $G_{f}$ is learned to generate discriminative representations for source and target samples. $G_{c}$ aims to classify target "known" instances as source classes. $G_{d}$ is trained adversarially to align the source and target domains. $G_{p}$ is designed to classify target "unknown" instances as target private classes, and maintains class representations (prototypes) in the target domain. During training, a new prototype is dynamically added to $G_{p}$ once an instance of a target-private class is annotated by active learning and its prototype is not yet stored in $G_{p}$ .
The learning process consists of three main parts in each training loop, as shown in Figure 2. The AUAN is trained for several loops; for simplicity, we take one training loop as an example to introduce our algorithm. During adversarial and diverse curriculum learning, we train $G_{c}$ and $G_{d}$ in a curriculum learning style to progressively adapt $G_{c}$ to the target domain. Besides, the model gradually learns the ability to identify target "known"/"unknown" instances, which helps to annotate informative target instances during active learning. In the active learning stage, we propose a Clustering Non-transferable Gradient Embedding strategy which utilizes the clues of transferability, uncertainty and diversity: informative target instances are selected for annotation, while highly confident target "known" instances are assigned pseudo labels predicted by $G_{c}$ . Finally, all the labeled and pseudo labeled target data are used to further improve $G_{c}$ in cross-domain alignment and to learn $G_{p}$ for target private classes. After several training loops of the three stages, the model can infer actual labels for all target instances.


Figure 2. Three stages in AUAN: adversarial and diverse curriculum learning, active learning via clustering non-transferable gradient embedding, and joint training with target supervision.
# 3.2.1 Adversarial and Diverse Curriculum Learning
Due to the domain gap and semantic shift in AUDA, it is challenging to directly train a reliable model to recognize the "known"/"unknown" label for target instances and predict actual labels for target "known" instances. In addition, as $G_{c}$ overfits the source domain, it may classify target "unknown" instances as source classes with high confidence. To alleviate these problems, motivated by curriculum learning [27], we select samples from easy to hard for cross-domain alignment and meanwhile reduce the over-confidence of $G_{c}$ on target "unknown" instances. Specifically, we design two curriculum losses, namely, an adversarial curriculum loss $L_{adv}$ and a diverse curriculum loss $L_{div}$ . The overall objective in ADCL is:
$$
\begin{array}{l} \min_{\theta_f, \theta_c} L_c + L_{div} - L_{adv}, \\ \max_{\theta_d} L_{adv}, \end{array} \tag{1}
$$
where $L_{c}$ is the standard cross-entropy classification loss calculated on the source domain. The min-max optimization is implemented with a gradient reversal layer [13]. Both $L_{adv}$ and $L_{div}$ are built on a transfer score metric, which measures the transferability of a target sample $\mathbf{x}_i^t$ . Given a target sample $\mathbf{x}_i^t$ , its transfer score is a combination of two signals:
$$
w_t(\mathbf{x}_i^t) = \max \bar{y}(\mathbf{x}_i^t) + d(\mathbf{x}_i^t), \tag{2}
$$
where $\max \bar{y} (\mathbf{x}_i^t)\in [0,1]$ , $d(\mathbf{x}_i^t)\in [0,1]$ and $w_{t}(\mathbf{x}_{i}^{t})\in [0,2]$ . The first term is the classification confidence, given by the max value of the classification probabilities, i.e., $\bar{y} (\mathbf{x}_i^t) = G_c(G_f(\mathbf{x}_i^t))$ . The second term is the similarity to the source domain, which can be estimated by the output of the domain discriminator, i.e., $d(\mathbf{x}_i^t) = G_d(G_f(\mathbf{x}_i^t))$ . A higher value of $w_{t}(\mathbf{x}_{i}^{t})$ indicates that $\mathbf{x}_i^t$ is likely from the shared label set $\mathcal{C}_c$ ; otherwise $\mathbf{x}_i^t$ may be an "unknown" instance.
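A minimal sketch of the transfer score in Eq (2), assuming `G_c` ends in a softmax (so it returns class probabilities) and `G_d` returns the probability of being source-like; the module names mirror the notation above rather than any released code.

```python
import torch

@torch.no_grad()
def transfer_score(x_t, G_f, G_c, G_d):
    """w_t(x) = max classification probability + source similarity, Eq (2).

    Both terms lie in [0, 1], so w_t lies in [0, 2]; a high score
    suggests the target sample belongs to the shared label set C_c.
    """
    f = G_f(x_t)                            # shared feature extractor
    confidence = G_c(f).max(dim=1).values   # max \bar{y}(x)
    similarity = G_d(f).squeeze(-1)         # d(x): discriminator output
    return confidence + similarity
```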
The adversarial curriculum loss $L_{adv}$ aims to progressively align source and target samples from the common label set $\mathcal{C}_c$ , as shown in Eq (3):
$$
\begin{array}{rl} L_{adv} = & \mathbb{E}_{\mathbf{x}_i^s \in \mathcal{D}_S}\left[ w_s(\mathbf{x}_i^s) \cdot \log\left(1 - G_d(G_f(\mathbf{x}_i^s))\right) \right] \\ & + \mathbb{E}_{\mathbf{x}_i^t \in \mathcal{D}_{UT}}\left[ \mathbb{1}_{w_t(\mathbf{x}_i^t) \geqslant w_\alpha(t)} \cdot \log\left(G_d(G_f(\mathbf{x}_i^t))\right) \right], \end{array} \tag{3}
$$
where the indicator $\mathbb{1}_{w_t(\mathbf{x}_i^t)\geqslant w_\alpha (t)}$ in $L_{adv}$ selects target samples $\mathbf{x}_i^t$ belonging to $\mathcal{C}_c$ from easy to hard by gradually reducing the value of $w_{\alpha}(t)$ . The source weight $w_{s}(\mathbf{x}_{i}^{s})$ assigns higher values to source samples from $\mathcal{C}_c$ and lower values to source samples from $\tilde{\mathcal{C}}_s$ , and can be reliably estimated from $G_{c}$ 's predictions on target samples from $\mathcal{C}_c$ . First, we use the curriculum $w_{t}(\mathbf{x}_{i}^{t})\geqslant w_{\alpha}(t)$ to select target samples from $\mathcal{C}_c$ , which should have higher classification probability (predicted by $G_{c}$ ) on the shared categories than on the source-private categories. Then, we take $G_{c}$ 's predictions on the selected target samples and calculate the average classification probabilities $\mathbf{V}$ , i.e., $\mathbf{V} = \mathrm{avg}_{w_t(\mathbf{x}_i^t)\geqslant w_\alpha (t)}G_c(G_f(\mathbf{x}_i^t))$ . Categories with higher values in $\mathbf{V}$ are probably shared categories, while those with lower values are likely source-private categories. Therefore, $\mathbf{V}$ can be used to calculate the weight of a source sample $(\mathbf{x}_i^s,\mathbf{y}_i^s)$ , i.e., $w_{s}(\mathbf{x}_{i}^{s}) = \mathbf{V}_{\mathbf{y}_{i}^{s}}$ , where $\mathbf{y}_i^s$ is used as the index into $\mathbf{V}$ . Note that source samples with the same category label are assigned the same weight.
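The source-weight computation can be sketched as follows, assuming `target_probs` stacks $G_{c}$'s probability vectors for the target samples passing the curriculum threshold:

```python
import torch

@torch.no_grad()
def source_weights(target_probs, y_source):
    """Sketch of w_s: average G_c probabilities over curriculum-selected
    targets, then index by the source label.

    target_probs: (n, |C_s|) probabilities of targets with w_t >= w_alpha(t)
    y_source:     (m,) integer source class labels
    """
    V = target_probs.mean(dim=0)  # average class probability vector V
    return V[y_source]            # w_s(x^s) = V[y^s]; shared per class
```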
To gradually reduce the over-reliance of classifier $G_{c}$ , the diverse curriculum loss $L_{div}$ defined in Eq (4) utilizes the indicator $\mathbb{1}_{w_t(\mathbf{x}_i^t) < w_\alpha(t)}$ to select target "unknown" samples and enforces these selected samples to be uniformly distributed across different classes in $C_s$ by minimizing the negative entropy of $G_{c}$ 's predictions.
$$
L_{div} = \mathbb{E}_{\mathbf{x}_i^t \sim p_t}\left[ \mathbb{1}_{w_t(\mathbf{x}_i^t) < w_\alpha(t)} \cdot \left(-H(G_c(G_f(\mathbf{x}_i^t)))\right) \right], \tag{4}
$$
where $H(\cdot)$ is the entropy function. With the cooperation between adversarial curriculum loss and diverse curriculum loss, the model can progressively and reliably predict the "known"/"unknown" label for target instances and classify target "known" instances.
# 3.2.2 Active Learning via Clustering Non-transferable Gradient Embedding
We hope the model can find the most informative instances in the unlabeled target dataset $\mathcal{D}_{UT}$ and query their labels from an oracle to construct the labeled target dataset $\mathcal{D}_{LT}$ . Intuitively, the informative target instances are the ones most different from what the model already knows. Previous AL strategies focus on finding instances that are highly uncertain or diverse, which is suboptimal for AUDA. The informative instances in AUDA should satisfy the following conditions: (1) Similar to traditional AL, the selected instances should be highly uncertain and diverse. (2) The selected instances should be target "unknown" samples whose actual labels are from the target private label set, making it possible to learn prototype classifiers for target private classes. To satisfy these requirements, we propose to perform active learning by Clustering Non-transferable Gradient Embedding (CNTGE), which utilizes the clues of transferability, uncertainty and diversity.
Transferability. To accurately select target samples of target private classes for annotation, we should first remove target "known" samples from $\mathcal{D}_{UT}$ and perform active learning on the remaining unlabeled data. To achieve this, we first run the K-means algorithm [1] on all the unlabeled target features $\{\mathbf{f}_i^{ut}|\mathbf{f}_i^{ut} = G_f(\mathbf{x}_i^t),\mathbf{x}_i^t\in \mathcal{D}_{UT}\}$ to obtain $n_r$ centroids $\{\mathbf{u}_i\}_{i = 1}^{n_r}$ . Then, we calculate the transfer scores $w_{t}(\mathbf{u}_{i})$ (Eq (2)) of these centroids. We assume that a cluster's category is from the common label set $\mathcal{C}_c$ and the samples belonging to it are transferable if $w_{t}(\mathbf{u}_{i}) > \beta$ ; otherwise, the cluster's category is from the target private label set $\tilde{\mathcal{C}}_t$ and its samples are non-transferable. For clusters with $w_{t}(\mathbf{u}_{i}) > \beta$ , we can construct the pseudo labeled target dataset $\mathcal{D}_{PLT}$ without any annotation cost, i.e., $\mathcal{D}_{PLT} = \{(\mathbf{x}_{ij}^t,\tilde{y}_i^t)\,|\,w_t(\mathbf{u}_i) > \beta ,\ \tilde{y}_i^t = \arg \max G_c(\mathbf{u}_i),\ i = 1,\dots ,n_r\}$ , where $\mathbf{u}_i$ is the clustering centroid of $\mathbf{x}_{ij}^t$ . As for the rest of the unlabeled target samples, their labels are most likely from the target private label set $\tilde{\mathcal{C}}_t$ ; these non-transferable instances are used as the query candidates $\mathcal{D}_{NT} = \mathcal{D}_{UT}\backslash \mathcal{D}_{PLT}$ for active learning.
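A sketch of this transferability split, assuming scikit-learn's KMeans and a `scores_fn` that evaluates the transfer score of Eq (2) directly on centroid features:

```python
import numpy as np
from sklearn.cluster import KMeans

def split_by_transferability(feats, scores_fn, n_r, beta):
    """Cluster target features and split samples by centroid transfer score.

    feats:     (N, d) unlabeled target features from G_f
    scores_fn: maps an array of centroid features to transfer scores w_t
    Returns boolean masks (transferable, non_transferable) over samples.
    """
    km = KMeans(n_clusters=n_r).fit(feats)
    centroid_scores = np.asarray(scores_fn(km.cluster_centers_))  # w_t(u_i)
    transferable_clusters = centroid_scores > beta
    transferable = transferable_clusters[km.labels_]  # per-sample mask
    return transferable, ~transferable
```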
Uncertainty and Diversity: Clustering Gradient Embeddings of Non-transferable Instances. To jointly capture both the uncertainty and the diversity of the non-transferable instances in $\mathcal{D}_{NT}$ , at each active learning round we select $n_r$ target instances, query their labels from an oracle, and add them to the target labeled dataset $\mathcal{D}_{LT}$ . Specifically, we first compute the gradient embeddings [2] of all non-transferable instances in $\mathcal{D}_{NT}$ . Note that the magnitude of a gradient vector captures the uncertainty of the model on the instance: if the model is highly certain about the instance's label, the norm of its gradient embedding is small, and the norm is large for instances on which the model is uncertain. Then, $n_r$ diverse, high-magnitude samples are selected. It is impossible to guarantee that all instances in $\mathcal{D}_{LT}$ have labels in $\tilde{\mathcal{C}}_t$ ; some probably have labels in $\mathcal{C}_c$ . Even so, they are helpful for promoting the adaptation process.
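A sketch of the BADGE-style gradient embedding [2] and the diverse high-magnitude selection. It follows the standard derivation that the last-layer cross-entropy gradient at the pseudo-label is $(p - \mathrm{onehot}(\hat{y}))$ outer-multiplied with the penultimate features; the k-means++ seeding shown is one common choice, not necessarily the authors' exact variant.

```python
import numpy as np

def gradient_embeddings(probs, feats):
    """(N, C) class probabilities and (N, d) penultimate features ->
    (N, C*d) hallucinated last-layer gradients."""
    n, c = probs.shape
    onehot = np.eye(c)[probs.argmax(axis=1)]
    return ((probs - onehot)[:, :, None] * feats[:, None, :]).reshape(n, -1)

def kmeanspp_select(emb, k, rng=np.random.default_rng(0)):
    """k-means++ seeding over embeddings: diverse, high-norm samples."""
    chosen = [int(np.argmax((emb ** 2).sum(axis=1)))]  # largest gradient
    d2 = ((emb - emb[chosen[0]]) ** 2).sum(axis=1)
    while len(chosen) < k:
        idx = int(rng.choice(len(emb), p=d2 / d2.sum()))
        chosen.append(idx)
        d2 = np.minimum(d2, ((emb - emb[idx]) ** 2).sum(axis=1))
    return chosen
```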
# 3.2.3 Joint Training with Target Supervision
After the active learning process, two types of target supervision are provided: $\mathcal{D}_{PLT}$ and $\mathcal{D}_{LT}$ . These annotated target instances are leveraged to further promote the functions of ADCL and to learn $G_{p}$ for inferring target "unknown" instances. To promote ADCL, instances in $\mathcal{D}_{LT}$ from source classes should join the learning of the classifier $G_{c}$ and the cross-domain adversarial training, while instances in $\mathcal{D}_{LT}$ from target private classes should help reduce $G_{c}$ 's over-confidence. Therefore, the classification loss $L_{c}$ , adversarial curriculum loss $L_{adv}$ (Eq (3)) and diverse curriculum loss $L_{div}$ (Eq (4)) are re-formulated, as shown in Eq (5):
$$
\tilde{L}_c = L_c + \mathbb{E}_{(\mathbf{x}_i^t, \mathbf{y}_i^t) \in \mathcal{D}_{LT},\, \mathbf{y}_i^t \in \mathcal{C}_s}\left[ L_{ce}(\mathbf{y}_i^t, G_c(G_f(\mathbf{x}_i^t))) \right],
$$

$$
\tilde{L}_{adv} = L_{adv} + \mathbb{E}_{(\mathbf{x}_i^t, \mathbf{y}_i^t) \in \mathcal{D}_{LT},\, \mathbf{y}_i^t \in \mathcal{C}_s}\left[ \log\left(G_d(G_f(\mathbf{x}_i^t))\right) \right], \tag{5}
$$

$$
\tilde{L}_{div} = L_{div} + \mathbb{E}_{(\mathbf{x}_i^t, \mathbf{y}_i^t) \in \mathcal{D}_{LT},\, \mathbf{y}_i^t \notin \mathcal{C}_s}\left[ -H(G_c(G_f(\mathbf{x}_i^t))) \right].
$$
Since $\mathcal{D}_{PLT}$ contains noisy labels, for accurate cross-domain alignment, we only leverage $\mathcal{D}_{LT}$ to enforce the adaptation process.
As $\mathcal{D}_{LT}$ is too small to learn the prototype classifiers $G_{p}$ discriminatively, we leverage $\mathcal{D}_{PLT}$ as complementary cues in the learning process. To further improve the instance-level discriminative power for all target samples, we cluster each target feature in $\mathcal{D}_{UT}$ with its neighbors (labeled target features or prototypes) via a self-supervised clustering objective $L_{nc}$ . Thus, similar features cluster together and $G_{p}$ can make more reliable predictions. The overall objective to learn $G_{p}$ is:
$$
\min_{\theta_f, \theta_p} L_p + L_{nc}, \tag{6}
$$
where the classification loss $L_{p}$ is defined in Eq (7):
$$
\begin{array}{rl} L_p = & \mathbb{E}_{(\mathbf{x}_i^t, \tilde{\mathbf{y}}_i^t) \in \mathcal{D}_{PLT}}\left[ L_{ce}(\tilde{\mathbf{y}}_i^t, G_p(G_f(\mathbf{x}_i^t))) \right] \\ & + \mathbb{E}_{(\mathbf{x}_i^t, \mathbf{y}_i^t) \in \mathcal{D}_{LT}}\left[ L_{ce}(\mathbf{y}_i^t, G_p(G_f(\mathbf{x}_i^t))) \right], \end{array} \tag{7}
$$
where $L_{ce}$ is the cross-entropy loss.
To cluster the target features in $\mathcal{D}_{UT}$ toward meaningful neighbors, we calculate a self-supervised clustering loss $L_{nc}$ . Here, the meaningful neighbors are labeled target samples or prototypes in $G_{p}$ . First, in a mini-batch, we calculate the similarity of each unlabeled target feature $\mathbf{f}_i^{ut}$ in $\mathcal{D}_{UT}$ to all labeled target features $\{\mathbf{f}_i^{lt}|\mathbf{f}_i^{lt} = G_f(\mathbf{x}_i^t),\mathbf{x}_i^t\in \mathcal{D}_{LT}\}$ and to the $K$ prototypes $\{\mathbf{w}_1,\dots ,\mathbf{w}_k,\dots ,\mathbf{w}_K\}$ in $G_{p}$ . Since a mini-batch cannot contain all the labeled target samples, we construct a memory bank $\mathbf{M}\in \mathbb{R}^{(n_r + K)\times d}$ to store all the labeled target features and prototypes, i.e., $\mathbf{M} = [\mathbf{f}_1^{lt},\dots ,\mathbf{f}_i^{lt},\dots ,\mathbf{f}_{n_r}^{lt},\mathbf{w}_1,\dots ,\mathbf{w}_k,\dots ,\mathbf{w}_K]$ , where $\mathbf{f}_i^{lt}$ and $\mathbf{w}_k$ are L2-normalized. Because $G_{f}$ and $G_{p}$ are updated at each training step, $\mathbf{M}$ is updated with mini-batch data by replacing the older entries with the updated ones. Let $\mathbf{M}_j$ denote the $j$ -th item in $\mathbf{M}$ . Then, the probability that a target feature $\mathbf{f}_i^{ut}$ in $\mathcal{D}_{UT}$ is a neighbor of $\mathbf{M}_j$ is:
$$
p_{i,j} = \frac{\exp\left(\mathbf{M}_j^T \mathbf{f}_i^{ut} / \tau\right)}{Z_i} \quad \text{and} \quad Z_i = \sum_{\mathbf{M}_j \in \mathbf{M}} \exp\left(\mathbf{M}_j^T \mathbf{f}_i^{ut} / \tau\right), \tag{8}
$$
where $\tau$ is the temperature parameter. Then, the entropy-based clustering loss is calculated as:
$$
L_{nc} = \mathbb{E}_{\mathbf{x}_i^t \in \mathcal{D}_{UT}}\left[ \sum_j -p_{i,j} \log\left(p_{i,j}\right) \right]. \tag{9}
$$
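Eqs (8) and (9) amount to a temperature-scaled softmax over memory-bank similarities followed by an entropy penalty. A minimal PyTorch sketch, assuming L2-normalized rows in the memory bank `M` and in the unlabeled features `f_ut` (the value of `tau` is an assumption):

```python
import torch
import torch.nn.functional as F

def neighbor_clustering_loss(f_ut, M, tau=0.05):
    """L_nc: entropy of the soft assignment of each unlabeled target
    feature to memory items (labeled target features and prototypes).

    f_ut: (B, d) unlabeled target features; M: (n_r + K, d) memory bank.
    """
    logits = f_ut @ M.t() / tau   # (B, n_r + K) scaled similarities
    p = F.softmax(logits, dim=1)  # neighbor probabilities p_{i,j}, Eq (8)
    return -(p * p.clamp_min(1e-12).log()).sum(dim=1).mean()  # Eq (9)
```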
# Algorithm 1: Active Universal Adaptation Network
1 Require: Feature extractor $G_{f}$ , classifier $G_{c}$ , domain discriminator $G_{d}$ and prototype classifier $G_{p}$ , parameterized by $\theta_{f}$ , $\theta_{c}$ , $\theta_{d}$ and $\theta_{p}$ respectively; labeled source domain $\mathcal{D}_S$ ; unlabeled target domain $\mathcal{D}_{UT}$ ; total rounds $R$ ; per-round budget $n_{r}$ .
2 Define: target labeled dataset $\mathcal{D}_{LT} = \emptyset$ , target pseudo labeled dataset $\mathcal{D}_{PLT} = \emptyset$ . $G_{p}$ contains no prototypes in the beginning.
3 Warm Up: Solve Eq (1) with $\mathcal{D}_S$ and $\mathcal{D}_{UT}$ .
4 for $r = 0$ to $R$ do
5   # AL: Clustering Non-transferable Gradient Embedding:
6   $\mathcal{D}_{UT} = \mathcal{D}_{UT}\backslash \mathcal{D}_{LT}$
7   For all instances in $\mathcal{D}_{UT}$ :
8     1. Run K-means and calculate the transfer scores of the $n_{r}$ centroids.
9     2. Construct the target pseudo labeled dataset $\mathcal{D}_{PLT}$ .
10    3. Compute gradient embeddings [2] on $\mathcal{D}_{NT} = \mathcal{D}_{UT}\backslash \mathcal{D}_{PLT}$ , query the labels of $n_r$ selected instances, add them into $\mathcal{D}_{LT}$ , and add new prototypes to $G_{p}$ .
11  # Joint Training AUAN via Adversarial and Diverse Curriculum Learning:
12  Solve Eq (10) with $\mathcal{D}_S$ , $\mathcal{D}_{UT}$ , $\mathcal{D}_{LT}$ and $\mathcal{D}_{PLT}$ .
13 end
Output: Model parameters $\theta_{f}$ , $\theta_{c}$ , $\theta_{d}$ and $\theta_{p}$ .
Finally, the overall objective for learning the active universal adaptation network is shown in Eq (10):
$$
\begin{array}{l} \min_{\theta_f, \theta_c} \tilde{L}_c + \tilde{L}_{div} - \tilde{L}_{adv}, \\ \max_{\theta_d} \tilde{L}_{adv}, \\ \min_{\theta_f, \theta_p} L_p + L_{nc}, \end{array} \tag{10}
$$
where the parameters are optimized in an alternating fashion. Algorithm 1 shows the training and active learning processes. Once the model is trained, we can leverage the AUAN model to classify all target instances according to Eq (11):
$$
y(\mathbf{x}_i^t) = \begin{cases} \arg\max G_c(G_f(\mathbf{x}_i^t)) & w_t(\mathbf{x}_i^t) > w_0 \\ \arg\max G_p(G_f(\mathbf{x}_i^t)) & \text{otherwise} \end{cases} \tag{11}
$$
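The two-branch inference rule of Eq (11) can be sketched as below; note that the two `argmax` results index different label spaces (source classes vs. target-private prototypes), and `w0` is the decision threshold on the transfer score:

```python
import torch

@torch.no_grad()
def predict(x_t, G_f, G_c, G_p, w0, transfer_score_fn):
    """Route each target sample to G_c ("known") or G_p ("unknown")."""
    f = G_f(x_t)
    w = transfer_score_fn(x_t)        # w_t(x) from Eq (2)
    known = G_c(f).argmax(dim=1)      # source-class prediction
    unknown = G_p(f).argmax(dim=1)    # target-private prototype prediction
    return torch.where(w > w0, known, unknown)
```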
# 4. Experiments
In this section, we first describe the datasets, compared methods, evaluation protocols, and implementation details. Then, we present extensive experimental results and analysis. Due to limited space, more results and analysis can be found in the supplementary material.
# 4.1. Setup
Datasets. The first dataset Office-Home [56] contains four domains: Art (Ar), Clipart (Cl), Product (Pr) and Real-World (Rw) across 65 classes. In the alphabet order, we use the first 10 classes as $\mathcal{C}_c$ , the next 5 classes as $\tilde{\mathcal{C}}_s$ and the rest as $\tilde{\mathcal{C}}_t$ . The second dataset VisDA [39] contains 12 classes from two domains: synthetic (S) and real (R) images. The class numbers of $\mathcal{C}_c$ , $\tilde{\mathcal{C}}_s$ and $\tilde{\mathcal{C}}_t$ are respectively 6, 5 and 3. The third dataset is Office-31 [44], which contains three domains (Amazon (A), DSLR (D), Webcam (W)) and 31 classes. The class numbers of $\mathcal{C}_c$ , $\tilde{\mathcal{C}}_s$ and $\tilde{\mathcal{C}}_t$ are respectively 10, 11 and 10. The forth dataset is DomainNet [38], which contains six domains: Clipart (C), Infograph (I), Painting (P), Quickdraw (Q), Real (R) and Sketch (S) across 345
|
| 194 |
+
|
| 195 |
+
Table 1. The average class accuracy $(\%)$ on Office-Home, Office31, VisDA and DomainNet datasets for different DA methods equipped with different AL strategies. The best results are bolded.
|
| 196 |
+
|
| 197 |
+
<table><tr><td>AL
|
| 198 |
+
DA</td><td>Random</td><td>Margin</td><td>Coreset</td><td>BADGE</td><td>AVG</td></tr><tr><td colspan="6">Office-Home</td></tr><tr><td>ResNet</td><td>26.32</td><td>28.06</td><td>30.42</td><td>28.35</td><td>28.29</td></tr><tr><td>UAN</td><td>32.58</td><td>32.58</td><td>32.77</td><td>33.86</td><td>32.95</td></tr><tr><td>ADCL</td><td>47.19</td><td>46.86</td><td>45.73</td><td>47.94</td><td>47.33</td></tr><tr><td colspan="6">Office-31</td></tr><tr><td>ResNet</td><td>75.67</td><td>76.37</td><td>77.97</td><td>77.29</td><td>76.83</td></tr><tr><td>UAN</td><td>64.56</td><td>60.56</td><td>65.43</td><td>61.96</td><td>63.13</td></tr><tr><td>ADCL</td><td>79.15</td><td>78.55</td><td>80.79</td><td>80.51</td><td>79.75</td></tr><tr><td colspan="6">VisDA</td></tr><tr><td>ResNet</td><td>59.27</td><td>61.98</td><td>61.43</td><td>61.91</td><td>61.15</td></tr><tr><td>UAN</td><td>59.11</td><td>72.45</td><td>57.28</td><td>62.83</td><td>62.92</td></tr><tr><td>ADCL</td><td>63.15</td><td>63.49</td><td>62.58</td><td>64.00</td><td>63.31</td></tr><tr><td colspan="6">DomainNet</td></tr><tr><td>ResNet</td><td>27.43</td><td>29.07</td><td>28.91</td><td>30.68</td><td>29.02</td></tr><tr><td>UAN</td><td>34.12</td><td>34.91</td><td>35.47</td><td>35.90</td><td>35.10</td></tr><tr><td>ADCL</td><td>37.54</td><td>37.37</td><td>34.34</td><td>37.09</td><td>36.59</td></tr></table>
|
| 199 |
+
|
| 200 |
+
classes. The class numbers of $\mathcal{C}_c$ , $\tilde{\mathcal{C}}_s$ and $\tilde{\mathcal{C}}_t$ are respectively 150, 50 and 145. Following [11], we choose 3 domains of the DomainNet dataset and transfer between each pair. For a fair comparison, all dataset partitions follow the universal domain adaptation setting [61]. We set the per-round budget as 21 for Office-Home, 10 for Office-31, 100 for VisDA and 115 for DomainNet, and perform 15 rounds of active learning.
|
| 201 |
+
|
| 202 |
+
Compared Methods. As existing UDA methods cannot handle the new AUDA task, we extend two domain adaptation baselines to the AUDA setting, i.e., ResNet [19] and UAN [61] equipped with state-of-the-art active learning approaches. To compare four types of active learning strategies, we select the following seven approaches: (1) Random: the naive baseline that randomly selects several instances to annotate at each round. (2) Uncertainty: a) Entropy [58]: sampling instances over which the model has high predictive entropy. b) Margin [43]: sampling instances for which the margin between the model's top-2 predicted scores is the smallest. c) Confidence [58]: sampling instances for which the predictive confidence is the lowest. (3) Diversity: a) K-means: K-means is performed at each round and, for each cluster, the sample closest to its centroid is selected. b) Coreset [49]: sampling instances that geometrically cover the data distribution. (4) Mixture of Uncertainty and Diversity: BADGE [2]: sampling instances that are diverse and of high magnitude in a hallucinated gradient space.
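To make the uncertainty family concrete, here is a minimal sketch of the Margin strategy [43]; the function name and array shapes are our own illustration, not code from the paper:

```python
import numpy as np

def margin_sampling(probs, budget):
    """Select the unlabeled target instances whose top-2 predicted
    probabilities are closest, i.e. where the model is most ambiguous.
    probs: (N, C) softmax outputs; returns `budget` indices to annotate."""
    sorted_probs = np.sort(probs, axis=1)
    margins = sorted_probs[:, -1] - sorted_probs[:, -2]  # small = ambiguous
    return np.argsort(margins)[:budget]
```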
|
| 203 |
+
|
| 204 |
+
Evaluation Protocols. We report the average class accuracy for comparison. Specifically, we first compute the classification accuracy for each category in the target domain and then average over categories. Besides, curves of average class accuracy against the annotation round are plotted to compare different active learning strategies.
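A minimal sketch of this metric (function and variable names are illustrative):

```python
import numpy as np

def average_class_accuracy(y_true, y_pred):
    """Per-class accuracy on the target domain, averaged over classes,
    so rare categories weigh as much as frequent ones."""
    classes = np.unique(y_true)
    per_class = [np.mean(y_pred[y_true == c] == c) for c in classes]
    return float(np.mean(per_class))
```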
|
| 205 |
+
|
| 206 |
+
Implementation Details. We use PyTorch [37] for our implementation. Following UAN [61], ResNet-50 [19] is used as the feature extractor. A bottleneck layer with 256 units, followed by a classifier and a domain discriminator, is added after the feature extractor. Another bottleneck layer
|
| 207 |
+
|
| 208 |
+
Table 2. Average class accuracy (%) at the 5th, 10th and 15th annotation rounds on Office-Home, Office-31, VisDA and DomainNet datasets for comparing different active learning strategies. The best results are bolded.
|
| 209 |
+
|
| 210 |
+
<table><tr><td rowspan="3">AL Strategy</td><td colspan="21">Office-Home</td></tr><tr><td rowspan="2">5th</td><td colspan="2">Ar→Cl</td><td rowspan="2">5th</td><td colspan="2">Ar→Pr</td><td rowspan="2">5th</td><td colspan="2">Ar→Rw</td><td rowspan="2">5th</td><td colspan="2">C1→Ar</td><td rowspan="2">5th</td><td colspan="2">C1→Pr</td><td rowspan="2">5th</td><td colspan="2">C1→Rw</td><td rowspan="2">5th</td><td colspan="2">Pr→Ar</td></tr><tr><td>10th</td><td>15th</td><td>10th</td><td>15th</td><td>10th</td><td>15th</td><td>10th</td><td>15th</td><td>10th</td><td>15th</td><td>10th</td><td>15th</td><td>10th</td><td>15th</td></tr><tr><td>Random</td><td>21.36</td><td>26.34</td><td>29.33</td><td>50.58</td><td>54.09</td><td>60.05</td><td>42.67</td><td>46.03</td><td>48.32</td><td>34.48</td><td>41.51</td><td>47.27</td><td>48.53</td><td>53.06</td><td>56.53</td><td>42.10</td><td>45.99</td><td>47.20</td><td>39.10</td><td>45.63</td><td>52.78</td></tr><tr><td>Entropy</td><td>20.01</td><td>24.51</td><td>27.19</td><td>45.32</td><td>51.71</td><td>55.41</td><td>41.98</td><td>46.17</td><td>48.48</td><td>31.56</td><td>37.94</td><td>44.62</td><td>39.84</td><td>45.94</td><td>51.52</td><td>41.73</td><td>45.12</td><td>45.44</td><td>32.55</td><td>41.19</td><td>47.86</td></tr><tr><td>Confidence</td><td>18.59</td><td>26.72</td><td>29.36</td><td>46.99</td><td>52.66</td><td>55.81</td><td>42.06</td><td>46.35</td><td>47.41</td><td>35.52</td><td>42.31</td><td>43.89</td><td>45.98</td><td>49.25</td><td>52.20</td><td>42.17</td><td>47.15</td><td>50.10</td><td>36.08</td><td>41.44</td><td>46.10</td></tr><tr><td>K-means</td><td>21.68</td><td>25.27</td><td>27.45</td><td>46.53</td><td>50.37</td><td>53.17</td><td>40.40</td><td>41.24</td><td>42.71</td><td>34.17</td><td>37.79</td><td>41.36</td><td>43.51</td><td>47.21</td><td>48.09</td><td>39.08</td><td>41.17</td><td>41.54</td><td>38.69</td><td>41.41</td><td>46.54</td></tr><tr><td>Margin</td><td>24.60</td><td>26.37</td><td>29.49</td><td>50.63</td><td>56.31</td><td>58.49</td><td>49.24</td><td>49.95</td><td>51.36</td><td>36.93</td><td>42.35</td><td>44.79</td><td>47.41</td><td>52.62</td><td>56.35</td><td>42.15</td><td>46.63</td><td>47.73</td><td>37.43</td><td>45.34</td><td>50.11</td></tr><tr><td>Coreset</td><td>25.04</td><td>26.58</td><td>26.94</td><td>49.94</td><td>53.44</td><td>56.77</td><td>45.97</td><td>46.99</td><td>48.89</td><td>39.11</td><td>42.01</td><td>44.29</td><td>47.78</td><td>48.45</td><td>52.94</td><td>44.09</td><td>45.30</td><td>48.50</td><td>40.33</td><td>46.33</td><td>51.47</td></tr><tr><td>BADGE</td><td>22.28</td><td>28.45</td><td>30.53</td><td>51.03</td><td>55.81</td><td>58.44</td><td>43.41</td><td>48.05</td><td>49.79</td><td>32.60</td><td>39.66</td><td>44.59</td><td>49.07</td><td>54.25</td><td>58.63</td><td>42.20</td><td>45.58</td><td>48.00</td><td>41.14</td><td>47.86</td><td>54.68</td></tr><tr><td>CNTGE (Ours)</td><td>27.25</td><td>32.44</td><td>36.51</td><td>56.02</td><td>63.58</td><td>67.96</td><td>48.39</td><td>57.56</td><td>61.02</td><td>40.89</td><td>50.20</td><td>53.75</td><td>51.57</td><td>61.58</td><td>65.82</td><td>46.49</td><td>56.03</td><td>61.88</td><td>38.50</td><td>49.49</td><td>54.26</td></tr><tr><td rowspan="3">AL Strategy</td><td colspan="18">Office-Home</td><td colspan="3">VisDA S→R</td></tr><tr><td rowspan="2">5th</td><td colspan="2">Pr→Cl</td><td rowspan="2">5th</td><td colspan="2">Pr→Rw</td><td colspan="2">Rw→Ar</td><td colspan="2">Rw→Cl</td><td colspan="2">Rw→Pr</td><td colspan="2">Rw→Pr</td><td colspan="2">AVG</td><td rowspan="2">5th</td><td 
rowspan="2" colspan="3">S→R</td><td></td></tr><tr><td>10th</td><td>15th</td><td>10th</td><td>15th</td><td>5th</td><td>10th</td><td>15th</td><td>5th</td><td>10th</td><td>15th</td><td>5th</td><td>10th</td><td>15th</td><td>5th</td><td></td></tr><tr><td>Random</td><td>21.47</td><td>24.80</td><td>29.25</td><td>50.80</td><td>54.00</td><td>58.68</td><td>38.50</td><td>46.78</td><td>52.11</td><td>18.16</td><td>21.67</td><td>25.01</td><td>51.59</td><td>55.83</td><td>61.77</td><td>38.28</td><td>42.98</td><td>46.86</td><td>62.72</td><td>63.15</td><td>63.15</td></tr><tr><td>Entropy</td><td>17.01</td><td>21.58</td><td>22.95</td><td>45.15</td><td>50.11</td><td>54.75</td><td>33.31</td><td>38.93</td><td>45.95</td><td>17.12</td><td>20.47</td><td>22.38</td><td>45.18</td><td>49.82</td><td>53.01</td><td>34.23</td><td>39.46</td><td>43.30</td><td>57.46</td><td>57.97</td><td>60.06</td></tr><tr><td>Confidence</td><td>19.72</td><td>23.47</td><td>24.54</td><td>47.95</td><td>53.59</td><td>57.96</td><td>35.34</td><td>43.32</td><td>45.51</td><td>17.41</td><td>19.43</td><td>23.74</td><td>48.21</td><td>52.51</td><td>57.44</td><td>36.34</td><td>41.52</td><td>44.50</td><td>58.77</td><td>60.64</td><td>61.17</td></tr><tr><td>K-means</td><td>22.97</td><td>27.58</td><td>28.95</td><td>44.68</td><td>47.44</td><td>49.68</td><td>37.88</td><td>40.96</td><td>46.13</td><td>20.01</td><td>22.76</td><td>22.94</td><td>45.27</td><td>51.35</td><td>53.49</td><td>36.24</td><td>39.55</td><td>41.84</td><td>64.40</td><td>64.59</td><td>64.59</td></tr><tr><td>Margin</td><td>23.84</td><td>28.26</td><td>30.02</td><td>50.74</td><td>56.72</td><td>60.89</td><td>39.19</td><td>44.74</td><td>49.26</td><td>19.98</td><td>22.64</td><td>22.73</td><td>56.16</td><td>59.34</td><td>61.06</td><td>39.86</td><td>44.27</td><td>46.86</td><td>62.05</td><td>63.49</td><td>63.49</td></tr><tr><td>Coreset</td><td>24.21</td><td>27.34</td><td>28.00</td><td>51.51</td><td>58.47</td><td>59.10</td><td>41.10</td><td>45.25</td><td>50.17</td><td>22.63</td><td>22.68</td><td>22.68</td><td>50.89</td><td>55.54</td><td>58.99</td><td>40.22</td><td>43.20</td><td>45.73</td><td>62.24</td><td>62.24</td><td>62.58</td></tr><tr><td>BADGE</td><td>23.43</td><td>28.59</td><td>29.91</td><td>49.21</td><td>58.96</td><td>61.01</td><td>38.67</td><td>47.69</td><td>51.79</td><td>18.35</td><td>23.39</td><td>25.64</td><td>49.27</td><td>58.45</td><td>62.28</td><td>38.39</td><td>44.73</td><td>47.94</td><td>63.76</td><td>64.00</td><td>64.00</td></tr><tr><td>CNTGE (Ours)</td><td>28.49</td><td>34.38</td><td>39.11</td><td>55.02</td><td>64.44</td><td>69.76</td><td>40.96</td><td>50.93</td><td>55.03</td><td>25.65</td><td>31.13</td><td>36.55</td><td>56.18</td><td>64.56</td><td>69.24</td><td>42.95</td><td>51.36</td><td>55.91</td><td>71.32</td><td>73.23</td><td>73.91</td></tr><tr><td rowspan="3">AL Strategy</td><td colspan="20">Office-Home</td><td></td></tr><tr><td rowspan="2">5th</td><td colspan="2">A→D</td><td rowspan="2">5th</td><td colspan="2">A→W</td><td colspan="2">D→A</td><td colspan="2">D→W</td><td colspan="2">D→W</td><td colspan="2">W→A</td><td colspan="2">W→D</td><td rowspan="2" 
colspan="3">AVG</td><td></td><td></td></tr><tr><td>10th</td><td>15th</td><td>10th</td><td>15th</td><td>5th</td><td>10th</td><td>15th</td><td>5th</td><td>10th</td><td>15th</td><td>5th</td><td>10th</td><td>15th</td><td>5th</td><td></td><td></td></tr><tr><td>Random</td><td>80.75</td><td>82.78</td><td>85.23</td><td>71.58</td><td>79.30</td><td>84.22</td><td>59.80</td><td>63.01</td><td>64.90</td><td>81.65</td><td>88.06</td><td>91.35</td><td>58.69</td><td>61.38</td><td>62.84</td><td>83.29</td><td>84.09</td><td>86.33</td><td>72.63</td><td>76.43</td><td>79.15</td></tr><tr><td>Entropy</td><td>75.54</td><td>80.08</td><td>82.98</td><td>69.11</td><td>75.75</td><td>80.75</td><td>49.34</td><td>54.63</td><td>54.63</td><td>82.55</td><td>87.75</td><td>90.47</td><td>48.60</td><td>50.87</td><td>55.36</td><td>82.61</td><td>83.99</td><td>85.86</td><td>67.96</td><td>72.18</td><td>75.01</td></tr><tr><td>Confidence</td><td>74.65</td><td>80.38</td><td>81.97</td><td>72.93</td><td>76.19</td><td>81.06</td><td>52.06</td><td>54.04</td><td>54.04</td><td>87.17</td><td>90.01</td><td>90.82</td><td>48.63</td><td>54.86</td><td>56.62</td><td>84.02</td><td>85.43</td><td>87.73</td><td>69.91</td><td>73.48</td><td>75.37</td></tr><tr><td>K-means</td><td>76.96</td><td>79.85</td><td>82.34</td><td>70.20</td><td>73.39</td><td>78.83</td><td>54.65</td><td>58.84</td><td>61.46</td><td>75.51</td><td>78.86</td><td>86.72</td><td>54.01</td><td>56.64</td><td>58.88</td><td>83.41</td><td>84.16</td><td>86.57</td><td>69.12</td><td>71.96</td><td>75.80</td></tr><tr><td>Margin</td><td>80.50</td><td>83.34</td><td>83.75</td><td>75.37</td><td>81.46</td><td>82.11</td><td>58.79</td><td>62.46</td><td>66.31</td><td>86.35</td><td>90.15</td><td>91.27</td><td>53.55</td><td>57.34</td><td>61.55</td><td>85.38</td><td>86.32</td><td>86.32</td><td>73.32</td><td>76.84</td><td>78.55</td></tr><tr><td>Coreset</td><td>81.08</td><td>84.38</td><td>86.18</td><td>76.77</td><td>83.64</td><td>86.10</td><td>57.19</td><td>61.84</td><td>66.33</td><td>85.84</td><td>92.01</td><td>92.76</td><td>58.43</td><td>60.73</td><td>65.21</td><td>83.51</td><td>86.28</td><td>88.17</td><td>73.80</td><td>78.15</td><td>80.79</td></tr><tr><td>BADGE</td><td>79.90</td><td>84.12</td><td>84.45</td><td>78.21</td><td>81.58</td><td>86.16</td><td>58.07</td><td>64.28</td><td>68.77</td><td>87.77</td><td>90.43</td><td>91.34</td><td>58.12</td><td>62.16</td><td>65.74</td><td>84.49</td><td>85.27</td><td>86.59</td><td>74.43</td><td>77.97</td><td>80.51</td></tr><tr><td>CNTGE 
(Ours)</td><td>79.81</td><td>85.48</td><td>87.21</td><td>80.53</td><td>83.05</td><td>86.19</td><td>61.91</td><td>67.50</td><td>68.13</td><td>90.91</td><td>91.13</td><td>92.26</td><td>62.20</td><td>65.50</td><td>65.67</td><td>86.38</td><td>86.95</td><td>87.60</td><td>76.96</td><td>79.94</td><td>81.18</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="5"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="21"></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="5"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr><tr><td colspan="12"></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td><td></td></tr></table>
|
| 211 |
+
|
| 212 |
+
with 256 units embeds features extracted by $G_{f}$ to learn the prototype classifiers. The optimization setting follows [13]. The margin function is set as $w_{\alpha}(t) = w_0 + (1 - \frac{t}{T})\cdot \alpha$, where $w_{0} = 1.0$, $t$ indexes the training step and $T$ is the total number of training iterations. The hyper-parameters are tuned with cross-validation [62] and fixed for each dataset, i.e., $\alpha = 0.2$ , $\beta = 1.5$ , $\tau = 0.05$ . More details are given in the supplementary material.
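A minimal sketch of this margin schedule (the function name is ours):

```python
def margin_schedule(t, T, w0=1.0, alpha=0.2):
    """w_alpha(t) = w0 + (1 - t/T) * alpha: starts at w0 + alpha and decays
    linearly to w0, so early in training only confidently-common target
    samples pass the indicator in the adversarial curriculum loss."""
    return w0 + (1.0 - t / T) * alpha

# With the paper's values (w0 = 1.0, alpha = 0.2) and, say, T = 1000 steps:
# margin_schedule(0, 1000) == 1.2 and margin_schedule(1000, 1000) == 1.0.
```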
|
| 213 |
+
|
| 214 |
+
# 4.2. Comparative Results
|
| 215 |
+
|
| 216 |
+
Comparison against different domain adaptation methods. To justify the effectiveness of our proposed ADCL for AUDA, we extend two domain adaptation baselines, i.e., ResNet and UAN, to the AUDA setting by learning prototype classifiers. Similar to our method, the prototype classifiers in the baselines are learned with $\mathcal{D}_{LT}$ by optimizing Eq (6). We construct different combinations of domain adaptation models (ResNet, UAN, and our proposed ADCL) and active learning strategies (Random, Margin, Coreset, and BADGE) for comparison. The average class accuracy results are shown in Table 1. We observe that ADCL performs the best when equipped with any of the AL strategies and, in particular, outperforms UAN, which also deals with the domain gap and semantic shift problems. The results support that the proposed ADCL can effectively alleviate the
|
| 217 |
+
|
| 218 |
+
negative impact of domain gap and semantic shift, and helps AL strategies annotate informative instances to infer actual labels for all target instances.
|
| 219 |
+
|
| 220 |
+
Comparison with different active learning strategies. To evaluate the effectiveness of our proposed CNTGE strategy, we fix the domain adaptation method as ADCL and vary the active learning method (seven prior approaches) for comparison. As shown in Table 2, we report the average class accuracy on Office-Home, Office-31, VisDA and DomainNet at the 5th, 10th and 15th annotation rounds for conciseness. Besides, the full performance curves of some hard transfer tasks are shown in Figure 3. Our proposed AL strategy CNTGE performs the best on most tasks and the second best on a few tasks, which shows that target instances annotated by CNTGE are more informative than those annotated by other methods under domain gap and semantic shift. More importantly, CNTGE performs well on DomainNet, indicating that CNTGE is robust to large datasets with many categories. In particular, we have some key observations. (1) In the practical AUDA setting, especially in difficult transfer tasks in the Office-Home, VisDA and DomainNet datasets, some traditional AL methods perform similarly to or even worse than Random. A possible reason is that uncertainty or diversity is wrongly estimated by traditional AL methods due to
|
| 221 |
+
|
| 222 |
+

|
| 223 |
+
(a) Office-Home: $\mathrm{Ar}\rightarrow \mathrm{Rw}$
|
| 224 |
+
|
| 225 |
+

|
| 226 |
+
(b) VisDA: S → R
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
(c) Office-31: $\mathrm{D}\to \mathrm{A}$
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
(d) DomainNet: $\mathrm{P}\rightarrow \mathrm{R}$
|
| 233 |
+
Figure 3. Average class accuracy across four hard transfer tasks from Office-Home, Office-31, VisDA and DomainNet datasets.
|
| 234 |
+
|
| 235 |
+
the violation of the assumption about the source-target label set relationship in AUDA. These traditional AL approaches may easily sample outliers, redundant instances, or uninformative instances from source classes, which does not help infer labels for all target instances and can even damage the classification performance. (2) Coreset tries to select instances that geometrically match the data distribution, and it performs the best in a few transfer tasks where CNTGE underperforms it. However, Coreset only works well on small datasets, which limits its robustness since practical datasets are usually large. Fortunately, CNTGE is more practical and performs very well on large datasets such as Office-Home, VisDA and DomainNet.
|
| 236 |
+
|
| 237 |
+
# 4.3. Ablation Studies
|
| 238 |
+
|
| 239 |
+
Ablation studies on ADCL. To analyze the effectiveness of the proposed adversarial curriculum loss $L_{adv}$ and diverse curriculum loss $L_{div}$, we derive two variants: (1) w/o $L_{adv}$ replaces $L_{adv}$ with the naive adversarial loss [13] widely used in adversarial domain adaptation; the naive adversarial loss is obtained by removing $w_{s}(\mathbf{x}_{i}^{s})$ and the indicator function $\mathbb{1}_{w_t(\mathbf{x}_i^t)\geqslant w_\alpha (t)}$ in Eq (3). (2) w/o $L_{div}$ is the variant learned without the loss $L_{div}$. All other loss functions remain the same as in AUAN. Compared with AUAN in Table 3, the average performance drops of w/o $L_{adv}$ and w/o $L_{div}$ are respectively $2.34\%$ and $9.30\%$. This indicates that $L_{adv}$ effectively constrains the cross-domain alignment to the shared common label set. Besides, $L_{div}$ reduces the over-reliance of classifier $G_{c}$ on target "unknown" samples, which helps CNTGE select more informative target instances for active learning.
|
| 240 |
+
|
| 241 |
+
Ablation studies on learning $G_{p}$ . Two variants are proposed to study the effectiveness of $L_{p}$ and $L_{nc}$ on learning the prototype classifiers: (1) w/o $L_{p}$ is the variant where the prototype classifiers are learned without loss $L_{p}$ . In this
|
| 242 |
+
|
| 243 |
+
Table 3. Ablation studies on Office-Home (6 challenging tasks).
|
| 244 |
+
|
| 245 |
+
<table><tr><td>Variant</td><td>Ar→Rw</td><td>Cl→Rw</td><td>Pr→Rw</td><td>Rw→Ar</td><td>Rw→Cl</td><td>Rw→Pr</td><td>AVG</td></tr><tr><td>AUAN</td><td>61.02</td><td>61.88</td><td>69.76</td><td>55.03</td><td>36.55</td><td>69.24</td><td>58.91</td></tr><tr><td>w/o $L_{adv}$</td><td>57.40</td><td>56.79</td><td>68.46</td><td>54.35</td><td>34.36</td><td>68.09</td><td>56.57 (↓2.34)</td></tr><tr><td>w/o $L_{div}$</td><td>50.40</td><td>43.78</td><td>50.44</td><td>54.35</td><td>34.37</td><td>64.31</td><td>49.61 (↓9.30)</td></tr><tr><td>w/o $L_{p}$</td><td>58.28</td><td>58.06</td><td>64.74</td><td>52.97</td><td>31.63</td><td>68.56</td><td>55.71 (↓3.21)</td></tr><tr><td>w/o $L_{nc}$</td><td>60.55</td><td>59.21</td><td>66.58</td><td>54.19</td><td>33.77</td><td>67.42</td><td>56.95 (↓1.96)</td></tr><tr><td>AUAN-1</td><td>55.18</td><td>56.12</td><td>65.11</td><td>52.58</td><td>32.77</td><td>66.46</td><td>54.70 (↓4.21)</td></tr><tr><td>AUAN-2</td><td>60.28</td><td>60.44</td><td>68.08</td><td>53.88</td><td>34.62</td><td>68.73</td><td>57.67 (↓1.24)</td></tr></table>
|
| 246 |
+
|
| 247 |
+
case, the prototype classifiers cannot be well learned, and we apply a KNN classifier to infer labels for target unknown instances. (2) w/o $L_{nc}$ is the variant where the prototype classifiers are learned without the loss $L_{nc}$. As shown in Table 3, w/o $L_{p}$ and w/o $L_{nc}$ both underperform AUAN: the performance drops of w/o $L_{p}$ and w/o $L_{nc}$ are respectively $3.21\%$ and $1.96\%$. This implies that $L_{p}$ and $L_{nc}$ are beneficial for learning to infer labels in $\tilde{\mathcal{C}}_t$ with limited data.
|
| 248 |
+
|
| 249 |
+
Effect of $\mathcal{D}_{LT}$ during adaptation. To test whether $\mathcal{D}_{LT}$ helps suppress the negative impact of domain gap and semantic shift, we design the AUAN-1 model, which is optimized by Eq (1) and Eq (6), while the original AUAN is optimized by Eq (5) and Eq (6). The average performance of AUAN-1 drops $4.21\%$, as shown in Table 3, indicating that $\mathcal{D}_{LT}$ strengthens the adaptation process and narrows the domain gap and semantic shift.
|
| 250 |
+
|
| 251 |
+
Effect of $\mathcal{D}_{PLT}$ when learning $G_{p}$. To study the effectiveness of learning $G_{p}$ with $\mathcal{D}_{PLT}$, we derive the AUAN-2 model, which is learned without the first term in Eq (7). Results are shown in Table 3. The $1.24\%$ performance drop of AUAN-2 illustrates that although $\mathcal{D}_{PLT}$ contains noisy labels, it can still cluster similar instances and help learn discriminative prototypes.
|
| 252 |
+
|
| 253 |
+
# 5. Conclusion
|
| 254 |
+
|
| 255 |
+
In this paper, we propose a novel paradigm for unsupervised domain adaptation, termed Active Universal Domain Adaptation (AUDA), which extends the applicability of domain adaptation to practical scenarios. An active universal adaptation network equipped with ADCL and CNTGE is proposed to address this task. Extensive experiments show the effectiveness of our model. In the future, we will design AL strategies that consider the distribution information of known and unknown samples and utilize knowledge graphs for unknown category inference.
|
| 256 |
+
|
| 257 |
+
# Acknowledgements
|
| 258 |
+
|
| 259 |
+
This work was supported by the National Key Research & Development Plan of China under Grant 2020AAA0106200, in part by the National Natural Science Foundation of China under Grants 62036012, 61721004, 62072286, 61720106006, 61832002, 62072455, 62002355, U1836220, and U1705262, in part by the Key Research Program of Frontier Sciences of CAS under Grant QYZDJ-SSW-JSC039, and in part by the Beijing Natural Science Foundation (L201001).
|
| 260 |
+
|
| 261 |
+
# References
|
| 262 |
+
|
| 263 |
+
[1] David Arthur and Sergei Vassilvitskii. k-means++: the advantages of careful seeding. In Proceedings of the eighteenth annual ACM-SIAM symposium on Discrete algorithms, pages 1027-1035, 2007.
|
| 264 |
+
[2] Jordan T Ash, Chicheng Zhang, Akshay Krishnamurthy, John Langford, and Alekh Agarwal. Deep batch active learning by diverse, uncertain gradient lower bounds. In ICLR, 2019.
|
| 265 |
+
[3] Maria-Florina Balcan, Alina Beygelzimer, and John Langford. Agnostic active learning. Journal of Computer and System Sciences, 75(1):78-89, 2009.
|
| 266 |
+
[4] Zhangjie Cao, Mingsheng Long, Jianmin Wang, and Michael I Jordan. Partial transfer learning with selective adversarial networks. In CVPR, pages 2724-2732, 2018.
|
| 267 |
+
[5] Zhangjie Cao, Lijia Ma, Mingsheng Long, and Jianmin Wang. Partial adversarial domain adaptation. In ECCV, pages 135-150, 2018.
|
| 268 |
+
[6] Zhangjie Cao, Kaichao You, Mingsheng Long, Jianmin Wang, and Qiang Yang. Learning to transfer examples for partial domain adaptation. In CVPR, pages 2985-2994, 2019.
|
| 269 |
+
[7] Rita Chattopadhyay, Wei Fan, Ian Davidson, Sethuraman Panchanathan, and Jieping Ye. Joint transfer and batch-mode active learning. In ICML, pages 253-261, 2013.
|
| 270 |
+
[8] Zhihong Chen, Chao Chen, Zhaowei Cheng, Boyuan Jiang, Ke Fang, and Xinyu Jin. Selective transfer with reinforced transfer network for partial domain adaptation. In CVPR, pages 12706-12714, 2020.
|
| 271 |
+
[9] Zhengming Ding, Sheng Li, Ming Shao, and Yun Fu. Graph adaptive knowledge transfer for unsupervised domain adaptation. In ECCV, pages 37-52, 2018.
|
| 272 |
+
[10] Melanie Ducoffe and Frederic Precioso. Adversarial active learning for deep networks: a margin based approach. arXiv preprint arXiv:1802.09841, 2018.
|
| 273 |
+
[11] Bo Fu, Zhangjie Cao, Mingsheng Long, and Jianmin Wang. Learning to detect open classes for universal domain adaptation. In ECCV, pages 567-583, 2020.
|
| 274 |
+
[12] Yarin Gal, Riashat Islam, and Zoubin Ghahramani. Deep bayesian active learning with image data. In ICML, pages 1183-1192, 2017.
|
| 275 |
+
[13] Yaroslav Ganin, Evgeniya Ustinova, Hana Ajakan, Pascal Germain, Hugo Larochelle, François Laviolette, Mario Marchand, and Victor Lempitsky. Domain-adversarial training of neural networks. The Journal of Machine Learning Research, 17(1):2096-2030, 2016.
|
| 276 |
+
[14] Yonatan Geifman and Ran El-Yaniv. Deep active learning over the long tail. arXiv preprint arXiv:1711.00941, 2017.
|
| 277 |
+
[15] Daniel Gissin and Shai Shalev-Shwartz. Discriminative active learning. arXiv preprint arXiv:1907.06347, 2019.
|
| 278 |
+
[16] Boqing Gong, Yuan Shi, Fei Sha, and Kristen Grauman. Geodesic flow kernel for unsupervised domain adaptation. In CVPR, pages 2066-2073, 2012.
|
| 279 |
+
[17] Philip Haeusser, Thomas Frerix, Alexander Mordvintsev, and Daniel Cremers. Associative domain adaptation. In ICCV, pages 2765-2773, 2017.
|
| 280 |
+
|
| 281 |
+
[18] Steve Hanneke et al. Theory of disagreement-based active learning. Foundations and Trends in Machine Learning, 7(2-3):131-309, 2014.
|
| 282 |
+
[19] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In CVPR, pages 770-778, 2016.
|
| 283 |
+
[20] Wei-Ning Hsu and Hsuan-Tien Lin. Active learning by learning. In AAAI, pages 2659-2665, 2015.
|
| 284 |
+
[21] Lanqing Hu, Meina Kan, Shiguang Shan, and Xilin Chen. Duplex generative adversarial network for unsupervised domain adaptation. In CVPR, pages 1498-1507, 2018.
|
| 285 |
+
[22] Sheng-Wei Huang, Che-Tsung Lin, Shu-Ping Chen, Yen-Yi Wu, Po-Hao Hsu, and Shang-Hong Lai. Auggan: Cross domain adaptation with gan-based data augmentation. In ECCV, pages 718-731, 2018.
|
| 286 |
+
[23] Guoliang Kang, Lu Jiang, Yi Yang, and Alexander G Hauptmann. Contrastive adaptation network for unsupervised domain adaptation. In CVPR, pages 4893-4902, 2019.
|
| 287 |
+
[24] Andreas Kirsch, Joost van Amersfoort, and Yarin Gal. Batchbald: Efficient and diverse batch acquisition for deep bayesian active learning. In NeurIPS, pages 7024-7035, 2019.
|
| 288 |
+
[25] Gregory Koch, Richard Zemel, and Ruslan Salakhutdinov. Siamese neural networks for one-shot image recognition. In ICML Workshop, 2015.
|
| 289 |
+
[26] Abhishek Kumar, Prasanna Sattigeri, Kahini Wadhawan, Leonid Karlinsky, Rogerio Feris, Bill Freeman, and Gregory Wornell. Co-regularized alignment for unsupervised domain adaptation. In NeurIPS, pages 9345-9356, 2018.
|
| 290 |
+
[27] M. Pawan Kumar, Benjamin Packer, and Daphne Koller. Self-paced learning for latent variable models. In NeurIPS, pages 1189-1197, 2010.
|
| 291 |
+
[28] Hong Liu, Zhangjie Cao, Mingsheng Long, Jianmin Wang, and Qiang Yang. Separate to adapt: Open set domain adaptation via progressive separation. In CVPR, pages 2927-2936, 2019.
|
| 292 |
+
[29] Yen-Cheng Liu, Yu-Ying Yeh, Tzu-Chien Fu, Sheng-De Wang, Wei-Chen Chiu, and Yu-Chiang Frank Wang. Detach and adapt: Learning cross-domain disentangled deep representation. In CVPR, pages 8867-8876, 2018.
|
| 293 |
+
[30] Mingsheng Long, Guiguang Ding, Jianmin Wang, Jiaguang Sun, Yuchen Guo, and Philip S Yu. Transfer sparse coding for robust image representation. In CVPR, pages 407-414, 2013.
|
| 294 |
+
[31] Mingsheng Long, Han Zhu, Jianmin Wang, and Michael I Jordan. Unsupervised domain adaptation with residual transfer networks. In NeurIPS, pages 136-144, 2016.
|
| 295 |
+
[32] Xinhong Ma, Tianzhu Zhang, and Changsheng Xu. Gcan: Graph convolutional adversarial network for unsupervised domain adaptation. In CVPR, pages 8266-8276, 2019.
|
| 296 |
+
[33] Tzu Ming Harry Hsu, Wei Yu Chen, Cheng-An Hou, Yao-Hung Hubert Tsai, Yi-Ren Yeh, and Yu-Chiang Frank Wang. Unsupervised domain adaptation with imbalanced cross-domain data. In ICCV, pages 4121-4129, 2015.
|
| 297 |
+
[34] Yaniv Ovadia, Emily Fertig, Jie Ren, Zachary Nado, David Sculley, Sebastian Nowozin, Joshua V Dillon, Balaji Lakshminarayanan, and Jasper Snoek. Can you trust your model's
|
| 298 |
+
|
| 299 |
+
uncertainty? evaluating predictive uncertainty under dataset shift. In NeurIPS, pages 13969-13980, 2019.
|
| 300 |
+
[35] Sinno Jialin Pan and Qiang Yang. A survey on transfer learning. IEEE Transactions on Knowledge and Data Engineering, 22(10):1345-1359, 2009.
|
| 301 |
+
[36] Pau Panareda Busto and Juergen Gall. Open set domain adaptation. In ICCV, pages 754-763, 2017.
|
| 302 |
+
[37] Adam Paszke, Sam Gross, Soumith Chintala, Gregory Chanan, Edward Yang, Zachary DeVito, Zeming Lin, Alban Desmaison, Luca Antiga, and Adam Lerer. Automatic differentiation in PyTorch. In NIPS Autodiff Workshop, 2017.
|
| 303 |
+
[38] Xingchao Peng, Qinxun Bai, Xide Xia, Zijun Huang, Kate Saenko, and Bo Wang. Moment matching for multi-source domain adaptation. In ICCV, pages 1406-1415, 2019.
|
| 304 |
+
[39] X. Peng, B. Usman, N. Kaushik, D. Wang, J. Hoffman, and K. Saenko. Visda: A synthetic-to-real benchmark for visual domain adaptation. In CVPR Workshops, pages 2102-2105, 2018.
|
| 305 |
+
[40] Joaquin Quionero-Candela, Masashi Sugiyama, Anton Schwaighofer, and Neil D Lawrence. Dataset shift in machine learning. The MIT Press, 2009.
|
| 306 |
+
[41] Piyush Rai, Avishek Saha, Hal Daumé III, and Suresh Venkatasubramanian. Domain adaptation meets active learning. In NAACL Workshop, pages 27-32, 2010.
|
| 307 |
+
[42] Sachin Ravi and Hugo Larochelle. Optimization as a model for few-shot learning. In ICLR, 2017.
|
| 308 |
+
[43] Dan Roth and Kevin Small. Margin-based active learning for structured output spaces. In ECML, pages 413-424, 2006.
|
| 309 |
+
[44] Kate Saenko, Brian Kulis, Mario Fritz, and Trevor Darrell. Adapting visual category models to new domains. In ECCV, pages 213-226, 2010.
|
| 310 |
+
[45] Kuniaki Saito, Donghyun Kim, Stan Sclaroff, and Kate Saenko. Universal domain adaptation through self-supervision. In NeurIPS, pages 16282-16292, 2020.
|
| 311 |
+
[46] Kuniaki Saito, Shohei Yamamoto, Yoshitaka Ushiku, and Tatsuya Harada. Open set domain adaptation by backpropagation. In ECCV, pages 153-168, 2018.
|
| 312 |
+
[47] Adam Santoro, Sergey Bartunov, Matthew Botvinick, Daan Wierstra, and Timothy Lillicrap. One-shot learning with memory-augmented neural networks. arXiv preprint arXiv:1605.06065, 2016.
|
| 313 |
+
[48] Greg Schohn and David Cohn. Less is more: Active learning with support vector machines. In ICML, pages 839-846, 2000.
|
| 314 |
+
[49] Ozan Sener and Silvio Savarese. Active learning for convolutional neural networks: A core-set approach. In ICLR, 2018.
|
| 315 |
+
[50] Burr Settles. Active learning literature survey. Technical report, University of Wisconsin-Madison, 2009.
|
| 316 |
+
[51] Samarth Sinha, Sayna Ebrahimi, and Trevor Darrell. Variational adversarial active learning. In ICCV, pages 5972-5981, 2019.
|
| 317 |
+
[52] Jake Snell, Kevin Swersky, and Richard Zemel. Prototypical networks for few-shot learning. In NeurIPS, pages 4077-4087, 2017.
|
| 318 |
+
[53] Jong-Chyi Su, Yi-Hsuan Tsai, Kihyuk Sohn, Buyu Liu, Subhransu Maji, and Manmohan Chandraker. Active adversarial domain adaptation. In WACV, pages 739-748, 2020.
|
| 319 |
+
|
| 320 |
+
[54] Simon Tong and Daphne Koller. Support vector machine active learning with applications to text classification. Journal of Machine Learning Research, 2(Nov):45-66, 2001.
|
| 321 |
+
[55] Eric Tzeng, Judy Hoffman, Kate Saenko, and Trevor Darrell. Adversarial discriminative domain adaptation. In CVPR, pages 7167-7176, 2017.
|
| 322 |
+
[56] H. Venkateswara, J. Eusebio, S. Chakraborty, and S. Panchanathan. Deep hashing network for unsupervised domain adaptation. In CVPR, pages 5385-5394, 2017.
|
| 323 |
+
[57] Oriol Vinyals, Charles Blundell, Timothy Lillicrap, Daan Wierstra, et al. Matching networks for one shot learning. In NeurIPS, pages 3630-3638, 2016.
|
| 324 |
+
[58] Dan Wang and Yi Shang. A new active labeling method for deep learning. In IJCNN, pages 112-119, 2014.
|
| 325 |
+
[59] Shaoan Xie, Zibin Zheng, Liang Chen, and Chuan Chen. Learning semantic representations for unsupervised domain adaptation. In ICML, pages 5419-5428, 2018.
|
| 326 |
+
[60] Ting Yao, Chong-Wah Ngo, and Shiai Zhu. Predicting domain adaptivity: Redo or recycle? In ACM MM, pages 821-824, 2012.
|
| 327 |
+
[61] Kaichao You, Mingsheng Long, Zhangjie Cao, Jianmin Wang, and Michael I Jordan. Universal domain adaptation. In CVPR, pages 2720-2729, 2019.
|
| 328 |
+
[62] Kaichao You, Ximei Wang, Mingsheng Long, and Michael Jordan. Towards accurate model selection in deep unsupervised domain adaptation. In ICML, pages 7124-7133, 2019.
|
| 329 |
+
[63] Werner Zellinger, Thomas Grubinger, Edwin Lughofer, Thomas Natschläger, and Susanne Saminger-Platz. Central moment discrepancy (CMD) for domain-invariant representation learning. arXiv preprint arXiv:1702.08811, 2017.
|
| 330 |
+
[64] Jing Zhang, Zewei Ding, Wanqing Li, and Philip Ogunbona. Importance weighted adversarial nets for partial domain adaptation. In CVPR, pages 8156-8164, 2018.
|
| 331 |
+
[65] Junbao Zhuo, Shuhui Wang, Shuhao Cui, and Qingming Huang. Unsupervised open domain recognition by semantic discrepancy minimization. In CVPR, pages 750-759, 2019.
|
activeuniversaldomainadaptation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9639c886b2d7e56cb9ed3729383155bfef6e7bfaddec470d00e67ff95b6aa5bc
|
| 3 |
+
size 645860
|
activeuniversaldomainadaptation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fa7dce332f2f2acbcf36c0de4ac975f404b904a7440c53a30798a50dadd60cc9
|
| 3 |
+
size 566841
|
actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/43c3af89-ebd3-440b-870b-e655eced5ada_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:95f8324ce506134f46ff611235e10ece420f926e92277cfa989c2f3d44798e78
|
| 3 |
+
size 72682
|
actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/43c3af89-ebd3-440b-870b-e655eced5ada_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d0ff3a8265d67f57033388490281c2199b44d9c5b5ab06174422a45d2755ae47
|
| 3 |
+
size 92102
|
actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/43c3af89-ebd3-440b-870b-e655eced5ada_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6b344213bc229f591f18ef0a2eab1a94ce660c0de74780a919be342bad42ba33
|
| 3 |
+
size 2256206
|
actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/full.md
ADDED
|
@@ -0,0 +1,289 @@
|
| 1 |
+
# Act the Part: Learning Interaction Strategies for Articulated Object Part Discovery
|
| 2 |
+
|
| 3 |
+
Samir Yitzhak Gadre $^{1}$ Kiana Ehsani $^{2}$ Shuran Song $^{1}$ \
|
| 4 |
+
$^{1}$ Columbia University\
|
| 5 |
+
$^{2}$ Allen Institute for AI\
|
| 6 |
+
https://atp.cs.columbia.edu
|
| 7 |
+
|
| 8 |
+
# Abstract
|
| 9 |
+
|
| 10 |
+
People often use physical intuition when manipulating articulated objects, irrespective of object semantics. Motivated by this observation, we identify an important embodied task where an agent must play with objects to recover their parts. To this end, we introduce Act the Part (AtP) to learn how to interact with articulated objects to discover and segment their pieces. By coupling action selection and motion segmentation, AtP is able to isolate structures to make perceptual part recovery possible without semantic labels. Our experiments show AtP learns efficient strategies for part discovery, can generalize to unseen categories, and is capable of conditional reasoning for the task. Although trained in simulation, we show convincing transfer to real world data with no fine-tuning. A summary video, interactive demo, and code will be available at https://atp.cs.columbia.edu.
|
| 11 |
+
|
| 12 |
+
# 1. Introduction
|
| 13 |
+
|
| 14 |
+
How do people and animals make sense of the physical world? Studies from cognitive science indicate observing the consequences of one's actions plays a crucial role [17, 38, 3]. Gibson's influential work on affordances argues visual objects ground action possibilities [14]. Work from Tucker et al. goes further, suggesting what one sees affects what one does [44]. These findings establish a plausible biological link between seeing and doing. However, in an age of data-driven computer vision, static image and video datasets [40, 24, 2] have taken center stage.
|
| 15 |
+
|
| 16 |
+
In this paper, we aim to elucidate connections between perception and interaction by investigating articulated object part discovery and segmentation. In this task, an agent must recover part masks by choosing strategic interactions over a few timesteps. We do not assume dense part labels or known kinematic structure [1, 23]. We also do not interact randomly [33]. Rather, we learn an agent capable of holding and pushing, allowing us to relax the assumption that objects are fixed to a ground plane [28].
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
Figure 1. Interaction for Part Discovery. Passive part segmentation algorithms require detailed annotation and cannot generalize to new categories. While motion can help discover new objects, prior work cannot infer actions for understanding individual parts. Our work, Act the Part, learns interaction strategies that expose parts and generalize to unseen categories.
|
| 20 |
+
|
| 21 |
+
The novelty of our task and approach is highlighted in Fig. 1.
|
| 22 |
+
|
| 23 |
+
Segmentation from strong supervision and random interaction is widely studied; however, creating informative motion to enable category level generalization while relaxing supervision is less explored in the community. We identify the following hurdles, which make this direction salient and difficult. Motion cannot be assumed in a scene as objects seldom move spontaneously. Even with agent interaction, not all actions create perceivable motion to give insight about articulation. Actions might activate only a small number of parts, so diversity of action and aggregation of potentially noisy perceptual discoveries is necessary. Generalization of interaction and perception to unseen categories without retraining or fine-tuning is also desirable. These facets are often overlooked in prior work but are at the heart of this paper.
|
| 24 |
+
|
| 25 |
+
To address these challenges, we introduce Act the Part (AtP), which takes visual observations, interacts intelligently, and outputs part masks. Our key insight is to couple
|
| 26 |
+
|
| 27 |
+
action selection and segmentation inference. Given an RGB input image and the part segmentation belief, our interaction network reasons about where to hold and push to move undiscovered parts. By reasoning about changes in visual observations, our perception algorithm is able to discover new parts, keep track of existing ones, and update the part segmentation belief.
|
| 28 |
+
|
| 29 |
+
We evaluate our approach on eight object categories from the PartNet-Mobility dataset [9, 29, 49] and a ninth multilink category, which we configure with three links. Our experiments suggest: (1) AtP learns effective interaction strategies to isolate part motion, which makes articulated object part discovery and segmentation possible. (2) Our method generalizes to unseen object instances and categories with different numbers of parts and joints. (3) Our model is capable of interpretable conditional reasoning for the task—inferring where and how to push given arbitrary hold locations.
|
| 30 |
+
|
| 31 |
+
We also demonstrate transfer to real images of unseen categories (without fine-tuning) and introduce a toolkit to make PartNet-Mobility more suitable for future research.
|
| 32 |
+
|
| 33 |
+
# 2. Related Work
|
| 34 |
+
|
| 35 |
+
Our approach builds on existing work in interactive perception [6], where visual tasks are solved using agent intervention. We also position our work alongside existing methods in articulated object understanding.
|
| 36 |
+
|
| 37 |
+
Interactive Perception for Rigid Objects. Instance segmentation of rigid objects from interaction is well studied [13, 5, 46, 32, 7, 33, 12]. Similar work infers physical properties [35, 52] and scene dynamics [30, 50, 45]. These approaches typically employ heuristic or random actions. In contrast, we learn to act to expose articulation.
|
| 38 |
+
|
| 39 |
+
For learning interaction strategies, Lohmann et al. [26] learn to interact with rigid objects to estimate their segmentation masks and physical properties. Yang et al. [54] learn to navigate to recover amodal masks. These algorithms do not change object internal states in structured ways for articulated object part discovery.
|
| 40 |
+
|
| 41 |
+
There is also work that leverages multimodal tactile and force inputs [10]. Inspired by this work, we explore using touch feedback in our learning loop. However, we assume only binary signals (e.g., the presence of shear force), which is easier to obtain in real world settings.
|
| 42 |
+
|
| 43 |
+
Passive Perception for Object Structures. Existing work extracts parts from pairs of images [53, 51], point clouds [55] or videos [41, 27, 25]. In these settings, agents do not have control over camera or scene motion. While the assumption that structures move spontaneously is valid for robot arms or human limbs, the premise breaks down when considering inanimate objects. Even when motion exists, it is not guaranteed to give insight about articulation. We address these issues by learning how to create informative
|
| 44 |
+
|
| 45 |
+
motion to find and extract parts.
|
| 46 |
+
|
| 47 |
+
Other work tackles part segmentation from a single image [47, 43, 19, 22, 1, 23] or point clouds [36, 37, 48, 18]. These algorithms are trained with full supervision (e.g., pixel labels) or assume strong category-level priors (e.g., known kinematics or single networks per category). In contrast, our approach uses flow and touch feedback as supervision and makes no class specific assumptions. As a result, we are able to learn a single model for all our object categories, which encompasses diverse kinematic structures.
|
| 48 |
+
|
| 49 |
+
Interactive Perception for Articulated Objects. In traditional pipelines, agents are carefully programmed to execute informative actions to facilitate visual feature tracking [42, 21, 34]. Other classical approaches improve on action selection for downstream perception [4, 31, 15]. However, these methods assume known object structure, which is used to design heuristics. In contrast, we employ a framework, which allows learning actions directly from pixels without known object models.
|
| 50 |
+
|
| 51 |
+
Recently, Mo et al. [28] present a learnable framework to estimate action affordances on articulated objects from a single RGB image or point cloud. However, they do not consider using their learned interactions for multistep part discovery and segmentation.
|
| 52 |
+
|
| 53 |
+
# 3. Approach
|
| 54 |
+
|
| 55 |
+
Our goal is to learn how to interact with articulated objects to discover and segment parts without semantic supervision. This poses many technical challenges: (1) With repetitive actions, an agent may not explore all parts. (2) Actions resulting in rigid transformations are undesirable. (3) Erroneous segmentation makes tracking parts over time difficult. To begin exploring these complexities, we consider articulated objects in table-top environments.
|
| 56 |
+
|
| 57 |
+
First, we formally define the task and environment details (Sec. 3.1). We then explain the three components of our approach: an interaction network (Sec. 3.2) to determine what actions to take, a part network (Sec. 3.3) to recover masks from motion, and a history aggregation algorithm (Sec. 3.4) to keep track of discovered parts. Finally, we explain the reward formulation (Sec. 3.5) and combine our modules to present the full pipeline (Sec. 3.6), Act the Part (AtP). Our approach is summarized in Fig. 2.
|
| 58 |
+
|
| 59 |
+
# 3.1. Problem Formulation
|
| 60 |
+
|
| 61 |
+
General Setting. Let $\mathcal{O}$ denote a set of articulated objects, each with $n \leq N$ parts. At each timestep $t$ , an agent gets an observation $I_t \in \mathbb{R}^{H \times W \times C}$ , and executes an action $a_t \in \mathcal{A}$ on an object $o \in \mathcal{O}$ , where $\mathcal{A}$ is the set of all possible actions. Additional sensor readings $s_t \in \mathbb{R}^l$ complement visual perception. The action results in the next observation $I_{t+1}$ . Given the sequence of $T$ observations, sensor readings, and actions, the goal is to infer part mask
|
| 62 |
+
|
| 63 |
+

|
| 64 |
+
Figure 2. Model overview. (a) The interaction network computes hold and push from an image observation and current part memory. The physics simulator gives the action effects yielding the next observation. (b) The part network takes the action and image observations to infer the motion masks for the part that moved, one aligned to the current frame and one to the next frame. (c) The history aggregation module incorporates newly discovered parts and updates existing parts in the memory.
|
| 65 |
+
|
| 66 |
+
$\mathcal{M}_T \in \{1, 2, \dots, N + 1\}^{H \times W}$ , where each pixel is assigned a value corresponding to $N$ part labels or background.
|
| 67 |
+
|
| 68 |
+
Task Details. We consider $\mathcal{O}$ to be a set of common household objects with $n \leq 3$ parts, $T = 5$ , $W, H = 90$ , and $C = 3$ (RGB). All objects have revolute joints and no fixed base link. Each $a \in \mathcal{A}$ represents a tuple: an image pixel to hold, another pixel to push, and one of eight push directions. The directions are discretized every $45^{\circ}$ and are parallel to the ground plane. We take $s_t \in \{0,1\}^3$ , representing binary contact signals on the hold and push grippers and a binary shear force reading on the hold gripper to emulate touch.
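An illustrative encoding of this action space and sensor reading (the class and field names are ours, not from the released code):

```python
from dataclasses import dataclass
from typing import Tuple

@dataclass
class Action:
    hold: Tuple[int, int]   # (row, col) pixel to hold
    push: Tuple[int, int]   # (row, col) pixel to push
    direction: int          # index in [0, 8); angle = direction * 45 degrees

# Touch reading s_t: (hold-gripper contact, push-gripper contact,
# shear force on the hold gripper), each a binary signal.
s_t = (1, 1, 0)
```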
|
| 69 |
+
|
| 70 |
+
Environment Details. To enable large-scale training and ground truth part segmentation (for benchmarking only), we use a simulated environment. However, we also show our model generalizes to real-world images without fine-tuning. Our simulation environment is built using PyBullet [11] with PartNet-Mobility [9, 29, 49] style dataset assets.
|
| 71 |
+
|
| 72 |
+
Our environment supports two generic actions. First, a hold action parameterized by its location and implemented as a fixed point constraint between the gripper and a part. Second, a push action parameterized by the location and the direction of the applied force. Actions are easily extensible to facilitate future 2D and 3D object interaction research.
|
| 73 |
+
|
| 74 |
+
# 3.2. Learning to Act to Discover Parts
|
| 75 |
+
|
| 76 |
+
Given a visual observation of an object, we want to create motion by interacting to expose articulation. We give the agent two sub-actions every timestep: hold and push. The action space directly motivates network and reward design.
|
| 77 |
+
|
| 78 |
+
Conditional Bimanual Action Inference. The interaction task reduces to finding pixels to hold and push and determining the push direction. To decrease the search space, we discretize the push direction into eight options ($45^{\circ}$ apart). We consider a constant-magnitude push force parallel to the ground plane. We condition the predicted push location and direction on the predicted hold location. This allows us to
|
| 79 |
+
|
| 80 |
+

|
| 81 |
+
Figure 3. Interaction network. Given an image and the current belief of part segmentation, our network predicts a hold and a push conditioned on the hold.
|
| 82 |
+
|
| 83 |
+
synchronize sub-actions without considering all pairs.
|
| 84 |
+
|
| 85 |
+
Interaction Network. At every timestep, we predict a one-step pixel-wise reward for holding and pushing at the spatial resolution of the input image, similar to Zeng et al. [56]. As shown in Fig. 3, we use a shared ResNet18 [16] with two residual decoder heads wired with U-Net [39] skip connections. At each timestep $t$ , we have a current belief about the part segmentation. This is represented as a part memory $V_{t} \in \{0,1\}^{H \times W \times N}$ , where each channel encodes a different part mask. Given an image $I_{t}$ and $V_{t}$ , the network predicts a hold reward map $H_{t} \in [0,1]^{H \times W}$ , where each entry estimates the reward for holding that pixel. We uniformly sample one of the top $k = 100$ pixels from $H_{t}$ as the hold location. Sampling encourages optimization over the top $k$ actions, which we notice is necessary for the model to learn effective strategies.
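As an illustration, a sketch of this top-$k$ sampling step, assuming the reward map is a PyTorch tensor (function name is ours):

```python
import torch

def sample_topk_pixel(reward_map, k=100):
    """Uniformly pick one of the top-k pixels of a predicted (H, W) reward
    map, e.g. the hold map H_t; returns the chosen (row, col)."""
    h, w = reward_map.shape
    topk = torch.topk(reward_map.flatten(), k).indices   # k best candidates
    idx = topk[torch.randint(len(topk), (1,))]           # uniform choice
    return divmod(idx.item(), w)                          # (row, col)
```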
|
| 86 |
+
|
| 87 |
+
Since we wish to infer pushing based on holding, we encode the hold as a 2D Gaussian $h_t$ centered at the hold location with a standard deviation of one pixel [20]. In doing so, we can pass the hold location in a manner that preserves its spatial relationship to $I_t$ and $V_t$ . To predict the push reward maps, we pass eight rotations of $I_t$ , $V_t$ , and $h_t$ —every $45^\circ$ —through the push network. The rotations allow the network to reason implicitly about pushing in all eight directions, while reasoning explicitly only about pushing right [56]. We consider the output map with the largest reward, whose index encodes the push direction, and sample uniformly from the top $k = 100$ actions to choose the push location. An emergent property of our network is conditional reasoning, where hold locations can come from anywhere and the network still reasons about a synchronized push. We demonstrate this capability on real world data in our experiments (Sec. 4.3).
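A minimal sketch of this spatial encoding, assuming the paper's 90×90 observations (the function name is illustrative):

```python
import numpy as np

def gaussian_heatmap(center, shape=(90, 90), sigma=1.0):
    """Encode a pixel location (e.g. the hold h_t) as a 2D Gaussian heatmap,
    so it can be stacked with I_t and V_t as an extra input channel."""
    rows, cols = np.mgrid[0:shape[0], 0:shape[1]]
    d2 = (rows - center[0]) ** 2 + (cols - center[1]) ** 2
    return np.exp(-d2 / (2.0 * sigma ** 2))
```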
|
| 88 |
+
|
| 89 |
+
During training, we roll out the current interaction network for seven timesteps for each training instance, which gives the network more opportunities to learn from its mistakes. The images from the last 10 iterations of rollouts are saved in a training buffer. For each image we also buffer
|
| 90 |
+
|
| 91 |
+

|
| 92 |
+
Figure 4. Part network. Given a pair of observations and the action that caused the change, this network predicts motion masks aligned to each observation.
|
| 93 |
+
|
| 94 |
+
optical flow, the touch sensors' binary outputs, and intermediate predictions, which are used to define the reward target (Sec. 3.5). We use a pixel-wise binary cross-entropy loss to learn the hold and push reward maps. Interaction network rollouts also serve as a data generation process: all pairs of frames, executed actions, and optical flow ground truth are saved as a dataset to fit the part network.
|
| 95 |
+
|
| 96 |
+
# 3.3. Learning to Discover Parts from Action
|
| 97 |
+
|
| 98 |
+
After an action is executed, we wish to recover the moved part. To do so, we create a part network to predict two masks for the pixels that moved—one mask $M_{t}$ aligned to the current frame and the other $M_{t + 1}$ to the next frame.
|
| 99 |
+
|
| 100 |
+
Part Network. Our part network (Fig. 4) takes the observations before and after the interaction. Additionally, we pass in the hold location $h_t$ and a spatial encoding $p_t$ of the push location and direction. $p_t$ has a 2D Gaussian centered at the push pixel, analogous to $h_t$ . To encode direction, we add Gaussians of smaller mean value in the direction of the push, forming a trail. The network comprises a shared encoder with two decoder heads to predict $M_t$ and $M_{t+1}$ . Using consistent forward and backward flow collected during interaction network training, we threshold at zero to acquire target motion masks. We supervise predictions using a binary cross-entropy loss.
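A sketch of deriving these supervision masks from flow (our naming; the paper thresholds the consistent flow at zero):

```python
import numpy as np

def motion_mask_from_flow(flow, eps=0.0):
    """Treat any pixel with flow magnitude above eps as part of the moved
    piece. flow: (H, W, 2) array of per-pixel displacements."""
    mag = np.linalg.norm(flow, axis=-1)
    return (mag > eps).astype(np.float32)
```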
|
| 101 |
+
|
| 102 |
+
# 3.4. History Aggregation
|
| 103 |
+
|
| 104 |
+
We introduce a history aggregation algorithm to update part memory $V$ , based on predicted $M_t$ and $M_{t+1}$ . Our algorithm classifies the type of update into four categories: (1) no movement, (2) finding a new part, (3) moving an existing part, (4) entangling parts. These labels are used to decide how to update $V$ and influence the reward (Sec. 3.5).

New Part. If $M_t$ does not overlap significantly with any channels in $V$ , it is likely to be a new part. A free channel $c$ is assigned: $V^c \gets M_{t+1}$ . If there is significant overlap between $M_t$ and a mask $V^c$ , relative only to the area of $M_t$ , there is indication two parts are assigned to $V^c$ that must be split: $V^c \gets V^c - (V^c \cap M_t)$ and $V^{c+1} \gets M_{t+1}$ . Finding a new part is the most desirable case.
|
| 105 |
+
|
| 106 |
+
<table><tr><td>Optical Flow</td><td>Touch Sensor</td><td>Part Memory</td><td>Hold Reward</td><td>Push Reward</td></tr><tr><td>✗</td><td>1/0</td><td>-</td><td>N/A</td><td>0</td></tr><tr><td>✓</td><td>1</td><td>New part</td><td>1</td><td>1</td></tr><tr><td>✓</td><td>1</td><td>Existing part</td><td>.5</td><td>.5</td></tr><tr><td>✓</td><td>0</td><td>-</td><td>0</td><td>N/A</td></tr><tr><td>✓</td><td>1</td><td>Entangled part</td><td>0</td><td>N/A</td></tr></table>
Table 1. Reward Calculation. N/A indicates no backpropagation due to insufficient information. For more details refer to Appx. C.
Existing Part. If there is significant overlap between $M_{t}$ and a mask $V^{c}$ , relative to the areas of both $M_{t}$ and $V^{c}$ , we execute the update: $V^{c} \gets M_{t + 1}$ . This case is less desirable than discovering a new part.
Entangled Parts. If there is significant overlap between $M_t$ and a mask $V^c$, relative to the area of only $V^c$, it suggests our action is entangling the movement of more than one part. During training, $V^c \gets M_{t+1}$. During testing, we use Iterative Closest Point (ICP) to get the correspondences between $V^c$ and $M_{t+1}$, yielding $T \in SE(2)$, and execute the updates: $V^c \gets (T \circ V^c) \cap M_{t+1}$, then $V^{c+1} \gets M_{t+1} - V^c$. Entangled part actions are the least desirable, as reflected in our reward described next.
For more details on handling edge cases (e.g., all channels being filled at allocation time), refer to Appx. B.
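A minimal sketch of the four-way case split above, assuming a single overlap threshold `tau` and a list-of-masks memory; the exact thresholds, fixed channel allocation, and the ICP-based test-time update for entangled parts are detailed in Appx. B and are not reproduced here.

```python
import numpy as np

def overlap(a, b):
    """Intersection area of two binary masks, relative to |a| and to |b|."""
    inter = np.logical_and(a, b).sum()
    return inter / max(a.sum(), 1), inter / max(b.sum(), 1)

def aggregate(V, M_t, M_t1, tau=0.5):
    """Classify the update and modify part memory V (list of binary masks)."""
    if M_t.sum() == 0:
        return V, "no_movement"
    for c, mask in enumerate(V):
        rel_m, rel_v = overlap(M_t, mask)
        if rel_m > tau and rel_v > tau:        # moved an existing part
            V[c] = M_t1
            return V, "existing"
        if rel_m > tau:                        # split: two parts shared channel c
            V[c] = np.logical_and(mask, np.logical_not(M_t))
            V.append(M_t1)
            return V, "new"
        if rel_v > tau:                        # action entangled several parts
            V[c] = M_t1                        # (training-time update; ICP at test time)
            return V, "entangled"
    V.append(M_t1)                             # no significant overlap: brand-new part
    return V, "new"
```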
# 3.5. Reward
During training, reward for the interaction network is determined from the optical flow, touch feedback, and history aggregation case. The reward conditions and values are shown in Tab. 1.
As presented, reward is sparse; however, we leverage touch and flow to make the reward more dense. If the touch sensor feels no force but flow exists, we know the agent should not hold or push in areas of no flow, which should correspond to the background. We can safely supervise with reward 0 for all such pixels for both hold and push reward maps. If the touch sensor feels a force, flow exists, and we have moved a new or existing part, then we can make the push reward dense. We compute the L2-norm of the flow field and normalize by the max value. If we moved a new part, these values give a dense target for the push map prediction. If we moved an existing part, we scale the target dense push values by the existing part reward of 0.5. For more details please see Appx. C.
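The densification logic can be summarized as in the sketch below. This is under our own conventions, not the paper's code: NaN marks pixels that receive no supervision, and cases not covered by the paragraph above are left unsupervised.

```python
import numpy as np

def dense_push_target(flow, case, touch, eps=1e-8):
    """Dense push-reward target from flow magnitude (illustrative sketch).

    flow: (H, W, 2) optical flow; case: history-aggregation label;
    touch: binary force reading. NaN = pixel receives no supervision."""
    mag = np.linalg.norm(flow, axis=-1)
    target = np.full_like(mag, np.nan)
    if not touch and mag.max() > 0:
        target[mag == 0] = 0.0                   # background pixels: safe zero reward
    elif touch and case == "new":
        target = mag / (mag.max() + eps)         # full-strength dense target
    elif touch and case == "existing":
        target = 0.5 * mag / (mag.max() + eps)   # scaled by the existing-part reward
    return target                                # entangled / no-flow cases: all NaN
```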
# 3.6. Putting Everything Together: Act the Part
We begin by training the interaction network using motion masks from the thresholded flow for history aggregation. We then train our part network using the entire dataset of interactions to learn to infer motion masks. At inference, we first predict and execute an action. We infer motion masks and run history aggregation to output a segmentation mask at every timestep. Further architecture and training details are provided in Appx. D and E.
# 4. Evaluation
Five Act the Part (AtP) models, trained with different seeds, are evaluated on 20 unseen instances from four seen categories (scissors, knife, USB, safe) and 87 instances from five unseen categories (pliers, microwave, lighter, eyeglasses, and multilink). The multilink objects have three links in a chain similar to eyeglasses. Critically, all train instances have two links; however, during testing, we evaluate on objects with two and three links. See Appx. A.1 for information about the number of instances per category.
To initialize instances, we uniformly sample start position, orientation, joint angles, and scale. Dataset, test initialization, and pre-trained models will be released for reproducibility and benchmarking.
# 4.1. Metrics and Points of Comparison
For each test data point, we allow the agent to interact with the object five times. We collect three perceptual metrics to evaluate performance on part discovery and segmentation. Two additional metrics measure effectiveness of the actions for part discovery. Let $\mathcal{G}$ , $\mathcal{H}$ denote the sets of ground truth and predicted binary part masks respectively.
Average Percentage Error (APE). To measure errors in the number of parts discovered, we compute $\left| |\mathcal{G}| - |\mathcal{H}| \right| / |\mathcal{G}|$.
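APE is simple enough to state directly; for example, predicting two parts for a three-part object gives $|3 - 2| / 3 \approx 0.33$:

```python
def ape(num_gt: int, num_pred: int) -> float:
    """Average Percentage Error in the number of discovered parts."""
    return abs(num_gt - num_pred) / num_gt

assert abs(ape(3, 2) - 1 / 3) < 1e-9  # one missed part on a three-part object
```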
Part-aware Intersection over Union (IoU). We use Hungarian matching to solve for the maximal IoU bipartite match between $\mathcal{G}$ and $\mathcal{H}$ . Unmatched parts get IoU of 0. Final IoU is determined by summing part IoUs and dividing by $\max(|\mathcal{G}|, |\mathcal{H}|)$ . The metric penalizes both errors in mask prediction and failure to discover masks (e.g., if one of two parts is discovered, maximum IoU is $50\%$ ).
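A sketch of this matching, assuming binary numpy masks and using SciPy's Hungarian solver; unmatched parts contribute zero to the sum, so the division by $\max(|\mathcal{G}|, |\mathcal{H}|)$ penalizes both missed and hallucinated parts.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

def part_iou(gt_masks, pred_masks):
    """Part-aware IoU via a maximal-IoU bipartite match (illustrative sketch)."""
    G, H = len(gt_masks), len(pred_masks)
    iou = np.zeros((G, H))
    for i, g in enumerate(gt_masks):
        for j, h in enumerate(pred_masks):
            inter = np.logical_and(g, h).sum()
            union = np.logical_or(g, h).sum()
            iou[i, j] = inter / union if union else 0.0
    rows, cols = linear_sum_assignment(-iou)   # Hungarian matching, maximizing IoU
    return iou[rows, cols].sum() / max(G, H)   # unmatched parts contribute 0
```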
Part-aware Hausdorff distance @ 95% ($d_{H95}$). We notice IoU is sensitive to thin structures. For example, a small pixel shift in a thin rod can lead to an IoU of 0. To provide a better metric for these structures, we measure $d_{H95}$, a part-aware variant of a common metric in medical image segmentation [8]. The directed Hausdorff distance @ 95% between masks $G \in \mathcal{G}$ and $H \in \mathcal{H}$ is $d_{H95}^{d}(G, H) \coloneqq P_{95}^{g \in G} \min_{h \in H} \lVert g - h \rVert_2$, where $P_{95}^{g \in G}$ gives the 95th percentile value over the per-pixel distances. The metric is robust to a small number of outliers, which would otherwise dominate. The symmetric measure is $d_{H95}(G, H) \coloneqq \max\left(d_{H95}^{d}(G, H), d_{H95}^{d}(H, G)\right)$. We use Hungarian matching to find the minimal-$d_{H95}$ bipartite matches between $\mathcal{G}$ and $\mathcal{H}$. If $|\mathcal{G}| \neq |\mathcal{H}|$, we compute the distance of unmatched parts against a matrix of ones at the image resolution. Distances are summed and normalized by $\max(|\mathcal{G}|, |\mathcal{H}|)$.
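A brute-force sketch of the directed and symmetric measures, assuming binary numpy masks; a KD-tree would be the usual choice at larger resolutions, but at $90 \times 90$ the $O(|A||B|)$ pairwise form is adequate.

```python
import numpy as np

def mask_to_coords(mask):
    """(N, 2) array of (row, col) pixel coordinates of a binary mask."""
    return np.argwhere(mask)

def directed_h95(A, B):
    """Directed Hausdorff @ 95%: 95th percentile, over pixels g of A, of the
    distance from g to its nearest pixel in B (A, B are coordinate arrays)."""
    d = np.linalg.norm(A[:, None, :].astype(float) - B[None, :, :].astype(float), axis=-1)
    return np.percentile(d.min(axis=1), 95)

def h95(mask_a, mask_b):
    """Symmetric measure fed to the Hungarian matcher in the metric above."""
    A, B = mask_to_coords(mask_a), mask_to_coords(mask_b)
    return max(directed_h95(A, B), directed_h95(B, A))
```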
Effective Steps. A step is effective if the hold is on an object link, the push is on another link, and the action creates motion.
Optimal Steps. An interaction is optimal if it is effective and a new part is discovered. If all the parts have already been discovered, moving a single existing part in the interaction is not penalized.
We compute the average of the perceptual metrics for each category at every timestep over five models trained with different random seeds. Hence IoU, APE, and $d_{H95}$ yield mIoU, MAPE, and $\bar{d}_{H95}$. For the evaluation in Tab. 2, we consider metrics after the fifth timestep. Effective and optimal step scores are averaged for each category over all timesteps (in contrast to being considered only at the fifth timestep).
Baselines and Ablations. We compare the AtP framework trained with and without touch reward, [Ours-Touch] and [Ours-NoTouch] respectively, with the following alternative approaches to study the efficacy of our interaction network. All methods use the same part network trained from the full AtP rollouts:
- Act-Random: hold and push locations and the push direction are uniformly sampled from the action space.
- Act-NoHold: The agent applies a single push action every step. Single modes of interaction are widely used in interactive segmentation algorithms [13, 5, 46, 32, 33, 12]; however, this ablation differs from these works as push is learned for the part segmentation task.
- Act-NoPart: The interaction network does not take the part memory and considers each moved part as a new part for reward calculation.
For the modified reward used to train the above networks see Appx. C. We also design three oracle algorithms using simulation state to provide performance upper bounds:
- GT-Single: GT object mask as output. This gives an upper bound for part-unaware algorithms that segment objects from background via interaction.
- GT-Act: Optimal steps based on ground truth state, but using the AtP part network for mask inference. This is conceptually similar to [34], which uses expert actions for part segmentation.
- GT-Act+Mot: Optimal steps based on ground truth state, with motion masks from the ground truth flow.
# 4.2. Benchmark Results
To validate the major design decisions, we run a series of quantitative experiments in simulation to compare different algorithms. We also provide qualitative results in Fig. 5. In Sec. 4.3, we evaluate our model on real world data.
Does Interaction Help Part Discovery? First we want to validate whether AtP learns effective interaction strategies for part discovery by accumulating information over time. To evaluate, we plot part mIoU w.r.t. interaction steps in Fig. 6. As expected, the upper bounds peak at 2 and 3 steps for pliers and multilink, respectively. While the other algorithms' performance saturates quickly after one or two interactions, [Ours-Touch] and [Ours-NoTouch] continue to improve with more interactions. These plots indicate that while the learned interaction strategies may not be optimal (compared to upper bounds using ground truth state), they are informative for discovering new parts of the object and for self-correcting errors over time. Results for the other categories are presented in Appx. G, where we see all AtP curves approach the upper bounds.

Figure 5. Qualitative Results. Our network learns a policy to interact with unseen objects and categories. While it is only trained on objects with two parts, it also learns to reason about three-part objects. Due to space limitations, only three interaction steps are shown in this figure. For more results, please refer to our project website: https://atp.cs.columbia.edu.
Does Part Prediction Help with Action Selection? Our interaction network takes the current belief of the part segmentation as input and obtains reward for new part discovery.
<table><tr><td></td><td colspan="4">Seen Categories (Novel Instances)</td><td colspan="5">Unseen Categories</td></tr><tr><td>Method</td><td>Scissors ↓/↓/↑</td><td>Knife ↓/↓/↑</td><td>USB ↓/↓/↑</td><td>Safe ↓/↓/↑</td><td>Pliers ↓/↓/↑</td><td>Microwave ↓/↓/↑</td><td>Lighter ↓/↓/↑</td><td>Eyeglasses ↓/↓/↑</td><td>Multilink ↓/↓/↑</td></tr><tr><td>GT-Single</td><td>0.5 / 36.2 / 27.8</td><td>0.5 / 42.5 / 32.7</td><td>0.5 / 31.7 / 29.5</td><td>0.5 / 39.4 / 44.3</td><td>0.5 / 47.3 / 28.1</td><td>0.5 / 38.9 / 43.3</td><td>0.5 / 38.2 / 34.1</td><td>0.66 / 56.2 / 18.4</td><td>0.66 / 51.5 / 14.2</td></tr><tr><td>GT-Act</td><td>0.01 / 4.3 / 78.0</td><td>0.02 / 4.5 / 81.6</td><td>0.0 / 5.7 / 82.7</td><td>0.02 / 2.1 / 89.7</td><td>0.01 / 4.3 / 78.4</td><td>0.03 / 2.4 / 87.8</td><td>0.0 / 3.3 / 88.0</td><td>0.03 / 7.4 / 64.6</td><td>0.10 / 7.2 / 75.3</td></tr><tr><td>GT-Act+Mot</td><td>0.0 / 1.6 / 88.4</td><td>0.0 / 0.9 / 92.9</td><td>0.0 / 2.4 / 91.5</td><td>0.0 / 0.6 / 91.7</td><td>0.0 / 0.9 / 92.7</td><td>0.0 / 0.4 / 94.2</td><td>0.0 / 0.6 / 94.8</td><td>0.0 / 4.2 / 82.5</td><td>0.0 / 5.2 / 86.9</td></tr><tr><td>Act-Random</td><td>0.62 / 36.2 / 22.0</td><td>0.63 / 41.6 / 24.1</td><td>0.47 / 26.8 / 33.1</td><td>0.62 / 40.8 / 32.5</td><td>0.56 / 39.9 / 25.0</td><td>0.58 / 37.9 / 36.0</td><td>0.62 / 40.4 / 24.5</td><td>0.70 / 53.5 / 12.5</td><td>0.78 / 52.6 / 10.7</td></tr><tr><td>Act-NoHold</td><td>0.46 / 34.4 / 28.5</td><td>0.43 / 35.5 / 35.2</td><td>0.40 / 30.0 / 33.2</td><td>0.38 / 32.1 / 42.7</td><td>0.41 / 35.4 / 30.5</td><td>0.41 / 31.3 / 40.7</td><td>0.45 / 39.6 / 34.3</td><td>0.40 / 41.6 / 19.6</td><td>0.53 / 48.0 / 19.9</td></tr><tr><td>Act-NoPart</td><td>0.25 / 15.3 / 53.8</td><td>0.29 / 20.4 / 51.6</td><td>0.44 / 19.5 / 47.6</td><td>0.37 / 18.3 / 49.1</td><td>0.34 / 20.6 / 48.5</td><td>0.43 / 19.7 / 46.3</td><td>0.49 / 26.4 / 42.0</td><td>0.33 / 27.1 / 34.4</td><td>0.40 / 31.0 / 39.7</td></tr><tr><td>Ours-NoTouch</td><td>0.19 / 13.1 / 58.0</td><td>0.16 / 13.2 / 66.2</td><td>0.14 / 10.4 / 68.6</td><td>0.15 / 9.9 / 75.0</td><td>0.15 / 12.5 / 59.8</td><td>0.14 / 9.2 / 74.1</td><td>0.22 / 14.5 / 64.4</td><td>0.28 / 26.2 / 37.9</td><td>0.25 / 24.6 / 46.6</td></tr><tr><td>Ours-Touch</td><td>0.10 / 8.5 / 65.6</td><td>0.16 / 12.2 / 65.9</td><td>0.09 / 8.3 / 75.3</td><td>0.17 / 10.1 / 74.2</td><td>0.13 / 9.7 / 64.9</td><td>0.14 / 8.3 / 75.4</td><td>0.25 / 15.1 / 62.8</td><td>0.24 / 21.8 / 43.0</td><td>0.22 / 20.0 / 54.7</td></tr></table>
Table 2. Perception performance. MAPE [frac] / $\overline{d}_{H95}$ [pixels] / mIoU [%]. Image resolution is $90 \times 90$ . Numbers are evaluated after the fifth interaction. Numbers are averaged over five models trained with different seeds.
Figure 6. IoU w.r.t. Interaction Steps. Results on two unseen object categories show our methods (pink and brown) approach the oracle baseline over time.
Figure 7. Effective and Optimal Steps. Our method learns an efficient policy that chooses optimal steps (i.e., actions that discover new parts) more frequently than other approaches.
We hope this design encourages the algorithm to focus on selecting actions that provide information gain (e.g., push new parts to discover them). To validate this design, we compare AtP to an ablated version, [Act-NoPart], which is not mask-aware. Interestingly, this model performs effective actions at roughly the same rate as [Ours-Touch] (Fig. 7); however, [Ours-Touch] is better at finding optimal steps (resulting in new part discovery). Histograms for all other categories are presented in Appx. G and corroborate these findings. This result is also supported in Tab. 2, which shows degradation on all perceptual metrics when part-awareness is not exploited.
Is Holding Necessary? In contrast to the vast majority of prior work, which uses simple push actions, our algorithm uses bimanual actions for object interaction (i.e., simultaneous hold and push). Our hypothesis is that such actions give the system a better chance at disentangling motion between different moving parts and therefore aid part discovery. To validate this hypothesis, we compare our algorithm with an agent that performs only push actions [Act-NoHold]. The result in Tab. 2 shows that without the hold action the system performs much worse at part segmentation. [Act-NoHold] has trouble discovering more than one object part, since the whole object is likely to move during interaction. Furthermore, this result suggests more complex perceptual modules would be necessary for push-only policies to achieve competitive performance at this task. While this is an interesting direction, disentangling the motion of many moving parts is non-trivial and out of scope for this paper.
Does Touch Feedback Help? In this experiment, we want to evaluate the effect of touch feedback. Looking at Tab. 2, we see that [Ours-Touch] outperforms [Ours-NoTouch] in most categories. A similar trend is noticeable when looking at action performance in Figs. 6 and 7. We conjecture this is due to the benefit of using the touch signal to define more specific reward cases, which is ultimately reflected in the final system performance. However, we are still able to learn helpful interaction strategies even without touch.
Generalization to Unseen Objects and Categories. Our algorithm does not make category-level assumptions; therefore, the same policy and perception model should work for unseen object categories with different kinematic structures. More specifically, we wish to probe the generalization capabilities of our model to unseen instances from seen categories and to novel categories.
The algorithm's generalizability is supported by the results in Tab. 2, where mIoU, MAPE, and $\bar{d}_{H95}$ are comparable for seen and unseen categories. Performance on eyeglasses is slightly worse; however, it is still impressive given that our model is only trained on instances with two links. Furthermore, for eyeglasses, the MAPE falls under 0.33, suggesting the model finds all three parts in most cases. IoU performance on the multilink category is better than on eyeglasses; however, MAPE is comparable, suggesting that eyeglasses are particularly challenging for reasons other than having three links. These results indicate that our method learns to interact intelligently and reason about motion in spite of differing shape, texture, or structure in the test objects.
Figure 8. Conditional Action on Real Images. (a) Varying the hold location, we observe the model is able to reason where to push right. (b) Fixing the hold location, we observe the model reasons about a good direction to push (i.e., top left).
# 4.3. Real World Results
In these experiments, we want to validate [Ours-Touch] performance on real world data. Since our algorithm does not need prior knowledge of objects or special sensory input during inference, we directly test our learned model on real world RGB images of unseen categories taken by smartphone cameras. To build a pipeline that demonstrates the viability of our model on real world data, a camera is positioned over an articulated object and an image is captured. Our trained model runs interaction inference, predicts hold and push actions, and provides a human operator with instructions on what to execute. The next-frame image is then sent back to the model, at which point it runs the part network, history aggregation, and another round of interaction inference. More details and a discussion of experimental limitations can be found in Appx. F.
Conditional Action Reasoning. We visualize the conditional action inference results from the interaction network on real world images. Fig. 8 shows two types of visualizations. In example (a), we pick various hold positions and analyze the "push right" reward prediction maps (recall: pushing is conditioned on holding). We notice that the affordance prediction switches between the links depending on the hold location, which indicates the network's understanding of the object structure. When the hold is placed in free space or between the links, the push reward predictions are not confident about pushing anywhere. These results suggest that our model is able to disentangle push predictions from its own hold predictions, thereby demonstrating a form of conditional reasoning.
In example (b), we further probe the model's reasoning about the push direction by visualizing different push maps for the same holding position. Among all directions, the network infers the highest score on the top-left rotation, which would push the scissors open. The result suggests that the algorithm is able to pick a push direction that would lead to informative motion, when reasoning over many options.
Interaction Experiment. Next, we evaluate the perception and interaction networks together with real world physical interactions. To validate performance independent of robot execution accuracy, a human is instructed to execute the actions. Fig. 9 shows the predicted actions, affordances, and final object part masks discovered by the algorithm. Without any fine-tuning, the algorithm shows promising results on inferring interaction strategies and reasoning about the observed motion for part discovery. Please refer to Appx. G for more real world experiment results and failure case analysis.

Figure 9. Interaction Experiment. Images were captured and sent to AtP, where hold and push are predicted. A human agent executes actions following the instructions. From pairs of images, masks are recovered and aggregated by AtP, and the next instruction is given. This is especially interesting because the lighting, shadows, and artifacts of the images taken with a phone differ from our simulated environment (i.e., the sim2real gap).
# 5. Conclusion and Future Work
We present Act the Part (AtP) to take visual observations of articulated objects, interact strategically, and output part segmentation masks. Our experiments suggest: (1) AtP is able to learn efficient strategies to isolate and discover parts. (2) AtP generalizes to novel categories of objects with unknown and unseen numbers of links—in simulation and the real world. (3) Our model demonstrates conditional reasoning about how to push based on arbitrary hold locations. We see broad scope for future work, including extensions to 3D part segmentation and unified frameworks for rigid, articulated, and deformable object understanding. We hope this paper will inspire others in the vision and robotics communities to investigate perception and interaction in tandem.
Acknowledgements. Thank you Shubham Agrawal, Jessie Chapman, Cheng Chi, the Gadres, Bilkit Githinji, Huy Ha, Gabriel Ilharco Magalhaes, Sarah Pratt, Fiadh Sheeran, Mitchell Wortsman, and Zhenjia Xu for valuable conversations. This work was supported in part by the Amazon Research Award and NSF CMMI-2037101.
# References
[1] Ben Abbatematteo, Stefanie Tellex, and George Konidaris. Learning to generalize kinematic models to novel objects. CoRL, 2019. 1, 2
[2] Sami Abu-El-Haija, Nisarg Kothari, Joonseok Lee, Paul Natsev, George Toderici, Balakrishnan Varadarajan, and Sudheendra Vijayanarasimhan. YouTube-8M: A large-scale video classification benchmark. arXiv preprint arXiv:1609.08675, 2016. 1
[3] Renée Baillargeon. Infants' physical world. Current Directions in Psychological Science, 13(3), 2004. 1
[4] Patrick R. Barragan, Leslie Kaelbling, and Tomás Lozano-Pérez. Interactive Bayesian identification of kinematic mechanisms. ICRA, 2014. 2
[5] Mårten Björkman and Danica Kragic. Active 3D scene segmentation and detection of unknown objects. ICRA, 2010. 2, 5
[6] Jeannette Bohg, Karol Hausman, Bharath Sankaran, Oliver Brock, Danica Kragic, Stefan Schaal, and Gaurav Sukhatme. Interactive perception: Leveraging action in perception and perception in action. T-RO, 2017. 2
[7] Arunkumar Byravan and Dieter Fox. SE3-Nets: Learning rigid body motion using deep neural networks. ICRA, 2017. 2
[8] Vikram Chalana and Yongmin Kim. A methodology for evaluation of boundary detection algorithms on medical images. IEEE Transactions on Medical Imaging, 16(5):642-652, 1997. 5
[9] Angel X. Chang, Thomas Funkhouser, Leonidas Guibas, Pat Hanrahan, Qixing Huang, Zimo Li, Silvio Savarese, Manolis Savva, Shuran Song, Hao Su, et al. ShapeNet: An information-rich 3D model repository. arXiv preprint arXiv:1512.03012, 2015. 2, 3, 11
[10] Vivian Chu, Ian McMahon, Lorenzo Riano, Craig G. McDonald, Qin He, Jorge Martinez Perez-Tejada, Michael Arrigo, Trevor Darrell, and Katherine J. Kuchenbecker. Robotic learning of haptic adjectives through physical interaction. Robotics and Autonomous Systems, 63, 2015. 2
[11] Erwin Coumans and Yunfei Bai. PyBullet, a Python module for physics simulation for games, robotics and machine learning. 2016. 3, 11
[12] Andreas Eitel, Nico Hauff, and Wolfram Burgard. Self-supervised transfer learning for instance segmentation through physical interaction. IROS, 2019. 2, 5
[13] Paul M. Fitzpatrick. First contact: an active vision approach to segmentation. IROS, 2003. 2, 5
[14] James J. Gibson. The ecological approach to visual perception. Psychology Press, 1979. 1
[15] Karol Hausman, Scott Niekum, Sarah Osentoski, and Gaurav S. Sukhatme. Active articulation model estimation through interactive perception. ICRA, 2015. 2
[16] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. CVPR, 2016. 3, 12
[17] Richard Held and Alan Hein. Movement-produced stimulation in the development of visually guided behavior. Journal of Comparative and Physiological Psychology, 56, 1963. 1
[18] Jiahui Huang, He Wang, Tolga Birdal, Minhyuk Sung, Federica Arrigoni, Shi-Min Hu, and Leonidas Guibas. MultiBodySync: Multi-body segmentation and motion estimation via 3D scan synchronization. arXiv preprint arXiv:2101.06605, 2021. 2
[19] Wei-Chih Hung, Varun Jampani, Sifei Liu, Pavlo Molchanov, Ming-Hsuan Yang, and Jan Kautz. SCOPS: Self-supervised co-part segmentation. CVPR, 2019. 2
[20] Tomas Jakab, Ankush Gupta, Hakan Bilen, and Andrea Vedaldi. Unsupervised learning of object landmarks through conditional image generation. NeurIPS, 2018. 3
[21] Dov Katz, Moslem Kazemi, J. Andrew Bagnell, and Anthony Stentz. Interactive segmentation, tracking, and kinematic modeling of unknown 3D articulated objects. ICRA, 2013. 2
[22] Timothy E. Lee, Jonathan Tremblay, Thang To, Jia Cheng, Terry Mosier, Oliver Kroemer, Dieter Fox, and Stan Birchfield. Camera-to-robot pose estimation from a single image. ICRA, 2020. 2
[23] Xiaolong Li, He Wang, Li Yi, Leonidas Guibas, A. Lynn Abbott, and Shuran Song. Category-level articulated object pose estimation. CVPR, 2020. 1, 2
[24] Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross Girshick, James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, and Piotr Dollár. Microsoft COCO: Common objects in context. ECCV, 2014. 1
[25] Qihao Liu, Weichao Qiu, Weiyao Wang, Gregory D. Hager, and Alan L. Yuille. Nothing but geometric constraints: A model-free method for articulated object pose estimation. arXiv preprint arXiv:2012.00088, 2020. 2
[26] Martin Lohmann, Jordi Salvador, Aniruddha Kembhavi, and Roozbeh Mottaghi. Learning about objects by learning to interact with them. NeurIPS, 2020. 2
[27] Roberto Martín-Martín and Oliver Brock. Online interactive perception of articulated objects with multi-level recursive estimation based on task-specific priors. IROS, 2014. 2
[28] Kaichun Mo, Leonidas Guibas, Mustafa Mukadam, Abhinav Gupta, and Shubham Tulsiani. Where2Act: From pixels to actions for articulated 3D objects. arXiv preprint arXiv:2101.02692, 2021. 1, 2
[29] Kaichun Mo, Shilin Zhu, Angel X. Chang, Li Yi, Subarna Tripathi, Leonidas J. Guibas, and Hao Su. PartNet: A large-scale benchmark for fine-grained and hierarchical part-level 3D object understanding. CVPR, 2019. 2, 3, 11
[30] Iman Nematollahi, Oier Mees, Lukas Hermann, and Wolfram Burgard. Hindsight for foresight: Unsupervised structured dynamics models from physical interaction. IROS, 2020. 2
[31] Stefan Otte, Johannes Kulick, Marc Toussaint, and Oliver Brock. Entropy-based strategies for physical exploration of the environment's degrees of freedom. IROS, 2014. 2
[32] Joni Pajarinen and Ville Kyrki. Decision making under uncertain segmentations. ICRA, 2015. 2, 5
[33] Deepak Pathak, Yide Shentu, Dian Chen, Pulkit Agrawal, Trevor Darrell, Sergey Levine, and Jitendra Malik. Learning instance segmentation by interaction. CVPRW, 2018. 1, 2, 5
[34] Sudeep Pillai, Matthew Walter, and Seth Teller. Learning articulated motions from visual demonstration. RSS, 2014. 2, 5
[35] Lerrel Pinto, Dhiraj Gandhi, Yuanfeng Han, Yong-Lae Park, and Abhinav Gupta. The curious robot: Learning visual representations via physical interactions. ECCV, 2016. 2
[36] Charles R. Qi, Hao Su, Kaichun Mo, and Leonidas J. Guibas. PointNet: Deep learning on point sets for 3D classification and segmentation. CVPR, 2017. 2
[37] Charles R. Qi, Li Yi, Hao Su, and Leonidas J. Guibas. PointNet++: Deep hierarchical feature learning on point sets in a metric space. NeurIPS, 2017. 2
[38] Gwendolyn E. Roberson, Mark T. Wallace, and James A. Schirillo. The sensorimotor contingency of multisensory localization correlates with the conscious percept of spatial unity. Behavioral and Brain Sciences, 24(5), 2001. 1
[39] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-Net: Convolutional networks for biomedical image segmentation. MICCAI, pages 234-241, 2015. 3, 13
[40] Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. ImageNet large scale visual recognition challenge. IJCV, 2015. 1
[41] Tanner Schmidt, Richard A. Newcombe, and Dieter Fox. DART: Dense articulated real-time tracking. RSS, 2014. 2
[42] Jürgen Sturm, Cyril Stachniss, and Wolfram Burgard. A probabilistic framework for learning kinematic models of articulated objects. JAIR, 2011. 2
[43] Stavros Tsogkas, Iasonas Kokkinos, George Papandreou, and Andrea Vedaldi. Semantic part segmentation with deep learning. arXiv preprint arXiv:1505.02438, 2015. 2
[44] Mike Tucker and Rob Ellis. On the relation between seen objects and components of potential actions. Journal of Experimental Psychology: Human Perception and Performance, 24, 1998. 1
[45] Hsiao-Yu Fish Tung, Zhou Xian, Mihir Prabhudesai, Shamit Lal, and Katerina Fragkiadaki. 3D-OES: Viewpoint-invariant object-factorized environment simulators. CoRL, 2020. 2
[46] Herke van Hoof, Oliver Kroemer, and Jan Peters. Probabilistic segmentation and targeted exploration of objects in cluttered environments. T-RO, 2014. 2, 5
[47] Jianyu Wang and Alan Yuille. Semantic part segmentation using compositional model combining shape and appearance. CVPR, 2015. 2
[48] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E. Sarma, Michael M. Bronstein, and Justin M. Solomon. Dynamic graph CNN for learning on point clouds. TOG, 2019. 2
[49] Fanbo Xiang, Yuzhe Qin, Kaichun Mo, Yikuan Xia, Hao Zhu, Fangchen Liu, Minghua Liu, Hanxiao Jiang, Yifu Yuan, He Wang, Li Yi, Angel X. Chang, Leonidas J. Guibas, and Hao Su. SAPIEN: A simulated part-based interactive environment. CVPR, 2020. 2, 3, 11
[50] Zhenjia Xu, Zhanpeng He, Jiajun Wu, and Shuran Song. Learning 3D dynamic scene representations for robot manipulation. CoRL, 2020. 2
[51] Zhenjia Xu, Zhijian Liu, Chen Sun, Kevin Murphy, William Freeman, Joshua Tenenbaum, and Jiajun Wu. Unsupervised discovery of parts, structure, and dynamics. ICLR, 2019. 2
[52] Zhenjia Xu, Jiajun Wu, Andy Zeng, Joshua B. Tenenbaum, and Shuran Song. DensePhysNet: Learning dense physical object representations via multi-step dynamic interactions. RSS, 2019. 2
[53] Jingyu Yan and Marc Pollefeys. A general framework for motion segmentation: Independent, articulated, rigid, non-rigid, degenerate and non-degenerate. ECCV, 2006. 2
[54] Jianwei Yang, Zhile Ren, Mingze Xu, Xinlei Chen, David J. Crandall, Devi Parikh, and Dhruv Batra. Embodied amodal recognition: Learning to move to perceive objects. ICCV, 2019. 2
[55] Li Yi, Haibin Huang, Difan Liu, Evangelos Kalogerakis, Hao Su, and Leonidas Guibas. Deep part induction from articulated object pairs. TOG, 37(6), 2019. 2
[56] Andy Zeng, Shuran Song, Stefan Welker, Johnny Lee, Alberto Rodriguez, and Thomas Funkhouser. Learning synergies between pushing and grasping with self-supervised deep reinforcement learning. IROS, 2018. 3
actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e32072e3a7335386a0a912e2ed038c12d27e5d860b2b9349bbf7b5f601589981
size 572789
actthepartlearninginteractionstrategiesforarticulatedobjectpartdiscovery/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1ade2028eba788e3740c043ee3fe745137ca9c5d0044b56d8585665aa408c3b8
size 380565
adaattnrevisitattentionmechanisminarbitraryneuralstyletransfer/63d5ce94-5551-487f-8d44-d134ec17d02b_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:132082a4eb644c500d26941c91452c8bb8f6cb631c48dbe7b1c87f1b10cd2de6
size 80707
adaattnrevisitattentionmechanisminarbitraryneuralstyletransfer/63d5ce94-5551-487f-8d44-d134ec17d02b_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5d92eaaf7b1f9bb2a09f5261ebfe08b4b2e8f3a2e29fc0269ab00196fcab955c
size 100201