Add Batch 72ba5f4b-2606-4901-badf-3a89a733f0e9
This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
- actionlocalizationthroughcontinualpredictivelearning/3bbe7823-fe23-4bbf-8d5b-8f7efaef12cc_content_list.json +3 -0
- actionlocalizationthroughcontinualpredictivelearning/3bbe7823-fe23-4bbf-8d5b-8f7efaef12cc_model.json +3 -0
- actionlocalizationthroughcontinualpredictivelearning/3bbe7823-fe23-4bbf-8d5b-8f7efaef12cc_origin.pdf +3 -0
- actionlocalizationthroughcontinualpredictivelearning/full.md +289 -0
- actionlocalizationthroughcontinualpredictivelearning/images.zip +3 -0
- actionlocalizationthroughcontinualpredictivelearning/layout.json +3 -0
- actionsasmovingpoints/a3d29da9-cfad-485f-a46c-15611092570e_content_list.json +3 -0
- actionsasmovingpoints/a3d29da9-cfad-485f-a46c-15611092570e_model.json +3 -0
- actionsasmovingpoints/a3d29da9-cfad-485f-a46c-15611092570e_origin.pdf +3 -0
- actionsasmovingpoints/full.md +281 -0
- actionsasmovingpoints/images.zip +3 -0
- actionsasmovingpoints/layout.json +3 -0
- activecrowdcountingwithlimitedsupervision/78314569-4d9a-4d8a-8006-ffa9e1ce7f64_content_list.json +3 -0
- activecrowdcountingwithlimitedsupervision/78314569-4d9a-4d8a-8006-ffa9e1ce7f64_model.json +3 -0
- activecrowdcountingwithlimitedsupervision/78314569-4d9a-4d8a-8006-ffa9e1ce7f64_origin.pdf +3 -0
- activecrowdcountingwithlimitedsupervision/full.md +290 -0
- activecrowdcountingwithlimitedsupervision/images.zip +3 -0
- activecrowdcountingwithlimitedsupervision/layout.json +3 -0
- activeperceptionusinglightcurtainsforautonomousdriving/64e5a70a-c7d5-4804-aaf5-56c8f5cd421b_content_list.json +3 -0
- activeperceptionusinglightcurtainsforautonomousdriving/64e5a70a-c7d5-4804-aaf5-56c8f5cd421b_model.json +3 -0
- activeperceptionusinglightcurtainsforautonomousdriving/64e5a70a-c7d5-4804-aaf5-56c8f5cd421b_origin.pdf +3 -0
- activeperceptionusinglightcurtainsforautonomousdriving/full.md +274 -0
- activeperceptionusinglightcurtainsforautonomousdriving/images.zip +3 -0
- activeperceptionusinglightcurtainsforautonomousdriving/layout.json +3 -0
- activevisualinformationgatheringforvisionlanguagenavigation/a84150dc-dd46-413d-8323-ec629cc4b60a_content_list.json +3 -0
- activevisualinformationgatheringforvisionlanguagenavigation/a84150dc-dd46-413d-8323-ec629cc4b60a_model.json +3 -0
- activevisualinformationgatheringforvisionlanguagenavigation/a84150dc-dd46-413d-8323-ec629cc4b60a_origin.pdf +3 -0
- activevisualinformationgatheringforvisionlanguagenavigation/full.md +319 -0
- activevisualinformationgatheringforvisionlanguagenavigation/images.zip +3 -0
- activevisualinformationgatheringforvisionlanguagenavigation/layout.json +3 -0
- adaptingobjectdetectorswithconditionaldomainnormalization/05885c2c-b740-4e53-9dfe-2678a4cac04a_content_list.json +3 -0
- adaptingobjectdetectorswithconditionaldomainnormalization/05885c2c-b740-4e53-9dfe-2678a4cac04a_model.json +3 -0
- adaptingobjectdetectorswithconditionaldomainnormalization/05885c2c-b740-4e53-9dfe-2678a4cac04a_origin.pdf +3 -0
- adaptingobjectdetectorswithconditionaldomainnormalization/full.md +357 -0
- adaptingobjectdetectorswithconditionaldomainnormalization/images.zip +3 -0
- adaptingobjectdetectorswithconditionaldomainnormalization/layout.json +3 -0
- adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/ce8cfb4f-aab6-4458-b05a-8376aede26a3_content_list.json +3 -0
- adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/ce8cfb4f-aab6-4458-b05a-8376aede26a3_model.json +3 -0
- adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/ce8cfb4f-aab6-4458-b05a-8376aede26a3_origin.pdf +3 -0
- adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/full.md +325 -0
- adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/images.zip +3 -0
- adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/layout.json +3 -0
- adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/20cc9823-27e8-4b97-95f3-8c57433a4366_content_list.json +3 -0
- adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/20cc9823-27e8-4b97-95f3-8c57433a4366_model.json +3 -0
- adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/20cc9823-27e8-4b97-95f3-8c57433a4366_origin.pdf +3 -0
- adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/full.md +260 -0
- adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/images.zip +3 -0
- adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/layout.json +3 -0
- adaptivemixtureregressionnetworkwithlocalcountingmapforcrowdcounting/8104dbf5-7c8b-4e86-821c-1a6135e556ba_content_list.json +3 -0
- adaptivemixtureregressionnetworkwithlocalcountingmapforcrowdcounting/8104dbf5-7c8b-4e86-821c-1a6135e556ba_model.json +3 -0
actionlocalizationthroughcontinualpredictivelearning/3bbe7823-fe23-4bbf-8d5b-8f7efaef12cc_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d4449d20c8d0b8d58767f389381c5d40d36ea18b3ffa1f3e02a4ddae2968245
+size 77825
actionlocalizationthroughcontinualpredictivelearning/3bbe7823-fe23-4bbf-8d5b-8f7efaef12cc_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a8e62af56fe10dc54f2fb41376de6bb2e56648c02d2b87d71a8b7face1c9e00d
+size 96791
actionlocalizationthroughcontinualpredictivelearning/3bbe7823-fe23-4bbf-8d5b-8f7efaef12cc_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c950b41d0e264138b6debf6384f4c2a0ea8d655921c182eb00dd819df8173e11
+size 10704015
actionlocalizationthroughcontinualpredictivelearning/full.md
ADDED
@@ -0,0 +1,289 @@
# Action Localization through Continual Predictive Learning

Sathyanarayanan Aakur$^{1}$ [0000-0003-1062-8929] and Sudeep Sarkar$^{2}$ [0000-0001-7332-4207]

<sup>1</sup> Oklahoma State University, Stillwater, OK 74074, saakur@okstate.edu
<sup>2</sup> University of South Florida, Tampa, FL 33620, sarkar@usf.edu
Abstract. The problem of action localization involves locating the action in the video, both over time and spatially in the image. The current dominant approaches use supervised learning to solve this problem. They require large amounts of annotated training data, in the form of frame-level bounding box annotations around the region of interest. In this paper, we present a new approach based on continual learning that uses feature-level predictions for self-supervision. It does not require any training annotations in terms of frame-level bounding boxes. The approach is inspired by cognitive models of visual event perception that propose a prediction-based approach to event understanding. We use a stack of LSTMs coupled with a CNN encoder, along with novel attention mechanisms, to model the events in the video and use this model to predict high-level features for the future frames. The prediction errors are used to learn the parameters of the models continuously. This self-supervised framework is not as complicated as other approaches but is very effective in learning robust visual representations for both labeling and localization. It should be noted that the approach produces its outputs in a streaming fashion, requiring only a single pass through the video, making it amenable to real-time processing. We demonstrate this on three datasets, UCF Sports, JHMDB, and THUMOS'13, and show that the proposed approach outperforms weakly-supervised and unsupervised baselines and obtains competitive performance compared to fully supervised baselines. Finally, we show that the proposed framework can generalize to egocentric videos and achieve state-of-the-art results on the unsupervised gaze prediction task. Code is available on the project page<sup>3</sup>.

Keywords: Action localization, continuous learning, self-supervision
# 1 Introduction

We develop a framework for jointly learning spatial and temporal localization through continual, self-supervised learning, in a streaming fashion, requiring only a single pass through the video. Visual understanding tasks in computer vision have focused on the problems of recognition [1, 3, 23, 25] and captioning [1, 9, 47, 46], with the underlying assumption that each input video is already localized both spatially and temporally. While there has been tremendous progress in action localization, it has primarily been driven by the dependence on large amounts of tedious spatial-temporal annotations. In this work, we aim to tackle the problem of spatial-temporal segmentation of streaming videos in a continual, self-supervised manner, without any training annotations.

|
| 19 |
+
Fig. 1: The Proposed Approach has four core components: (i) feature extraction and spatial region proposal, (ii) a future prediction framework, (iii) a spatial-temporal error detection module and (iv) the error-based action localization process.
|
| 20 |
+
|
| 21 |
+
Drawing inspiration from psychology [13, 14, 52], we consider the underlying mechanism for both event understanding and attention selection in humans to be the idea of predictability. Known as the surprise-attention hypothesis [13], unpredictable factors such as large changes in the motion, appearance, or goals of the actor have a substantial effect on event perception and human attention. Human event perception studies [52, 2] have shown that longer-term, temporal surprise has a strong correlation with event boundary detection. In contrast, short-term spatial surprise (such as that caused by motion) has a more substantial effect on human attention and localization [14]. Our approach combines both spatial and temporal surprise to formulate a computational framework that tackles the problem of self-supervised action localization in streaming videos in a continual manner.

We formulate our computational framework around the idea of spatial-temporal feature anticipation to model the predictability of perceptual features. The main assumption in our framework is that unexpected, unpredictable features require attention and often point to the actor performing the action of interest. In contrast, predictable features can belong to background clutter and are not relevant to the action of interest. Note that unpredictability or surprise is not the same as rarity; it refers to short-term changes that aid in the completion of an overall task, which can be recurring [13]. We model the perceptual features using a hierarchical, cyclical, and recurrent framework, whose predictions are influenced by current and prior observations as well as current perceptual predictions. Hence, the predictive model's output can influence the perception of the current frame being observed. The predictions are constantly compared with the incoming observations to provide self-supervision to guide future predictions.

We leverage these characteristics to derive and quantify spatial-temporal predictability. Our framework performs continuous learning to generate "attention maps" that overlap with the action being performed. Using these attention maps, we leverage advances in region proposals [29, 31, 44, 54] to localize actions in streaming videos without any supervision. Contrary to other attention-based approaches [5, 28, 33], we do not use object-level characteristics such as label, role, and affordance in the proposal generation process.

Contributions: The contributions of our approach are three-fold: (i) we are among the first to tackle the problem of self-supervised action localization in streaming videos without any training data such as labels or bounding boxes, (ii) we show that modeling the spatial-temporal prediction error can yield consistent localization performance across action classes, and (iii) we show that the approach generalizes to egocentric videos and achieves competitive performance on the unsupervised gaze prediction task.
# 2 Related Work

Supervised action localization approaches tackle action localization through the simultaneous generation of bounding box proposals and the labeling of each bounding box with the predicted action class. Both bounding box generation and labeling are fully supervised, i.e., they require ground truth annotations of both bounding boxes and labels. Typical approaches leverage advances in object detection and include temporal information [7, 16, 18, 36, 37, 40, 43, 50] for proposal generation. The final step typically involves the use of the Viterbi algorithm [7] to link the generated bounding boxes across time.

Weakly-supervised action localization approaches have been explored to reduce the need for extensive annotations [5, 26, 28, 33]. They typically require only video-level labels and rely on object detection-based approaches to generate bounding box proposals. Note that weakly supervised approaches also use object-level labels and characteristics to guide the bounding box selection process. Some approaches [5] use a similarity-based tracker to connect bounding boxes across time and thereby incorporate temporal consistency.

Unsupervised action localization approaches have not been explored to the same extent as supervised and weakly-supervised approaches. These approaches do not require any supervision, i.e., neither labels nor bounding boxes. The two more common strategies generate action proposals using (i) supervoxels [18, 38] and (ii) clustering of motion trajectories [45]. It should be noted that [38] also uses object characteristics to evaluate the "humanness" of each supervoxel to select bounding box proposals. Our approach falls into the class of unsupervised action localization approaches. The most closely related approaches to ours (with respect to architecture and theme) are VideoLSTM [28] and Actor Supervision [5], which use attention in the selection process for generating bounding box proposals, but require video-level labels. We, on the other hand, do not require any labels or bounding box annotations for training.

While fully supervised approaches achieve more precise localization and better recognition, the required number of annotations is rather large and does not scale well with an increase in the number of classes or a decrease in the number of training videos. While not requiring frame-level annotations, weakly supervised approaches carry the underlying assumption that there exists a large, annotated training set that allows for effective detection of all possible actors (both human and non-human) in the set of action classes. Unsupervised approaches, such as ours, do not make any such assumptions but can result in poorer localization performance. We alleviate this to an extent by leveraging advances in region proposal mechanisms and by self-learning robust representations for obtaining video-level labels.
# 3 Self-Supervised Action Localization

In this section, we introduce our self-supervised action localization framework, as illustrated in Figure 1. Our approach has four core components: (i) feature extraction and spatial region proposal, (ii) a self-supervised future prediction framework, (iii) a spatial-temporal error detection module, and (iv) the error-based action localization process.

# 3.1 Feature Extraction and Spatial Region Proposal

The first step in our approach is feature extraction and the subsequent per-frame region proposal generation for identifying possible areas of actions and associated objects. Considering the tremendous advances in deep learning architectures for learning robust spatial representations, we use a pre-trained convolutional neural network to extract the spatial features for each frame in the video. We use a region proposal module, based on these spatial features, to predict possible action-agnostic spatial locations. We use class-agnostic proposals (i.e., the object category is ignored, and only feature-based localizations are taken into account) for two primary reasons. First, we do not want to make any assumptions about the actor's characteristics, such as label, role, and affordance. Second, despite significant progress in object detection, there can be many missed detections, especially when the object (or actor) performs actions that transform its physical appearance. Note that these considerations can result in a large number of region proposals that require careful and robust selection, but they yield higher chances of correct localization.
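To make the class-agnostic proposal step concrete, the following is a minimal sketch using an off-the-shelf SSD detector from torchvision. The specific detector, its COCO weights, and the fact that it runs separately from the shared VGG-16 encoder are illustrative assumptions rather than the paper's exact setup (the paper shares the encoder between feature extraction and proposal generation); the only point shown is that predicted class labels and class scores are discarded and only boxes are kept.

```python
import torch
from torchvision.models.detection import ssd300_vgg16, SSD300_VGG16_Weights

# Off-the-shelf SSD used purely as a class-agnostic box generator (Sec. 3.1):
# predicted labels and class probabilities are ignored.
detector = ssd300_vgg16(weights=SSD300_VGG16_Weights.COCO_V1).eval()

frame = torch.rand(3, 300, 300)          # stand-in for one RGB frame with values in [0, 1]
with torch.no_grad():
    out = detector([frame])[0]           # dict with 'boxes', 'scores', 'labels'
proposals = out["boxes"]                 # (N, 4) boxes in [x1, y1, x2, y2]; labels discarded
```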
# 3.2 Self-supervised Future Prediction

The second stage in our proposed framework is the self-supervised future prediction framework. We consider the future prediction module to be a generative model whose output is conditioned on two factors: the current observation and an internal event model. The current observation $f_{t}^{S}$ is the feature-level encoding of the presently observed frame $I_{t}$. We use the same feature encoder as the region proposal module to reduce the approach's memory footprint and complexity. The internal event model is a set of parameters that can effectively capture the spatial-temporal dynamics of the observed event. Formally, we define the predictor model as $P(\hat{f}_{t + 1}^{S}|W_{e},f_{t}^{S})$, where $W_{e}$ represents the internal event model and $\hat{f}_{t + 1}^{S}$ is the predicted feature at time $t + 1$. Note that the feature $f_{t}^{S}$ is not a one-dimensional vector but a tensor (of dimension $w_{f}\times h_{f}\times d_{f}$) representing the features at each spatial location.
We model the temporal dynamics of the observed event using Long Short-Term Memory networks (LSTMs) [12]. While other approaches [21, 48, 49] can be used for prediction, we consider LSTMs to be better suited for the following reasons. First, we want to model the temporal dynamics across all frames of the observed action (or event). Second, LSTMs allow for multiple possible futures and hence do not tend to average the outcomes of these possible futures, as can be the case with other prediction models. Third, since we work with error-based localization, using LSTMs ensures that the learning process propagates the spatial-temporal error across time and can yield progressively better predictions, especially for actions of longer duration. Formally, the LSTM can be expressed as

$$
i_{t} = \sigma\left(W_{i} x_{t} + W_{hi} h_{t-1} + b_{i}\right); \quad f_{t} = \sigma\left(W_{f} x_{t} + W_{hf} h_{t-1} + b_{f}\right) \tag{1}
$$

$$
o_{t} = \sigma\left(W_{o} x_{t} + W_{ho} h_{t-1} + b_{o}\right); \quad g_{t} = \phi\left(W_{g} x_{t} + W_{hg} h_{t-1} + b_{g}\right) \tag{2}
$$

$$
m_{t} = f_{t} \cdot m_{t-1} + i_{t} \cdot g_{t}; \quad h_{t} = o_{t} \cdot \phi(m_{t}) \tag{3}
$$
where $x_{t}$ is the input at time $t$, $\sigma$ is the sigmoid activation function, $(\cdot)$ represents element-wise multiplication, $\phi$ is the hyperbolic tangent function (tanh), and $W_{k}$ and $b_{k}$ represent the trained weights and biases for each of the gates.

As opposed to [2], who also use an LSTM-based predictor and a decoder network, we use a hierarchical LSTM model (with three LSTM layers) as our event model. This modification allows us to model both spatial and temporal dependencies, since each higher-level LSTM acts as a progressive decoder that refines the temporal dependencies captured by the lower-level LSTMs. The first LSTM captures the spatial dependency that is propagated up the prediction stack. The updated hidden state of the first (bottom) LSTM layer $(h_t^1)$ depends on the current observation $f_t^S$, the previous hidden state $(h_{t - 1}^{1})$, and the memory state $(m_{t - 1}^{1})$. Each higher-level LSTM at level $l$ takes the output $h_t^{l - 1}$ and memory state $m_t^{l - 1}$ of the LSTM below it and can be defined as $(h_t^l, m_t^l) = \mathrm{LSTM}(h_{t - 1}^l, h_t^{l - 1}, m_t^{l - 1})$. Note that this differs from a typical hierarchical LSTM model [35] in that the higher LSTMs are driven by the output of the lower-level LSTMs at the current time step, as opposed to that from the previous time step. Collectively, the event model $W_{e}$ is described by the learnable parameters and their respective biases from the hierarchical LSTM stack.
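As a concrete illustration, below is a minimal PyTorch-style sketch of a three-level prediction stack in which each level consumes the current output of the level below it. The class name, the use of `nn.LSTMCell`, the linear decoding head, and the simplification of passing only the hidden state (not the memory state) upward are illustrative assumptions; the sketch also treats the feature at a single spatial location as a flat vector.

```python
import torch
import torch.nn as nn

class HierarchicalPredictor(nn.Module):
    """Three-level LSTM stack: level l consumes the *current* output of level l-1,
    unlike a standard stacked LSTM, which would use the previous time step."""
    def __init__(self, feat_dim=512, hidden=512, levels=3):
        super().__init__()
        dims = [feat_dim] + [hidden] * (levels - 1)
        self.cells = nn.ModuleList([nn.LSTMCell(d, hidden) for d in dims])
        self.decode = nn.Linear(hidden, feat_dim)   # top of the stack predicts f_{t+1}

    def forward(self, f_t, state):
        # f_t: (B, feat_dim) feature vector; state: list of (h, m) pairs, one per level
        x, new_state = f_t, []
        for cell, (h_prev, m_prev) in zip(self.cells, state):
            h, m = cell(x, (h_prev, m_prev))        # level l sees level l-1's current output
            new_state.append((h, m))
            x = h
        return self.decode(x), new_state            # predicted next-step feature, new state

# usage: one prediction step for a single flattened spatial location
model = HierarchicalPredictor()
state = [(torch.zeros(1, 512), torch.zeros(1, 512)) for _ in range(3)]
f_hat, state = model(torch.randn(1, 512), state)
```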
Hence, the top layer of the prediction stack acts as the decoder, whose goal is to predict the next feature $f_{t + 1}^{S}$ given all previous predictions $\hat{f}_1^S, \hat{f}_2^S, \dots, \hat{f}_t^S$, an event model $W_{e}$, and the current observation $f_{t}^{S}$. We model this prediction function as a log-linear model characterized by

$$
\log p\left(\hat{f}_{t+1}^{S} \mid h_{t}^{l}\right) = \sum_{n=1}^{t} f\left(W_{e}, f_{t}^{S}\right) + \log Z\left(h_{t}\right) \tag{4}
$$
where $h_t^l$ is the hidden state of the $l^{th}$-level LSTM at time $t$ and $Z(h_{t})$ is a normalization constant. The LSTM prediction stack acts as a generative process for anticipating future features.

The objective function for training the predictive stack is the difference between the predicted features and the actual observed features, weighted by the zero-order hold difference between consecutive observations. The prediction error at time $t$ is given by $E(t) = \frac{1}{n_f} \sum_{i=1}^{w_f} \sum_{j=1}^{h_f} e_{ij}$, where

$$
e_{ij} = \hat{m}_{t}(i, j) \odot \left\| f_{t+1}^{S}(i, j) - \hat{f}_{t+1}^{S}(i, j) \right\|_{\ell_{2}}^{2} \tag{5}
$$

Each feature $f_{t}^{S}$ has dimensions $w_{f} \times h_{f} \times d_{f}$, and $\hat{m}_{t}(i,j)$ is a function that returns the zero-order difference between the observed features at times $t$ and $t + 1$ at location $(i,j)$. Note that the prediction is done at the feature level and not at the pixel level, so the spatial quantization is coarser than pixels.
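A small sketch of the error computation in Equation 5 follows, assuming the zero-order-hold weight $\hat{m}_t(i,j)$ is the magnitude of the frame-to-frame feature change at each location; the exact form of this weighting is an assumption.

```python
import torch

def prediction_error_map(f_t, f_t1, f_t1_hat):
    """Weighted prediction error e_ij (Eq. 5) and its spatial average E(t).
    f_t, f_t1: observed feature maps at times t and t+1, shape (w_f, h_f, d_f).
    f_t1_hat: predicted feature map for time t+1, same shape."""
    m_hat = (f_t1 - f_t).norm(dim=-1)               # zero-order-hold change at each (i, j)
    sq_err = (f_t1 - f_t1_hat).pow(2).sum(dim=-1)   # squared L2 prediction error per location
    e = m_hat * sq_err                              # large where the scene changed AND was mispredicted
    return e, e.mean()                              # (w_f, h_f) error map and scalar E(t)

# toy usage with random 14 x 14 x 512 feature maps
f_t, f_t1, f_hat = (torch.randn(14, 14, 512) for _ in range(3))
e, E_t = prediction_error_map(f_t, f_t1, f_hat)
```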
# 3.3 Prediction Error-based Attention Map

At the core of our approach is the idea of using the spatial-temporal prediction error to localize the actions of interest in the video. It takes into account the quality of the predictions made and the relative spatial alignment of the prediction errors. The input to the error detection module is the quantity from Equation 5. We compute a weight $\alpha_{ij}$ associated with each spatial location $(i,j)$ in the predicted feature $\hat{f}_{t + 1}^{S}$ as

$$
\alpha_{ij} = \frac{\exp\left(e_{ij}\right)}{\sum_{m=1}^{w_{f}} \sum_{n=1}^{h_{f}} \exp\left(e_{mn}\right)} \tag{6}
$$

where $e_{ij}$ represents the weighted prediction error at location $(i,j)$ (Equation 5). It can be considered a function $a(f_t^S, h_{t-1}^l)$ of the state of the top-most LSTM and the input feature $f_t^S$ at time $t$. The resulting matrix is an error-based attention map that allows us to localize the prediction error at a specific spatial location, while the average spatial error over time, $E(t)$, is used for temporal localization.
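The attention map of Equation 6 is simply a spatial softmax over that error map; a short sketch (where `e` is the map returned by the earlier error sketch):

```python
import torch

def error_attention(e):
    """Spatial softmax over prediction errors (Eq. 6): alpha_ij sums to 1 over the map."""
    return torch.softmax(e.flatten(), dim=0).view_as(e)

alpha = error_attention(torch.randn(14, 14))   # peaks where the prediction fails the most
```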
One may remark that the formulation of $\alpha_{ij}$ is very similar to Bahdanau attention [4]. However, there are two key differences. First, our formulation is not parameterized and does not add to the number of learnable parameters in the framework. Second, our attention map is a characterization of the difficulty of anticipating unpredictable motion, whereas Bahdanau attention is an effort to increase the decoder's encoding ability and does not characterize the unpredictability of the future feature. We compare the two types of attention in Section 5.4, where we see that error-based localization is more suitable for our application.
# 3.4 Extraction of Action Tubes

The action localization module receives a stream of bounding box proposals and an error-based attention map and selects an output tube. The action localization step is a selection algorithm that filters all region proposals from Section 3.1 and returns the collection of proposals that have a higher probability of localizing the action. We do so by assigning an energy term to each bounding box proposal $\mathcal{B}_{it}$ at time $t$ and choosing the top $k$ bounding boxes with the least energy as our final proposals. The energy of a bounding box $\mathcal{B}_{it}$ is defined as

$$
E\left(\mathcal{B}_{it}\right) = w_{\alpha}\, \phi\left(\alpha_{ij}, \mathcal{B}_{it}\right) + w_{t}\, \delta\left(\mathcal{B}_{it}, \left\{\mathcal{B}_{j,t-1}\right\}\right) \tag{7}
$$

where $\phi(\cdot)$ is a function that returns a value characteristic of the distance between the bounding box center and the location of maximum error, and $\delta(\cdot)$ is a function that returns the minimum spatial distance between the current bounding box and the closest bounding box from the previous time step. The constants $w_{\alpha}$ and $w_{t}$ are scaling factors. Note that $\delta(\cdot)$ is introduced to enforce temporal consistency in the predictions, but we find that it is optional since the LSTM prediction stack implicitly enforces temporal consistency through its memory states. In our experiments, we set $k = 10$ and $w_{\alpha} = 0.75$.
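A rough numpy sketch of this selection step is given below, assuming the attention map has been resized to frame resolution, that $\phi$ and $\delta$ are plain Euclidean distances, and that $w_t = 0.25$; these specifics are illustrative assumptions, not the paper's exact choices.

```python
import numpy as np

def select_action_proposals(boxes, alpha, prev_boxes, k=10, w_alpha=0.75, w_t=0.25):
    """Rank class-agnostic proposals by the energy of Eq. 7 and keep the k lowest.
    boxes: (N, 4) array of [x1, y1, x2, y2]; alpha: 2D attention map in the same
    coordinate frame; prev_boxes: (M, 4) boxes selected at the previous frame."""
    peak = np.unravel_index(alpha.argmax(), alpha.shape)           # (row, col) of max error
    centers = np.stack([(boxes[:, 0] + boxes[:, 2]) / 2,
                        (boxes[:, 1] + boxes[:, 3]) / 2], axis=1)  # (x_c, y_c) per box
    phi = np.linalg.norm(centers - np.array(peak)[::-1], axis=1)   # distance to error peak
    if len(prev_boxes):
        prev_centers = np.stack([(prev_boxes[:, 0] + prev_boxes[:, 2]) / 2,
                                 (prev_boxes[:, 1] + prev_boxes[:, 3]) / 2], axis=1)
        delta = np.linalg.norm(centers[:, None] - prev_centers[None], axis=2).min(axis=1)
    else:
        delta = np.zeros(len(boxes))                               # optional consistency term
    energy = w_alpha * phi + w_t * delta
    return boxes[np.argsort(energy)[:k]]                           # least-energy proposals
```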
# 3.5 Implementation Details

In our experiments, we use a VGG-16 [34] network pre-trained on ImageNet as our feature extraction network. We use the output of the last convolutional layer before the fully connected layers as our spatial features; hence, the dimensions of the spatial features are $w_{f} = 14$, $h_{f} = 14$, $d_{f} = 512$. These output features are then used by an SSD [29] to generate bounding box proposals. Note that we take the generated bounding box proposals without taking into account classes and their associated probabilities. We use a three-layer hierarchical LSTM model with a hidden state size of 512 as our predictor module, using the vanilla LSTM as proposed in [12]. Video-level features are obtained by max-pooling, across time, the element-wise dot-product of the hidden state of the top-most LSTM and the attention values. We train with the adaptive learning mechanism proposed in [2], with the initial learning rate set to $1 \times 10^{-8}$ and the scaling factors $\Delta_{t}^{-}$ and $\Delta_{t}^{+}$ set to $1 \times 10^{-2}$ and $1 \times 10^{-3}$, respectively. The network was trained for one epoch on a machine with a single Titan X Pascal GPU.
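For reference, the $14 \times 14 \times 512$ feature map corresponds to the last convolutional block of VGG-16 before the final pooling and the classifier; a minimal torchvision sketch is shown below (the weight enum and the input size are assumptions, and older torchvision versions use `pretrained=True` instead of the `weights` argument).

```python
import torch
from torchvision.models import vgg16, VGG16_Weights

# VGG-16 backbone shared by the predictor and the proposal generator; dropping the
# final max-pool keeps the conv5_3 resolution of 14 x 14 with 512 channels.
backbone = vgg16(weights=VGG16_Weights.IMAGENET1K_V1).features[:-1].eval()

frame = torch.randn(1, 3, 224, 224)      # stand-in for one preprocessed video frame
with torch.no_grad():
    f_t = backbone(frame)                # shape (1, 512, 14, 14): w_f = h_f = 14, d_f = 512
```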
# 4 Experimental Setup

# 4.1 Data

We evaluate the proposed approach on the action localization task using three publicly available datasets.

UCF Sports [32] is an action localization dataset consisting of 10 classes of sports actions, such as skating and lifting, collected from sports broadcasts. It is an interesting dataset since it has a high concentration of distinct scenes and motions that make it challenging for localization and recognition. We use the splits (103 training and 47 testing videos) defined in [26] for evaluation.

JHMDB [19] is composed of 21 action classes and 928 trimmed videos. All videos are annotated with human joints for every frame. The ground truth bounding box for the action localization task is chosen such that the box encompasses all the joints. This dataset offers several challenges, such as increasing amounts of background clutter, high inter-class similarity, complex motion (including camera motion), and occluded objects of interest. We report all results as the average across all three splits.

THUMOS'13 [22] is a subset of the UCF-101 [39] dataset, consisting of 24 classes and 3,207 videos. Ground truth bounding boxes are provided for each of the classes for the action localization task. It is also known as the UCF-101-24 dataset. Following prior works [28, 38], we perform our experiments and report results on the first split.

We also evaluate the proposed approach's ability to generalize to egocentric videos by evaluating it on the unsupervised gaze prediction task. There is evidence from cognitive psychology of a strong correlation between gaze points and action localization [41]. Hence, the gaze prediction task is a reasonable measure of generalization to action localization in egocentric videos. We evaluate the performance on the GTEA Gaze [6] dataset, which consists of 17 sequences of tasks performed by 14 subjects, with each sequence lasting about 4 minutes. We use the official splits for the GTEA dataset as defined in prior works [6].
# 4.2 Metrics and Baselines

For the action localization task, we follow prior works [28, 38] and report the mean average precision (mAP) at various overlap thresholds, obtained by computing the Intersection over Union (IoU) of the predicted and ground truth bounding boxes. We also evaluate the quality of the bounding box proposals by measuring the average per-frame IoU and the bounding box recall at varying overlap ratios.
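A short sketch of the box overlap measure underlying these metrics follows; boxes are assumed to be `[x1, y1, x2, y2]` arrays, and the per-video averaging rule matches the description in Section 5.6.

```python
import numpy as np

def iou(box_a, box_b):
    """Intersection over Union of two boxes given as [x1, y1, x2, y2]."""
    x1, y1 = max(box_a[0], box_b[0]), max(box_a[1], box_b[1])
    x2, y2 = min(box_a[2], box_b[2]), min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    return inter / (area_a + area_b - inter + 1e-9)

def video_iou(pred_boxes, gt_boxes):
    """Average per-frame IoU over a video; a video counts as correctly localized at
    overlap threshold sigma when this average exceeds sigma."""
    return float(np.mean([iou(p, g) for p, g in zip(pred_boxes, gt_boxes)]))
```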
Since ours is an unsupervised approach, we obtain class labels by clustering the learned representations using the $k$-means algorithm. While more sophisticated clustering may yield better recognition results [38], the $k$-means approach allows us to evaluate the robustness of the learned features. We evaluate our approach in two settings, $K_{gt}$ and $K_{opt}$, where the number of clusters is set to the number of ground truth action classes and to an optimal number obtained through the elbow method [24], respectively. From our experiments, we observe that $K_{opt}$ is three times the number of ground truth classes, which is not unreasonable and has been a working assumption in other deep learning-based clustering approaches [11]. Clusters are mapped to the ground truth classes for evaluation using the Hungarian method, as done in prior unsupervised approaches [20, 51]; a sketch of this procedure follows.
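The following is a minimal sketch of the labeling-by-clustering protocol, assuming scikit-learn's k-means and SciPy's Hungarian solver; the fallback of mapping any unassigned cluster to its majority class (needed when $K_{opt}$ exceeds the number of classes) is an assumption added for completeness.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.cluster import KMeans

def cluster_and_score(video_feats, y_true, n_clusters):
    """Cluster video-level features with k-means, map clusters to ground-truth classes
    with the Hungarian method, and return the resulting labeling accuracy."""
    y_pred = KMeans(n_clusters=n_clusters, n_init=10, random_state=0).fit_predict(video_feats)
    n_classes = int(y_true.max()) + 1
    cont = np.zeros((n_clusters, n_classes), dtype=np.int64)      # cluster-by-class counts
    for c, y in zip(y_pred, y_true):
        cont[c, y] += 1
    rows, cols = linear_sum_assignment(-cont)                     # maximize matched counts
    mapping = dict(zip(rows, cols))
    # assumption: clusters left unmatched (when n_clusters > n_classes) fall back
    # to their majority ground-truth class
    y_mapped = np.array([mapping.get(c, int(cont[c].argmax())) for c in y_pred])
    return float((y_mapped == y_true).mean())
```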
We also compare against other LSTM-based and attention-based approaches to the action localization problem (Section 5.3) to evaluate the effectiveness of the proposed training protocol.

For the gaze prediction task, we evaluate the approaches using the Area Under the Curve (AUC), which measures the area under the curve of true-positive versus false-positive rates computed from the saliency maps under various threshold values. We also report the Average Angular Error (AAE), which measures the angular distance between the predicted and ground truth gaze positions. Since our model's output is a saliency map, AUC is a more appropriate metric than AAE, which requires specific gaze locations.
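One common way to compute a saliency AUC of this kind is to treat the saliency map as a per-pixel score for a binary fixated/not-fixated label; the sketch below follows that generic recipe and is not necessarily the exact protocol used in the paper.

```python
import numpy as np
from sklearn.metrics import roc_auc_score

def saliency_auc(saliency_map, fixation_mask):
    """AUC of a saliency map scored against a binary fixation mask of the same shape."""
    return roc_auc_score(fixation_mask.reshape(-1).astype(int),
                         saliency_map.reshape(-1))

# toy usage: a random saliency map scored against a single fixated pixel
sal = np.random.rand(64, 64)
fix = np.zeros((64, 64), dtype=bool)
fix[32, 40] = True
print(saliency_auc(sal, fix))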
# 5 Quantitative Evaluation

In this section, we present the quantitative evaluation of our approach on two different tasks, namely action localization and egocentric gaze prediction. For the action localization task, we evaluate our approach on two aspects: the quality of the proposals and spatial-temporal localization.

# 5.1 Quality of Localization Proposals

We first evaluate the quality of our localization proposals by assuming perfect class prediction. This allows us to independently assess the quality of the localization performed in a self-supervised manner. We present the results of this evaluation in Table 1 and compare against fully supervised, weakly supervised, and unsupervised baselines. As can be seen, we outperform many supervised and weakly supervised baselines. APT [45] achieves a higher localization score; however, it produces, on average, 1,500 proposals per video, whereas our approach returns approximately 10 proposals. A large number of localization proposals per video can lead to higher recall and IoU but makes the labeling task, i.e., assigning an action label to each video, harder, and can affect the ability to generalize across domains. It should also be noted that our approach produces proposals in a streaming fashion, as opposed to many of the other approaches, which produce action tubes based on motion computed across the entire video; the latter makes real-time action localization in streaming videos harder.
# 5.2 Spatial-temporal Action Localization

We also evaluate our approach on the spatial-temporal localization task. This evaluation allows us to analyze the robustness of the self-supervised features learned through prediction. We generate video-level class labels through clustering and use the standard evaluation metrics (Section 4.2) to quantify the performance. The AUC curves with respect to varying overlap thresholds are presented in Figure 2. We compare against a mix of supervised, weakly-supervised, and unsupervised baselines on all three datasets.

| Supervision | Approach | Average |
| --- | --- | --- |
| Full | STPD [42] | 44.6 |
| Full | Max Path Search [43] | 54.3 |
| Weak | Ma et al. [30] | 44.6 |
| Weak | GBVS [8] | 42.1 |
| Weak | Soomro et al. [38] | 47.7 |
| None | Tubelets [18] | 51.5 |
| None | APT [45] | 63.7 |
| None | Proposed Approach | 55.7 |

Table 1: Comparison with fully supervised and weakly supervised baselines on class-agnostic action localization on the UCF Sports dataset. We report the average localization accuracy of each approach, i.e., the average IoU.
On the UCF Sports dataset (Figure 2(a)), we outperform all baselines, including several supervised baselines, except for Gkioxari and Malik [7] at higher overlap thresholds ($\sigma > 0.4$) when we set the number of clusters $k$ to the number of ground truth classes. When we allow for some over-segmentation and use the optimal number of clusters, we outperform all baselines up to $\sigma = 0.5$.


(a)


(b)


(c)

Fig. 2: AUC for the action localization task on (a) UCF Sports, (b) JHMDB, and (c) THUMOS'13. We compare against baselines with varying levels of supervision, such as Lan et al. [26], Tian et al. [40], Wang et al. [50], Gkioxari and Malik [7], Jain et al. [18], Soomro et al. [36-38], Hou et al. [16], and VideoLSTM [28].
On the JHMDB dataset (Figure 2(b)), we find that, while our approach has high recall (77.8% at $\sigma = 0.5$), the large camera motion and intra-class variations have a significant impact on the classification accuracy. Hence, the mAP suffers when we set $k$ to the number of ground truth classes. When we set the number of clusters to the optimal number, we outperform the other baselines at lower thresholds ($\sigma < 0.5$). It should be noted that the other unsupervised baseline (Soomro et al. [38]) uses object detection proposals from a Faster R-CNN backbone to score the "humanness" of a proposal. This assumption tends to bias the approach towards human-centered action localization and affects its ability to generalize to actions with non-human actors. We, on the other hand, do not make any assumptions about the characteristics of the actor, scene, or motion dynamics.

On the THUMOS'13 dataset (Figure 2(c)), we achieve consistent improvements over unsupervised and weakly supervised baselines at $k = k_{gt}$ and achieve state-of-the-art mAP scores when $k = k_{opt}$. It is interesting to note that we perform competitively (when $k = k_{gt}$) with the weakly-supervised, attention-based VideoLSTM [28], which uses a convLSTM for temporal modeling along with a CNN-based spatial attention mechanism. We also have a higher recall rate at higher thresholds (0.47 at $\sigma = 0.4$ and 0.33 at $\sigma = 0.5$) than other state-of-the-art approaches on THUMOS'13, which shows the robustness of the error-based localization approach to intra-class variation and occlusion.
Clustering quality. Since there is a significant difference in the mAP score when we set a different number of clusters in k-means, we measured the homogeneity (or purity) of the clustering. The homogeneity score measures the "quality" of the clustering by measuring how well a cluster models a given ground-truth class. Since we allow over-segmentation of clusters when we set $k$ to the optimal number of clusters, this is an essential measure of feature robustness: higher homogeneity indicates that intra-class variations are captured, since all data points in a given cluster belong to the same ground truth class. We observe an average homogeneity score of $74.56\%$ when $k$ is set to the number of ground truth classes and $78.97\%$ when we use the optimal number of clusters. Although we over-segment, each cluster typically models a single action class to a high degree of integrity.
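The homogeneity measure is available directly in scikit-learn; a short sketch with toy labels (the example values are illustrative, not data from the paper):

```python
import numpy as np
from sklearn.metrics import homogeneity_score

y_true = np.array([0, 0, 1, 1, 2, 2])        # ground-truth action classes
y_pred = np.array([0, 0, 1, 2, 3, 3])        # over-segmented cluster assignments
print(homogeneity_score(y_true, y_pred))     # 1.0: every cluster contains a single class
```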
| Approach | Labels | Boxes | # Proposals | AR@0.1 | AR@0.2 | AR@0.3 | AR@0.4 | AR@0.5 | mAP@0.2 |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| ALSTM [33] | ✓ | ✗ | 1 | 0.46 | 0.28 | 0.05 | 0.02 | - | 0.06 |
| VideoLSTM [28] | ✓ | ✗ | 1 | 0.71 | 0.52 | 0.32 | 0.11 | - | 0.37 |
| Actor Supervision [5] | ✓ | ✗ | ~1000 | 0.89 | - | - | - | 0.44 | 0.46 |
| Proposed Approach | ✗ | ✗ | ~10 | 0.84 | 0.72 | 0.58 | 0.47 | 0.33 | 0.59 |

Table 2: Comparison with other LSTM-based and attention-based approaches on the THUMOS'13 dataset. We report the average recall (AR) at various overlap thresholds, the mAP at an overlap threshold of 0.2, and the average number of proposals per frame. "Labels" and "Boxes" indicate the annotations required for training.
# 5.3 Comparison with other LSTM-based approaches

We also compare our approach with other LSTM-based and attention-based models to highlight the importance of the proposed self-supervised learning paradigm. Since LSTM-based frameworks can have highly similar architectures, we consider different requirements and characteristics, such as the level of annotation required for training and the number of localization proposals returned per video. We compare with three approaches similar in spirit to ours, ALSTM [33], VideoLSTM [28], and Actor Supervision [5], and summarize the results in Table 2. It can be seen that we significantly outperform VideoLSTM and ALSTM on the THUMOS'13 dataset in both recall and mAP at $\sigma = 0.2$. Actor Supervision [5] outperforms our approach on recall, but note that its region proposals depend on two factors: (i) object detection-based actor proposals and (ii) a filtering mechanism that limits proposals based on ground truth action classes, which can increase the training requirements and limit generalizability. Also, note that returning a higher number of localization proposals can increase recall at the cost of generalization.
# 5.4 Ablative Studies

The proposed approach has three major units that affect its performance the most: (i) the region proposal module, (ii) the future prediction module, and (iii) the error-based action localization module. We consider and evaluate several alternatives for all three modules.

We choose selective search [44] and EdgeBox [54] as alternative region proposal methods to SSD. We use an attention-based localization method, as an approximation of ALSTM [33], to evaluate the effectiveness of the proposed error-based localization. We also evaluate a 1-layer LSTM predictor with a fully connected decoder network, approximating [2], on the localization task. Finally, we evaluate the effect of attention-based prediction by introducing a Bahdanau attention layer [4] before prediction as an alternative to the error-based action localization module.

These ablative studies are conducted on the UCF Sports dataset, and the results are plotted in Figure 3(a). It can be seen that prediction error-based localization yields a significant improvement over a trained attention-based localization approach. We can also see that the choice of region proposal method has some effect on the performance of the approach, with selective search and EdgeBox proposals doing slightly better at higher thresholds ($\sigma \in (0.4, 0.5)$) at the cost of inference time and additional bounding box proposals (50 compared to the 10 from the SSD-based region proposal). Using SSD to generate proposals allows us to share weights across the frame encoding and region proposal tasks and hence reduces the memory and computational footprint of the approach. We also find that using attention as part of the prediction module significantly impacts the architecture's performance. This could, arguably, be attributed to the objective function, which aims to minimize the prediction error; using attention to encode the input could interfere with the prediction function.
# 5.5 Unsupervised Egocentric Gaze Prediction

Finally, we evaluate the ability to generalize to egocentric videos by quantifying the model's performance on the unsupervised gaze prediction task. Given that we do not need any annotations or other auxiliary data, we employ the same architecture and training strategy for this task. We evaluate on the GTEA Gaze dataset and compare with other unsupervised models in Table 3. As can be seen, we obtain competitive results on the gaze prediction task, outperforming all baselines on both the AUC and AAE scores.

| | Itti et al. [17] | GBVS [10] | AWS-D [27] | Center Bias | OBDL [15] | Ours |
| --- | --- | --- | --- | --- | --- | --- |
| AUC | 0.747 | 0.769 | 0.770 | 0.789 | 0.801 | 0.861 |
| AAE | 18.4 | 15.3 | 18.2 | 10.2 | 15.6 | 13.6 |

Table 3: Comparison with the state of the art on the unsupervised egocentric gaze prediction task on the GTEA dataset.

It is to be noted that we outperform the center bias method on the AUC metric. Center bias exploits the spatial bias in egocentric images and always predicts the center of the video frame as the gaze position. The significant improvement on the AUC metric indicates that our approach predicts gaze fixations that are more closely aligned with the ground truth than the center bias approach. Given that the model was not designed explicitly for this task, this is a remarkable performance, especially considering fully supervised baselines such as DFG [53], which reports 88.3 and 10.6 for AUC and AAE, respectively.

|
| 208 |
+
(a)
|
| 209 |
+
|
| 210 |
+

|
| 211 |
+
(b)
|
| 212 |
+
Fig. 3: Qualitative analysis of the proposed approach on UCF Sports dataset (a) ablative variations on AUC. (a) class-wise AUC, and (c) class-wise bounding box recall at different overlap thresholds.
|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
(c)
|
| 216 |
+
|
| 217 |
+
# 5.6 Qualitative Evaluation

We find that our approach has a consistently high recall on the localization task across datasets and domains. We consider an action to be correctly localized if the average IoU across all frames is higher than 0.5, which indicates that most, if not all, frames in a video are correctly localized. We illustrate the recall scores and the corresponding AUC scores for each class in the UCF Sports dataset in Figures 3(b) and (c). For many classes (7 out of 10, to be specific), we have more than $80\%$ recall at an overlap threshold of 0.5. We find, through visual inspection, that the spatial-temporal error is often correlated with the actor but is usually not at the center of the region of interest, which reduces the quality of the chosen proposals. We illustrate this effect in Figure 4: the first row shows the input frame, the second shows the error-based attention, and the last row shows the final localization proposals. If more proposals are returned (as is the case with selective search and EdgeBox), we can obtain a higher recall (Figure 3(b)) and a higher mAP.


Fig. 4: Qualitative examples: the error-based attention location and the final prediction, for both successful and unsuccessful localizations. Green box: prediction; blue box: ground truth.
# 6 Conclusion

In this work, we introduce a self-supervised approach to action localization, driven by spatial-temporal error localization. We show that self-supervised prediction on video frames can help learn highly robust features and obtain state-of-the-art localization results without any training annotations. We also show that the proposed framework can work with a variety of proposal generation methods without losing performance, and that the approach can generalize to egocentric videos, without changing the training methodology or the framework, to obtain competitive performance on the unsupervised gaze prediction task.

# Acknowledgement

This research was supported in part by the US National Science Foundation grants CNS 1513126, IIS 1956050, and IIS 1955230.
# References

1. Aakur, S., de Souza, F.D., Sarkar, S.: Going deeper with semantics: Exploiting semantic contextualization for interpretation of human activity in videos. In: IEEE Winter Conference on Applications of Computer Vision (WACV). IEEE (2019)
2. Aakur, S.N., Sarkar, S.: A perceptual prediction framework for self supervised event segmentation. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (June 2019)
3. Aakur, S.N., de Souza, F.D., Sarkar, S.: Towards a knowledge-based approach for generating video descriptions. In: Conference on Computer and Robot Vision (CRV). Springer (2017)
4. Bahdanau, D., Cho, K., Bengio, Y.: Neural machine translation by jointly learning to align and translate. arXiv preprint arXiv:1409.0473 (2014)
5. Escorcia, V., Dao, C.D., Jain, M., Ghanem, B., Snoek, C.: Guess where? Actor-supervision for spatiotemporal action localization. Computer Vision and Image Understanding 192, 102886 (2020)
6. Fathi, A., Li, Y., Rehg, J.M.: Learning to recognize daily actions using gaze. In: European Conference on Computer Vision. pp. 314-327. Springer (2012)
7. Gkioxari, G., Malik, J.: Finding action tubes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 759-768 (2015)
8. Grundmann, M., Kwatra, V., Han, M., Essa, I.: Efficient hierarchical graph-based video segmentation. In: 2010 IEEE Computer Society Conference on Computer Vision and Pattern Recognition. pp. 2141-2148. IEEE (2010)
9. Guo, Z., Gao, L., Song, J., Xu, X., Shao, J., Shen, H.T.: Attention-based LSTM with semantic consistency for videos captioning. In: ACM Conference on Multimedia (ACM MM). pp. 357-361. ACM (2016)
10. Harel, J., Koch, C., Perona, P.: Graph-based visual saliency. In: Advances in Neural Information Processing Systems. pp. 545-552 (2007)
11. Hershey, J.R., Chen, Z., Le Roux, J., Watanabe, S.: Deep clustering: Discriminative embeddings for segmentation and separation. In: 2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). pp. 31-35. IEEE (2016)
12. Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural Computation 9(8), 1735-1780 (1997)
13. Horstmann, G., Herwig, A.: Surprise attracts the eyes and binds the gaze. Psychonomic Bulletin & Review 22(3), 743-749 (2015)
14. Horstmann, G., Herwig, A.: Novelty biases attention and gaze in a surprise trial. Attention, Perception, & Psychophysics 78(1), 69-77 (2016)
15. Hossein Khatoonabadi, S., Vasconcelos, N., Bajic, I.V., Shan, Y.: How many bits does it take for a stimulus to be salient? In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5501-5510 (2015)
16. Hou, R., Chen, C., Shah, M.: Tube convolutional neural network (T-CNN) for action detection in videos. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV). pp. 5822-5831 (2017)
17. Itti, L., Koch, C.: A saliency-based search mechanism for overt and covert shifts of visual attention. Vision Research 40(10-12), 1489-1506 (2000)
18. Jain, M., Van Gemert, J., Jégou, H., Bouthemy, P., Snoek, C.G.: Action localization with tubelets from motion. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 740-747 (2014)
19. Jhuang, H., Gall, J., Zuffi, S., Schmid, C., Black, M.J.: Towards understanding action recognition. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3192-3199 (2013)
20. Ji, X., Henriques, J.F., Vedaldi, A.: Invariant information clustering for unsupervised image classification and segmentation. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 9865-9874 (2019)
21. Jia, X., De Brabandere, B., Tuytelaars, T., Gool, L.V.: Dynamic filter networks. In: Neural Information Processing Systems. pp. 667-675 (2016)
22. Jiang, Y.G., Liu, J., Zamir, A.R., Toderici, G., Laptev, I., Shah, M., Sukthankar, R.: THUMOS challenge: Action recognition with a large number of classes (2014)
23. Karpathy, A., Toderici, G., Shetty, S., Leung, T., Sukthankar, R., Fei-Fei, L.: Large-scale video classification with convolutional neural networks. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 1725-1732 (2014)
24. Kodinariya, T.M., Makwana, P.R.: Review on determining number of cluster in k-means clustering. International Journal 1(6), 90-95 (2013)
25. Kuehne, H., Arslan, A., Serre, T.: The language of actions: Recovering the syntax and semantics of goal-directed human activities. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 780-787 (2014)
26. Lan, T., Wang, Y., Mori, G.: Discriminative figure-centric models for joint action localization and recognition. In: 2011 International Conference on Computer Vision. pp. 2003-2010. IEEE (2011)
27. Leboran, V., Garcia-Diaz, A., Fdez-Vidal, X.R., Pardo, X.M.: Dynamic whitening saliency. IEEE Transactions on Pattern Analysis and Machine Intelligence 39(5), 893-907 (2016)
28. Li, Z., Gavrilyuk, K., Gavves, E., Jain, M., Snoek, C.G.: VideoLSTM convolves, attends and flows for action recognition. Computer Vision and Image Understanding 166, 41-50 (2018)
29. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: SSD: Single shot multibox detector. In: European Conference on Computer Vision. pp. 21-37. Springer (2016)
30. Ma, S., Zhang, J., Ikizler-Cinbis, N., Sclaroff, S.: Action recognition and localization by hierarchical space-time segments. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 2744-2751 (2013)
31. Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: Unified, real-time object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 779-788 (2016)
32. Rodriguez, M.D., Ahmed, J., Shah, M.: Action MACH: A spatio-temporal maximum average correlation height filter for action recognition. In: 2008 IEEE Conference on Computer Vision and Pattern Recognition. pp. 1-8. IEEE (2008)
33. Sharma, S., Kiros, R., Salakhutdinov, R.: Action recognition using visual attention. In: Neural Information Processing Systems: Time Series Workshop (2015)
34. Simonyan, K., Zisserman, A.: Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556 (2014)
35. Song, J., Gao, L., Guo, Z., Liu, W., Zhang, D., Shen, H.T.: Hierarchical LSTM with adjusted temporal attention for video captioning. In: Proceedings of the 26th International Joint Conference on Artificial Intelligence. pp. 2737-2743. AAAI Press (2017)
36. Soomro, K., Idrees, H., Shah, M.: Action localization in videos through context walk. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3280-3288 (2015)
37. Soomro, K., Idrees, H., Shah, M.: Predicting the where and what of actors and actions through online action localization. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2648-2657 (2016)
38. Soomro, K., Shah, M.: Unsupervised action discovery and localization in videos. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 696-705 (2017)
39. Soomro, K., Zamir, A.R., Shah, M.: UCF101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)
40. Tian, Y., Sukthankar, R., Shah, M.: Spatiotemporal deformable part models for action detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2642-2649 (2013)
41. Tipper, S.P., Lortie, C., Baylis, G.C.: Selective reaching: Evidence for action-centered attention. Journal of Experimental Psychology: Human Perception and Performance 18(4), 891 (1992)
42. Tran, D., Yuan, J.: Optimal spatio-temporal path discovery for video event detection. In: CVPR 2011. pp. 3321-3328. IEEE (2011)
43. Tran, D., Yuan, J.: Max-margin structured output regression for spatio-temporal action localization. In: Advances in Neural Information Processing Systems. pp. 350-358 (2012)
44. Uijlings, J.R., Van De Sande, K.E., Gevers, T., Smeulders, A.W.: Selective search for object recognition. International Journal of Computer Vision (IJCV) 104(2), 154-171 (2013)
45. Van Gemert, J.C., Jain, M., Gati, E., Snoek, C.G., et al.: APT: Action localization proposals from dense trajectories. In: BMVC. vol. 2, p. 4 (2015)
46. Venugopalan, S., Rohrbach, M., Donahue, J., Mooney, R., Darrell, T., Saenko, K.: Sequence to sequence - video to text. In: IEEE International Conference on Computer Vision (ICCV). pp. 4534-4542 (2015)
47. Venugopalan, S., Xu, H., Donahue, J., Rohrbach, M., Mooney, R., Saenko, K.: Translating videos to natural language using deep recurrent neural networks. arXiv preprint arXiv:1412.4729 (2014)
48. Vondrick, C., Pirsiavash, H., Torralba, A.: Anticipating visual representations from unlabeled video. In: IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 98-106 (2016)
49. Vondrick, C., Torralba, A.: Generating the future with adversarial transformers. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1020-1028 (2017)
50. Wang, L., Qiao, Y., Tang, X.: Video action detection with relational dynamic-poselets. In: European Conference on Computer Vision. pp. 565-580. Springer (2014)
51. Xie, J., Girshick, R., Farhadi, A.: Unsupervised deep embedding for clustering analysis. In: International Conference on Machine Learning (ICML). pp. 478-487 (2016)
52. Zacks, J.M., Tversky, B., Iyer, G.: Perceiving, remembering, and communicating structure in events. Journal of Experimental Psychology: General 130(1), 29 (2001)
53. Zhang, M., Teck Ma, K., Hwee Lim, J., Zhao, Q., Feng, J.: Deep future gaze: Gaze anticipation on egocentric videos using adversarial networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 4372-4381 (2017)
54. Zhu, G., Porikli, F., Li, H.: Tracking randomly moving objects on edge box proposals. arXiv preprint arXiv:1507.08085 (2015)
actionlocalizationthroughcontinualpredictivelearning/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a1a330d9ef8b96642e0bc52353eeb590d452a943f09488886687358442dbc65
size 422331
actionlocalizationthroughcontinualpredictivelearning/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c1a69b2a15e5379414dfbd3db13791b4aa2baf9810f1126c9be651eeed4cbf0
size 389725
actionsasmovingpoints/a3d29da9-cfad-485f-a46c-15611092570e_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1fd2ef48ec4fb828011e50338b749045ccd4a2a7d935afe12fe3b4a6c7a0903a
size 77721
actionsasmovingpoints/a3d29da9-cfad-485f-a46c-15611092570e_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:10b6da23b26a7767ff184f377dfb4dce71562c41ef77e8293cd8aff1ec88c849
size 94173
actionsasmovingpoints/a3d29da9-cfad-485f-a46c-15611092570e_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:48c8caf7d8ee45e8de3c04df8b245ef2911d16f56fa8a2bfbc1e0076ac3c4c95
size 1668003
actionsasmovingpoints/full.md
ADDED
|
@@ -0,0 +1,281 @@
# Actions as Moving Points
|
| 2 |
+
|
| 3 |
+
Yixuan Li\*, Zixu Wang\*, Limin Wang[0000-0002-3674-7718], and Gangshan Wu
|
| 4 |
+
|
| 5 |
+
State Key Laboratory for Novel Software Technology, Nanjing University, China {liyixxxuan,zixuwang1997}@gmail.com, {lmwang,gswu}@nju.edu.cn
|
| 6 |
+
|
| 7 |
+
Abstract. The existing action tubelet detectors often depend on heuristic anchor design and placement, which might be computationally expensive and sub-optimal for precise localization. In this paper, we present a conceptually simple, computationally efficient, and more precise action tubelet detection framework, termed as MovingCenter Detector (MOC-detector), by treating an action instance as a trajectory of moving points. Based on the insight that movement information could simplify and assist action tubelet detection, our MOC-detector is composed of three crucial head branches: (1) Center Branch for instance center detection and action recognition, (2) Movement Branch for movement estimation at adjacent frames to form trajectories of moving points, (3) Box Branch for spatial extent detection by directly regressing bounding box size at each estimated center. These three branches work together to generate the tubelet detection results, which could be further linked to yield video-level tubes with a matching strategy. Our MOC-detector outperforms the existing state-of-the-art methods for both metrics of frame-mAP and video-mAP on the JHMDB and UCF101-24 datasets. The performance gap is more evident for higher video IoU, demonstrating that our MOC-detector is particularly effective for more precise action detection. We provide the code at https://github.com/MCG-NJU/MOC-Detector.
|
| 8 |
+
|
| 9 |
+
Keywords: Spatio-temporal action detection, anchor-free detection
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Spatio-temporal action detection is an important problem in video understanding, which aims to recognize all action instances present in a video and also localize them in both space and time. It has wide applications in many scenarios, such as video surveillance [20,12], video captioning [31,36] and event detection [5]. Some early approaches [8,21,25,32,33,26] apply an action detector at each frame independently and then generate action tubes by linking these frame-wise detection results [8,21,25,32,26] or tracking one detection result [33] across time. These methods fail to well capture temporal information when conducting frame-level detection, and thus are less effective for detecting action tubes in reality. To address this issue, some approaches [24,14,11,35,38,27] try
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
(c) Move the 'Point' to each frame center
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
(d) Generate bbox from each center (Tubelet detection result)
|
| 24 |
+
Fig. 1. Motivation Illustration. We focus on devising an action tubelet detector from a short sequence. Movement information naturally describes human behavior, and each action instance could be viewed as a trajectory of moving points. In this view, action tubelet detector could be decomposed into three simple steps: (1) localizing the center point (red dots) at key frame (i.e., center frame), (2) estimating the movement at each frame with respect to the center point (yellow arrows), (3) regressing bounding box size at the calculated center point (green dots) for all frames. Best viewed in color and zoom in.
|
| 25 |
+
|
| 26 |
+
to perform action detection at the clip-level by exploiting short-term temporal information. In this sense, these methods input a sequence of frames and directly output detected tubelets (i.e., a short sequence of bounding boxes). This tubelet detection scheme yields a more principled and effective solution for video-based action detection and has shown promising results on standard benchmarks.
|
| 27 |
+
|
| 28 |
+
The existing tubelet detection methods [24,14,11,35,38,27] are closely related with the current mainstream object detectors such as Faster R-CNN [23] or SSD [19], which operate on a huge number of pre-defined anchor boxes. Although these anchor-based object detectors have achieved success in image domains, they still suffer from critical issues such as being sensitive to hyper-parameters (e.g., box size, aspect ratio, and box number) and less efficient due to densely placed bounding boxes. These issues are more serious when adapting the anchor-based detection framework from images to videos. First, the number of possible tubelet anchors would grow dramatically when increasing clip duration, which imposes a great challenge for both training and inference. Second, it is generally required to devise more sophisticated anchor box placement and adjustment to consider the variation along the temporal dimension. In addition, these anchor-based methods directly extend 2D anchors along the temporal dimension which predefine each action instance as a cuboid across space and time. This assumption lacks the flexibility to well capture temporal coherence and correlation of adjacent frame-level bounding boxes.
|
| 29 |
+
|
| 30 |
+
Inspired by the recent advances in anchor-free object detection [22,15,4,40,30], we present a conceptually simple, computationally efficient, and more precise action tubelet detector in videos, termed as MovingCenter detector (MOC-detector). As shown in Figure 1, our detector presents a new tubelet detection scheme by treating each instance as a trajectory of moving points. In
|
| 31 |
+
|
| 32 |
+
this sense, an action tubelet is represented by its center point in the key frame and offsets of other frames with respect to this center point. To determine the tubelet shape, we directly regress the bounding box size along the moving point trajectory on each frame. Our MOC-detector yields a fully convolutional one-stage tubelet detection scheme, which not only allows for more efficient training and inference but also could produce more precise detection results (as demonstrated in our experiments).
|
| 33 |
+
|
| 34 |
+
Specifically, our MOC-detector decouples the task of tubelet detection into three sub-tasks: center detection, offset estimation and box regression. First, frames are fed into an efficient 2D backbone network for feature extraction. Then, we devise three separate branches: (1) Center Branch: detecting the action instance center and category; (2) Movement Branch: estimating the offsets of the current frame with respect to its center; (3) Box Branch: predicting bounding box size at the detected center point of each frame. This unique design enables the three branches to cooperate with each other to generate the tubelet detection results. Finally, we link these detected action tubelets across frames to yield long-range detection results following the common practice [14]. We perform experiments on two challenging action tube detection benchmarks of UCF101-24 [28] and JHMDB [13]. Our MOC-detector outperforms the existing state-of-the-art approaches for both frame-mAP and video-mAP on these two datasets, in particular for higher IoU criteria. Moreover, the fully convolutional nature of the MOC-detector yields a high detection efficiency of around 25 FPS.
|
| 35 |
+
|
| 36 |
+
# 2 Related Work
|
| 37 |
+
|
| 38 |
+
# 2.1 Object Detection
|
| 39 |
+
|
| 40 |
+
Anchor-based Object Detectors. Traditional one-stage [19,22,17] and two-stage object detectors [7,10,6,23] heavily relied on predefined anchor boxes. Two-stage object detectors like Faster-RCNN [23] and Cascade-RCNN [1] devised RPN to generate RoIs from a set of anchors in the first stage and handled classification and regression of each RoI in the second stage. By contrast, typical one-stage detectors utilized class-aware anchors and jointly predicted the categories and relative spatial offsets of objects, such as SSD [19], YOLO [22] and RetinaNet [17].
|
| 41 |
+
|
| 42 |
+
Anchor-free Object Detectors. However, some recent works [30,40,15,4,41] have shown that the performance of anchor-free methods could be competitive with anchor-based detectors and such detectors also get rid of computation-intensive anchors and region-based CNN. CornerNet [15] detected object bounding box as a pair of corners, and grouped them to form the final detection. CenterNet [40] modeled an object as the center point of its bounding box and regressed its width and height to build the final result.
|
| 43 |
+
|
| 44 |
+
# 2.2 Spatio-temporal Action Detection
|
| 45 |
+
|
| 46 |
+
Frame-level Detector. Many efforts have been made to extend an image object detector to the task of action detection as frame-level action detectors [8,32,21,25,26,33]. After obtaining the frame-level detections, a linking algorithm is applied to generate the final tubes [8,32,21,25,26], while Weinzaepfel et al. [33] utilized a tracking-by-detection method instead. Although optical flow is used to capture motion information, frame-level detection fails to fully utilize the video's temporal information.
|
| 49 |
+
|
| 50 |
+
Clip-level Detector. In order to model temporal information for detection, some clip-level approaches or action tubelet detectors [14,11,35,16,38,27] have been proposed. ACT [14] took a short sequence of frames and output tubelets which were regressed from anchor cuboids. STEP [35] proposed a progressive method that refines the proposals over a few steps to solve the large displacement problem and utilizes longer temporal information. Some methods [11,16] first linked frame or tubelet proposals to generate tube proposals and then performed classification.
|
| 51 |
+
|
| 52 |
+
These approaches are all based on anchor-based object detectors, whose designs might be sensitive to anchor configuration and computationally costly due to the large number of anchor boxes. Instead, we design an anchor-free action tubelet detector by treating each action instance as a trajectory of moving points. Experimental results demonstrate that our proposed action tubelet detector is effective for spatio-temporal action detection, in particular for high video IoU.
|
| 53 |
+
|
| 54 |
+
# 3 Approach
|
| 55 |
+
|
| 56 |
+
Overview. Action tubelet detection aims at localizing a short sequence of bounding boxes from an input clip and recognizing its action category as well. We present a new tubelet detector, coined as MovingCenter detector (MOC-detector), by viewing an action instance as a trajectory of moving points. As shown in Figure 2, in our MOC-detector, we take a set of consecutive frames as input and separately feed them into an efficient 2D backbone to extract frame-level features. Then, we design three head branches to perform tubelet detection in an anchor-free manner. The first branch is Center Branch, which is defined on the center (key) frame. This Center Branch localizes the tubelet center and recognizes its action category. The second branch is Movement Branch, which is defined over all frames. This Movement Branch tries to relate adjacent frames to predict the center movement along the temporal dimension. The estimated movement would propagate the center point from key frame to other frames to generate a trajectory. The third branch is Box Branch, which operates on the detected center points of all frames. This branch focuses on determining the spatial extent of the detected action instance at each frame, by directly regressing the height and width of the bounding box. These three branches collaborate together to yield tubelet detection from a short clip, which will be further linked to form action tube detection in a long untrimmed video by following a common linking strategy [14]. We will first give a short description of the backbone design, and then provide technical details of three branches and the linking algorithm in the following subsections.
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
Fig. 2. Pipeline of MOC-detector. On the left, we present the overall MOC-detector framework. The red cuboids represent the extracted features, the blue boxes denote the backbone or detection head, and the gray cuboids are detection results produced by the Center Branch, the Movement Branch, and the Box Branch. On the right, we show the detailed design of each branch. Each branch consists of a sequence of one $3 \times 3$ conv layer, one ReLU layer, and one $1 \times 1$ conv layer, presented as yellow cuboids. The parameters of each convolution are listed as (input channels, output channels, kernel height, kernel width).
|
| 60 |
+
|
| 61 |
+

|
| 62 |
+
|
| 63 |
+
Backbone. In our MOC-detector, we input $K$ frames and each frame is with the resolution of $W \times H$ . First $K$ frames are fed into a 2D backbone network sequentially to generate a feature volume $\mathbf{f} \in \mathbb{R}^{K \times \frac{W}{R} \times \frac{H}{R} \times B}$ . $R$ is the spatial downsample ratio and $B$ denotes channel number. To keep the full temporal information for subsequent detection, we do not perform any downsampling over the temporal dimension. Specifically, we choose DLA-34 [37] architecture as our MOC-detector feature backbone following CenterNet [40]. This architecture employs an encoder-decoder architecture to extract features for each frame. The spatial downsampling ratio $R$ is 4 and the channel number $B$ is 64. The extracted features are shared by three head branches. Next we will present the technical details of these head branches.
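For concreteness, the following is a minimal PyTorch sketch of the head design described in Fig. 2 (a $3 \times 3$ convolution, ReLU, and $1 \times 1$ convolution on top of the shared backbone features). The module name, the hidden width of 256, and the tensor shapes are illustrative assumptions, not the authors' released implementation.

```python
import torch
import torch.nn as nn

class BranchHead(nn.Module):
    """3x3 conv -> ReLU -> 1x1 conv, as sketched in Fig. 2 (hidden width is an assumption)."""
    def __init__(self, in_channels, out_channels, hidden=256):
        super().__init__()
        self.head = nn.Sequential(
            nn.Conv2d(in_channels, hidden, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(hidden, out_channels, kernel_size=1),
        )

    def forward(self, x):
        return self.head(x)

# Hypothetical usage with K = 7 frames, B = 64 backbone channels, C = 24 classes.
K, B, C = 7, 64, 24
center_head = BranchHead(K * B, C)        # center heatmap on the key frame
movement_head = BranchHead(K * B, 2 * K)  # per-frame (dx, dy) offsets w.r.t. the key frame
box_head = BranchHead(B, 2)               # per-frame (w, h), shared across the K frames

stacked = torch.randn(1, K * B, 72, 72)   # 288 / R with R = 4 gives a 72 x 72 feature map
center_heatmap = torch.sigmoid(center_head(stacked))  # shape [1, C, 72, 72]
```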
|
| 64 |
+
|
| 65 |
+
# 3.1 Center Branch: Detect Center at Key Frame
|
| 66 |
+
|
| 67 |
+
The Center Branch aims at detecting the action instance center in the key frame (i.e., center frame) and recognizing its category based on the extracted video features. Temporal information is important for action recognition, and thereby we design a temporal module to estimate the action center and recognize its class by concatenating multi-frame feature maps along channel dimension. Specifically, based on the video feature representation $\mathbf{f} \in \mathbb{R}^{\frac{W}{R} \times \frac{H}{R} \times (K \times B)}$ , we estimate a center heatmap $\hat{L} \in [0,1]^{\frac{W}{R} \times \frac{H}{R} \times C}$ for the key frame. The $C$ is the number of action classes. The value of $\hat{L}_{x,y,c}$ represents the likelihood of detecting an
|
| 68 |
+
|
| 69 |
+
action instance of class $c$ at location $(x, y)$ , and higher value indicates a stronger possibility. Specifically, we employ a standard convolution operation to estimate the center heatmap in a fully convolutional manner.
|
| 70 |
+
|
| 71 |
+
Training. We train the Center Branch following the common dense prediction setting [15,40]. For $i^{th}$ action instance, we represent its center as key frame's bounding box center and utilize center's position for each action category as the ground truth label $(x_{c_i},y_{c_i})$ . We generate the ground truth heatmap $L\in [0,1]^{\frac{W}{R}\times \frac{H}{R}\times C}$ using a Gaussian kernel which produces the soft heatmap groundtruth $L_{x,y,c_i} = \exp (-\frac{(x - x_{c_i})^2 + (y - y_{c_i})^2}{2\sigma_p^2})$ . For other class (i.e., $c\neq c_i$ ), we set the heatmap $L_{x,y,c} = 0$ . The $\sigma_p$ is adaptive to instance size and we choose the maximum when two Gaussian of the same category overlap. We choose the training objective, which is a variant of focal loss [17], as follows:
|
| 72 |
+
|
| 73 |
+
$$
|
| 74 |
+
\ell_{\text{center}} = -\frac{1}{n} \sum_{x, y, c} \begin{cases} (1 - \hat{L}_{xyc})^{\alpha} \log(\hat{L}_{xyc}) & \text{if } L_{xyc} = 1 \\ (1 - L_{xyc})^{\beta} (\hat{L}_{xyc})^{\alpha} \log(1 - \hat{L}_{xyc}) & \text{otherwise} \end{cases} \tag{1}
|
| 75 |
+
$$
|
| 76 |
+
|
| 77 |
+
where $n$ is the number of ground truth instances and $\alpha$ and $\beta$ are hyperparameters of the focal loss [17]. We set $\alpha = 2$ and $\beta = 4$ following [15,40] in our experiments. It indicates that this focal loss is able to deal with the imbalanced training issue effectively [17].
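As a reading aid, the sketch below is one way to implement the penalty-reduced focal loss of Eq. (1) in the CornerNet/CenterNet style that the paper follows; the function name and the epsilon for numerical stability are our assumptions rather than the authors' code.

```python
import torch

def center_focal_loss(pred, gt, alpha=2, beta=4, eps=1e-6):
    """pred, gt: [B, C, H, W]; gt is the Gaussian-smoothed heatmap in [0, 1]."""
    pos = gt.eq(1).float()                     # exact center locations (L_xyc = 1)
    neg = 1.0 - pos                            # every other location
    pos_loss = (1 - pred) ** alpha * torch.log(pred + eps) * pos
    neg_loss = (1 - gt) ** beta * pred ** alpha * torch.log(1 - pred + eps) * neg
    num_pos = pos.sum().clamp(min=1)           # n in Eq. (1)
    return -(pos_loss.sum() + neg_loss.sum()) / num_pos
```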
|
| 78 |
+
|
| 79 |
+
Inference. After the training, the Center Branch could be deployed in tubelet detection for localizing the action instance center and recognizing its category. Specifically, we detect all local peaks which are equal to or greater than their 8-connected neighbors in the estimated heatmap $\hat{L}$ for each class independently. We then keep the top $N$ peaks from all categories as candidate centers with their tubelet scores. Following [40], we set $N$ to 100, and detailed ablation studies will be provided in the supplementary material.
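The peak extraction described above is commonly realized with a max-pooling trick; the sketch below shows one such hypothetical implementation and is not necessarily the exact procedure used in the released code.

```python
import torch
import torch.nn.functional as F

def extract_centers(heatmap, top_n=100):
    """heatmap: [B, C, H, W] after a sigmoid."""
    hmax = F.max_pool2d(heatmap, kernel_size=3, stride=1, padding=1)
    peaks = heatmap * (hmax == heatmap).float()          # keep 8-neighborhood local maxima only
    b, c, h, w = peaks.shape
    scores, idx = peaks.view(b, -1).topk(top_n)          # top-N over all classes and positions
    classes = torch.div(idx, h * w, rounding_mode="floor")
    spatial = idx % (h * w)
    ys = torch.div(spatial, w, rounding_mode="floor")    # y coordinate on the feature map
    xs = spatial % w                                     # x coordinate on the feature map
    return scores, classes, xs, ys
```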
|
| 80 |
+
|
| 81 |
+
# 3.2 Movement Branch: Move Center Temporally
|
| 82 |
+
|
| 83 |
+
The Movement Branch tries to relate adjacent frames to predict the movement of the action instance center along the temporal dimension. Similar to Center Branch, Movement Branch also employs temporal information to regress the center offsets of current frame with respect to key frame. Specifically, Movement Branch takes stacked feature representation as input and outputs a movement prediction map $\hat{M} \in \mathbb{R}^{\frac{W}{R} \times \frac{H}{R} \times (K \times 2)}$ . $2K$ channels represent center movements from key frame to current frames in $X$ and $Y$ directions. Given the key frame center $(\hat{x}_{key}, \hat{y}_{key})$ , $\hat{M}_{\hat{x}_{key}, \hat{y}_{key}, 2j:2j+2}$ encodes center movement at $j^{th}$ frame.
|
| 84 |
+
|
| 85 |
+
Training. The ground truth tubelet of $i^{th}$ action instance is $[(x_{tl}^1, y_{tl}^1, x_{br}^1, y_{br}^1), \ldots, (x_{tl}^j, y_{tl}^j, x_{br}^j, y_{br}^j), \ldots, (x_{tl}^K, y_{tl}^K, x_{br}^K, y_{br}^K)]$ , where subscript $tl$ and $br$ represent top-left and bottom-right points of bounding boxes, respectively. Let $k$ be the key frame index, and the $i^{th}$ action instance center at key frame is defined as follows:
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
(x_i^{key}, y_i^{key}) = \left( \lfloor (x_{tl}^{k} + x_{br}^{k}) / 2 \rfloor,\ \lfloor (y_{tl}^{k} + y_{br}^{k}) / 2 \rfloor \right). \tag{2}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
We could compute the bounding box center $(x_i^j, y_i^j)$ of $i^{th}$ instance at $j^{th}$ frame as follows:
|
| 92 |
+
|
| 93 |
+
$$
|
| 94 |
+
\left( x_i^j, y_i^j \right) = \left( (x_{tl}^j + x_{br}^j) / 2,\ (y_{tl}^j + y_{br}^j) / 2 \right). \tag{3}
|
| 95 |
+
$$
|
| 96 |
+
|
| 97 |
+
Then, the ground truth movement of the $i^{th}$ action instance is calculated as follows:
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
m_i = \left( x_i^1 - x_i^{key},\ y_i^1 - y_i^{key},\ \dots,\ x_i^K - x_i^{key},\ y_i^K - y_i^{key} \right). \tag{4}
|
| 101 |
+
$$
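To make Eqs. (2)-(4) concrete, the short sketch below builds the key-frame center and the $2K$-dimensional movement target from a tubelet of per-frame boxes; the function name and data layout are illustrative assumptions, not the authors' data-loading code.

```python
def movement_target(boxes, key):
    """boxes: K tuples (x_tl, y_tl, x_br, y_br); key: index of the key frame."""
    centers = [((x1 + x2) / 2.0, (y1 + y2) / 2.0) for x1, y1, x2, y2 in boxes]
    kx, ky = int(centers[key][0]), int(centers[key][1])  # floored key-frame center, Eq. (2)
    movement = []
    for cx, cy in centers:                                # per-frame centers, Eq. (3)
        movement += [cx - kx, cy - ky]                    # offsets w.r.t. the key frame, Eq. (4)
    return (kx, ky), movement                             # 2K-dimensional target m_i
```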
|
| 102 |
+
|
| 103 |
+
For the training of Movement Branch, we optimize the movement map $\hat{M}$ only at the key frame center location and use the $\ell_1$ loss as follows:
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\ell_{\text{movement}} = \frac{1}{n} \sum_{i=1}^{n} \left| \hat{M}_{x_i^{key},\, y_i^{key}} - m_i \right|. \tag{5}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
Inference. After the Movement Branch training and given $N$ detected action centers $\{(\hat{x}_i,\hat{y}_i)|i\in \{1,2,\dots ,N\} \}$ from Center Branch, we obtain a set of movement vector $\{\hat{M}_{\hat{x}_i,\hat{y}_i}|i\in \{1,2,\dots ,N\} \}$ for all detected action instance. Based on the results of Movement Branch and Center Branch, we could easily generate a trajectory set $T = \{T_{i}|i\in \{1,2,\dots ,N\} \}$ , and for the detected action center $(\hat{x}_i,\hat{y}_i)$ , its trajectory of moving points is calculated as follows:
|
| 110 |
+
|
| 111 |
+
$$
|
| 112 |
+
T_i = (\hat{x}_i, \hat{y}_i) + \left[ \hat{M}_{\hat{x}_i, \hat{y}_i, 0:2},\ \hat{M}_{\hat{x}_i, \hat{y}_i, 2:4},\ \dots,\ \hat{M}_{\hat{x}_i, \hat{y}_i, 2K-2:2K} \right]. \tag{6}
|
| 113 |
+
$$
|
| 114 |
+
|
| 115 |
+
# 3.3 Box Branch: Determine Spatial Extent
|
| 116 |
+
|
| 117 |
+
The Box Branch is the last step of tubelet detection and focuses on determining the spatial extent of the action instance. Unlike the Center Branch and Movement Branch, we assume box detection only depends on the current frame and temporal information will not benefit the class-agnostic bounding box generation. We will provide the ablation study in the supplementary material. In this sense, this branch could be performed in a frame-wise manner. Specifically, the Box Branch takes the single frame's feature $\mathbf{f}^j \in \mathbb{R}^{\frac{W}{R} \times \frac{H}{R} \times B}$ as input and generates a size prediction map $\hat{S}^j \in \mathbb{R}^{\frac{W}{R} \times \frac{H}{R} \times 2}$ for the $j^{th}$ frame to directly estimate the bounding box size (i.e., width and height). Note that the Box Branch is shared across the $K$ frames.
|
| 118 |
+
|
| 119 |
+
Training. The ground truth bbox size of $i^{th}$ action instance at $j^{th}$ frame can be represented as follows:
|
| 120 |
+
|
| 121 |
+
$$
|
| 122 |
+
s_i^j = \left( x_{br}^j - x_{tl}^j,\ y_{br}^j - y_{tl}^j \right). \tag{7}
|
| 123 |
+
$$
|
| 124 |
+
|
| 125 |
+
With this ground truth bounding box size, we optimize the Box Branch at the center points of all frames for each tubelet with $\ell_1$ Loss as follows:
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\ell_{\text{box}} = \frac{1}{n} \sum_{i=1}^{n} \sum_{j=1}^{K} \left| \hat{S}^{j}_{p_i^j} - s_i^j \right|. \tag{8}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
Note that the $p_i^j$ is the $i^{th}$ instance ground truth center at $j^{th}$ frame. So the overall training objective of our MOC-detector is
|
| 132 |
+
|
| 133 |
+
$$
|
| 134 |
+
\ell = \ell_{\text{center}} + a\, \ell_{\text{movement}} + b\, \ell_{\text{box}}, \tag{9}
|
| 135 |
+
$$
|
| 136 |
+
|
| 137 |
+
where we set $a = 1$ and $b = 0.1$ in all our experiments. Detailed ablation studies will be provided in the supplementary material.
|
| 138 |
+
|
| 139 |
+
Inference. Now, we are ready to generate the tubelet detection results based on the center trajectories $T$ from the Movement Branch and the size prediction heatmap $\hat{S}$ produced by this branch. For the $j^{th}$ point in trajectory $T_{i}$, we use $(T_{x}, T_{y})$ to denote its coordinates and $(w, h)$ to denote the Box Branch size output $\hat{S}$ at that location. Then, the bounding box for this point is calculated as:
|
| 140 |
+
|
| 141 |
+
$$
|
| 142 |
+
\left( T_x - w/2,\ T_y - h/2,\ T_x + w/2,\ T_y + h/2 \right). \tag{10}
|
| 143 |
+
$$
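Putting Eqs. (6) and (10) together, a tubelet can be decoded by moving the detected key-frame center through the predicted offsets and reading the box size at each moved center. The sketch below illustrates this under assumed tensor layouts; it is not the authors' decoding code.

```python
import torch

def decode_tubelet(center_x, center_y, movement, size_map):
    """center_x, center_y: detected key-frame center on the feature map;
    movement: tensor of 2K offsets predicted at that location;
    size_map: [K, 2, H, W] per-frame (w, h) predictions from the Box Branch."""
    K = movement.numel() // 2
    H, W = size_map.shape[-2:]
    boxes = []
    for j in range(K):
        tx = center_x + float(movement[2 * j])            # Eq. (6): moved center at frame j
        ty = center_y + float(movement[2 * j + 1])
        xi = min(max(int(tx), 0), W - 1)                  # read (w, h) at the moved center
        yi = min(max(int(ty), 0), H - 1)
        w, h = float(size_map[j, 0, yi, xi]), float(size_map[j, 1, yi, xi])
        boxes.append((tx - w / 2, ty - h / 2, tx + w / 2, ty + h / 2))  # Eq. (10)
    return boxes
```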
|
| 144 |
+
|
| 145 |
+
# 3.4 Tubelet Linking
|
| 146 |
+
|
| 147 |
+
After getting the clip-level detection results, we link these tubelets into final tubes across time. As our main goal is to propose a new tubelet detector, we use the same linking algorithm as [14] for a fair comparison. Given a video, MOC extracts tubelets and keeps the top 10 as candidates for each sequence of K frames with stride 1 across time; these candidates are linked into the final tubes in a tubelet-by-tubelet manner, as sketched below. Initialization: In the first frame, every candidate starts a new link. At a given frame, candidates which are not assigned to any existing link start new links. Linking: a candidate can only be assigned to one existing link, and only when it meets three conditions: (1) the candidate is not selected by other links, (2) the candidate has the highest score, (3) the overlap between the link and the candidate is greater than a threshold $\tau$. Termination: An existing link stops if it has not been extended in K consecutive frames. We build an action tube for each link, whose score is the average score of the tubelets in the link. For each frame in the link, we average the bbox coordinates of the tubelets containing that frame. Initialization and termination determine the tubes' temporal extents. Tubes with low confidence and short duration are discarded. As this linking algorithm is online, MOC can be applied to online video streams.
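A simplified, self-contained sketch of these linking rules (greedy assignment by score under an overlap threshold $\tau$) is given below; the IoU computation, scoring, and data structures are illustrative and do not reproduce the exact implementation of [14].

```python
def iou(box_a, box_b):
    """Intersection-over-union of two (x1, y1, x2, y2) boxes."""
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0

def link_step(links, candidates, tau=0.5, max_gap=7):
    """links: list of dicts {'boxes': [...], 'score': float, 'gap': int};
    candidates: list of dicts {'box': (x1, y1, x2, y2), 'score': float}."""
    used = set()
    for link in sorted(links, key=lambda l: l['score'], reverse=True):
        best, best_score = None, -1.0
        for i, cand in enumerate(candidates):
            if i in used:                                   # (1) not taken by another link
                continue
            if iou(link['boxes'][-1], cand['box']) < tau:   # (3) sufficient overlap
                continue
            if cand['score'] > best_score:                  # (2) highest-scoring candidate
                best, best_score = i, cand['score']
        if best is not None:
            link['boxes'].append(candidates[best]['box'])
            link['gap'] = 0
            used.add(best)
        else:
            link['gap'] += 1                                # count frames without extension
    links = [l for l in links if l['gap'] < max_gap]        # terminate stale links
    for i, cand in enumerate(candidates):
        if i not in used:                                   # unassigned candidates start new links
            links.append({'boxes': [cand['box']], 'score': cand['score'], 'gap': 0})
    return links
```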
|
| 148 |
+
|
| 149 |
+
# 4 Experiments
|
| 150 |
+
|
| 151 |
+
# 4.1 Experimental Setup
|
| 152 |
+
|
| 153 |
+
Datasets and Metrics. We perform experiments on the UCF101-24 [28] and JHMDB [13] datasets. UCF101-24 [28] consists of 3207 temporally untrimmed videos from 24 sports classes. Following the common setting [21,14], we report the action detection performance for the first split only. JHMDB [13] consists of 928 temporally trimmed videos from 21 action classes. We report results averaged over three splits following the common setting [21,14]. AVA [9] is a larger dataset for action detection but only contains a single-frame action instance annotation for each 3s clip, which concentrates on detecting actions on a single key frame. Thus, AVA is not suitable to verify the effectiveness of tubelet action detectors. Following [33,8,14], we utilize frame mAP and video mAP to evaluate detection accuracy.
|
| 154 |
+
|
| 155 |
+
Implementation Details. We choose DLA-34 [37] as our backbone with either COCO [18] or ImageNet [3] pretraining. Unless stated otherwise, we report MOC results with COCO pretraining. For a fair comparison, we provide two-stream results on both datasets with both COCO and ImageNet pretraining in Section 4.3. Each frame is resized to $288 \times 288$. The spatial downsampling ratio $R$ is set to 4 and the resulting feature map size is $72 \times 72$. During training, we apply the same data augmentation as [14] to the whole video: photometric transformation, scale jittering, and location jittering. We use Adam with a learning rate of 5e-4 to optimize the overall objective. The learning rate is tuned for convergence on the validation set and decreased by a factor of 10 when performance saturates. Training runs for at most 12 epochs on UCF101-24 [28] and 20 epochs on JHMDB [13].
|
| 156 |
+
|
| 157 |
+
# 4.2 Ablation Studies
|
| 158 |
+
|
| 159 |
+
For efficient exploration, unless otherwise specified, we perform experiments using only the RGB input modality, COCO pretraining, and $K = 5$, and we use exactly the same training strategy throughout this subsection.
|
| 160 |
+
|
| 161 |
+
Effectiveness of Movement Branch. In MOC, the Movement Branch affects both the location and the size of the bounding boxes. The Movement Branch moves the key frame center to other frames to locate the bbox center, termed the Move Center strategy. The Box Branch estimates the bbox size at the current frame center located by the Movement Branch (which differs from the key frame center), termed the Bbox Align strategy. To explore the effectiveness of the Movement Branch, we compare MOC with two other detector designs, called No Movement and Semi Movement. We set the tubelet length $K = 5$ in all detection designs with the same training strategy. As shown in Figure 3, No Movement directly removes the Movement Branch and just generates the bounding box for each frame at the same location as the key frame center. Semi Movement first generates the bounding box for each frame at the same location as the key frame center, and then moves the generated box in each frame according to the Movement Branch prediction. Full Movement (MOC) first moves the key frame center to the current frame center according to the Movement Branch prediction, and then the Box Branch generates the bounding box for each frame at its own center. The difference between Full Movement and Semi Movement is that they generate the bounding box at different locations: one at the real center, and the other at the fixed key frame center. The results are summarized in Table 1.
|
| 162 |
+
|
| 163 |
+
First, we observe that the performance gap between No Movement and Semi Movement is $1.56\%$ for frame mAP@0.5 and $11.05\%$ for video mAP@0.5. We find that the Movement Branch has a relatively small influence on frame mAP, but contributes much to improving the video mAP. Frame mAP measures the detection quality in a single frame without tubelet linking, while video mAP measures the tube-level detection quality involving tubelet linking. A small movement within a short tubelet does not harm frame mAP dramatically, but accumulating these subtle errors in the linking process seriously harms video-level detection.
|
| 164 |
+
|
| 165 |
+

|
| 166 |
+
(a) Generate bbox at key frame center, without any movement
|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
(b) First generate bbox at key frame center, then move the bbox
|
| 170 |
+
|
| 171 |
+

|
| 172 |
+
(c) First move key frame center, then generate bbox at current frame center
|
| 173 |
+
Fig. 3. Illustration of Three Movement Strategies. Note that the arrow represents moving according to Movement Branch prediction, the red dot represents the key frame center and the green dot represents the current frame center, which is localized by moving key frame center according to Movement Branch prediction.
|
| 174 |
+
|
| 175 |
+
Table 1. Exploration study on MOC detector design with various combinations of movement strategies on UCF101-24.
|
| 176 |
+
|
| 177 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Strategy</td><td rowspan="2">F-mAP@0.5 (%)</td><td colspan="4">Video-mAP (%)</td></tr><tr><td>Move Center</td><td>Bbox Align</td><td>@0.2</td><td>@0.5</td><td>@0.75</td><td>0.5:0.95</td></tr><tr><td>No Movement</td><td></td><td></td><td>68.22</td><td>68.91</td><td>37.77</td><td>19.94</td><td>19.27</td></tr><tr><td>Semi Movement</td><td>✓</td><td></td><td>69.78</td><td>76.63</td><td>48.82</td><td>27.05</td><td>26.09</td></tr><tr><td>Full Movement (MOC)</td><td>✓</td><td>✓</td><td>71.63</td><td>77.74</td><td>49.55</td><td>27.04</td><td>26.09</td></tr></table>
|
| 178 |
+
|
| 179 |
+
This demonstrates that movement information is important for improving video mAP. Second, we can see that Full Movement performs slightly better than Semi Movement for both video mAP and frame mAP. Without Bbox Align, the Box Branch estimates the bbox size at the key frame center for all frames, which causes a small performance drop compared with MOC. This small gap implies that the Box Branch is relatively robust to the box center, and estimating the bbox size at a slightly shifted location brings only a very slight performance difference.
|
| 180 |
+
|
| 181 |
+
Study on Movement Branch Design. In practice, in order to find an efficient way to capture center movements, we implement Movement Branch in several different ways. The first one is Flow Guided Movement strategy which utilizes optical flow between adjacent frames to move action instance center. The second strategy, Cost Volume Movement, is to directly compute the movement offset by constructing cost volume between key frame and current frame following [39], but this explicit computing fails to yield better results and is slower due to the constructing of cost volume. The third one is Accumulated Movement strategy which predicts center movement between consecutive frames instead of with respect to key frame. The fourth strategy, Center Movement, is to employ 3D convolutional operation to directly regress the offsets of the current frame with respect to key frame as illustrated in Section 3.2. The results are reported in Table 2.
|
| 182 |
+
|
| 183 |
+
We notice that the simple Center Movement performs best and choose it as the Movement Branch design in our MOC-detector; it directly employs a 3D convolution to regress the key frame center movement for all frames as a whole. We analyze the failure reasons for the other three designs below.
|
| 184 |
+
|
| 185 |
+
Table 2. Exploration study on the Movement Branch design on UCF101-24 [28]. Note that our MOC-detector adopts the Center Movement.
|
| 186 |
+
|
| 187 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">F-mAP@0.5 (%)</td><td colspan="4">Video-mAP (%)</td></tr><tr><td>@0.2</td><td>@0.5</td><td>@0.75</td><td>0.5:0.95</td></tr><tr><td>Flow Guided Movement</td><td>69.38</td><td>75.17</td><td>42.28</td><td>22.26</td><td>21.16</td></tr><tr><td>Cost Volume Movement</td><td>69.63</td><td>72.56</td><td>43.67</td><td>21.68</td><td>22.46</td></tr><tr><td>Accumulated Movement</td><td>69.40</td><td>75.03</td><td>46.19</td><td>24.67</td><td>23.80</td></tr><tr><td>Center Movement</td><td>71.63</td><td>77.74</td><td>49.55</td><td>27.04</td><td>26.09</td></tr></table>
|
| 188 |
+
|
| 189 |
+
Table 3. Exploration study on the tubelet duration $K$ on UCF101-24.
|
| 190 |
+
|
| 191 |
+
<table><tr><td rowspan="2">Tubelet Duration</td><td rowspan="2">F-mAP@0.5 (%)</td><td colspan="4">Video-mAP (%)</td></tr><tr><td>@0.2</td><td>@0.5</td><td>@0.75</td><td>0.5:0.95</td></tr><tr><td>K = 1</td><td>68.33</td><td>65.47</td><td>31.50</td><td>15.12</td><td>15.54</td></tr><tr><td>K = 3</td><td>69.94</td><td>75.83</td><td>45.94</td><td>24.94</td><td>23.84</td></tr><tr><td>K = 5</td><td>71.63</td><td>77.74</td><td>49.55</td><td>27.04</td><td>26.09</td></tr><tr><td>K = 7</td><td>73.14</td><td>78.81</td><td>51.02</td><td>27.05</td><td>26.51</td></tr><tr><td>K = 9</td><td>72.17</td><td>77.94</td><td>50.16</td><td>26.26</td><td>26.07</td></tr></table>
|
| 192 |
+
|
| 193 |
+
For Flow Guided Movement, (i) flow is not accurate and only represents pixel movement, while Center Movement is supervised by box movement, and (ii) accumulating adjacent flows to generate the trajectory enlarges the error. For Cost Volume Movement, (i) we explicitly calculate the correlation of the current frame with respect to the key frame, so regressing the movement of the current frame only depends on the current correlation map; when directly regressing movement with 3D convolutions, however, the movement of each frame depends on all frames, which might contribute to more accurate estimation. (ii) As the cost volume calculation and offset aggregation involve a correlation without extra parameters, we observe that convergence is much harder than for Center Movement. For Accumulated Movement, this strategy also causes error accumulation and is more sensitive to the consistency between training and inference: the ground truth movement is calculated at the real bounding box center during training, while at inference the current frame center is estimated by the Movement Branch and might not be precise, so Accumulated Movement can bring a large displacement from the ground truth.
|
| 194 |
+
|
| 195 |
+
Study on Input Sequence Duration. The temporal length $K$ of the input clip is an important parameter in our MOC-detector. In this study, we report the RGB stream performance of MOC on UCF101-24 [28] by varying $K$ from 1 to 9 and the experiment results are summarized in Table 3. We reduce the training batch size for $K = 7$ and $K = 9$ due to GPU memory limitation.
|
| 196 |
+
|
| 197 |
+
First, we notice that when $K = 1$, our MOC-detector reduces to a frame-level detector, which obtains the worst performance, in particular for video mAP. This confirms the common assumption that a frame-level action detector lacks temporal information for action recognition and is thus worse than tubelet detectors, which agrees with our basic motivation for designing an action tubelet detector.
|
| 198 |
+
|
| 199 |
+
Table 4. Comparison with the state of the art on JHMDB (trimmed) and UCF101-24 (untrimmed). Ours $(\mathrm{MOC})^{\dagger}$ is pretrained on ImageNet [3] and Ours (MOC) is pretrained on COCO [18].
|
| 200 |
+
|
| 201 |
+
<table><tr><td rowspan="3">Method</td><td colspan="5">JHMDB</td><td colspan="3">UCF101-24</td></tr><tr><td rowspan="2">Frame-mAP@0.5 (%)</td><td colspan="4">Video-mAP (%)</td><td rowspan="2">Frame-mAP@0.5 (%)</td><td colspan="2">Video-mAP (%)</td></tr><tr><td>@0.2</td><td>@0.5</td><td>@0.75</td><td>0.5:0.95</td><td>@0.2</td><td>@0.5</td></tr><tr><td colspan="9">2D Backbone</td></tr><tr><td>Saha et al. 2016 [25]</td><td>-</td><td>72.6</td><td>71.5</td><td>43.3</td><td>40.0</td><td>-</td><td>66.7</td><td>35.9</td></tr><tr><td>Peng et al. 2016 [21]</td><td>58.5</td><td>74.3</td><td>73.1</td><td>-</td><td>-</td><td>39.9</td><td>42.3</td><td>-</td></tr><tr><td>Singh et al. 2017 [26]</td><td>-</td><td>73.8</td><td>72.0</td><td>44.5</td><td>41.6</td><td>-</td><td>73.5</td><td>46.3</td></tr><tr><td>Kalogeiton et al. 2017 [14]</td><td>65.7</td><td>74.2</td><td>73.7</td><td>52.1</td><td>44.8</td><td>69.5</td><td>76.5</td><td>49.2</td></tr><tr><td>Yang et al. 2019 [35]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>75.0</td><td>76.6</td><td>-</td></tr><tr><td>Song et al. 2019 [27]</td><td>65.5</td><td>74.1</td><td>73.4</td><td>52.5</td><td>44.8</td><td>72.1</td><td>77.5</td><td>52.9</td></tr><tr><td>Zhao et al. 2019 [38]</td><td>-</td><td>-</td><td>74.7</td><td>53.3</td><td>45.0</td><td>-</td><td>78.5</td><td>50.3</td></tr><tr><td>Ours (MOC)†</td><td>68.0</td><td>76.2</td><td>75.4</td><td>68.5</td><td>54.0</td><td>76.9</td><td>81.3</td><td>54.4</td></tr><tr><td>Ours (MOC)</td><td>70.8</td><td>77.3</td><td>77.2</td><td>71.7</td><td>59.1</td><td>78.0</td><td>82.8</td><td>53.8</td></tr><tr><td colspan="9">3D Backbone</td></tr><tr><td>Hou et al. 2017 [11] (C3D)</td><td>61.3</td><td>78.4</td><td>76.9</td><td>-</td><td>-</td><td>41.4</td><td>47.1</td><td>-</td></tr><tr><td>Gu et al. 2018 [9] (I3D)</td><td>73.3</td><td>-</td><td>78.6</td><td>-</td><td>-</td><td>76.3</td><td>-</td><td>59.9</td></tr><tr><td>Sun et al. 2018 [29] (S3D-G)</td><td>77.9</td><td>-</td><td>80.1</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr></table>
|
| 202 |
+
|
| 203 |
+
Second, we see that the detection performance increases as we vary $K$ from 1 to 7, and the performance gap becomes smaller when comparing $K = 5$ and $K = 7$. From $K = 7$ to $K = 9$, detection performance drops because predicting movement is harder for a longer input length. According to these results, we set $K = 7$ in our MOC.
|
| 204 |
+
|
| 205 |
+
# 4.3 Comparison with the State of the Art
|
| 206 |
+
|
| 207 |
+
Finally, we compare our MOC with the existing state-of-the-art methods on the trimmed JHMDB dataset and the untrimmed UCF101-24 dataset in Table 4. For a fair comparison, we also report two-stream results with ImageNet pretrain.
|
| 208 |
+
|
| 209 |
+
Our MOC obtains similar performance on UCF101-24 with ImageNet pretraining and COCO pretraining, while COCO pretraining clearly improves MOC's performance on JHMDB because JHMDB is quite small and sensitive to the pretrained model. Our method significantly outperforms the frame-level action detectors [25,21,26], which perform action detection at each frame independently without capturing temporal information, both for frame-mAP and video-mAP. [14,35,38,27] are all tubelet detectors; our MOC outperforms them for all metrics on both datasets, and the improvement is more evident for high-IoU video mAP. This result confirms that our anchor-free MOC-detector is more effective for localizing precise tubelets from clips than those anchor-based detectors, which might be ascribed to the flexibility and continuity of the MOC-detector in directly regressing tubelet shape. Our method achieves comparable performance to the 3D-backbone-based methods [11,9,29]. These methods usually divide action detection into two steps: person detection (a ResNet50-based Faster R-CNN [23] pretrained on ImageNet) and action classification (I3D [2]/S3D-G [34] pretrained on Kinetics [2], with ROI pooling), and they fail to provide a simple, unified action detection framework.
|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
(a)
|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
(b)
|
| 216 |
+
Fig. 4. Runtime Comparison and Analysis. (a) Comparison with other methods. Two-stream results following ACT [14]'s setting. (b) The detection accuracy (green bars) and speeds (red dots) of MOC's online setting.
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
# 4.4 Runtime Analysis
|
| 221 |
+
|
| 222 |
+
Following ACT [14], we evaluate MOC's two-stream offline speed on a single GPU without including the flow extraction time, and MOC reaches 25 fps. In Figure 4(a), we compare MOC with some existing methods which have reported their speed in the original papers. [35,38,14] are all action tubelet detectors, and our MOC obtains more accurate detection results with comparable speed. Our MOC can also be applied to processing online real-time video streams. To simulate an online video stream, we set the batch size to 1. Since the backbone feature of each frame only needs to be extracted once, we save the previous K-1 frames' features in a buffer. When getting a new frame, MOC's backbone first extracts its feature and combines it with the previous K-1 frames' features in the buffer. Then MOC's three branches generate tubelet detections based on these features. After that, the buffer is updated with the current frame's feature for subsequent detection. For online testing, we only input RGB as optical flow extraction is quite expensive, and the results are reported in Figure 4(b). We see that our MOC is quite efficient in online testing and reaches 53 FPS for $\mathrm{K} = 7$.
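The online buffering scheme described above can be sketched as follows; `backbone` and `moc_heads` are stand-ins for the real modules, and the tensor layout is an assumption rather than the released implementation.

```python
from collections import deque
import torch

class OnlineMOC:
    """Minimal sketch of online tubelet detection with a feature buffer."""
    def __init__(self, backbone, moc_heads, K=7):
        self.backbone, self.heads, self.K = backbone, moc_heads, K
        self.buffer = deque(maxlen=K - 1)   # features of the previous K-1 frames

    @torch.no_grad()
    def step(self, frame):
        feat = self.backbone(frame)                      # one backbone pass per new frame
        clip_feats = list(self.buffer) + [feat]
        detections = None
        if len(clip_feats) == self.K:                    # enough frames for a tubelet
            detections = self.heads(torch.stack(clip_feats, dim=1))
        self.buffer.append(feat)                         # keep the buffer up to date
        return detections
```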
|
| 223 |
+
|
| 224 |
+
# 4.5 Visualization
|
| 225 |
+
|
| 226 |
+
In Figure 5, we give some qualitative examples to compare the performance between tubelet duration $\mathrm{K} = 1$ and $\mathrm{K} = 7$. Comparison between the second row and the third row shows that our tubelet detector leads to fewer missed detections and localizes actions more accurately owing to the offset constraint within the same tubelet. Moreover, comparison between the fifth and the sixth rows shows that our tubelet detector can reduce classification errors, because some actions cannot be discriminated by looking at just one frame.
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
Fig. 5. Examples of Per-frame $(\mathbf{K} = \mathbf{1})$ and Tubelet $(\mathbf{K} = \mathbf{7})$ Detection. The yellow color boxes present detection results, whose categories and scores are provided beside. Yellow categories represent correct and red ones represent wrong. Red dashed boxes represent missed actors. Green boxes and categories are the ground truth. MOC generates one score and category for one tubelet and we mark these in the first frame of the tubelet. Note that we set the visualization threshold as 0.4.
|
| 230 |
+
|
| 231 |
+
# 5 Conclusion and Future Work
|
| 232 |
+
|
| 233 |
+
In this paper, we have presented an action tubelet detector, termed as MOC, by treating each action instance as a trajectory of moving points and directly regressing bounding box size at estimated center points of all frames. As demonstrated on two challenging datasets, the MOC-detector has brought a new state-of-the-art with both metrics of frame mAP and video mAP, while maintaining a reasonable computational cost. The superior performance is largely ascribed to the unique design of three branches and their cooperative modeling ability to perform tubelet detection. In the future, based on the proposed MOC-detector, we try to extend its framework to longer-term modeling and model action boundary in the temporal dimension, thus contributing to spatio-temporal action detection in longer continuous video streams.
|
| 234 |
+
|
| 235 |
+
Acknowledgements. This work is supported by Tencent AI Lab Rhino-Bird Focused Research Program (No. JR202025), the National Science Foundation of China (No. 61921006), Program for Innovative Talents and Entrepreneur in Jiangsu Province, and Collaborative Innovation Center of Novel Software Technology and Industrialization.
|
| 236 |
+
|
| 237 |
+
# References
|
| 238 |
+
|
| 239 |
+
1. Cai, Z., Vasconcelos, N.: Cascade r-cnn: Delving into high quality object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 6154-6162 (2018)
|
| 240 |
+
2. Carreira, J., Zisserman, A.: Quo vadis, action recognition? a new model and the kinetics dataset. In: proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 6299-6308 (2017)
|
| 241 |
+
3. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: 2009 IEEE conference on computer vision and pattern recognition. pp. 248-255. IEEE (2009)
|
| 242 |
+
4. Duan, K., Bai, S., Xie, L., Qi, H., Huang, Q., Tian, Q.: Centernet: Keypoint triplets for object detection. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 6569-6578 (2019)
|
| 243 |
+
5. Gan, C., Wang, N., Yang, Y., Yeung, D.Y., Hauptmann, A.G.: Devnet: A deep event network for multimedia event detection and evidence recounting. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2568-2577 (2015)
|
| 244 |
+
6. Girshick, R.: Fast r-cnn. In: Proceedings of the IEEE international conference on computer vision. pp. 1440-1448 (2015)
|
| 245 |
+
7. Girshick, R., Donahue, J., Darrell, T., Malik, J.: Rich feature hierarchies for accurate object detection and semantic segmentation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 580-587 (2014)
|
| 246 |
+
8. Gkioxari, G., Malik, J.: Finding action tubes. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 759-768 (2015)
|
| 247 |
+
9. Gu, C., Sun, C., Ross, D.A., Vondrick, C., Pantofaru, C., Li, Y., Vijayanarasimhan, S., Toderici, G., Ricco, S., Sukthankar, R., et al.: Ava: A video dataset of spatiotemporally localized atomic visual actions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 6047-6056 (2018)
|
| 248 |
+
10. He, K., Zhang, X., Ren, S., Sun, J.: Spatial pyramid pooling in deep convolutional networks for visual recognition. IEEE transactions on pattern analysis and machine intelligence 37(9), 1904-1916 (2015)
11. Hou, R., Chen, C., Shah, M.: Tube convolutional neural network (t-cnn) for action detection in videos. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 5822-5831 (2017)
12. Hu, W., Tan, T., Wang, L., Maybank, S.: A survey on visual surveillance of object motion and behaviors. IEEE Transactions on Systems, Man, and Cybernetics, Part C (Applications and Reviews) 34(3), 334-352 (2004)
13. Jhuang, H., Gall, J., Zuffi, S., Schmid, C., Black, M.J.: Towards understanding action recognition. In: Proceedings of the IEEE international conference on computer vision. pp. 3192-3199 (2013)
14. Kalogeiton, V., Weinzaepfel, P., Ferrari, V., Schmid, C.: Action tubelet detector for spatio-temporal action localization. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 4405-4413 (2017)
15. Law, H., Deng, J.: Cornernet: Detecting objects as paired keypoints. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 734-750 (2018)
16. Li, D., Qiu, Z., Dai, Q., Yao, T., Mei, T.: Recurrent tubelet proposal and recognition networks for action detection. In: Proceedings of the European conference on computer vision (ECCV). pp. 303-318 (2018)
|
| 255 |
+
|
| 256 |
+
17. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE international conference on computer vision. pp. 2980-2988 (2017)
|
| 257 |
+
18. Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft COCO: Common objects in context. In: European conference on computer vision. pp. 740-755. Springer (2014)
|
| 258 |
+
19. Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.Y., Berg, A.C.: Ssd: Single shot multibox detector. In: European conference on computer vision. pp. 21-37. Springer (2016)
|
| 259 |
+
20. Oh, S., Hoogs, A., Perera, A., Cuntoor, N., Chen, C.C., Lee, J.T., Mukherjee, S., Aggarwal, J., Lee, H., Davis, L., et al.: A large-scale benchmark dataset for event recognition in surveillance video. In: CVPR 2011. pp. 3153-3160. IEEE (2011)
21. Peng, X., Schmid, C.: Multi-region two-stream r-cnn for action detection. In: European conference on computer vision. pp. 744-759. Springer (2016)
22. Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: Unified, real-time object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 779-788 (2016)
23. Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. In: Advances in neural information processing systems. pp. 91-99 (2015)
24. Saha, S., Singh, G., Cuzzolin, F.: Amtnet: Action-micro-tube regression by end-to-end trainable deep architecture. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 4414-4423 (2017)
25. Saha, S., Singh, G., Sapienza, M., Torr, P.H., Cuzzolin, F.: Deep learning for detecting multiple space-time action tubes in videos. arXiv preprint arXiv:1608.01529 (2016)
26. Singh, G., Saha, S., Sapienza, M., Torr, P.H., Cuzzolin, F.: Online real-time multiple spatiotemporal action localisation and prediction. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 3637-3646 (2017)
27. Song, L., Zhang, S., Yu, G., Sun, H.: Tacnet: Transition-aware context network for spatio-temporal action detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 11987-11995 (2019)
28. Soomro, K., Zamir, A.R., Shah, M.: Ucf101: A dataset of 101 human actions classes from videos in the wild. arXiv preprint arXiv:1212.0402 (2012)
29. Sun, C., Shrivastava, A., Vondrick, C., Murphy, K., Sukthankar, R., Schmid, C.: Actor-centric relation network. In: ECCV. pp. 335-351 (2018)
30. Tian, Z., Shen, C., Chen, H., He, T.: Fcos: Fully convolutional one-stage object detection. In: The IEEE International Conference on Computer Vision (ICCV) (October 2019)
31. Venugopalan, S., Rohrbach, M., Donahue, J., Mooney, R., Darrell, T., Saenko, K.: Sequence to sequence - video to text. In: Proceedings of the IEEE international conference on computer vision. pp. 4534-4542 (2015)
32. Wang, L., Qiao, Y., Tang, X., Gool, L.V.: Actionness estimation using hybrid fully convolutional networks. In: CVPR. pp. 2708-2717 (2016)
33. Weinzaepfel, P., Harchaoui, Z., Schmid, C.: Learning to track for spatio-temporal action localization. In: Proceedings of the IEEE international conference on computer vision. pp. 3164-3172 (2015)
34. Xie, S., Sun, C., Huang, J., Tu, Z., Murphy, K.: Rethinking spatiotemporal feature learning: Speed-accuracy trade-offs in video classification. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 305-321 (2018)
35. Yang, X., Yang, X., Liu, M.Y., Xiao, F., Davis, L.S., Kautz, J.: Step: Spatio-temporal progressive learning for video action detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 264-272 (2019)
36. Yao, L., Torabi, A., Cho, K., Ballas, N., Pal, C., Larochelle, H., Courville, A.: Describing videos by exploiting temporal structure. In: Proceedings of the IEEE international conference on computer vision. pp. 4507-4515 (2015)
37. Yu, F., Wang, D., Shelhamer, E., Darrell, T.: Deep layer aggregation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2403-2412 (2018)
38. Zhao, J., Snoek, C.G.: Dance with flow: Two-in-one stream action detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 9935-9944 (2019)
39. Zhao, Y., Xiong, Y., Lin, D.: Recognize actions by disentangling components of dynamics. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 6566-6575 (2018)
40. Zhou, X., Wang, D., Krähenbühl, P.: Objects as points. arXiv preprint arXiv:1904.07850 (2019)
41. Zhou, X., Zhuo, J., Krähenbühl, P.: Bottom-up object detection by grouping extreme and center points. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 850-859 (2019)
actionsasmovingpoints/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:841436324c926f4f86a8a86507a5d75914867c4b39a420a72d061728ca330b02
+size 481630
actionsasmovingpoints/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f82b48e025e97c411335a36581594af9b0ba2deee4ff0ecf59b386239cf82ec7
+size 382724
activecrowdcountingwithlimitedsupervision/78314569-4d9a-4d8a-8006-ffa9e1ce7f64_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2fef285be96ea73873cf146e616776f48ea5c9d48cc47d710330b7700a967ae0
+size 79197
activecrowdcountingwithlimitedsupervision/78314569-4d9a-4d8a-8006-ffa9e1ce7f64_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f49e48b997e8def96687864af31b7c4b95ebf2c538d3b20a79cf8509432f9abe
+size 99882
activecrowdcountingwithlimitedsupervision/78314569-4d9a-4d8a-8006-ffa9e1ce7f64_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:787cce74f66b80b3647c0d3d23240558ec8d7bc46f956c0c837ba67e6cc87036
+size 1566718
activecrowdcountingwithlimitedsupervision/full.md
ADDED
@@ -0,0 +1,290 @@
# Active Crowd Counting with Limited Supervision
|
| 2 |
+
|
| 3 |
+
Zhen Zhao $^{1\star}$ , Miaojing Shi $^{2\star}$ , Xiaoxiao Zhao $^{1}$ , and Li Li $^{1,3}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> College of Electronic and Information Engineering, Tongji University
|
| 6 |
+
<sup>2</sup> King's College London
|
| 7 |
+
|
| 8 |
+
<sup>3</sup> Institute of Intelligent Science and Technology, Tongji University zhenzhao0917@gmail.com; miaojing.shi@kcl.ac.uk; lili@tongji.edu.cn
|
| 9 |
+
|
| 10 |
+
Abstract. To learn a reliable people counter from crowd images, head center annotations are normally required. Annotating head centers is however a laborious and tedious process in dense crowds. In this paper, we present an active learning framework which enables accurate crowd counting with limited supervision: given a small labeling budget, instead of randomly selecting images to annotate, we first introduce an active labeling strategy to annotate the most informative images in the dataset and learn the counting model upon them. The process is repeated such that in every cycle we select the samples that are diverse in crowd density and dissimilar to previous selections. In the last cycle when the labeling budget is met, the large amount of unlabeled data is also utilized: a distribution classifier is introduced to align the labeled data with unlabeled data; furthermore, we propose to mix up the distribution labels and latent representations of data in the network to particularly improve the distribution alignment in-between training samples. We follow the popular density estimation pipeline for crowd counting. Extensive experiments are conducted on standard benchmarks, i.e. ShanghaiTech, UCF_CC_50, Mall, TRANCOS, and DCC. By annotating a limited number of images (e.g. $10\%$ of the dataset), our method reaches levels of performance not far from the state of the art which utilizes full annotations of the dataset.
|
| 11 |
+
|
| 12 |
+
# 1 Introduction
|
| 13 |
+
|
| 14 |
+
The task of crowd counting in computer vision is to automatically count people numbers in images/videos. With the rapid growth of world's population, crowd gathering becomes more frequent than ever. To help with crowd control and public safety, accurate crowd counting is demanded.
|
| 15 |
+
|
| 16 |
+
Early methods count crowds via the detection of individuals [49, 2, 34]. They suffer from heavy occlusions in dense crowds. More importantly, learning such people detectors normally requires bounding box or instance mask annotations for individuals, which often makes it undesirable in large-scale applications. Modern methods mainly conduct crowd counting via density estimation [32, 60,
|
| 17 |
+
|
| 18 |
+

|
| 19 |
+
Active Labeling and Learning
|
| 20 |
+
Fig. 1: Given a crowd counting dataset, we propose an active learning framework (ALAC) which actively labels only a small proportion of the dataset and learns an accurate density estimation network using both labeled and unlabeled data.
|
| 21 |
+
|
| 22 |
+
44, 37, 26, 21, 20, 54]. Counting is realized by estimating a density map of an image whose integral over the image gives the total people count. Given a training image, its density map is obtained via Gaussian blurring at every head center. Head centers are the required annotations for training. Thanks to the powerful deep neural networks (DNNs) [17], density estimation based methods show a great success in recent progress [60, 39, 20, 35, 42, 54, 43, 25].
|
| 23 |
+
|
| 24 |
+
Despite above, annotating head centers in dense crowds is still a laborious and tedious process. For instance, it can take up to 10 minutes for our annotators to annotate a single image with 500 persons; while the popular counting dataset ShanghaiTech PartA [60] has 300 training images with an average of 501 persons per image! To substantially reduce the annotation cost, we study the crowd density estimation in a semi-supervised setting where only handful images are labeled while the rest are unlabeled. This setting has not been largely explored in crowd counting: [4,61] propose to actively annotate the most informative video frames for semi-supervised crowd counting, yet the algorithms are not deep learning based and rely on frame consecutiveness. Recently, some deep learning works propose to leverage additional web data [24, 23] or synthetic data [51] for crowd counting; images in existing dataset are still assumed annotated, or at least many of them. The model transferability is also evaluated in some works [12, 54] where a network is trained on a source dataset with full annotations and tested on a target dataset with no/few annotations.
|
| 25 |
+
|
| 26 |
+
Given an existing dataset and a powerful DNN, we find that 1) when learning from only a small subset, the performance can vary a lot depending on the subset selection; 2) for a specific subset that covers diverse crowd densities, the performance can be quite good (see results in Sec. 4.2). This motivates us to study crowd counting with very limited annotations yet producing very competitive precision. To achieve this goal, we propose an Active Learning framework for Accurate crowd Counting (AL-AC) as illustrated in Fig. 1: given a labeling budget, instead of randomly selecting images to annotate, we first introduce an active labeling strategy to iteratively annotate the most informative images in the
|
| 27 |
+
|
| 28 |
+
dataset and learn the counting model on them. In each cycle we select samples that cover different crowd densities and are also dissimilar to previous selections. Eventually, the large amount of unlabeled data is also included in the network training: we design a classifier with a gradient reversal layer [7] to align the intrinsic distributions of labeled and unlabeled data. Since all training samples contain the same object class, e.g. person, we propose to further align distributions in-between training samples by mixing up the latent representations and distribution labels among labeled and unlabeled data in the network. With very limited labeled data, our model produces very competitive counting results.
|
| 29 |
+
|
| 30 |
+
To summarize, several new elements are offered:
|
| 31 |
+
|
| 32 |
+
- We introduce an active learning framework for accurate crowd counting with limited supervision.
|
| 33 |
+
- We propose a partition-based sample selection with weights (PSSW) strategy to actively select and annotate both diverse and dissimilar samples for network training.
|
| 34 |
+
- We design a distribution alignment branch with latent MixUp to align the distribution between the labeled data and large amount of unlabeled data in the network.
|
| 35 |
+
|
| 36 |
+
Extensive experiments are conducted on standard counting benchmarks, i.e. ShanghaiTech [60], UCF_CC_50 [13], Mall [5], TRANCOS [9], and DCC [28]. Results demonstrate that, with a small number of labeled data, our AL-AC reaches levels of performance not far from state of the art fully-supervised methods.
|
| 37 |
+
|
| 38 |
+
# 2 Related works
|
| 39 |
+
|
| 40 |
+
In this section, we mainly survey deep learning based crowd counting methods and discuss semi-supervised learning and active learning in crowd counting.
|
| 41 |
+
|
| 42 |
+
# 2.1 Crowd counting
|
| 43 |
+
|
| 44 |
+
The prevailing crowd counting solution is to estimate a density map of a crowd image, whose integral gives the total person count of that image [60]. A density map encodes the spatial information of an image, and regressing it in a DNN has been demonstrated to be more robust than simply regressing a global crowd count [58, 26]. Due to the commonly occurring heavy occlusions and perspective distortions in crowd images, multi-scale or multi-resolution architectures are often exploited in DNNs: Ranjan et al. [35] propose an iterative crowd counting network which produces a low-resolution density map and uses it to generate the high-resolution density map. Cao et al. [3] propose a novel encoder-decoder network, where the encoder extracts multi-scale features with scale aggregation modules and the decoder generates high-resolution density maps using a set of transposed convolutions. Furthermore, Jiang et al. [15] develop a trellis encoder-decoder network that incorporates multiple decoding paths to hierarchically aggregate features at different encoding stages. In order to better utilize
|
| 45 |
+
|
| 46 |
+
multi-scale features in the network, the attention [21, 43], context [44, 22], or perspective [42, 55] information in crowd images is often leveraged into the network. Our work is a density estimation based approach.
|
| 47 |
+
|
| 48 |
+
# 2.2 Semi-supervised learning
|
| 49 |
+
|
| 50 |
+
Semi-supervised learning [29] refers to learning with a small amount of labeled data and a large amount of unlabeled data, and has been a popular paradigm in deep learning [52, 36, 18, 57]. It is traditionally studied for classification, where a label represents a class per image [19, 10, 36, 18]. In this work, we focus on semi-supervised learning in crowd counting, where the label of an image means the people count, with individual head points available in most cases. The common semi-supervised crowd counting solution is to leverage both labeled and unlabeled data into the learning procedure: Tan et al. [46] propose a semi-supervised elastic net regression method by utilizing sequential information between unlabeled samples and their temporally neighboring samples as a regularization term; Loy et al. [4] further improve it by utilizing both the spatial and temporal regularization in a semi-supervised kernel ridge regression problem; finally, in [61], graph Laplacian regularization and spatiotemporal constraints are incorporated into the semi-supervised regression. All these are not deep learning works and rely on temporal information among video frames.
|
| 51 |
+
|
| 52 |
+
Recently, Olmschenk et al. [30, 31] employ a generative adversarial network (GAN) in DNN to allow the usage of unlabeled data in crowd counting. Sam et al. [38] introduce an almost unsupervised learning method that only a tiny proportion of model parameters is trained with labeled data while vast parameters are trained with unlabeled data. Liu et al. [24, 23] propose to learn from unlabeled crowd data via a self-supervised ranking loss in the network. In [24, 23], they mainly assume the existence of a labeled dataset and add extra data from the web; in contrast, our AL-AC seeks a solution for accurate crowd counting with limited labeled data. Our method is also similar to [30, 31] in spirit of the distribution alignment between labeled and unlabeled data. While in [30, 31] they need to generate fake images to learn the discriminator in GAN which makes it hard to learn and converge. Our AL-AC instead mixes representations of labeled and unlabeled data in the network and learns the discriminator against them.
|
| 53 |
+
|
| 54 |
+
# 2.3 Active learning
|
| 55 |
+
|
| 56 |
+
Active learning defines a strategy determining data samples that, when added to the training set, improve a previously trained model most effectively [40]. Although it is not possible to obtain an universally good active learning strategy [6], there exist many heuristics [41], which have been proved to be effective in practice. Active learning has been explored in many applications such as image classification [45, 16] and object detection [8], while in this paper we focus on crowd counting. Methods in this context normally assume the availability of the whole counting set and choose samples from it, which is the so-called pool-based
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
Fig. 2: Overview of our active learning framework for accurate crowd counting (AL-AC). GRL: gradient reversal layer; GAP: global average pooling. PSSW: Partition-based sample selection with weights; Conv $1 \times 1$ : output channel is 1.
|
| 60 |
+
|
| 61 |
+
active learning [56]. [4] and [61] employ the graph-based approach to build adjacency matrix of all crowd images in the pool, sample selection is therefore cast as a matrix partitioning problem. Our work is also pool-based active learning.
|
| 62 |
+
|
| 63 |
+
Lately, Liu et al. [23] apply active learning in a DNN, where they measure the informativeness of unlabeled samples via mistakes made by the network on a self-supervised proxy task. The method is conducted iteratively, and in each cycle it selects a group of images based on their uncertainties to the model. The diversity of the selected images is however not carefully taken care of in their uncertainty measure, which might result in a biased selection within some specific count range. Our work instead interprets uncertainty from two perspectives: selected samples are diverse in crowd density and dissimilar to previous selections in each learning cycle. It should also be noted that [23] mainly focuses on adding extra unlabeled data to an existing labeled dataset, while our AL-AC seeks the limited data to be labeled within a given dataset.
|
| 64 |
+
|
| 65 |
+
# 3 Method
|
| 66 |
+
|
| 67 |
+
# 3.1 Problem
|
| 68 |
+
|
| 69 |
+
We follow crowd density estimation in the deep learning context, where density maps are pixel-wise regressed in a DNN [60, 20]. A ground truth density map is generated by convolving Gaussian kernels at head centers in an image [60]. The network is optimized through a loss function minimizing the prediction error over the ground truth. In this paper, we place our problem in a semi-supervised setting where we label only several or a few dozen images while the large remaining amount stays unlabeled. Both the labeled and unlabeled data will be exploited in model learning. Below, we introduce our active learning framework for accurate crowd counting (AL-AC).
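As a concrete illustration of the ground-truth generation step just described, the sketch below places a unit impulse at every annotated head center and blurs it with a fixed-width Gaussian, so that the resulting map integrates (approximately) to the head count. The function name and the fixed `sigma` are our own illustrative choices; the paper follows [60], which may also use geometry-adaptive kernel widths.

```python
import numpy as np
from scipy.ndimage import gaussian_filter

def density_map_from_points(points, height, width, sigma=4.0):
    """Build a ground-truth density map by convolving a Gaussian kernel
    at every annotated head center; the map integrates to the head count."""
    density = np.zeros((height, width), dtype=np.float32)
    for x, y in points:                       # head centers as (x, y) pixel coordinates
        col = min(int(round(x)), width - 1)
        row = min(int(round(y)), height - 1)
        density[row, col] += 1.0
    return gaussian_filter(density, sigma=sigma, mode="constant")

# usage: the integral over the map recovers (approximately) the annotated count
# dmap = density_map_from_points([(120.3, 40.7), (300.0, 88.2)], height=384, width=512)
# print(dmap.sum())   # close to 2.0
```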
|
| 70 |
+
|
| 71 |
+
# 3.2 Overview
|
| 72 |
+
|
| 73 |
+
Our algorithm follows an active learning pipeline in general. It is an iterative process in which a model is learnt in each cycle and a set of samples is chosen to be labeled from a pool of unlabeled samples [41]. In the classic setting, only a single sample is chosen in each cycle. This is however not practical for DNNs, as it would require training as many models as there are samples, and many practical problems of interest are very large-scale [40]. Hence, the commonly used strategy is batch mode selection [50, 23], where a subset is selected and labeled in each cycle. This subset is added into the labeled set to update the model, and the selection is repeated in the next cycle. The procedure continues until a predefined criterion is met, e.g. a fixed budget.
|
| 74 |
+
|
| 75 |
+
Our method is illustrated in Fig. 2: given a dataset $\mathcal{A}$ with labeling budget $M$ (number of images as in [38, 23]), we start by labeling $m$ samples uniformly at random from $\mathcal{A}$. For each labeled sample $v_{i}$, we generate its count label $c_{i}$ and density map $d_{i}$ based on the annotated head points in $v_{i}$. We denote $\mathcal{V}^{1} = \{v_{i}, c_{i}, d_{i}\}$ and $\mathcal{U}^{1} = \{u_{j}\}$ as the labeled and unlabeled set in cycle 1, respectively. A DNN regressor $R^{1}$ is trained on $\mathcal{V}^{1}$ for crowd density estimation. Based on $R^{1}$'s estimation of density maps on $\mathcal{U}^{1}$, we propose a partition-based sample selection with weights strategy to select and annotate $m$ samples from $\mathcal{U}^{1}$. These samples are added to $\mathcal{V}^{1}$, so we have the updated labeled and unlabeled sets $\mathcal{V}^{2}$ and $\mathcal{U}^{2}$ in the $2^{\mathrm{nd}}$ cycle. Model $R^{1}$ is further trained on $\mathcal{V}^{2}$ and updated as $R^{2}$. The prediction of $R^{2}$ is better than that of $R^{1}$ as it uses more labeled data; we use the new prediction on $\mathcal{U}^{2}$ to again select $m$ samples and add them to $\mathcal{V}^{2}$. The process moves on until the labeling budget $M$ is met. The unlabeled set $\mathcal{U}$ is also employed in network training through our proposed distribution alignment with latent MixUp. We only use $\mathcal{U}$ ($\mathcal{U}^{T}$) in the last learning cycle $T$, as we observe that adding it in every cycle does not bring accumulative benefits but rather additional training cost.
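Procedurally, the cycle just described can be summarized by the following sketch; `train_regressor`, `select_pssw`, and `annotate` are hypothetical placeholders standing in for the density-regressor training, the PSSW selection of Sec. 3.3, and manual labeling, and are not functions from the authors' code.

```python
import random

def active_learning_loop(pool, budget_M, step_m, train_regressor, select_pssw, annotate):
    """Sketch of the AL-AC cycle: label step_m images per cycle until the
    budget M is met; unlabeled data joins training only in the last cycle."""
    unlabeled = list(pool)

    # cycle 1: label m images uniformly at random
    picked = random.sample(unlabeled, step_m)
    labeled = annotate(picked)                          # attach count + density-map labels
    unlabeled = [u for u in unlabeled if u not in picked]
    model = train_regressor(labeled, unlabeled=None)

    while len(labeled) < budget_M:
        last_cycle = len(labeled) + step_m >= budget_M
        picked = select_pssw(model, unlabeled, labeled, step_m)   # Sec. 3.3
        labeled += annotate(picked)
        unlabeled = [u for u in unlabeled if u not in picked]
        # distribution alignment with latent MixUp only in the final cycle (Sec. 3.4)
        model = train_regressor(labeled, unlabeled=unlabeled if last_cycle else None)
    return model
```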
|
| 76 |
+
|
| 77 |
+
The backbone network is not specified in Fig. 2 as it can be any standard backbone. We will detail our selection of backbone, $M$ , $m$ and $R$ in Sec. 4. Below we introduce our partition-based sample selection with weights and distribution alignment with latent MixUp. Overall loss function is given in this end.
|
| 78 |
+
|
| 79 |
+
# 3.3 Partition-based sample selection with weights (PSSW)
|
| 80 |
+
|
| 81 |
+
In each learning cycle, we want to annotate the most informative/uncertain samples and add them to the network. The informativeness/uncertainty of samples is evaluated from two perspectives: diverse in density and dissimilar to previous selections. It is observed that crowd data often forms a well structured manifold where different crowd densities normally distribute smoothly within the manifold space [4]; the diversity is to select crowd samples that cover different crowd densities in the manifold. This is realized by separating the unlabeled set into different density partitions for diverse selection. Within each partition, we want to select those samples that are dissimilar to previous labeled samples, such that the model has not seen them. The dissimilarity is measured considering both local
|
| 82 |
+
|
| 83 |
+
crowd density and global crowd count: we introduce a grid-based dissimilarity measure (GDSIM) for this purpose. Below, we formulate our partition-based sample selection with weights.
|
| 84 |
+
|
| 85 |
+
Formally, given the model $R^t$ , unlabeled set $\mathcal{U}^t$ and labeled set $\mathcal{V}^t$ in $t^{th}$ cycle, we denote by $\widetilde{c}_j$ the predicted crowd count by $R^t$ for an unlabeled image $u_j$ . The histogram of all $\widetilde{c}_j$ on $\mathcal{U}^t$ discloses the overall density distribution. For the sake of diversity, we want to partition the histogram into $m$ parts and select one sample from each. Since the crowd counts are not evenly distributed (see Fig. 3: Left), sampling images evenly from the histogram can end up with a biased view of the original distribution. We therefore employ the Jenks natural breaks optimization [14] to partition the histogram. Jenks minimizes the variation within each range, so the partitions between ranges reflect the natural breaks of the histogram (Fig. 3).
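Jenks natural breaks [14] is available in standard GIS toolkits; purely as an illustration of this partitioning step, a small dynamic-programming version (minimize the within-partition variance of the predicted counts, then assign every unlabeled image to a partition) might look like the sketch below. The helper names are ours, not the paper's.

```python
import numpy as np

def jenks_breaks(values, n_classes):
    """Fisher-Jenks natural breaks by dynamic programming: split sorted 1-D data
    into n_classes groups minimizing the within-group sum of squared deviations."""
    x = np.sort(np.asarray(values, dtype=float))
    n = len(x)
    csum = np.concatenate(([0.0], np.cumsum(x)))
    csum2 = np.concatenate(([0.0], np.cumsum(x ** 2)))

    def sse(i, j):  # cost of putting x[i..j] (inclusive) into one group
        s, s2, m = csum[j + 1] - csum[i], csum2[j + 1] - csum2[i], j - i + 1
        return s2 - s * s / m

    dp = np.full((n, n_classes + 1), np.inf)
    split = np.zeros((n, n_classes + 1), dtype=int)
    for j in range(n):
        dp[j, 1] = sse(0, j)
    for c in range(2, n_classes + 1):
        for j in range(c - 1, n):
            for i in range(c - 1, j + 1):
                cand = dp[i - 1, c - 1] + sse(i, j)
                if cand < dp[j, c]:
                    dp[j, c], split[j, c] = cand, i
    breaks, j = [], n - 1
    for c in range(n_classes, 1, -1):       # backtrack the lower bound of each group
        i = split[j, c]
        breaks.append(x[i])
        j = i - 1
    return sorted(breaks)

# usage: partition predicted counts of the unlabeled pool into m density groups
# counts = predicted_counts_on_unlabeled    # one predicted count per unlabeled image
# groups = np.digitize(counts, jenks_breaks(counts, n_classes=m))
```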
|
| 86 |
+
|
| 87 |
+
Within each partition $P_{k}$ , inspired by grid average mean absolute error (GAME) [9], we propose a grid-based dissimilarity from an unlabeled sample to labeled samples. Given an image $i$ , GAME is originally introduced as an evaluation measure for density estimation,
|
| 88 |
+
|
| 89 |
+
$$
\operatorname{GAME}(L) = \sum_{l=1}^{4^{L}} \left| \widetilde{c_{i}^{l}} - c_{i}^{l} \right|, \tag{1}
$$
|
| 92 |
+
|
| 93 |
+
where $\widetilde{c_i^l}$ is the estimated count in region $l$ of image $i$ . It can be obtained via the integration over the density $\widetilde{d}_i^l$ of that region $l$ ; $c_{i}^{l}$ is the corresponding ground truth count. Given a specific level $L$ , GAME $(L)$ subdivides the image using a grid of $4^{L}$ non-overlapping regions which cover the full image (Fig. 3); the difference between the prediction and ground truth is the sum of the mean absolute error (MAE) in each of these regions. With different $L$ , GAME indeed offers moderate ways to compute the dissimilarity between two density maps, taking care of both global counts and local details. Building on GAME, we introduce grid-based dissimilarity measure GDSIM as,
|
| 94 |
+
|
| 95 |
+
$$
\underset{u_{j} \in \mathcal{P}_{k}}{\operatorname{GDSIM}}\left(u_{j}, L_{A}\right) = \min_{i,\, v_{i} \in \mathcal{P}_{k}} \left( \sum_{L=0}^{L_{A}} \sum_{l=1}^{4^{L}} \left| \widetilde{c_{j}^{l}} - c_{i}^{l} \right| \right), \tag{2}
$$
|
| 98 |
+
|
| 99 |
+
where $u_{j}$ and $v_{i}$ are from the unlabeled set $\mathcal{U}^t$ and labeled set $\mathcal{V}^t$, respectively; they both fall into the $k$-th partition $\mathcal{P}_k$. $\widetilde{c_j^l}$ and $c_i^l$ are crowd counts in region $l$ as in formula (1), but for the different images $u_{j}$ and $v_{i}$ (see Fig. 3: Right). Given the level $L_{A}$, unlike GAME, we compute the dissimilarity between $u_{j}$ and $v_{i}$ by traversing all levels from 0 to $L_{A}$ (Fig. 3). In this way, the dissimilarity is computed based on both global count $(L = 0)$ and local density $(L = L_{A})$ differences. Afterwards, instead of averaging the dissimilarity scores from $u_{j}$ to all the $v_{i}$ in $\mathcal{P}_k$, we use the minimum, so that if $u_{j}$ is close to any one of the labeled images, it is regarded as a familiar sample to the model. Ideally, we should choose the most dissimilar sample from each partition; nevertheless, the crowd count $\widetilde{c_j^l}$ in formula (2) is not
|
| 100 |
+
|
| 101 |
+

|
| 102 |
+
Fig. 3: Illustration of Jenks natural breaks (Left) and grid-based dissimilarity measure (GDSIM, Right). We take the histogram of crowd count on SHB.
|
| 103 |
+
|
| 104 |
+

|
| 105 |
+
|
| 106 |
+
ground truth. We convert the GDSIM scores to probabilities and adopt weighted random selection to label one sample from each partition.
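To make the selection step concrete, a minimal sketch of formula (2) plus the weighted random pick is given below: it compares grid-level counts of the predicted density map of an unlabeled image against the ground-truth maps of labeled images in the same partition, takes the minimum over labeled images, and samples one image with probability proportional to its dissimilarity. The function names and the equal-size grid splitting are our own simplifications, not the authors' implementation.

```python
import numpy as np

def grid_counts(density_map, level):
    """Counts over the 4**level non-overlapping grid cells of a density map."""
    h, w = density_map.shape
    g = 2 ** level
    rows = np.array_split(np.arange(h), g)
    cols = np.array_split(np.arange(w), g)
    return np.array([density_map[np.ix_(r, c)].sum() for r in rows for c in cols])

def gdsim(pred_map_u, gt_maps_labeled, L_A=3):
    """Formula (2): minimum over labeled images of the summed grid-count
    differences across all levels 0..L_A."""
    scores = []
    for gt in gt_maps_labeled:
        s = 0.0
        for L in range(L_A + 1):
            s += np.abs(grid_counts(pred_map_u, L) - grid_counts(gt, L)).sum()
        scores.append(s)
    return min(scores)

def pick_one_per_partition(unlabeled_maps, labeled_maps, L_A=3, rng=None):
    """Weighted random selection: more dissimilar images are more likely picked."""
    rng = rng or np.random.default_rng()
    scores = np.array([gdsim(u, labeled_maps, L_A) for u in unlabeled_maps])
    total = scores.sum()
    probs = scores / total if total > 0 else np.full(len(scores), 1.0 / len(scores))
    return rng.choice(len(unlabeled_maps), p=probs)
```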
|
| 107 |
+
|
| 108 |
+
# 3.4 Distribution alignment with latent MixUp
|
| 109 |
+
|
| 110 |
+
Since labeled data only represents partial crowd manifold, particularly when they are limited, distribution alignment with large amount of unlabeled data becomes necessary even within the same domain. In order for the model to learn a proper subspace representation of the entire set, we introduce distribution alignment with latent MixUp.
|
| 111 |
+
|
| 112 |
+
We assign labeled data with distribution labels 0 while unlabeled data with labels 1. A distribution classifier branched off from the deep extractor ( $\phi$ in Fig. 2) is designed: it is composed of a gradient reversal layer (GRL) [7], $1 \times 1$ convolution layer and global average pooling (GAP) layer. The GRL multiplies the gradient by a certain negative constant (-1 in this paper) during the network back propagation; it enforces that the feature distributions over the labeled and unlabeled data are made as indistinguishable as possible for the distribution classifier, thus aligning them together.
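A gradient reversal layer in the sense of [7] is typically written as a custom autograd function; a minimal PyTorch sketch of this branch (GRL, then a $1 \times 1$ convolution, then global average pooling to one distribution logit per image) could look as follows. The class names are illustrative, and the reversal constant is fixed to $-1$ as stated above.

```python
import torch
import torch.nn as nn

class GradReverse(torch.autograd.Function):
    """Identity in the forward pass; multiplies the gradient by -1 on the way back."""
    @staticmethod
    def forward(ctx, x):
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -grad_output

class DistributionBranch(nn.Module):
    """GRL -> 1x1 convolution -> global average pooling, one logit per image."""
    def __init__(self, in_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, 1, kernel_size=1)
        self.gap = nn.AdaptiveAvgPool2d(1)

    def forward(self, features):
        x = GradReverse.apply(features)   # reverse gradients flowing to the extractor
        x = self.conv(x)
        return self.gap(x).flatten(1)     # shape (N, 1): distribution logits
```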
|
| 113 |
+
|
| 114 |
+
The hard distribution labels create hard boundaries between labeled and unlabeled data. To further merge the distributions and particularly align in between training samples, we adapt the idea from MixUp [59]. MixUp normally trains a model on random convex combinations of raw inputs and their corresponding labels. It encourages the model to behave linearly "between" training samples, as this linear behavior reduces the amount of undesirable oscillations when predicting outside the training samples. It has been popularly employed in several semi-supervised classification works [1,47,48,59]. In this work, we integrate it into our distribution alignment branch for semi-supervised crowd counting. We find that mixing raw input images does not work for our problem. Instead we propose to mix their latent representations in the network: supposedly we have two images, $x_{1}$ , $x_{2}$ , and their distribution labels $y_{1}$ , $y_{2}$ , respectively. The latent representations of $x_{1}$ and $x_{2}$ are produced by the deep extractor $\phi$ as two tensors $(\phi(x_{1})$ and $\phi(x_{2})$ ) from the last convolutional layer of the backbone.
|
| 115 |
+
|
| 116 |
+
We mix up $(\phi(x_1), y_1)$ , $(\phi(x_2), y_2)$ with a weight $\lambda'$ as
|
| 117 |
+
|
| 118 |
+
$$
z' = \lambda' \phi(x_{1}) + (1 - \lambda') \phi(x_{2}),
$$

$$
y' = \lambda' y_{1} + (1 - \lambda') y_{2}. \tag{3}
$$
|
| 125 |
+
|
| 126 |
+
where $(z', y')$ denotes the mixed latent representation and label. $\lambda'$ is generated in the same way with [1]: $\lambda' = \max(\lambda, 1 - \lambda)$ , $\lambda \sim \mathrm{Beta}(\alpha, \alpha)$ ; $\alpha$ is a hyperparameter set to 0.5. Both labeled and unlabeled data can be mixed. For two samples with the same label, their mixed label remains. We balance the number of labeled and unlabeled data with data augmentation (see Sec. 4.1) so a mixed pair can be composed of labeled or unlabeled data with (almost) the same probability. MixUp enriches the distribution in-between training samples. Together with GRL, it allows the network to elaborately knit the distributions of labeled and unlabeled data. The alignment is only carried out in the last active learning cycle as an efficient practice. The network training proceeds with a multi-task optimization that minimizes the density regression loss on labeled data and the distribution classification loss for all data including mixed ones, specified below.
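In code, the mixing of formula (3) with $\lambda'$ drawn as above reduces to a few lines; the sketch assumes the two feature tensors already share the same spatial size (the paper crops paired patches to a common size), and the function name is ours.

```python
import numpy as np
import torch

def latent_mixup(feat1, y1, feat2, y2, alpha=0.5):
    """Mix two latent feature maps and their distribution labels (formula (3)).
    feat1/feat2: backbone outputs of identical shape; y1/y2: 0 (labeled) or 1 (unlabeled)."""
    lam = float(np.random.beta(alpha, alpha))
    lam = max(lam, 1.0 - lam)                     # lambda' = max(lambda, 1 - lambda), as in [1]
    z_mix = lam * feat1 + (1.0 - lam) * feat2     # mixed latent representation z'
    y_mix = lam * y1 + (1.0 - lam) * y2           # soft distribution label y' in [0, 1]
    return z_mix, torch.tensor([y_mix], dtype=feat1.dtype)
```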
|
| 127 |
+
|
| 128 |
+
# 3.5 Loss function
|
| 129 |
+
|
| 130 |
+
For density regression, we adopt the commonly used pixel-wise MSE loss $\mathcal{L}_{reg}$ :
|
| 131 |
+
|
| 132 |
+
$$
\mathcal{L}_{reg} = \frac{1}{2K} \sum_{k=1}^{K} \| d_{k}^{e} - d_{k}^{g} \|_{2}^{2} \tag{4}
$$
|
| 135 |
+
|
| 136 |
+
$d_k^e$ and $d_k^g$ denote the density map prediction and ground truth of image $k$ , respectively. $K$ is the number of labeled images. For the distribution classification, since distribution labels for mixed samples can be non-integers, we adopt the binary cross entropy with logits loss $\mathcal{L}_{dc}$ , which combines a Sigmoid layer with the binary cross entropy loss. Given an image pair, $\mathcal{L}_{dc}$ is computed on each individual as well as their mixed representations (see Fig. 2). The overall multi-task loss function is given by
|
| 137 |
+
|
| 138 |
+
$$
\mathcal{L} = \mathcal{L}_{reg} + \beta \mathcal{L}_{dc} \tag{5}
$$
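Combining Eqs. (4) and (5), the overall objective can be sketched as below, with $\beta = 3$ as used in Sec. 4.1; the binary cross-entropy with logits is applied to the distribution logits of individual and mixed representations alike. Variable names are ours.

```python
import torch.nn.functional as F

def alac_loss(pred_density, gt_density, dist_logits, dist_labels, beta=3.0):
    """Eq. (5): pixel-wise MSE density regression (Eq. (4)) on labeled images
    plus weighted BCE-with-logits distribution classification on all images."""
    # Eq. (4): 1/(2K) * sum_k || d_k^e - d_k^g ||_2^2  (mean over the K labeled images)
    l_reg = 0.5 * ((pred_density - gt_density) ** 2).flatten(1).sum(dim=1).mean()
    l_dc = F.binary_cross_entropy_with_logits(dist_logits, dist_labels)
    return l_reg + beta * l_dc
```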
|
| 141 |
+
|
| 142 |
+
# 4 Experiments
|
| 143 |
+
|
| 144 |
+
We conduct our experiments on three counting datasets: ShanghaiTech [60], UCF_CC_50 [13], Mall [5]. In the supplementary material, we offer more results not only in the three datasets for people counting, but also in the TRANCOS [9] and DCC [28] datasets for vehicle and cell counting, respectively.
|
| 145 |
+
|
| 146 |
+
# 4.1 Experimental Setup
|
| 147 |
+
|
| 148 |
+
Datasets. ShanghaiTech [60] consists of 1,198 annotated images with a total of 330,165 people with head center annotations. This dataset is split into SHA and
|
| 149 |
+
|
| 150 |
+
SHB. The average crowd counts are 501.4 and 123.6, respectively. Following [60], we use 300 images for training and 182 images for testing in SHA; 400 images for training and 316 images for testing in SHB. UCF_CC_50 [13] has 50 images with 63,974 head center annotations in total. The head counts range between 94 and 4,543 per image. The small dataset size and large variance make this a very challenging counting dataset. We call it UCF for short. Following [13], we perform 5-fold cross validations to report the average test performance. Mall [5] contains 2000 frames collected in a shopping mall. Each frame on average has only 31 persons. The first 800 frames are used as the training set and the rest 1200 frames as the test set.
|
| 151 |
+
|
| 152 |
+
Implementation details. The backbone $(\phi)$ design follows [20]: VGGnet with 10 convolutional and 6 dilated convolutional layers, it is pretrained on ILSVRC classification task. We follow the setting in [20] to generate ground truth density maps. To have a strong baseline, the training set is augmented by randomly cropping patches of $1/4$ size of each image. We set a reference number 1200, both labeled and unlabeled data in each dataset are augmented up to this number to have a balanced distribution. For instance, if we have 30 labeled images, we need to crop 40 patches from each image to augment it to 1200. We feed the network with a minibatch of two image patches each time. In order to have the same size of two patches, we further crop them to keep the shorter width and height of the two. We set the learning rate as 1e-7, momentum 0.95 and weight decay 5e-4. We train 100 epochs with SGD optimizer for each active learning cycle and before the last cycle, the network is trained with only labeled data. In the last cycle, it is trained with both labeled and unlabeled data. In all experiments, $L_{A}$ is 3 for GDSIM (2) and $\beta$ is 3 for loss weight (5).
|
| 153 |
+
|
| 154 |
+
Evaluation protocol. We evaluate the counting performance via the commonly used mean absolute error (MAE) and mean square error (MSE) [39, 44, 21] which measures the difference between the counts of ground truth and estimation. For active learning, we choose to label around $10\%$ images of the entire set, which goes along with our setting of limited supervision. $m$ is chosen not too small so that we can normally reach the labeling budget in about 2-4 active learning cycles. Sec. 5 gives a discussion on the time complexity. $M$ and $m$ are by default 30/40 and 10 on SHA and SHB, 10 and 3 on UCF (initial number is 4), 80 and 20 on Mall, respectively. We also evaluate different $M$ and $m$ to show the effectiveness of our method. The baseline is to randomly label $M$ images and train a regression model using the same backbone with our AL-AC but without distribution alignment. As in [4,61], taken the randomness into account, we repeat each experiment with 10 trials for both mean and standard deviation, to show the improvement of our method over baseline.
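For reference, the two measures can be computed directly from predicted and ground-truth counts; the sketch below follows the convention of counting papers (e.g. [60, 20]) in which the reported MSE is the root of the mean squared error.

```python
import numpy as np

def counting_errors(pred_counts, gt_counts):
    """MAE and (root) MSE between predicted and ground-truth image-level counts."""
    pred = np.asarray(pred_counts, dtype=float)
    gt = np.asarray(gt_counts, dtype=float)
    mae = np.abs(pred - gt).mean()
    mse = np.sqrt(((pred - gt) ** 2).mean())
    return mae, mse
```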
|
| 155 |
+
|
| 156 |
+
# 4.2 ShanghaiTech
|
| 157 |
+
|
| 158 |
+
Ablation study. The proposed partition-based sample selection with weights and distribution alignment with latent MixUp are ablated.
|
| 159 |
+
|
| 160 |
+
Labeling budget $M$ and $m$ . As mentioned in Sec. 4.1, we set $M = 30/40$ and $m = 10$ by default. Comparable experiments are offered in two ways. First,
|
| 161 |
+
|
| 162 |
+
<table><tr><td>Dataset</td><td colspan="2">SHA</td><td colspan="2">SHB</td></tr><tr><td>Method</td><td>PSSW</td><td>RS</td><td>PSSW</td><td>RS</td></tr><tr><td>M=10, m=10</td><td>121.2±9.3</td><td>121.2±9.3</td><td>20.5±4.8</td><td>20.5±4.8</td></tr><tr><td>M=20, m=10</td><td>96.7±7.3</td><td>111.5±7.4</td><td>17.0±1.9</td><td>19.3±2.2</td></tr><tr><td>M=30, m=10</td><td>93.5±2.9</td><td>102.1±7.0</td><td>15.7±1.5</td><td>19.9±3.1</td></tr><tr><td>M=40, m=10</td><td>85.4±2.5</td><td>93.8±5.6</td><td>14.6±1.3</td><td>17.9±1.9</td></tr><tr><td>M=30, m=5</td><td>92.6±3.1</td><td>102.1±7.0</td><td>15.1±1.5</td><td>19.9±3.1</td></tr><tr><td>M=40, m=5</td><td>84.4±2.6</td><td>93.8±5.6</td><td>14.4±1.2</td><td>17.9±1.9</td></tr></table>
|
| 163 |
+
|
| 164 |
+
Table 1: Ablation study of the proposed partition-based sample selection with weights (PSSW) strategy. Left: comparison against random selection (RS). Right: comparison to some variants of PSSW; Even Partition means evenly splitting on the histogram of crowd count; Global Diff refers to using global count difference for dissimilarity. MAE is reported on SHA and SHB.
|
| 165 |
+
|
| 166 |
+
<table><tr><td>M=40, m=10</td><td>SHA</td><td>SHB</td></tr><tr><td>RS (Baseline)</td><td>93.8</td><td>17.9</td></tr><tr><td>Even Partition</td><td>89.6</td><td>16.2</td></tr><tr><td>Global Diff</td><td>86.6</td><td>15.3</td></tr><tr><td>PSSW</td><td>84.4</td><td>14.4</td></tr></table>
|
| 167 |
+
|
| 168 |
+
keeping $m = 10$ , we vary $M$ from 10 to 40. The results are shown in Table 1. We compare our partition-based sample selection with weights (PSSW) with random selection (RS); distribution alignment is not added in this experiment. For PSSW, its MAE on SHA is gradually decreased from 121.2 with $M = 10$ to 85.4 with $M = 40$ , the standard deviation is also decreased from 9.3 to 2.5. The MAE result is in general 10 points lower than RS. With different $M$ , PSSW also produces lower MAE than RS on SHB. For example, with $M = 40$ , PSSW yields an MAE of 14.6 v.s. 17.9 for RS.
|
| 169 |
+
|
| 170 |
+
Second, by keeping $M = 30/40$ , we decrease $m$ from 10 to 5 and repeat the experiment. Results show that having a small $m$ indeed works slightly better: for instance, PSSW with $M = 30$ and $m = 5$ reduces MAE by 1.0 on SHA compared to PSSW with $M = 30$ and $m = 10$ . On the other hand, $m$ can not be too small as discussed in Sec. 3.2 and Sec. 5. In practice, we still keep $m = 10$ for both efficiency and effectiveness.
|
| 171 |
+
|
| 172 |
+
Variants of PSSW. Our PSSW has two components: the Jenks-based partition for diversity, and the GDSIM for dissimilarity (Sec. 3). In order to show the effectiveness of each, we present two variants of PSSW: Even Partition and Global Diff. Even Partition means that the Jenks-based partition is replaced by evenly splitting the ranges on the histogram of crowd count while GDSIM remains; Global Diff means that GDSIM is replaced by the global count difference to measure the dissimilarity while the Jenks-based partition remains. We report MAE on SHA and SHB in Table 1: Right. It can be seen that Even Partition produces MAE 89.6 on SHA and 16.2 on SHB, while Global Diff produces 86.6 and 15.3. Both are clearly inferior to PSSW (84.4 and 14.4). This suggests the importance of the proposed diversity and dissimilarity measures.
|
| 173 |
+
|
| 174 |
+
Distribution alignment with latent MixUp. Our proposed distribution alignment with latent MixUp is composed of two elements: the distribution classifier with GRL and the latent MixUp (Sec. 3.4). To demonstrate their effectiveness, we present the results of PSSW plus the GRL classifier (denoted as PSSW + GRL), and plus latent MixUp (denoted as PSSW + GRL + MX) in Table 2. Taking $M = 40$ as an example, adding GRL and MX to PSSW brings a 5.0-point
|
| 175 |
+
|
| 176 |
+
<table><tr><td>Dataset</td><td colspan="2">SHA</td><td colspan="2">SHB</td></tr><tr><td>M = 30, m =10</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td></tr><tr><td>PSSW</td><td>93.5±2.9</td><td>151.0±15.1</td><td>15.7±1.5</td><td>28.3±3.4</td></tr><tr><td>PSSW+GRL</td><td>90.8±2.7</td><td>144.9±14.5</td><td>14.7±1.3</td><td>27.8±2.9</td></tr><tr><td>PSSW+GRL+MX</td><td>87.9±2.3</td><td>139.5±12.7</td><td>13.9±1.2</td><td>26.2±2.5</td></tr><tr><td>M = 40, m =10</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td></tr><tr><td>PSSW</td><td>85.4±2.5</td><td>144.7±10.7</td><td>14.6±1.3</td><td>24.6±3.0</td></tr><tr><td>PSSW+GRL</td><td>82.7±2.4</td><td>140.9±11.3</td><td>13.7±1.3</td><td>23.5±2.2</td></tr><tr><td>PSSW+GRL+MX</td><td>80.4±2.4</td><td>138.8±10.1</td><td>12.7±1.1</td><td>20.4±2.1</td></tr></table>
|
| 177 |
+
|
| 178 |
+
<table><tr><td>M=40, m=10</td><td>SHA</td><td>SHB</td></tr><tr><td>RS (Baseline)</td><td>93.8</td><td>17.9</td></tr><tr><td>RS+GRL+MX</td><td>87.3</td><td>15.1</td></tr><tr><td>PSSW</td><td>84.4</td><td>14.4</td></tr><tr><td>PSSW+GRL+MX</td><td>80.4</td><td>12.7</td></tr></table>
|
| 179 |
+
|
| 180 |
+
Table 2: Ablation study of the proposed distribution alignment with latent MixUp. Left: analysis on latent MixUp (MX) and gradient reversal layer (GRL). Right: comparison against RS plus GRL and MX. MAE is reported in the right table.
|
| 181 |
+
|
| 182 |
+
MAE decrease on SHA and a 1.9-point decrease on SHB. Specifically, the MX contributes 2.3 and 1.0 points of the decrease on SHA and SHB, respectively. The same observation goes for MSE: by adding GRL and MX, it decreases from 144.7 to 138.8 on SHA, and from 24.6 to 20.4 on SHB.
|
| 183 |
+
|
| 184 |
+
To make a further comparison, we also add the proposed distribution alignment with latent MixUp to RS in Table 2: Right, where we achieve MAE 87.3 on SHA and 15.1 on SHB. Adding GRL+MX to RS also improves the baseline: the performance difference between PSSW and RS becomes smaller; yet, the absolute value of the difference is still big, which justifies our PSSW. Notice PSSW $+\mathrm{GRL} + \mathrm{MX}$ is the final version of our AL-AC hereafter.
|
| 185 |
+
|
| 186 |
+
Comparison with fully-supervised methods. We compare our work with prior arts [60, 39, 20, 35, 42, 43, 27]. All these approaches are fully-supervised methods which utilize annotations of the entire dataset (300 images in SHA and 400 in SHB), while in our setting we label only 30/40 images, about $10\%$ of the entire set. It can be seen that our method outperforms the representative methods [60, 39] from a few years ago, and is not far from other recent arts, i.e. [20, 35, 42, 43, 27]. A direct comparison to ours is CSRNet [20], as we share the same backbone. With about $10\%$ labeled data, our AL-AC retains $85\%$ accuracy on SHA (68.2 / 80.4) and $83\%$ accuracy on SHB (10.6 / 12.7). Compared to our baseline (denoted as RS in Table 1), AL-AC in general produces significantly lower MAE, e.g. 87.9 v.s. 102.1 on SHA with $M = 30$, and 12.7 v.s. 17.9 on SHB with $M = 40$.
|
| 187 |
+
|
| 188 |
+
Despite that we only label $10\%$ data, our distribution alignment with latent MixUp indeed enables us to make use of more unlabeled data across datasets: for instance, a simple implementation with $\mathrm{M} = 40$ on SHA, if we add SHB as unlabeled data to AL-AC for distribution alignment, we obtain an even lower MAE 78.6 v.s. 80.4 in Table 3.
|
| 189 |
+
|
| 190 |
+
Comparison with semi-supervised methods. There are also some semi-supervised crowd counting methods [23, 38, 31] $^{1}$ . For instance in [38, 31], with $M = 50$ they produce MAE 170.0 and 136.9 on SHA, respectively. These are much higher MAE than ours. Since [38, 31] use different architectures from AL-
|
| 191 |
+
|
| 192 |
+
<table><tr><td>Dataset</td><td colspan="2">SHA</td><td colspan="2">SHB</td><td>Counting</td><td colspan="2">UCF</td></tr><tr><td>Measures</td><td>MAE</td><td>MSE</td><td>MAE</td><td>MSE</td><td>Measures</td><td>MAE</td><td>MSE</td></tr><tr><td>MCNN [60]</td><td>110.2</td><td>173.2</td><td>26.4</td><td>41.3</td><td>MCNN [60]</td><td>377.6</td><td>509.1</td></tr><tr><td>Switching CNN [39]</td><td>90.4</td><td>135.0</td><td>21.6</td><td>33.4</td><td>Switching CNN [39]</td><td>318.1</td><td>439.2</td></tr><tr><td>CSRNet [20]</td><td>68.2</td><td>115.0</td><td>10.6</td><td>16.0</td><td>CP-CNN[44]</td><td>295.8</td><td>320.9</td></tr><tr><td>ic-CNN [35]</td><td>68.5</td><td>116.2</td><td>10.7</td><td>16.0</td><td>CSRNet [20]</td><td>266.1</td><td>397.5</td></tr><tr><td>PACNN [42]</td><td>62.4</td><td>102.0</td><td>7.6</td><td>11.8</td><td>ic-CNN [35]</td><td>260.0</td><td>365.5</td></tr><tr><td>CFF [43]</td><td>65.2</td><td>109.4</td><td>7.2</td><td>11.2</td><td>PACNN [42]</td><td>241.7</td><td>320.7</td></tr><tr><td>BAYESIAN+ [27]</td><td>62.8</td><td>101.8</td><td>7.7</td><td>12.7</td><td>BAYESIAN+ [27]</td><td>229.3</td><td>308.2</td></tr><tr><td>Baseline (M = 30)</td><td>102.1</td><td>164.0</td><td>19.9</td><td>30.6</td><td>Baseline (M=10, m=3)</td><td>444.7± 25.9</td><td>600.3± 32.7</td></tr><tr><td>AL-AC (M = 30)</td><td>87.9</td><td>139.5</td><td>13.9</td><td>26.2</td><td>AL-AC (M=10, m=3)</td><td>351.4± 19.2</td><td>448.1± 24.5</td></tr><tr><td>Baseline (M =40)</td><td>93.8</td><td>150.9</td><td>17.9</td><td>27.3</td><td>Baseline (M=20, m=10)</td><td>417.2± 29.8</td><td>550.1± 25.5</td></tr><tr><td>AL-AC (M =40)</td><td>80.4</td><td>138.8</td><td>12.7</td><td>20.4</td><td>AL-AC (M=20, m=10)</td><td>318.7± 23.0</td><td>421.6± 24.1</td></tr></table>
|
| 193 |
+
|
| 194 |
+
Table 3: Comparison of AL-AC to the state of the art on SHA and SHB. Table 4: Comparison of AL-AC with the state of the art on UCF.
|
| 195 |
+
|
| 196 |
+

|
| 197 |
+
Fig. 4: Examples of AL-AC on SHA, SHB, UCF, TRANCOS, and DCC. Ground truth counts are in the original images while predicted counts in the estimated density maps.
|
| 198 |
+
|
| 199 |
+
AC, they are not straightforward comparisons. For [23], it uses about $50\%$ labeled data on SHA (Fig.7 in [23]) to reach the similar performance of our AL-AC with $10\%$ labeled data. We both adopt the VGGnet yet [23] utilizes extra web data for ranking loss while we only use unlabeled data within SHA, we use dilated convolutions while [23] does not. To make them more comparable, we instead use the same backbone of [23] and repeat AL-AC on SHA (implementation details still follow Sec. 4.1), the mean MAE with $\mathrm{M} = 30$ , $\mathrm{m} = 10$ on SHA becomes 91.4 (v.s. 87.9 in Table 3), which is still much better than that of [23].
|
| 200 |
+
|
| 201 |
+
In the supplementary material, we also provide the result by gradually increasing $M$ till 280 on SHA, where we show that by labelling about 80-100 labeled data (nearly $30\%$ of the dataset), AL-AC already reaches the performance close to the fully-supervised method, as in [20] (Table 3).
|
| 202 |
+
|
| 203 |
+
# 4.3 UCF_CC_50
|
| 204 |
+
|
| 205 |
+
It has 40 training images in total. We show in Table 4 that, labeling ten of them $(M = 10, m = 3)$ already produces a very competitive result: the MAE is 351.4 while the MSE is 448.1. The MAE and MSE are significantly lower (93.3 and
|
| 206 |
+
|
| 207 |
+
<table><tr><td>Mall</td><td>Baseline</td><td>AL-AC*</td><td>Count Forest [33]</td><td>ConvLSTM [53]</td><td>DecideNet [21]</td><td>E3D [62]</td><td>SAAN [11]</td></tr><tr><td>MAE</td><td>5.9±0.9</td><td>3.8±0.5</td><td>4.4</td><td>2.1</td><td>1.5</td><td>1.6</td><td>1.3</td></tr><tr><td>MSE</td><td>6.3±1.1</td><td>5.4±0.8</td><td>2.4</td><td>7.6</td><td>1.9</td><td>2.1</td><td>1.7</td></tr></table>
|
| 208 |
+
|
| 209 |
+
Table 5: Comparison of AL-AC with state of the art on Mall (M=80, m=20).
|
| 210 |
+
|
| 211 |
+
152.2 points) than baseline. We analyzed the result and found that our AL-AC is able to select those hard samples with thousands of persons and label them for training, while this is not guaranteed in random selection. Compared to fully supervised method, e.g. [20], our MAE is not far. We also present the result of $M = 20$ , $m = 10$ : MAE/MSE is further reduced.
|
| 212 |
+
|
| 213 |
+
# 4.4 Mall
|
| 214 |
+
|
| 215 |
+
Different from ShanghaiTech and UCF datasets, Mall contains images with much sparser crowds, 31 persons on average per image. Following our setup, we label 80 out of 800 images and compare our AL-AC with both baseline and other fully-supervised methods [33, 53, 21, 62, 11] in Table 5. With $10\%$ labeled data, we achieve MAE 3.8 superior to the baseline and [33], MSE 5.4 superior to the baseline and [53]. This shows the effectiveness of our method on sparse crowds.
|
| 216 |
+
|
| 217 |
+
# 5 Discussion
|
| 218 |
+
|
| 219 |
+
We present an active learning framework for accurate crowd counting with limited supervision. Given a counting dataset, instead of annotating every image, we introduce a partition-based sample selection with weights to label only a few most informative images and learn a crowd regression network upon them. This process is iterated till the labeling budget is reached. Next, rather than learning from only labeled data, the abundant unlabeled data are also exploited: we introduce a distribution alignment branch with latent MixUp in the network. Experiments conducted on standard benchmarks show that labeling only $10\%$ of the entire set, our method already performs close to recent state-of-the-art.
|
| 220 |
+
|
| 221 |
+
By choosing an appropriate $m$ , we normally reach the labeling budget in three active learning cycles. In our setting, training data in each dataset are augmented to a fixed number. We run our experiments with GPU GTX1080. It takes around three hours to complete each active learning cycle. The total training hours are more or less the same to fully-supervised training, as in each learning cycle we train much fewer epochs with limited number of labeled data. More importantly, compared to the annotation cost for an entire dataset (see Sec. 1 for an estimation on SHA), ours is substantially reduced!
|
| 222 |
+
|
| 223 |
+
Acknowledgement: This work was supported by the National Natural Science Foundation of China (NSFC) under Grant No. 61828602 and 51475334; as well as National Key Research and Development Program of Science and Technology of China under Grant No. 2018YFB1305304, Shanghai Science and Technology Pilot Project under Grant No. 19511132100.
|
| 224 |
+
|
| 225 |
+
# References
|
| 226 |
+
|
| 227 |
+
1. Berthelot, D., Carlini, N., Goodfellow, I., Papernot, N., Oliver, A., Raffel, C.: Mixmatch: A holistic approach to semi-supervised learning. arXiv preprint arXiv:1905.02249 (2019)
|
| 228 |
+
2. Brostow, G.J., Cipolla, R.: Unsupervised bayesian detection of independent motion in crowds. In: CVPR (2006)
|
| 229 |
+
3. Cao, X., Wang, Z., Zhao, Y., Su, F.: Scale aggregation network for accurate and efficient crowd counting. In: ECCV (2018)
|
| 230 |
+
4. Change Loy, C., Gong, S., Xiang, T.: From semi-supervised to transfer counting of crowds. In: CVPR (2013)
|
| 231 |
+
5. Chen, K., Loy, C.C., Gong, S., Xiang, T.: Feature mining for localised crowd counting. In: BMVC (2012)
|
| 232 |
+
6. Dasgupta, S.: Analysis of a greedy active learning strategy. In: NIPS (2005)
|
| 233 |
+
7. Ganin, Y., Lempitsky, V.: Unsupervised domain adaptation by backpropagation. In: JMLR (2015)
|
| 234 |
+
8. Gonzalez-Garcia, A., Vezhnevets, A., Ferrari, V.: An active search strategy for efficient object class detection. In: CVPR (2015)
|
| 235 |
+
9. Guerrero-Gomez-Olmedo, R., Torre-Jimenez, B., López-Sastre, R., Maldonado-Bascon, S., Onoro-Rubio, D.: Extremely overlapping vehicle counting. In: Iberian Conference on Pattern Recognition and Image Analysis (2015)
|
| 236 |
+
10. Hoffer, E., Ailon, N.: Semi-supervised deep learning by metric embedding. arXiv preprint arXiv:1611.01449 (2016)
11. Hossain, M., Hosseinzadeh, M., Chanda, O., Wang, Y.: Crowd counting using scale-aware attention networks. In: WACV (2019)
12. Hossain, M.A., Kumar, M., Hosseinzadeh, M., Chanda, O., Wang, Y.: One-shot scene-specific crowd counting. In: BMVC (2019)
13. Idrees, H., Saleemi, I., Seibert, C., Shah, M.: Multi-source multi-scale counting in extremely dense crowd images. In: CVPR (2013)
14. Jenks, G.F.: The data model concept in statistical mapping. International yearbook of cartography 7, 186-190 (1967)
15. Jiang, X., Xiao, Z., Zhang, B., Zhen, X., Cao, X., Doermann, D., Shao, L.: Crowd counting and density estimation by trellis encoder-decoder networks. In: CVPR (2019)
16. Joshi, A.J., Porikli, F., Papanikolopoulos, N.: Multi-class active learning for image classification. In: CVPR (2009)
17. Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: NIPS (2012)
18. Laine, S., Aila, T.: Temporal ensembling for semi-supervised learning. In: ICLR (2016)
19. Lee, D.H.: Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In: ICMLW (2013)
|
| 246 |
+
20. Li, Y., Zhang, X., Chen, D.: Csrnet: Dilated convolutional neural networks for understanding the highly congested scenes. In: CVPR (2018)
|
| 247 |
+
21. Liu, J., Gao, C., Meng, D., G. Hauptmann, A.: Decidenet: Counting varying density crowds through attention guided detection and density estimation. In: CVPR (2018)
|
| 248 |
+
22. Liu, W., Salzmann, M., Fua, P.: Context-aware crowd counting. In: CVPR (2019)
|
| 249 |
+
23. Liu, X., Van De Weijer, J., Bagdanov, A.D.: Exploiting unlabeled data in cnns by self-supervised learning to rank. IEEE transactions on pattern analysis and machine intelligence (2019)
|
| 250 |
+
|
| 251 |
+
24. Liu, X., Weijer, J., Bagdanov, A.D.: Leveraging unlabeled data for crowd counting by learning to rank. In: CVPR (2018)
|
| 252 |
+
25. Liu, Y., Shi, M., Zhao, Q., Wang, X.: Point in, box out: Beyond counting persons in crowds. In: CVPR (2019)
|
| 253 |
+
26. Lu, Z., Shi, M., Chen, Q.: Crowd counting via scale-adaptive convolutional neural network. In: WACV (2018)
|
| 254 |
+
27. Ma, Z., Wei, X., Hong, X., Gong, Y.: Bayesian loss for crowd count estimation with point supervision. In: ICCV (2019)
|
| 255 |
+
28. Marsden, M., McGuinness, K., Little, S., Keogh, C.E., O'Connor, N.E.: People, penguins and petri dishes: adapting object counting models to new visual domains and object types without forgetting. In: CVPR (2018)
|
| 256 |
+
29. Olivier, C., Bernhard, S., Alexander, Z.: Semi-supervised learning. In: IEEE Transactions on Neural Networks, vol. 20, pp. 542-542 (2006)
|
| 257 |
+
30. Olmschenk, G., Tang, H., Zhu, Z.: Crowd counting with minimal data using generative adversarial networks for multiple target regression. In: WACV (2018)
|
| 258 |
+
31. Olmschenk, G., Zhu, Z., Tang, H.: Generalizing semi-supervised generative adversarial networks to regression using feature contrasting. Computer Vision and Image Understanding (2019)
|
| 259 |
+
32. Onoro-Rubio, D., López-Sastre, R.J.: Towards perspective-free object counting with deep learning. In: ECCV (2016)
|
| 260 |
+
33. Pham, V.Q., Kozakaya, T., Yamaguchi, O., Okada, R.: Count forest: Co-voting uncertain number of targets using random forest for crowd density estimation. In: ICCV (2015)
|
| 261 |
+
34. Rabaud, V., Belongie, S.: Counting crowded moving objects. In: CVPR (2006)
|
| 262 |
+
35. Ranjan, V., Le, H., Hoai, M.: Iterative crowd counting. In: ECCV (2018)
|
| 263 |
+
36. Rasmus, A., Berglund, M., Honkala, M., Valpola, H., Raiko, T.: Semi-supervised learning with ladder networks. In: NIPS (2015)
|
| 264 |
+
37. Sam, D.B., Babu, R.V.: Top-down feedback for crowd counting convolutional neural network. In: AAAI (2018)
|
| 265 |
+
38. Sam, D.B., Sajjan, N.N., Maurya, H., Babu, R.V.: Almost unsupervised learning for dense crowd counting. In: AAAI (2019)
|
| 266 |
+
39. Sam, D.B., Surya, S., Babu, R.V.: Switching convolutional neural network for crowd counting. In: CVPR (2017)
|
| 267 |
+
40. Sener, O., Savarese, S.: Active learning for convolutional neural networks: A core-set approach. In: ICLR (2018)
|
| 268 |
+
41. Settles, B.: Active learning literature survey. Tech. rep., University of Wisconsin-Madison Department of Computer Sciences (2009)
|
| 269 |
+
42. Shi, M., Yang, Z., Xu, C., Chen, Q.: Revisiting perspective information for efficient crowd counting. In: CVPR (2019)
|
| 270 |
+
43. Shi, Z., Mettes, P., Snoek, C.G.: Counting with focus for free. In: ICCV (2019)
|
| 271 |
+
44. Sindagi, V.A., Patel, V.M.: Generating high-quality crowd density maps using contextual pyramid cnns. In: ICCV (2017)
|
| 272 |
+
45. Sinha, S., Ebrahimi, S., Darrell, T.: Variational adversarial active learning. In: ICCV (2019)
|
| 273 |
+
46. Tan, B., Zhang, J., Wang, L.: Semi-supervised elastic net for pedestrian counting. Pattern Recognition 44(10-11), 2297-2304 (2011)
|
| 274 |
+
47. Verma, V., Lamb, A., Beckham, C., Najafi, A., Mitliagkas, I., Courville, A., Lopez-Paz, D., Bengio, Y.: Manifold mixup: Better representations by interpolating hidden states. In: ICML (2019)
|
| 275 |
+
48. Verma, V., Lamb, A., Kannala, J., Bengio, Y., Lopez-Paz, D.: Interpolation consistency training for semi-supervised learning. arXiv preprint arXiv:1903.03825 (2019)
|
| 276 |
+
|
| 277 |
+
49. Viola, P., Jones, M.J., Snow, D.: Detecting pedestrians using patterns of motion and appearance. IJCV 63(2), 153-161 (2003)
|
| 278 |
+
50. Wang, K., Zhang, D., Li, Y., Zhang, R., Lin, L.: Cost-effective active learning for deep image classification. IEEE Transactions on Circuits and Systems for Video Technology 27(12), 2591-2600 (2016)
|
| 279 |
+
51. Wang, Q., Gao, J., Lin, W., Yuan, Y.: Learning from synthetic data for crowd counting in the wild. In: CVPR (2019)
|
| 280 |
+
52. Weston, J., Ratle, F., Mobahi, H., Collobert, R.: Deep learning via semi-supervised embedding. In: Neural Networks: Tricks of the Trade, pp. 639-655. Springer (2012)
|
| 281 |
+
53. Xiong, F., Shi, X., Yeung, D.Y.: Spatiotemporal modeling for crowd counting in videos. In: ICCV (2017)
|
| 282 |
+
54. Xu, C., Qiu, K., Fu, J., Bai, S., Xu, Y., Bai, X.: Learn to scale: Generating multipolar normalized density map for crowd counting. In: ICCV (2019)
|
| 283 |
+
55. Yan, Z., Yuan, Y., Zuo, W., Tan, X., Wang, Y., Wen, S., Ding, E.: Perspective-guided convolution networks for crowd counting. In: ICCV (2019)
|
| 284 |
+
56. Yang, Y., Ma, Z., Nie, F., Chang, X., Hauptmann, A.G.: Multi-class active learning by uncertainty sampling with diversity maximization. International Journal of Computer Vision 113(2), 113-127 (2015)
|
| 285 |
+
57. Yang, Z., Shi, M., Avrithis, Y., Xu, C., Ferrari, V.: Training object detectors from few weakly-labeled and many unlabeled images. arXiv preprint arXiv:1912.00384 (2019)
|
| 286 |
+
58. Zhang, C., Li, H., Wang, X., Yang, X.: Cross-scene crowd counting via deep convolutional neural networks. In: CVPR (2015)
|
| 287 |
+
59. Zhang, H., Cisse, M., Dauphin, Y.N., Lopez-Paz, D.: Mixup: Beyond empirical risk minimization. In: ICLR (2018)
|
| 288 |
+
60. Zhang, Y., Zhou, D., Chen, S., Gao, S., Ma, Y.: Single-image crowd counting via multi-column convolutional neural network. In: CVPR (2016)
|
| 289 |
+
61. Zhou, Q., Zhang, J., Che, L., Shan, H., Wang, J.Z.: Crowd counting with limited labeling through submodular frame selection. IEEE Transactions on Intelligent Transportation Systems 20(5), 1728-1738 (2018)
|
| 290 |
+
62. Zou, Z., Shao, H., Qu, X., Wei, W., Zhou, P.: Enhanced 3d convolutional networks for crowd counting. In: BMVC (2019)
|
activecrowdcountingwithlimitedsupervision/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0b47b92a4bdb4298c04490df681f227e2ae18a985be664dbb46cc37f4369f059
|
| 3 |
+
size 378383
|
activecrowdcountingwithlimitedsupervision/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a80b50942d8f4e1e3b885f81a22f5ec52acc529fd77eb735cf44c955a63d13e7
|
| 3 |
+
size 461078
|
activeperceptionusinglightcurtainsforautonomousdriving/64e5a70a-c7d5-4804-aaf5-56c8f5cd421b_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dc4053f9ebc6f91203f97dab326c1a0a9952e16eb2a0631102aef416802a5926
|
| 3 |
+
size 73939
|
activeperceptionusinglightcurtainsforautonomousdriving/64e5a70a-c7d5-4804-aaf5-56c8f5cd421b_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:11357509de8ec1586a558766d88b4ed4cc8c1dd5f2b5241f577558998da36f9d
|
| 3 |
+
size 88947
|
activeperceptionusinglightcurtainsforautonomousdriving/64e5a70a-c7d5-4804-aaf5-56c8f5cd421b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1f06b4d819ec06fb1b091925158b6eb18b390354232b86cbc0ef453707b80629
|
| 3 |
+
size 5604248
|
activeperceptionusinglightcurtainsforautonomousdriving/full.md
ADDED
|
@@ -0,0 +1,274 @@
| 1 |
+
# Active Perception using Light Curtains for Autonomous Driving
|
| 2 |
+
|
| 3 |
+
Siddharth Ancha, Yaadhav Raaj, Peiyun Hu, Srinivasa G. Narasimhan, and David Held
|
| 4 |
+
|
| 5 |
+
Carnegie Mellon University, Pittsburgh PA 15213, USA {sancha, ryaadhav, peiyunh, srinivas, dheld} @andrew.cmu.edu
|
| 6 |
+
|
| 7 |
+
Website: http://siddancha.github.io/projects/active-perception-light-curtains
|
| 8 |
+
|
| 9 |
+
Abstract. Most real-world 3D sensors such as LiDARs perform fixed scans of the entire environment, while being decoupled from the recognition system that processes the sensor data. In this work, we propose a method for 3D object recognition using light curtains, a resource-efficient controllable sensor that measures depth at user-specified locations in the environment. Crucially, we propose using prediction uncertainty of a deep learning based 3D point cloud detector to guide active perception. Given a neural network's uncertainty, we develop a novel optimization algorithm to optimally place light curtains to maximize coverage of uncertain regions. Efficient optimization is achieved by encoding the physical constraints of the device into a constraint graph, which is optimized with dynamic programming. We show how a 3D detector can be trained to detect objects in a scene by sequentially placing uncertainty-guided light curtains to successively improve detection accuracy. Links to code can be found on the project webpage.
|
| 10 |
+
|
| 11 |
+
Keywords: Active Vision, Robotics, Autonomous Driving, 3D Vision
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
3D sensors, such as LiDAR, have become ubiquitous for perception in autonomous systems operating in the real world, such as self-driving vehicles and field robots. Combined with recent advances in deep-learning based visual recognition systems, they have led to significant breakthroughs in perception for autonomous driving, enabling the recent surge of commercial interest in self-driving technology.
|
| 16 |
+
|
| 17 |
+
However, most 3D sensors in use today perform passive perception, i.e. they continuously sense the entire environment while being completely decoupled from the recognition system that will eventually process the sensor data. In such a case, sensing the entire scene can be potentially inefficient. For example, consider an object detector running on a self-driving car that is trying to recognize objects in its environment. Suppose that it is confident that a tree-like structure on the side of the street is not a vehicle, but it is unsure whether an object turning around the curb is a vehicle or a pedestrian. In such a scenario, it might be
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Fig. 1: Object detection using light curtains. (a) Scene with 4 cars; ground-truth boxes shown in green. (b) Sparse green points are from a single-beam LiDAR; it can detect only two cars (red boxes). Numbers above detection boxes are confidence scores. The uncertainty map in greyscale is displayed underneath: whiter means higher uncertainty. (c) The first light curtain (blue) is placed to optimally cover the most uncertain regions. Dense points (green) from the light curtain result in detecting 2 more cars. (d) The second light curtain senses even more points and fixes the misalignment error in the leftmost detection.
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
|
| 28 |
+
beneficial if the 3D sensor focuses on collecting more data from the latter object, rather than distributing its sensing capacity uniformly throughout the scene.
|
| 29 |
+
|
| 30 |
+
In this work, we propose a method for 3D object detection using active perception, i.e. using sensors that can be purposefully controlled to sense specific regions in the environment. Programmable light curtains [22,2] were recently proposed as controllable, light-weight, and resource efficient sensors that measure the presence of objects intersecting any vertical ruled surface whose shape can be specified by the user (see Fig. 2). There are two main advantages of using programmable light curtains over LiDARs. First, they can be cheaply constructed, since light curtains use ordinary CMOS sensors (a current lab-built prototype costs $1000, and the price is expected to go down significantly in production). In contrast, a 64-beam Velodyne LiDAR that is commonly used in 3D self-driving datasets like KITTI [10] costs upwards of $80,000. Second, light curtains generate data with much higher resolution in regions where they actively focus [2] while LiDARs sense the entire environment and have low spatial and angular resolution.
|
| 31 |
+
|
| 32 |
+
One weakness of light curtains is that they are able to sense only a subset of the environment - a vertical ruled surface (see Fig. 1(c,d), Fig 2). In contrast, a LiDAR senses the entire scene. To mitigate this weakness, we can take advantage of the fact that the light curtain is a controllable sensor - we can choose where to place the light curtains. Thus, we must intelligently place light curtains in the appropriate locations, so that they sense the most important parts of the scene. In this work, we develop an algorithm for determining how to best place the light curtains for maximal detection performance.
|
| 33 |
+
|
| 34 |
+
We propose to use a deep neural network's prediction uncertainty as a guide for determining how to actively sense an environment. Our insight is that if an active sensor images the regions which the network is most uncertain about, the data obtained from those regions can help resolve the network's uncertainty and improve recognition. Conveniently, most deep learning based recognition systems output confidence maps, which can be used for this purpose when converted to an appropriate notion of uncertainty.
|
| 35 |
+
|
| 36 |
+
Given neural network uncertainty estimates, we show how a light curtain can be placed to optimally cover the regions of maximum uncertainty. First, we use an information-gain based framework to propose placing light curtains that maximize the sum of uncertainties of the covered region (Sec. 4.3, Appendix A). However, the structure of the light curtain and physical constraints of the device impose restrictions on how the light curtain can be placed. Our novel solution is to precompute a "constraint graph", which describes all possible light curtain placements that respect these physical constraints. We then use an optimization approach based on dynamic programming to efficiently search over all possible feasible paths in the constraint graph and maximize this objective (Sec. 4.4). This is a novel approach to constrained optimization of a controllable sensor's trajectory which takes advantage of the properties of the problem we are solving.
|
| 37 |
+
|
| 38 |
+
Our proposed active perception pipeline for 3D detection proceeds as follows. We initially record sparse data with an inexpensive single-beam LiDAR sensor that performs fixed 3D scans. This data is input to a 3D point cloud object detector, which outputs an initial set of detections and confidence estimates. These confidence estimates are converted into uncertainty estimates, which are used by our dynamic programming algorithm to determine where to place the first light curtain. The light curtain readings are again input to the 3D object detector to obtain refined detections and an updated uncertainty map. This process of estimating detections and placing new light curtains can be repeated multiple times (Fig. 3). Hence, we are able to sense the environment progressively, intelligently, and efficiently.
|
| 39 |
+
|
| 40 |
+
We evaluate our algorithm using two synthetic datasets of urban driving scenes [9,29]. Our experiments demonstrate that our algorithm leads to a monotonic improvement in performance with successive light curtain placement. We compare our proposed optimal light curtain placement strategy to multiple baseline strategies and find that they are significantly outperformed by our method. To summarize, our contributions are the following:
|
| 41 |
+
|
| 42 |
+
- We propose a method for using a deep learning based 3D object detector's prediction uncertainty as a guide for active sensing (Sec. 4.2).
|
| 43 |
+
- Given a network's uncertainty, we show how to compute a feasible light curtain that maximizes the coverage of uncertainty. Our novel contribution is to encode the physical constraints of the device into a graph and use dynamic-programming based graph optimization to efficiently maximize the objective while satisfying the physical constraints (Sec. 4.3, 4.4).
|
| 44 |
+
- We show how to train such an active detector using online light curtain data generation (Sec. 4.5).
|
| 45 |
+
|
| 46 |
+
- We empirically demonstrate that our approach leads to significantly improved detection performance compared to a number of baseline approaches (Sec. 5).
|
| 47 |
+
|
| 48 |
+
# 2 Related Work
|
| 49 |
+
|
| 50 |
+
# 2.1 Active Perception and Next-Best View Planning
|
| 51 |
+
|
| 52 |
+
Active Perception encompasses a variety of problems and techniques that involve actively controlling the sensor for improved perception [1,23]. Examples include actively modifying camera parameters [1], moving a camera to look around occluding objects [4], and next-best view (NBV) planning [5]. NBV refers to a broad set of problems in which the objective is to select the next best sensing action in order to solve a specific task. Typical problems include object instance classification [24,8,7,18] and 3D reconstruction [12,13,21,6,11]. Many works on next-best view formulate the objective as maximizing information gain (also known as mutual information) [24,7,12,13,21,6], using models such as probabilistic occupancy grids for beliefs over states [24,12,13,21,6]. Our method is similar in spirit to next-best view. One could consider each light curtain placement as obtaining a new "view" of the environment; we try to find the next best light curtain that aids object detection. In Sec. 4.3 and Appendix A, we derive an information-gain based objective to find the next best light curtain placement.
|
| 53 |
+
|
| 54 |
+
# 2.2 Object Detection from Point Clouds
|
| 55 |
+
|
| 56 |
+
There have been many recent advances in deep learning for 3D object detection. Approaches include representing LiDAR data as range images in LaserNet [16], using raw point clouds [19], and using point clouds in the bird's eye view such as AVOD [14], HDNet [26] and Complex-YOLO [20]. Most state-of-the-art approaches use voxelized point clouds, such as VoxelNet [27], PointPillars [15], SECOND [25], and CBGS [28]. These methods process an input point cloud by dividing the space into 3D regions (voxels or pillars) and extracting features from each region using a PointNet [17] based architecture. Then, the volumetric feature map is converted to 2D features via convolutions, followed by a detection head that produces bounding boxes. We demonstrate that we can use such detectors, along with our novel light curtain placement algorithm, to process data from a single-beam LiDAR combined with light curtains.
|
| 57 |
+
|
| 58 |
+
# 3 Background on Light Curtains
|
| 59 |
+
|
| 60 |
+
Programmable light curtains [22,2] are a sensor for adaptive depth sensing. "Light curtains" can be thought of as virtual surfaces placed in the environment. They can detect points on objects that intersect this surface. Before explaining how the curtain is created, we briefly describe our coordinate system and the basics of a rolling shutter camera.
|
| 61 |
+
|
| 62 |
+
Coordinate system: Throughout the paper, we will use the standard camera
|
| 63 |
+
|
| 64 |
+

|
| 65 |
+
(a) Working principle
|
| 66 |
+
|
| 67 |
+

|
| 68 |
+
(b) Optical schematic (top view)
|
| 69 |
+
Fig. 2: Illustration of programmable light curtains adapted from [2,22]. a) The light curtain is placed at the intersection of the illumination plane (from the projector) and the imaging plane (from the camera). b) A programmable galvanometer and a rolling shutter camera create multiple points of intersection, $\mathbf{X}_t$ .
|
| 70 |
+
|
| 71 |
+
coordinate system centered at the sensor. We assume that the $z$ axis corresponds to depth from the sensor pointing forward, and that the $y$ vector points vertically downwards. Hence the $xz$ -plane is parallel to the ground and corresponds to a top-down view, also referred to as the bird's eye view.
|
| 72 |
+
|
| 73 |
+
Rolling shutter camera: A rolling shutter camera contains pixels arranged in $T$ vertical columns. Each pixel column corresponds to a vertical imaging plane. Readings from only those visible 3D points that lie on the imaging plane get recorded onto its pixel column. We will denote the $xz$ -projection of the imaging plane corresponding to the $t$ -th pixel column by ray $\mathbf{R}_t$ , shown in the top-down view in Fig. 2(b). We will refer to these as "camera rays". The camera has a rolling shutter that successively activates each pixel column and its imaging plane one at a time from left to right. The time interval between the activation of two adjacent pixel columns is determined by the pixel clock.
|
| 74 |
+
|
| 75 |
+
Working principle of light curtains: The latest version of light curtains [2] works by rapidly rotating a light sheet laser in synchrony with the motion of a camera's rolling shutter. A laser beam is collimated and shaped into a line sheet using appropriate lenses and is reflected at a desired angle using a controllable galvanometer mirror (see Fig. 2(b)). The illumination plane created by the laser intersects the active imaging plane of the camera in a vertical line along the curtain profile (Fig. 2(a)). The $xz$ -projection of this vertical line intersecting the $t$ -th imaging plane lies on $\mathbf{R}_t$ , and we call this the $t$ -th "control point", denoted by $\mathbf{X}_t$ (Fig. 2(b)).
|
| 76 |
+
|
| 77 |
+
Light curtain input: The shape of a light curtain is uniquely defined by where it intersects each camera ray in the $xz$ -plane, i.e. the control points $\{\mathbf{X}_1,\dots ,\mathbf{X}_T\}$ . These will act as inputs to the light curtain device. In order to produce the light curtain defined by $\{\mathbf{X}_t\}_{t = 1}^T$ , the galvanometer is programmed to compute, for each camera ray $\mathbf{R}_t$ , the reflection angle $\theta_t(\mathbf{X}_t)$ of the laser beam and rotate to it
|
| 78 |
+
|
| 79 |
+

|
| 80 |
+
Fig.3: Our method for detecting objects using light curtains. An inexpensive single-beam lidar input is used by a 3D detection network to obtain rough initial estimates of object locations. The uncertainty of the detector is used to optimally place a light curtain that covers the most uncertain regions. The points detected by the light curtain (shown in green in the bottom figure) are input back into the detector so that it can update its predictions as well as uncertainty. The new uncertainty maps can again be used to place successive light curtains in an iterative manner, closing the loop.
|
| 81 |
+
|
| 82 |
+
such that the laser sheet intersects $\mathbf{R}_t$ at $\mathbf{X}_t$ . By selecting a control point on each camera ray, the light curtain device can be made to image any vertical ruled surface [2,22].
|
| 83 |
+
|
| 84 |
+
Light curtain output: The light curtain outputs a point cloud of all visible 3D points in the scene that intersect the light curtain surface. The density of light curtain points on the surface is usually much higher than that of LiDAR points.
|
| 85 |
+
|
| 86 |
+
Light curtain constraints: The rotating galvanometer can only operate at a maximum angular velocity $\omega_{\mathrm{max}}$ . Let $\mathbf{X}_t$ and $\mathbf{X}_{t + 1}$ be the control points on two consecutive camera rays $\mathbf{R}_t$ and $\mathbf{R}_{t + 1}$ . These induce laser angles $\theta (\mathbf{X}_t)$ and $\theta (\mathbf{X}_{t + 1})$ respectively. If $\varDelta t$ is the time difference between when the $t$ -th and $(t + 1)$ -th pixel columns are active, the galvanometer needs to rotate by an angle of $\varDelta\theta(\mathbf{X}_t)=\theta(\mathbf{X}_{t+1})-\theta(\mathbf{X}_t)$ within $\varDelta t$ time. Denote $\varDelta\theta_{\mathrm{max}}=\omega_{\mathrm{max}}\cdot\varDelta t$ . Then the light curtain can only image control points subject to $|\theta (\mathbf{X}_{t + 1}) - \theta (\mathbf{X}_t)|\leq \varDelta\theta_{\mathrm{max}},\forall 1\leq t < T$ .
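To make this constraint concrete, the sketch below computes the laser angle for a control point in the top-down view and checks whether a transition between consecutive control points is feasible. The laser position, pixel clock, and maximum angular velocity are illustrative assumptions, not values taken from the paper.

```python
import numpy as np

# Illustrative geometry and timing; the paper does not specify these numbers.
LASER_ORIGIN = np.array([0.2, 0.0])   # (x, z) position of the laser in meters (assumed)
OMEGA_MAX = 2.5e4                     # max galvanometer angular velocity in deg/s (assumed)
PIXEL_DT = 4e-5                       # time between adjacent pixel columns in s (assumed)
DTHETA_MAX = OMEGA_MAX * PIXEL_DT     # max laser angle change between consecutive rays

def laser_angle(control_point):
    """Galvanometer angle (degrees) so the light sheet passes through X = (x, z)."""
    dx, dz = np.asarray(control_point, dtype=float) - LASER_ORIGIN
    return np.degrees(np.arctan2(dx, dz))

def feasible_transition(x_t, x_t1):
    """Light curtain constraint: |theta(X_{t+1}) - theta(X_t)| <= Delta theta_max."""
    return abs(laser_angle(x_t1) - laser_angle(x_t)) <= DTHETA_MAX
```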
|
| 87 |
+
|
| 88 |
+
# 4 Approach
|
| 89 |
+
|
| 90 |
+
# 4.1 Overview
|
| 91 |
+
|
| 92 |
+
Our aim is to use light curtains for detecting objects in a 3D scene. The overall approach is illustrated in Fig. 3. We use a voxel-based point cloud detector [25] and train it to use light curtain data without any architectural changes. The pipeline illustrated in Fig. 3 proceeds as follows.
|
| 93 |
+
|
| 94 |
+
To obtain an initial set of object detections, we use data from an inexpensive single-beam LiDAR as input to the detector. This produces rough estimates of object locations in the scene. Single-beam LiDAR is inexpensive because it
|
| 95 |
+
|
| 96 |
+
consists of only one laser beam as opposed to 64 or 128 beams that are common in autonomous driving. The downside is that the data from the single beam contains very few points; this results in inaccurate detections and high uncertainty about object locations in the scene (see Fig. 1b).
|
| 97 |
+
|
| 98 |
+
Alongside bounding box detections, we can also extract from the detector an "uncertainty map" (explained in Sec. 4.2). We then use light curtains, placed in regions guided by the detector's uncertainty, to collect more data and iteratively refine the object detections. In order to get more data from the regions the detector is most uncertain about, we derive an information-gain based objective function that sums the uncertainties along the light curtain control points (Sec. 4.3 and Appendix A), and we develop a constrained optimization algorithm that places the light curtain to maximize this objective (Sec. 4.4).
|
| 99 |
+
|
| 100 |
+
Once the light curtain is placed, it returns a dense set of points where the curtain intersects with visible objects in the scene. We maintain a unified point cloud, which we define as the union of all points observed so far. The unified point cloud is initialized with the points from the single-beam LiDAR. Points from the light curtain are added to the unified point cloud and this data is input back into the detector. Note that the input representation for the detector remains the same (point clouds), enabling the use of existing state-of-the-art point cloud detection methods without any architectural modifications.
|
| 101 |
+
|
| 102 |
+
As new data from the light curtains are added to the unified point cloud and input to the detector, the detector refines its predictions and improves its accuracy. Furthermore, the additional inputs cause the network to update its uncertainty map; the network may no longer be uncertain about the areas that were sensed by the light curtain. Our algorithm uses the new uncertainty map to generate a new light curtain placement. We can iteratively place light curtains to cover the current uncertain regions and input the sensed points back into the network, closing the loop and iteratively improving detection performance.
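A minimal sketch of this closed loop is given below; `detector`, `place_curtain`, and `sense_curtain` are hypothetical interfaces standing in for the trained network, the placement optimizer of Sec. 4.4, and the physical device.

```python
import numpy as np

def detect_with_light_curtains(detector, place_curtain, sense_curtain,
                               single_beam_points, num_curtains=3):
    """Iteratively refine detections with uncertainty-guided light curtains (sketch)."""
    unified_points = single_beam_points                      # initialize unified point cloud
    detections, uncertainty_map = detector(unified_points)   # rough initial estimates
    for _ in range(num_curtains):
        curtain = place_curtain(uncertainty_map)             # control points, Sec. 4.4
        new_points = sense_curtain(curtain)                  # dense returns on the curtain
        unified_points = np.concatenate([unified_points, new_points], axis=0)
        detections, uncertainty_map = detector(unified_points)  # same input format as before
    return detections
```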
|
| 103 |
+
|
| 104 |
+
# 4.2 Extracting uncertainty from the detector
|
| 105 |
+
|
| 106 |
+
The standard pipeline for 3D object detection [27,25,15] proceeds as follows. First, the ground plane (parallel to the $xz$ -plane) is uniformly tiled with "anchor boxes"; these are reference boxes used by a 3D detector to produce detections. They are located on points in a uniformly discretized grid $G = [x_{\mathrm{min}}, x_{\mathrm{max}}] \times [z_{\mathrm{min}}, z_{\mathrm{max}}]$ . For example, a $[-40\mathrm{m}, 40\mathrm{m}] \times [0\mathrm{m}, 70.4\mathrm{m}]$ grid is used for detecting cars in KITTI [10]. A 3D detector, which is usually a binary detector, takes a point cloud as input, and produces a binary classification score $p \in [0,1]$ and bounding box regression offsets for every anchor box. The score $p$ is the estimated probability that the anchor box contains an object of a specific class (such as car/pedestrian). The detector produces a detection for that anchor box if $p$ exceeds a certain threshold. If so, the detector combines the fixed dimensions of the anchor box with its predicted regression offsets to output a detection box.
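As a rough illustration of the last step, the sketch below combines an anchor with predicted regression offsets; it uses a generic additive/log-scale decoding and is not necessarily the exact parameterization used by [25].

```python
import numpy as np

def decode_box(anchor, offsets):
    """Combine a fixed anchor box with predicted regression offsets (sketch).
    anchor, offsets: (7,) arrays [x, y, z, w, l, h, yaw]."""
    anchor = np.asarray(anchor, dtype=float)
    offsets = np.asarray(offsets, dtype=float)
    center = anchor[:3] + offsets[:3]              # translate the anchor center
    size = anchor[3:6] * np.exp(offsets[3:6])      # scale width / length / height
    yaw = anchor[6] + offsets[6]                   # rotate about the vertical axis
    return np.concatenate([center, size, [yaw]])
```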
|
| 107 |
+
|
| 108 |
+
We can convert the confidence score to binary entropy $H(p) \in [0,1]$ where $H(p) = -p\log_2p - (1 - p)\log_2(1 - p)$ . Entropy is a measure of the detector's uncertainty about the presence of an object at the anchor location. Since we
|
| 109 |
+
|
| 110 |
+
have an uncertainty score at uniformly spaced anchor locations parallel to the $xz$ -plane, they form an "uncertainty map" in the top-down view. We use this uncertainty map to place light curtains.
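As an example, the uncertainty map can be obtained by applying the element-wise binary entropy to the detector's per-anchor confidence scores; in the sketch below the grid shape and the random scores are placeholders for real detector outputs.

```python
import numpy as np

def binary_entropy(p, eps=1e-12):
    """H(p) = -p*log2(p) - (1-p)*log2(1-p), element-wise, values in [0, 1]."""
    p = np.clip(p, eps, 1.0 - eps)
    return -(p * np.log2(p) + (1.0 - p) * np.log2(1.0 - p))

# Placeholder confidences on a bird's-eye-view anchor grid, e.g. the
# [-40m, 40m] x [0m, 70.4m] grid mentioned above at an assumed 0.2m resolution.
scores = np.random.rand(352, 400)
uncertainty_map = binary_entropy(scores)   # high where the detector is unsure
```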
|
| 111 |
+
|
| 112 |
+
# 4.3 Information gain objective
|
| 113 |
+
|
| 114 |
+
Based on the uncertainty estimates given by Sec. 4.2, our method determines how to place the light curtain to sense the most uncertain/ambiguous regions. It seems intuitive that sensing the locations of highest detector uncertainty can provide the largest amount of information from a single light curtain placement, towards improving detector accuracy. As discussed in Sec. 3, a single light curtain placement is defined by a set of $T$ control points $\{\mathbf{X}_t\}_{t=1}^T$ . The light curtain will be placed to lie vertically on top of these control points. To define an optimization objective, we use the framework of information gain (commonly used in next-best view methods; see Sec. 2.1) along with some simplifying assumptions (see Appendix A). We show that under these assumptions, placing a light curtain to maximize information gain (a mathematically defined information-theoretic quantity) is equivalent to maximizing the objective $J(\mathbf{X}_1, \ldots, \mathbf{X}_T) = \sum_{t=1}^{T} H(\mathbf{X}_t)$ , where $H(\mathbf{X})$ is the binary entropy of the detector's confidence at the anchor location of $\mathbf{X}$ . When the control point $\mathbf{X}$ does not exactly correspond to an anchor location, we impute $H(\mathbf{X})$ by nearest-neighbor interpolation from the uncertainty map. Please see Appendix A for a detailed derivation.
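Evaluating this objective for one candidate curtain then reduces to a nearest-neighbor lookup of each control point into the uncertainty map followed by a sum, as in the sketch below; the grid layout mirrors the assumed bird's-eye-view grid from the previous snippet.

```python
import numpy as np

def curtain_objective(control_points, uncertainty_map,
                      x_range=(-40.0, 40.0), z_range=(0.0, 70.4)):
    """J(X_1, ..., X_T) = sum_t H(X_t), with H imputed by nearest-neighbor lookup.
    control_points: (T, 2) array of (x, z) locations in the top-down view."""
    n_z, n_x = uncertainty_map.shape   # rows index z, columns index x (assumed layout)
    xs = np.clip(control_points[:, 0], *x_range)
    zs = np.clip(control_points[:, 1], *z_range)
    cols = np.round((xs - x_range[0]) / (x_range[1] - x_range[0]) * (n_x - 1)).astype(int)
    rows = np.round((zs - z_range[0]) / (z_range[1] - z_range[0]) * (n_z - 1)).astype(int)
    return float(uncertainty_map[rows, cols].sum())
```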
|
| 115 |
+
|
| 116 |
+
# 4.4 Optimal light curtain placement
|
| 117 |
+
|
| 118 |
+
In this section, we will describe an exact optimization algorithm to maximize the objective function $J(\mathbf{X}_1, \ldots, \mathbf{X}_T) = \sum_{t=1}^T H(\mathbf{X}_t)$ .
|
| 119 |
+
|
| 120 |
+
Constrained optimization: The control points $\{\mathbf{X}_t\}_{t=1}^T$ , where each $\mathbf{X}_t$ lies on the camera ray $\mathbf{R}_t$ , must be chosen to satisfy the physical constraints of the light curtain device: $|\theta(\mathbf{X}_{t+1}) - \theta(\mathbf{X}_t)| \leq \Delta \theta_{\max}$ (see Sec. 3: light curtain constraints). Hence, this is a constrained optimization problem. We discretize the problem by considering a dense set of $N$ discrete, equally spaced points $\mathcal{D}_t = \{\mathbf{X}_t^{(n)}\}_{n=1}^N$ on each ray $\mathbf{R}_t$ . We will assume that $\mathbf{X}_t \in \mathcal{D}_t$ for all $1 \leq t \leq T$ henceforth unless stated otherwise. We use $N = 80$ in all our experiments which we found to be sufficiently large. Overall, the optimization problem can be formulated as:
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\arg \max _ {\left\{\mathbf {X} _ {t} \right\} _ {t = 1} ^ {T}} \sum_ {t = 1} ^ {T} H \left(\mathbf {X} _ {t}\right) \tag {1}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\text{where } \mathbf{X}_t \in \mathcal{D}_t, \quad \forall 1 \leq t \leq T \tag{2}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
$$
|
| 131 |
+
\text{subject to } \left| \theta\left(\mathbf{X}_{t+1}\right) - \theta\left(\mathbf{X}_t\right) \right| \leq \Delta\theta_{\max}, \quad \forall 1 \leq t < T \tag{3}
|
| 132 |
+
$$
|
| 133 |
+
|
| 134 |
+
Light Curtain Constraint Graph: we encode the light curtain constraints into a graph, as illustrated in Figure 4. Each black ray corresponds to a camera ray. Each black dot on the ray is a vertex in the constraint graph. It represents a
|
| 135 |
+
|
| 136 |
+

|
| 137 |
+
(a)
|
| 138 |
+
|
| 139 |
+

|
| 140 |
+
(b)
|
| 141 |
+
Fig. 4: (a) Light curtain constraint graph. Black dots are nodes and blue arrows are the edges of the graph. The optimized light curtain profile is depicted as red arrows. (b) Example uncertainty map from the detector, and optimized light curtain profile in red. Black is lowest uncertainty and white is highest uncertainty. The optimized light curtain covers the most uncertain regions.
|
| 142 |
+
|
| 143 |
+
candidate control point and is associated with an uncertainty score. Exactly one control point must be chosen per camera ray. The optimization objective is to choose such points to maximize the total sum of uncertainties. An edge between two control points indicates that the light curtain is able to transition from one control point $\mathbf{X}_t$ to the next, $\mathbf{X}_{t + 1}$ without violating the maximum velocity light curtain constraints. Thus, the maximum velocity constraint (Eqn. 3) can be specified by restricting the set of edges (depicted using blue arrows). We note that the graph only needs to be constructed once and can be done offline.
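One way to realize this offline construction is to precompute, for every candidate control point on ray $\mathbf{R}_t$, the indices of the points on ray $\mathbf{R}_{t+1}$ that are reachable within $\Delta\theta_{\max}$, as sketched below (reusing the hypothetical `laser_angle` and `DTHETA_MAX` from the earlier constraint sketch).

```python
import numpy as np

def build_constraint_graph(rays, dtheta_max):
    """rays: list of T arrays, each of shape (N, 2), holding the candidate control
    points (x, z) on one camera ray. Returns edges[t][n]: indices of points on
    ray t+1 reachable from point n on ray t without violating the velocity limit."""
    angles = [np.array([laser_angle(p) for p in ray]) for ray in rays]
    edges = []
    for t in range(len(rays) - 1):
        diff = np.abs(angles[t + 1][None, :] - angles[t][:, None])  # (N_t, N_{t+1})
        edges.append([np.nonzero(diff[n] <= dtheta_max)[0] for n in range(len(rays[t]))])
    return edges
```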
|
| 144 |
+
|
| 145 |
+
Dynamic programming for constrained optimization: The number of possible light curtain placements, $|\mathcal{D}_1 \times \dots \times \mathcal{D}_T| = N^T$ , is exponentially large, which prevents us from searching for the optimal solution by brute force. However, we observe that the problem can be decomposed into simpler subproblems. In particular, let us define $J_{t}^{*}(\mathbf{X}_{t})$ as the optimal sum of uncertainties of the tail subproblem starting from $\mathbf{X}_t$ , i.e.
|
| 146 |
+
|
| 147 |
+
$$
|
| 148 |
+
J _ {t} ^ {*} (\mathbf {X} _ {t}) = \max _ {\mathbf {X} _ {t + 1}, \dots , \mathbf {X} _ {T}} H (\mathbf {X} _ {t}) + \sum_ {k = t + 1} ^ {T} H (\mathbf {X} _ {k}); \tag {4}
|
| 149 |
+
$$
|
| 150 |
+
|
| 151 |
+
$$
|
| 152 |
+
\text{subject to } \left| \theta\left(\mathbf{X}_{k+1}\right) - \theta\left(\mathbf{X}_k\right) \right| \leq \Delta\theta_{\max}, \quad \forall t \leq k < T \tag{5}
|
| 153 |
+
$$
|
| 154 |
+
|
| 155 |
+
If we were able to compute $J_{t}^{*}(\mathbf{X}_{t})$ , then this would help in solving a more complex subproblem using recursion: we observe that $J_{t}^{*}(\mathbf{X}_{t})$ has the property of optimal substructure, i.e. the optimal solution of $J_{t - 1}^{*}(\mathbf{X}_{t - 1})$ can be computed from the optimal solution of $J_{t}^{*}(\mathbf{X}_{t})$ via
|
| 156 |
+
|
| 157 |
+
$$
|
| 158 |
+
J _ {t - 1} ^ {*} (\mathbf {X} _ {t - 1}) = H \left(\mathbf {X} _ {t - 1}\right) + \max _ {\mathbf {X} _ {t} \in \mathcal {D} _ {t}} J _ {t} ^ {*} \left(\mathbf {X} _ {t}\right) \tag {6}
|
| 159 |
+
$$
|
| 160 |
+
|
| 161 |
+
subject to $|\theta (\mathbf{X}_t) - \theta (\mathbf{X}_{t - 1})|\leq \varDelta \theta_{\mathrm{max}}$
|
| 162 |
+
|
| 163 |
+
Because of this optimal substructure property, we can solve for $J_{t - 1}^{*}(\mathbf{X}_{t - 1})$ via dynamic programming. We also note that the solution to $\max_{\mathbf{X}_1}J_1^* (\mathbf{X}_1)$ is the solution to our original constrained optimization problem (Eqn. 1-3).
|
| 164 |
+
|
| 165 |
+
We thus perform the dynamic programming optimization as follows: the recursion from Eqn. 6 can be implemented by first performing a backwards pass, starting from $T$ and computing $J_{t}^{*}(\mathbf{X}_{t})$ for each $\mathbf{X}_t$ . Computing each $J_{t}^{*}(\mathbf{X}_{t})$ takes only $O(B_{\mathrm{avg}})$ time where $B_{\mathrm{avg}}$ is the average degree of a vertex (number of edges starting from a vertex) in the constraint graph, since we iterate once over all edges of $\mathbf{X}_t$ in Eqn. 6. Then, we do a forward pass, starting with $\arg \max_{\mathbf{X}_1\in \mathcal{D}_1}J_1^* (\mathbf{X}_1)$ and for a given $\mathbf{X}_{t - 1}^*$ , choosing $\mathbf{X}_t^*$ according to Eqn. 6. Since there are $N$ vertices per ray and $T$ rays in the graph, the overall algorithm takes $O(NTB_{\mathrm{avg}})$ time; this is a significant reduction from the $O(N^T)$ brute-force solution. We describe a simple extension of this objective that encourages smoothness in Appendix B.
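A compact sketch of this backward/forward pass is shown below; `H[t][n]` holds the (interpolated) uncertainty of the n-th candidate point on ray t, and `edges` is the hypothetical adjacency structure from the constraint-graph sketch above.

```python
import numpy as np

def optimize_curtain(H, edges):
    """Dynamic programming over the constraint graph (Eqn. 4-6, sketch).
    Returns the index of the chosen control point on each of the T camera rays."""
    T = len(H)
    J = [np.asarray(h, dtype=float).copy() for h in H]   # J[T-1] equals H on the last ray
    nxt_choice = [np.zeros(len(h), dtype=int) for h in H]
    for t in range(T - 2, -1, -1):                        # backward pass
        for n, reachable in enumerate(edges[t]):
            if len(reachable) == 0:                       # no feasible successor
                J[t][n] = -np.inf
                continue
            best = reachable[np.argmax(J[t + 1][reachable])]
            nxt_choice[t][n] = best
            J[t][n] = H[t][n] + J[t + 1][best]
    path = [int(np.argmax(J[0]))]                         # forward pass
    for t in range(T - 1):
        path.append(int(nxt_choice[t][path[-1]]))
    return path
```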
|
| 166 |
+
|
| 167 |
+
# 4.5 Training active detector with online training data generation
|
| 168 |
+
|
| 169 |
+
The same detector is used to process data from the single-beam LiDAR and all light curtain placements. Since the light curtains are placed based on the output (uncertainty maps) of the detector, the input point cloud for the next iteration depends on the current weights of the detector. As the weights change during training, so does the input data distribution. We account for the non-stationarity of the training data by generating it online during the training process. This prevents the input distribution from diverging from the network weights during training. See Appendix C for algorithmic details and ablation experiments.
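The training loop can therefore regenerate the light curtain data for every example with the current detector weights, instead of sampling from a fixed pre-collected set. A rough sketch under assumed interfaces (`scene` providing single-beam points, dense ground-truth depth, and boxes; `simulate_curtain` rendering curtain returns from that depth; a PyTorch-style optimizer):

```python
import numpy as np

def train_step(detector, optimizer, scene, place_curtain, simulate_curtain,
               num_curtains=3):
    """One iteration with online light curtain data generation (sketch)."""
    points = scene.single_beam_points
    for _ in range(num_curtains):            # curtain data follows the *current* weights
        _, uncertainty_map = detector(points)
        curtain = place_curtain(uncertainty_map)
        points = np.concatenate([points, simulate_curtain(scene.depth, curtain)], axis=0)
    loss = detector.loss(points, scene.gt_boxes)   # standard detection loss on final cloud
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return float(loss)
```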
|
| 170 |
+
|
| 171 |
+
# 5 Experiments
|
| 172 |
+
|
| 173 |
+
To evaluate our algorithm, we need dense ground truth depth maps to simulate an arbitrary placement of a light curtain. However, standard autonomous driving datasets, such as KITTI [10] and nuScenes [3], contain only sparse LiDAR data, and hence the data is not suitable to accurately simulate a dense light curtain to evaluate our method. To circumvent this problem, we demonstrate our method on two synthetic datasets that provide dense ground truth depth maps, namely the Virtual KITTI [9] and SYNTHIA [29] datasets. Please find more details of the datasets and the evaluation metrics in Appendix D.
|
| 174 |
+
|
| 175 |
+
Our experiments demonstrate the following: First, we show that our method for successive placement of light curtains improves detection performance; particularly, there is a significant increase between the performance of single-beam LiDAR and the performance after placing the first light curtain. We also compare our method to multiple ablations and alternative placement strategies that demonstrate that each component of our method is crucial to achieve good performance. Finally, we show that our method can generalize to many more light curtain placements at test time than the method was trained on. In the appendix, we perform further experiments that include evaluating the generalization of our method to noise in the light curtain data, an ablation experiment for training with online data generation (Sec. 4.5), and efficiency analysis.
|
| 176 |
+
|
| 177 |
+
# 5.1 Comparison with varying number of light curtains
|
| 178 |
+
|
| 179 |
+
We train our method using online training data generation simultaneously on data from single-beam LiDAR and one, two, and three light curtain placements. We perform this experiment for both the Virtual KITTI and SYNTHIA datasets. The accuracies on their test sets are reported in Table 1.
|
| 180 |
+
|
| 181 |
+
<table><tr><td></td><td colspan="4">Virtual KITTI</td><td colspan="4">SYNTHIA</td></tr><tr><td></td><td colspan="2">3D mAP</td><td colspan="2">BEV mAP</td><td colspan="2">3D mAP</td><td colspan="2">BEV mAP</td></tr><tr><td></td><td>0.5 IoU</td><td>0.7 IoU</td><td>0.5 IoU</td><td>0.7 IoU</td><td>0.5 IoU</td><td>0.7 IoU</td><td>0.5 IoU</td><td>0.7 IoU</td></tr><tr><td>Single Beam Lidar</td><td>39.91</td><td>15.49</td><td>40.77</td><td>36.54</td><td>60.49</td><td>47.73</td><td>60.69</td><td>51.22</td></tr><tr><td>Single Beam Lidar (separate model)</td><td>42.35</td><td>23.66</td><td>47.77</td><td>40.15</td><td>60.69</td><td>48.23</td><td>60.84</td><td>57.98</td></tr><tr><td>1 Light Curtain</td><td>58.01</td><td>35.29</td><td>58.51</td><td>47.05</td><td>68.79</td><td>55.99</td><td>68.97</td><td>59.63</td></tr><tr><td>2 Light Curtains</td><td>60.86</td><td>37.91</td><td>61.10</td><td>49.84</td><td>69.02</td><td>57.08</td><td>69.17</td><td>67.14</td></tr><tr><td>3 Light Curtains</td><td>68.52</td><td>38.47</td><td>68.82</td><td>50.53</td><td>69.16</td><td>57.30</td><td>69.25</td><td>67.25</td></tr></table>
|
| 182 |
+
|
| 183 |
+
Table 1: Performance of the detector trained with single-beam LiDAR and up to three light curtains. Performance improves with more light curtain placements, with a significant jump at the first light curtain placement.
|
| 184 |
+
|
| 185 |
+
Note that there is a significant and consistent increase in the accuracy between single-beam LiDAR performance and the first light curtain placement (row 1 and row 3). This shows that actively placing light curtains on the most uncertain regions can improve performance over a single-beam LiDAR that performs fixed scans. Furthermore, placing more light curtains consistently improves detection accuracy.
|
| 186 |
+
|
| 187 |
+
As an ablation experiment, we train a separate model only on single-beam LiDAR data (row 2), for the same number of training iterations. This is different from row 1 which was trained with both single beam LiDAR and light curtain data but evaluated using only data for a single beam LiDAR. Although training a model with only single-beam LiDAR data (row 2) improves performance over row 1, it is still significantly outperformed by our method which uses data from light curtain placements.
|
| 188 |
+
|
| 189 |
+
Noise simulations: In order to simulate noise in the real-world sensor, we perform experiments with added noise in the light curtain input. We demonstrate that the results are comparable to the noiseless case, indicating that our method is robust to noise and is likely to transfer well to the real world. Please see Appendix E for more details.
|
| 190 |
+
|
| 191 |
+
# 5.2 Comparison with alternative light curtain placement strategies
|
| 192 |
+
|
| 193 |
+
In our approach, light curtains are placed by maximizing the coverage of uncertain regions using a dynamic programming optimization. How does this compare to other strategies for light curtain placement? We experiment with several baselines:
|
| 194 |
+
|
| 195 |
+
1. Random: we place frontoparallel light curtains at a random $z$ -distance from the sensor, ignoring the detector's uncertainty map.
|
| 196 |
+
2. Fixed depth: we place a frontoparallel light curtain at a fixed $z$ -distance (15m, 30m, 45m) from the sensor, ignoring the detector's uncertainty map.
|
| 197 |
+
3. Greedy optimization: this baseline tries to evaluate the benefits of using a dynamic programming optimization. Here, we use the same light curtain constraints described in Section 4.4 (Figure 4(a)). We greedily select the next control point based on local uncertainty instead of optimizing for the future sum of uncertainties. Ties are broken by (a) choosing smaller laser angle changes, and (b) randomly.
|
| 198 |
+
4. Frontoparallel + Uncertainty: Our optimization process finds light curtains with flexible shapes. What if the shapes were constrained to make the optimization problem easier? If we restrict ourselves to frontoparallel curtains, we can place them at the $z$ -distance of maximum uncertainty by simply summing the uncertainties for every fixed value of $z$ .
|
| 199 |
+
|
| 200 |
+
The results on the Virtual KITTI and SYNTHIA datasets are shown in Table 2. Our method significantly and consistently outperforms all baselines. This empirically demonstrates the value of using dynamic programming for light curtain placement to improve object detection performance.
|
| 201 |
+
|
| 202 |
+
# 5.3 Generalization to successive light curtain placements
|
| 203 |
+
|
| 204 |
+
If we train a detector using our online light curtain data generation approach for $k$ light curtains, can the performance generalize to more than $k$ light curtains? Specifically, if we continue to place light curtains beyond the number trained for,
|
| 205 |
+
|
| 206 |
+
<table><tr><td></td><td colspan="4">Virtual KITTI</td><td colspan="4">SYNTHIA</td></tr><tr><td></td><td colspan="2">3D mAP</td><td colspan="2">BEV mAP</td><td colspan="2">3D mAP</td><td colspan="2">BEV mAP</td></tr><tr><td></td><td>.5 IoU</td><td>.7 IoU</td><td>.5 IoU</td><td>.7 IoU</td><td>.5 IoU</td><td>.7 IoU</td><td>.5 IoU</td><td>.7 IoU</td></tr><tr><td>Random</td><td>41.29</td><td>17.49</td><td>46.65</td><td>38.09</td><td>60.43</td><td>47.09</td><td>60.66</td><td>58.14</td></tr><tr><td>Fixed depth - 15m</td><td>44.99</td><td>22.20</td><td>46.07</td><td>38.05</td><td>60.74</td><td>48.16</td><td>60.89</td><td>58.48</td></tr><tr><td>Fixed depth - 30m</td><td>39.72</td><td>19.05</td><td>45.21</td><td>35.83</td><td>60.02</td><td>47.88</td><td>60.23</td><td>57.89</td></tr><tr><td>Fixed depth - 45m</td><td>39.86</td><td>20.02</td><td>40.61</td><td>36.87</td><td>60.23</td><td>48.12</td><td>60.43</td><td>57.77</td></tr><tr><td>Greedy Optimization (Randomly break ties)</td><td>37.40</td><td>19.93</td><td>42.80</td><td>35.33</td><td>60.62</td><td>47.46</td><td>60.83</td><td>58.22</td></tr><tr><td>Greedy Optimization (Min laser angle change)</td><td>39.20</td><td>20.19</td><td>44.80</td><td>36.94</td><td>60.61</td><td>47.05</td><td>60.76</td><td>58.07</td></tr><tr><td>Frontoparallel + Uncertainty</td><td>39.41</td><td>21.25</td><td>45.10</td><td>37.80</td><td>60.36</td><td>47.20</td><td>60.52</td><td>58.00</td></tr><tr><td>Ours</td><td>58.01</td><td>35.29</td><td>58.51</td><td>47.05</td><td>68.79</td><td>55.99</td><td>68.97</td><td>59.63</td></tr></table>
|
| 207 |
+
|
| 208 |
+
Table 2: Baselines for alternate light curtain placement strategies, trained and tested on (a) Virtual KITTI and (b) SYNTHIA datasets. Our dynamic programming optimization approach significantly outperforms all other strategies.
|
| 209 |
+
|
| 210 |
+

|
| 211 |
+
(a) Generalization in Virtual KITTI
|
| 212 |
+
|
| 213 |
+

|
| 214 |
+
(b) Generalization in SYNTHIA
|
| 215 |
+
Fig. 5: Generalization to many more light curtains than what the detector was trained for. We train using online data generation on single-beam lidar and only 3 light curtains. We then test with placing 10 curtains, on (a) Virtual KITTI, and (b) SYNTHIA. Performance continues to increase monotonically according to multiple metrics. Takeaway: one can safely place more light curtains at test time and expect to see sustained improvement in accuracy.
|
| 216 |
+
|
| 217 |
+
will the accuracy continue improving? We test this hypothesis by evaluating on 10 light curtains, many more than the model was trained for (3 light curtains). Figure 5 shows the performance as a function of the number of light curtains. We find that in both Virtual KITTI and SYNTHIA, the accuracy monotonically improves with the number of curtains.
|
| 218 |
+
|
| 219 |
+
This result implies that a priori one need not worry about how many light curtains will be placed at test time. If we train on only 3 light curtains, we can place many more light curtains at test time; our results indicate that the performance will keep improving.
|
| 220 |
+
|
| 221 |
+
# 5.4 Qualitative analysis
|
| 222 |
+
|
| 223 |
+
We visualized a successful case of our method in Fig. 1. This is an example where our method detects false negatives missed by the single-beam LiDAR. We also show two other types of successful cases where light curtains remove false positive detections and fix misalignment errors in Figure 6. In Figure 7, we show the predominant failure case of our method. See captions for more details.
|
| 224 |
+
|
| 225 |
+
# 6 Conclusions
|
| 226 |
+
|
| 227 |
+
In this work, we develop a method to use light curtains, an actively controllable resource-efficient sensor, for object recognition in static scenes. We propose to use a 3D object detector's prediction uncertainty as a guide for deciding where to sense. By encoding the constraints of the light curtain into a graph, we show how to optimally and feasibly place a light curtain that maximizes the coverage of uncertain regions. We are able to train an active detector that interacts with light
|
| 228 |
+
|
| 229 |
+

|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
Fig. 6: Successful cases: types of successful cases other than the one in Fig. 1. In (A), the single-beam LiDAR incorrectly detects a bus and a piece of lawn as false positives. They are eliminated successively after placing the first and second light curtains. In (B), the first light curtain fixes the misalignment in the bounding box predicted by the single-beam LiDAR.
|
| 233 |
+
|
| 234 |
+
curtains to iteratively and efficiently sense parts of the scene in an uncertainty-guided manner, successively improving detection accuracy. We hope this work pushes towards designing perception algorithms that integrate sensing and recognition, towards intelligent and adaptive perception.
|
| 235 |
+
|
| 236 |
+
# Acknowledgements
|
| 237 |
+
|
| 238 |
+
We thank Matthew O'Toole for feedback on the initial draft of this paper. This material is based upon work supported by the National Science Foundation under Grants No. IIS-1849154, IIS-1900821 and by the United States Air Force and DARPA under Contract No. FA8750-18-C-0092.
|
| 239 |
+
|
| 240 |
+

|
| 241 |
+
Fig. 7: Failure cases: The predominant failure mode is that the single-beam LiDAR detects a false positive that is not removed by light curtains because the detector is overly confident in its prediction (so the estimated uncertainty is low). Middle: falsely detecting a tree as a car. Right: after three light curtains, the detection persists because light curtains do not get placed on this false positive; it is eventually removed only after six light curtain placements.
|
| 242 |
+
|
| 243 |
+
# References
|
| 244 |
+
|
| 245 |
+
1. Bajcsy, R.: Active perception. Proceedings of the IEEE 76(8), 966-1005 (1988)
|
| 246 |
+
2. Bartels, J.R., Wang, J., Whittaker, W.R., Narasimhan, S.G.: Agile depth sensing using triangulation light curtains. In: The IEEE International Conference on Computer Vision (ICCV) (October 2019)
|
| 247 |
+
3. Caesar, H., Bankiti, V., Lang, A.H., Vora, S., Liong, V.E., Xu, Q., Krishnan, A., Pan, Y., Baldan, G., Beijbom, O.: nuscenes: A multimodal dataset for autonomous driving. arXiv preprint arXiv:1903.11027 (2019)
|
| 248 |
+
4. Cheng, R., Agarwal, A., Fragkiadaki, K.: Reinforcement learning of active vision for manipulating objects under occlusions. arXiv preprint arXiv:1811.08067 (2018)
|
| 249 |
+
5. Connolly, C.: The determination of next best views. In: Proceedings. 1985 IEEE international conference on robotics and automation. vol. 2, pp. 432-435. IEEE (1985)
|
| 250 |
+
6. Daudelin, J., Campbell, M.: An adaptable, probabilistic, next-best view algorithm for reconstruction of unknown 3-d objects. IEEE Robotics and Automation Letters 2(3), 1540-1547 (2017)
|
| 251 |
+
7. Denzler, J., Brown, C.M.: Information theoretic sensor data selection for active object recognition and state estimation. IEEE Transactions on pattern analysis and machine intelligence 24(2), 145-157 (2002)
|
| 252 |
+
8. Doumanoglou, A., Kouskouridas, R., Malassiotis, S., Kim, T.K.: Recovering 6d object pose and predicting next-best-view in the crowd. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 3583-3592 (2016)
|
| 253 |
+
9. Gaidon, A., Wang, Q., Cabon, Y., Vig, E.: Virtual worlds as proxy for multi-object tracking analysis. In: CVPR (2016)
|
| 254 |
+
10. Geiger, A., Lenz, P., Stiller, C., Urtasun, R.: Vision meets robotics: The kitti dataset. The International Journal of Robotics Research 32(11), 1231-1237 (2013)
|
| 255 |
+
11. Haner, S., Heyden, A.: Covariance propagation and next best view planning for 3d reconstruction. In: European Conference on Computer Vision. pp. 545-556. Springer (2012)
|
| 256 |
+
12. Isler, S., Sabzevari, R., Delmerico, J., Scaramuzza, D.: An information gain formulation for active volumetric 3d reconstruction. In: 2016 IEEE International Conference on Robotics and Automation (ICRA). pp. 3477-3484. IEEE (2016)
|
| 257 |
+
13. Kriegel, S., Rink, C., Bodenmüller, T., Suppa, M.: Efficient next-best-scan planning for autonomous 3d surface reconstruction of unknown objects. Journal of Real-Time Image Processing 10(4), 611-631 (2015)
|
| 258 |
+
14. Ku, J., Mozifian, M., Lee, J., Harakeh, A., Waslander, S.L.: Joint 3d proposal generation and object detection from view aggregation. In: 2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). pp. 1-8. IEEE (2018)
|
| 259 |
+
15. Lang, A.H., Vora, S., Caesar, H., Zhou, L., Yang, J., Beijbom, O.: Pointpillars: Fast encoders for object detection from point clouds. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 12697-12705 (2019)
|
| 260 |
+
16. Meyer, G.P., Laddha, A., Kee, E., Vallespi-Gonzalez, C., Wellington, C.K.: Lasernet: An efficient probabilistic 3d object detector for autonomous driving. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 12677-12686 (2019)
|
| 261 |
+
17. Qi, C.R., Su, H., Mo, K., Guibas, L.J.: Pointnet: Deep learning on point sets for 3d classification and segmentation. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 652-660 (2017)
|
| 262 |
+
|
| 263 |
+
18. Scott, W.R., Roth, G., Rivest, J.F.: View planning for automated three-dimensional object reconstruction and inspection. ACM Computing Surveys (CSUR) 35(1), 64-96 (2003)
|
| 264 |
+
19. Shi, S., Wang, X., Li, H.: Pointrcnn: 3d object proposal generation and detection from point cloud. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 770-779 (2019)
|
| 265 |
+
20. Simony, M., Milzy, S., Amendey, K., Gross, H.M.: Complex-yolo: An euler-regionproposal for real-time 3d object detection on point clouds. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 0-0 (2018)
|
| 266 |
+
21. Vasquez-Gomez, J.I., Sucar, L.E., Murrieta-Cid, R., Lopez-Damian, E.: Volumetric next-best-view planning for 3d object reconstruction with positioning error. International Journal of Advanced Robotic Systems 11(10), 159 (2014)
|
| 267 |
+
22. Wang, J., Bartels, J., Whittaker, W., Sankaranarayanan, A.C., Narasimhan, S.G.: Programmable triangulation light curtains. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 19-34 (2018)
|
| 268 |
+
23. Wilkes, D.: Active object recognition (1994)
|
| 269 |
+
24. Wu, Z., Song, S., Khosla, A., Yu, F., Zhang, L., Tang, X., Xiao, J.: 3d shapenets: A deep representation for volumetric shapes. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (June 2015)
|
| 270 |
+
25. Yan, Y., Mao, Y., Li, B.: Second: Sparsely embedded convolutional detection. Sensors 18(10), 3337 (2018)
|
| 271 |
+
26. Yang, B., Liang, M., Urtasun, R.: Hdnet: Exploiting hd maps for 3d object detection. In: Conference on Robot Learning. pp. 146-155 (2018)
|
| 272 |
+
27. Zhou, Y., Tuzel, O.: Voxelnet: End-to-end learning for point cloud based 3d object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 4490-4499 (2018)
|
| 273 |
+
28. Zhu, B., Jiang, Z., Zhou, X., Li, Z., Yu, G.: Class-balanced grouping and sampling for point cloud 3d object detection. arXiv preprint arXiv:1908.09492 (2019)
|
| 274 |
+
29. Zolfaghari Bengar, J., Gonzalez-Garcia, A., Villalonga, G., Raducanu, B., Aghdam, H.H., Mozerov, M., Lopez, A.M., van de Weijer, J.: Temporal coherence for active learning in videos. arXiv preprint arXiv:1908.11757 (2019)
|
activeperceptionusinglightcurtainsforautonomousdriving/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:21351e6385daeaf18199e4d7101ad0e970a2252802927e4759758425cd75fd66
|
| 3 |
+
size 444360
|
activeperceptionusinglightcurtainsforautonomousdriving/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:41d8938e0a1e724c72e4b7b9a74f8ec2875023cd759ebaf4d0dc0b98c4665e23
|
| 3 |
+
size 365183
|
activevisualinformationgatheringforvisionlanguagenavigation/a84150dc-dd46-413d-8323-ec629cc4b60a_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b2b6e218db01c2e961357983b067f7108dfc04ecc7b81944f74a51aff06183c1
|
| 3 |
+
size 88483
|
activevisualinformationgatheringforvisionlanguagenavigation/a84150dc-dd46-413d-8323-ec629cc4b60a_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:efd40f4db325920c8c960df5916cba1431b33cf4157e8ea353a3b2b07955ce70
|
| 3 |
+
size 101642
|
activevisualinformationgatheringforvisionlanguagenavigation/a84150dc-dd46-413d-8323-ec629cc4b60a_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:26fcc921ec056fbdbc1faeaf2a26b8b9fac0f948e559828df1f5cacad6b03a86
|
| 3 |
+
size 7855553
|
activevisualinformationgatheringforvisionlanguagenavigation/full.md
ADDED
|
@@ -0,0 +1,319 @@
| 1 |
+
# Active Visual Information Gathering for Vision-Language Navigation
|
| 2 |
+
|
| 3 |
+
Hanqing Wang $^{1}$ , $\boxtimes$ Wenguan Wang $^{2}$ , Tianmin Shu $^{3}$ , Wei Liang $^{1}$ , and Jianbing Shen $^{4}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ School of Computer Science, Beijing Institute of Technology $^{2}$ ETH Zurich $^{3}$ Massachusetts Institute of Technology $^{4}$ Inception Institute of Artificial Intelligence https://github.com/HanqingWangAI/Active_VLN
|
| 6 |
+
|
| 7 |
+
Abstract. Vision-language navigation (VLN) is the task of requiring an agent to carry out navigational instructions inside photo-realistic environments. One of the key challenges in VLN is how to conduct robust navigation by mitigating the uncertainty caused by ambiguous instructions and insufficient observation of the environment. Agents trained by current approaches typically suffer from this and would consequently struggle to avoid random and inefficient actions at every step. In contrast, when humans face such a challenge, they can still maintain robust navigation by actively exploring the surroundings to gather more information and thus make more confident navigation decisions. This work draws inspiration from human navigation behavior and endows an agent with an active information gathering ability for a more intelligent vision-language navigation policy. To achieve this, we propose an end-to-end framework for learning an exploration policy that decides i) when and where to explore, ii) what information is worth gathering during exploration, and iii) how to adjust the navigation decision after the exploration. The experimental results show that promising exploration strategies emerge from training, which leads to a significant boost in navigation performance. On the R2R challenge leaderboard, our agent gets promising results in all three VLN settings, i.e., single run, pre-exploration, and beam search.
|
| 8 |
+
|
| 9 |
+
Keywords: Vision-Language Navigation $\cdot$ Active Exploration
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Vision-language navigation (VLN) [1] aims to build an agent that can navigate a complex environment following human instructions. Existing methods have made impressive progress via i) efficient learning paradigms (e.g., using an ensemble of imitation learning and reinforcement learning [23,24], auxiliary task learning [10, 12,23,28], or instruction augmentation based semi-supervised learning [7,20]), ii) multi-modal information association [9], and iii) self-correction [11,13]. However, these approaches have not addressed one of the core challenges in VLN: the uncertainty caused by ambiguous instructions and partial observability.
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Fig. 1. (a) A top-down view of the environment with the ground-truth navigation path, based on the instructions. The start and end points are marked as red and blue circles, respectively. The navigation paths are labeled in white. (b) A side view of the bathroom in (a). (c) Previous agents face difficulties as there are two doors in the bathroom, causing the navigation to fail. (d) Our agent is able to actively explore the environment for more efficient information collection. The exploration paths are labeled in yellow. (e) After exploring the two doors, our agent executes the instructions successfully.
|
| 17 |
+
|
| 18 |
+
Consider the example in Fig. 1, where an agent is required to navigate across rooms following human instructions: "Leave the bathroom and walk forward along the pool. . . .". The agent might be confused because the bathroom has two doors, and it consequently fails to navigate to the correct location (Fig. 1(c)). In contrast, when faced with the same situation, we humans would likely perform better: we would first explore the two doors instead of directly making a risky navigation decision. After collecting enough information, i.e., confirming which door allows us to "walk forward along the pool", we can take a more confident navigation action. This insight from human navigation behavior motivates us to develop an agent with a similar active exploration and information gathering capability. When facing ambiguous instructions or having low confidence in his navigation choices, our agent can actively explore his surroundings and gather information to better support navigation-decision making (Fig. 1(d-e)). In contrast, previous agents are expected to conduct navigation at every step and only collect information from a limited scope. Compared with such agents, which perceive a scene passively, ours gains a larger visual field and improved robustness against complex environments and ambiguous instructions by actively exploring the surroundings.
|
| 19 |
+
|
| 20 |
+
To achieve this, we develop an active exploration module, which learns to 1) decide when the exploration is necessary, 2) identify which part of the surroundings is worth exploring, and 3) gather useful knowledge from the environment to support more robust navigation. During training, we encourage the agent to collect relevant information to help itself make better decisions. We empirically show that our exploration module successfully learns a good information gathering policy and, as a result, the navigation performance is significantly improved.
|
| 21 |
+
|
| 22 |
+
With the above designs, our agent achieves promising results on the R2R [1] benchmark leaderboard across all three VLN settings, i.e., single run, pre-exploration, and beam search. In addition, the experiments show that our agent performs well in both seen and unseen environments.
|
| 23 |
+
|
| 24 |
+
# 2 Related Work
|
| 25 |
+
|
| 26 |
+
Vision and Language. Over the last few years, unprecedented advances in the design and optimization of deep neural network architectures have led to tremendous progress in computer vision and natural language processing. This progress, in turn, has enabled a multitude of multi-modal applications spanning both disciplines, including image captioning [25], visual question answering [3], visual grounding [26], visual dialog [6, 27], and vision-language navigation [1]. The formulation of these tasks requires a comprehensive understanding of both visual and linguistic content. A typical solution is to learn a joint multi-modal embedding space, i.e., CNN-based visual features and RNN-based linguistic representations are mapped to a common space by several non-linear operations. Recently, neural attention [25], which is good at mining cross-modal knowledge, has been shown to be a pivotal technique for multi-modal representation learning.
|
| 27 |
+
|
| 28 |
+
Vision-Language Navigation (VLN). In contrast to previous vision-language tasks (e.g., image captioning, visual dialog), which only involve static visual content, VLN requires an agent to actively interact with the environment to fulfill navigational instructions. Although VLN is relatively new in computer vision (dating back to [1]), many of its core units/technologies (such as instruction following [2] and instruction-action mapping [15]) were introduced much earlier. Specifically, these were originally studied in the natural language processing and robotics communities, with a focus on either language-based navigation in a controlled environmental context [2, 5, 14, 15, 17, 21], or vision-based navigation in visually-rich real-world scenes [16, 29]. The VLN simulator described in [1] unites these two lines of research, providing photo-realistic environments and human-annotated instructions (as opposed to many prior efforts using virtual scenes or formulaic instructions). Since its release, a growing body of research has been conducted in this direction. Sequence-to-sequence [1] and reinforcement learning [24] based solutions were adopted first. Then, [7, 20] strengthened the navigator by synthesizing new instructions. Later, combining imitation learning and reinforcement learning became a popular choice [23]. Some recent studies explored auxiliary tasks as self-supervised signals [10, 12, 23, 28], while others addressed self-correction for intelligent path planning [11, 13]. In addition, Thomason et al. [22] identified unimodal biases in VLN, and Hu et al. [9] then achieved multi-modal grounding using a mixture-of-experts framework.
|
| 29 |
+
|
| 30 |
+
# 3 Methodology
|
| 31 |
+
|
| 32 |
+
Problem Description. Navigation in the Room-to-Room task [1] requires an agent to perform a sequence of navigation actions in real indoor environments and reach a target location by following natural language instructions.
|
| 33 |
+
|
| 34 |
+
Problem Formulation and Basic Agent. Formally, a language instruction is represented via textual embeddings as $\mathbf{X}$ . At each navigation step $t$ , the agent has a panoramic view [7], which is discretized into 36 single views (i.e., RGB images). The agent makes a navigation decision in the panoramic action
|
| 35 |
+
|
| 36 |
+
space, which consists of $K$ navigable views (reachable and visible), represented as $\pmb{V}_t = \{\pmb{v}_{t,1},\pmb{v}_{t,2},\dots,\pmb{v}_{t,K}\}$ . The agent needs to make a decision on which navigable view to go to (i.e., choose an action $a_{t}^{\mathrm{nv}}\in \{1,\dots ,K\}$ with the embedding $\pmb{a}_{t}^{\mathrm{nv}} = \pmb{v}_{t,a_{t}^{\mathrm{nv}}}$ ), according to the given instruction $\pmb{X}$ , history panoramic views $\{\pmb{V}_1,\pmb{V}_2,\dots,\pmb{V}_{t - 1}\}$ and previous actions $\{\pmb{a}_1^{\mathrm{nv}},\pmb{a}_2^{\mathrm{nv}},\dots,\pmb{a}_{t - 1}^{\mathrm{nv}}\}$ . Conventionally, this dynamic navigation process is formulated in a recurrent form [1,20]:
|
| 37 |
+
|
| 38 |
+
$$
|
| 39 |
+
\boldsymbol{h}_{t}^{\mathrm{nv}} = \operatorname{LSTM}\left(\left[\boldsymbol{X}, \boldsymbol{V}_{t-1}, \boldsymbol{a}_{t-1}^{\mathrm{nv}}\right], \boldsymbol{h}_{t-1}^{\mathrm{nv}}\right). \tag{1}
|
| 40 |
+
$$
|
| 41 |
+
|
| 42 |
+
With current navigation state $\pmb{h}_t^{\mathrm{nv}}$ , the probability of $k^{th}$ navigation action is:
|
| 43 |
+
|
| 44 |
+
$$
|
| 45 |
+
p_{t,k}^{\mathrm{nv}} = \operatorname{softmax}_{k}\left(\boldsymbol{v}_{t,k}^{\top}\boldsymbol{W}^{\mathrm{nv}}\boldsymbol{h}_{t}^{\mathrm{nv}}\right). \tag{2}
|
| 46 |
+
$$
|
| 47 |
+
|
| 48 |
+
Here, $\mathbf{W}^{\mathrm{nv}}$ indicates a learnable parameter matrix. The navigation action $a_{t}^{\mathrm{nv}}$ is chosen according to the probability distribution $\{p_{t,k}^{\mathrm{nv}}\}_{k=1}^{K}$ .
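To make this recurrent formulation concrete, below is a minimal PyTorch sketch of the navigation step in Eqs. 1-2. The tensor shapes, the way $\pmb{X}$, $\pmb{V}_{t-1}$ and $\pmb{a}_{t-1}^{\mathrm{nv}}$ are fused into a single LSTM input, and all module names are illustrative assumptions, not the authors' released implementation.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class BasicNavigator(nn.Module):
    """Minimal sketch of the recurrent navigation step (Eqs. 1-2)."""
    def __init__(self, ctx_dim=512, view_dim=512, hid_dim=512):
        super().__init__()
        # [X, V_{t-1}, a_{t-1}^nv] are fused into one input vector (assumption).
        self.lstm = nn.LSTMCell(ctx_dim + view_dim + view_dim, hid_dim)
        self.W_nv = nn.Linear(hid_dim, view_dim, bias=False)   # W^nv in Eq. 2

    def forward(self, x_ctx, v_prev, a_prev, state):
        # Eq. 1: update the navigation state h_t^nv.
        h, c = self.lstm(torch.cat([x_ctx, v_prev, a_prev], dim=-1), state)
        return h, (h, c)

    def action_logits(self, h_nv, views):
        # Eq. 2: scores v_{t,k}^T W^nv h_t^nv for all K views; views: (B, K, D)
        return torch.einsum('bkd,bd->bk', views, self.W_nv(h_nv))

# toy usage
B, K = 2, 6
nav = BasicNavigator()
state0 = (torch.zeros(B, 512), torch.zeros(B, 512))
h_nv, state = nav(torch.randn(B, 512), torch.randn(B, 512), torch.randn(B, 512), state0)
p_nv = F.softmax(nav.action_logits(h_nv, torch.randn(B, K, 512)), dim=-1)
```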
|
| 49 |
+
|
| 50 |
+
Basic Agent Implementation. So far, we have given a brief, high-level description of our basic navigation agent, which is also commonly shared with prior art. In practice, we implement our agent following [20], though our framework is not tied to this particular choice.
|
| 51 |
+
|
| 52 |
+
Core Idea. When following instructions, humans do not expect every step to be a "perfect" navigation decision, due to their limited visual perception, the inevitable ambiguity in instructions, and the complexity of environments. Instead, when we are uncertain about the future steps, we tend to explore the surroundings first and gather more information to mitigate the ambiguity, and then make a more informed decision. Our core idea is thus to equip an agent with such an active exploration/learning ability. To ease understanding, we start with a naive model equipped with the simplest exploration function (§3.1). We then complete the naive model in §3.2 and §3.3 and showcase how a learned active exploration policy can greatly improve the navigation performance.
|
| 53 |
+
|
| 54 |
+
# 3.1 A Naïve Model with A Simple Exploration Ability
|
| 55 |
+
|
| 56 |
+
Here, we consider the most straightforward way of achieving our idea. At each navigation step, the agent simply explores all the navigable views, and only one exploration step is allowed for each. This means that the agent explores the first direction, gathers surrounding information, and then returns to the original navigation position. Next, it goes one step towards the second navigable direction and turns back. This one-step exploration process is repeated until all the possible directions have been visited. The information gathered during exploration is then used to support the current navigation-decision making.
|
| 57 |
+
|
| 58 |
+
Formally, at the $t^{th}$ navigation step, the agent has $K$ navigable views, i.e., $V_{t} = \{v_{t,1}, v_{t,2}, \dots, v_{t,K}\}$ . For the $k^{th}$ view, we further denote its $K'$ navigable views as $O_{t,k} = \{o_{t,k,1}, o_{t,k,2}, \dots, o_{t,k,K'}\}$ (see Fig. 2(a)). The subscript $(t,k)$ will be omitted for notation simplicity. If the agent makes a one-step exploration in the $k^{th}$ direction, he is expected to collect surrounding information from $O$ . Specifically, keeping the current navigation state $h_{t}^{\mathrm{nv}}$ in mind, the agent assembles the visual information from $O$ by an attention operation (Fig. 2(b)):
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
\hat{\boldsymbol{o}}_{t,k} = \operatorname{att}(\boldsymbol{O}, \boldsymbol{h}_{t}^{\mathrm{nv}}) = \sum_{k'=1}^{K'} \alpha_{k'} \boldsymbol{o}_{k'}, \quad \text{where } \alpha_{k'} = \operatorname{softmax}_{k'}\left(\boldsymbol{o}_{k'}^{\top}\boldsymbol{W}^{\mathrm{att}}\boldsymbol{h}_{t}^{\mathrm{nv}}\right). \tag{3}
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+

|
| 65 |
+
Fig. 2. Illustration of our naïve model (§3.1). (a) At $t^{th}$ navigation step, the agent has a panoramic view $V_{t}$ . For $k^{th}$ subview, we further denote its panoramic view as $O_{t,k}$ . (b) After making a one-step exploration in the first direction $\boldsymbol{v}_{t,1}$ , the agent collects information $\hat{o}_{t,1}$ from $O_{t,1}$ via Eq. 3. (c) After exploring all the directions, the agent updates his knowledge, i.e., $\{\tilde{\boldsymbol{v}}_{t,1},\tilde{\boldsymbol{v}}_{t,2}\}$ , via Eq. 4. (d) With the updated knowledge, the agent computes the navigation probability distribution $\{p_{t,k}^{\mathrm{nv}}\}_{k}$ (Eq. 5) and makes a more reliable navigation decision (i.e., $a_{t}^{\mathrm{nv}} = 2$ ). (e) Visualization of navigation route, where yellow lines are the exploration routes and green circles are navigation landmarks.
|
| 66 |
+
|
| 74 |
+
|
| 75 |
+
Then, the collected information $\hat{\pmb{o}}_{t,k}$ is used to update the current visual knowledge $\pmb{v}_{t,k}$ about $k^{th}$ view, computed in a residual form (Fig. 2(c)):
|
| 76 |
+
|
| 77 |
+
$$
|
| 78 |
+
\tilde{\boldsymbol{v}}_{t,k} = \boldsymbol{v}_{t,k} + \boldsymbol{W}^{\mathrm{o}}\hat{\boldsymbol{o}}_{t,k}. \tag{4}
|
| 79 |
+
$$
|
| 80 |
+
|
| 81 |
+
In this way, the agent successively makes one-step explorations of all $K$ navigable views and enriches his corresponding knowledge. Later, with the updated knowledge $\{\tilde{\pmb{v}}_{t,1},\tilde{\pmb{v}}_{t,2},\dots ,\tilde{\pmb{v}}_{t,K}\}$ , the probability of taking the $k^{th}$ navigation action (originally given by Eq. 2) can be reformulated as (Fig. 2(d)):
|
| 82 |
+
|
| 83 |
+
$$
|
| 84 |
+
p_{t,k}^{\mathrm{nv}} = \operatorname{softmax}_{k}\left(\tilde{\boldsymbol{v}}_{t,k}^{\top}\boldsymbol{W}^{\mathrm{nv}}\boldsymbol{h}_{t}^{\mathrm{nv}}\right). \tag{5}
|
| 85 |
+
$$
|
| 86 |
+
|
| 87 |
+
Through this exploration, the agent is able to gather more information from its surroundings and then make a more reasonable navigation decision. In §4.3, we empirically demonstrate that, by equipping the basic agent with such a naive exploration module, we achieve a $4\sim 6\%$ improvement in terms of Success Rate (SR). This is impressive, as we only allow the agent to make one-step explorations. Another notable issue is that the agent simply explores all the possible directions, resulting in a long Trajectory Length $(\mathrm{TL})^{1}$. Next we improve the naive model by tackling two key issues: "how to decide where to explore" (§3.2) and "how to make deeper exploration" (§3.3).
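As a rough illustration of this naive module, the sketch below strings together the attention pooling (Eq. 3), the residual knowledge update (Eq. 4), and the exploration-assisted navigation probability (Eq. 5). Shapes, parameter names, and the plain matrix parameterization are assumptions for exposition only, not the authors' implementation.

```python
import torch
import torch.nn.functional as F

def att(feats, query, W_att):
    """Eq. 3: attention-pooled summary of feats (B, N, D) w.r.t. query (B, D)."""
    scores = torch.einsum('bnd,bd->bn', feats @ W_att, query)
    alpha = F.softmax(scores, dim=-1)
    return torch.einsum('bn,bnd->bd', alpha, feats)

def naive_explore_step(views, sub_views, h_nv, W_att, W_o, W_nv):
    """views: (B, K, D) navigable views; sub_views: (B, K, K', D) their navigable views."""
    B, K, D = views.shape
    updated = []
    for k in range(K):                               # one-step exploration of every direction
        o_hat = att(sub_views[:, k], h_nv, W_att)    # Eq. 3
        updated.append(views[:, k] + o_hat @ W_o)    # Eq. 4 (residual update)
    v_tilde = torch.stack(updated, dim=1)            # (B, K, D)
    logits = torch.einsum('bkd,bd->bk', v_tilde, h_nv @ W_nv)
    return F.softmax(logits, dim=-1)                 # Eq. 5

# toy usage
B, K, Kp, D = 2, 4, 5, 512
p_nv = naive_explore_step(torch.randn(B, K, D), torch.randn(B, K, Kp, D),
                          torch.randn(B, D),
                          torch.randn(D, D), torch.randn(D, D), torch.randn(D, D))
```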
|
| 88 |
+
|
| 89 |
+
# 3.2 Where to Explore
|
| 90 |
+
|
| 91 |
+
In the naïve model (§3.1), the agent conducts exploration of all navigable views at every navigation step. Such a strategy is unwise: it brings longer trajectories and goes against the intuition that exploration is only needed at a few navigation steps, in a few directions. To address this, the agent should learn an exploration-decision making strategy, i.e., more actively deciding which direction to explore.
|
| 92 |
+
|
| 93 |
+

|
| 94 |
+
Fig. 3. Equipping our agent with an exploration-decision making ability (§3.2). (a) The agent predicts a probability distribution $\{p_{t,k}^{\mathrm{ep}}\}_{k=1}^{K+1}$ over exploration action candidates (i.e., Eq. 6). (b) According to $\{p_{t,k}^{\mathrm{ep}}\}_{k=1}^{K+1}$ , the most "valuable" view is selected for a one-step exploration. (c) The agent updates his knowledge $\tilde{v}_{t,2}$ and makes a second-round exploration decision (Eq. 7). If the STOP action is selected, the agent makes a navigation decision (Eq. 5) and starts the $(t+1)^{th}$ navigation step.
|
| 95 |
+
|
| 103 |
+
|
| 104 |
+
To achieve this, at each navigation step $t$ , we let the agent make an exploration decision $a_{t}^{\mathrm{ep}} \in \{1, \dots, K + 1\}$ from current $K$ navigable views as well as a STOP action. Thus, the exploration action embedding $\pmb{a}_{t}^{\mathrm{ep}}$ is a vector selected from the visual features of the $K$ navigable views (i.e., $V_{t} = \{v_{t,1}, v_{t,2}, \dots, v_{t,K}\}$ ), and the STOP action embedding (i.e., $v_{t,K+1} = \mathbf{0}$ ). To learn the exploration-decision making strategy, with current navigation state $h_{t}^{\mathrm{nv}}$ and current visual surrounding knowledge $V_{t}$ , the agent predicts a probability distribution $\{p_{t,k}^{\mathrm{ep}}\}_{k=1}^{K+1}$ for the $K+1$ exploration action candidates (Fig. 3(a)):
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\hat{\boldsymbol{v}}_{t} = \operatorname{att}(\boldsymbol{V}_{t}, \boldsymbol{h}_{t}^{\mathrm{nv}}), \qquad p_{t,k}^{\mathrm{ep}} = \operatorname{softmax}_{k}\left(\boldsymbol{v}_{t,k}^{\top}\boldsymbol{W}^{\mathrm{ep}}\left[\hat{\boldsymbol{v}}_{t}, \boldsymbol{h}_{t}^{\mathrm{nv}}\right]\right). \tag{6}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
Then, an exploration action is made according to $k^{*} = \arg \max_{k} p_{t,k}^{\mathrm{ep}}$. If the STOP action is selected (i.e., $k^{*} = K + 1$ ), the agent directly makes a navigation decision by Eq. 2, without exploration. Otherwise, the agent makes a one-step exploration in the most "valuable" direction $k^{*} \in \{1, \dots, K\}$ (Fig. 3(b)). Then, the agent uses the collected information $\hat{\pmb{o}}_{t,k^{*}}$ (Eq. 3) to enrich his knowledge $\pmb{v}_{t,k^{*}}$ about the $k^{*th}$ viewpoint (Eq. 4). With the updated knowledge, the agent makes a second-round exploration decision (Fig. 3(c)):
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\begin{array}{l} \tilde{\boldsymbol{V}}_{t} \leftarrow \{\boldsymbol{v}_{t,1}, \dots, \tilde{\boldsymbol{v}}_{t,k^{*}}, \dots, \boldsymbol{v}_{t,K}\}, \quad \hat{\boldsymbol{v}}_{t} \leftarrow \operatorname{att}(\tilde{\boldsymbol{V}}_{t}, \boldsymbol{h}_{t}^{\mathrm{nv}}), \\ p_{t,k^{u}}^{\mathrm{ep}} \leftarrow \operatorname{softmax}_{k^{u}}\left(\boldsymbol{v}_{t,k^{u}}^{\top}\boldsymbol{W}^{\mathrm{ep}}\left[\hat{\boldsymbol{v}}_{t}, \boldsymbol{h}_{t}^{\mathrm{nv}}\right]\right). \tag{7} \end{array}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
Note that the views that have been already explored are removed from the exploration action candidate set, and $k^u$ indicates an exploration action that has not been selected yet. Based on the new exploration probability distribution $\{p_{t,k^u}^{\mathrm{ep}}\}_{k^u}^{K+1}$ , if the STOP action is still not selected, the agent will make a second-round exploration in a new direction. The above multi-round exploration process will be repeated until either the agent is satisfied with his current knowledge about the surroundings (i.e., choosing the STOP decision), or all the $K$ navigable directions are explored. Finally, with the newest knowledge about the surroundings $\tilde{\mathbf{V}}_t$ , the agent makes a more reasonable navigation decision (Eq.5, Fig.3(d)). Our experiments in §4.3 show that, when allowing the agent to actively select
|
| 117 |
+
|
| 118 |
+

|
| 119 |
+
Fig. 4. Our full model can actively make multi-direction, multi-step exploration. (a) The agent is at the $1^{st}$ exploration step $(s = 1)$ , starting from the $k^{th}$ view at the $t^{th}$ navigation step. According to the exploration probability $\{p_{s,k'}^{\mathrm{ep}}\}_{k'}$ (Eq. 11), the agent decides to take a further exploration step. (b) At the $2^{nd}$ exploration step, the agent decides to finish the exploration of the $k^{th}$ view. (c) The agent thinks there is no other direction worth exploring, and then makes a navigation decision based on the updated knowledge.
|
| 120 |
+
|
| 121 |
+
the directions worth exploring, TL is greatly decreased compared with the naive model, and SR is even improved (as the agent focuses on the most valuable directions).
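A possible implementation of this exploration-decision step (Eqs. 6-7) is sketched below: the STOP action is appended as a zero embedding and already-explored directions are masked out. The identity attention (no $\pmb{W}^{\mathrm{att}}$ projection) and all shapes are simplifying assumptions.

```python
import torch
import torch.nn.functional as F

def exploration_decision(views, h_nv, W_ep, explored):
    """Score the K navigable views plus a STOP action (Eqs. 6-7).

    views: (B, K, D), h_nv: (B, D), W_ep: (D, 2*D), explored: (B, K) boolean mask.
    """
    B, K, D = views.shape
    stop = torch.zeros(B, 1, D)                      # STOP embedding v_{t,K+1} = 0
    cand = torch.cat([views, stop], dim=1)           # (B, K+1, D)
    # v_hat = att(V_t, h_nv), here with an (assumed) identity attention projection
    alpha = F.softmax(torch.einsum('bkd,bd->bk', views, h_nv), dim=-1)
    v_hat = torch.einsum('bk,bkd->bd', alpha, views)
    query = torch.cat([v_hat, h_nv], dim=-1) @ W_ep.T            # (B, D)
    logits = torch.einsum('bkd,bd->bk', cand, query)             # (B, K+1)
    logits[:, :K] = logits[:, :K].masked_fill(explored, float('-inf'))
    return F.softmax(logits, dim=-1)                 # argmax gives the action; index K is STOP

# toy usage
B, K, D = 2, 5, 512
p_ep = exploration_decision(torch.randn(B, K, D), torch.randn(B, D),
                            torch.randn(D, 2 * D), torch.zeros(B, K, dtype=torch.bool))
```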
|
| 122 |
+
|
| 123 |
+
# 3.3 Deeper Exploration
|
| 124 |
+
|
| 125 |
+
So far, our agent is able to make explorations only when necessary. Now we focus on how to let him conduct multi-step exploration, instead of simply constraining the maximum exploration length to one. Ideally, during the exploration of a certain direction, the agent should be able to go ahead a few steps until sufficient information is collected. To model such a sequential exploration decision-making process, we design a recurrent network based exploration module, which also generalizes well to the cases discussed in §3.1 and §3.2. Specifically, let us assume that the agent starts an exploration episode from the $k^{th}$ view $\boldsymbol{v}_{t,k}$ at the $t^{th}$ navigation step (Fig. 4(a)). At an exploration step $s$, the agent perceives the surroundings with a panoramic view and collects information from $K'$ navigable views $\boldsymbol{Y}_{t,k,s} = \{\boldsymbol{y}_{t,k,s,1}, \boldsymbol{y}_{t,k,s,2}, \dots, \boldsymbol{y}_{t,k,s,K'}\}$. With such a definition, we have $\boldsymbol{Y}_{t,k,0} = \boldsymbol{V}_t$. In §3.1 and §3.2, for the $k^{th}$ view at the $t^{th}$ navigation step, its panoramic view $\boldsymbol{O}_{t,k}$ is also $\boldsymbol{Y}_{t,k,1}$. The subscript $(t,k)$ will be omitted for notation simplicity.
|
| 126 |
+
|
| 127 |
+
Knowledge Collection During Exploration: As the exploration module is in a recurrent form, the agent has a specific state $\pmb{h}_s^{\mathrm{ep}}$ at $s^{th}$ exploration step. With $\pmb{h}_s^{\mathrm{ep}}$ , the agent actively collects knowledge by assembling the surrounding information $\pmb{Y}_s$ using an attention operation (similar to Eq. 3):
|
| 128 |
+
|
| 129 |
+
$$
|
| 130 |
+
\hat{\pmb{y}}_{s} = \operatorname{att}(\pmb{Y}_{s}, \pmb{h}_{s}^{\mathrm{ep}}). \tag{8}
|
| 131 |
+
$$
|
| 132 |
+
|
| 133 |
+
Knowledge Storage During Exploration: As the agent performs multi-step exploration, the learned knowledge $\hat{\pmb{y}}_s$ is stored in a memory network:
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
\boldsymbol{h}_{s}^{\mathrm{kw}} = \operatorname{LSTM}^{\mathrm{kw}}\left(\hat{\boldsymbol{y}}_{s}, \boldsymbol{h}_{s-1}^{\mathrm{kw}}\right), \tag{9}
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+
which will eventually be used for supporting navigation-decision making.
|
| 140 |
+
|
| 141 |
+
Sequential Exploration-Decision Making for Multi-Step Exploration: Next, the agent needs to decide whether or not to choose a new direction for further exploration. In the exploration action space, the agent either selects one direction from the current $K'$ reachable views to explore or stops the current exploration episode and returns to the original position at $t^{th}$ navigation step. The exploration action $a_{s}^{\mathrm{ep}}$ is represented as a vector $\pmb{a}_{s}^{\mathrm{ep}}$ from the visual features of the $K'$ navigable views (i.e., $\pmb{Y}_{s} = \{\pmb{y}_{s,1},\pmb{y}_{s,2},\dots,\pmb{y}_{s,K'}\}$ ), as well as the STOP action embedding (i.e., $\pmb{y}_{s,K'+1} = \mathbf{0}$ ). $a_{s}^{\mathrm{ep}}$ is predicted according to the current exploration state $h_{s}^{\mathrm{ep}}$ and collected information $h_{s}^{\mathrm{kw}}$ . Hence, the computation of $h_{s}^{\mathrm{ep}}$ is conditioned on the current navigation state $h_{t}^{\mathrm{nv}}$ , history exploration views $\{Y_{1},Y_{2},\dots,Y_{s-1}\}$ , and previous exploration actions $\{a_{1}^{\mathrm{ep}},a_{2}^{\mathrm{ep}},\dots,a_{s-1}^{\mathrm{ep}}\}$ :
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\boldsymbol{h}_{s}^{\mathrm{ep}} = \operatorname{LSTM}^{\mathrm{ep}}\left(\left[\boldsymbol{h}_{t}^{\mathrm{nv}}, \boldsymbol{Y}_{s-1}, \boldsymbol{a}_{s-1}^{\mathrm{ep}}\right], \boldsymbol{h}_{s-1}^{\mathrm{ep}}\right), \quad \text{where } \boldsymbol{h}_{0}^{\mathrm{ep}} = \boldsymbol{h}_{t}^{\mathrm{nv}}. \tag{10}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
For $k^{\prime th}$ exploration action candidate (reachable view), its probability is:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
p_{s,k'}^{\mathrm{ep}} = \operatorname{softmax}_{k'}\left(\boldsymbol{y}_{s,k'}^{\top}\boldsymbol{W}^{\mathrm{ep}}\left[\boldsymbol{h}_{s}^{\mathrm{kw}}, \boldsymbol{h}_{s}^{\mathrm{ep}}\right]\right). \tag{11}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
The exploration action $a_{s}^{\mathrm{ep}}$ is chosen according to $\{p_{s,k'}^{\mathrm{ep}}\}_{k'=1}^{K'+1}$ .
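The recurrent exploration module (Eqs. 8-11) could look roughly like the sketch below. How $\pmb{Y}_{s-1}$ is summarized into a single vector before entering $\mathrm{LSTM}^{\mathrm{ep}}$, the omission of the attention projection matrix, and the layer names are all assumptions on our part.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ExplorationModule(nn.Module):
    """Minimal sketch of one exploration step s (Eqs. 8-11)."""
    def __init__(self, D=512):
        super().__init__()
        self.lstm_ep = nn.LSTMCell(3 * D, D)     # Eq. 10: input [h_t^nv, Y_{s-1}, a_{s-1}^ep]
        self.lstm_kw = nn.LSTMCell(D, D)         # Eq. 9: knowledge memory
        self.W_ep = nn.Linear(2 * D, D, bias=False)

    def step(self, h_nv, y_prev, a_prev, ep_state, kw_state, views):
        # Eq. 10: update the exploration state (Y_{s-1} pre-pooled into y_prev, an assumption).
        h_ep, c_ep = self.lstm_ep(torch.cat([h_nv, y_prev, a_prev], -1), ep_state)
        # Eq. 8: attention-pool the current navigable views Y_s with h_s^ep.
        alpha = F.softmax(torch.einsum('bkd,bd->bk', views, h_ep), dim=-1)
        y_hat = torch.einsum('bk,bkd->bd', alpha, views)
        # Eq. 9: store the gathered knowledge.
        h_kw, c_kw = self.lstm_kw(y_hat, kw_state)
        # Eq. 11: probability over K' views plus a STOP action (zero embedding).
        cand = torch.cat([views, torch.zeros_like(views[:, :1])], dim=1)
        logits = torch.einsum('bkd,bd->bk', cand, self.W_ep(torch.cat([h_kw, h_ep], -1)))
        return F.softmax(logits, -1), (h_ep, c_ep), (h_kw, c_kw)

# toy usage
B, Kp, D = 2, 5, 512
m = ExplorationModule(D)
p_ep, ep_state, kw_state = m.step(torch.randn(B, D), torch.randn(B, D), torch.randn(B, D),
                                  (torch.randn(B, D), torch.randn(B, D)),
                                  (torch.randn(B, D), torch.randn(B, D)),
                                  torch.randn(B, Kp, D))
```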
|
| 154 |
+
|
| 155 |
+
Multi-Round Exploration-Decision Making for Multi-Direction Exploration: After $S$ -step exploration, the agent chooses the STOP action when he thinks sufficient information along a certain direction $k$ has been gathered (Fig. 4 (b)). He goes back to the start point at $t^{th}$ navigation step and updates his knowledge about $k^{th}$ direction, i.e., $\boldsymbol{v}_{t,k}$ , with the gathered information $h_S^{\mathrm{kw}}$ . Thus, Eq. 4 is improved as:
|
| 156 |
+
|
| 157 |
+
$$
|
| 158 |
+
\tilde{\boldsymbol{v}}_{t,k} = \boldsymbol{v}_{t,k} + \boldsymbol{W}^{\mathrm{o}}\boldsymbol{h}_{S}^{\mathrm{kw}}. \tag{12}
|
| 159 |
+
$$
|
| 160 |
+
|
| 161 |
+
With the updated knowledge regarding the surroundings, the agent makes a second-round exploration decision:
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
\begin{array}{l} \tilde{\boldsymbol{V}}_{t} \leftarrow \left\{\boldsymbol{v}_{t,1}, \dots, \tilde{\boldsymbol{v}}_{t,k}, \dots, \boldsymbol{v}_{t,K}\right\}, \quad \hat{\boldsymbol{v}}_{t} \leftarrow \operatorname{att}\left(\tilde{\boldsymbol{V}}_{t}, \boldsymbol{h}_{t}^{\mathrm{nv}}\right), \tag{13} \\ p_{t,k^{u}}^{\mathrm{ep}} \leftarrow \operatorname{softmax}_{k^{u}}\left(\boldsymbol{v}_{t,k^{u}}^{\top}\boldsymbol{W}^{\mathrm{ep}}\left[\hat{\boldsymbol{v}}_{t}, \boldsymbol{h}_{t}^{\mathrm{nv}}\right]\right). \end{array}
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
Again, $k^u$ indicates an exploration action that has not been selected yet. Then the agent can make another round of exploration in a new direction, until he chooses the STOP action (i.e., the collected information is enough to help make a confident navigation decision), or has explored all $K$ directions (Fig. 4(c)).
|
| 168 |
+
|
| 169 |
+
Exploration-Assisted Navigation-Decision Making: After multi-round multi-step exploration, with the newest knowledge $\tilde{\pmb{V}}_t$ about the surroundings, the agent makes a more reliable navigation decision (Eq.5): $p_{t,k}^{\mathrm{nv}} = \mathrm{softmax}_k(\tilde{v}_{t,k}^\top \pmb{W}^{\mathrm{nv}}\pmb{h}_t^{\mathrm{nv}})$ . Then, at $(t + 1)^{th}$ navigation step, the agent makes multi-step explorations in several directions (or can even omit exploration) and then chooses a new navigation action. In §4.3, we will empirically demonstrate that our full model gains the highest SR score with only slightly increased TL.
|
| 170 |
+
|
| 171 |
+
Memory based Late Action-Taking Strategy: After finishing the exploration of a certain direction, directly "going back" to the start position and making the next round of exploration/observation may cause many revisits. To alleviate
|
| 172 |
+
|
| 173 |
+
this, we let the agent store the views visited during exploration in an external memory. The agent then follows a late action-taking strategy, i.e., moving only when necessary. When the agent decides to stop his exploration in a direction, he stays at his current position and "imagines" the execution of his following actions without really going back. When he needs to visit a new point that is not stored in the memory, he goes to that point directly and updates the memory accordingly. Then, again, he holds the position until he needs to visit a new point that has not been met before. Please refer to the supplementary material for more details.
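The late action-taking strategy can be pictured with the plain-Python sketch below. The simulator handle `sim` and its `cached_observation`/`move_to` methods are hypothetical placeholders introduced for illustration; the actual interface in the authors' code may differ.

```python
class LateMover:
    """Sketch of memory based late action-taking: move only when necessary."""

    def __init__(self, sim, start_viewpoint):
        self.sim = sim                           # assumed simulator handle
        self.physical = start_viewpoint          # where the agent actually stands
        self.visited = {start_viewpoint}         # viewpoints seen during exploration

    def observe(self, viewpoint):
        # Record viewpoints passed during exploration without committing to motion.
        self.visited.add(viewpoint)

    def request(self, viewpoint):
        """Called whenever the policy wants to 'be at' a viewpoint.

        If the viewpoint was already visited during exploration, the agent only
        'imagines' being there and reuses cached observations. Otherwise it
        really moves, which also refreshes the memory.
        """
        if viewpoint in self.visited:
            return self.sim.cached_observation(viewpoint)   # assumed cache lookup
        self.physical = viewpoint
        self.visited.add(viewpoint)
        return self.sim.move_to(viewpoint)                  # assumed move primitive
```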
|
| 174 |
+
|
| 175 |
+
# 3.4 Training
|
| 176 |
+
|
| 177 |
+
Our entire agent model is trained with two distinct learning paradigms, i.e., 1) imitation learning, and 2) reinforcement learning.
|
| 178 |
+
|
| 179 |
+
Imitation Learning (IL). In IL, an agent is forced to mimic the behavior of its teacher. Such a strategy has proven effective in VLN [1, 7, 13, 15, 20, 23, 24]. Specifically, at navigation step $t$ , the teacher provides the teacher action $a_{t}^{*} \in \{1, \dots, K\}$ , which selects the next navigable viewpoint on the shortest route from the current viewpoint to the target viewpoint. The negative log-likelihood of the demonstrated action is computed as the IL loss:
|
| 180 |
+
|
| 181 |
+
$$
|
| 182 |
+
\mathcal{L}_{\mathrm{IL}}^{\mathrm{nv}} = \sum_{t} -\log p_{t,a_{t}^{*}}^{\mathrm{nv}}. \tag{14}
|
| 183 |
+
$$
|
| 184 |
+
|
| 185 |
+
The IL loss for the exploration is defined as:
|
| 186 |
+
|
| 187 |
+
$$
|
| 188 |
+
\mathcal{L}_{\mathrm{IL}}^{\mathrm{ep}} = \sum_{t}\sum_{s=0}^{S} -\log p_{s,a_{t+s}^{*}}^{\mathrm{ep}}, \tag{15}
|
| 189 |
+
$$
|
| 190 |
+
|
| 191 |
+
where $S$ is the maximum number of steps allowed for exploration. At $t^{th}$ navigation step, the agent performs $S$ -step exploration, simply imitating the teacher's navigation actions from $t$ to $t + S$ steps. Though the goals of navigation and exploration are different, here we simply use the teacher navigation actions to guide the learning of exploration, which helps the exploration module learn better representations, and quickly obtain an initial exploration policy.
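A compact sketch of the two IL losses (Eqs. 14-15), assuming the navigation and exploration logits have already been collected into dense tensors of the shapes noted in the docstring:

```python
import torch
import torch.nn.functional as F

def il_losses(nav_logits, teacher_actions, ep_logits, teacher_ep_actions):
    """Sketch of Eqs. 14-15 with assumed shapes.

    nav_logits:         (T, K)    un-normalised navigation scores per step
    teacher_actions:    (T,)      teacher navigation actions a_t^*
    ep_logits:          (T, S, K) exploration scores per step and exploration sub-step
    teacher_ep_actions: (T, S)    teacher actions used to supervise exploration
    """
    # Eq. 14: negative log-likelihood of the demonstrated navigation actions.
    loss_nav = F.cross_entropy(nav_logits, teacher_actions, reduction='sum')
    # Eq. 15: the same supervision applied to every exploration sub-step.
    T, S, K = ep_logits.shape
    loss_ep = F.cross_entropy(ep_logits.reshape(T * S, K),
                              teacher_ep_actions.reshape(T * S), reduction='sum')
    return loss_nav, loss_ep

# toy usage
T, S, K = 7, 3, 6
ln, le = il_losses(torch.randn(T, K), torch.randint(0, K, (T,)),
                   torch.randn(T, S, K), torch.randint(0, K, (T, S)))
```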
|
| 192 |
+
|
| 193 |
+
Reinforcement Learning (RL). Through IL, the agent can learn, off-policy, a strategy that works relatively well in seen scenes, but it is biased towards copying the routes demonstrated by the teacher rather than learning how to recover from its erroneous behavior in an unseen environment [24]. Recent methods [20, 23, 24, 29] demonstrate that the on-policy RL method Advantage Actor-Critic (A2C) [18] can help the agent explore the state-action space beyond the demonstration paths.
|
| 194 |
+
|
| 195 |
+
For RL based navigation learning, our agent samples a navigation action from the distribution $\{p_{t,k}^{\mathrm{nv}}\}_{k = 1}^{K}$ (see Eq. 2) and learns from rewards. Let us denote the reward after taking a navigation action $a_{t}^{\mathrm{nv}}$ at the current view $v_{t}$ as $r^{\mathrm{nv}}(v_{t},a_{t}^{\mathrm{nv}})$ . As in [20,23], at each non-stop step $t$ , $r^{\mathrm{nv}}(v_t,a_t^{\mathrm{nv}})$ is the change in the distance to the target navigation location. At the final step $T$ , if the agent stops within 3 meters of the target location, we set $r^{\mathrm{nv}}(v_T,a_T^{\mathrm{nv}}) = +3$ ; otherwise $r^{\mathrm{nv}}(v_T,a_T^{\mathrm{nv}}) = -3$ . Then, to incorporate the influence of the action $a_{t}^{\mathrm{nv}}$ on the future and to avoid a locally greedy search, the total accumulated return with a discount factor
|
| 196 |
+
|
| 197 |
+
is adopted: $R_{t}^{\mathrm{nv}} = \sum_{t' = t}^{T}\gamma^{t' - t}r^{\mathrm{nv}}(v_{t'},a_{t'}^{\mathrm{nv}})$ , where the discount factor $\gamma$ is set to 0.9. In A2C, our agent is viewed as the actor, and a state-value function $b^{\mathrm{nv}}(\pmb{h}_t^{\mathrm{nv}})$ is evaluated as the critic. For training, the actor aims to minimize the negative log-probability of action $a_{t}^{\mathrm{nv}}$ scaled by $R_{t}^{\mathrm{nv}} - b^{\mathrm{nv}}(\pmb{h}_{t}^{\mathrm{nv}})$ (known as the advantage of action $a_{t}^{\mathrm{nv}}$ ), and the critic aims to minimize the mean squared error between $R_{t}^{\mathrm{nv}}$ and its estimated value:
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
\mathcal{L}_{\mathrm{RL}}^{\mathrm{nv}} = -\sum_{t}\left(R_{t}^{\mathrm{nv}} - b^{\mathrm{nv}}\left(\boldsymbol{h}_{t}^{\mathrm{nv}}\right)\right)\log p_{t,a_{t}^{\mathrm{nv}}}^{\mathrm{nv}} + \sum_{t}\left(R_{t}^{\mathrm{nv}} - b^{\mathrm{nv}}\left(\boldsymbol{h}_{t}^{\mathrm{nv}}\right)\right)^{2}. \tag{16}
|
| 201 |
+
$$
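The discounted return and the A2C objective of Eq. 16 can be sketched as follows. The split into policy and value terms and the `detach` on the baseline follow standard A2C practice and are assumptions about implementation detail rather than the authors' exact code.

```python
import torch

def discounted_returns(rewards, gamma=0.9):
    """R_t = sum_{t'>=t} gamma^{t'-t} r_{t'}  (rewards: 1-D tensor of length T)."""
    R, out = 0.0, []
    for r in reversed(rewards.tolist()):
        R = r + gamma * R
        out.append(R)
    return torch.tensor(list(reversed(out)))

def a2c_nav_loss(log_probs, values, rewards, gamma=0.9):
    """Sketch of Eq. 16: policy term scaled by the advantage plus a critic MSE term.

    log_probs: (T,) log p_{t,a_t}^nv of the sampled actions
    values:    (T,) critic estimates b^nv(h_t^nv)
    rewards:   (T,) immediate rewards r^nv(v_t, a_t^nv)
    """
    returns = discounted_returns(rewards, gamma)
    advantage = returns - values
    policy_loss = -(advantage.detach() * log_probs).sum()   # actor: baseline is detached
    value_loss = (advantage ** 2).sum()                      # critic: MSE to the return
    return policy_loss + value_loss

# toy usage
T = 5
loss = a2c_nav_loss(torch.randn(T), torch.randn(T, requires_grad=True), torch.randn(T))
```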
|
| 202 |
+
|
| 203 |
+
For RL-based exploration learning, we also adopt on-policy A2C for training. Specifically, let us assume a set of explorations $\{a_{t,k,s}^{\mathrm{ep}}\}_{s=1}^{S_{t,k}}$ is made in a certain direction $k$ at navigation step $t$ , the original navigation action (before exploration) is $a_t^{\prime\mathrm{nv}}$ , and the exploration-assisted navigation action (after exploration) is $a_t^{\mathrm{nv}}$ . The basic reward $r^{\mathrm{ep}}(v_t,\{a_{t,k,s}^{\mathrm{ep}}\}_s)$ for the exploration actions $\{a_{t,k,s}^{\mathrm{ep}}\}_s$ is defined as:
|
| 204 |
+
|
| 205 |
+
$$
|
| 206 |
+
r^{\mathrm{ep}}\left(v_{t}, \left\{a_{t,k,s}^{\mathrm{ep}}\right\}_{s}\right) = r^{\mathrm{nv}}\left(v_{t}, a_{t}^{\mathrm{nv}}\right) - r^{\mathrm{nv}}\left(v_{t}, a_{t}^{\prime\mathrm{nv}}\right). \tag{17}
|
| 207 |
+
$$
|
| 208 |
+
|
| 209 |
+
This means that if, after making the explorations $\{a_{t,k,s}^{\mathrm{ep}}\}_{s}$ at the $t^{th}$ navigation step in the $k^{th}$ direction, the new navigation decision $a_{t}^{\mathrm{nv}}$ is better than the original one $a_{t}^{\prime \mathrm{nv}}$ , a positive exploration reward is assigned. Intuitively, this exploration reward represents the benefit that the set of explorations $\{a_{t,k,s}^{\mathrm{ep}}\}_{s}$ brings to the navigation. We distribute $r^{\mathrm{ep}}(v_t,\{a_{t,k,s}^{\mathrm{ep}}\}_s)$ evenly over the exploration actions as the immediate reward, i.e., $r^{\mathrm{ep}}(v_t,a_{t,k,s}^{\mathrm{ep}}) = \frac{1}{S_{t,k}} r^{\mathrm{ep}}(v_t,\{a_{t,k,s}^{\mathrm{ep}}\}_s)$ . In addition, to limit the length of exploration, we add a negative term $\beta$ $(= -0.1)$ to the reward of each exploration step. Then, the total accumulated discounted return for an exploration action $a_{t,k,s}^{\mathrm{ep}}$ is defined as: $R_{t,k,s}^{\mathrm{ep}} = \sum_{s' = s}^{S_{t,k}}\gamma^{s' - s}(r^{\mathrm{ep}}(v_t,a_{t,k,s'}^{\mathrm{ep}}) + \beta)$ . The RL loss for the exploration action $a_{t,k,s}^{\mathrm{ep}}$ is defined as:
|
| 210 |
+
|
| 211 |
+
$$
|
| 212 |
+
\mathcal{L}\left(a_{t,k,s}^{\mathrm{ep}}\right) = -\left(R_{t,k,s}^{\mathrm{ep}} - b^{\mathrm{ep}}\left(\boldsymbol{h}_{t,k,s}^{\mathrm{ep}}\right)\right)\log p_{t,k,a_{t,k,s}^{\mathrm{ep}}}^{\mathrm{ep}} + \left(R_{t,k,s}^{\mathrm{ep}} - b^{\mathrm{ep}}\left(\boldsymbol{h}_{t,k,s}^{\mathrm{ep}}\right)\right)^{2}, \tag{18}
|
| 213 |
+
$$
|
| 214 |
+
|
| 215 |
+
where $b^{\mathrm{ep}}$ is the critic. Then, similar to Eq. 16, the RL loss for all the exploration actions is defined as:
|
| 216 |
+
|
| 217 |
+
$$
|
| 218 |
+
\mathcal{L}_{\mathrm{RL}}^{\mathrm{ep}} = \sum_{t}\sum_{k}\sum_{s}\mathcal{L}\left(a_{t,k,s}^{\mathrm{ep}}\right). \tag{19}
|
| 219 |
+
$$
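The exploration reward shaping of Eqs. 17-19 for a single explored direction might be computed as in the sketch below, where the reward difference is spread evenly over the exploration steps and the length penalty β is added per step; the scalar-only interface is a simplification.

```python
import torch

def exploration_returns(r_nav_after, r_nav_before, num_steps, gamma=0.9, beta=-0.1):
    """Sketch of the Eq. 17-19 reward shaping for one explored direction.

    r_nav_after / r_nav_before: navigation rewards with / without exploration (Eq. 17)
    num_steps: number of exploration steps S_{t,k} taken in this direction
    """
    r_ep_total = r_nav_after - r_nav_before            # Eq. 17
    r_step = r_ep_total / num_steps + beta             # evenly distributed + length penalty
    # discounted return R^ep_{t,k,s} for every exploration step s, used in Eq. 18
    returns, R = [], 0.0
    for _ in range(num_steps):
        R = r_step + gamma * R
        returns.append(R)
    return torch.tensor(list(reversed(returns)))

# toy usage: exploration corrected the navigation action, so the reward is positive
print(exploration_returns(r_nav_after=1.2, r_nav_before=-0.5, num_steps=3))
```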
|
| 220 |
+
|
| 221 |
+
Curriculum Learning for Multi-Step Exploration. During training, we find that once the exploration policy is updated, the model easily suffers from extreme variations in the gathered information, particularly for long-term exploration, which makes training unstable. To avoid this, we adopt curriculum learning [4] to train our agent with an incrementally increased exploration length. Specifically, in the beginning, the maximum exploration length is set to 1. After the training loss converges, we use the current parameters to initialize the training of the agent with at most 2-step exploration. In this way, we train an agent with at most 6-step exploration (limited by GPU memory and time). This strategy
|
| 222 |
+
|
| 223 |
+
greatly improves the convergence speed (about $8\times$ faster) with no noticeable drop in performance. Experiments on the influence of the maximum exploration length can be found in §4.3.
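The curriculum schedule can be summarized by the short sketch below; `train_until_converged` is an assumed placeholder for the full IL+RL training loop described above, not an existing function.

```python
def curriculum_training(agent, train_until_converged, max_length=6):
    """Grow the exploration budget from 1 step up to max_length, reusing the weights."""
    for max_explore_steps in range(1, max_length + 1):
        # the agent keeps its current parameters; only the exploration budget grows
        train_until_converged(agent, max_explore_steps=max_explore_steps)
    return agent
```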
|
| 224 |
+
|
| 225 |
+
Back Translation Based Training Data Augmentation. Following [7, 20], we use back translation to augment the training data. The basic idea is that, in addition to training a navigator that finds the correct route in an environment according to the given instructions, an auxiliary speaker is trained to generate an instruction given a route in an environment. In this way, we generate extra instructions for 176k unlabeled routes in the Room-to-Room [1] training environments. After training the agent on the labeled samples from the Room-to-Room training set, we use the back-translation-augmented data for fine-tuning.
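The augmentation procedure amounts to the loop sketched below; `speaker.generate` is an assumed interface for the instruction generator, not the exact API of [7].

```python
def augment_with_back_translation(speaker, unlabeled_routes):
    """Produce (instruction, route) pairs for routes that lack human annotations."""
    synthetic = []
    for route in unlabeled_routes:              # e.g. routes sampled from training environments
        instruction = speaker.generate(route)   # assumed speaker API
        synthetic.append((instruction, route))
    return synthetic
```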
|
| 226 |
+
|
| 227 |
+
# 4 Experiment
|
| 228 |
+
|
| 229 |
+
# 4.1 Experimental Setup
|
| 230 |
+
|
| 231 |
+
Dataset. We conduct experiments on the Room-to-Room (R2R) dataset [1], which has 10,800 panoramic views in 90 housing environments, and 7,189 paths sampled from its navigation graphs. Each path is associated with three ground-truth navigation instructions. R2R is split into four sets: training, validation seen, validation unseen, and test unseen. There are no overlapping environments between the unseen and training sets.
|
| 232 |
+
|
| 233 |
+
Evaluation Metric. Following conventions [1, 7], five metrics are used for evaluation: Success Rate (SR), Navigation Error (NE), Trajectory Length (TL), Oracle success Rate (OR), and Success rate weighted by Path Length (SPL).
|
| 234 |
+
|
| 235 |
+
Implementation Detail. As in [1, 7, 23, 24], the viewpoint embedding $\pmb{v}_{t,k}$ is a concatenation of an image feature (from an ImageNet [19] pre-trained ResNet-152 [8]) and a 4-d orientation descriptor. A bottleneck layer is applied to reduce the dimension of $\pmb{v}_{t,k}$ to 512. Instruction embeddings $\pmb{X}$ are obtained from an LSTM with a hidden size of 512. Each LSTM in our exploration module also has a hidden size of 512. For back translation, the speaker is implemented as described in [7].
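The viewpoint embedding described above could be assembled as in the sketch below; the sin/cos orientation encoding is a common choice in VLN code bases but is an assumption here, as is the exact form of the bottleneck layer.

```python
import torch
import torch.nn as nn

class ViewpointEmbedding(nn.Module):
    """2048-d ResNet-152 feature + 4-d orientation descriptor, reduced to 512-d."""
    def __init__(self, img_dim=2048, ori_dim=4, out_dim=512):
        super().__init__()
        self.bottleneck = nn.Linear(img_dim + ori_dim, out_dim)

    def forward(self, img_feat, heading, elevation):
        # orientation descriptor: (sin h, cos h, sin e, cos e)  (assumed encoding)
        ori = torch.stack([torch.sin(heading), torch.cos(heading),
                           torch.sin(elevation), torch.cos(elevation)], dim=-1)
        return self.bottleneck(torch.cat([img_feat, ori], dim=-1))

# toy usage
emb = ViewpointEmbedding()
v = emb(torch.randn(2, 2048), torch.rand(2), torch.rand(2))
```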
|
| 236 |
+
|
| 237 |
+
# 4.2 Comparison Results
|
| 238 |
+
|
| 239 |
+
Performance Comparisons Under Different VLN Settings. We extensively evaluate our performance under three different VLN setups in R2R.
|
| 240 |
+
|
| 241 |
+
(1) Single Run Setting: This is the basic setup in R2R, where the agent conducts navigation by selecting actions in a step-by-step, greedy manner. The agent is not allowed to 1) run multiple trials or 2) explore or map the test environments before starting. Table 1 reports the comparison results under this setting. We make several key observations. i) Our agent outperforms the other competitors on the main metric, SR, as well as on some other criteria, e.g., NE and OR. For example, in terms of SR, our model improves over AuxRN [28] by $3\%$ and $5\%$ on the validation unseen and test unseen sets, respectively, demonstrating strong generalizability. ii) Even without data augmentation, our agent already outperforms many existing methods on SR and NE. iii) Our TL and SPL scores
|
| 242 |
+
|
| 243 |
+
Table 1. Comparison results on validation seen, validation unseen, and test unseen sets of R2R [1] under Single Run setting (§4.2). For compliance with the evaluation server, we report SR as fractions. *: back translation augmentation.
|
| 244 |
+
|
| 245 |
+
<table><tr><td rowspan="3">Models</td><td colspan="13">Single Run Setting</td><td></td><td></td></tr><tr><td colspan="4">validation seen</td><td colspan="4">validation unseen</td><td colspan="5">test unseen</td><td></td><td></td></tr><tr><td>SR↑</td><td>NE↓</td><td>TL↓</td><td>OR↑</td><td>SPL↑</td><td>SR↑</td><td>NE↓</td><td>TL↓</td><td>OR↑</td><td>SPL↑</td><td>SR↑</td><td>NE↓</td><td>TL↓</td><td>OR↑</td><td>SPL↑</td></tr><tr><td>Random</td><td>0.16</td><td>9.45</td><td>9.58</td><td>0.21</td><td>-</td><td>0.16</td><td>9.23</td><td>9.77</td><td>0.22</td><td>-</td><td>0.13</td><td>9.77</td><td>9.93</td><td>0.18</td><td>0.12</td></tr><tr><td>Student-Forcing [1]</td><td>0.39</td><td>6.01</td><td>11.3</td><td>0.53</td><td>-</td><td>0.22</td><td>7.81</td><td>8.39</td><td>0.28</td><td>-</td><td>0.20</td><td>7.85</td><td>8.13</td><td>0.27</td><td>0.18</td></tr><tr><td>RPA [24]</td><td>0.43</td><td>5.56</td><td>8.46</td><td>0.53</td><td>-</td><td>0.25</td><td>7.65</td><td>7.22</td><td>0.32</td><td>-</td><td>0.25</td><td>7.53</td><td>9.15</td><td>0.33</td><td>0.23</td></tr><tr><td>E-Dropout [20]</td><td>0.55</td><td>4.71</td><td>10.1</td><td>-</td><td>0.53</td><td>0.47</td><td>5.49</td><td>9.37</td><td>-</td><td>0.43</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Regretful [13]</td><td>0.65</td><td>3.69</td><td>-</td><td>0.72</td><td>0.59</td><td>0.48</td><td>5.36</td><td>-</td><td>0.61</td><td>0.37</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Ours</td><td>0.66</td><td>3.35</td><td>19.8</td><td>0.79</td><td>0.49</td><td>0.55</td><td>4.40</td><td>19.9</td><td>0.70</td><td>0.38</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Speaker-Follower [7]*</td><td>0.66</td><td>3.36</td><td>-</td><td>0.74</td><td>-</td><td>0.36</td><td>6.62</td><td>-</td><td>0.45</td><td>-</td><td>0.35</td><td>6.62</td><td>14.8</td><td>0.44</td><td>0.28</td></tr><tr><td>RCM [23]*</td><td>0.67</td><td>3.53</td><td>10.7</td><td>0.75</td><td>-</td><td>0.43</td><td>6.09</td><td>11.5</td><td>0.50</td><td>-</td><td>0.43</td><td>6.12</td><td>12.0</td><td>0.50</td><td>0.38</td></tr><tr><td>Self-Monitoring [12]*</td><td>0.67</td><td>3.22</td><td>-</td><td>0.78</td><td>0.58</td><td>0.45</td><td>5.52</td><td>-</td><td>0.56</td><td>0.32</td><td>0.43</td><td>5.99</td><td>18.0</td><td>0.55</td><td>0.32</td></tr><tr><td>Regretful [13]*</td><td>0.69</td><td>3.23</td><td>-</td><td>0.77</td><td>0.63</td><td>0.50</td><td>5.32</td><td>-</td><td>0.59</td><td>0.41</td><td>0.48</td><td>5.69</td><td>13.7</td><td>0.56</td><td>0.40</td></tr><tr><td>E-Dropout [20]*</td><td>0.62</td><td>3.99</td><td>11.0</td><td>-</td><td>0.59</td><td>0.52</td><td>5.22</td><td>10.7</td><td>-</td><td>0.48</td><td>0.51</td><td>5.23</td><td>11.7</td><td>0.59</td><td>0.47</td></tr><tr><td>Tactical Rewind [11]*</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.56</td><td>4.97</td><td>21.2</td><td>-</td><td>0.43</td><td>0.54</td><td>5.14</td><td>22.1</td><td>0.64</td><td>0.41</td></tr><tr><td>AuxRN [28]*</td><td>0.70</td><td>3.33</td><td>-</td><td>0.78</td><td>0.67</td><td>0.55</td><td>5.28</td><td>-</td><td>0.62</td><td>0.50</td><td>0.55</td><td>5.15</td><td>-</td><td>0.62</td><td>0.51</td></tr><tr><td>Ours*</td><td>0.70</td><td>3.20</td><td>19.7</td><td>0.80</td><td>0.52</td><td>0.58</td><td>4.36</td><td>20.6</td><td>0.70</td><td>0.40</td><td>0.60</td><td>4.33</td><td>21.6</td><td>0.71</td><td>0.41</td></tr></table>
|
| 246 |
+
|
| 247 |
+
Table 2. Comparison results on test unseen set of R2R [1], under Pre-Explore and Beam Search settings (§4.2). To comply with the evaluation server, we report SR as fractions. *: back translation augmentation. -: unavailable statistics. †: a different beam search strategy is used, making the scores incomparable.
|
| 248 |
+
|
| 249 |
+
<table><tr><td rowspan="3">Models</td><td colspan="5">Pre-Explore Setting</td><td colspan="3">Beam Search Setting</td></tr><tr><td colspan="8">test unseen</td></tr><tr><td>SR↑</td><td>NE↓</td><td>TL↓</td><td>OR↑</td><td>SPL↑</td><td>SR↑</td><td>TL↓</td><td>SPL↑</td></tr><tr><td>Speaker-Follower [7]*</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.53</td><td>1257.4</td><td>0.01</td></tr><tr><td>RCM [23]*</td><td>0.60</td><td>4.21</td><td>9.48</td><td>0.67</td><td>0.59</td><td>0.63</td><td>357.6</td><td>0.02</td></tr><tr><td>Self-Monitoring [12]*</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.61</td><td>373.1</td><td>0.02</td></tr><tr><td>E-Dropout [20]*</td><td>0.64</td><td>3.97</td><td>9.79</td><td>0.70</td><td>0.61</td><td>0.69</td><td>686.8</td><td>0.01</td></tr><tr><td>AuxRN [28]*</td><td>0.68</td><td>3.69</td><td>-</td><td>0.75</td><td>0.65</td><td>0.70</td><td>†</td><td>†</td></tr><tr><td>Ours*</td><td>0.67</td><td>3.66</td><td>9.78</td><td>0.73</td><td>0.64</td><td>0.70</td><td>204.4</td><td>0.05</td></tr></table>
|
| 250 |
+
|
| 251 |
+
are on par with the current art, even though the exploration routes are counted in the metric computation. iv) If only the pure navigation routes are considered, our TL on the validation unseen set is only about 9.4.
|
| 252 |
+
|
| 253 |
+
(2) Pre-Explore Setting: This setup, first introduced by [23], allows the agent to pre-explore the unseen environments before conducting navigation. In [23], the agent learns to adapt to the unseen environment through semi-supervised methods, using only pre-given instructions without paired routes. Here, we follow a stricter setting, as in [20], where only the unseen environments can be accessed. Specifically, we use back translation to synthesize instructions for routes sampled from the unseen environments and fine-tune the agent on the synthetic data. As can be seen from Table 2, our method performs significantly better than the existing methods [20, 23], improving the SR score from 0.64 to 0.67, and is on par with AuxRN [28].
|
| 254 |
+
|
| 255 |
+
Table 3. Ablation study on the validation seen and validation unseen sets of R2R [1] under the Single Run setting. See §4.3 for details.
|
| 256 |
+
|
| 257 |
+
<table><tr><td rowspan="3">Aspect</td><td rowspan="3">Model</td><td colspan="8">Single Run Setting</td><td></td></tr><tr><td colspan="4">validation seen</td><td colspan="4">validation unseen</td><td></td></tr><tr><td>SR↑</td><td>NE↓</td><td>TL↓</td><td>OR↑</td><td>SPL↑</td><td>SR↑</td><td>NE↓</td><td>TL↓</td><td>OR↑</td></tr><tr><td>Basic agent</td><td>w/o. any exploration</td><td>0.62</td><td>3.99</td><td>11.0</td><td>0.71</td><td>0.59</td><td>0.52</td><td>5.22</td><td>10.7</td><td>0.58</td></tr><tr><td rowspan="6">Component</td><td>Our naïve model (§3.1)</td><td rowspan="2">0.66</td><td rowspan="2">3.55</td><td rowspan="2">40.9</td><td rowspan="2">0.81</td><td rowspan="2">0.19</td><td rowspan="2">0.54</td><td rowspan="2">4.76</td><td rowspan="2">35.7</td><td rowspan="2">0.71</td></tr><tr><td>1-step exploration+all directions</td></tr><tr><td>w. exploration decision (§3.2)</td><td rowspan="2">0.66</td><td rowspan="2">3.72</td><td rowspan="2">12.2</td><td rowspan="2">0.76</td><td rowspan="2">0.53</td><td rowspan="2">0.55</td><td rowspan="2">4.82</td><td rowspan="2">13.7</td><td rowspan="2">0.66</td></tr><tr><td>1-step exploration+parts of directions</td></tr><tr><td>w. further exploration</td><td rowspan="2">0.70</td><td rowspan="2">3.15</td><td rowspan="2">69.6</td><td rowspan="2">0.95</td><td rowspan="2">0.13</td><td rowspan="2">0.60</td><td rowspan="2">4.27</td><td rowspan="2">58.8</td><td rowspan="2">0.89</td></tr><tr><td>at most 4-step exploration+all directions</td></tr><tr><td rowspan="2">Full model (§3.3)</td><td>as most 1-step exploration</td><td>0.66</td><td>3.72</td><td>12.2</td><td>0.76</td><td>0.53</td><td>0.55</td><td>4.82</td><td>13.7</td><td>0.66</td></tr><tr><td>at most 3-step exploration</td><td>0.68</td><td>3.21</td><td>17.3</td><td>0.79</td><td>0.52</td><td>0.57</td><td>4.50</td><td>18.6</td><td>0.69</td></tr><tr><td rowspan="2">parts of directions</td><td>at most 4-step exploration</td><td>0.70</td><td>3.20</td><td>19.7</td><td>0.80</td><td>0.52</td><td>0.58</td><td>4.36</td><td>20.6</td><td>0.70</td></tr><tr><td>at most 6-step exploration</td><td>0.70</td><td>3.13</td><td>22.7</td><td>0.83</td><td>0.49</td><td>0.58</td><td>4.21</td><td>23.6</td><td>0.73</td></tr></table>
|
| 258 |
+
|
| 259 |
+
(3) Beam Search Setting: Beam search was originally used in [7] to optimize the SR metric. Given an instruction, the agent is allowed to collect multiple candidate routes, score them, and pick the best one [11]. Following [7,20], we use the speaker to score the candidate routes and pick the best one as the final result. As shown in Table 2, our performance is on par with or better than previous methods.
|
| 260 |
+
|
| 261 |
+
# 4.3 Diagnostic Experiments
|
| 262 |
+
|
| 263 |
+
Effectiveness of Our Basic Idea. We first examine the performance of the naive model (§3.1). As shown in Table 3, even with a simple exploration ability, the agent gains significant improvements in SR, NE, and OR. It is no surprise to see drops in TL and SPL, as the agent simply explores all directions.
|
| 264 |
+
|
| 265 |
+
Exploration Decision Making. In §3.2, the agent learns to select the valuable directions to explore. As seen, the improved agent is indeed able to collect useful surrounding information by conducting only the necessary exploration: TL and SPL are improved without sacrificing the gains in SR, NE, and OR.
|
| 266 |
+
|
| 267 |
+
Allowing Multi-Step Exploration. In §3.3, instead of being limited to one-step exploration, the agent learns to conduct multi-step exploration. To investigate the efficacy of this strategy in isolation, we allow our naïve model to make at most 4-step exploration (without exploration-decision making). In Table 3, we observe further improvements in SR, NE, and OR, at the cost of a larger TL.
|
| 268 |
+
|
| 269 |
+
Importance of All Components. Next we study the efficacy of our full model from §3.3, which is able to make multi-direction, multi-step exploration. We find that, by integrating all the components together, our agent with at most 4-step exploration achieves the best performance in most metrics.
|
| 270 |
+
|
| 271 |
+
Influence of Maximum Allowable Exploration Step. From Table 3, we find that with more maximum allowable exploration steps $(1\rightarrow 4)$ , the agent attains better performance. However, allowing further exploration steps $(4\rightarrow 6)$ hurts the performance. For at most 4-step exploration, the average exploration rate is $15.3\%$ . During exploration, the percentage of wrong navigation actions being
|
| 272 |
+
|
| 273 |
+

|
| 274 |
+
Fig. 5. Left: The basic agent is confused by the ambiguous instruction "Travel to the end of the hallway...", causing the navigation to fail. Our agent can actively collect information (the yellow part) and then make a better navigation decision. Middle Bottom: First view during exploration. Right: First view during navigation. We can see that, before exploration, the wrong direction gains a high navigation probability (i.e., 0.6). However, after exploration, the score for the correct direction is improved.
|
| 275 |
+
|
| 279 |
+
|
| 280 |
+
corrected is $\sim 65.2\%$ , while the percentage of correct navigation actions being wrongly changed is $\sim 10.7\%$ . The percentages of maximum exploration steps from 1 to 4 are $53.6\%$ , $12.5\%$ , $8.7\%$ , and $25.3\%$ , respectively. We find that, in most cases, one-step exploration is enough. Sometimes the agent chooses a long exploration, which may be because he needs to collect more information for hard examples.
|
| 281 |
+
|
| 282 |
+
Qualitative Results. Fig. 5 depicts a challenging example with the ambiguous instruction "Travel to the end of the hallway...". The basic agent chooses the wrong direction and ultimately fails. However, our agent is able to actively explore the environment and collect useful information to support navigation-decision making. We observe that, after exploration, the correct direction gains a significantly higher score and our agent reaches the goal location successfully.
|
| 283 |
+
|
| 284 |
+
# 5 Conclusion
|
| 285 |
+
|
| 286 |
+
This work proposes an end-to-end trainable agent for the VLN task with an active exploration ability. The agent is able to intelligently interact with the environment and actively gather information when faced with ambiguous instructions or unconfident navigation decisions. The elaborately designed exploration module successfully learns its own policy with the purpose of supporting better navigation-decision making. Our agent shows promising results on the R2R dataset.

Acknowledgements. This work was partially supported by the Natural Science Foundation of China (NSFC) grant (No. 61472038), Zhejiang Lab's Open Fund (No. 2020AA3AB14), Zhejiang Lab's International Talent Fund for Young Professionals, and the Key Laboratory of Electronic Information Technology in Satellite Navigation (Beijing Institute of Technology), Ministry of Education, China.
|
| 287 |
+
|
| 288 |
+
# References
|
| 289 |
+
|
| 290 |
+
1. Anderson, P., Wu, Q., Teney, D., Bruce, J., Johnson, M., Sünderhauf, N., Reid, I., Gould, S., van den Hengel, A.: Vision-and-language navigation: Interpreting visually-grounded navigation instructions in real environments. In: CVPR (2018) 1, 2, 3, 4, 9, 11, 12, 13
|
| 291 |
+
2. Andreas, J., Klein, D.: Alignment-based compositional semantics for instruction following. In: EMNLP (2015) 3
|
| 292 |
+
3. Antol, S., Agrawal, A., Lu, J., Mitchell, M., Batra, D., Lawrence Zitnick, C., Parikh, D.: VQA: Visual question answering. In: ICCV (2015) 3
|
| 293 |
+
4. Bengio, Y., Louradour, J., Collobert, R., Weston, J.: Curriculum learning. In: ICML (2009) 10
|
| 294 |
+
5. Chen, D.L., Mooney, R.J.: Learning to interpret natural language navigation instructions from observations. In: AAAI (2011) 3
|
| 295 |
+
6. Das, A., Kottur, S., Gupta, K., Singh, A., Yadav, D., Moura, J.M., Parikh, D., Batra, D.: Visual dialog. In: CVPR (2017) 3
|
| 296 |
+
7. Fried, D., Hu, R., Cirik, V., Rohrbach, A., Andreas, J., Morency, L.P., Berg-Kirkpatrick, T., Saenko, K., Klein, D., Darrell, T.: Speaker-follower models for vision-and-language navigation. In: NeurIPS (2018) 1, 3, 9, 11, 12, 13
|
| 297 |
+
8. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016) 11
|
| 298 |
+
9. Hu, R., Fried, D., Rohrbach, A., Klein, D., Darrell, T., Saenko, K.: Are you looking? grounding to multiple modalities in vision-and-language navigation. In: ACL (2019) 1, 3
|
| 299 |
+
10. Huang, H., Jain, V., Mehta, H., Ku, A., Magalhaes, G., Baldridge, J., Ie, E.: Transferable representation learning in vision-and-language navigation. In: ICCV (2019) 1, 3
11. Ke, L., Li, X., Bisk, Y., Holtzman, A., Gan, Z., Liu, J., Gao, J., Choi, Y., Srinivasa, S.: Tactical rewind: Self-correction via backtracking in vision-and-language navigation. In: CVPR (2019) 1, 3, 12
12. Ma, C.Y., Lu, J., Wu, Z., AlRegib, G., Kira, Z., Socher, R., Xiong, C.: Self-monitoring navigation agent via auxiliary progress estimation. In: ICLR (2019) 1, 3, 12, 13
13. Ma, C.Y., Wu, Z., AlRegib, G., Xiong, C., Kira, Z.: The regretful agent: Heuristic-aided navigation through progress estimation. In: CVPR (2019) 1, 3, 9, 12
14. MacMahon, M., Stankiewicz, B., Kuipers, B.: Walk the talk: connecting language, knowledge, and action in route instructions. In: AAAI (2006) 3
15. Mei, H., Bansal, M., Walter, M.R.: Listen, attend, and walk: Neural mapping of navigational instructions to action sequences. In: AAAI (2016) 3, 9
16. Mirowski, P., Pascanu, R., Viola, F., Soyer, H., Ballard, A.J., Banino, A., Denil, M., Goroshin, R., Sifre, L., Kavukcuoglu, K., et al.: Learning to navigate in complex environments. In: ICLR (2017) 3
17. Misra, D., Bennett, A., Blukis, V., Niklasson, E., Shatkhin, M., Artzi, Y.: Mapping instructions to actions in 3d environments with visual goal prediction. In: EMNLP (2018) 3
18. Mnih, V., Badia, A.P., Mirza, M., Graves, A., Lillicrap, T., Harley, T., Silver, D., Kavukcuoglu, K.: Asynchronous methods for deep reinforcement learning. In: ICML (2016) 9
19. Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., Berg, A.C., Fei-Fei, L.: ImageNet Large Scale Visual Recognition Challenge. IJCV 115(3), 211-252 (2015) 11
20. Tan, H., Yu, L., Bansal, M.: Learning to navigate unseen environments: Back translation with environmental dropout. In: NAACL (2019) 1, 3, 4, 9, 11, 12, 13
21. Tellex, S., Kollar, T., Dickerson, S., Walter, M.R., Banerjee, A.G., Teller, S., Roy, N.: Understanding natural language commands for robotic navigation and mobile manipulation. In: AAAI (2011) 3
22. Thomason, J., Gordon, D., Bisk, Y.: Shifting the baseline: Single modality performance on visual navigation & qa. In: NAACL (2019) 3
23. Wang, X., Huang, Q., Celikyilmaz, A., Gao, J., Shen, D., Wang, Y.F., Wang, W.Y., Zhang, L.: Reinforced cross-modal matching and self-supervised imitation learning for vision-language navigation. In: CVPR (2019) 1, 3, 9, 11, 12, 13
24. Wang, X., Xiong, W., Wang, H., Yang Wang, W.: Look before you leap: Bridging model-free and model-based reinforcement learning for planned-ahead vision-and-language navigation. In: ECCV (2018) 1, 3, 9, 11, 12
25. Xu, K., Ba, J., Kiros, R., Cho, K., Courville, A., Salakhudinov, R., Zemel, R., Bengio, Y.: Show, attend and tell: Neural image caption generation with visual attention. In: ICML (2015) 3
26. Yu, L., Poirson, P., Yang, S., Berg, A.C., Berg, T.L.: Modeling context in referring expressions. In: ECCV (2016) 3
27. Zheng, Z., Wang, W., Qi, S., Zhu, S.C.: Reasoning visual dialogs with structural and partial observations. In: CVPR (2019) 3
28. Zhu, F., Zhu, Y., Chang, X., Liang, X.: Vision-language navigation with self-supervised auxiliary reasoning tasks. In: CVPR (2020) 1, 3, 11, 12, 13
29. Zhu, Y., Mottaghi, R., Kolve, E., Lim, J.J., Gupta, A., Fei-Fei, L., Farhadi, A.: Target-driven visual navigation in indoor scenes using deep reinforcement learning. In: ICRA (2017) 3, 9

activevisualinformationgatheringforvisionlanguagenavigation/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:39283ed57ec4e212461c45cfcf0e8156df1929ba6268c4513c144d0e9756b9fa
+size 617640

activevisualinformationgatheringforvisionlanguagenavigation/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e563facc55b3eec4dadc9797f5caab2a47af684cfedca4412f27bcf93749da1c
+size 471693

adaptingobjectdetectorswithconditionaldomainnormalization/05885c2c-b740-4e53-9dfe-2678a4cac04a_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b6be935421ec5da9c4867d3fd65ce55065cef1c13d2a0c123f6c9b06cef22c7
+size 81867

adaptingobjectdetectorswithconditionaldomainnormalization/05885c2c-b740-4e53-9dfe-2678a4cac04a_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:73c84eaffa4765307b0d41653528fa0243edf9397a8c2a5fc8ef0464c00d7e9e
+size 99106

adaptingobjectdetectorswithconditionaldomainnormalization/05885c2c-b740-4e53-9dfe-2678a4cac04a_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d3ae3d8a734ca459ab3a11943a1891994f4612af359636a4a79e255a8c10f463
+size 28762076

adaptingobjectdetectorswithconditionaldomainnormalization/full.md
ADDED
@@ -0,0 +1,357 @@
| 1 |
+
# Adapting Object Detectors with Conditional Domain Normalization
|
| 2 |
+
|
| 3 |
+
Peng Su $^{1,2}$ , Kun Wang $^{2}$ , Xingyu Zeng $^{2}$ , Shixiang Tang $^{2}$ , Dapeng Chen $^{2}$ , Di Qiu $^{2}$ , and Xiaogang Wang $^{1}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> The Chinese University of Hong Kong
|
| 6 |
+
<sup>2</sup> SenseTime Research
|
| 7 |
+
{psu, xgwang}@ee.cuhk.edu.hk
|
| 8 |
+
|
| 9 |
+
Abstract. Real-world object detectors are often challenged by the domain gaps between different datasets. In this work, we present the Conditional Domain Normalization (CDN) to bridge the domain distribution gap. CDN is designed to encode different domain inputs into a shared latent space, where the features from different domains carry the same domain attribute. To achieve this, we first disentangle the domain-specific attribute out of the semantic features from source domain via a domain embedding module, which learns a domain-vector to characterize the domain attribute information. Then this domain-vector is used to encode the features from target domain through a conditional normalization, resulting in different domains' features carrying the same domain attribute. We incorporate CDN into various convolution stages of an object detector to adaptively address the domain shifts of different level's representation. In contrast to existing adaptation works that conduct domain confusion learning on semantic features to remove domain-specific factors, CDN aligns different domain distributions by modulating the semantic features of target domains conditioned on the learned domain-vector of the source domain. Extensive experiments show that CDN outperforms existing methods remarkably on both real-to-real and synthetic-to-real adaptation benchmarks, including 2D image detection and 3D point cloud detection.
|
| 10 |
+
|
| 11 |
+
# 1 Introduction
|
| 12 |
+
|
| 13 |
+
Deep neural networks have achieved remarkable success on visual recognition tasks. However, it is still very challenging for deep networks to generalize on a different domain, whose data distribution is not identical with original training data. Such a problem is known as dataset bias or domain shift [31]. For example, to guarantee safety in autonomous driving, the perception model is required to perform well under all conditions, like sunny, night, rainy, snowy, etc. However, even top-grade object detectors still face significant challenges when deployed in such varying real-world settings. Although collecting and annotating more data from unseen domains can help, it is prohibitively expensive, laborious and time-consuming. Another appealing application is to adapt from synthetic data to
|
| 14 |
+
|
| 15 |
+
real data, as it can save a substantial amount of cost and time. However, current object detectors trained with synthetic data can rarely generalize to real data due to a significant domain distribution gap [36, 38].
|
| 16 |
+
|
| 17 |
+
Adversarial domain adaptation emerges as a hopeful method to learn transferable representations across domains. It has achieved noticeable progress in various machine learning tasks, from image classification [24, 27], semantic segmentation [39, 36, 47], object detection [33, 46] to reinforcement learning [38, 28, 20]. According to Ben-David's theory [1], the empirical risk on the target domain is bounded by the source domain risk and the $\mathcal{H}$ domain divergence. Adversarial adaptation dedicates to learn domain invariant representation to reduce the $\mathcal{H}$ divergence, which eventually decreases the upper bound of the empirical error on the target domain.
|
| 18 |
+
|
| 19 |
+
However, existing adversarial adaptation methods still suffer from several problems. First, previous methods [8, 4, 38] directly feed semantic features into a domain discriminator to conduct domain confusion learning. But the semantic features contain both image content and domain attribute information, so it is difficult to make the discriminator focus only on removing domain-specific information without inducing undesirable influence on the image content. Second, existing adversarial adaptation methods [8, 4, 38] use domain confusion learning at only one or a few convolution stages to handle the distribution mismatch, which ignores the differences of domain shift at various representation levels. For example, the first few convolution layers' features mainly convey low-level information about local patterns, while the higher convolution layers' features capture more abstract global patterns with semantics [43]. Such differences, inherent to deep convolutional neural networks, naturally lead to different types of domain shift at various convolution stages.
|
| 20 |
+
|
| 21 |
+
Motivated by this, we propose Conditional Domain Normalization (CDN) to embed different domain inputs into a shared latent space, where the features of all domain inputs carry the same domain attribute information. Specifically, CDN utilizes a domain embedding module to learn a domain-vector that characterizes the domain attribute information, by disentangling the domain attribute out of the semantic features of domain inputs. We then use this domain-vector to encode the semantic features of another domain's inputs via a conditional normalization, so that different domains' features carry the same domain attribute information. Experiments on both real-to-real and synthetic-to-real adaptation benchmarks demonstrate that our method outperforms state-of-the-art adaptation methods. To summarize, our contributions are threefold: (1) We propose Conditional Domain Normalization (CDN) to bridge the domain distribution gap by embedding different domain inputs into a shared latent space, where the features from different domains carry the same domain attribute. (2) CDN achieves state-of-the-art unsupervised domain adaptation performance on both real-to-real and synthetic-to-real benchmarks, including 2D image and 3D point cloud detection tasks, and we conduct both quantitative and qualitative comparisons to analyze the features learned by CDN. (3)
|
| 22 |
+
|
| 23 |
+
We construct a large-scale synthetic-to-real driving benchmark for 2D object detection, including a variety of public datasets.
|
| 24 |
+
|
| 25 |
+
# 2 Related work
|
| 26 |
+
|
| 27 |
+
Object Detection is a central topic in computer vision and is crucial for many real-world applications, such as autonomous driving. In 2D detection, following the pioneering work of RCNN [11], a number of object detection frameworks based on convolutional networks have been developed, such as Fast R-CNN [10], Faster R-CNN [32], and Mask R-CNN [12], which significantly push forward the state of the art. In 3D detection, abundant works have been successfully explored, spanning from detecting 3D objects from 2D images [3] to directly generating 3D boxes from point clouds [29, 37]. All these 2D and 3D detectors have achieved remarkable success on one or a few specific public datasets. However, even top-grade object detectors still face significant challenges when deployed in real-world settings. The difficulties usually arise from changes in environmental conditions.
|
| 28 |
+
|
| 29 |
+
Domain Adaptation generalizes a model across different domains, and it has been extensively explored in various tasks, spanning from image classification [2, 40, 24, 27, 23] and semantic segmentation [15, 39, 36] to reinforcement learning [38, 28, 20]. For 2D detection, domain confusion learning via a domain discriminator has achieved noticeable progress in cross-domain detection. [4] incorporated a gradient reversal layer [8] into a Faster R-CNN model, and [33, 46] adopt domain confusion learning at both global and local levels to align source and target distributions. In contrast to existing methods that conduct domain confusion learning directly on semantic features, we explicitly disentangle the domain attribute out of the semantic features, and this domain attribute is used to encode other domains' features, so that different domain inputs share the same domain attribute in the feature space. For 3D detection, only a few works [45, 17] have explored adapting object detectors across different point cloud datasets. Different from existing works [45, 17], which are specifically designed for point cloud data, our proposed CDN is a general adaptation framework that adapts both 2D image and 3D point cloud object detectors through conditional domain normalization.
|
| 30 |
+
|
| 31 |
+
Conditional Normalization is a technique to modulate the neural activation using a transformation that depends on external data. It has been successfully used in the generative models and style transfer, like conditional batch normalization [6], adaptive instance normalization (AdaIN) [16] and spatial adaptive batch normalization [25]. [16] proposes AdaIN to control the global style of the synthesized image. [41] modulates the features conditioned on semantic masks for image super-resolution. [25] adopts a spatially-varying transformation, making it suitable for image synthesis from semantic masks. Inspired by these works, we propose Conditional Domain Normalization (CDN) to modulate one domain's inputs condition on another domain's attributes information. But our method exhibits significant difference with style transfer works: Style transfer works modify a content image conditioned on another style image, which is a conditional
|
| 32 |
+
|
| 33 |
+
instance normalization by nature; but CDN modulates one domain's features conditioned on the domain embedding learned from another domains' inputs (a group of images), which is like a domain-to-domain translation. Hence we use different types of conditional normalization to achieve different goals.
|
| 34 |
+
|
| 35 |
+
# 3 Method
|
| 36 |
+
|
| 37 |
+
We first introduce the general unsupervised domain adaptation approach in section 3.1. Then we present the proposed Conditional Domain Normalization (CDN) in section 3.2. Last we adapt object detectors with the CDN in section 3.3.
|
| 38 |
+
|
| 39 |
+
# 3.1 General Adversarial Adaptation Framework
|
| 40 |
+
|
| 41 |
+
Given source images and labels $\{(x_i^S, y_i^S)\}_{i=1}^{N_S}$ drawn from $P_s$ , and target images $\{x_i^T\}_{i=1}^{N_T}$ from target domain $P_t$ , the goal of unsupervised domain adaptation is to find a function $f: x \to y$ that minimizes the empirical error on target data. For object detection task, the $f$ can be decomposed as $f = G(\cdot; \theta_g) \circ H(\cdot; \theta_h)$ , where $G(\cdot; \theta_g)$ represents a feature extractor network and $H(\cdot; \theta_h)$ denotes a bounding box head network. The adversarial domain adaptation introduces a discriminator network $D(\cdot; \theta_d)$ that tries to determine the domain labels of feature maps generated by $G(\cdot; \theta_g)$ .
|
| 42 |
+
|
| 43 |
+
$$
\min_{\theta_g, \theta_h} \mathcal{L}_{det} = \mathcal{L}_{cls}(G(x; \theta_g), H(x; \theta_h)) + \mathcal{L}_{reg}(G(x; \theta_g), H(x; \theta_h))
$$

$$
\min_{\theta_d} \max_{\theta_g} \mathcal{L}_{adv} = \mathbb{E}_{x \sim P_s}\left[\log\left(D(G(x; \theta_g); \theta_d)\right)\right] + \mathbb{E}_{x \sim P_t}\left[\log\left(1 - D(G(x; \theta_g); \theta_d)\right)\right] \tag{1}
$$
|
| 50 |
+
|
| 51 |
+
As illustrated in Eq.1, $G(\cdot; \theta_g)$ and $H(\cdot; \theta_h)$ are jointly trained to minimize the detection loss $\mathcal{L}_{det}$ by supervised training on the labeled source domain. At the same time, the backbone $G(\cdot; \theta_g)$ is optimized to maximize the probability of $D(\cdot; \theta_d)$ to make mistakes. Through this two-player min-max game, the final $G(\cdot; \theta_g)$ will converge to extract features that are indistinguishable for $D(\cdot; \theta_d)$ , thus domain invariant representations are learned.
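In practice, the min-max game of Eq. 1 is commonly realized with a gradient reversal layer [8], so that a single backward pass updates the discriminator normally while reversing the gradient that flows back into the backbone. Below is a minimal, hedged PyTorch-style sketch of that mechanism; the class and variable names (`GradReverse`, `DomainDiscriminator`, `lambd`) are illustrative and not taken from the paper.

```python
import torch
import torch.nn as nn

class GradReverse(torch.autograd.Function):
    """Identity in the forward pass; multiplies the gradient by -lambd in the backward pass."""
    @staticmethod
    def forward(ctx, x, lambd):
        ctx.lambd = lambd
        return x.view_as(x)

    @staticmethod
    def backward(ctx, grad_output):
        return -ctx.lambd * grad_output, None

class DomainDiscriminator(nn.Module):
    """Small classifier D(.; theta_d) that predicts the domain label of a feature map."""
    def __init__(self, in_channels):
        super().__init__()
        self.net = nn.Sequential(
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(in_channels, 256), nn.ReLU(inplace=True),
            nn.Linear(256, 1))

    def forward(self, feat, lambd=1.0):
        # Gradient reversal makes minimizing the BCE below equivalent to the min-max of Eq. 1.
        return self.net(GradReverse.apply(feat, lambd))

# Usage sketch (feat_src / feat_tgt are backbone features of source / target batches):
# bce = nn.BCEWithLogitsLoss()
# logit_s, logit_t = disc(feat_src), disc(feat_tgt)
# loss_adv = bce(logit_s, torch.ones_like(logit_s)) + bce(logit_t, torch.zeros_like(logit_t))
```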
|
| 52 |
+
|
| 53 |
+
# 3.2 Conditional Domain Normalization
|
| 54 |
+
|
| 55 |
+
Conditional Domain Normalization (CDN) is designed to embed source and target domain inputs into a shared latent space, where the semantic features from different domains carry the same domain attribute information. Formally, let $v^{s} \in \mathbb{R}^{N \times C \times H \times W}$ and $v^{t} \in \mathbb{R}^{N \times C \times H \times W}$ represent feature maps of source and target inputs, respectively. $C$ is the channel dimension and $N$ denotes the mini-batch size. We first learn a domain embedding vector $e_{domain}^{s} \in \mathbb{R}^{1 \times C \times 1}$ to characterize the domain attribute of source inputs. It is accomplished by a domain embedding network $\mathbf{F}_{d}(\cdot;W)$ parameterized by two fully-connect layers with ReLU non-linearity $\delta$ as
|
| 56 |
+
|
| 57 |
+
$$
e_{domain}^{s} = \mathbf{F}_d\left(v_{avg}^{s}; W\right) = \delta\left(W_2\, \delta\left(W_1 v_{avg}^{s}\right)\right). \tag{2}
$$
|
| 60 |
+
|
| 61 |
+

|
| 62 |
+
Fig. 1: (Left) Traditional domain adversarial approach. (Right) Conditional Domain Normalization (CDN). The green and blue cubes represent the feature maps of domain A and domain B respectively.
|
| 63 |
+
|
| 64 |
+
And $v_{avg}^{s} \in \mathbb{R}^{N \times C \times 1}$ represents the channel-wise statistics of source feature $v^{s}$ generated by global average pooling
|
| 65 |
+
|
| 66 |
+
$$
v_{avg}^{s} = \frac{1}{HW} \sum_{h=1}^{H} \sum_{w=1}^{W} v^{s}(h, w). \tag{3}
$$
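The domain embedding module of Eqs. 2-3 is a small network on top of globally pooled features. The following is a minimal sketch under stated assumptions: the two fully connected layers keep the channel dimension (the paper does not specify hidden sizes), and the per-sample embeddings are averaged over the batch to obtain a single $1 \times C$ domain-vector.

```python
import torch
import torch.nn as nn

class DomainEmbedding(nn.Module):
    """Sketch of F_d (Eqs. 2-3): global average pooling followed by two FC layers with ReLU."""
    def __init__(self, channels):
        super().__init__()
        # Hidden size kept at `channels` as an assumption; the paper only states "two FC layers".
        self.fc = nn.Sequential(
            nn.Linear(channels, channels), nn.ReLU(inplace=True),
            nn.Linear(channels, channels), nn.ReLU(inplace=True))

    def forward(self, v_s):                  # v_s: (N, C, H, W) source-domain features
        v_avg = v_s.mean(dim=(2, 3))         # Eq. 3: spatial global average pooling -> (N, C)
        e = self.fc(v_avg)                   # Eq. 2: delta(W2 * delta(W1 * v_avg))
        return e.mean(dim=0)                 # one C-dimensional domain-vector for the batch
```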
|
| 69 |
+
|
| 70 |
+
To embed both source and target domain inputs into a shared latent space, where source and target features carry the same domain attribute while preserving their individual image contents, we encode the target features $v^{t}$ with the source domain embedding via an affine transformation as
|
| 71 |
+
|
| 72 |
+
$$
\hat{v}^{t} = \mathbf{F}\left(e_{domain}^{s}; W_{\gamma}, b_{\gamma}\right) \cdot \left(\frac{v^{t} - \mu^{t}}{\sigma^{t}}\right) + \mathbf{F}\left(e_{domain}^{s}; W_{\beta}, b_{\beta}\right), \tag{4}
$$
|
| 75 |
+
|
| 76 |
+
where $\mu^t$ and $\sigma^t$ denote the mean and variance of target feature $v^t$ . The affine parameters are learned by function $F(\cdot; W_{\gamma}, b_{\gamma})$ and $F(\cdot; W_{\beta}, b_{\beta})$ conditioned on the source domain embedding vector $e_{domain}^s$ ,
|
| 77 |
+
|
| 78 |
+
$$
F\left(e_{domain}^{s}; W_{\gamma}, b_{\gamma}\right) = W_{\gamma} e_{domain}^{s} + b_{\gamma}, \quad F\left(e_{domain}^{s}; W_{\beta}, b_{\beta}\right) = W_{\beta} e_{domain}^{s} + b_{\beta}. \tag{5}
$$
|
| 81 |
+
|
| 82 |
+
For the target feature mean $\mu^t\in \mathbb{R}^{1\times C\times 1}$ and variance $\sigma^t\in \mathbb{R}^{1\times C\times 1}$ , we calculate it with a standard batch normalization [19]
|
| 83 |
+
|
| 84 |
+
$$
\mu_c^{t} = \frac{1}{NHW} \sum_{n=1}^{N} \sum_{h=1}^{H} \sum_{w=1}^{W} v_{nchw}^{t}, \quad \sigma_c^{t} = \sqrt{\frac{1}{NHW} \sum_{n=1}^{N} \sum_{h=1}^{H} \sum_{w=1}^{W} \left(v_{nchw}^{t} - \mu_c^{t}\right)^{2} + \epsilon}, \tag{6}
$$
|
| 87 |
+
|
| 88 |
+
where $\mu_c^t$ and $\sigma_c^t$ denote the $c$-th channel of $\mu^t$ and $\sigma^t$. Finally, we use a discriminator to supervise the encoding of the domain attribute as
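Putting Eqs. 4-6 together, a CDN layer normalizes the target features with their own batch statistics and then applies an affine transform whose scale and shift are predicted from the source domain-vector. A minimal sketch is shown below; it assumes a precomputed domain-vector `e_domain_s` (for example from a module like the `DomainEmbedding` sketch above), and the module name `CDNLayer` is illustrative.

```python
import torch
import torch.nn as nn

class CDNLayer(nn.Module):
    """Sketch of the conditional normalization of Eqs. 4-6, given a source domain-vector."""
    def __init__(self, channels, eps=1e-5):
        super().__init__()
        self.to_gamma = nn.Linear(channels, channels)  # F(.; W_gamma, b_gamma) of Eq. 5
        self.to_beta = nn.Linear(channels, channels)   # F(.; W_beta,  b_beta)  of Eq. 5
        self.eps = eps

    def forward(self, v_t, e_domain_s):
        # Eq. 6: batch statistics of the target features v^t with shape (N, C, H, W).
        mu = v_t.mean(dim=(0, 2, 3), keepdim=True)
        var = v_t.var(dim=(0, 2, 3), unbiased=False, keepdim=True)
        v_norm = (v_t - mu) / torch.sqrt(var + self.eps)
        # Eq. 5: affine parameters conditioned on the source domain-vector (shape (C,)).
        gamma = self.to_gamma(e_domain_s).view(1, -1, 1, 1)
        beta = self.to_beta(e_domain_s).view(1, -1, 1, 1)
        # Eq. 4: target features re-encoded with the source domain attribute.
        return gamma * v_norm + beta
```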
|
| 89 |
+
|
| 90 |
+
$$
\min_{\theta_d} \max_{\theta_g} \mathcal{L}_{adv} = \mathbb{E}\left[\log\left(D(\mathbf{F}_d(v^{s}); \theta_d)\right)\right] + \mathbb{E}\left[\log\left(1 - D(\mathbf{F}_d(\hat{v}^{t}); \theta_d)\right)\right], \tag{7}
$$
|
| 93 |
+
|
| 94 |
+
where $v^s$ and $v^t$ are generated by $G(\cdot;\theta_g)$.
|
| 95 |
+
|
| 96 |
+

|
| 97 |
+
Fig. 2: Faster R-CNN network incorporates with CDN. The CDN is adopted in both backbone network and bounding box head network to adaptively address the domain shift at different representation levels.
|
| 98 |
+
|
| 99 |
+
Discussion CDN exhibits a significant difference compared with existing adversarial adaptation works. As shown in Fig. 1, previous methods conduct domain confusion learning directly on semantic features to remove domain-specific factors. However, the semantic features contain both domain attribute and image contents. It is not easy to enforce the domain discriminator only regularizing the domain-specific factors without inducing any undesirable influence on image contents. In contrast, we disentangle the domain attribute out of the semantic features via conditional domain normalization. And this domain attribute is used to encode other domains' features, thus different domain features carry the same domain attribute information.
|
| 100 |
+
|
| 101 |
+
# 3.3 Adapting Detector with Conditional Domain Normalization
|
| 102 |
+
|
| 103 |
+
Convolution neural network's (CNN) success in pattern recognition has been largely attributed to its great capability of learning hierarchical representations [43]. More specifically, the first few layers of CNN focus on low-level features of local pattern, while higher layers capture semantic representations. Given this observation, CNN based object detectors naturally exhibit different types of domain shift at various levels' representations. Hence we incorporate CDN into different convolution stages in object detectors to address the domain mismatch adaptively, as shown in Fig.2.
|
| 104 |
+
|
| 105 |
+
Coincident to our analysis, some recent works [33, 46] empirically demonstrate that global and local region alignments have different influences on detection performance. For easy comparison, we refer to the CDN located at the backbone network as global alignment, and CDN in the bounding box head networks as local or instance alignment.
|
| 106 |
+
|
| 107 |
+
As shown in Fig. 2, taking faster-RCNN model [32] with ResNet [13] backbone as an example, we incorporate CDN in the last residual block at each stage. Thus
|
| 108 |
+
|
| 109 |
+
the global alignment loss can be computed by
|
| 110 |
+
|
| 111 |
+
$$
L_{adv}^{global} = \sum_{l=1}^{L} \mathbb{E}\left[\log\left(D_l\left(\mathbf{F}_d^{l}(v_l^{s}); \theta_d^{l}\right)\right)\right] + \mathbb{E}\left[\log\left(1 - D_l\left(\mathbf{F}_d^{l}(\hat{v}_l^{t}); \theta_d^{l}\right)\right)\right], \tag{8}
$$
|
| 114 |
+
|
| 115 |
+
where $v_{l}^{s}$ and $v_{l}^{t}$ denote $l$ -th layer's source feature and the encoded target feature, and $D_{l}$ represents the corresponding domain discriminator parameterized by $\theta_{d}^{l}$ .
|
| 116 |
+
|
| 117 |
+
As for bounding box head network, we adopt CDN on the fixed-size region of interest (ROI) features generated by ROI pooling [32]. Because the original ROIs are often noisy and the quantity of source and target ROIs are not equal, we randomly select $\min(N_{roi}^S, N_{roi}^T)$ ROIs from each domain. $N_{roi}^S$ and $N_{roi}^T$ represent the quantity of source and target ROIs after non-maximum suppression (NMS). Hence we have instance alignment regularization for ROI features as
|
| 118 |
+
|
| 119 |
+
$$
L_{adv}^{instance} = \mathbb{E}\left[\log\left(D_{roi}\left(\mathbf{F}_d^{roi}(v_{roi}^{s}); \theta_d^{roi}\right)\right)\right] + \mathbb{E}\left[\log\left(1 - D_{roi}\left(\mathbf{F}_d^{roi}(\hat{v}_{roi}^{t}); \theta_d^{roi}\right)\right)\right]. \tag{9}
$$
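A sketch of how the instance-level term of Eq. 9 can be computed on balanced ROI sets, following the random selection of $\min(N_{roi}^S, N_{roi}^T)$ ROIs described above. It assumes the ROI features have already been pooled to vectors, and the names (`embed_roi`, `disc_roi`) are placeholders for the ROI-level domain embedding and discriminator.

```python
import torch
import torch.nn as nn

def instance_alignment_loss(roi_feats_src, roi_feats_tgt_hat, embed_roi, disc_roi):
    """Eq. 9 computed on randomly balanced ROI features (after NMS).

    roi_feats_*: (num_rois, C) pooled ROI features; embed_roi plays the role of F_d^roi
    and disc_roi the role of D_roi.
    """
    n = min(roi_feats_src.size(0), roi_feats_tgt_hat.size(0))
    idx_s = torch.randperm(roi_feats_src.size(0))[:n]
    idx_t = torch.randperm(roi_feats_tgt_hat.size(0))[:n]
    e_s = embed_roi(roi_feats_src[idx_s])        # ROI-level domain embeddings (source)
    e_t = embed_roi(roi_feats_tgt_hat[idx_t])    # ROI-level domain embeddings (encoded target)
    logit_s, logit_t = disc_roi(e_s), disc_roi(e_t)
    bce = nn.BCEWithLogitsLoss()
    # Source ROIs labeled 1, encoded target ROIs labeled 0, mirroring Eq. 9.
    return bce(logit_s, torch.ones_like(logit_s)) + bce(logit_t, torch.zeros_like(logit_t))
```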
|
| 122 |
+
|
| 123 |
+
The overall training objective is to minimize the detection loss $\mathcal{L}_{\text{det}}$ (on the labeled source domain), which consists of a classification loss $\mathcal{L}_{\text{cls}}$ and a regression loss $\mathcal{L}_{\text{reg}}$, and to min-max an adversarial loss $\mathcal{L}_{\text{adv}}$ of the discriminator network
|
| 124 |
+
|
| 125 |
+
$$
\begin{array}{l}
\min_{\theta_d} \max_{\theta_g} \mathcal{L}_{adv} = \lambda L_{adv}^{global} + L_{adv}^{instance} \\
\min_{\theta_g, \theta_h} \mathcal{L}_{det} = \mathcal{L}_{cls}(G(x; \theta_g), H(x; \theta_h)) + \mathcal{L}_{reg}(G(x; \theta_g), H(x; \theta_h)),
\end{array} \tag{10}
$$
|
| 128 |
+
|
| 129 |
+
where $\lambda$ is a weight to balance the global and local alignment regularization.
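A short sketch of how the terms of Eq. 10 could be assembled in one training step; the per-stage global terms of Eq. 8 and the instance term of Eq. 9 are assumed to be computed through gradient-reversal discriminators, so that minimizing the summed loss realizes the min-max. Names and the default weight are placeholders except for $\lambda = 0.4$, which is the default reported in the implementation details.

```python
def total_loss(cls_loss, reg_loss, global_adv_losses, instance_adv_loss, lam=0.4):
    """Assemble Eq. 10: detection loss (source only) + weighted adversarial losses."""
    det_loss = cls_loss + reg_loss                               # L_det, supervised on the source domain
    adv_loss = lam * sum(global_adv_losses) + instance_adv_loss  # lambda * L_global + L_instance
    # With gradient reversal inside the discriminators, minimizing this sum
    # updates theta_d, theta_g and theta_h consistently with Eq. 10.
    return det_loss + adv_loss
```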
|
| 130 |
+
|
| 131 |
+
# 4 Experiments
|
| 132 |
+
|
| 133 |
+
We evaluate CDN on various real-to-real (KITTI to Cityscapes) and synthetic-to-real (Virtual KITTI/Synscapes/SIM10K to BDD100K, PreSIL to KITTI) adaptation benchmarks. We also report results on cross-weather adaptation, Cityscapes to Foggy Cityscapes. Mean average precision (mAP) with an intersection-over-union (IOU) threshold of 0.5 is reported for 2D detection experiments. We use Source and Target to represent the results of supervised training on source and target domain, respectively. For 3D point cloud object detection, PointR-CNN [37] with backbone of PointNet++ [29] is adopted as our baseline model. Following standard metric on KITTI benchmark [37], we use Average Precision(AP) with IOU threshold 0.7 for car and 0.5 for pedestrian/cyclist.
|
| 134 |
+
|
| 135 |
+
# 4.1 Dataset
|
| 136 |
+
|
| 137 |
+
Cityscapes [5] is a European traffic scene dataset, which contains 2,975 images for training and 500 images for testing.
|
| 138 |
+
|
| 139 |
+
Foggy Cityscapes derives from Cityscapes with a fog simulation proposed by [34]. It also includes 2,975 images for training, 500 images for testing.
|
| 140 |
+
|
| 141 |
+
KITTI [9] contains 21,260 images collected from different urban scenes, which includes 2D RGB images and 3D point cloud data.
|
| 142 |
+
|
| 143 |
+
Virtual KITTI is derived from KITTI with a real-to-virtual cloning technique proposed by [7]. It has the same number of images and categories as KITTI.
|
| 144 |
+
|
| 145 |
+
Synscapes [42] is a synthetic dataset of street scene, which consists of 25,000 images created with a photo-realistic rendering technique.
|
| 146 |
+
|
| 147 |
+
SIM10K [21] is a street view dataset generated from the realistic computer game GTA-V. It has 10,000 training images and the same categories as in Cityscapes.
|
| 148 |
+
|
| 149 |
+
PreSIL [17] is a synthetic point cloud dataset derived from GTA-V, which consists of 50,000 frames of high-definition images and point clouds.
|
| 150 |
+
|
| 151 |
+
BDD100K [44] is a large-scale dataset (contains 100k images) that covers diverse driving scenes. It is a good representative of real data in the wild.
|
| 152 |
+
|
| 153 |
+
# 4.2 Implementation Details
|
| 154 |
+
|
| 155 |
+
We train the Faster R-CNN [32] model for 12 epochs on all experiments. The model is optimized by SGD with multi-step learning rate decay. SGD uses the learning rate of 0.00625 multiplied by the batchsize, and momentum of 0.9. All experiments use sync BN [26] with a batchsize of 32. $\lambda$ is set as 0.4 by default in all experiments. On synthetic-to-real adaptation, for a fair comparison, we randomly select 7000 images for training and 3000 for testing, for all synthetic datasets and BDD100K dataset. For 3D point cloud detection, we use PointR-CNN [37] model with same setting as [37]. We incorporated the CDN layer in the point-wise feature generation stage (global alignment) and 3D ROIs proposal stage (instance alignment).
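A minimal sketch of the optimization setup described above (SGD with the learning rate scaled by batch size, momentum 0.9, multi-step decay over 12 epochs). The milestone epochs are an assumption, since the exact decay schedule is not given.

```python
import torch

def build_optimizer(model, batch_size=32, base_lr=0.00625, momentum=0.9):
    # Learning rate of 0.00625 multiplied by the batch size, as stated above.
    optimizer = torch.optim.SGD(model.parameters(), lr=base_lr * batch_size, momentum=momentum)
    # Multi-step decay; the milestones [8, 11] are assumed, not taken from the paper.
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[8, 11], gamma=0.1)
    return optimizer, scheduler
```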
|
| 156 |
+
|
| 157 |
+
# 5 Experimental Results and Analysis
|
| 158 |
+
|
| 159 |
+
# 5.1 Results on Cityscapes to Foggy Cityscapes
|
| 160 |
+
|
| 161 |
+
We compare CDN with the state-of-the-art methods in Table 1. Following [33, 46], we also report results using a Faster R-CNN model with a VGG16 backbone. As shown in Table 1, CDN outperforms previous state-of-the-art methods by a large margin of $1.8\%$ mAP. The results demonstrate the effectiveness of CDN in reducing domain gaps. A detailed comparison of different CDN settings can be found in the ablation study (Section 7). As shown in Fig. 3, our method exhibits good generalization capability under foggy weather conditions.
|
| 162 |
+
|
| 163 |
+
# 5.2 Results on KITTI to Cityscapes
|
| 164 |
+
|
| 165 |
+
Different camera settings may influence the detector performance in real-world applications. We conduct the cross-camera adaptation from KITTI to Cityscapes. Table 2 shows the adaptation results on car category produced by Faster R-CNN with VGG16. Global and Instance represent global and local alignment respectively. The results demonstrate that CDN achieves $1.7\%$ mAP improvements
|
| 166 |
+
|
| 167 |
+
<table><tr><td>Method</td><td>Person</td><td>Rider</td><td>Car</td><td>Truck</td><td>Bus</td><td>Train</td><td>Motorcycle</td><td>Bicycle</td><td>mAP</td></tr><tr><td>Source</td><td>29.3</td><td>31.9</td><td>43.5</td><td>15.8</td><td>27.4</td><td>9.0</td><td>20.3</td><td>29.9</td><td>26.1</td></tr><tr><td>DA-Faster [4]</td><td>25.0</td><td>31.0</td><td>40.5</td><td>22.1</td><td>35.3</td><td>20.2</td><td>20.0</td><td>27.1</td><td>27.9</td></tr><tr><td>DT [18]</td><td>25.4</td><td>39.3</td><td>42.4</td><td>24.9</td><td>40.4</td><td>23.1</td><td>25.9</td><td>30.4</td><td>31.5</td></tr><tr><td>SCDA [46]</td><td>33.5</td><td>38.0</td><td>48.5</td><td>26.5</td><td>39.0</td><td>23.3</td><td>28.0</td><td>33.6</td><td>33.8</td></tr><tr><td>DDMRL [22]</td><td>30.8</td><td>40.5</td><td>44.3</td><td>27.2</td><td>38.4</td><td>34.5</td><td>28.4</td><td>32.2</td><td>34.6</td></tr><tr><td>SWDA [33]</td><td>30.3</td><td>42.5</td><td>44.6</td><td>24.5</td><td>36.7</td><td>31.6</td><td>30.2</td><td>35.8</td><td>34.8</td></tr><tr><td>CDN (ours)</td><td>35.8</td><td>45.7</td><td>50.9</td><td>30.1</td><td>42.5</td><td>29.8</td><td>30.8</td><td>36.5</td><td>36.6</td></tr></table>
|
| 168 |
+
|
| 169 |
+
Table 1: Cityscapes to Foggy Cityscapes adaptation.
|
| 170 |
+
|
| 171 |
+
over the state-of-the-art methods. We can also find that instance feature alignment contributes to a larger performance boost than global counterpart, which is consistent with previous discovery [33, 46].
|
| 172 |
+
|
| 173 |
+
# 5.3 Results on SIM10K to Cityscapes
|
| 174 |
+
|
| 175 |
+
Following the setting of [33], we evaluate the detection performance on car on SIM10K-to-Cityscapes benchmark. The results in Table 3 demonstrate CDN constantly performs better than the baseline methods. CDN with both global and instance alignment achieves $49.3\%$ mAP on validation set of Cityscapes, which outperforms the previous state-of-the-art method by $1.6\%$ mAP.
|
| 176 |
+
|
| 177 |
+
# 5.4 Results on Synthetic to Real Data
|
| 178 |
+
|
| 179 |
+
To thoroughly evaluate the performance of the state-of-the-art methods on synthetic-to-real adaptation, we construct a large-scale synthetic-to-real adaptation benchmark on various public synthetic datasets, including Virtual KITTI, Synscapes and SIM10K. "All" represents using the combination of 3 synthetic datasets. Compared with SIM10K-to-Cityscapes, the proposed benchmark is more challenging in terms of much larger image diversity in both real and synthetic domains. We compare CDN with the state-of-the-art method SWDA[33] in Table 4. CDN consistently outperforms SWDA under different backbones, which achieves average $2.2\%$ mAP and $2.1\%$ mAP improvements on Faster-R18 and Faster-R50 respectively. Using the same adaptation method, the detection performance strongly depends on the quality of synthetic data. For instance, the adaptation performance of SIM10K is much better than Virtual KITTI. Some example predictions produced by our method are visualized in Fig. 3.
|
| 180 |
+
|
| 181 |
+
# 5.5 Adaptation on 3D Point Cloud Detection
|
| 182 |
+
|
| 183 |
+
We evaluate CDN on adapting 3D object detector from synthetic point cloud (PreSIL) to real point cloud data (KITTI). Table 5 shows that CDN constantly outperforms the state-of-the-art method PointDAN [30] across all categories, with an average improvement of $1.9\%$ AP. We notice that instance alignment
|
| 184 |
+
|
| 185 |
+
<table><tr><td>Method</td><td>Global</td><td>Instance</td><td>mAP (%)</td></tr><tr><td>Source only</td><td></td><td></td><td>37.1</td></tr><tr><td>DA-Faster [4]</td><td>✓</td><td>✓</td><td>38.3</td></tr><tr><td>SWDA [33]</td><td>✓</td><td>✓</td><td>43.2</td></tr><tr><td>SCDA [46]</td><td>✓</td><td>✓</td><td>42.9</td></tr><tr><td></td><td>✓</td><td></td><td>40.2</td></tr><tr><td>CDN</td><td></td><td>✓</td><td>43.1</td></tr><tr><td></td><td>✓</td><td>✓</td><td>44.9</td></tr></table>
|
| 186 |
+
|
| 187 |
+
Table 2: KITTI to Cityscapes.
|
| 188 |
+
|
| 189 |
+
<table><tr><td>Method</td><td>Global</td><td>Instance</td><td>mAP (%)</td></tr><tr><td>Source only</td><td></td><td></td><td>34.3</td></tr><tr><td>DA-Faster [4]</td><td>✓</td><td>✓</td><td>38.3</td></tr><tr><td>SWDA [33]</td><td>✓</td><td>✓</td><td>47.7</td></tr><tr><td>SCDA [46]</td><td>✓</td><td>✓</td><td>44.1</td></tr><tr><td></td><td>✓</td><td></td><td>41.2</td></tr><tr><td>CDN</td><td></td><td>✓</td><td>45.8</td></tr><tr><td></td><td>✓</td><td>✓</td><td>49.3</td></tr></table>
|
| 190 |
+
|
| 191 |
+
Table 3: SIM10K to Cityscapes.
|
| 192 |
+
|
| 193 |
+
<table><tr><td>Model</td><td>Method</td><td>Virtual KITTI</td><td>Synscapes</td><td>SIM10K</td><td>All</td></tr><tr><td rowspan="4">Faster-R18</td><td>Source</td><td>9.8</td><td>24.5</td><td>37.7</td><td>38.2</td></tr><tr><td>SWDA[33]</td><td>15.6</td><td>27.0</td><td>40.2</td><td>41.3</td></tr><tr><td>CDN</td><td>17.5</td><td>29.1</td><td>42.7</td><td>43.6</td></tr><tr><td>Target</td><td colspan="4">70.5</td></tr><tr><td rowspan="4">Faster-R50</td><td>Source</td><td>13.9</td><td>29.1</td><td>41.6</td><td>42.8</td></tr><tr><td>SWDA[33]</td><td>19.7</td><td>31.5</td><td>42.9</td><td>44.3</td></tr><tr><td>CDN</td><td>21.8</td><td>33.4</td><td>45.3</td><td>47.2</td></tr><tr><td>Target</td><td colspan="4">75.6</td></tr></table>
|
| 194 |
+
|
| 195 |
+
Table 4: Adaptation from different synthetic data to real data. mAP on car is reported on BDD100K validation. The results of supervised training on BDD100K are highlighted in gray.
|
| 196 |
+
|
| 197 |
+

|
| 198 |
+
Fig.3: Example results on Foggy Cityscapes/Synscapes/SIM10K/BDD100K (from top to bottom). The results are produced by a Faster R-CNN model incorporated with CDN. The class and score predictions are at the top left corner of the bounding box. Zoom in to visualize the details.
|
| 199 |
+
|
| 200 |
+
contributes to a larger performance boost than global alignment. This can be attributed to the fact that point cloud data spread over a huge 3D space while most of the information is stored in the local foreground points (see Fig. 4).
|
| 201 |
+
|
| 202 |
+

|
| 203 |
+
Fig. 4: Top:PreSIL; Bottom:KITTI.
|
| 204 |
+
|
| 205 |
+
<table><tr><td>Model</td><td>Global Instance</td><td>Car</td><td>Pedestrian</td><td>Cyclist</td></tr><tr><td>Source</td><td></td><td>15.7</td><td>9.6</td><td>5.6</td></tr><tr><td>CycleGAN [35]</td><td>✓</td><td>16.5</td><td>10.3</td><td>5.9</td></tr><tr><td>PointDAN[30]</td><td>✓</td><td>17.1</td><td>10.9</td><td>7.5</td></tr><tr><td></td><td>✓</td><td></td><td>17.3</td><td>6.0</td></tr><tr><td>CDN</td><td></td><td>18.5</td><td>12.8</td><td>8.7</td></tr><tr><td></td><td>✓</td><td>19.0</td><td>13.2</td><td>9.1</td></tr><tr><td>Target</td><td></td><td>75.7</td><td>41.7</td><td>59.6</td></tr></table>
|
| 206 |
+
|
| 207 |
+
Table 5: Adapting from synthetic (PreSIL) to real (KITTI) point cloud. AP at the moderate difficulty level on the KITTI test set is reported.
|
| 208 |
+
|
| 209 |
+
# 6 Analysis
|
| 210 |
+
|
| 211 |
+
# 6.1 Visualize and Analyze the Feature Maps
|
| 212 |
+
|
| 213 |
+
Despite the general efficiency on various benchmarks, we are also interested in the underlying principle of CDN. We interpret the learned domain embedding via appending a decoder network after the backbone to reconstruct the RGB images from the feature maps. As shown in Fig. 5, the top row shows the original inputs from Foggy Cityscapes, SIM10K and Synscapes (left to right), and the bottom row shows the reconstructed images from the corresponding features encoded with the domain embedding of another domain. The reconstructed images carry the same domain style of another domain, suggesting the learned domain embedding captures the domain attribute information and CDN can effectively transform the domain style of different domains.
|
Fig. 5: Top row: Original inputs from Foggy Cityscapes, SIM10K and Synscapes (left to right); Bottom row: Reconstructed images from features encoded with the learned domain embedding of another domain.
|
Furthermore, we compute Fréchet Inception Distance (FID)[14] score to quantitatively investigate the difference between source and target features. FID has been a popular metric to evaluate the style similarity between two groups of images in GANs. Lower FID score indicates a smaller style difference. For easy comparison, we normalize the FID score to $[0, 1]$ by dividing the maximum score. As shown in Table 6, the feature learned with CDN achieves significantly smaller FID score compared with feature learned on source domain only, suggesting CDN effectively reduces the domain gap in the feature space. Obviously, supervised joint training on source and target data gets the smallest FID score, which is verified by the best detection performance achieved by joint training. As shown in Fig. 6, synthetic-to-real has larger FID score than real-to-real dataset, since the former owns larger domain gaps.
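For reference, the Fréchet distance underlying the FID score compares the Gaussian statistics of two feature sets: $\text{FID} = \lVert \mu_1 - \mu_2 \rVert^2 + \mathrm{Tr}\big(\Sigma_1 + \Sigma_2 - 2(\Sigma_1 \Sigma_2)^{1/2}\big)$. A minimal numpy/scipy sketch follows; it is applied directly to feature matrices here, which is an assumption about how the score in Table 6 could be reproduced rather than the paper's exact pipeline.

```python
import numpy as np
from scipy import linalg

def frechet_distance(feats_a, feats_b):
    """FID-style Frechet distance between two (num_samples, dim) feature matrices."""
    mu_a, mu_b = feats_a.mean(axis=0), feats_b.mean(axis=0)
    cov_a = np.cov(feats_a, rowvar=False)
    cov_b = np.cov(feats_b, rowvar=False)
    covmean, _ = linalg.sqrtm(cov_a.dot(cov_b), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real          # drop tiny imaginary parts from numerical noise
    diff = mu_a - mu_b
    return diff.dot(diff) + np.trace(cov_a + cov_b - 2.0 * covmean)
```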
|
| 241 |
+
|
| 242 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">SIM to BDD</td><td colspan="2">City to Foggy</td></tr><tr><td>FID</td><td>mAP</td><td>FID</td><td>mAP</td></tr><tr><td>Source</td><td>0.94</td><td>37.7</td><td>0.83</td><td>26.1</td></tr><tr><td>Joint training</td><td>0.67</td><td>79.3</td><td>0.41</td><td>49.5</td></tr><tr><td>SWDA [33]</td><td>0.83</td><td>40.2</td><td>0.76</td><td>34.8</td></tr><tr><td>CDN</td><td>0.71</td><td>42.7</td><td>0.60</td><td>36.6</td></tr></table>
|
| 243 |
+
|
| 244 |
+
Table 6: FID score and mAP.
|
| 245 |
+
|
| 246 |
+

|
| 247 |
+
Fig. 6: FID scores on all datasets.
|
| 248 |
+
|
| 249 |
+
# 6.2 Analysis on Domain Discrepancy
|
| 250 |
+
|
| 251 |
+
We adopt symmetric Kullback-Leibler divergence to investigate the discrepancy between source and target domain in feature space. To simplify the analysis, we assume source and target features are drawn from the multivariate normal distribution. The divergence is calculated with the Res5-3 features and plotted in log scale. Fig. 7 (a) and (c) show that the domain divergence continues decreasing during training, indicating the Conditional Domain Normalization keeps reducing domain shift in feature space. Benefiting from the reduction of domain divergence, the adaptation performance on the target domain keeps increasing. Comparing with SWDA, CDN achieves lower domain discrepancy and higher adaptation performance.
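Under the multivariate-normal assumption above, the symmetric KL divergence between the source and target feature distributions has a closed form. A small numpy sketch is given below; the input feature matrices (e.g. flattened Res5-3 activations) are assumptions about the exact inputs used.

```python
import numpy as np

def gaussian_kl(mu0, cov0, mu1, cov1):
    """KL( N(mu0, cov0) || N(mu1, cov1) ) for multivariate normals."""
    k = mu0.shape[0]
    cov1_inv = np.linalg.inv(cov1)
    diff = mu1 - mu0
    term_trace = np.trace(cov1_inv @ cov0)
    term_quad = diff @ cov1_inv @ diff
    _, logdet0 = np.linalg.slogdet(cov0)
    _, logdet1 = np.linalg.slogdet(cov1)
    return 0.5 * (term_trace + term_quad - k + logdet1 - logdet0)

def symmetric_kl(feats_src, feats_tgt):
    """Symmetric KL between Gaussians fitted to two (num_samples, dim) feature sets."""
    mu_s, cov_s = feats_src.mean(axis=0), np.cov(feats_src, rowvar=False)
    mu_t, cov_t = feats_tgt.mean(axis=0), np.cov(feats_tgt, rowvar=False)
    return gaussian_kl(mu_s, cov_s, mu_t, cov_t) + gaussian_kl(mu_t, cov_t, mu_s, cov_s)
```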
|
| 252 |
+
|
| 253 |
+
Fig. 7 (b)(d) shows the t-SNE plot of instance features extracted by a Faster R-CNN model incorporated with CDN. The same category features from two domains group in tight clusters, suggesting source and target domain distributions are well aligned in feature space. Besides, features of different categories own clear decision boundaries, indicating discriminative features are learned by our method. These two factors contribute to the target performance boost.
|
| 254 |
+
|
| 255 |
+
# 7 Ablation Study
|
| 256 |
+
|
| 257 |
+
For the ablation study, we use a Faster R-CNN model with ResNet-18 on the SIM10K-to-BDD100K adaptation benchmark, and a Faster R-CNN model with VGG16 on the Cityscapes-to-Foggy Cityscapes adaptation benchmark. G and I denote adopting CDN in the backbone and the bounding box head network, respectively.

Fig. 7: (a) City-to-Foggy, (b) City-to-Foggy, (c) CG-to-Real, (d) CG-to-Real. (a)(c): Domain divergence and adaptation performance; (b)(d): t-SNE plots of instance features.

Fig. 8: (a) Adopt CDN at different convolution stages of ResNet; (b) Adopt CDN in existing adaptation frameworks; (c) Domain embedding vs. semantic features.

Adopting CDN at different convolution stages. Fig. 8(a) compares the results of Faster R-CNN models adopting CDN at different convolution stages. We follow [13] to divide ResNet into 5 stages. Bbox head denotes the bounding box head network. From left to right, adding more CDN layers keeps boosting the adaptation performance on both benchmarks, benefiting from adaptive distribution alignments across different levels' representation. It suggests that adopting CDN in each convolution stage is a better choice than only aligning domain distributions at one or two specific convolution stages.
|
| 285 |
+
|
| 286 |
+
Comparing with existing domain adaptation frameworks adopting CDN. Fig. 8(b) shows the results of adopting the CDN layer in existing adaptation methods like SWDA [33] and SCDA [46]. Directly adopting CDN in SWDA and SCDA brings an average $1.3\%$ mAP improvement on the two adaptation benchmarks, suggesting CDN is more effective at addressing domain shifts than traditional domain confusion learning. This can be attributed to the fact that CDN disentangles the domain-specific factors out of the semantic features by learning a domain-vector; leveraging this domain-vector to align the different domain distributions is more efficient.
|
| 289 |
+
|
| 290 |
+
Comparing domain embedding with semantic features. In Eq. 7, we can use either the semantic features $(v^{s},\hat{v}^{t})$ or the domain embeddings $(\mathbf{F}_d(v^s),\mathbf{F}_d(\hat{v}^t))$ as inputs to the discriminator. Fig. 8(c) compares the adaptation performance of using semantic features with that of using domain embeddings. Although semantic features can improve the performance over the baseline, domain embeddings consistently achieve better results, suggesting that the learned domain embedding captures the domain attribute information well and is free from undesirable regularization on specific image contents.
|
| 291 |
+
|
| 292 |
+
Value of $\lambda$. In Eq. 10, $\lambda$ controls the balance between global and local regularization. Fig. 9 (left) shows the influence of different $\lambda$ values on adaptation performance. Because object detectors naturally focus more on local regions, stronger instance regularization contributes largely to detection performance. In our experiments, $\lambda$ between 0.4 and 0.5 gives the best performance.
|
| 293 |
+
Fig. 9: Left: mAP vs. value of $\lambda$; Middle: mAP vs. percentage (%) of synthetic image data; Right: AP vs. percentage (%) of synthetic point cloud.

Scale of target domain dataset. Fig. 9 (middle/right) quantitatively investigates the relation between real-data detection performance and the percentage of synthetic data used for training. "All" means using the combination of the 3 different synthetic datasets. A larger synthetic dataset provides better adaptation performance, on both 2D image and 3D point cloud detection.
|
| 302 |
+
|
| 303 |
+
# 8 Conclusion
|
| 304 |
+
|
| 305 |
+
We present the Conditional Domain Normalization (CDN) to adapt object detectors across different domains. CDN aims to embed different domain inputs into a shared latent space, where the features from different domains carry the same domain attribute. Extensive experiments demonstrate the effectiveness of CDN on adapting object detectors, including 2D image and 3D point cloud detection tasks. And both quantitative and qualitative comparisons are conducted to analyze the features learned by our method.
|
| 306 |
+
|
| 307 |
+
# References
|
| 308 |
+
|
| 309 |
+
1. Ben-David, S., Blitzer, J., Crammer, K., Kulesza, A., Pereira, F., Vaughan, J.W.: A theory of learning from different domains. Machine learning (2010)
|
| 310 |
+
2. Bousmalis, K., Silberman, N., Dohan, D., Erhan, D., Krishnan, D.: Unsupervised pixel-level domain adaptation with generative adversarial networks. In: CVPR (2017)
|
| 311 |
+
3. Chen, X., Kundu, K., Zhu, Y., Berneshawi, A.G., Ma, H., Fidler, S., Urtasun, R.: 3d object proposals for accurate object class detection. In: Advances in Neural Information Processing Systems (2015)
|
| 312 |
+
4. Chen, Y., Li, W., Sakaridis, C., Dai, D., Van Gool, L.: Domain adaptive faster r-cnn for object detection in the wild. In: CVPR (2018)
|
| 313 |
+
5. Cordts, M., Omran, M., Ramos, S., Rehfeld, T., Enzweiler, M., Benenson, R., Franke, U., Roth, S., Schiele, B.: The cityscapes dataset for semantic urban scene understanding. In: CVPR (2016)
|
| 314 |
+
6. Dumoulin, V., Shlens, J., Kudlur, M.: A learned representation for artistic style. arXiv preprint arXiv:1610.07629 (2016)
|
| 315 |
+
7. Gaidon, A., Wang, Q., Cabon, Y., Vig, E.: Virtual worlds as proxy for multi-object tracking analysis. In: CVPR (2016)
|
| 316 |
+
8. Ganin, Y., Lempitsky, V.: Unsupervised domain adaptation by backpropagation. In: ICML (2015)
|
| 317 |
+
9. Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? the kitti vision benchmark suite. In: CVPR (2012)
|
| 318 |
+
10. Girshick, R.: Fast r-cnn. In: ICCV (2015)
11. Girshick, R., Donahue, J., Darrell, T., Malik, J.: Rich feature hierarchies for accurate object detection and semantic segmentation. In: CVPR (2014)
12. He, K., Gkioxari, G., Dollár, P., Girshick, R.: Mask r-cnn. In: ICCV (2017)
13. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR (2016)
14. Heusel, M., Ramsauer, H., Unterthiner, T., Nessler, B., Hochreiter, S.: Gans trained by a two time-scale update rule converge to a local nash equilibrium. In: Advances in neural information processing systems (2017)
15. Hoffman, J., Tzeng, E., Park, T., Zhu, J.Y., Isola, P., Saenko, K., Efros, A.A., Darrell, T.: Cycada: Cycle-consistent adversarial domain adaptation. arXiv preprint arXiv:1711.03213 (2017)
16. Huang, X., Belongie, S.: Arbitrary style transfer in real-time with adaptive instance normalization. In: ICCV (2017)
17. Hurl, B., Czarnecki, K., Waslander, S.: Precise synthetic image and lidar (presil) dataset for autonomous vehicle perception. In: 2019 IEEE Intelligent Vehicles Symposium (IV) (2019)
18. Inoue, N., Furuta, R., Yamasaki, T., Aizawa, K.: Cross-domain weakly-supervised object detection through progressive domain adaptation. In: CVPR (2018)
19. Ioffe, S., Szegedy, C.: Batch normalization: Accelerating deep network training by reducing internal covariate shift. arXiv preprint arXiv:1502.03167 (2015)
|
| 328 |
+
20. James, S., Wohlhart, P., Kalakrishnan, M., Kalashnikov, D., Irpan, A., Ibarz, J., Levine, S., Hadsell, R., Bousmalis, K.: Sim-to-real via sim-to-sim: Data-efficient robotic grasping via randomized-to-canonical adaptation networks. In: CVPR (2019)
|
| 329 |
+
21. Johnson-Roberson, M., Barto, C., Mehta, R., Sridhar, S.N., Rosaen, K., Vasudevan, R.: Driving in the matrix: Can virtual worlds replace human-generated annotations for real world tasks? arXiv preprint arXiv:1610.01983 (2016)
|
| 330 |
+
|
| 331 |
+
22. Kim, T., Jeong, M., Kim, S., Choi, S., Kim, C.: Diversify and match: A domain adaptive representation learning paradigm for object detection. In: CVPR (2019)
|
| 332 |
+
23. Liu, Z., Miao, Z., Pan, X., Zhan, X., Lin, D., Yu, S.X., Gong, B.: Open compound domain adaptation. In: CVPR (2020)
|
| 333 |
+
24. Long, M., Cao, Z., Wang, J., Jordan, M.I.: Conditional adversarial domain adaptation. In: Advances in Neural Information Processing Systems (2018)
|
| 334 |
+
25. Park, T., Liu, M.Y., Wang, T.C., Zhu, J.Y.: Semantic image synthesis with spatially-adaptive normalization. arXiv preprint arXiv:1903.07291 (2019)
|
| 335 |
+
26. Peng, C., Xiao, T., Li, Z., Jiang, Y., Zhang, X., Jia, K., Yu, G., Sun, J.: Megdet: A large mini-batch object detector. In: CVPR (2018)
|
| 336 |
+
27. Peng, X., Bai, Q., Xia, X., Huang, Z., Saenko, K., Wang, B.: Moment matching for multi-source domain adaptation. In: ICCV (2019)
|
| 337 |
+
28. Peng, X.B., Andrychowicz, M., Zaremba, W., Abbeel, P.: Sim-to-real transfer of robotic control with dynamics randomization. In: ICRA. IEEE (2018)
|
| 338 |
+
29. Qi, C.R., Yi, L., Su, H., Guibas, L.J.: Pointnet++: Deep hierarchical feature learning on point sets in a metric space. In: Advances in neural information processing systems (2017)
|
| 339 |
+
30. Qin, C., You, H., Wang, L., Kuo, C.C.J., Fu, Y.: Pointdan: A multi-scale 3d domain adaption network for point cloud representation. In: Advances in Neural Information Processing Systems (2019)
|
| 340 |
+
31. Quionero-Candela, J., Sugiyama, M., Schwaighofer, A., Lawrence, N.D.: Dataset shift in machine learning. The MIT Press (2009)
|
| 341 |
+
32. Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. In: Advances in neural information processing systems (2015)
|
| 342 |
+
33. Saito, K., Ushiku, Y., Harada, T., Saenko, K.: Strong-weak distribution alignment for adaptive object detection. In: CVPR (2019)
|
| 343 |
+
34. Sakaridis, C., Dai, D., Van Gool, L.: Semantic foggy scene understanding with synthetic data. International Journal of Computer Vision (2018)
|
| 344 |
+
35. Saleh, K., Abobakr, A., Attia, M., Iskander, J., Nahavandi, D., Hossny, M., Nahvandi, S.: Domain adaptation for vehicle detection from bird's eye view lidar point cloud data. In: ICCV Workshops (2019)
|
| 345 |
+
36. Sankaranarayanan, S., Balaji, Y., Jain, A., Nam Lim, S., Chellappa, R.: Learning from synthetic data: Addressing domain shift for semantic segmentation. In: CVPR (2018)
|
| 346 |
+
37. Shi, S., Wang, X., Li, H.: Pointrcnn: 3d object proposal generation and detection from point cloud. In: CVPR (2019)
|
| 347 |
+
38. Tobin, J., Fong, R., Ray, A., Schneider, J., Zaremba, W., Abbeel, P.: Domain randomization for transferring deep neural networks from simulation to the real world. In: IROS. IEEE (2017)
|
| 348 |
+
39. Tsai, Y.H., Sohn, K., Schulter, S., Chandraker, M.: Domain adaptation for structured output via discriminative representations. arXiv preprint arXiv:1901.05427 (2019)
|
| 349 |
+
40. Tzeng, E., Hoffman, J., Saenko, K., Darrell, T.: Adversarial discriminative domain adaptation. In: CVPR (2017)
|
| 350 |
+
41. Wang, X., Yu, K., Dong, C., Change Loy, C.: Recovering realistic texture in image super-resolution by deep spatial feature transform. In: CVPR (2018)
|
| 351 |
+
42. Wrenninge, M., Unger, J.: Synscapes: A photorealistic synthetic dataset for street scene parsing. arXiv preprint arXiv:1810.08705 (2018)
|
| 352 |
+
43. Yosinski, J., Clune, J., Nguyen, A., Fuchs, T., Lipson, H.: Understanding neural networks through deep visualization. arXiv preprint arXiv:1506.06579 (2015)
|
| 353 |
+
|
| 354 |
+
44. Yu, F., Xian, W., Chen, Y., Liu, F., Liao, M., Madhavan, V., Darrell, T.: Bdd100k: A diverse driving video database with scalable annotation tooling. arXiv preprint arXiv:1805.04687 (2018)
|
| 355 |
+
45. Yue, X., Wu, B., Seshia, S.A., Keutzer, K., Sangiovanni-Vincentelli, A.L.: A lidar point cloud generator: from a virtual world to autonomous driving. In: Proceedings of the 2018 ACM on International Conference on Multimedia Retrieval (2018)
|
| 356 |
+
46. Zhu, X., Pang, J., Yang, C., Shi, J., Lin, D.: Adapting object detectors via selective cross-domain alignment. In: CVPR (2019)
|
| 357 |
+
47. Zou, Y., Yu, Z., Vijaya Kumar, B., Wang, J.: Unsupervised domain adaptation for semantic segmentation via class-balanced self-training. In: ECCV (2018)
|
adaptingobjectdetectorswithconditionaldomainnormalization/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f2cec6cf1aa3a863c5b2fc077cba517c1b83fb2e738438c54fa259ae9e70b92
+size 630379

adaptingobjectdetectorswithconditionaldomainnormalization/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7c605d378b37c4502d13e62f74f1756ccba99475c74528bcf6a8ef727fabe2cb
+size 422700

adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/ce8cfb4f-aab6-4458-b05a-8376aede26a3_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:98152fcf51e40ad631ae0841bf55e6ef74590aac10ac426d9404bbba7576cdaa
+size 82931

adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/ce8cfb4f-aab6-4458-b05a-8376aede26a3_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d79c03ab037bfb62d6e687121b77d2bd4d418eaa267735e47f76c1b14c7b64f8
+size 99910

adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/ce8cfb4f-aab6-4458-b05a-8376aede26a3_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:870da82eb4d87f4e5483d16a42db26518305ae941dcef3341acf8edba485d876
+size 3706609

adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/full.md
ADDED
@@ -0,0 +1,325 @@
| 1 |
+
# Adaptive Computationally Efficient Network for Monocular 3D Hand Pose Estimation
|
| 2 |
+
|
| 3 |
+
Zhipeng Fan $^{1}$ , Jun Liu $^{2\star}$ , and Yao Wang $^{1}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ Tandon School of Engineering, New York University, Brooklyn NY, USA {zf606, yw523}@nyu.edu
|
| 6 |
+
|
| 7 |
+
$^{2}$ Information Systems Technology and Design Pillar, Singapore University of Technology and Design, Singapore jun.liu@sutd.edu.sg
|
| 8 |
+
|
| 9 |
+
Abstract. 3D hand pose estimation is an important task for a wide range of real-world applications. Existing works in this domain mainly focus on designing advanced algorithms to achieve high pose estimation accuracy. However, besides accuracy, the computation efficiency that affects the computation speed and power consumption is also crucial for real-world applications. In this paper, we investigate the problem of reducing the overall computation cost yet maintaining the high accuracy for 3D hand pose estimation from video sequences. A novel model, called Adaptive Computationally Efficient (ACE) network, is proposed, which takes advantage of a Gaussian kernel based Gate Module to dynamically switch the computation between a light model and a heavy network for feature extraction. Our model employs the light model to compute efficient features for most of the frames and invokes the heavy model only when necessary. Combined with the temporal context, the proposed model accurately estimates the 3D hand pose. We evaluate our model on two publicly available datasets, and achieve state-of-the-art performance at $22\%$ of the computation cost compared to traditional temporal models.
|
| 10 |
+
|
| 11 |
+
Keywords: 3D Hand Pose Estimation, Computation Efficiency, Dynamic Adaption, Gaussian Gate
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
Understanding human hand poses is a long-standing problem in the computer vision community, due to its many potential applications in action recognition, AR/VR [28], robotics and human-computer interaction (HCI) [11]. The problem of inferring 3D configurations of human hands from images and videos is inherently challenging because of frequent self-occlusion and the large variance of hand poses. A large body of existing works address the problem of hand pose estimation from depth data [7,37], as it reduces ambiguities in the
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Fig. 1: Illustration of our Adaptive Computationally Efficient (ACE) network. Most of the time, the LSTM takes features from the coarse pose encoder and refines the predicted pose. Occasionally, when the pose varies rapidly or is severely occluded, the Gaussian Kernel Gate opts to compute fine features with the computationally heavy model to inject more accurate features into the LSTM.
|
| 19 |
+
|
| 20 |
+
depth dimension and makes it easier to acquire the 3D pose of the corresponding hand. However, depth cameras, such as Kinect, are not always available and are prone to measurement errors when deployed in outdoor settings. Therefore, in this work we address the problem of 3D hand pose estimation with a commercial monocular RGB camera.
|
| 21 |
+
|
| 22 |
+
Recent successes in 3D hand pose estimation [2, 3, 22, 26, 46] mainly focus on employing the same computation framework for all video frames, without considering the redundancy that exists across adjacent frames and the variation of pose estimation difficulty over frames. The moving speed and occlusion status of the human hands vary when performing different actions, which inspires us to design a new scheme that dynamically allocates the computation resources based on the ambiguity determined by the current input frame and the temporal context. This kind of adaptation mechanism is useful for both online and offline applications. For offline pose estimation from videos, being able to use a simpler computation module on most of the frames reduces resource usage and shortens the total inference time for the entire video. For online pose estimation applications (e.g. HCI and robotics), multiple tasks often run concurrently under a total computation resource constraint, so the resources saved at most frames can be released for other important tasks at those time steps, which also reduces the energy consumed by the pose estimation task.
|
| 23 |
+
|
| 24 |
+
Motivated by our goal of reducing computation consumption, and given the fact that the information among video frames can be redundant and the pose estimation difficulty varies over frames, we propose a novel Adaptive Computationally Efficient (ACE) network using a recurrent 3D hand pose estimator with adaptive input. In our method, we design two base pose encoders based on the hourglass (HG) [27] architecture with different computational costs. A Long Short-Term Memory (LSTM) [14] model is introduced to refine the predicted pose and features from the single-frame base pose encoder by considering the temporal consistency. We propose a new Gaussian Gate Module to automatically determine whether the low-complexity coarse encoder output alone is sufficient for the LSTM, or whether the high-complexity fine encoder is needed. The fine encoder is only invoked when necessary, and its output is combined with the output of the coarse encoder to generate the input for the LSTM. The proposed network architecture is illustrated in Fig. 1. To facilitate the training of our switch module, which naturally involves a discrete operation, an effective Gumbel-SoftMax strategy, an approximation of sampling from discrete distributions, is introduced.
|
| 25 |
+
|
| 26 |
+
To summarize, a novel end-to-end ACE network is proposed for 3D hand pose estimation from monocular video. It dynamically switches between using coarse vs. fine features at each time step, which eliminates the computational cost of the fine encoder when the prediction from the coarse encoder is deemed sufficient. We evaluate our network on two broadly used datasets, First-Person Hand Action (FPHA) and Stereo Tracking Benchmark (STB), and obtain state-of-the-art pose estimation accuracy while greatly reducing the overall computation cost (around $78\%$ on the STB dataset) compared to baseline models that constantly use the fine encoder for all time steps.
|
| 27 |
+
|
| 28 |
+
# 2 Related Work
|
| 29 |
+
|
| 30 |
+
Most of the existing works focus on the accuracy of 3D hand pose estimation without explicitly considering the important issue of computation cost. We briefly review recent works in the 3D hand pose estimation domain as well as recent efforts in designing computationally efficient architectures for image and video understanding.
|
| 31 |
+
|
| 32 |
+
3D hand pose estimation. 3D hand pose estimation is a long-standing problem in the computer vision domain, and various methods have been proposed. We restrict ourselves to the more recent deep learning based approaches since they are more related to our work.
|
| 33 |
+
|
| 34 |
+
A large body of the works on hand pose estimation operate on depth input, which greatly reduces the depth ambiguity of the task. Deephand proposes a ConvNet model with an additional matrix completion algorithm to retrieve the actual poses [34]. Volumetric representations have recently been adopted to better encode the depth image [7,8]. The volumetric representation is projected to multiple views and then processed by several 2D ConvNets followed by fusion in [7]. Rather than tedious projections to multiple views, a 3D ConvNet is directly
|
| 35 |
+
|
| 36 |
+
introduced to infer the 3D position from the volumetric representations [8]. This line of work is further summarized in [9], in which the completeness of the 3D hand surface is leveraged as additional supervision. Rather than volumetric representations, the skeleton annotation can also be represented as dense pixel-wise labels [37]. The predicted dense estimations are then converted back to 3D coordinates with a vote-casting mechanism. Recently, self-supervised methods have also been explored on a mixture of synthetic and unlabelled data, using approximate depth and kinematic feasibility as weak supervision [36].
|
| 37 |
+
|
| 38 |
+
Rather than performing pose estimation on depth data, we focus more on works with RGB inputs, which are often less restricted in real-world applications. Zimmermann and Brox proposed a multi-stage network which sequentially performs hand segmentation, localization, and 2D and 3D pose estimation [46]. Similar to the depth-based methods, depth regularization was employed to enable weakly supervised learning [2]. Instead of regressing the joint positions independently, a kinematic model can be naturally integrated into the model to yield anatomically plausible results [26]. A latent 2.5D representation is introduced in [16], where the ConvNet also learns the implicit depth map of the entire palm. Numerous graphical models have also been proposed to better handle the joint relationships [3, 22]. Spatial dependencies and temporal consistencies can be modeled explicitly with graph neural nets [3] and can further boost the quality of estimated features [22] from hourglass models [27]. Another line of works reconstructs the shape and the pose of hands at the same time [1, 10, 25, 42, 45], in which either a hand mesh model [25, 33] or a generative GNN [45] is leveraged to map the low-dimensional hand pose & shape manifold to full 3D meshes.
|
| 39 |
+
|
| 40 |
+
Despite all the success in accurate hand pose estimation, we argue that the efficiency problem is also of vital importance, especially for AR/VR [28] and mobile devices [11], where resources are often limited. To harvest the redundancy present in consecutive frames, we propose an adaptive dynamic gate to efficiently switch between a lightweight pose estimator and a computationally heavy pose estimator for 3D hand pose estimation from sequences of frames.
|
| 41 |
+
|
| 42 |
+
Computationally efficient architectures. Recent progress has shown that the computation efficiency of neural network models can be improved in various ways. Neural network pruning was first realized using second-order derivatives [13, 19] and then evolved into pruning weights with relatively small magnitude [12]. Different from pruning techniques operated on fully trained models [12, 13, 19], recent developments reveal that pruning while training often results in better performance. This was achieved by enforcing additional losses (the $L1$ norm [23], Group LASSO [39], or $L0$ norm approximations [24]) during training. Other innovative ideas include specially designed architectures for high-efficiency computing [15, 44] and network quantization [4, 5, 20, 31].
|
| 43 |
+
|
| 44 |
+
In videos, consecutive frames are often quite similar and strongly co-dependent, which leaves ample room for efficiency optimization. Recently, various works have been developed to improve the computation efficiency of video classification [18,29,38,40]. Leveraging the fact that most of the computationally expensive layers (without activations) are linear and that sparse feature updates are more efficient,
|
| 45 |
+
|
| 46 |
+
a recurrent residual model was introduced [29] to incur a minimal amount of feature updates between consecutive frames. Hierarchical coarse-to-fine architectures have also been introduced for more efficient video inference [40]. Recently, RL frameworks have been adopted to learn efficient sampling agents that filter out salient parts/frames from videos for fast recognition [18,41].
|
| 47 |
+
|
| 48 |
+
In this work, we address the problem of dense hand pose estimation from video sequences, where we need to derive the corresponding pose for each individual frame. We take advantage of the fact that, most of the time, when the motion of the hand is not extreme and the hand pose is not severely occluded, the 3D hand pose can be safely derived from the temporal context. We thus propose a novel Gaussian kernel based Adaptive Dynamic Gate module that explicitly measures the necessity of computing fine features with a costly model, which significantly reduces the total amount of computation in general. Our scheme is also orthogonal to many of the aforementioned methods, such as the pruning methods, which leaves the potential to further boost the efficiency.
|
| 49 |
+
|
| 50 |
+
# 3 Method
|
| 51 |
+
|
| 52 |
+
# 3.1 Overview
|
| 53 |
+
|
| 54 |
+
Given a sequence of video frames $\{I^t\}_{t=1}^T$ , our task is to infer the 3D pose $\mathbf{P}^t = \{P_k^t\}_{k=1}^K$ of the hand at each frame $t$ , where $K$ denotes the number of hand joints, and $P_k^t$ denotes the 3D position of the joint $k$ at frame $t$ .
|
| 55 |
+
|
| 56 |
+
The overall pipeline of our proposed ACE network is illustrated in Fig. 1. In our method, at each time step, either a less accurate yet computationally light model or an accurate but computationally heavy model can be selected as the pose encoder for the RGB input. The features from either model can be fed into an LSTM to refine the inferred features and the estimated pose based on temporal coherence. To reduce the computation cost, inspired by the idea that the temporal context can provide sufficient information when the motion of the target hand is slow or the pose is less challenging, we propose a novel Gaussian kernel based gate module as the key component of our ACE network, which compares the temporal context information provided by the LSTM model with the coarse features computed by the light encoder to assess the necessity of extracting fine features with the heavier encoder at the current time step. Below we introduce each component in more detail; a sketch of the per-frame control flow follows.
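As a rough illustration of this control flow, the sketch below strings the pieces together for a single sequence. The modules are tiny stand-ins (simple linear layers and a placeholder gate, not the hourglass encoders or the Gaussian kernel gate introduced later), and the gate decision is taken per batch for simplicity; only the coarse-vs-fine switching logic follows the description above.

```python
# Minimal sketch of the ACE per-frame control flow (not the authors' exact code).
import torch
import torch.nn as nn

FEAT, HID, JOINTS = 64, 256, 21

coarse_enc = nn.Linear(3 * 64 * 64, FEAT)      # light encoder on small input (stand-in)
fine_enc = nn.Linear(3 * 256 * 256, FEAT)      # heavy encoder on large input (stand-in)
lstm = nn.LSTMCell(FEAT, HID)
pose_head = nn.Linear(HID, 3 * JOINTS)

def gate_coarse_score(h_prev, f_coarse):
    """Placeholder for the Gaussian-kernel gate: returns G_coarse in (0, 1)."""
    return torch.sigmoid((h_prev[:, :FEAT] * f_coarse).mean(dim=1, keepdim=True))

def ace_forward(frames_small, frames_large, lam=0.5):
    B, T = frames_small.shape[:2]
    h, c = torch.zeros(B, HID), torch.zeros(B, HID)
    poses = []
    for t in range(T):
        f_coarse = coarse_enc(frames_small[:, t].flatten(1))
        g_coarse = gate_coarse_score(h, f_coarse)
        if (1 - g_coarse).mean() > lam:          # fine features deemed necessary
            f_fine = fine_enc(frames_large[:, t].flatten(1))
            feat = g_coarse * f_coarse + (1 - g_coarse) * f_fine
        else:                                    # coarse features are sufficient
            feat = f_coarse
        h, c = lstm(feat, (h, c))                # temporal refinement
        poses.append(pose_head(h).view(B, JOINTS, 3))
    return torch.stack(poses, dim=1)             # (B, T, 21, 3)

out = ace_forward(torch.randn(2, 8, 3, 64, 64), torch.randn(2, 8, 3, 256, 256))
print(out.shape)  # torch.Size([2, 8, 21, 3])
```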
|
| 57 |
+
|
| 58 |
+
# 3.2 Single Frame Hand Pose Estimator
|
| 59 |
+
|
| 60 |
+
We first introduce our two base pose encoders, a coarse pose encoder and a fine pose encoder, which have significantly different computation profiles for a single frame. Both models are constructed with the state-of-the-art hourglass (HG) network [27]. Furthermore, as illustrated in Fig. 2a, we augment it to directly regress the hand joint coordinates $\mathbf{P}^t$ via a ConvNet from the heat map of joint probabilities $\mathbf{H}^t = \{H_k^t\}_{k=1}^K$ , the output feature map from the HG, as well as feature maps from early
|
| 61 |
+
|
| 62 |
+

|
| 63 |
+
(a) Architecture of pose encoder
|
| 64 |
+
(b) Schema of the Gaussian Kernel Gate.
|
| 65 |
+
Fig. 2: Graphical illustration of the Single frame pose encoder and the Gaussian Kernel Based Gate.
|
| 66 |
+
|
| 67 |
+

|
| 68 |
+
|
| 69 |
+
downsampling layers. The complexity of the models is adjusted by changing the number of convolutional layers and the size of the inputs of the hourglass module. We denote the light-weight coarse pose encoder model as $\mathbf{M}_{\mathrm{Coarse - Enc}}$ , and the heavy model as $\mathbf{M}_{\mathrm{Fine - Enc}}$ . These encoders extract pose related features, $\mathbf{F}_{\mathrm{coarse}}^t$ and $\mathbf{F}_{\mathrm{fine}}^t$ , based on the input frame $I^t$ , as follows:
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
\mathbf{F}_{\text{coarse}}^{t} = \mathbf{M}_{\text{Coarse-Enc}}\left(I^{t}\right) \tag{1}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
$$
|
| 76 |
+
\mathbf{F}_{\text{fine}}^{t} = \mathbf{M}_{\text{Fine-Enc}}\left(I^{t}\right) \tag{2}
|
| 77 |
+
$$
|
| 78 |
+
|
| 79 |
+
Note that in our final ACE network with the gate mechanism, we compute the coarse features $\left(\mathbf{F}_{\mathrm{coarse}}^{t}\right)$ for each frame, while the fine features $\left(\mathbf{F}_{\mathrm{fine}}^{t}\right)$ are computed only for a fraction of the time steps, thus reducing the overall computation cost.
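The sketch below gives a rough single-frame pose encoder in the spirit of Fig. 2a. It is not the authors' hourglass implementation: the backbone is a toy ConvNet, and only the overall interface (per-joint heatmaps plus a regression head over heatmaps and features, with the coarse/fine variants differing in channel width and input size) follows the text.

```python
# Toy single-frame pose encoder: heatmaps + 3D coordinate regression (a sketch).
import torch
import torch.nn as nn

class SimplePoseEncoder(nn.Module):
    def __init__(self, n_joints=21, width=32):
        super().__init__()
        self.n_joints = n_joints
        self.backbone = nn.Sequential(
            nn.Conv2d(3, width, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(width, width, 3, stride=2, padding=1), nn.ReLU(),
        )
        self.heatmap_head = nn.Conv2d(width, n_joints, 1)
        self.regressor = nn.Sequential(
            nn.Conv2d(width + n_joints, width, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(width, 3 * n_joints),
        )

    def forward(self, img):
        feat = self.backbone(img)                          # downsampled feature map
        heatmaps = self.heatmap_head(feat)                 # per-joint 2D heatmaps
        joints = self.regressor(torch.cat([feat, heatmaps], dim=1))
        return heatmaps, joints.view(-1, self.n_joints, 3), feat

coarse = SimplePoseEncoder(width=32)                       # light variant, 64x64 input
fine = SimplePoseEncoder(width=64)                         # heavy variant, 256x256 input
hm, joints, feat = coarse(torch.randn(1, 3, 64, 64))
print(hm.shape, joints.shape)                              # (1, 21, 16, 16) (1, 21, 3)
```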
|
| 80 |
+
|
| 81 |
+
# 3.3 Pose Refinement Recurrent Model
|
| 82 |
+
|
| 83 |
+
In pose estimation from videos, a natural idea is to exploit the temporal context information for more smooth and accurate estimations, i.e., instead of solely relying on the information of the current frame, historical context can also be incorporated to reduce the ambiguities in pose estimation [21,30,32,35]. Thus we introduce a LSTM model to refine the estimations from the hourglass modules. The LSTM module, denoted as $\mathrm{M_{LSTM}}$ , takes the sequential features from pose encoder as inputs, and refine these input features using the temporal context.
|
| 84 |
+
|
| 85 |
+
More formally, at the $t$-th time step, the LSTM takes the pose-related features of the current frame as the input, and infers the 3D pose $(\mathbf{P}^t)$ for the current step based on the hidden state, as follows:
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
h^{t}, c^{t} = \mathrm{M}_{\text{LSTM}}\left(\mathbf{F}_{\text{frame}}^{t}, \left(h^{t-1}, c^{t-1}\right)\right) \tag{3}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\mathbf{P}^{t} = W_{\text{pose}}^{\top} h^{t} + b_{\text{pose}} \tag{4}
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
where $h^t$ and $c^t$ are the hidden state and cell state of the LSTM module, respectively. $W_{\mathrm{pose}}$ and $b_{\mathrm{pose}}$ are the parameters of the output linear layer for regressing the final 3D hand joint coordinates. Here we denote the features from the single-frame pose estimator as $\mathbf{F}_{\mathrm{frame}}^t$ , which is controlled by our adaptive dynamic gate model (introduced next) and can be either the coarse features $\mathbf{F}_{\mathrm{coarse}}^t$ or a weighted combination of the coarse features $\mathbf{F}_{\mathrm{coarse}}^t$ and fine features $\mathbf{F}_{\mathrm{fine}}^t$ .
|
| 96 |
+
|
| 97 |
+
# 3.4 Adaptive Dynamic Gate Model
|
| 98 |
+
|
| 99 |
+
Recall that when humans perform activities with their hands, the motion speed and the self-occlusion status of the hands vary across different activities and different frames. In some actions, such as "high five", the palm is often less occluded and the pose pattern is relatively static and simple, while in other actions, such as "open soda can" and "handshake", the human hand is often under severe occlusion and exhibits rich and delicate finger movements.
|
| 100 |
+
|
| 101 |
+
This inspires us to rely more on the temporal context information (and only use a brief glimpse of the current frame with the coarse pose encoder) for pose inference when the pose pattern is simple, stable, and can be safely derived from the temporal context. However, if the temporal context is not consistent with the current frame information, either the current frame is challenging for pose inference (i.e., the pose is inaccurately estimated by the coarse pose encoder while the temporal context is reliable) or it differs significantly from previous frames due to large motion (i.e., the temporal context becomes unstable); in either case, the network needs to examine the current frame more carefully with the fine pose encoder. Therefore, we propose an adaptive dynamic gate model in our ACE framework to dynamically determine the granularity of the features needed for pose estimation with our LSTM model.
|
| 102 |
+
|
| 103 |
+
Assuming the motion of the hand is smooth, the first and second-order statistics of the hand's status over different frames provide useful context information for estimating the evolution of the hand pose over time. Accordingly, we compute the first-order difference $(h^{t'})$ and second-order difference $(h^{t''})$ over the history hidden states of the LSTM to estimate the motion status information of the hand pose as:
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
h^{t^{\prime}} = h^{t} - h^{t-1} \tag{5}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
h^{t^{\prime\prime}} = \left(h^{t} - h^{t-1}\right) - \left(h^{t-1} - h^{t-2}\right) \tag{6}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
At time step $t$ , we feed the hidden state of the previous frame $(h^{t - 1})$ , as well as its first and second-order information ($h^{t - 1^{\prime}}$ and $h^{t - 1^{\prime \prime}}$), as the history context information to our gate module, which then estimates the pose feature information of the current frame $(t)$ with a sub-network, as follows:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
\widetilde{\mathbf{F}}^{t} = W_{g}^{\top} \left[ h^{t-1}, h^{t-1^{\prime}}, h^{t-1^{\prime\prime}} \right] + b_{g} \tag{7}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
We then measure the similarity of the predicted pose feature information $(\widetilde{\mathbf{F}}^t)$ , which is estimated entirely from the temporal context of previous frames, with the pose features $(\mathbf{F}_{\mathrm{coarse}}^t)$ extracted by the coarse pose encoder solely from the current frame $I^t$ , via a Gaussian kernel with a fixed spread $\omega$ as follows:
|
| 120 |
+
|
| 121 |
+
$$
|
| 122 |
+
\mathcal{G}_{\mathrm{coarse}}^{t} = \left[ \exp\left(- \frac{(\widetilde{\mathbf{F}}^{t} - \mathbf{F}_{\mathrm{coarse}}^{t})^{2}}{\omega^{2}}\right) \right]_{\mathrm{Mean}} \tag{8}
|
| 123 |
+
$$
|
| 124 |
+
|
| 125 |
+
This Gaussian kernel based gate outputs a mean value $(\mathcal{G}_{\mathrm{coarse}}^{t})$ between 0 and 1, which provides an explicit measure of the consistency and similarity between $\widetilde{\mathbf{F}}^t$ and $\mathbf{F}_{\mathrm{coarse}}^t$ and thus indicates the pose estimation difficulty of the current frame, i.e., a higher $\mathcal{G}_{\mathrm{coarse}}^{t}$ value indicates a simple pose and stable movement of the hand.
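A minimal sketch of this gate (Eqs. 5-8) is given below, assuming hidden states of dimension 256 and pose features of dimension 64; the linear `predictor` stands in for $W_g, b_g$ in Eq. (7), and the layer sizes are illustrative rather than the paper's exact configuration.

```python
# Gaussian-kernel gate sketch: temporal-context prediction vs. coarse features.
import torch
import torch.nn as nn

HID, FEAT, OMEGA = 256, 64, 0.1

predictor = nn.Linear(3 * HID, FEAT)             # W_g, b_g in Eq. (7)

def gaussian_gate(h_t1, h_t2, h_t3, f_coarse, omega=OMEGA):
    """h_t1, h_t2, h_t3: hidden states at t-1, t-2, t-3; f_coarse from Eq. (1)."""
    d1 = h_t1 - h_t2                              # first-order difference, Eq. (5)
    d2 = (h_t1 - h_t2) - (h_t2 - h_t3)            # second-order difference, Eq. (6)
    f_pred = predictor(torch.cat([h_t1, d1, d2], dim=-1))              # Eq. (7)
    g_coarse = torch.exp(-(f_pred - f_coarse) ** 2 / omega ** 2).mean(dim=-1)  # Eq. (8)
    return g_coarse                               # high value -> coarse features suffice

g = gaussian_gate(torch.randn(1, HID), torch.randn(1, HID),
                  torch.randn(1, HID), torch.randn(1, FEAT))
print(g)  # one score per sample, in (0, 1]
```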
|
| 126 |
+
|
| 127 |
+
If the hand pose changes substantially at this step and the pose features become unpredictable from the temporal context, or the pose in the current frame becomes challenging, making the pose features $\left(\mathbf{F}_{\mathrm{coarse}}^{t}\right)$ extracted by the coarse pose encoder unreliable and therefore inconsistent with the temporal context, then the discrepancy between $\widetilde{\mathbf{F}}^t$ and $\mathbf{F}_{\mathrm{coarse}}^t$ grows larger, and our Gaussian gate outputs a relatively small value close to 0.
|
| 128 |
+
|
| 129 |
+
With an estimate of the difficulty of the current frame, we then decide whether we need to employ the more powerful fine pose encoder to carefully examine the input frame at the current time step. Specifically, we use $\mathcal{G}_{\mathrm{coarse}}^t$ from our Gaussian gate as the confidence score for staying with the coarse pose encoder at the current time step, and naturally $\mathcal{G}_{\mathrm{fine}}^t = 1 - \mathcal{G}_{\mathrm{coarse}}^t$ becomes the score indicating that we need to use the more powerful fine pose encoder.
|
| 130 |
+
|
| 131 |
+
A straightforward switching mechanism would be to directly follow the option with the larger confidence score, i.e., if $\mathcal{G}_{\mathrm{fine}}^t > \mathcal{G}_{\mathrm{coarse}}^t$ , we involve the fine pose encoder for the current frame. This switching is, however, a discrete operation that is not differentiable. To facilitate the network training, following the recent work on reparameterization of the categorical distribution [17], we reparameterize the Bernoulli distribution with the Gumbel-Softmax trick, which introduces a simple yet efficient way to draw samples $z$ from a categorical distribution parameterized by the unnormalized probability $\pi$ . Specifically, we can approximately sample from $\pi_i$ following:
|
| 132 |
+
|
| 133 |
+
$$
|
| 134 |
+
z_{i} = \underset{i \in \mathcal{M}}{\operatorname{argmax}} \left[ g_{i} + \log \pi_{i} \right] \quad \mathcal{M} = \{\text{coarse}, \text{fine}\} \tag{9}
|
| 135 |
+
$$
|
| 136 |
+
|
| 137 |
+
where at each time step $t$ , we set $\pi_i^t = -\log (1 - \mathcal{G}_i^t)$ , which is the unnormalized version of the predicted probability $\mathcal{G}_i^t$ in the Bernoulli distribution, $\mathcal{G}_i^t \in \{\mathcal{G}_{\mathrm{coarse}}^t, \mathcal{G}_{\mathrm{fine}}^t\}$ . $g_i$ is the Gumbel noise; we draw samples from the Gumbel distribution following $g_i = -\log (-\log (u_i))$ , where the $u_i$ are i.i.d. samples drawn from Uniform(0,1). We further relax the non-differentiable argmax operation with softmax to facilitate backpropagation. The final sampled probability is obtained with:
|
| 138 |
+
|
| 139 |
+
$$
|
| 140 |
+
z_{i}^{t} = \frac{\exp\left(\left(g_{i} + \log \pi_{i}^{t}\right) / \tau\right)}{\sum_{j} \exp\left(\left(g_{j} + \log \pi_{j}^{t}\right) / \tau\right)} \quad \text{for } i, j \in \{\text{coarse}, \text{fine}\} \tag{10}
|
| 141 |
+
$$
|
| 142 |
+
|
| 143 |
+
where $\tau$ is the temperature hyper-parameter, which controls the discreteness of the sampling mechanism. When $\tau \to \infty$ , the samples approach uniform sampling, and when $\tau \to 0$ , the operation approaches argmax while still allowing the gradient to be back-propagated.
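A hedged sketch of this relaxation (Eqs. 9-10) is shown below; the clamping constant `eps` is a numerical-stability detail added here and is not part of the formulation in the text.

```python
# Gumbel-Softmax relaxation of the binary coarse/fine decision.
import torch

def gumbel_softmax_gate(g_coarse, tau=1.0, eps=1e-6):
    """g_coarse: Gaussian-gate scores in (0, 1). Returns (z_coarse, z_fine)."""
    probs = torch.stack([g_coarse, 1.0 - g_coarse], dim=-1).clamp(eps, 1.0 - eps)
    log_pi = torch.log(-torch.log(1.0 - probs))        # log pi_i with pi_i = -log(1 - G_i)
    u = torch.rand_like(log_pi).clamp(eps, 1.0 - eps)
    gumbel = -torch.log(-torch.log(u))                 # Gumbel(0, 1) noise
    z = torch.softmax((gumbel + log_pi) / tau, dim=-1) # Eq. (10), relaxed argmax
    return z[..., 0], z[..., 1]                        # z_coarse, z_fine

z_c, z_f = gumbel_softmax_gate(torch.tensor([0.9, 0.2]), tau=0.5)
print(z_c, z_f)  # soft one-hot weights that tend to favour coarse / fine respectively
```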
|
| 144 |
+
|
| 145 |
+
During training, we obtain the confidence scores for taking a rough glimpse of the input frame via the coarse pose encoder or computing carefully derived features
|
| 146 |
+
|
| 147 |
+
with the fine encoder, via the Gumbel-SoftMax trick following Eq. (10), and then combine the coarse features $\mathbf{F}_{\mathrm{coarse}}^{t}$ and fine features $\mathbf{F}_{\mathrm{fine}}^{t}$ as:
|
| 148 |
+
|
| 149 |
+
$$
|
| 150 |
+
\mathbf{F}_{\text{weighted}}^{t} = z_{\text{coarse}}^{t} \mathbf{F}_{\text{coarse}}^{t} + z_{\text{fine}}^{t} \mathbf{F}_{\text{fine}}^{t} \tag{11}
|
| 151 |
+
$$
|
| 152 |
+
|
| 153 |
+
During evaluation, we omit the sampling process and directly use the coarse features when $\mathcal{G}_{\mathrm{fine}}^t \leq \lambda$ , and use the weighted-average features, with weights $\mathcal{G}_{\mathrm{fine}}^t$ and $\mathcal{G}_{\mathrm{coarse}}^t$ , when $\mathcal{G}_{\mathrm{fine}}^t > \lambda$ . By default, $\lambda$ is set to 0.5, which essentially follows the larger probability. This threshold $\lambda$ can also be adjusted to balance accuracy and efficiency during inference.
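The small sketch below contrasts the training-time mixing of Eq. (11) with the inference-time rule just described; `fine_encoder` is a stand-in callable, and the feature dimension of 64 is only illustrative.

```python
# Training-time soft mixing (Eq. 11) vs. inference-time thresholded switching.
import torch

def frame_features_train(f_coarse, f_fine, z_coarse, z_fine):
    # Eq. (11): soft combination weighted by the Gumbel-Softmax samples.
    return z_coarse.unsqueeze(-1) * f_coarse + z_fine.unsqueeze(-1) * f_fine

def frame_features_eval(f_coarse, g_coarse, fine_encoder, frame, lam=0.5):
    g_fine = 1.0 - g_coarse
    if g_fine <= lam:                     # coarse features deemed sufficient
        return f_coarse
    f_fine = fine_encoder(frame)          # fine encoder invoked only in this branch
    return g_coarse * f_coarse + g_fine * f_fine

# Toy usage: the "fine encoder" is just a stand-in function returning random features.
f_c = torch.randn(64)
feat = frame_features_eval(f_c, g_coarse=torch.tensor(0.3),
                           fine_encoder=lambda x: torch.randn(64),
                           frame=None, lam=0.5)
print(feat.shape)  # torch.Size([64])
```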
|
| 154 |
+
|
| 155 |
+
# 3.5 Training Strategy and Losses
|
| 156 |
+
|
| 157 |
+
We employ a two-step training strategy, in which we separately train the single-frame coarse pose encoder and fine pose encoder first, and then fine-tune them during the training of the LSTM pose refinement module and the adaptive gate module. To train the single frame pose encoder, we use the combination of 2D heat map regression loss and 3D coordinate regression loss:
|
| 158 |
+
|
| 159 |
+
$$
|
| 160 |
+
\mathcal{L}_{\text{single}} = \frac{1}{K} \sum_{k=1}^{K} \left(\widetilde{H}^{k} - H^{k}\right)^{2} + \beta \cdot \operatorname{SmoothL1}(\widetilde{\mathbf{P}}, \mathbf{P}) \tag{12}
|
| 161 |
+
$$
|
| 162 |
+
|
| 163 |
+
where $H^{k}$ corresponds to the 2D heat map of joint $k$ and $\mathbf{P}$ is the 3D joint coordinates. We use the mean squared loss for the heat maps and the Smooth L1 loss for the 3D coordinates; the latter has a squared term when the absolute element-wise difference is below 1 (otherwise it is essentially an L1 term).
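A direct sketch of this single-frame loss (Eq. 12) is given below; the value of the weight $\beta$ is not stated in this section, so the default used here is only a placeholder.

```python
# Single-frame loss: heatmap MSE plus SmoothL1 on 3D coordinates (Eq. 12).
import torch
import torch.nn.functional as F

def single_frame_loss(pred_heatmaps, gt_heatmaps, pred_joints, gt_joints, beta=1.0):
    """pred/gt heatmaps: (B, K, H, W); pred/gt joints: (B, K, 3)."""
    heatmap_loss = F.mse_loss(pred_heatmaps, gt_heatmaps)
    coord_loss = F.smooth_l1_loss(pred_joints, gt_joints)  # squared below 1, L1 above
    return heatmap_loss + beta * coord_loss

loss = single_frame_loss(torch.randn(2, 21, 64, 64), torch.randn(2, 21, 64, 64),
                         torch.randn(2, 21, 3), torch.randn(2, 21, 3))
print(loss)
```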
|
| 164 |
+
|
| 165 |
+
The single-frame pose estimator is then fine-tuned when training the pose refinement LSTM and the gate module. To prevent the gate module from constantly using the fine features, we set an expected activation frequency $(\gamma_{g})$ for the gate and optimize the mean squared error between the mean probability of using the fine encoder and this expected activation frequency. Specifically, given the expected activation rate $\gamma_{g}$ , we define the loss as:
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\mathcal{L}_{\text{whole}} = \sum_{d \in \mathcal{S}} \operatorname{SmoothL1}(\widetilde{\mathbf{P}}_{d}, \mathbf{P}_{d}) + \delta \cdot \mathbb{E}_{z^{t} \sim \text{Bernoulli}\left(\mathcal{G}^{t} \mid \theta_{g}\right)} \left(\frac{1}{T} \sum_{t=1}^{T} z_{\text{fine}}^{t} - \gamma_{g}\right)^{2} \tag{13}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
where $\mathcal{S} = \{\text{coarse, fine, LSTM}\}$ and $z_{\text{fine}}^{t}$ is the sampled probability based on the prediction $\mathcal{G}^t$ given by the adaptive dynamic gate model. $\theta_g$ denotes the parameters of the gate and $\delta$ balances accuracy against efficiency.
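The following sketch mirrors Eq. (13): SmoothL1 pose losses over the coarse, fine, and LSTM branches plus a penalty keeping the average fine-encoder usage near $\gamma_g$. The value of $\delta$ and the replacement of the expectation by the sampled probabilities of a single sequence are assumptions made for illustration.

```python
# Whole-model loss: multi-branch pose loss plus fine-encoder usage penalty (Eq. 13).
import torch
import torch.nn.functional as F

def whole_loss(pred_poses, gt_pose, z_fine_per_step, gamma_g=0.05, delta=1.0):
    """pred_poses: dict with 'coarse', 'fine', 'lstm' predictions, each (B, K, 3);
    z_fine_per_step: (T,) sampled fine-usage probabilities over the sequence."""
    pose_loss = sum(F.smooth_l1_loss(p, gt_pose) for p in pred_poses.values())
    usage_loss = (z_fine_per_step.mean() - gamma_g) ** 2
    return pose_loss + delta * usage_loss

gt = torch.randn(2, 21, 3)
preds = {k: torch.randn(2, 21, 3) for k in ("coarse", "fine", "lstm")}
print(whole_loss(preds, gt, torch.rand(16)))
```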
|
| 172 |
+
|
| 173 |
+
# 4 Experiments
|
| 174 |
+
|
| 175 |
+
# 4.1 Datasets and Metrics
|
| 176 |
+
|
| 177 |
+
We evaluate our ACE network on two publicly available datasets, namely the Stereo Tracking Benchmark (STB) [43] dataset and the First-Person Hand Action (FPHA) [6] dataset.
|
| 178 |
+
|
| 179 |
+
Stereo Tracking Benchmark (STB) provides 2D and 3D pose annotations of the 21 hand keypoints for 12 stereo video sequences. Each sequence consists of 1500 RGB frames for both the left and right camera. In total, the dataset consists of 18000 frames with a resolution of $640 \times 480$ . Within the dataset, 6 different backgrounds are captured, with each background appearing in two video sequences. Following the setting of [46], we separate the dataset into a training set of 10 videos (15000 frames) and an evaluation set of 2 video sequences (3000 frames).
|
| 180 |
+
|
| 181 |
+
First-Person Hand Action (FPHA) contains video sequences for 45 different daily actions from 6 different subjects in egocentric views. In total, FPHA contains more than $100\mathrm{k}$ frames with a resolution of $1920 \times 1080$ . The ground truth is provided via a mo-cap system and derived with inverse kinematics. Similar to the STB dataset, 21 keypoints on the human hand are annotated. Interaction with 26 different objects is involved, which introduces additional challenges to hand pose estimation. We follow the official split of the dataset.
|
| 182 |
+
|
| 183 |
+
Metrics. We report the Percentage of Correct Keypoints (PCK) under $20~\mathrm{mm}$ and the Area Under the Curve (AUC) of the PCK under error thresholds from $20~\mathrm{mm}$ to $50~\mathrm{mm}$ for the STB dataset following [46], and from $0~\mathrm{mm}$ to $50~\mathrm{mm}$ for the FPHA dataset. We report average GFLOPs<sup>1</sup> per frame for speed comparison, which does not depend on the hardware configuration and thus provides a more objective evaluation.
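For concreteness, the sketch below computes these metrics from an array of per-joint 3D errors in millimetres; it is a straightforward interpretation of the definitions above rather than code from the paper.

```python
# PCK at a fixed threshold and AUC of the PCK curve over a threshold range.
import numpy as np

def pck(errors_mm, threshold):
    """Fraction of per-joint 3D errors below the threshold (in mm)."""
    return float((errors_mm < threshold).mean())

def pck_auc(errors_mm, lo=20.0, hi=50.0, steps=100):
    """Area under the PCK curve, normalized to [0, 1] over [lo, hi] mm."""
    thresholds = np.linspace(lo, hi, steps)
    pcks = [pck(errors_mm, t) for t in thresholds]
    return float(np.trapz(pcks, thresholds) / (hi - lo))

errors = np.abs(np.random.randn(1000)) * 15.0            # toy error distribution
print(pck(errors, 20.0), pck_auc(errors, 20.0, 50.0))    # STB-style 20-50 mm range
```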
|
| 184 |
+
|
| 185 |
+
# 4.2 Implementation Details
|
| 186 |
+
|
| 187 |
+
Although the proposed ACE module is theoretically compatible with different pose encoder architectures, we mainly evaluate it with the hourglass (HG) architecture [27] as it is widely used and works well in many existing works [22, 45]. Compared to the FPHA dataset, STB is less challenging as no hand-object interaction is involved. Therefore, different HG architectures are employed for the two datasets. For the STB dataset, the coarse pose encoder contains one hourglass module with 32 feature channels, while for the fine pose encoder we employ 64 channels. In addition to the different module configurations, the input images to the coarse and fine modules are set to $64 \times 64$ and $256 \times 256$ respectively, which greatly reduces the amount of computation. For the more challenging FPHA dataset, we keep the configuration of the fine pose encoder the same as for STB, while for the coarse pose encoder we double the input size to $128 \times 128$ . Please see the supplementary materials for more details of the pose encoders.
|
| 188 |
+
|
| 189 |
+
For the LSTM refinement module, we use one layer of LSTM with a hidden state dimension of 256. The hidden state and its order statistics are first mapped to a fixed dimension of 256 and then concatenated as the input to our adaptive Gaussian gate. During training, we set $\gamma_{g} = 0.05$ for STB, $\gamma_{g} = 0.01$ for FPHA, and $\omega = 0.1$ .
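The settings stated above can be summarized in the following configuration; the coarse channel width for FPHA is assumed unchanged from STB, since the text only mentions doubling its input size.

```python
# Configuration summary of the stated hyper-parameters (layer details are in the
# paper's supplementary material; the FPHA coarse channel width is an assumption).
CONFIG = {
    "STB": {
        "coarse": {"hg_channels": 32, "input_size": 64},
        "fine":   {"hg_channels": 64, "input_size": 256},
        "gamma_g": 0.05,
    },
    "FPHA": {
        "coarse": {"hg_channels": 32, "input_size": 128},
        "fine":   {"hg_channels": 64, "input_size": 256},
        "gamma_g": 0.01,
    },
    "lstm_hidden": 256,
    "gate_input_proj_dim": 256,
    "omega": 0.1,
}
```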
|
| 190 |
+
|
| 191 |
+
Table 1: Results of various models (vanilla single frame coarse/fine models and their variants considering temporal dynamics) for 3D hand pose estimation. Our adaptive model uses much less computation with minor accuracy drops.
|
| 192 |
+
|
| 193 |
+
<table><tr><td rowspan="2">Method</td><td colspan="3">STB</td><td colspan="3">FPHA</td></tr><tr><td>3D PCK20</td><td>AUC(20-50)</td><td>GFLOPs</td><td>3D PCK20</td><td>AUC(0-50)</td><td>GFLOPs</td></tr><tr><td>Coarse-HG</td><td>85.1%</td><td>0.946</td><td>0.28</td><td>72.6%</td><td>0.674</td><td>1.10</td></tr><tr><td>Fine-HG</td><td>96.3%</td><td>0.994</td><td>6.96</td><td>79.7%</td><td>0.714</td><td>6.96</td></tr><tr><td>Vanilla-LSTM-Coarse-HG</td><td>92.1%</td><td>0.973</td><td>0.28</td><td>78.9%</td><td>0.707</td><td>1.10</td></tr><tr><td>Vanilla-LSTM-Fine-HG</td><td>98.7%</td><td>0.997</td><td>6.96</td><td>83.9%</td><td>0.740</td><td>6.96</td></tr><tr><td>Vanilla-LSTM-Mix-HG</td><td>98.7%</td><td>0.997</td><td>7.24</td><td>83.1%</td><td>0.734</td><td>8.06</td></tr><tr><td>Adaptive-LSTM-Mix-HG</td><td>97.9%</td><td>0.996</td><td>1.56</td><td>82.9%</td><td>0.731</td><td>1.37</td></tr></table>
|
| 194 |
+
|
| 195 |
+
# 4.3 Main Results
|
| 196 |
+
|
| 197 |
+
We conduct extensive experiments to show the advantages of our proposed ACE framework for hand pose estimation from videos. We compare the accuracy and computation efficiency among different models and further visualize the prediction results of our model. To facilitate the understanding of the gate behaviour, we also present the frames selected for fine feature computation.
|
| 198 |
+
|
| 199 |
+

|
| 200 |
+
(a) STB dataset
|
| 201 |
+
|
| 202 |
+

|
| 203 |
+
(b) FPHA dataset
|
| 204 |
+
Fig. 3: Quantitative evaluations. We achieve state-of-the-art performance on STB, and outperform the existing methods on FPHA by a large margin.
|
| 205 |
+
|
| 206 |
+
Quantitative comparison. We present the comparison between our adaptive dynamic gate model and various baselines in Table 1, where Coarse-HG/Fine-HG indicates that the corresponding baseline pose encoder (hourglass structure) is employed to predict 3D joint coordinates frame by frame. For the Vanilla-LSTM variants, we take features from either the coarse pose encoder, the fine pose encoder, or averaged features from both encoders, and then feed them into an ordinary LSTM model without the gate module. The detailed results are in Table 1.
|
| 207 |
+
|
| 208 |
+
Table 2: Comparison of the computation cost with state-of-the-art methods on STB. Our method achieves higher AUC yet consumes significantly less computation.
|
| 209 |
+
|
| 210 |
+
<table><tr><td>Method</td><td>3D PCK20</td><td>AUC</td><td>GFLOPs</td></tr><tr><td>Z&B [46]</td><td>0.870</td><td>0.948</td><td>78.2</td></tr><tr><td>Liu et al. [22]</td><td>0.895</td><td>0.964</td><td>16.0</td></tr><tr><td>HAMR [45]</td><td>0.982</td><td>0.995</td><td>8.0</td></tr><tr><td>Cai et al. [3]</td><td>0.973</td><td>0.995</td><td>6.2</td></tr><tr><td>Ours</td><td>0.979</td><td>0.996</td><td>1.6</td></tr></table>
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
|
| 218 |
+

|
| 219 |
+
|
| 220 |
+

|
| 221 |
+
|
| 222 |
+

|
| 223 |
+
|
| 224 |
+

|
| 225 |
+
Fig. 4: Visualization of pose estimation. The top row shows input frames and the bottom row visualizes the predicted poses (red) and ground-truth poses (green).
|
| 226 |
+
|
| 227 |
+

|
| 228 |
+
|
| 229 |
+

|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
|
| 233 |
+

|
| 234 |
+
|
| 235 |
+

|
| 236 |
+
|
| 237 |
+
As shown in Table 1, our adaptive model obtains performance comparable to our designed baseline model "Vanilla-LSTM-Fine-HG", which constantly takes the fine features for pose estimation, at less than $1/4$ of the computation cost, by computing the fine features only on selected frames. Besides, our proposed method obtains state-of-the-art performance on both benchmarks, as presented in Fig. 3a and 3b, where we plot the area under the curve (AUC) of the percentage of correct keypoints (PCK) over various thresholds.
|
| 238 |
+
|
| 239 |
+
In addition to the comparison in terms of accuracy, we further evaluate the speed of our model compared to the existing art. The detailed comparison is presented in Table 2. As the FPHA dataset is relatively new and fewer works report their performance on it, we mainly conduct this evaluation on the STB dataset.
|
| 240 |
+
|
| 241 |
+
Visualization. To verify that our model accurately derives poses from RGB images, we visualize a few predictions of our network in Fig. 4. Our model is capable of inferring precise poses from RGB input images even under severe occlusion and challenging lighting conditions.
|
| 242 |
+
|
| 243 |
+
We further look into the mechanism of the Gaussian kernel based gate by visualizing a few test sequences in Fig. 5. In (a), the fine pose encoder activates less often for the straightforward poses while it is used more densely for the challenging poses close to the end of the sequence. In (b), the gate tends to invoke the fine pose encoder more often when occlusion is present (1st half vs. 2nd half), while in (c) and (d), when large motion is present (see the rightmost blurry frames of both sequences), the gate chooses to examine the frame more closely with the fine pose encoder. These observations are in line with our motivation of invoking the computationally heavy pose encoder only when necessary.
|
| 244 |
+
|
| 245 |
+

|
| 246 |
+
Fig. 5: Visualization of frames selected (marked with yellow boxes) to adopt the fine pose encoder. The fine encoder activates sparsely when the pose is straightforward, while it is frequently used when the pose becomes challenging (left part vs. right part of (a)). When the hand pose becomes less stable (see the rightmost part of (c) and (d)) or occlusions become more severe (see the rightmost part of (b)), our model tends to use the fine encoder more frequently. The frequency of invoking the fine pose encoder is much lower when the poses are relatively stable.
|
| 247 |
+
|
| 248 |
+
Table 3: Evaluation of different gate architectures on the STB dataset. $P_{\mathrm{fine}}$ denotes the frequency of using the fine pose encoder. Our Gaussian kernel gate achieves the highest accuracy at the lowest computation cost.
|
| 249 |
+
|
| 250 |
+
<table><tr><td>Gate</td><td>γg</td><td>3D PCK20</td><td>AUC</td><td>GFLOPs</td><td>Pfine</td></tr><tr><td>Neural Gate</td><td>0.1</td><td>0.981</td><td>0.995</td><td>2.54</td><td>0.32</td></tr><tr><td>Neural Temporal Gate</td><td>0.1</td><td>0.977</td><td>0.996</td><td>2.20</td><td>0.43</td></tr><tr><td>Gaussian Kernel Gate</td><td>0.1</td><td>0.983</td><td>0.997</td><td>2.09</td><td>0.26</td></tr></table>
|
| 251 |
+
|
| 252 |
+
# 4.4 Ablation study
|
| 253 |
+
|
| 254 |
+
We first study the design choice of the Gaussian kernel based adaptive gate. Instead of explicitly parameterizing the difference with a Gaussian function, a straightforward alternative would be to directly predict the probability via a linear module. This linear module takes the hidden state, the 1st and 2nd order statistics, and the coarse features as input and yields the probability of introducing the fine module. This model is referred to as the Neural Gate. Going one step further, although the coarse pose encoder is relatively light, we could still gain efficiency by avoiding it and deriving the probability solely from the temporal context. Therefore, we also evaluate a model that makes decisions based on the temporal context only, referred to as the Neural Temporal Gate. The detailed results are in Table 3.
|
| 255 |
+
|
| 256 |
+
As shown in Table 3, the different gates offer similar performance, while the Gaussian kernel gate is slightly more accurate and more efficient. We further investigate the impact of a few hyper-parameters on the overall performance. Specifically, we look into $\gamma_{g}$ in Table 4 and $\lambda$ in Table 5, which can be tweaked to adjust the rate of computing fine features before and after training, respectively.
|
| 257 |
+
|
| 258 |
+
When varying $\gamma_{g}$ from 0.3 to 0.01, the accuracy of the models does not vary much while the frequency of using fine features drops from 0.43 to 0.15, which suggests that the large amount of redundancy in consecutive frames is exploited by
|
| 259 |
+
|
| 260 |
+
Table 4: Evaluation of different $\gamma_{g}$ values for network training on the STB dataset. As the expected usage of the fine pose encoder drops, the computation cost falls significantly, while the accuracy decreases only marginally.
|
| 261 |
+
|
| 262 |
+
<table><tr><td>γg</td><td>3D PCK20</td><td>AUC</td><td>GFLOPs</td><td>Pfine</td></tr><tr><td>1</td><td>0.987</td><td>0.9978</td><td>6.96</td><td>1</td></tr><tr><td>0.3</td><td>0.984</td><td>0.9972</td><td>3.30</td><td>0.43</td></tr><tr><td>0.2</td><td>0.985</td><td>0.9973</td><td>2.34</td><td>0.29</td></tr><tr><td>0.1</td><td>0.983</td><td>0.9970</td><td>2.09</td><td>0.26</td></tr><tr><td>0.05</td><td>0.979</td><td>0.9962</td><td>1.56</td><td>0.18</td></tr><tr><td>0.01</td><td>0.977</td><td>0.9956</td><td>1.37</td><td>0.15</td></tr><tr><td>0.001</td><td>0.955</td><td>0.9897</td><td>1.43</td><td>0.16</td></tr></table>
|
| 263 |
+
|
| 264 |
+
Table 5: Evaluation of different $\lambda$ ( $\gamma_{g} = 0.1$ ) during testing on the STB dataset. For the same trained model, with higher $\lambda$ , fine encoder is used less often, i.e., we can configure $\lambda$ to balance the trade-off between the efficiency and accuracy.
|
| 265 |
+
|
| 266 |
+
<table><tr><td>λ</td><td>3D PCK20</td><td>AUC</td><td>GFLOPs</td><td>Pfine</td></tr><tr><td>0.1</td><td>0.987</td><td>0.9977</td><td>7.01</td><td>0.97</td></tr><tr><td>0.3</td><td>0.986</td><td>0.9976</td><td>3.31</td><td>0.43</td></tr><tr><td>0.5</td><td>0.983</td><td>0.9970</td><td>2.09</td><td>0.26</td></tr><tr><td>0.7</td><td>0.943</td><td>0.9894</td><td>0.88</td><td>0.08</td></tr><tr><td>0.9</td><td>0.505</td><td>0.8277</td><td>0.30</td><td>0</td></tr></table>
|
| 267 |
+
|
| 268 |
+
the ACE model. For $\lambda$ , a larger threshold greatly reduces the frequency of using the fine encoder at the cost of accuracy; $\lambda$ can be adjusted during inference to balance the trade-off between efficiency and accuracy.
|
| 269 |
+
|
| 270 |
+
# 5 Conclusion
|
| 271 |
+
|
| 272 |
+
We present the ACE framework, an adaptive dynamic model for efficient hand pose estimation from monocular videos. At the core of the ACE model is the Gaussian kernel based gate, which determines whether to carefully examine the current frame using a computationally heavy pose encoder, based on a quick glimpse of the current frame with a light pose encoder and the temporal context. We further introduce the Gumbel-SoftMax trick to enable learning of the discrete decision gate. As a result, we obtain state-of-the-art performance on two widely used datasets, STB and FPHA, with less than $1/4$ of the computation of the baseline models. The proposed ACE model is general and can be built upon any single-frame pose encoder, which indicates that the efficiency could be further improved by adopting more efficient structures as the single-frame pose encoder.
|
| 273 |
+
|
| 274 |
+
Acknowledgements This work is partially supported by the National Institutes of Health under Grant R01CA214085 as well as SUTD Projects PIE-SGP-Al-2020-02 and SRG-ISTD-2020-153.
|
| 275 |
+
|
| 276 |
+
# References
|
| 277 |
+
|
| 278 |
+
1. Boukhayma, A., Bem, R.d., Torr, P.H.: 3d hand shape and pose from images in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 10843-10852 (2019)
|
| 279 |
+
2. Cai, Y., Ge, L., Cai, J., Yuan, J.: Weakly-supervised 3d hand pose estimation from monocular rgb images. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 666-682 (2018)
|
| 280 |
+
3. Cai, Y., Ge, L., Liu, J., Cai, J., Cham, T.J., Yuan, J., Thalmann, N.M.: Exploiting spatial-temporal relationships for 3d pose estimation via graph convolutional networks. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 2272-2281 (2019)
|
| 281 |
+
4. Courbariaux, M., Bengio, Y., David, J.P.: Binaryconnect: Training deep neural networks with binary weights during propagations. In: Advances in neural information processing systems. pp. 3123-3131 (2015)
|
| 282 |
+
5. Courbariaux, M., Hubara, I., Soudry, D., El-Yaniv, R., Bengio, Y.: Binarized neural networks: Training deep neural networks with weights and activations constrained to + 1 or -1. arXiv preprint arXiv:1602.02830 (2016)
|
| 283 |
+
6. Garcia-Hernando, G., Yuan, S., Baek, S., Kim, T.K.: First-person hand action benchmark with rgb-d videos and 3d hand pose annotations. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 409-419 (2018)
|
| 284 |
+
7. Ge, L., Liang, H., Yuan, J., Thalmann, D.: Robust 3d hand pose estimation in single depth images: from single-view cnn to multi-view cnns. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 3593-3601 (2016)
|
| 285 |
+
8. Ge, L., Liang, H., Yuan, J., Thalmann, D.: 3d convolutional neural networks for efficient and robust hand pose estimation from single depth images. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1991-2000 (2017)
|
| 286 |
+
9. Ge, L., Liang, H., Yuan, J., Thalmann, D.: Real-time 3d hand pose estimation with 3d convolutional neural networks. IEEE transactions on pattern analysis and machine intelligence 41(4), 956-970 (2018)
|
| 287 |
+
10. Ge, L., Ren, Z., Li, Y., Xue, Z., Wang, Y., Cai, J., Yuan, J.: 3d hand shape and pose estimation from a single rgb image. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 10833-10842 (2019)
|
| 288 |
+
11. Goudidis, F., Panteleris, P., Oikonomidis, I., Argyros, A.: Accurate hand keypoint localization on mobile devices. In: 2019 16th International Conference on Machine Vision Applications (MVA). pp. 1-6. IEEE (2019)
|
| 289 |
+
12. Han, S., Pool, J., Tran, J., Dally, W.: Learning both weights and connections for efficient neural network. In: Advances in neural information processing systems. pp. 1135-1143 (2015)
|
| 290 |
+
13. Hassibi, B., Stork, D.G.: Second order derivatives for network pruning: Optimal brain surgeon. In: Advances in neural information processing systems. pp. 164-171 (1993)
|
| 291 |
+
14. Hochreiter, S., Schmidhuber, J.: Long short-term memory. Neural computation 9(8), 1735-1780 (1997)
|
| 292 |
+
15. Howard, A.G., Zhu, M., Chen, B., Kalenichenko, D., Wang, W., Weyand, T., Andreetto, M., Adam, H.: Mobilenets: Efficient convolutional neural networks for mobile vision applications. arXiv preprint arXiv:1704.04861 (2017)
|
| 293 |
+
|
| 294 |
+
16. Iqbal, U., Molchanov, P., Breuel, T., Gall, J., Kautz, J.: Hand pose estimation via latent 2.5D heatmap regression. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 118-134 (2018)
|
| 295 |
+
17. Jang, E., Gu, S., Poole, B.: Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144 (2016)
|
| 296 |
+
18. Korbar, B., Tran, D., Torresani, L.: Scsampler: Sampling salient clips from video for efficient action recognition. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 6232-6242 (2019)
|
| 297 |
+
19. LeCun, Y., Denker, J.S., Solla, S.A.: Optimal brain damage. In: Advances in neural information processing systems. pp. 598-605 (1990)
|
| 298 |
+
20. Li, Z., Ni, B., Zhang, W., Yang, X., Gao, W.: Performance guaranteed network acceleration via high-order residual quantization. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 2584-2592 (2017)
|
| 299 |
+
21. Lin, M., Lin, L., Liang, X., Wang, K., Cheng, H.: Recurrent 3d pose sequence machines. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 810-819 (2017)
|
| 300 |
+
22. Liu, J., Ding, H., Shahroudy, A., Duan, L.Y., Jiang, X., Wang, G., Kot, A.C.: Feature boosting network for 3d pose estimation. IEEE Transactions on Pattern Analysis and Machine Intelligence 42(2), 494-501 (2020)
|
| 301 |
+
23. Liu, Z., Li, J., Shen, Z., Huang, G., Yan, S., Zhang, C.: Learning efficient convolutional networks through network slimming. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 2736-2744 (2017)
|
| 302 |
+
24. Louizos, C., Welling, M., Kingma, D.P.: Learning sparse neural networks through $l_0$ regularization. arXiv preprint arXiv:1712.01312 (2017)
|
| 303 |
+
25. Malik, J., Elhayek, A., Nunnari, F., Varanasi, K., Tamaddon, K., Heloir, A., Stricker, D.: Deephps: End-to-end estimation of 3d hand pose and shape by learning from synthetic depth. In: 2018 International Conference on 3D Vision (3DV). pp. 110-119. IEEE (2018)
|
| 304 |
+
26. Mueller, F., Bernard, F., Sotnychenko, O., Mehta, D., Sridhar, S., Casas, D., Theobalt, C.: Generated hands for real-time 3d hand tracking from monocular rgb. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 49-59 (2018)
|
| 305 |
+
27. Newell, A., Yang, K., Deng, J.: Stacked hourglass networks for human pose estimation. In: European conference on computer vision. pp. 483-499. Springer (2016)
|
| 306 |
+
28. Oculus: Hand tracking SDK for Oculus quest available with v12 release, https://developer.oculus.com/blog/hand-tracking-sdk-for-oculus-quest-available
|
| 307 |
+
29. Pan, B., Lin, W., Fang, X., Huang, C., Zhou, B., Lu, C.: Recurrent residual module for fast inference in videos. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1536-1545 (2018)
|
| 308 |
+
30. Pavllo, D., Feichtenhofer, C., Grangier, D., Auli, M.: 3d human pose estimation in video with temporal convolutions and semi-supervised training. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 7753-7762 (2019)
|
| 309 |
+
31. Rastegari, M., Ordonez, V., Redmon, J., Farhadi, A.: Xnor-net: Imagenet classification using binary convolutional neural networks. In: European conference on computer vision. pp. 525-542. Springer (2016)
|
| 310 |
+
32. Rayat Imtiaz Hossain, M., Little, J.J.: Exploiting temporal information for 3d human pose estimation. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 68-84 (2018)
|
| 311 |
+
|
| 312 |
+
33. Romero, J., Tzionas, D., Black, M.J.: Embodied hands: Modeling and capturing hands and bodies together. ACM Transactions on Graphics (ToG) 36(6), 245 (2017)
|
| 313 |
+
34. Sinha, A., Choi, C., Ramani, K.: Deephand: Robust hand pose estimation by completing a matrix imputed with deep features. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 4150-4158 (2016)
|
| 314 |
+
35. Tekin, B., Rozantsev, A., Lepetit, V., Fua, P.: Direct prediction of 3d body poses from motion compensated sequences. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 991-1000 (2016)
|
| 315 |
+
36. Wan, C., Probst, T., Gool, L.V., Yao, A.: Self-supervised 3d hand pose estimation through training by fitting. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 10853-10862 (2019)
|
| 316 |
+
37. Wan, C., Probst, T., Van Gool, L., Yao, A.: Dense 3d regression for hand pose estimation. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 5147-5156 (2018)
|
| 317 |
+
38. Wang, F., Wang, G., Huang, Y., Chu, H.: Sast: Learning semantic action-aware spatial-temporal features for efficient action recognition. IEEE Access 7, 164876-164886 (2019)
|
| 318 |
+
39. Wen, W., Wu, C., Wang, Y., Chen, Y., Li, H.: Learning structured sparsity in deep neural networks. In: Advances in neural information processing systems. pp. 2074-2082 (2016)
|
| 319 |
+
40. Wu, Z., Xiong, C., Jiang, Y.G., Davis, L.S.: Liteeval: A coarse-to-fine framework for resource efficient video recognition. In: Advances in Neural Information Processing Systems. pp. 7778-7787 (2019)
|
| 320 |
+
41. Wu, Z., Xiong, C., Ma, C.Y., Socher, R., Davis, L.S.: Adaframe: Adaptive frame selection for fast video recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 1278-1287 (2019)
|
| 321 |
+
42. Xiang, D., Joo, H., Sheikh, Y.: Monocular total capture: Posing face, body, and hands in the wild. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 10965-10974 (2019)
|
| 322 |
+
43. Zhang, J., Jiao, J., Chen, M., Qu, L., Xu, X., Yang, Q.: 3d hand pose tracking and estimation using stereo matching. arXiv preprint arXiv:1610.07214 (2016)
|
| 323 |
+
44. Zhang, X., Zhou, X., Lin, M., Sun, J.: Shufflenet: An extremely efficient convolutional neural network for mobile devices. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 6848-6856 (2018)
|
| 324 |
+
45. Zhang, X., Li, Q., Mo, H., Zhang, W., Zheng, W.: End-to-end hand mesh recovery from a monocular rgb image. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 2354-2364 (2019)
|
| 325 |
+
46. Zimmermann, C., Brox, T.: Learning to estimate 3d hand pose from single rgb images. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 4903-4911 (2017)
|
adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:46ec1318515331d19a3c3c1b8b1a5dd39dbec46c29c5903b0a3d3293e85b2aa8
|
| 3 |
+
size 455591
|
adaptivecomputationallyefficientnetworkformonocular3dhandposeestimation/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:178e9b1fc6df731764b94ecb2f7cc33d5a7181fb324ae9fcb03891e2c0986bf2
|
| 3 |
+
size 428000
|
adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/20cc9823-27e8-4b97-95f3-8c57433a4366_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5341e733a7e33f22f4b46ed22b336f7db6a5c0c4a0d9b8ab2aae62950a87652
|
| 3 |
+
size 72059
|
adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/20cc9823-27e8-4b97-95f3-8c57433a4366_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:33f262fb42745482652d39c0af26d6a739c35afbd95364ae429db42f3cc80ac5
|
| 3 |
+
size 88683
|
adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/20cc9823-27e8-4b97-95f3-8c57433a4366_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a8cc6a5e7935196d905634b487d0e9ebb1f4064a6e98d735f8b505d2f40f9f52
|
| 3 |
+
size 1103622
|
adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/full.md
ADDED
|
@@ -0,0 +1,260 @@
|
| 1 |
+
# Adaptive Margin Diversity Regularizer for handling Data Imbalance in Zero-Shot SBIR
|
| 2 |
+
|
| 3 |
+
Titir Dutta, Anurag Singh, and Soma Biswas
|
| 4 |
+
|
| 5 |
+
Indian Institute of Science, Bangalore, India {titird, anuragsingh2, somabiswas}@iisc.ac.in
|
| 6 |
+
|
| 7 |
+
Abstract. Data from new categories are continuously being discovered, which has sparked a significant amount of research into approaches that generalize to previously unseen categories, i.e., the zero-shot setting. Zero-shot sketch-based image retrieval (ZS-SBIR) is one such problem in the context of cross-domain retrieval, which has received a lot of attention due to its various real-life applications. Since most real-world training data have a fair amount of imbalance, in this work, for the first time in the literature, we extensively study the effect of training data imbalance on the generalization to unseen categories, with ZS-SBIR as the application area. We evaluate several state-of-the-art data imbalance mitigating techniques and analyze their results. Furthermore, we propose a novel framework AMDReg (Adaptive Margin Diversity Regularizer), which ensures that the embeddings of the sketches and images in the latent space are not only semantically meaningful, but are also separated according to their class representations in the training set. The proposed approach is model-independent, and it can be incorporated seamlessly into several state-of-the-art ZS-SBIR methods to improve their performance under imbalanced conditions. Extensive experiments and analysis justify the effectiveness of the proposed AMDReg for mitigating the effect of data imbalance on generalization to unseen classes in ZS-SBIR.
|
| 8 |
+
|
| 9 |
+
# 1 Introduction
|
| 10 |
+
|
| 11 |
+
Sketch-based image retrieval (SBIR) [15][35], which deals with retrieving natural images given a hand-drawn sketch query, has gained significant traction because of its potential applications in e-commerce, forensics, etc. Since new categories of data are continuously being added to the system, it is important for algorithms to generalize well to unseen classes, which is termed Zero-Shot Sketch-Based Image Retrieval (ZS-SBIR) [6][5][16][7]. The majority of ZS-SBIR approaches learn a shared latent-space representation for both sketches and images, where sketches and images from the same category come closer to each other, and also incorporate additional techniques to facilitate generalization to unseen classes.
|
| 12 |
+
|
| 13 |
+
One important factor that has been largely overlooked in this task of generalization to unseen classes is the distribution of the training data. Real-world data used to train the model is not always class-wise or domain-wise well-balanced. When training and test categories are the same, as expected, class imbalance in
|
| 14 |
+
|
| 15 |
+
the training data results in severe degradation in testing performance, especially for the minority classes. Many seminal approaches have been proposed to mitigate this effect for the task of image classification [14][11][2][4], but the effect of data imbalance on generalization to unseen classes is relatively unexplored, both for single- and cross-domain applications. In fact, both of the large-scale datasets widely used for SBIR / ZS-SBIR, namely Sketchy Extended [25] and TU-Berlin Extended [8], have data imbalance. In cross-domain data, there can be two types of imbalance: 1) domain imbalance - where the number of data samples in one domain is significantly different from that in the other domain; 2) class imbalance - where there is a significant difference in the number of data samples per class. TU-Berlin Ext. exhibits both types of imbalance. Although a recent paper [5] has attributed the poor retrieval performance on TU-Berlin Ext. to data imbalance, no measures have been proposed to handle it.
|
| 16 |
+
|
| 17 |
+
Here, we aim to study the effect of class imbalance in the training data on the retrieval performance for unseen classes in the context of ZS-SBIR; interestingly, we observe that the proposed framework works well even when both types of imbalance are present. We analyze several state-of-the-art approaches for mitigating the effect of training data imbalance on the final retrieval performance. To this end, we propose a novel regularizer termed AMDReg - Adaptive Margin Diversity Regularizer, which ensures that the embeddings of the data samples in the latent space account for the distribution of classes in the training set. To facilitate generalization to unseen classes, the majority of ZS-SBIR approaches impose a direct or indirect semantic constraint on the latent space, which ensures that sketch and image samples from unseen classes encountered during testing are embedded in the neighborhood of their related seen classes. But merely imposing a semantic constraint does not account for the training class imbalance. The proposed AMDReg, which is computed from the class-wise training data distribution in the sketch and image domains, helps to appropriately position the semantic embeddings. It enforces a broader margin / spread for the classes with fewer training samples as compared to the classes with a larger number of samples. Extensive analysis and evaluation on two benchmark datasets validate the effectiveness of the proposed approach. The contributions of this paper are summarized below.
|
| 18 |
+
|
| 19 |
+
1. We analyze the effect of class imbalance on generalization to unseen classes for the ZS-SBIR task. To the best of our knowledge, this is the first work in the literature which addresses the data-imbalance problem in the context of cross-domain retrieval.
|
| 20 |
+
2. We analyze the performance of several state-of-the-art techniques for handling the data imbalance problem for this task.
|
| 21 |
+
3. We propose a novel regularizer termed AMDReg, which can be seamlessly used with several ZS-SBIR methods to improve their performance. We observe significant improvements in the performance of three state-of-the-art ZS-SBIR methods.
|
| 22 |
+
4. We obtain state-of-the-art performance for ZS-SBIR and generalized ZS-SBIR for two large-scale benchmark datasets.
|
| 23 |
+
|
| 24 |
+
# 2 Related Work
|
| 25 |
+
|
| 26 |
+
Here, we discuss work in the literature relevant to this study. We include recent papers on sketch-based image retrieval (SBIR) and zero-shot sketch-based image retrieval (ZS-SBIR), as well as on class-imbalance problems in classification.
|
| 27 |
+
|
| 28 |
+
Sketch-based Image Retrieval (SBIR): The primary goal of these approaches is to bridge the domain gap between natural images and hand-drawn sketches. Early methods for SBIR, such as HOG [12] and LKS [24], aim to extract hand-crafted features from the sketches as well as from the edge-maps obtained from natural images, which are then directly used for retrieval. The advent of deep networks has advanced the state-of-the-art significantly. A Siamese network [22] with triplet or contrastive loss, GoogleNet [25] with triplet loss, etc. are some of the initial architectures. Recently, a number of hashing-based methods, such as [15][35], have achieved significant success. [15] uses a heterogeneous network, which employs the edge maps from images, along with the sketch-image training data, to learn a shared representation space. In contrast, GDH [35] exploits a generative model to learn the equivalent image representation from a given sketch and performs the final retrieval in the image space.
|
| 29 |
+
|
| 30 |
+
Zero-shot Sketch-based Image Retrieval (ZS-SBIR): The knowledge gap encountered by the retrieval model when a sketch query or database image is from a previously unseen class makes ZS-SBIR extremely challenging. ZSIH [26] and generative-model based ZS-SBIR [32] are some of the pioneering works in this direction. However, as identified by [6], ZSIH [26] requires a fusion layer for learning the model, which increases the learning cost, and [32] requires strictly paired sketch-image data for training. Some recent works [5][6][7][16] have reported improved performance for ZS-SBIR over the early techniques. [6] introduces a further generalization in the evaluation protocol for ZS-SBIR, termed generalized ZS-SBIR, where the search set contains images from both the seen and unseen classes. This poses an even greater challenge to the algorithm, and the performance degrades significantly under this evaluation protocol [6][7]. A few of the ZS-SBIR approaches are discussed in more detail later.
|
| 31 |
+
|
| 32 |
+
Handling data Imbalance for Classification: Since real-world training data are often imbalanced, a number of works [14][11][2][4] have recently been proposed to address this problem. [14] mitigates the foreground-background class imbalance problem in the context of object detection and proposes a modification to the traditional cross-entropy based classification loss. [4] introduces an additional cost-sensitive term that can be included with any classification loss, designed on the basis of the effective number of samples in a particular class. [2] and [11] both propose a modification in the margin of the class boundary, learned via minimizing intra-class variations and maximizing the inter-class margin. [17] discusses a dynamic meta-embedding technique to address the classification problem under a long-tailed training data scenario.
|
| 33 |
+
|
| 34 |
+
Equipped with the knowledge of recent algorithms for both ZS-SBIR and single-domain class-imbalance mitigation techniques, we now move on to discuss the problem of imbalanced training data for cross-domain retrieval.
|
| 35 |
+
|
| 36 |
+
# 3 Does Imbalanced Training Data Affect ZS-SBIR?
|
| 37 |
+
|
| 38 |
+
First, we analyze the effect of training data imbalance on generalization to unseen classes in the context of ZS-SBIR. Here, for ease of analysis, we consider only class imbalance, but our approach is effective for mixed imbalance too, as justified by the experimental results later. Since both standard datasets for this task, namely Sketchy Ext. [25] and TU-Berlin Ext. [8], are already imbalanced, to systematically study the effect of imbalance, we create a smaller balanced dataset, which is a subset of the Sketchy Ext. dataset. This is termed the mini-Sketchy dataset and contains sketches and images from 60 classes, with 500 images and sketches per class. Among them, 10 randomly selected classes are used as unseen classes and the remaining 50 classes are used for training.
|
| 39 |
+
|
| 40 |
+
To study the effect of imbalance, motivated by the class-imbalance literature in image classification [14][11], we introduce two different types of class imbalance: 1) Step imbalance - where a few of the classes in the training set contain fewer samples than the other classes; 2) Long-tailed imbalance - where the number of samples across the classes decreases gradually following the rule $n_k^{lt} = n_k \mu^{\frac{k}{C_{seen} - 1}}$, where $n_k^{lt}$ is the number of available samples for the $k^{th}$ class under the long-tailed distribution and $n_k$ is the number of original samples of that class (= 500 here). Here, $k \in \{1, 2, \dots, C_{seen}\}$, i.e. $C_{seen}$ is the number of training classes, and $\mu = \frac{1}{p}$. We define the imbalance factor $p$ for a particular data distribution to be the ratio of the highest number of samples in any class to the lowest number of samples in any class in that data; a higher value of $p$ implies more severe training class imbalance. Since the analysis is with class imbalance, we assume that the per-class data samples in the image and sketch domains are the same.
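As an illustration of this protocol, the per-class sample counts for the two imbalance types can be generated as in the following sketch (a minimal example assuming the mini-Sketchy values $n_k = 500$ and $C_{seen} = 50$; the fraction of reduced classes for step imbalance is our assumption, since the text only says that a few of the classes contain fewer samples):

```python
import numpy as np

def long_tailed_counts(n_k=500, c_seen=50, p=10):
    """Per-class counts following n_k^lt = n_k * mu^(k / (C_seen - 1)), mu = 1/p.

    k is indexed from 0 here so that the largest class keeps all n_k samples."""
    mu = 1.0 / p
    ks = np.arange(c_seen)
    return np.floor(n_k * mu ** (ks / (c_seen - 1))).astype(int)

def step_counts(n_k=500, c_seen=50, p=10, reduced_fraction=0.5):
    """Step imbalance: a chosen fraction of classes keeps only n_k / p samples."""
    counts = np.full(c_seen, n_k, dtype=int)
    n_reduced = int(c_seen * reduced_fraction)   # assumption: half of the classes are reduced
    counts[-n_reduced:] = n_k // p
    return counts

if __name__ == "__main__":
    for counts in (long_tailed_counts(p=100), step_counts(p=100)):
        # imbalance factor p = (largest class size) / (smallest class size)
        print(counts.max() / counts.min())
```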
|
| 41 |
+
|
| 42 |
+
As mentioned earlier, the proposed regularizer is generic and can be used with several baseline approaches to improve their performance in the presence of data imbalance. For this analysis, we choose one recent auto-encoder based approach [7]. We term this the Baseline Model for this discussion, since the analysis is equally applicable to other approaches as well. We systematically introduce both step and long-tailed imbalance for two different values of $p$ and observe the performance for each of them. The results are reported in Table 1.
|
| 43 |
+
|
| 44 |
+
As compared to the balanced setting, we observe significant degradation in the performance of the baseline whenever any kind of imbalance is present in the training data. This implies that training data imbalance not only affects the test performance when the classes remain the same, it also adversely affects the generalization performance significantly. This is due to the fact that unseen classes are recognized by embedding them close to their semantically relevant seen classes. Data imbalance results in (1) a latent embedding space which is not sufficiently discriminative and (2) improperly learnt embedding functions, both of which negatively affect the embeddings of the unseen classes. The goal of the proposed AMDReg is to mitigate these limitations, which in turn helps in better generalization to unseen classes (Table 1, bottom row). Thus we see that if the imbalance is handled properly, it may reduce the need for collecting large-scale balanced training samples.
|
| 45 |
+
|
| 46 |
+
Table 1. Evaluation (MAP@200) of the Baseline Model [7] for ZS-SBIR on the mini-Sketchy dataset. Results for long-tailed and step imbalance with different imbalance factors are reported. The final performance using the proposed AMDReg is also compared.
|
| 47 |
+
|
| 48 |
+
<table><tr><td rowspan="3">Experimental Protocol</td><td rowspan="3">Balanced data</td><td colspan="4">Imbalanced Data</td></tr><tr><td colspan="2">Long-tailed</td><td colspan="2">Step</td></tr><tr><td>p = 10</td><td>p = 100</td><td>p = 10</td><td>p = 100</td></tr><tr><td>Baseline [7]</td><td>0.395</td><td>0.234</td><td>0.185</td><td>0.241</td><td>0.156</td></tr><tr><td>Baseline [7] + AMDReg</td><td></td><td>0.332</td><td>0.240</td><td>0.315</td><td>0.218</td></tr></table>
|
| 49 |
+
|
| 50 |
+
# 4 Proposed Approach
|
| 51 |
+
|
| 52 |
+
Here, we describe the proposed Adaptive Margin Diversity Regularizer (AMDReg), which, when used with existing ZS-SBIR approaches, can help to mitigate the adverse effect of training data imbalance. We observe that the majority of state-of-the-art ZS-SBIR approaches [6][16][7] have two objectives: (1) projecting the sketches and images to a common discriminative latent space where retrieval can be performed; (2) ensuring that the latent space is semantically meaningful so that the approach generalizes to unseen classes. For the first objective, a classification loss is used while learning the shared latent space, which constrains the latent-space embeddings of both sketches and images from the same class to be clustered together and samples from different classes to be well separated. For the second objective, different direct or indirect techniques are utilized to make the embeddings semantically meaningful to ensure better generalization.
|
| 53 |
+
|
| 54 |
+
Semantically Meaningful Class Prototypes: Without loss of generality, we again choose the same baseline [7] to explain how to incorporate the proposed AMDReg into an existing ZS-SBIR approach. Let us consider that there are $C_{seen}$ classes present in the dataset, and $d$ is the latent-space dimension. The baseline model has two parallel branches $F_{im}(\theta_{im})$ and $F_{sk}(\theta_{sk})$ for extracting features $\{f^{(m)}\}$, where $m \in \{im, sk\}$, from images and sketches, respectively. These features are then passed through the corresponding content encoder networks to learn the shared latent-space embeddings, i.e. $z^{(m)} = E_m(f^{(m)})$. In [7], a distance-based cross-entropy loss is used to learn these latent embeddings such that they are close to the semantic information. As is widely done, the class-name embeddings $h(y)$ of the seen-class labels $y \in \{1, 2, \dots, C_{seen}\}$ are used as the semantic information. These embeddings are extracted from a pre-trained language model, such as word2vec [18] or GloVe [20]. Please refer to Fig. 1 for an illustration of the proposed AMDReg with respect to this baseline model.
|
| 55 |
+
|
| 56 |
+
The last fully connected (fc) layer of the encoders is essentially the classification layer and the weights of this layer, $\mathbf{P} = [\mathbf{p}_1,\mathbf{p}_2,\dots,\mathbf{p}_{C_{seen}}],\mathbf{p}_i\in \mathbb{R}^d$ can be considered as the shared class-prototypes or the representatives of the corresponding class [21]. To ensure a semantically meaningful latent representation, one can learn the prototypes ( $\mathbf{p}_i$ 's) such that they are close to the class-name
|
| 57 |
+
|
| 58 |
+
embeddings, or the prototypes can themselves be set equal to the semantic embeddings, i.e. $\mathbf{p}_i = h(y)$, and kept fixed. If the training data is imbalanced, just ensuring that the prototypes are semantically meaningful is not sufficient; we should also ensure that they take into account the label distribution of the training data. In our modification, to be able to adjust the prototypes properly, instead of fixing them as the class embeddings, we initialize them using these semantic embeddings. Since the output of this fc layer is given by $\mathbf{z}^{(m)} = [z_1^{(m)}, z_2^{(m)}, \dots, z_{C_{seen}}^{(m)}]$, the encoder with the prototypes is learnt using the standard cross-entropy loss as
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
\mathcal{L}_{CE}\left(\mathbf{z}^{(m)}, y\right) = - \log \frac{\exp\left(z_{y}^{(m)}\right)}{\sum_{j=1}^{C_{seen}} \exp\left(z_{j}^{(m)}\right)} \tag{1}
|
| 62 |
+
$$
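As a rough PyTorch sketch of this prototype-based classification layer (names and dimensions are illustrative and not taken from the authors' code; we use a plain dot-product classifier rather than the distance-based variant of [7]):

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class PrototypeClassifier(nn.Module):
    """Last fc layer: the rows of `fc.weight` are the shared class prototypes p_i."""
    def __init__(self, class_name_embeddings):          # (C_seen, d) word2vec/GloVe vectors
        super().__init__()
        c_seen, d = class_name_embeddings.shape
        self.fc = nn.Linear(d, c_seen, bias=False)
        # initialise the prototypes with the semantic embeddings instead of fixing them
        with torch.no_grad():
            self.fc.weight.copy_(class_name_embeddings)

    def forward(self, z):                                # z: (B, d) latent embeddings
        return self.fc(z)                                # logits z^(m) used in Eq. (1)

# usage with dummy tensors (50 seen classes, 300-d class-name embeddings)
h_y = torch.randn(50, 300)
clf = PrototypeClassifier(h_y)
z = torch.randn(8, 300)
labels = torch.randint(0, 50, (8,))
loss_ce = F.cross_entropy(clf(z), labels)                # Eq. (1)
```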
|
| 63 |
+
|
| 64 |
+
Now, with this background, we describe the proposed regularizer, AMDReg, which ensures that the prototypes are adjusted in such a way that they are spread out according to their class representation in the training set.
|
| 65 |
+
|
| 66 |
+
Adaptive Margin Diversity Regularizer: Our proposed AMDReg is inspired by the recently proposed Diversity Regularizer [11], which addresses data imbalance in image classification by adjusting the classifier weights (here, the prototypes) so that they are uniformly spread out in the feature space. In our context, this can be enforced by the following regularizer:
|
| 67 |
+
|
| 68 |
+
$$
|
| 69 |
+
\mathcal{R}(\mathbf{P}) = \frac{1}{C_{seen}} \sum_{i < j} \left[ \|\mathbf{p}_{i} - \mathbf{p}_{j}\|_{2}^{2} - d_{mean} \right]^{2}, \quad \forall j \in \{1, 2, \dots, C_{seen}\} \tag{2}
|
| 70 |
+
$$
|
| 71 |
+
|
| 72 |
+
Here $d_{mean}$ is the mean distance between all the class prototypes and is computed as
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
d_{mean} = \frac{2}{C_{seen}^{2} - C_{seen}} \sum_{i < j} \left\| \mathbf{p}_{i} - \mathbf{p}_{j} \right\|_{2}^{2}, \quad \forall j \in \{1, 2, \dots, C_{seen}\} \tag{3}
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
The above regularizer tries to spread out all the class prototypes without considering the amount of imbalance present in the training data. As has been observed in many recent works [2], due to the insufficient number of samples in the minority classes, their test samples are more likely to have a wider spread instead of being clustered around the class prototype during testing. For our problem, this implies greater uncertainty for samples of unseen classes which are semantically similar to the minority classes in the training set.
|
| 79 |
+
|
| 80 |
+
To this end, we propose to adjust the class prototypes adaptively, taking the data imbalance into account. Since there can be both class and domain imbalance in the cross-domain retrieval problem, we use the total number of sketch and image samples per class in the training set, and we refer to this combined number for the $k^{th}$ class as the effective number of samples, $n_k^{eff}$, in this work. We then define the imbalance-based margin for the $k^{th}$ class as
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
\Delta_{k} = \frac{K}{n_{k}^{eff}} \tag{4}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
Fig. 1. Illustration of the proposed Adaptive Margin Diversity Regularizer (AMDReg). The AMDReg ensures that the embeddings of the shared prototypes of the images and sketches are not only placed away from each other, but also account for the increased uncertainty when the training class distribution is imbalanced. This results in better generalization to unseen classes.
|
| 88 |
+
|
| 89 |
+
This is similar to the inverse frequency of occurrence, except for the experimental hyper-parameter $K$ . Thus the final AMDReg is given by
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
\mathcal{R}_{\Delta}(\mathbf{P}) = \frac{1}{C_{seen}} \sum_{i < j} \left[ \left\| \mathbf{p}_{i} - \mathbf{p}_{j} \right\|_{2}^{2} - \left(d_{mean} + \Delta_{j}\right) \right]^{2}, \quad \forall j \in \{1, 2, \dots, C_{seen}\} \tag{5}
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
Thus, we adjust the relative distances between the $\mathbf{p}_i$'s such that each pair is separated by at least the mean distance plus the class-imbalance margin. This ensures that the prototypes of the minority classes have a larger margin around them, which reduces the chance of confusion for semantically similar unseen classes during testing. Finally, the encoder with the prototypes is learnt using the CE loss along with the AMDReg as
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\mathcal{L}_{CE}^{AMDReg} = \mathcal{L}_{CE} + \lambda \mathcal{R}_{\Delta} \tag{6}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
where $\lambda$ is an experimental hyper-parameter which controls the contribution of the regularizer to the learning.
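Putting Eqs. (2)-(6) together, a minimal PyTorch sketch of the regularized objective could look as follows (function and variable names are ours; `n_eff` is a tensor holding the per-class sketch-plus-image counts, and `lam`, `K` correspond to $\lambda$ and $K$):

```python
import torch
import torch.nn.functional as F

def amd_reg(prototypes, n_eff, K=1.0):
    """Adaptive Margin Diversity Regularizer, Eq. (5).

    prototypes: (C_seen, d) weight matrix P of the classification layer.
    n_eff:      (C_seen,)   effective number of sketch + image samples per class.
    """
    c_seen = prototypes.shape[0]
    # pairwise squared distances ||p_i - p_j||_2^2
    dists = torch.cdist(prototypes, prototypes, p=2) ** 2
    iu, ju = torch.triu_indices(c_seen, c_seen, offset=1)   # all pairs with i < j
    pair_d = dists[iu, ju]
    # Eq. (3): mean pairwise squared distance
    d_mean = pair_d.mean()
    # Eq. (4): imbalance-based margin Delta_j for the j-th class of each pair
    delta = K / n_eff.float()
    margin = d_mean + delta[ju]
    # Eq. (5): penalize deviation from the class-dependent margin
    return ((pair_d - margin) ** 2).sum() / c_seen

def amdreg_ce_loss(logits, labels, prototypes, n_eff, lam=1e3, K=1.0):
    """Combined objective of Eq. (6)."""
    return F.cross_entropy(logits, labels) + lam * amd_reg(prototypes, n_eff, K)
```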
|
| 102 |
+
|
| 103 |
+
Difference with Related Works: Even though the proposed AMDReg is inspired by [11], there are significant differences, namely: (1) [11] addresses the imbalanced classification task for a single domain, while our work addresses generalization to unseen classes in the context of cross-domain retrieval (ZS-SBIR); (2) while [11] ensures that the weight vectors are equally spread out, AMDReg
|
| 104 |
+
|
| 105 |
+
accounts for the training data distribution while designing the relative distances between the semantic embeddings; (3) finally, [11] works with the max-margin loss, whereas AMDReg is used with the standard CE loss while learning the semantic class prototypes.
|
| 106 |
+
|
| 107 |
+
The proposed approach also differs from another closely related work, LDAM [2]. The LDAM loss is a modification of the standard cross-entropy or hinge loss to incorporate a class-wise margin. In contrast, the proposed AMDReg is a margin-based regularizer with adaptive margins between class prototypes, based on the corresponding representation of the classes in the training set. Thus, while [2] is inspired by a margin-based generalization bound, the proposed AMDReg is inspired by the widely used inverse frequency of occurrence.
|
| 108 |
+
|
| 109 |
+
# 4.1 Analysis with standard & SOTA imbalance-aware approaches
|
| 110 |
+
|
| 111 |
+
Here, we analyze how the proposed AMDReg compares with several existing state-of-the-art techniques used for addressing the problem of imbalance in the training data, mainly for the task of image classification. These techniques can be broadly classified into two categories: (1) re-sampling techniques to balance the existing imbalanced dataset, and (2) cost-sensitive learning or modification of the classifier. For this analysis too, we use the same retrieval backbone [7]. In this context, we first compute the average number of samples per class in the dataset. Any class which has fewer samples than the average is considered a minority class, and the remaining are considered majority classes.
|
| 112 |
+
|
| 113 |
+
1) Re-balancing the dataset: Re-sampling is a standard and effective technique used to balance out the dataset distribution bias. The most common methods are under-sampling of the majority classes [1] or over-sampling of the minority classes [3]. We systematically apply such data re-sampling techniques on the training data to address the class imbalance for ZS-SBIR, as discussed below. The re-sampled / balanced dataset created by each re-sampling operation described below is used for training the baseline network and reporting the retrieval performance.
|
| 114 |
+
|
| 115 |
+
1. Naive under-sampling: Here, we randomly select $1/p$-th of the total samples per class for the majority classes and discard the remaining samples. Naturally, we lose a significant number of important samples with such a random sampling technique.
|
| 116 |
+
2. Selective Decontamination [1]: This technique is used to intelligently under-sample the majority classes instead of randomly throwing away excess samples. Following [1], we modify the Euclidean distance function $d_{E}(\mathbf{x}_{i},\mathbf{x}_{j})$ between two samples $\mathbf{x}_i$ and $\mathbf{x}_j$ of the $c^{th}$ class as
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
d_{modified}\left(\mathbf{x}_{i}, \mathbf{x}_{j}\right) = \left(\frac{n_{c}}{N}\right)^{1/m} d_{E}\left(\mathbf{x}_{i}, \mathbf{x}_{j}\right) \tag{7}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where $n_c$ and $N$ are the number of samples in the $c^{th}$ class and in all classes, respectively, and $m$ is the dimension of the feature space. We retain only those samples of the majority classes for which the majority of their top-$K$ nearest neighbors agree on the class.
|
| 123 |
+
|
| 124 |
+
3. Naive over-sampling: Here, the minority classes are augmented by repeating the instances (as in [35]) and using standard image augmentation techniques (such as rotation, translation, flipping, etc.).
|
| 125 |
+
4. SMOTE [3]: In this intelligent over-sampling technique, instead of merely repeating samples, the minority classes are augmented by generating synthetic features along the line segment joining each minority-class sample with its $K$-nearest neighbors (a toy sketch of this interpolation follows this list).
|
| 126 |
+
5. GAN-based Augmentation [29]: Finally, we propose to augment the minority classes by generating features with the help of generative models, which have been very successful for zero-shot [29] / few-shot [19] / any-shot [30] image classification. Towards that goal, we use the f-GAN [29] model to generate synthetic features for the minority classes using their attributes, and augment the available training data with those features to reduce the imbalance.
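For reference, a toy sketch of the SMOTE-style interpolation mentioned in item 4 above, operating on extracted features of a single minority class (the choice of `k` and the use of scikit-learn's nearest-neighbour search are our assumptions):

```python
import numpy as np
from sklearn.neighbors import NearestNeighbors

def smote_features(feats, n_new, k=5, seed=0):
    """feats: (n, d) minority-class features; returns (n_new, d) synthetic features."""
    rng = np.random.default_rng(seed)
    nn_model = NearestNeighbors(n_neighbors=k + 1).fit(feats)
    _, idx = nn_model.kneighbors(feats)            # idx[:, 0] is the sample itself
    synthetic = []
    for _ in range(n_new):
        i = rng.integers(len(feats))
        j = idx[i, rng.integers(1, k + 1)]         # pick one of its k nearest neighbours
        gap = rng.random()
        # interpolate along the line segment joining the sample and its neighbour
        synthetic.append(feats[i] + gap * (feats[j] - feats[i]))
    return np.stack(synthetic)
```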
|
| 127 |
+
2) Cost-sensitive Learning of the Classifier: The goal of cost-sensitive learning based methods is to learn a better classifier using the original imbalanced training data, but with a more suitable loss function which can account for the data imbalance. To observe the effect of the different losses, we replace the distance-based CE loss in the baseline model with the following ones, keeping the rest of the network fixed.
|
| 128 |
+
1. Focal loss: This loss [14] was proposed to address the foreground-background class imbalance issue in the context of object detection. It is a simple yet effective modification of the standard cross-entropy loss, such that the easy or well-classified samples are given less weight than the difficult samples (a minimal sketch follows this list).
|
| 129 |
+
2. Class-balanced Focal Loss: It is a variant of focal loss, recently proposed in [4], which incorporates the effective number of samples for a class in the imbalanced dataset.
|
| 130 |
+
3. Diversity Regularizer: This recently proposed regularizer [11] ensures that both the majority and minority classes are at equal distances from each other in the latent space, and has reported significant performance improvement for imbalanced training data in image classification.
|
| 131 |
+
4. $LDAM$: [2] proposes a margin-based modification of the standard cross-entropy or hinge loss to ensure that the classes are well separated from each other.
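To make the focal loss of item 1 above concrete, a minimal PyTorch sketch (without the optional $\alpha$ class weighting of [14]; $\gamma$ is the focusing parameter) is:

```python
import torch
import torch.nn.functional as F

def focal_loss(logits, labels, gamma=2.0):
    """FL(p_t) = -(1 - p_t)^gamma * log(p_t), averaged over the batch."""
    log_pt = F.log_softmax(logits, dim=1).gather(1, labels.unsqueeze(1)).squeeze(1)
    pt = log_pt.exp()
    return ((1.0 - pt) ** gamma * (-log_pt)).mean()
```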
|
| 132 |
+
|
| 133 |
+
The retrieval performance obtained with these imbalance-handling methods is reported in Table 2. We observe that all the techniques result in varying degrees of improvement over the base model. Among the data augmentation techniques, GAN-based augmentation outperforms the other approaches. In general, all the cost-sensitive learning techniques perform quite well, especially the recently proposed diversity regularizer and the LDAM cross-entropy loss. However, the proposed AMDReg outperforms both the data-balancing and cost-sensitive learning approaches, giving the best performance across all types and degrees of imbalance.
|
| 134 |
+
|
| 135 |
+
Table 2. ZS-SBIR performance (MAP@200) of different imbalance handling techniques applied on the Baseline Model [7] for the mini-Sketchy dataset. Results of the original Baseline Model are also reported for reference.
|
| 136 |
+
|
| 137 |
+
<table><tr><td rowspan="2">Imbalance Handler</td><td rowspan="2">Methods</td><td colspan="2">Long-tailed</td><td colspan="2">Step</td></tr><tr><td>p = 10</td><td>p = 100</td><td>p = 10</td><td>p = 100</td></tr><tr><td></td><td>Baseline Model [7]</td><td>0.234</td><td>0.185</td><td>0.241</td><td>0.156</td></tr><tr><td rowspan="5">Data balancing methods</td><td>Naive under-sampling</td><td>0.235</td><td>0.191</td><td>0.256</td><td>0.159</td></tr><tr><td>Naive over-sampling</td><td>0.269</td><td>0.219</td><td>0.258</td><td>0.155</td></tr><tr><td>Selective decontamination [1]</td><td>0.268</td><td>0.221</td><td>0.251</td><td>0.164</td></tr><tr><td>SMOTE [3]</td><td>0.269</td><td>0.217</td><td>0.269</td><td>0.183</td></tr><tr><td>GAN-based Augmentation [29]</td><td>0.305</td><td>0.229</td><td>0.274</td><td>0.188</td></tr><tr><td rowspan="5">Loss-Modification Techniques</td><td>Focal loss [14]</td><td>0.273</td><td>0.228</td><td>0.289</td><td>0.195</td></tr><tr><td>Class-balanced Focal Loss [4]</td><td>0.299</td><td>0.236</td><td>0.296</td><td>0.210</td></tr><tr><td>Diversity-Regularizer [11]</td><td>0.296</td><td>0.222</td><td>0.285</td><td>0.207</td></tr><tr><td>LDAM-CE loss [2]</td><td>0.329</td><td>0.234</td><td>0.310</td><td>0.213</td></tr><tr><td>Proposed AMDReg</td><td>0.332</td><td>0.240</td><td>0.315</td><td>0.218</td></tr></table>
|
| 138 |
+
|
| 139 |
+
# 5 Experimental Evaluation on ZS-SBIR
|
| 140 |
+
|
| 141 |
+
Here, we provide details of the extensive experiments performed to evaluate the effectiveness of the proposed AMDReg for handling data imbalance in ZS-SBIR.
|
| 142 |
+
|
| 143 |
+
Datasets Used and Experimental Protocol: We use two large-scale standard benchmarks for evaluating ZS-SBIR approaches, namely Sketchy Ext. [25] and TU-Berlin Ext. [8].
|
| 144 |
+
|
| 145 |
+
Sketchy Ext. [25] originally contained approximately 75,000 sketches and 12,500 images from 125 object categories. Later, [15] collected and added an additional 60,502 images to this dataset. Following the standard protocol [6][16], we randomly choose 25 classes as unseen classes (sketches as queries and images in the search set) and the remaining 100 classes for training.
|
| 146 |
+
|
| 147 |
+
TU-Berlin Ext. [8] originally contained 80 hand-drawn sketches per class for a total of 250 classes. To make it a better fit for large-scale experiments, [34] added an additional 204,489 images. As followed in the literature [6][7], we randomly select 30 classes as unseen, while the remaining 220 classes are used for training.
|
| 148 |
+
|
| 149 |
+
The dataset statistics are shown in Fig. 2, which depicts the data imbalance in both datasets. This is especially evident in TU-Berlin Ext., which has a huge domain-wise imbalance as well as class-wise imbalance. These real-world datasets reinforce the importance of handling data imbalance for the ZS-SBIR task.
|
| 150 |
+
|
| 151 |
+
# 5.1 State-of-the-art ZS-SBIR approaches integrated with AMDReg
|
| 152 |
+
|
| 153 |
+
As already mentioned, the proposed AMDReg is generic and can be seamlessly integrated with most state-of-the-art ZS-SBIR approaches for handling the training data imbalance. Here, we have integrated AMDReg with three state-of-the-art approaches, namely (1) Semantically-tied paired cycle-consistency based
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
Fig. 2. Dataset statistics of sketches and images of Sketchy-extended and TU-Berlin-extended are shown in the first two and the last two plots, respectively.
|
| 157 |
+
|
| 158 |
+

|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
|
| 162 |
+

|
| 163 |
+
|
| 164 |
+
network (SEM-PCYC) [6]; (2) Semantic-aware knowledge preservation for ZS-SBIR (SAKE) [16]; and (3) Style-guided network for ZS-SBIR [7]. We now briefly describe the three approaches along with the integration of AMDReg.
|
| 165 |
+
|
| 166 |
+
SEM-PCYC [6] with AMDReg: SEM-PCYC is a generative model with two separate branches, for image and sketch, that perform visual-to-semantic mapping along with a cyclic consistency loss. Further, to ensure that the semantic output of the generators is also class-discriminative, a classification loss is used. This classifier is pre-trained on the seen-class training data and kept frozen while the whole retrieval model is trained. We modify the training methodology by allowing the classifier to train along with the rest of the model, including the AMDReg with the CE loss. Here, the semantic information is enforced through an autoencoder, which uses a hierarchical and a text-based model as input, and thus the weights are randomly initialized. Please refer to [6] for more details.
|
| 167 |
+
|
| 168 |
+
SAKE [16] with AMDReg: This ZS-SBIR method extends the concept of domain adaptation for fine-tuning a model pre-trained on ImageNet [23] to the specific ZS-SBIR datasets. The network contains a shared branch to extract features from both sketches and images, which are later used for the categorical classification task using the softmax CE loss. Simultaneously, the semantic structure with respect to the ImageNet [23] classes is maintained. Here also, we modify the CE loss using the proposed AMDReg to mitigate the adverse effect of training data imbalance. The rest of the branches and the proposed SAKE loss remain unchanged. Please refer to [16] for more details of the base algorithm.
|
| 169 |
+
|
| 170 |
+
Style-guide [7] with AMDReg: This is a two-step process, where the shared latent space is learnt first. Then, the latent-space content extracted from the sketch query is combined with the styles of the relevant images to obtain the final retrieval in the image space. While learning the latent space, a distance-based cross-entropy loss is used, which is modified as explained in detail earlier. Please refer to [7] for more details of the base algorithm.
|
| 171 |
+
|
| 172 |
+
Implementation Details: The proposed regularizer is implemented using PyTorch. We use a single Nvidia GeForce GTX TITAN X GPU for all our experiments.
|
| 173 |
+
|
| 174 |
+
Table 3. Performance of several state-of-the-art approaches for ZS-SBIR and generalized ZS-SBIR.
|
| 175 |
+
|
| 176 |
+
<table><tr><td rowspan="2" colspan="2">Algorithms</td><td colspan="2">TU-Berlin extended</td><td colspan="2">Sketchy-extended</td></tr><tr><td>MAP@all</td><td>Prec@100</td><td>MAP@all</td><td>Prec@100</td></tr><tr><td rowspan="7">SBIR</td><td>Softmax Baseline</td><td>0.089</td><td>0.143</td><td>0.114</td><td>0.172</td></tr><tr><td>Siamese CNN [22]</td><td>0.109</td><td>0.141</td><td>0.132</td><td>0.175</td></tr><tr><td>SaN [33]</td><td>0.089</td><td>0.108</td><td>0.115</td><td>0.125</td></tr><tr><td>GN Triplet [25]</td><td>0.175</td><td>0.253</td><td>0.204</td><td>0.296</td></tr><tr><td>3D shape [28]</td><td>0.054</td><td>0.067</td><td>0.067</td><td>0.078</td></tr><tr><td>DSH (binary) [15]</td><td>0.129</td><td>0.189</td><td>0.171</td><td>0.231</td></tr><tr><td>GDH (binary) [35]</td><td>0.135</td><td>0.212</td><td>0.187</td><td>0.259</td></tr><tr><td rowspan="7">ZSL</td><td>CMT [27]</td><td>0.062</td><td>0.078</td><td>0.087</td><td>0.102</td></tr><tr><td>DeViSE [10]</td><td>0.059</td><td>0.071</td><td>0.067</td><td>0.077</td></tr><tr><td>SSE [36]</td><td>0.089</td><td>0.121</td><td>0.116</td><td>0.161</td></tr><tr><td>JLSE [37]</td><td>0.109</td><td>0.155</td><td>0.131</td><td>0.185</td></tr><tr><td>SAE [13]</td><td>0.167</td><td>0.221</td><td>0.216</td><td>0.293</td></tr><tr><td>FRWGAN [9]</td><td>0.110</td><td>0.157</td><td>0.127</td><td>0.169</td></tr><tr><td>ZSH [31]</td><td>0.141</td><td>0.177</td><td>0.159</td><td>0.214</td></tr><tr><td rowspan="2"></td><td>ZSIH (binary) [26]</td><td>0.223</td><td>0.294</td><td>0.258</td><td>0.342</td></tr><tr><td>ZS-SBIR [32]</td><td>0.005</td><td>0.001</td><td>0.196</td><td>0.284</td></tr><tr><td rowspan="4">Zero-Shot SBIR</td><td>SEM-PCYC [6]</td><td>0.297</td><td>0.426</td><td>0.349</td><td>0.463</td></tr><tr><td>SEM-PCYC + AMDReg</td><td>0.330</td><td>0.473</td><td>0.397</td><td>0.494</td></tr><tr><td>Style-guide [7]</td><td>0.254</td><td>0.355</td><td>0.375</td><td>0.484</td></tr><tr><td>Style-guide + AMDReg</td><td>0.291</td><td>0.376</td><td>0.410</td><td>0.512</td></tr><tr><td rowspan="2"></td><td>SAKE [16]</td><td>0.428*</td><td>0.534*</td><td>0.547</td><td>0.692</td></tr><tr><td>SAKE + AMDReg</td><td>0.447</td><td>0.574</td><td>0.551</td><td>0.715</td></tr><tr><td rowspan="3">Generalized Zero-shot SBIR</td><td>Style-guide [7]</td><td>0.149</td><td>0.226</td><td>0.330</td><td>0.381</td></tr><tr><td>SEM-PCYC [6]</td><td>0.192</td><td>0.298</td><td>0.307</td><td>0.364</td></tr><tr><td>SEM-PCYC + AMDReg</td><td>0.245</td><td>0.303</td><td>0.320</td><td>0.398</td></tr></table>
|
| 177 |
+
|
| 178 |
+
For all the experiments, we set $\lambda = 10^{3}$ and $K = 1$. The Adam optimizer is used with $\beta_{1} = 0.5$, $\beta_{2} = 0.999$ and a learning rate of $lr = 10^{-3}$. The implementation of the different baselines and the choice of hyper-parameters for them follow the corresponding papers.
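A sketch of how these settings wire together during training (purely illustrative; `encoder`, `clf` and `loader` are placeholders, `clf` is a bias-free linear layer whose weight rows are the class prototypes, and `amd_reg` is the regularizer sketched in Sec. 4):

```python
import torch
import torch.nn.functional as F

def train(encoder, clf, amd_reg, n_eff, loader, epochs=10, lam=1e3, K=1.0):
    """One possible training loop using the hyper-parameters reported above."""
    params = list(encoder.parameters()) + list(clf.parameters())
    opt = torch.optim.Adam(params, lr=1e-3, betas=(0.5, 0.999))
    for _ in range(epochs):
        for feats, labels in loader:
            logits = clf(encoder(feats))
            # Eq. (6): cross-entropy plus the adaptive margin diversity regularizer
            loss = F.cross_entropy(logits, labels) + lam * amd_reg(clf.weight, n_eff, K)
            opt.zero_grad()
            loss.backward()
            opt.step()
```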
|
| 179 |
+
|
| 180 |
+
# 5.2 Evaluation for ZS-SBIR
|
| 181 |
+
|
| 182 |
+
Here, we report the results of the modifications to the state-of-the-art approaches for ZS-SBIR. We first train all three original models (as described before) to replicate the results reported in the respective papers. We use the code provided by the authors and are able to replicate all the results for SEM-PCYC and Style-guide as reported. However, for SAKE, in two cases, the results we obtained are slightly different from those reported in the paper. So we report the results as obtained by us, for a fair evaluation of the proposed improvement (marked with a star to indicate that they differ from the numbers reported in the paper).
|
| 183 |
+
|
| 184 |
+
We incorporate the proposed modifications for AMDReg in all three approaches and retrain the models. The results are reported in Table 3. All the
|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
Fig. 3. Performance comparison of the base model (SEM-PCYC) and the modified base model using the proposed AMDReg: (a) a few examples of the top-5 retrieved images for given unseen sketch queries from the TU-Berlin dataset; (b) P-R curve on the Sketchy dataset; (c) P-R curve on the TU-Berlin dataset.
|
| 188 |
+
|
| 189 |
+

|
| 190 |
+
|
| 191 |
+

|
| 192 |
+
|
| 193 |
+
results of the other approaches are taken directly from [6]. We observe a significant improvement in the performance of all the state-of-the-art approaches when trained using the proposed regularizer. This experiment provides the insight that, by handling the data imbalance which is inherently present in the collected data, it is possible to gain significant improvement in the final performance. Since AMDReg is generic, it can potentially be incorporated with other approaches developed for the ZS-SBIR task to handle the training data imbalance problem.
|
| 194 |
+
|
| 195 |
+
Fig. 3 shows the top-5 retrieved results for a few unseen queries (first column), using SEM-PCYC as the baseline model, without and with AMDReg, respectively. We observe a significant improvement when AMDReg is used, justifying its effectiveness. We make similar observations from the P-R curves in Fig. 3.
|
| 196 |
+
|
| 197 |
+
# 5.3 Evaluation for Generalized ZS-SBIR
|
| 198 |
+
|
| 199 |
+
In real scenarios, the search set may consist of image samples from both seen and unseen classes, which makes the problem much more challenging. This is termed generalized ZS-SBIR. To evaluate the effectiveness of the proposed AMDReg in this scenario, we follow the experimental protocol of [6] and use SEM-PCYC [6] as the base model. From the results in Table 3, we observe that AMDReg is able to significantly improve the performance of the base model and yields state-of-
|
| 200 |
+
|
| 201 |
+
the-art results in three out of the four cases. Only for Sketchy Ext. does it perform slightly worse than Style-guide, but it still improves upon its baseline performance.
|
| 202 |
+
|
| 203 |
+
# 5.4 Evaluation for SBIR
|
| 204 |
+
|
| 205 |
+
Though the main purpose of this work is to analyze the effect of training data imbalance on generalization to unseen classes, the approach should also benefit standard SBIR in the presence of imbalance. We observe from Table 4 that the
|
| 206 |
+
|
| 207 |
+
Table 4. SBIR evaluation (MAP@200) of Baseline Model [7] on mini-Sketchy.
|
| 208 |
+
|
| 209 |
+
<table><tr><td>Balanced Data</td><td>Step Imb. (p=100)</td><td>GAN-based Aug. [29]</td><td>CB Focal Loss [4]</td><td>Diversity Regularizer [11]</td><td>Proposed AMDReg</td></tr><tr><td>0.839</td><td>0.571</td><td>0.580</td><td>0.613</td><td>0.636</td><td>0.647</td></tr></table>
|
| 210 |
+
|
| 211 |
+
performance of SBIR indeed decreases drastically with training data imbalance. The proposed AMDReg is able to mitigate this by a significant margin as compared to the other state-of-the-art imbalance handling techniques. We further analyze the performance of SEM-PCYC [6] on the Sketchy Ext. dataset under the standard SBIR protocol with and without AMDReg. We observe a significant improvement when the proposed AMDReg is used (MAP@all: 0.811; Prec@100: 0.897) as compared to the baseline SEM-PCYC (MAP@all: 0.771; Prec@100: 0.871).
|
| 212 |
+
|
| 213 |
+
# 6 Conclusion
|
| 214 |
+
|
| 215 |
+
In this work, for the first time in the literature, we analyzed the effect of training data imbalance on generalization to unseen classes in the context of ZS-SBIR. We observe that most real-world SBIR datasets are in fact imbalanced, and that this imbalance does adversely affect generalization. We systematically evaluate several state-of-the-art imbalance-mitigating approaches (developed for classification) for this problem. Additionally, we propose a novel adaptive margin diversity regularizer (AMDReg), which ensures that the shared latent-space embeddings of the images and sketches account for the data imbalance in the training set. The proposed regularizer is generic, and we show how it can be seamlessly incorporated into three existing state-of-the-art ZS-SBIR approaches with slight modifications. Finally, we show that the proposed AMDReg results in significant improvements under both the ZS-SBIR and generalized ZS-SBIR protocols, setting new state-of-the-art results.
|
| 216 |
+
|
| 217 |
+
# Acknowledgement
|
| 218 |
+
|
| 219 |
+
This work is partly supported by a research grant from SERB, Department of Science and Technology, Government of India.
|
| 220 |
+
|
| 221 |
+
# References
|
| 222 |
+
|
| 223 |
+
1. Barandela, R., Rangel, E., Sanchez, J.S., Ferri, F.J.: Restricted decontamination for the imbalanced training sample problem. Iberoamerican Congress on Pattern Recognition, Springer (2003)
|
| 224 |
+
2. Cao, K., Wei, C., Gaidon, A., Arechiga, N., Ma, T.: Learning imbalanced datasets with label-distribution-aware margin loss. NeurIPS (2019)
|
| 225 |
+
3. Chawla, N.V., Bowyer, K.W., Hall, L.O., Kegelmeyer, W.P.: Smote: synthetic minority over-sampling technique. Journal of Artificial Intelligence Research 16, 321-357 (2002)
|
| 226 |
+
4. Cui, Y., Jia, M., Lin, T.Y., Song, Y.: Class-balanced loss based on effective number of samples. CVPR (2019)
|
| 227 |
+
5. Dey, S., Riba, P., Dutta, A., Llados, J., Song, Y.Z.: Doodle to search: practical zero-shot sketch-based image retrieval. CVPR (2019)
|
| 228 |
+
6. Dutta, A., Akata, Z.: Semantically tied paired cycle consistency for zero-shot sketch-based image retrieval. CVPR (2019)
|
| 229 |
+
7. Dutta, T., Biswas, S.: Style-guided zero-shot sketch-based image retrieval. BMVC (2019)
|
| 230 |
+
8. Eitz, M., Hays, J., Alexa, M.: How do humans sketch objects? ACM TOG 31(4), 44.1-44.10 (2012)
|
| 231 |
+
9. Felix, R., Kumar, V.B., Reid, I., Carneiro, G.: Multi-modal cycle-consistent generalized zero-shot learning. In: ECCV (2018)
|
| 232 |
+
10. Frome, A., Corrado, G.S., Shlens, J., Bengio, S., Dean, J., Ranzato, M., Mikolov, T.: Devise: A deep visual-semantic embedding model. In: NeurIPS (2013)
|
| 233 |
+
11. Hayat, M., Khan, S., Zamir, S.W., Shen, J., Shao, L.: Gaussian affinity for max-margin class imbalanced learning. ICCV (2019)
|
| 234 |
+
12. Hu, R., Collomosse, J.: A performance evaluation of gradient field hog descriptor for sketch based image retrieval. CVIU 117(7), 790-806 (2013)
|
| 235 |
+
13. Kodirov, E., Xiang, T., Gong, S.: Semantic autoencoder for zero-shot learning. In: CVPR (2017)
|
| 236 |
+
14. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollar, P.: Focal loss for dense object detection. arXiv:1708.02002 [cs.CV] (2018)
|
| 237 |
+
15. Liu, L., Shen, F., Shen, Y., Liu, X., Shao, L.: Deep sketch hashing: fast free-hand sketch-based image retrieval. CVPR (2017)
|
| 238 |
+
16. Liu, Q., Xie, L., Wang, H., Yuille, A.: Semantic-aware knowledge preservation for zero-shot sketch-based image retrieval. ICCV (2019)
|
| 239 |
+
17. Liu, Z., Miao, Z., Zhan, X., Wang, J., Gong, B., Yu, S.X.: Large-scale long-tailed recognition in an open world. In: CVPR (2019)
|
| 240 |
+
18. Mikolov, T., Sutskever, I., Chen, K., Corrado, G.S., Dean, J.: Distributed representations of words and phrases and their compositionality. NeurIPS (2013)
|
| 241 |
+
19. Mishra, A., Reddy, S.K., Mittal, A., Murthy, H.A.: A generative model for zero-shot learning using conditional variational auto-encoders. CVPR-W (2018)
|
| 242 |
+
20. Pennington, J., Socher, R., Manning, C.D.: Glove: global vectors for word representation. EMNLP (2014)
|
| 243 |
+
21. Qi, H., Brown, M., Lowe, D.G.: Low-shot learning with imprinted weights. CVPR (2018)
|
| 244 |
+
22. Qi, Y., Song, Y.Z., Zhang, H., Liu, J.: Sketch-based image retrieval via siamese convolutional neural network. ICIP (2016)
|
| 245 |
+
23. Russakovsky, O., Deng, J., Su, H., Krause, J., Satheesh, S., Ma, S., Huang, Z., Karpathy, A., Khosla, A., Bernstein, M., Berg, A.C., Li, F.F.: Imagenet: large-scale visual recognition challenge. IJCV 115(3), 211-252 (2015)
|
| 246 |
+
|
| 247 |
+
24. Saavedra, J.M., Barrios, J.M.: Sketch-based image retrieval using learned keyshapes (lks). BMVC (2015)
|
| 248 |
+
25. Sangkloy, P., Burnell, N., Ham, C., Hays, J.: The sketchy database: learning to retrieve badly drawn bunnies. ACM TOG (2016)
|
| 249 |
+
26. Shen, Y., Liu, L., Shen, F., Shao, L.: Zero-shot sketch-image hashing. CVPR (2018)
|
| 250 |
+
27. Socher, R., Ganjoo, M., Manning, C.D., Ng, A.: Zero-shot learning through cross-modal transfer. In: NeurIPS (2013)
|
| 251 |
+
28. Wang, M., Wang, C., Wu, J.X., Zhang, J.: Community detection in social networks: an in-depth benchmarking study with a procedure-oriented framework. VLDB (2015)
|
| 252 |
+
29. Xian, Y., Lorenz, T., Schiele, B., Akata, Z.: Feature generating networks for zero-shot learning. CVPR (2018)
|
| 253 |
+
30. Xian, Y., Sharma, S., Schiele, B., Akata, Z.: f-vaegan-d2: A feature generating framework for any-shot learning. CVPR (2019)
|
| 254 |
+
31. Yang, Z., Cohen, W.W., Salakhutdinov, R.: Revisiting semi-supervised learning with graph embeddings. arXiv preprint arXiv:1603.08861 (2016)
|
| 255 |
+
32. Yelamarthi, S.K., Reddy, S.K., Mishra, A., Mittal, A.: A zero-shot framework for sketch-based image retrieval. ECCV (2018)
|
| 256 |
+
33. Yu, Q., Yang, Y., Liu, F., Song, Y.Z., Xiang, T., Hospedales, T.M.: Sketch-a-net that beats humans. BMVC (2015)
|
| 257 |
+
34. Zhang, J., Liu, S., Zhang, C., Ren, W., Wang, R., Cao, X.: Sketchnet: sketch classification with web images. CVPR (2016)
|
| 258 |
+
35. Zhang, J., Shen, F., Liu, L., Zhu, F., Yu, M., Shao, L., Shen, H.T., Van Gool, L.: Generative domain-migration hashing for sketch-to-image retrieval. ECCV (2018)
|
| 259 |
+
36. Zhang, R., Lin, L., Zhang, R., Zuo, W., Zhang, L.: Bit-scalable deep hashing with regularized similarity learning for image retrieval and person re-identification. IEEE Transactions on Image Processing 24(12), 4766-4779 (2015)
|
| 260 |
+
37. Zhang, Z., Saligrama, V.: Zero-shot learning via joint latent similarity embedding. In: CVPR (2016)
|
adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:298c8ebc5b4a4d605d45b3385ae13320b76fe8ba13bb7a85868d458e65c47adb
|
| 3 |
+
size 472159
|
adaptivemargindiversityregularizerforhandlingdataimbalanceinzeroshotsbir/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fbc933e3def4928245ae4ef5b60f03e2e786855ec8859021cbb3e3c80cf80feb
|
| 3 |
+
size 320170
|
adaptivemixtureregressionnetworkwithlocalcountingmapforcrowdcounting/8104dbf5-7c8b-4e86-821c-1a6135e556ba_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bb35f5cd811107daa302044d62a89c5637977539ddb59b5d0858d4d5903e4ff6
|
| 3 |
+
size 79870
|
adaptivemixtureregressionnetworkwithlocalcountingmapforcrowdcounting/8104dbf5-7c8b-4e86-821c-1a6135e556ba_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:79a992d287a9ebe42026d9dc14461f4812c9f65b5841bdd8b7b4768a73303062
|
| 3 |
+
size 94407
|