Add Batch ab2ce00b-5e8a-48c3-9d46-31e27f9b7fe9
Browse files
This view is limited to 50 files because it contains too many changes. See raw diff.
- redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/e46dca63-94f5-4e89-b576-c99e50f629ca_content_list.json +3 -0
- redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/e46dca63-94f5-4e89-b576-c99e50f629ca_model.json +3 -0
- redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/e46dca63-94f5-4e89-b576-c99e50f629ca_origin.pdf +3 -0
- redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/full.md +202 -0
- redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/images.zip +3 -0
- redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/layout.json +3 -0
- refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/fda5b8d0-ce56-4875-9676-0c81b8fa7c89_content_list.json +3 -0
- refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/fda5b8d0-ce56-4875-9676-0c81b8fa7c89_model.json +3 -0
- refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/fda5b8d0-ce56-4875-9676-0c81b8fa7c89_origin.pdf +3 -0
- refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/full.md +391 -0
- refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/images.zip +3 -0
- refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/layout.json +3 -0
- representationlearningforresourceconstrainedkeyphrasegeneration/bf57ab26-286d-4d72-ad69-0a0bea7817dc_content_list.json +3 -0
- representationlearningforresourceconstrainedkeyphrasegeneration/bf57ab26-286d-4d72-ad69-0a0bea7817dc_model.json +3 -0
- representationlearningforresourceconstrainedkeyphrasegeneration/bf57ab26-286d-4d72-ad69-0a0bea7817dc_origin.pdf +3 -0
- representationlearningforresourceconstrainedkeyphrasegeneration/full.md +494 -0
- representationlearningforresourceconstrainedkeyphrasegeneration/images.zip +3 -0
- representationlearningforresourceconstrainedkeyphrasegeneration/layout.json +3 -0
- residuallearningofneuraltextgenerationwithngramlanguagemodel/ca51a554-0f1e-4834-b6d8-cbab18356468_content_list.json +3 -0
- residuallearningofneuraltextgenerationwithngramlanguagemodel/ca51a554-0f1e-4834-b6d8-cbab18356468_model.json +3 -0
- residuallearningofneuraltextgenerationwithngramlanguagemodel/ca51a554-0f1e-4834-b6d8-cbab18356468_origin.pdf +3 -0
- residuallearningofneuraltextgenerationwithngramlanguagemodel/full.md +307 -0
- residuallearningofneuraltextgenerationwithngramlanguagemodel/images.zip +3 -0
- residuallearningofneuraltextgenerationwithngramlanguagemodel/layout.json +3 -0
- rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/d1f6ba3e-d3d1-4fd1-aa85-df92519933af_content_list.json +3 -0
- rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/d1f6ba3e-d3d1-4fd1-aa85-df92519933af_model.json +3 -0
- rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/d1f6ba3e-d3d1-4fd1-aa85-df92519933af_origin.pdf +3 -0
- rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/full.md +317 -0
- rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/images.zip +3 -0
- rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/layout.json +3 -0
- revisitingtherolesoftextintextgames/d883147c-abf3-45e2-930a-abfa9c21b9ca_content_list.json +3 -0
- revisitingtherolesoftextintextgames/d883147c-abf3-45e2-930a-abfa9c21b9ca_model.json +3 -0
- revisitingtherolesoftextintextgames/d883147c-abf3-45e2-930a-abfa9c21b9ca_origin.pdf +3 -0
- revisitingtherolesoftextintextgames/full.md +381 -0
- revisitingtherolesoftextintextgames/images.zip +3 -0
- revisitingtherolesoftextintextgames/layout.json +3 -0
- revisitingtransformerbasedmodelsforlongdocumentclassification/4a5cfe1e-a844-47bf-b5de-92c2d02317b6_content_list.json +3 -0
- revisitingtransformerbasedmodelsforlongdocumentclassification/4a5cfe1e-a844-47bf-b5de-92c2d02317b6_model.json +3 -0
- revisitingtransformerbasedmodelsforlongdocumentclassification/4a5cfe1e-a844-47bf-b5de-92c2d02317b6_origin.pdf +3 -0
- revisitingtransformerbasedmodelsforlongdocumentclassification/full.md +442 -0
- revisitingtransformerbasedmodelsforlongdocumentclassification/images.zip +3 -0
- revisitingtransformerbasedmodelsforlongdocumentclassification/layout.json +3 -0
- robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/d0c305d6-88a5-4b7c-a7a3-0915e07c5604_content_list.json +3 -0
- robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/d0c305d6-88a5-4b7c-a7a3-0915e07c5604_model.json +3 -0
- robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/d0c305d6-88a5-4b7c-a7a3-0915e07c5604_origin.pdf +3 -0
- robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/full.md +411 -0
- robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/images.zip +3 -0
- robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/layout.json +3 -0
- robusttaskorienteddialoguegenerationwithcontrastivepretrainingandadversarialfiltering/a15e73ef-1aec-4cf2-ab25-137f1fe5c70c_content_list.json +3 -0
- robusttaskorienteddialoguegenerationwithcontrastivepretrainingandadversarialfiltering/a15e73ef-1aec-4cf2-ab25-137f1fe5c70c_model.json +3 -0
redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/e46dca63-94f5-4e89-b576-c99e50f629ca_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b578d0212dd7b5d2120ebc6d0cdf3fbb1349275d106d2ab78ef8d48a7a037b6a
+size 50796
redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/e46dca63-94f5-4e89-b576-c99e50f629ca_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bb0454907d89069641c936d15a692907a50e4d42716e9b7ebb1ba635ad02a810
+size 61117
redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/e46dca63-94f5-4e89-b576-c99e50f629ca_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:371807920b8f4298bf1184c26409e6ebc481d44463f0ed09d09a38489d1c7f16
+size 1423994
redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/full.md
ADDED
@@ -0,0 +1,202 @@
# RedApt: An Adaptor for WAV2VEC 2 Encoding Faster and Smaller Speech Translation without Quality Compromise

Jinming Zhao, Hao Yang, Gholamreza Haffari, Ehsan Shareghi

Department of Data Science & AI, Monash University

first.last@{monash.edu}

# Abstract

Pre-trained speech Transformers have facilitated state-of-the-art (SotA) results in speech translation (ST); yet, using such encoders is computationally expensive. To improve this, we present a novel Reducer Adaptor block, RedApt, that can be seamlessly integrated within any Transformer-based speech encoding architecture. Integrating the pretrained WAV2VEC 2 speech encoder with RedApt brings a $41\%$ speedup and a $33\%$ memory reduction, with $24\%$ fewer FLOPs at inference. To our positive surprise, our ST model with RedApt outperforms the SotA architecture by an average of 0.68 BLEU on 8 language pairs from Must-C.

# 1 Introduction

Leveraging pre-trained speech Transformers, such as WAV2VEC 2 (w2v2) (Baevski et al., 2020), in speech-to-text translation (ST) systems has established state-of-the-art (SotA) results across several languages (Li et al., 2021; Ye et al., 2021). Meanwhile, the high computational cost of such encoders is well-documented (Wu et al., 2022) and is mainly attributed to the self-attention mechanism inside Transformers (Vaswani et al., 2017).

However, the speech modality introduces unique challenges compared to text: representations of raw speech signals are orders of magnitude longer<sup>1</sup>, while empirical findings suggest that high-quality ST systems require much deeper encoders than text-to-text translation (Wang et al., 2020). Consequently, training and inference with such encoders are expensive, in terms of memory and time, even on reasonably powerful hardware.<sup>2</sup>

As a mitigation, Li et al. (2021) augmented w2v2 at the output with CNN layers to reduce the representation length, and Zhao et al. (2022) proposed a Transformer-based adaptor to shrink a sequence. Yet, the complexity of encoding remains high. Wu et al. (2022) proposed lower feature dimensions in w2v2, which improved efficiency at the expense of a performance drop. Earlier studies focused on designing feature selection modules at the input level by using phone labels for merging adjacent vectors (Salesky and Black, 2020), using a dynamic sparsification mechanism (Zhang et al., 2020), or injecting Connectionist Temporal Classification (Gaido et al., 2021) to regulate feature passing between layers. These approaches improve speed at inference but are limited as they rely either on hand-crafted features, transcripts, or external modules.<sup>3</sup>

In this work we focus on w2v2, one of the most widely used pre-trained speech encoders for ST, and propose a novel block, Reducer Adaptor (RedApt), to reduce the computational load of processing speech sequences through w2v2 while improving translation quality. Our approach does not require any additional pretraining or information beyond the audio input, and works similarly to adaptor blocks (Houlsby et al., 2019) placed on top of any layer of a pretrained Transformer, but trained along with the underlying Transformer on the ST task.

Through extensive experiments on 8 language pairs from Must-C, we show that integrating RedApt into WAV2VEC 2 yields a $41\%$ speedup, $33\%$ memory savings, and $24\%$ fewer FLOPs at inference time. Meanwhile, our ST model outperforms the existing SotA by 0.68 BLEU. To the best of our knowledge, we are the first to target the efficiency of pretrained speech encoders for ST. We hope this will facilitate further improvement across a broader range of speech processing tasks that require pretrained speech encoders.

# 2 Reducer Adaptor (RedApt)

Our proposed RedApt has three key properties: (i) it can be integrated seamlessly with a pretrained speech Transformer such as WAV2VEC 2 (w2v2), (ii) it flexibly reduces the computational load of encoding (both in terms of memory and time), and (iii) it retains downstream ST performance. While beneficial for training, RedApt is particularly useful for the repetitive nature of the inference phase. In this section, we present the RedApt architecture and show how it can be integrated into WAV2VEC 2 and a full speech-to-text translation (ST) system.<sup>4</sup>

# 2.1 Architecture of RedApt

The core idea is to pool a temporal speech sequence to reduce its length while learning local information from the shrunk sequence. Suppose the w2v2 feature encoder yields a sequence, $\mathbf{a}$. As shown in Figure 1 (w2v2 is omitted for brevity), RedApt is built on two CNN blocks which are wrapped by layer normalization, a residual connection, and the GELU nonlinear activation.

The first CNN block is a pooling module to shrink the length of $\mathbf{a}$. It is parameterized with kernel $k$, stride length $s$ and padding $p$. The input sequence length can thus be reduced,

$$
n' = \left\lfloor \frac{n + 2p - k}{s} \right\rfloor + 1 \tag{1}
$$

where $n$ and $n'$ are the lengths of the original and shortened sequences, respectively. After GELU activation, we obtain $\mathbf{a}'$. The second CNN block learns shared position-wise kernels within a given window, which can re-capture local information,

$$
\mathbf{a}'' = \mathbf{a}' + \operatorname{GELU}\left(\operatorname{Norm}\left(\operatorname{CNN}\left(\mathbf{a}'\right)\right)\right) \tag{2}
$$

The intuition is that certain information (e.g., positional awareness (Dai et al., 2020)) is lost during pooling and requires restoration. The inclusion of layer normalization, residual connection, and nonlinearity follows the same rationale as Transformer blocks (Vaswani et al., 2017). The total number of parameters for a single block of RedApt is 11.5M.
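
As an illustration of §2.1, the following is a minimal PyTorch sketch of a RedApt-style block. It is not the authors' released code; module names and defaults are illustrative, with the feature dimension set to 1024 (w2v2 LARGE) and the ⟨kernel, stride, padding⟩ settings of ⟨3, 2, 1⟩ and ⟨3, 1, 1⟩ borrowed from Appendix A.2.

```python
import torch
import torch.nn as nn


class RedAptBlock(nn.Module):
    """Sketch of a RedApt-style reducer adaptor block (Eqs. 1-2).

    The first Conv1d pools the time axis (its stride shortens the sequence
    following Eq. 1); the second Conv1d keeps the length and re-captures
    local information, added back via a residual connection as in Eq. 2.
    """

    def __init__(self, d_model: int = 1024,
                 pool_kernel: int = 3, pool_stride: int = 2, pool_pad: int = 1,
                 local_kernel: int = 3, local_pad: int = 1):
        super().__init__()
        self.pool_conv = nn.Conv1d(d_model, d_model, pool_kernel,
                                   stride=pool_stride, padding=pool_pad)
        self.local_conv = nn.Conv1d(d_model, d_model, local_kernel,
                                    stride=1, padding=local_pad)
        self.norm = nn.LayerNorm(d_model)
        self.act = nn.GELU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: (batch, time, d_model); Conv1d expects (batch, channels, time).
        a_prime = self.act(self.pool_conv(x.transpose(1, 2))).transpose(1, 2)
        # Eq. 2: a'' = a' + GELU(Norm(CNN(a')))
        local = self.local_conv(a_prime.transpose(1, 2)).transpose(1, 2)
        return a_prime + self.act(self.norm(local))


if __name__ == "__main__":
    block = RedAptBlock()
    speech = torch.randn(2, 500, 1024)   # 500 frames in,
    print(block(speech).shape)           # roughly half out: (2, 250, 1024)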
# 2.2 RedApt Integration into WAV2VEC 2

The WAV2VEC 2 (w2v2) architecture starts with a CNN-based feature encoder that extracts features from raw speech signals while performing down-sampling. A quantization module is attached on top of the feature encoder to learn discrete latent speech vectors. The output of the feature encoder is masked and forwarded into a context network, consisting of 24 Transformer blocks and 16 self-attention heads (for the LARGE configuration), to learn contextualized speech representations. The entire model is pretrained with a contrastive loss to distinguish a true masked latent vector from those generated by the model. After pretraining, only the feature extractor and the context network are finetuned on downstream tasks.

Figure 1: The RedApt architecture and integration.

RedApt progressively shrinks a temporal sequence during the forward propagation in the w2v2 Transformer-based encoder. Assuming an integration of $m$ RedApt blocks into the Transformer blocks, the RedApt blocks compress a sequence by a factor of $s^m$. As each Transformer block has quadratic complexity w.r.t. the input length $n_0$, pooling tensors has significant benefits for the memory and compute requirements of w2v2. Denoting the sequence length at the $i$-th layer of w2v2 as $n_i$ and the stride size as $s_i$, the complexity of each subsequent Transformer layer (until the next pooling) is $O\left(\left(\frac{n_i}{s_i}\right)^2\right)$, compared to $O(n_0^2)$ in a vanilla Transformer block.
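
To make the length-reduction arithmetic concrete, here is a small sketch (not from the paper) that applies Eq. 1 layer by layer for a chosen set of RedApt positions and reports the total self-attention cost relative to the un-pooled encoder; the stride-2 pooling, 24-layer depth, and example positions mirror the w2v2 LARGE setup described above.

```python
def pooled_length(n: int, k: int = 3, s: int = 2, p: int = 1) -> int:
    """Output length of the pooling CNN (Eq. 1)."""
    return (n + 2 * p - k) // s + 1


def relative_attention_cost(n0: int, num_layers: int = 24,
                            redapt_before: tuple = (13, 15, 20)) -> float:
    """Sum of n_i^2 over all layers, as a fraction of the un-pooled encoder,
    assuming a stride-2 RedApt block is applied before each listed layer."""
    n, cost = n0, 0
    for layer in range(num_layers):
        if layer in redapt_before:
            n = pooled_length(n)
        cost += n ** 2
    return cost / (num_layers * n0 ** 2)


if __name__ == "__main__":
    print(pooled_length(500))                          # 250
    print(round(relative_attention_cost(500), 3))      # fraction of baseline cost
```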
# 2.3 RedApt Integration into ST

We are motivated to capitalize on pre-trained modules, w2v2 and mBART (Liu et al., 2020), that were trained on large unlabelled and labelled data. Our ST model consists of the w2v2+RedApt encoder and an mBART decoder. Since the goal of our work is to enable faster signal encoding, we train the encoder while freezing the decoder in all experiments. Unlike Gallego et al. (2021), who use staged training, we train w2v2 and RedApt jointly in one step.

# 3 Experiments

In this section, we first describe our experimental settings (§3.1). Next, we investigate the effect of the number of RedApt blocks (§3.2) and their positions (§3.3) on speed, memory, and translation quality. Then, we evaluate our ST model on 8 language pairs from the Must-C benchmark (§3.4) and analyze inference time (§3.5). Lastly, we provide an ablation study of the RedApt components (§3.6).

# 3.1 Experimental Settings

Dataset. We use the Must-C dataset (Cattoni et al., 2021), a multilingual ST corpus collected from TED talks. We experimented with 8 language pairs, using English (EN) as source and the following as targets: German (DE), Romanian (RO), Spanish (ES), French (FR), Dutch (NL), Portuguese (PT), Russian (RU), and Italian (IT). The data is preprocessed and filtered following the steps outlined in Gallego et al. (2021). The best systems were selected on the dev sets, and results are reported on the test set (tst-COMMON). We use the EN-DE pair for the detailed analysis and ablation.

Implementation Details. Similar to modern ST architectures (Gallego et al., 2021; Tang et al., 2020), we use pretrained w2v2 LARGE as our encoder and pretrained mBART50 as our decoder. We randomly initialize the top 3 layers of w2v2 in experiments involving RedApt and find that this enables faster convergence, verifying earlier observations by Sun et al. (2021). We freeze the w2v2 feature extractor. For full details on training configurations and hardware, please refer to Appendix A.2.

Baseline. We use the SotA ST model from Gallego et al. (2021) as our baseline. The model uses a similar w2v2 LARGE encoder, along with a CNN-based length adapter on top of the encoder which reduces the sequence length by a factor of 8. The decoder is similar to ours and is frozen during training for a fair comparison between the two models. This model is denoted as w2v2+ hereafter. While RedApt offers various degrees of layer-wise reduction, for comparability in the translation experiments on 8 language pairs (§3.4), we use the configuration with the same reduction factor (i.e., 3 blocks of RedApt) and total encoder parameter size.

Metrics. We measure efficiency at inference in terms of throughput (the number of speech inputs that can be processed in a unit of time), memory (GPU memory usage), and FLOPs (floating-point operations performed for a single input; higher FLOPs means slower inference). See Appendix A.4 for more details. We use BLEU<sup>5</sup> to evaluate translation quality.

| $m$ | 0 | 1 | 2 | 3 | 4 |
|---|---|---|---|---|---|
| BLEU | 26.51 | 27.69 | 27.61 | 27.42 | 25.86 |
| Throughput ↑ | 1.00x | 1.22x | 1.26x | 1.31x | 1.35x |
| Memory ↓ | 1.00x | 0.80x | 0.77x | 0.73x | 0.67x |
| FLOPs ↓ | 1.00x | 0.86x | 0.84x | 0.81x | 0.76x |

Table 1: BLEU, throughput, memory consumption, and FLOPs at training and inference for various numbers of RedApt blocks $m = \{1, 2, 3, 4\}$; the positions of the blocks are $[15]$, $[15, 20]$, $[15, 18, 19]$, and $[14, 15, 18, 19]$, respectively.
# 3.2 Selecting the Number of RedApt Blocks

We examine the impact of RedApt on translation quality and efficiency by injecting various numbers of RedApt blocks, $m = \{0, 1, 2, 3, 4\}$, where 0 refers to the baseline and the rest indicate our models. As a heuristic for setting the cap on $m$, we use the maximum representation-length reduction that matches the length of the corresponding text transcript, to avoid the risk of information loss. The intuition is that, given the same content, the length of the speech representation (after w2v2) should not be less than that of the text representation; the former may vary depending on the degree of compression, whereas the latter is a fixed number. This reflects the fact that text, unlike speech, carries only the content information that is essential for the translation task. Further investigation of the optimality of text representations compared with speech is beyond our current focus, and we leave it to future work.

Table 1 summarizes the results.<sup>6</sup> Overall, we achieve significant throughput speedup, memory footprint savings, and FLOPs reduction as $m$ increases, while the trend follows a law of diminishing returns. All models with $m \leq 3$ retained the same level of translation quality as the baseline ($m = 0$). In particular, for $m = 3$, memory consumption, throughput, and FLOPs are $0.73\times$, $1.27\times$, and $0.81\times$ of the baseline. The gains from reduced computational cost can be re-invested to increase the batch size at inference, and we report how these metrics change as the batch size varies in Appendix A.3. We observe a trade-off associated with $m$ in translation quality and efficiency, and we set $m$ to 3 in all our following experiments. For brevity, we hereafter use RedApt to refer to our ST models in which RedApt blocks are injected into the encoder. In the next section, we investigate the layers at which the RedApt blocks are placed.

# 3.3 Selecting the Positions of RedApt Blocks

We investigated various positions for the RedApt blocks. Since it is not practical to experiment with all $24^{m}$ choices, to determine optimal positions we apply a backward selection mechanism starting from the configuration [14, 15, 18, 19], removing one position or replacing it with another position. For selection purposes, we segment the Transformer network of WAV2VEC 2 into two buckets, i.e., low-mid (0-11) and mid-top (12-23). While we treat positions as a hyper-parameter in our work, a more principled approach could frame this as neural architecture search (Elsken et al., 2019). We leave further investigation to future work.

The majority of the models with exceptional performance come from the mid-high layers. Table 2 presents BLEU scores, memory usage, throughput and FLOPs for EN-DE, with different position configurations. We observe that injecting RedApt into the lower levels of w2v2 takes a major toll on BLEU. This verifies the earlier findings on text Transformers (Goyal et al., 2020) that higher layers typically convey similar overlapping information, while disturbing the lower layers could result in a great deal of information loss. Additionally, compressing sequences at lower levels has a greater impact on the pre-trained weights in the subsequent layers, which can result in optimization issues.

| Model | Positions | BLEU | T.put ↑ | Mem ↓ | FLOPs ↓ |
|---|---|---|---|---|---|
| w2v2+ | - | 26.51 | 1.00x | 1.00x | 1.00x |
| RedApt | [2,5,6] | 0.7$^{\diamond}$ | 1.47x | 0.28x | 0.45x |
| | [7,9,11] | 22.02 | 1.41x | 0.45x | 0.58x |
| | [13,15,20] | 27.24 | 1.41x | 0.67x | 0.76x |
| | [14,18,20] | 26.83 | 1.33x | 0.60x | 0.80x |
| | [15,18,19] | 27.42 | 1.31x | 0.73x | 0.81x |
| | [16,18,20] | 27.17 | 1.28x | 0.76x | 0.83x |
| | [17,19,20] | 26.78 | 1.24x | 0.79x | 0.85x |

Table 2: Comparison of different position configurations in terms of translation quality, throughput (T.put), memory (Mem) and FLOPs, at training and inference time. $\diamond$: models did not converge. **Bold:** best BLEU score. **Underline:** best configuration.

We choose our best configuration, [13,15,20], which exhibits efficiency improvements of $1.41\times$ in throughput, $0.67\times$ in memory usage and $0.76\times$ in FLOPs, and use it in our subsequent translation experiments on 8 language pairs.
# 3.4 Translation Quality on Must-C

Table 3 reports the results for the 8 Must-C language pairs, using our best configuration (§3.3). Our ST models outperform the baseline models (§3.1) on 5 language pairs by a large margin, while being comparable on the rest. On average, we observe a boost of 0.68 BLEU across the 8 languages. We speculate that these gains could be attributed to the positive impact of dimensionality reduction on filtering out redundancy and noise from the representations, verifying the earlier observations by Zhang et al. (2020).

| Model | DE | RO | ES | FR | NL | PT | RU | IT |
|---|---|---|---|---|---|---|---|---|
| w2v2+ | 26.51 | 24.66 | 30.04 | 36.26 | 31.08 | 32.67 | 17.19 | 22.13 |
| RedApt | 27.24 | 24.34 | 30.49 | 37.59 | 29.82 | 32.65 | 18.08 | 25.73 |

Table 3: Translation BLEU of the SotA model (w2v2+) and our model on 8 language pairs from Must-C.

# 3.5 Inference Time for ST

In order to measure the inference time of the entire ST model, we partitioned the audio of the EN-DE test set into 5 buckets based on length (in seconds). Compared to the baseline, the decoding speedups are $7\%$, $7\%$, $5\%$, $3\%$, and $3\%$ for the buckets $(0,4)$, $[4,8)$, $[8,13)$, $[13,20]$, and $(20,\infty)$, which contain 1024, 896, 384, 128 and 64 examples, respectively. As expected, the efficiency gain in encoding tends to vanish in the full ST setup due to the depth of the mBART decoder (i.e., 12 layers) and the auto-regressive decoding.

# 3.6 Ablation of RedApt Components

To study the contribution of each component of the RedApt block beyond the first CNN block (§2.1), we conduct an ablation by removing the remaining three components one at a time. The ablation (on EN-DE) for our best configuration (§3.3) indicates that removing the second CNN block leads to a 0.44 BLEU drop, while removing either the GELU or the LayerNorm leads to convergence issues (neither model converges). We report further details and positions in Appendix A.5.

# 4 Conclusion

We proposed a novel dimensionality reduction block, RedApt, to improve the efficiency of pretrained speech encoders, e.g., WAV2VEC 2 (w2v2), in speech translation (ST). We demonstrated that the integration of RedApt brings $1.41\times$ throughput, $0.67\times$ memory usage, and $0.76\times$ FLOPs at inference. Meanwhile, compared with the SotA, our ST system on average improves translation quality by 0.68 BLEU over the 8 Must-C language pairs. As future work, we will investigate the impact of RedApt on other speech processing tasks (Yang et al., 2021), as well as learning the optimal positions for injecting RedApt blocks via neural architecture search.

# 5 Limitations

While hardware requirements are a common challenge shared across all modern ST models, it is worth mentioning that our work requires GPUs with 16 GB of memory for inference and 48 GB of memory for training.

# 6 Ethics Statement

Our work leverages pretrained models (WAV2VEC 2 for speech, mBART for text). However, our method is not designed or intended to rectify any of the well-documented issues of such models. Hence, our work inherits the potential risks that these models pose.

# References
Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. 2020. wav2vec 2.0: A framework for self-supervised learning of speech representations. Advances in Neural Information Processing Systems, 33:12449-12460.

Roldano Cattoni, Mattia Antonino Di Gangi, Luisa Bentivogli, Matteo Negri, and Marco Turchi. 2021. MuST-C: A multilingual corpus for end-to-end speech translation. Computer Speech & Language, 66:101155.

Heng-Jui Chang, Shu-wen Yang, and Hung-yi Lee. 2022. DistilHuBERT: Speech representation learning by layer-wise distillation of hidden-unit BERT. In ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7087-7091. IEEE.

Zihang Dai, Guokun Lai, Yiming Yang, and Quoc Le. 2020. Funnel-Transformer: Filtering out sequential redundancy for efficient language processing. Advances in Neural Information Processing Systems, 33:4271-4282.

Thomas Elsken, Jan Hendrik Metzen, and Frank Hutter. 2019. Neural architecture search: A survey. The Journal of Machine Learning Research, 20(1):1997-2017.

Marco Gaido, Mauro Cettolo, Matteo Negri, and Marco Turchi. 2021. CTC-based compression for direct speech translation. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 690-696.

Gerard I. Gallego, Ioannis Tsiamas, Carlos Escolano, José A. R. Fonollosa, and Marta R. Costa-jussà. 2021. End-to-end speech translation with pre-trained models and adapters: UPC at IWSLT 2021. In Proceedings of the 18th International Conference on Spoken Language Translation (IWSLT 2021), pages 110-119.

Saurabh Goyal, Anamitra Roy Choudhury, Saurabh Raje, Venkatesan Chakaravarthy, Yogish Sabharwal, and Ashish Verma. 2020. PoWER-BERT: Accelerating BERT inference via progressive word-vector elimination. In International Conference on Machine Learning, pages 3690-3699. PMLR.

Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin de Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In Proceedings of the 36th International Conference on Machine Learning, ICML 2019, 9-15 June 2019, Long Beach, California, USA, volume 97 of Proceedings of Machine Learning Research, pages 2790-2799. PMLR.

Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.

Cheng-I Jeff Lai, Yang Zhang, Alexander H. Liu, Shiyu Chang, Yi-Lun Liao, Yung-Sung Chuang, Kaizhi Qian, Sameer Khurana, David Cox, and Jim Glass. 2021. PARP: Prune, adjust and re-prune for self-supervised speech recognition. Advances in Neural Information Processing Systems, 34.

Xian Li, Changhan Wang, Yun Tang, Chau Tran, Yuqing Tang, Juan Pino, Alexei Baevski, Alexis Conneau, and Michael Auli. 2021. Multilingual speech translation from efficient finetuning of pretrained models. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 827-838.

Yinhan Liu, Jiatao Gu, Naman Goyal, Xian Li, Sergey Edunov, Marjan Ghazvininejad, Mike Lewis, and Luke Zettlemoyer. 2020. Multilingual denoising pre-training for neural machine translation. Transactions of the Association for Computational Linguistics, 8:726-742.

Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics (Demonstrations), pages 48-53.

Zilun Peng, Akshay Budhkar, Ilana Tuil, Jason Levy, Parinaz Sobhani, Raphael Cohen, and Jumana Nassour. 2021. Shrinking Bigfoot: Reducing wav2vec 2.0 footprint. arXiv preprint arXiv:2103.15760.

Tomasz Potapczyk, Paweł Przybysz, Marcin Chochowski, and Artur Szumaczuk. 2019. Samsung's system for the IWSLT 2019 end-to-end speech translation task. In Proceedings of the 16th International Conference on Spoken Language Translation.

Elizabeth Salesky and Alan W. Black. 2020. Phone features improve speech translation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2388-2397.

Zewei Sun, Mingxuan Wang, and Lei Li. 2021. Multilingual translation via grafting pre-trained language models. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 2735-2747.

Yuqing Tang, Chau Tran, Xian Li, Peng-Jen Chen, Naman Goyal, Vishrav Chaudhary, Jiatao Gu, and Angela Fan. 2020. Multilingual translation with extensible multilingual pretraining and finetuning. arXiv preprint arXiv:2008.00401.

Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems 30, pages 5998-6008. Curran Associates, Inc.

Changhan Wang, Yun Tang, Xutai Ma, Anne Wu, Dmytro Okhonko, and Juan Pino. 2020. fairseq S2T: Fast speech-to-text modeling with fairseq. In Proceedings of the 1st Conference of the Asia-Pacific Chapter of the Association for Computational Linguistics and the 10th International Joint Conference on Natural Language Processing: System Demonstrations, pages 33-39.

Felix Wu, Kwangyoun Kim, Jing Pan, Kyu J. Han, Kilian Q. Weinberger, and Yoav Artzi. 2022. Performance-efficiency trade-offs in unsupervised pre-training for speech recognition. In ICASSP 2022 - 2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 7667-7671. IEEE.

Shu-Wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y. Lin, Andy T. Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, Tzu-Hsien Huang, Wei-Cheng Tseng, Ko-tik Lee, Da-Rong Liu, Zili Huang, Shuyan Dong, Shang-Wen Li, Shinji Watanabe, Abdelrahman Mohamed, and Hung-yi Lee. 2021. SUPERB: Speech processing Universal PERformance Benchmark. In Interspeech 2021, 22nd Annual Conference of the International Speech Communication Association, Brno, Czechia, 30 August - 3 September 2021, pages 1194-1198. ISCA.

Rong Ye, Mingxuan Wang, and Lei Li. 2021. End-to-end speech translation via cross-modal progressive training. arXiv preprint arXiv:2104.10380.

Biao Zhang, Ivan Titov, Barry Haddow, and Rico Sennrich. 2020. Adaptive feature selection for end-to-end speech translation. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2533-2544.

Jinming Zhao, Hao Yang, Gholamreza Haffari, and Ehsan Shareghi. 2022. M-Adapter: Modality adaptation for end-to-end speech-to-text translation. In Interspeech 2022, 23rd Annual Conference of the International Speech Communication Association, Incheon, Korea, 18-22 September 2022, pages 111-115. ISCA.
# A Appendix

# A.1 Dataset

We use all 8 language pairs from Must-C. Please refer to Cattoni et al. (2021) for details on the dataset sizes and the train/dev/test splits. We adopt the filtering techniques proposed in Gallego et al. (2021). We remove instances whose audio is over 25 seconds long. We then filter out examples whose transcriptions 1) contain speaker names or non-textual events, or 2) start with certain patterns indicating noise. Next, we apply ASR to the audio and remove those whose ASR outputs have low WER scores.

To diversify the training data, we also apply data augmentation to the audio on-the-fly, an effective technique in ST (Potapczyk et al., 2019). We apply "tempo" and "pitch" effects to make the model invariant to speaking speed, and "echo" to simulate echoing. Each instance is augmented with a probability of 0.8, where all effects are applied. We then normalize it to zero mean and unit variance. The parameters of tempo, pitch, echo-delay and echo-decay are set to (0.85, 1.3), (-300, 300), (20, 200) and (0.05, 0.2), respectively.
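
A hedged sketch of this on-the-fly augmentation using torchaudio's sox effects is shown below; drawing the parameters uniformly from the stated ranges and the echo gain-in/gain-out values are assumptions not specified above.

```python
import random

import torch
import torchaudio


def augment(waveform: torch.Tensor, sample_rate: int = 16000,
            prob: float = 0.8) -> torch.Tensor:
    """Apply tempo, pitch, and echo with probability `prob`, then normalize."""
    if random.random() < prob:
        effects = [
            ["tempo", f"{random.uniform(0.85, 1.3):.3f}"],
            ["pitch", f"{random.uniform(-300, 300):.1f}"],   # shift in cents
            ["echo", "0.8", "0.9",                            # gains: assumed
             f"{random.uniform(20, 200):.0f}",                # delay (ms)
             f"{random.uniform(0.05, 0.2):.3f}"],             # decay
        ]
        waveform, _ = torchaudio.sox_effects.apply_effects_tensor(
            waveform, sample_rate, effects)
    # Normalize to zero mean and unit variance.
    return (waveform - waveform.mean()) / (waveform.std() + 1e-5)
```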
# A.2 Training Details

All models are trained with fairseq (Ott et al., 2019) on 4 RTX 6000 GPUs, using 16-bit floating-point precision, for $25k$ updates. We use WAV2VEC 2<sup>7</sup> and the mBART50<sup>8</sup> decoder. We limit the source and target lengths to $400\mathrm{k}$ samples (i.e., 25 seconds) and 1,024 tokens, respectively. We use the Adam optimizer (Kingma and Ba, 2015) with $\beta_{1} = 0.99$ and $\beta_{2} = 0.98$. We set the dropout to 0.1, the clip norm to 20, and the label smoothing value to 0.2. For the baseline models, we use a learning rate of 5e-04 and reduce it when the loss stops improving. Depending on the speech lengths for each source language, we set the average batch size to either 64 or 128. For our models, we use a learning rate of 5e-04 for DE and NL, 4e-04 for FR, and 3e-04 for the rest, and we also decrease the learning rate at plateau. We use an effective batch size of 64 for all language pairs. We set the kernel size, stride and padding for the two CNN blocks in RedApt to $\langle 3, 2, 1 \rangle$ and $\langle 3, 1, 1 \rangle$. We report BLEU results on single models without checkpoint averaging.

Figure 2: Memory usage (upper), throughput (middle), and FLOPs (bottom) as the batch size varies.
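
For illustration, the optimizer-related settings above map roughly to the following PyTorch calls; this is a sketch, not the authors' fairseq configuration, and the placeholder model stands in for the w2v2+RedApt encoder.

```python
import torch
import torch.nn as nn

# Placeholder module standing in for the encoder being trained.
model = nn.Linear(1024, 1024)

# Adam with the stated betas and learning rate; reduce the LR at plateau.
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4, betas=(0.99, 0.98))
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min")

# Label smoothing of 0.2 for the cross-entropy training objective.
criterion = nn.CrossEntropyLoss(label_smoothing=0.2)

# Gradient clipping with the stated max norm (applied at each update step).
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=20.0)
```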
# A.3 Memory and Throughput vs. Batch Size

See Figure 2 for memory usage (upper), throughput (middle), and FLOPs (bottom) for each model as we increase the batch size until GPU memory is full. Our methods make better use of GPU memory.

# A.4 Measuring Throughput and FLOPs

We set the length of the raw signals to 88,000 samples, which is the average signal length in the training set, and the batch size to 64 when computing throughput and memory. We use a batch size of 1 for calculating FLOPs, whose value is mainly affected by the input length. All tests are performed on one RTX 8000 GPU.
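
A minimal sketch of how throughput could be measured under this protocol (fixed 88,000-sample inputs, batch size 64, GPU timing); the encoder object and the number of timed batches are placeholders.

```python
import time

import torch


@torch.no_grad()
def throughput(encoder: torch.nn.Module, batch_size: int = 64,
               signal_len: int = 88_000, n_batches: int = 20,
               device: str = "cuda") -> float:
    """Return encoded examples per second for fixed-length raw-audio batches."""
    encoder = encoder.to(device).eval()
    batch = torch.randn(batch_size, signal_len, device=device)
    for _ in range(3):                 # warm-up iterations
        encoder(batch)
    torch.cuda.synchronize()
    start = time.perf_counter()
    for _ in range(n_batches):
        encoder(batch)
    torch.cuda.synchronize()
    return n_batches * batch_size / (time.perf_counter() - start)
```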
# A.5 Ablation of RedApt Components

Table 4 shows the effect of removing each component of RedApt.

| Positions | 2nd CNN | LayerNorm | GELU | BLEU |
|---|---|---|---|---|
| 13-15-20 | ✓ | ✓ | ✓ | 27.24 |
| | - | ✓ | ✓ | 26.80 |
| | ✓ | - | ✓ | $\diamond$ |
| | ✓ | - | - | $\diamond$ |
| 15-18-19 | ✓ | ✓ | ✓ | 27.42 |
| | - | ✓ | ✓ | 27.11 |
| | ✓ | - | ✓ | $\diamond$ |
| | ✓ | - | - | $\diamond$ |

Table 4: Ablation on the acoustic pooler. $\diamond$: models did not converge.
redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f072cb031c9e099383d6f5b7fae4317c23aecbb9f49562ac14f7d2eedd9d150a
+size 158651
redaptanadaptorforwav2vec2encodingfasterandsmallerspeechtranslationwithoutqualitycompromise/layout.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b6592a4d617d1d3c2b0c0707a2a1a070e2a73815a6b643d021f9f8bb30879f9c
+size 244266
refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/fda5b8d0-ce56-4875-9676-0c81b8fa7c89_content_list.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:341056fa7b7c28656948ccf73b315f31618ec9c1b21eaaae1423cf72660772d3
+size 101098
refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/fda5b8d0-ce56-4875-9676-0c81b8fa7c89_model.json
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b9bd2a2407bd38514d1f2cb182b058a09f6aa156e517972e016f231adce87ac
+size 126184
refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/fda5b8d0-ce56-4875-9676-0c81b8fa7c89_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2aeabbdbfd727685d68220721fecab386422c3cf8b8e298a2c2fdac9f5950961
+size 1289448
refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/full.md
ADDED
@@ -0,0 +1,391 @@
# Refinement Matters: Textual Description Needs to be Refined for Zero-shot Learning

Chandan Gautam*
Institute for Infocomm Research, A*STAR
gautamc@i2r.a-star.edu.sg

Sethupathy Parameswaran*
Indian Institute of Science
setupathyp@iisc.ac.in

Vinay Kumar Verma
Duke University
vinayugc@gmail.com

Savitha Ramasamy
Institute for Infocomm Research, A*STAR
ramasamysa@i2r.a-star.edu.sg

Suresh Sundaram
Indian Institute of Science
vssuresh@iisc.ac.in

# Abstract

Zero-Shot Learning (ZSL) has shown great promise at the intersection of vision and language, and generative methods for ZSL are predominant owing to their efficacy. Moreover, the textual description or attribute plays a critical role in transferring knowledge from the seen to the unseen classes in ZSL. However, such generative approaches for ZSL are very costly to train and require the class descriptions of the unseen classes during training. In this work, we propose a non-generative gating-based attribute refinement network for ZSL, which achieves accuracies similar to generative ZSL methods at a much lower computational cost. The refined attributes are mapped into the visual domain through an attribute embedder, and the whole network is guided by the circle loss and the well-known softmax cross-entropy loss to obtain a robust class embedding. We refer to our approach as Circle loss guided gating-based Attribute-Refinement Network (CARNet). We perform extensive experiments on five benchmark datasets over various challenging scenarios, viz., Generalized ZSL (GZSL), Continual GZSL (CGZSL), and conventional ZSL. We observe that CARNet significantly outperforms recent non-generative ZSL methods and most generative ZSL methods in all three settings. Our extensive ablation study disentangles the performance of the various components and justifies their importance<sup>1</sup>.

# 1 Introduction

Humans can recognize samples from unseen classes by leveraging the visual information of seen categories and textual descriptions of seen and unseen classes (Larochelle et al., 2008; Palatucci et al., 2009; Lampert et al., 2009). Zero-Shot Learning, inspired by this recognition ability of humans, learns unseen classes through the textual description (also referred to as side-information, class attribute vector, or semantic information) (Xian et al., 2017). A typical ZSL algorithm does not need training samples from unseen classes. However, it requires the class description for both seen and unseen classes (Zhang and Saligrama, 2015; Reed et al., 2016).

| Class Name | Class Description |
|---|---|
| Anna Hummingbird | the small bird has a long, thin, pointed beak, green feathers on its back, light grey belly and dark brown wings. |
| Ruby Throated Hummingbird | this tiny bird has a ruby red throat, a long thin beak, and small but fast wings. |
| Rufous Hummingbird | very small orange bird with white feathers on its wing, and black feathers underneath its head with a black pointed beak. |

Figure 1: It can be seen that the attributes "small bird" and "long black beak" (words in bold) are common to all three species of hummingbird. However, attributes like "ruby red throat" or "orange bird" (words in red) distinguish one species from another. Hence the distinguishing attributes must be given more weight than the common attributes in the class attribute vector. We achieve this through the gating unit in the attribute refinement network.
The generative model has recently been the most popular approach for ZSL. It uses generators such as VAEs (Mishra et al., 2018; Schonfeld et al., 2019) or GANs (Narayan et al., 2020; Vyas et al., 2020) to generate synthetic samples for unseen classes using the class attribute vector. However, despite their promising results, such methods are not very efficient for the following reasons: (i) they require knowledge of the number of unseen classes and their respective attribute vectors during training, which is not always feasible, and (ii) the classifier must be retrained with seen and unseen samples for each new unseen class. On the other hand, non-generative approaches for ZSL alleviate the above problems but show inferior accuracies. Typically, the non-generative models learn a mapping in three ways: (i) visual to attribute space (Xian et al., 2016), (ii) attribute to visual space (Zhang et al., 2017; Li et al., 2019), or (iii) a joint embedding of attribute and visual space (Cacheux et al., 2019). It is to be noted that most of the existing non-generative or embedding-based ZSL approaches are formulated to learn an embedding from visual to attribute space. They assume that the seen and unseen classes share the same representational characteristics and are linked in the attribute space (Frome et al., 2013; Wang et al., 2019; Chen et al., 2021a). However, this approach leads to the well-known hubness problem (Dinu et al., 2014), where the representations are skewed towards the seen classes (Zhang et al., 2017; Li et al., 2019). Another problem with this approach is that it implicitly loses the discriminative power of visual features, which are generally extracted from a powerful pre-trained deep learning model (like ResNet (Xian et al., 2018a, 2016) and GoogleNet (Song et al., 2018)) but are then mapped to a different, smaller attribute space (Li et al., 2019, 2018).

Although the aforementioned issues in the non-generative models are mitigated by mapping the attributes to visual features (Zhang et al., 2017; Li et al., 2019; Skorokhodov and Elhoseiny, 2021), such methods have lower classification accuracies. In this work, we propose a non-generative method with an Attribute-Refinement Network (ARN) that leverages the gating mechanism. The ARN enables a highly robust representation of the description/attribute vector for the seen and unseen classes. In recent years, the gating mechanism has shown good performance without any complex architecture in supervised learning tasks (Srivastava et al., 2015; Dauphin et al., 2017; Sandler et al., 2018; Wu et al., 2018; Liu et al., 2021). In this paper, we propose a gating mechanism for refining the textual description in the ZSL task. The ARN learns to refine the attributes in a self-weighing manner from the seen class attributes (Fig. 1). These refined attributes are mapped onto the visual space using an attribute embedder (AE) to obtain the class prototype vector of each class. The class prototype vector is then combined with the visual features in the feature-prototype combiner (FPC) to obtain classifications. The ARN, AE, and FPC are trained jointly using the circle loss and standard softmax cross-entropy in such a way that the model minimizes inter-class similarity and maximizes intra-class similarity. The circle loss achieves better within-class compactness and between-class discrepancy than the triplet loss and the adaptive margin softmax loss, as it unifies both the classification and the pair-wise similarity representation objectives (Sun et al., 2020). We refer to our approach as Circle loss guided gating-based Attribute-Refinement Network (CARNet).

We evaluate the performance of CARNet in three scenarios: (a) conventional Zero-Shot Learning (ZSL), where only the unseen classes are available during inference, (b) Generalized Zero-Shot Learning (GZSL), where both the seen and unseen classes are available during inference, and (c) Continual Generalized Zero-Shot Learning (CGZSL), where data arrives as a sequence of tasks and only the current task's data is available during training, with the challenge of handling catastrophic forgetting of the past tasks. The performance of CARNet for conventional ZSL and GZSL is evaluated on five standard datasets. The CGZSL method is evaluated on the challenging CUB and SUN datasets. Extensive experiments show that CARNet outperforms recent generative models (unlike generative models, we do not require the unseen class descriptions during training) and non-generative models by a significant margin. Our ablation study emphasizes the significance of each component of the proposed learning algorithm. The main contributions of our work are summarised as follows: (i) We propose a gating-based attribute-refinement network (ARN) to enhance the class description/attribute for zero-shot learning. (ii) The ARN and AE are guided by the circle loss to achieve better within-class compactness and between-class discrepancy. (iii) We propose a highly competitive, simple, and fast non-generative method. Our model achieves a $\sim 70\times$ speedup compared to generative ZSL methods.

# 2 Related Work

The proposed CARNet is evaluated in three kinds of ZSL settings: conventional ZSL, GZSL, and CGZSL. We provide a brief survey of all three settings. ZSL aims to construct a recognition model for samples from unseen classes using the textual description (i.e., attribute information) of the unseen classes. This attribute information can be obtained in various ways, such as human-annotated attributes (Farhadi et al., 2009), textual descriptions (Reed et al., 2016), and word vectors (Socher et al., 2013; Frome et al., 2013). In recent years, there has been a surge of interest in this area. The ZSL literature can be broadly categorized into two parts: generative and non-generative (i.e., embedding-based) approaches.

The first popular category is the generative approach, which solves the ZSL problem by synthesizing unseen class samples. To synthesize samples from the unseen classes, models leverage powerful generative models such as the conditional variational autoencoder (VAE) (Mishra et al., 2018; Kumar Verma et al., 2018), the generative adversarial network (GAN) (Vyas et al., 2020; Xian et al., 2018b; Felix et al., 2018; Keshari et al., 2020; Verma et al., 2020), or a combination of VAE and GAN (Xian et al., 2019; Narayan et al., 2020).

Another popular category is the non-generative approach, which does not need the class attribute information of unseen classes during training. In early ZSL work (Lampert et al., 2009; Farhadi et al., 2009; Lampert et al., 2013), models directly predict the attribute confidence from images. Methods based on this approach can be further divided into three groups. In the first group, the visual feature is projected into the attribute (i.e., semantic) space (Lampert et al., 2013; Socher et al., 2013; Frome et al., 2013; Akata et al., 2016; Fu and Sigal, 2016). In the second group, both visual and attribute data are projected into an intermediate space (Akata et al., 2015; Fu et al., 2014; Lei Ba et al., 2015; Romera-Paredes and Torr, 2015; Cacheux et al., 2019). In the third group, the visual space is spanned by an attribute-to-visual mapping (Zhang et al., 2017; Li et al., 2019; Skorokhodov and Elhoseiny, 2021). ZSL methods developed based on the projection from attribute space to visual space are more suitable for mitigating the hubness problem, and recent works (Zhang et al., 2017; Li et al., 2019; Skorokhodov and Elhoseiny, 2021) show promising results for the ZSL and GZSL settings. Surprisingly, despite being fast, accurate, and realistic, this approach has not been explored much in the past. In this work, we consider the non-generative model for further exploration and learn the mapping from the attribute space to the visual space, similar to (Li et al., 2019; Skorokhodov and Elhoseiny, 2021).

The above-discussed ZSL methods can handle data only in an offline setting and cannot be used in a setting with a streaming sequence of tasks (Delange et al., 2021), known as Continual GZSL (CGZSL). Only a handful of studies are available for CGZSL (Chaudhry et al., 2019a; Wei et al., 2020; Skorokhodov and Elhoseiny, 2021; Gautam et al., 2021a, 2020, 2021b). For an extensive evaluation, apart from conventional ZSL and GZSL, CARNet is also evaluated in the CGZSL setting as proposed in Skorokhodov and Elhoseiny (2021).

# 3 Problem Definition

In this section, we define the problem formally and introduce the notation. The objective of ZSL is to learn a model that can generalize to novel classes (i.e., unseen classes) with the help of side information (attributes/descriptions), without training data for the novel classes. The attribute vector of each class is constructed either using a word embedding vector generated from a language model or by manually defining key features like color, size, shape, pattern, etc. Primarily, the ZSL setting consists of two sets of classes, known as seen and unseen classes. Let $\mathcal{D}_{tr}^s$ and $\mathcal{D}_{ts}$ be the training and testing data, respectively, for the $C^s$ seen and $C^u$ unseen classes. We also have the sets of seen $(\{C^s\})$ and unseen $(\{C^u\})$ classes, where $\{C^s\} \cap \{C^u\} = \phi$, i.e., the seen and unseen class sets are disjoint. It is to be noted that $\{C^s\}$ and $\{C^u\}$ denote the sets of seen and unseen classes, while $C^s$ and $C^u$ denote the numbers of seen and unseen classes, respectively. Corresponding to each seen class $i$ ($i \in \{C^s\}$) and unseen class $j$ ($j \in \{C^u\}$), there is a $d$-dimensional class attribute vector, i.e., $A_i^s \in \mathbb{R}^d$ and $A_j^u \in \mathbb{R}^d$, respectively. For ZSL training, the data is represented as $\mathcal{D}_{tr}^s = \{x_i, y_i, A_{y_i}^s\}_{i=1}^N$, where $N$ is the number of seen class images and $\{x_i, y_i\}$ is an image and label pair. During inference for conventional ZSL, we have $\mathcal{D}_{ts} = \{x_j\}_{j=1}^M$ with attribute set $A = A^u$, where $\forall j$, $x_j$ belongs to an unseen class. However, in GZSL, we have $\mathcal{D}_{ts} = \{x_j\}_{j=1}^M$ with attribute set $A = A^s \cup A^u$, where $\forall j$, $x_j$ belongs to either a seen or an unseen class. Here $A^s$ and $A^u$ are the seen and unseen class attribute information, respectively. Overall, our objective is to develop a model based on the training dataset $\mathcal{D}_{tr}$ (i.e., seen data) that generalizes over all class labels $\{C\}$, where $\{C\} = \{C^s\} \cup \{C^u\}$ and the total number of classes in $\{C\}$ is $C$.
# 4 Proposed Method
|
| 83 |
+
|
| 84 |
+
Zero-Shot Learning (ZSL) aims at classification in the absence of input images for unseen classes, using textual description, namely attribute vectors. In this section, we propose the CARNet for zero-shot
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
Figure 2: CARNet: circle loss guided gating-based attribute refinement network for ZSL. It primarily consists of three blocks: (i) ARN, (ii) AE, and (iii) FPC
|
| 88 |
+
|
| 89 |
+
learning. CARNet refines the class attribute vectors for better representation with the help of end-to-end joint loss. Here, a gating-based attribute refinement network (ARN) is used to refine the class attribute vectors. These refined class attribute vectors are mapped through an attribute embedder (AE) to extract an efficient class prototype vector corresponding to each class. The visual features are extracted using a pre-trained ResNet-101 model. These visual features are then combined with the class prototype vectors in the feature-prototype combiner (FPC) for classification, as shown in Fig. 2. The ARN, the AE, and the FPC are trained end-to-end based on the sum of two losses, namely, the circle loss and the softmax cross-entropy loss. These two losses guide the gating unit to yield refined attributes, which lead to better class prototype vectors through AE. In this section, we present a detailed description of our model CARNet.
|
| 90 |
+
|
| 91 |
+
# 4.1 Gating-based Attribute Refinement Network (ARN)
|
| 92 |
+
|
| 93 |
+
The class attribute vector plays a very crucial role in ZSL, as there are no visual samples for unseen classes during training. Moreover, the attribute vector is the only information that is available for both the seen and unseen classes. Therefore, it is highly important that the attribute representation
|
| 94 |
+
|
| 95 |
+
has minimal noise and highlights its prime dimensions. The objective of the ARN is to obtain an accurately representative class attribute vector with high weight on its key dimensions, as shown in Fig. 2. Let $A^s \in \mathbb{R}^{C^s \times d}$ be the class attribute matrix for $C^s$ seen classes where each row corresponds to the $d$ dimensional class attribute vector of the corresponding class. The ARN consists of the following stack of operations.
|
| 96 |
+
|
| 97 |
+
We first normalize the input $A^s$ across the dimension $d$ for each class independently using layer normalization (Ba et al., 2016), as $A_L^s = \text{LayerNorm}(A^s)$. On top of the layer-norm output, we apply a linear projection followed by the Gaussian error linear unit (GELU) (Hendrycks and Gimpel, 2016) activation as $A_P^s = \text{GELU}(A_L^s W_1)$, where the linear projection expands the dimension of the class attribute vector, $W_1 \in \mathbb{R}^{d \times h}$ is the projection weight, and $A_P^s \in \mathbb{R}^{C^s \times h}$.
|
| 98 |
+
|
| 99 |
+
Further, we apply the gating unit, which helps achieve a better representation of the attribute information. In the ARN, the gating unit performs cross-feature learning on the higher dimensional class attribute information vector $(A_P^s)$ . For this purpose, we split the $A_P^s$ into two parts, each with half the dimension, i.e. $A_{P1}^{s}\in \mathbb{R}^{C^{s}\times h / 2}$ and $A_{P2}^{s}\in \mathbb{R}^{C^{s}\times h / 2}$ . Both halves are processed as
|
| 100 |
+
|
| 101 |
+
follows:
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
A_{G}^{s} = A_{P1}^{s} \odot \operatorname{Conv1D}\left(\operatorname{LayerNorm}\left(A_{P2}^{s}\right)\right), \tag{1}
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
where Conv1D denotes a 1-D convolution that performs the spatial projection, $A_G^s \in \mathbb{R}^{C^s \times h/2}$, and $\odot$ denotes the Hadamard product (i.e., element-wise multiplication), which acts as a linear gate. This operation can also be interpreted as self-weighting on each dimension of the attribute vector: important dimensions receive high weights while the others receive lower weights. The Hadamard product lets the refinement network retain both sources of information (the raw $A_{P1}^s$ and the spatial projection of $A_{P2}^s$) in the output of the gating unit, $A_G^s$. During training, $\text{Conv1D}(\text{LayerNorm}(A_{P2}^s))$ is initialized as an identity mapping. Finally, the output of the ARN is obtained through residual learning, as shown below:
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
A_{R}^{s} = A_{G}^{s} W_{2} \oplus A^{s} \tag{2}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $W_{2} \in \mathbb{R}^{h/2 \times d}$ denotes the weight of the linear projection, $\oplus$ denotes element-wise addition, and $A_R^s \in \mathbb{R}^{C^s \times d}$ is the final refined class attribute information. This linear projection brings $A_G^s$ back to the same dimension as $A^s$.
|
| 114 |
+
|
| 115 |
+
Overall, the refinement network stacks the above operations one after the other, as shown in Fig. 2, and this block of operations can be repeated multiple times for stronger attribute refinement. Repeated stacking of gating units can, however, cause the vanishing gradient problem that is common in typical gating architectures; the residual learning in Eq. (2) helps alleviate this issue.
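To make the stack of operations concrete, below is a minimal PyTorch-style sketch of one ARN block implementing Eqs. (1) and (2). The hidden width `h`, the use of a kernel-size-1 `nn.Conv1d` over the class axis as the spatial projection, and the identity-style initialization are our reading of the text, not the authors' released code.

```python
import torch
import torch.nn as nn

class ARNBlock(nn.Module):
    """One gating-based attribute refinement block (sketch of Eqs. (1)-(2))."""

    def __init__(self, d: int, h: int):
        super().__init__()
        self.norm_in = nn.LayerNorm(d)
        self.proj_up = nn.Linear(d, h)              # expand attribute dimension: d -> h
        self.act = nn.GELU()
        self.norm_gate = nn.LayerNorm(h // 2)
        # 1-D convolution over the class axis acts as the spatial projection in Eq. (1).
        self.spatial = nn.Conv1d(h // 2, h // 2, kernel_size=1)
        self.proj_down = nn.Linear(h // 2, d)       # map gated features back: h/2 -> d
        # Initialize the spatial projection close to an identity mapping.
        nn.init.dirac_(self.spatial.weight)
        nn.init.zeros_(self.spatial.bias)

    def forward(self, A: torch.Tensor) -> torch.Tensor:
        # A: [C_s, d] class attribute matrix (one row per class)
        x = self.act(self.proj_up(self.norm_in(A)))   # A_P^s, shape [C_s, h]
        a1, a2 = x.chunk(2, dim=-1)                   # A_P1^s and A_P2^s, each [C_s, h/2]
        gate = self.norm_gate(a2).t().unsqueeze(0)    # [1, h/2, C_s] for Conv1d
        gate = self.spatial(gate).squeeze(0).t()      # spatial projection, back to [C_s, h/2]
        A_G = a1 * gate                               # Hadamard gating (Eq. (1))
        return self.proj_down(A_G) + A                # residual refinement (Eq. (2))
```

Several such blocks can be stacked one after the other, with the residual connection in each block keeping gradients well-behaved.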
|
| 116 |
+
|
| 117 |
+
# 4.2 Attribute Embedder (AE)
|
| 118 |
+
|
| 119 |
+
After obtaining the refined attributes for the seen classes, we perform attribute-to-visual mapping using the AE (as shown in Fig. 2) to obtain the class prototype matrix $P^s$ for the $C^s$ seen classes, where each row corresponds to the class prototype vector $p_c^s$ of the respective seen class $c \in \{C^s\}$:
|
| 120 |
+
|
| 121 |
+
$$
|
| 122 |
+
P^{s} = AE\left(A_{R}^{s}\right), \quad P^{s} \in \mathbb{R}^{C^{s} \times f}, \tag{3}
|
| 123 |
+
$$
|
| 124 |
+
|
| 125 |
+
Overall, the AE is a simple 3-layered multi-layer perceptron (MLP) architecture, which is used to perform the attribute-to-visual mapping. Here, $f$ denotes the dimension of the visual feature vector.
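As a rough sketch, the AE of Eq. (3) can be realized as a 3-layer MLP mapping the refined $d$-dimensional attributes to $f$-dimensional class prototypes. The hidden width and ReLU activations below are illustrative assumptions; the text only specifies that the AE is a 3-layer MLP.

```python
import torch.nn as nn

def build_attribute_embedder(d: int, f: int, hidden: int = 1024) -> nn.Sequential:
    """3-layer MLP mapping refined attributes [*, d] to class prototypes [*, f] (Eq. (3))."""
    return nn.Sequential(
        nn.Linear(d, hidden),
        nn.ReLU(),
        nn.Linear(hidden, hidden),
        nn.ReLU(),
        nn.Linear(hidden, f),  # f is the visual feature dimension (2048 for pooled ResNet-101 features)
    )
```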
|
| 126 |
+
|
| 127 |
+
# 4.3 Feature-Prototype Combiner (FPC)
|
| 128 |
+
|
| 129 |
+
The visual features $V_{tr}^{s}\in \mathbb{R}^{N\times f}$ are extracted by passing the images of the seen classes $(\mathcal{D}_{tr}^{s})$ through a pretrained ResNet-101 model (no finetuning). These visual features are combined with the class prototype vectors in the FPC through scaled cosine similarity between the $P^s$ and the $V_{tr}^{s}$ (Skorokhodov and Elhoseiny, 2021). The scaled cosine similarity (scos) scales and normalizes the class prototype vectors and the extracted visual features before computing the dot product between them as follows:
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
\operatorname{scos}\left(v_{tr}^{s}, p_{c}^{s}\right) = \left(\beta \cdot \frac{v_{tr}^{s}}{\|v_{tr}^{s}\|}\right)^{\top} \left(\beta \cdot \frac{p_{c}^{s}}{\|p_{c}^{s}\|}\right), \tag{4}
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
where $v_{tr}^{s} \in V_{tr}^{s}$ is the $f$-dimensional visual feature of a sample, $p_{c}^{s}$ is the class prototype vector of class $c \in \{C^s\}$, and $\beta$ is a scaling hyperparameter, which has the same effect as setting a high temperature of $\beta^2$ in the softmax (Liu et al., 2018). Here, the normalization reduces the variance of the class prototype vectors and the visual features, which helps achieve better performance.
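A minimal sketch of the scaled cosine similarity in Eq. (4), written in batched form for a matrix of visual features and a matrix of class prototypes (the batched formulation and the default $\beta = 5$ from the appendix are the only assumptions):

```python
import torch
import torch.nn.functional as F

def scaled_cos(v: torch.Tensor, P: torch.Tensor, beta: float = 5.0) -> torch.Tensor:
    """Scaled cosine similarity (Eq. (4)) between visual features v [B, f]
    and class prototypes P [C, f]; returns a [B, C] matrix of logits."""
    v = beta * F.normalize(v, dim=-1)   # beta * v / ||v||
    P = beta * F.normalize(P, dim=-1)   # beta * p_c / ||p_c||
    return v @ P.t()                    # equals beta^2 times the cosine similarity
```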
|
| 136 |
+
|
| 137 |
+
# 4.4 Training of the CARNet using only Seen Classes
|
| 138 |
+
|
| 139 |
+
The CARNet is trained by minimizing the circle loss and the softmax cross-entropy loss over the end-to-end network comprising the ARN and the AE. We present these loss functions and the learning algorithm of CARNet in this subsection. Without loss of generality, let $v_{tr}^{s} \in V_{tr}^{s}$ be the extracted visual feature of a sample belonging to the seen class $k \in \{C^s\}$.
|
| 140 |
+
|
| 141 |
+
Circle Loss: Two kinds of losses are commonly used in the literature: losses such as L2-softmax, AM-softmax, and angular softmax are good candidates for classification, while losses such as the triplet loss, N-pair loss, contrastive loss, and margin loss are good candidates for pair-wise similarity learning. The circle loss (Sun et al., 2020) aims to unify both the classification and the pair-wise similarity formulations. Hence, it is a good candidate for optimizing the ARN, AE, and FPC. Moreover, it enhances feature learning and class separability through flexible optimization and a definite convergence target (Sun et al., 2020). The main objective of feature learning is to increase the within-class similarity $s_p$ while reducing the between-class similarity $s_n$. The circle loss unifies both
|
| 142 |
+
|
| 143 |
+
class-level labels and pair-wise similarity with $K$ within-class similarity scores $(s_p)$ and $L$ between-class similarity scores $(s_n)$ and is defined as:
|
| 144 |
+
|
| 145 |
+
$$
|
| 146 |
+
\mathcal{L}_{\text{circle}} = \log \left[ 1 + \sum_{j=1}^{L} \exp\left(\gamma \alpha_{n}^{j} \left(s_{n}^{j} - \Delta_{n}\right)\right) \sum_{i=1}^{K} \exp\left(-\gamma \alpha_{p}^{i} \left(s_{p}^{i} - \Delta_{p}\right)\right) \right], \tag{5}
|
| 147 |
+
$$
|
| 148 |
+
|
| 149 |
+
where $\alpha_{p}^{i} = [1 + m - s_{p}^{i}]_{+}$ and $\alpha_{n}^{j} = [s_{n}^{j} + m]_{+}$ are non-negative weighting factors ($\alpha_{n}^{j} > 0$, $\alpha_{p}^{i} > 0$), $\gamma$ is a scaling factor, and $[\cdot]_{+}$ denotes the cut-off at zero. $\Delta_{n} = m$ and $\Delta_{p} = 1 - m$ are the between-class and within-class margins, respectively.
|
| 150 |
+
|
| 151 |
+
In CARNet, we use the sample $v_{tr}^{s}$ as the anchor, the corresponding class prototype vector $p_{k}^{s}$ of class $k$ as the positive sample, and the remaining seen-class prototype vectors $p_{j}^{s}$ as negative samples. Cosine similarity is used to compute the positive similarity $s_{p}$ and the negative similarity $s_{n}$ as follows:
|
| 152 |
+
|
| 153 |
+
$$
|
| 154 |
+
s_{p}^{k} = \frac{v_{tr}^{s} \cdot p_{k}^{s}}{\|v_{tr}^{s}\| \, \|p_{k}^{s}\|} \tag{6}
|
| 155 |
+
$$
|
| 156 |
+
|
| 157 |
+
$$
|
| 158 |
+
s_{n}^{j} = \frac{v_{tr}^{s} \cdot p_{j}^{s}}{\|v_{tr}^{s}\| \, \|p_{j}^{s}\|}, \quad \text{where } k, j \in \{C^{s}\} \text{ and } k \neq j \tag{7}
|
| 159 |
+
$$
|
| 160 |
+
|
| 161 |
+
Hence, the circle loss in Eq. (5) is modified in CARNet as:
|
| 162 |
+
|
| 163 |
+
$$
|
| 164 |
+
\mathcal{L}_{\text{circle}} = \log \left[ 1 + \exp\left(-\gamma \alpha_{p}^{k} \left(s_{p}^{k} - \Delta_{p}\right)\right) \sum_{\substack{j \in \{C^{s}\} \\ j \neq k}} \exp\left(\gamma \alpha_{n}^{j} \left(s_{n}^{j} - \Delta_{n}\right)\right) \right] \tag{8}
|
| 165 |
+
$$
|
| 166 |
+
|
| 167 |
+
Softmax Cross-Entropy Loss: To improve the classification, the softmax cross-entropy loss $(\mathcal{L}_{soft-ce})$ is applied over the computed scaled cosine similarity in Eq. (4), as shown below:
|
| 168 |
+
|
| 169 |
+
$$
|
| 170 |
+
\mathcal{L}_{\text{soft-ce}} = -\log \frac{e^{\operatorname{scos}\left(v_{tr}^{s}, p_{k}^{s}\right)}}{\sum_{i \in \{C^{s}\}} e^{\operatorname{scos}\left(v_{tr}^{s}, p_{i}^{s}\right)}} \tag{9}
|
| 171 |
+
$$
|
| 172 |
+
|
| 173 |
+
Thus, the training of the CARNet is achieved by learning the weights of the ARN and the AE using the losses in Eq. (8) and Eq. (9), as shown below:
|
| 174 |
+
|
| 175 |
+
$$
|
| 176 |
+
\mathcal{L}_{CARNet} = \mathcal{L}_{\text{soft-ce}} + \lambda \, \mathcal{L}_{\text{circle}} \tag{10}
|
| 177 |
+
$$
|
| 178 |
+
|
| 179 |
+
The loss in Eq. (10) is the objective optimized during training. Note that only the seen-class information $(\mathcal{D}_{tr}^{s}, \{C^{s}\}, A^{s})$ is used during training.
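To make the training objective concrete, the sketch below computes Eqs. (8)–(10) for a mini-batch of visual features against the seen-class prototypes. Batching the loss, the log-sum-exp formulation, and the default hyperparameter values are our assumptions; only the per-sample form of the equations comes from the text.

```python
import torch
import torch.nn.functional as F

def carnet_loss(V: torch.Tensor, P_s: torch.Tensor, labels: torch.Tensor,
                m: float = 0.4, gamma: float = 0.5, lam: float = 1.0,
                beta: float = 5.0) -> torch.Tensor:
    """Joint loss of Eq. (10): softmax cross-entropy (Eq. (9)) + lambda * circle loss (Eq. (8)).
    V: [B, f] visual features, P_s: [C_s, f] seen-class prototypes, labels: [B] class indices."""
    # Cosine similarity between every sample and every seen-class prototype.
    cos = F.normalize(V, dim=-1) @ F.normalize(P_s, dim=-1).t()       # [B, C_s]
    pos_mask = F.one_hot(labels, num_classes=P_s.size(0)).bool()

    # Circle loss (Eq. (8)): one positive similarity s_p and C_s - 1 negatives s_n per sample.
    delta_p, delta_n = 1.0 - m, m
    alpha_p = (1.0 + m - cos).clamp_min(0.0)                          # [1 + m - s_p]_+
    alpha_n = (cos + m).clamp_min(0.0)                                # [s_n + m]_+
    logit_p = -gamma * alpha_p * (cos - delta_p)
    logit_n = gamma * alpha_n * (cos - delta_n)
    neg_inf = torch.full_like(cos, float("-inf"))
    pos_term = torch.logsumexp(torch.where(pos_mask, logit_p, neg_inf), dim=1)
    neg_term = torch.logsumexp(torch.where(pos_mask, neg_inf, logit_n), dim=1)
    circle = F.softplus(pos_term + neg_term).mean()                   # log(1 + exp(.) * sum exp(.))

    # Softmax cross-entropy over the scaled cosine logits of Eq. (4): scos = beta^2 * cos.
    soft_ce = F.cross_entropy(beta * beta * cos, labels)
    return soft_ce + lam * circle                                     # Eq. (10)
```

During training, `P_s` is produced by passing the seen-class attribute matrix through the ARN and the AE, so the gradients of this loss flow back into both modules.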
|
| 180 |
+
|
| 181 |
+
# 4.5 Inference: Seen and Unseen Classes
|
| 182 |
+
|
| 183 |
+
The proposed CARNet method is based on the fixed body and dynamic head (classification layer) architecture. As the model is trained with only seen classes, the classification layer has neurons corresponding to the seen classes only, i.e., $C^s$ neurons. Further, we simply modify the output head and enable it for unseen classes $\{C^u\}$ using its class attribute information $(A^u)$ as per the following procedure:
|
| 184 |
+
|
| 185 |
+
1. Pass the unseen class attribute information $A^u$ to the trained ARN and obtain the refined output $A_R^u$.
|
| 186 |
+
2. Pass the $A_R^u$ to the trained AE and get the unseen class prototype vectors $P^u \in \mathbb{R}^{C^u \times f}$ for $C^u$ unseen classes.
|
| 187 |
+
3. The unseen class prototype vectors $(P^u)$ are stacked with seen class prototype vectors $(P^s)$ as follows:
|
| 188 |
+
|
| 189 |
+
$$
|
| 190 |
+
P = \left[ \begin{array}{l} P^{s} \\ P^{u} \end{array} \right], \tag{11}
|
| 191 |
+
$$
|
| 192 |
+
|
| 193 |
+
where $P \in \mathbb{R}^{C \times f}$.
|
| 194 |
+
|
| 195 |
+
After computation of $P$ , we compute the scaled cosine similarity score $scos(v_{ts},p_i)$ as follows:
|
| 196 |
+
|
| 197 |
+
$$
|
| 198 |
+
\operatorname{scos}\left(v_{ts}, p_{i}\right) = \left(\beta \cdot \frac{v_{ts}}{\|v_{ts}\|}\right)^{\top} \left(\beta \cdot \frac{p_{i}}{\|p_{i}\|}\right), \tag{12}
|
| 199 |
+
$$
|
| 200 |
+
|
| 201 |
+
where $p_i \in P$ is the class prototype vector of class $i \in \{C^s\} \cup \{C^u\}$, and $v_{ts} \in V_{ts}^s \cup V_{ts}^u$. Here, $V_{ts}^s$ and $V_{ts}^u$ are the visual features extracted with the pretrained ResNet-101 model for the test images of the seen and unseen classes, respectively. Finally, classification is performed in the standard way by choosing the class with the highest scaled cosine similarity score.
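The inference procedure above amounts to a few lines once the trained ARN and AE are available; the sketch below assumes modules of the kind sketched earlier and uses illustrative variable names.

```python
import torch
import torch.nn.functional as F

@torch.no_grad()
def gzsl_predict(V_test: torch.Tensor, A_seen: torch.Tensor, A_unseen: torch.Tensor,
                 arn: torch.nn.Module, ae: torch.nn.Module, beta: float = 5.0) -> torch.Tensor:
    """GZSL inference: build prototypes for all classes, then classify by scaled cosine similarity."""
    P_s = ae(arn(A_seen))                 # seen-class prototypes   [C_s, f]
    P_u = ae(arn(A_unseen))               # unseen-class prototypes [C_u, f]
    P = torch.cat([P_s, P_u], dim=0)      # stacked prototypes P (Eq. (11))
    logits = (beta * F.normalize(V_test, dim=-1)) @ (beta * F.normalize(P, dim=-1)).t()  # Eq. (12)
    return logits.argmax(dim=1)           # predicted index into {C^s} followed by {C^u}
```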
|
| 202 |
+
|
| 203 |
+
# 5 Experiments
|
| 204 |
+
|
| 205 |
+
We conduct extensive experiments on five benchmark ZSL datasets (described in Table 5 in the appendix) to evaluate performance in two settings, i.e., conventional ZSL and GZSL. In conventional ZSL, the test samples consist only of unseen classes, and we report the Top-1 accuracy on unseen classes $(Acc)$. In GZSL, the test samples consist of both seen and unseen classes, and we compute the Top-1 accuracy for the seen $(SA)$ and unseen $(UA)$ classes. We further report their harmonic mean, $HM = \frac{2 \cdot SA \cdot UA}{SA + UA}$. We also evaluate the model in the continual GZSL (CGZSL) setting.
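For reference, the GZSL metrics can be computed as below; averaging top-1 accuracy per class before averaging over classes follows the standard evaluation protocol of Xian et al. (2018a), which we assume here.

```python
import numpy as np

def per_class_top1(preds: np.ndarray, labels: np.ndarray, classes: np.ndarray) -> float:
    """Top-1 accuracy averaged over the given classes."""
    accs = [(preds[labels == c] == c).mean() for c in classes if (labels == c).any()]
    return float(np.mean(accs))

def gzsl_metrics(preds, labels, seen_classes, unseen_classes):
    sa = per_class_top1(preds, labels, seen_classes)    # seen-class accuracy  (SA)
    ua = per_class_top1(preds, labels, unseen_classes)  # unseen-class accuracy (UA)
    hm = 2 * sa * ua / (sa + ua)                        # harmonic mean (HM)
    return sa, ua, hm
```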
|
| 206 |
+
|
| 207 |
+
<table><tr><td rowspan="3"></td><td rowspan="3">Methods</td><td colspan="4">SUN</td><td colspan="4">CUB</td><td colspan="4">AWA1</td><td colspan="4">AWA2</td><td colspan="4">APY</td></tr><tr><td>ZSL</td><td colspan="3">GZSL</td><td>ZSL</td><td colspan="3">GZSL</td><td>ZSL</td><td colspan="3">GZSL</td><td>ZSL</td><td colspan="3">GZSL</td><td>APY</td><td colspan="3">GZSL</td></tr><tr><td>Acc</td><td>UA</td><td>SA</td><td>HM</td><td>Acc</td><td>UA</td><td>SA</td><td>HM</td><td>Acc</td><td>UA</td><td>SA</td><td>HM</td><td>Acc</td><td>UA</td><td>SA</td><td>HM</td><td>Acc</td><td>UA</td><td>SA</td><td>HM</td></tr><tr><td rowspan="11">Generative Methods</td><td>SGAL (Yu and Lee, 2019)</td><td>-</td><td>42.9</td><td>31.2</td><td>36.1</td><td>-</td><td>47.1</td><td>44.7</td><td>45.9</td><td>-</td><td>52.7</td><td>75.7</td><td>62.2</td><td>-</td><td>55.1</td><td>81.2</td><td>65.6</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DASCN (Ni et al., 2019)</td><td>-</td><td>42.4</td><td>38.5</td><td>40.3</td><td>-</td><td>45.9</td><td>59.0</td><td>51.6</td><td>-</td><td>59.3</td><td>68.0</td><td>63.4</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>39.7</td><td>59.5</td><td>47.6</td></tr><tr><td>CIZSL (Elhoseiny and Elfeki, 2019)</td><td>-</td><td>-</td><td>-</td><td>27.8</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>24.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>TF-VAEGAN (Narayan et al., 2020)</td><td>-</td><td>45.6</td><td>40.7</td><td>43.0</td><td>-</td><td>52.8</td><td>64.7</td><td>58.1</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>59.8</td><td>75.1</td><td>66.6</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>F-VAEGAN-D2 (Xian et al., 2019)</td><td>64.7</td><td>45.1</td><td>38.0</td><td>41.3</td><td>61.0</td><td>48.4</td><td>60.1</td><td>53.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>71.1</td><td>57.6</td><td>70.6</td><td>63.5</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>CADA-VAE (Schonfeld et al., 2019)</td><td>-</td><td>47.2</td><td>35.7</td><td>40.6</td><td>-</td><td>51.6</td><td>53.5</td><td>52.4</td><td>-</td><td>57.3</td><td>72.8</td><td>64.1</td><td>-</td><td>55.8</td><td>75.0</td><td>63.9</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>EPGN (Yu et al., 2020)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>52.0</td><td>61.1</td><td>56.2</td><td>-</td><td>62.1</td><td>83.4</td><td>71.2</td><td>-</td><td>52.6</td><td>83.5</td><td>64.6</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>LsrGAN (Vyas et al., 2020)</td><td>-</td><td>44.8</td><td>37.7</td><td>40.9</td><td>-</td><td>48.1</td><td>59.1</td><td>53.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>54.6</td><td>74.6</td><td>63.0</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>ZSML (Verma et al., 2020)</td><td>-</td><td>21.7</td><td>45.1</td><td>29.3</td><td>-</td><td>60</td><td>52.1</td><td>55.7</td><td>-</td><td>57.4</td><td>71.1</td><td>63.5</td><td>-</td><td>58.9</td><td>74.6</td><td>65.8</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>IZF (Shen et al., 2020)</td><td>68.4</td><td>52.7</td><td>57</td><td>54.8</td><td>67.1</td><td>52.7</td><td>68</td><td>59.4</td><td>74.3</td><td>61.3</td><td>80.5</td><td>69.6</td><td>74.5</td><td>60.6</td><td>77.5</td><td>68.0</td><td>44.9</td><td>42.3</td><td>60.5</td><td>49.8</td></tr><tr><td>FREE (Chen et al., 
2021b)</td><td>-</td><td>47.4</td><td>37.2</td><td>41.7</td><td>-</td><td>55.7</td><td>59.9</td><td>57.7</td><td>-</td><td>62.9</td><td>69.4</td><td>66.0</td><td>-</td><td>60.4</td><td>75.4</td><td>67.1</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td rowspan="24">Non-generative Methods</td><td>DEVISE (Frome et al., 2013)</td><td>56.5</td><td>16.9</td><td>27.4</td><td>20.9</td><td>52.0</td><td>23.8</td><td>53.0</td><td>32.8</td><td>54.2</td><td>23.8</td><td>53.0</td><td>32.8</td><td>59.7</td><td>17.1</td><td>74.7</td><td>27.8</td><td>39.8</td><td>4.9</td><td>76.9</td><td>9.2</td></tr><tr><td>ESZSL (Romera-Paredes and Torr, 2015)</td><td>54.5</td><td>11.0</td><td>27.9</td><td>15.8</td><td>53.9</td><td>12.6</td><td>63.8</td><td>21.0</td><td>58.2</td><td>6.6</td><td>75.6</td><td>12.1</td><td>58.6</td><td>5.9</td><td>77.8</td><td>11.0</td><td>38.3</td><td>2.4</td><td>70.1</td><td>4.6</td></tr><tr><td>LATEM (Xian et al., 2016)</td><td>55.3</td><td>14.7</td><td>28.8</td><td>19.5</td><td>49.3</td><td>15.2</td><td>57.3</td><td>24.0</td><td>55.1</td><td>7.3</td><td>71.7</td><td>13.3</td><td>55.8</td><td>11.5</td><td>77.3</td><td>20.0</td><td>35.2</td><td>0.1</td><td>73.0</td><td>0.2</td></tr><tr><td>SYNC (Changpinyo et al., 2016)</td><td>56.3</td><td>7.9</td><td>43.3</td><td>13.4</td><td>55.6</td><td>11.5</td><td>70.9</td><td>19.8</td><td>54.0</td><td>8.9</td><td>87.3</td><td>16.2</td><td>46.6</td><td>10.0</td><td>90.5</td><td>18.0</td><td>23.9</td><td>7.4</td><td>66.3</td><td>13.3</td></tr><tr><td>SAE (Kodirov et al., 2017)</td><td>40.3</td><td>8.8</td><td>18.0</td><td>11.8</td><td>33.3</td><td>7.8</td><td>54.0</td><td>13.6</td><td>53.0</td><td>1.8</td><td>77.1</td><td>3.5</td><td>54.1</td><td>1.1</td><td>82.2</td><td>2.2</td><td>8.3</td><td>0.4</td><td>80.9</td><td>0.9</td></tr><tr><td>DEM (Zhang et al., 2017)</td><td>40.3</td><td>20.5</td><td>34.3</td><td>25.6</td><td>51.7</td><td>19.6</td><td>57.9</td><td>29.2</td><td>68.4</td><td>32.8</td><td>84.7</td><td>47.3</td><td>67.2</td><td>30.5</td><td>86.4</td><td>45.1</td><td>35.0</td><td>11.1</td><td>75.1</td><td>19.4</td></tr><tr><td>ZSKL (Zhang and Koniusz, 2018)</td><td>-</td><td>20.1</td><td>31.4</td><td>24.5</td><td>-</td><td>21.6</td><td>52.8</td><td>30.6</td><td>-</td><td>18.3</td><td>79.3</td><td>29.8</td><td>-</td><td>18.9</td><td>82.7</td><td>30.8</td><td>-</td><td>10.5</td><td>76.2</td><td>18.5</td></tr><tr><td>DCN (Liu et al., 2018)</td><td>61.8</td><td>25.5</td><td>37.0</td><td>30.2</td><td>56.2</td><td>28.4</td><td>60.7</td><td>38.7</td><td>65.2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>25.5</td><td>84.2</td><td>39.1</td><td>43.6</td><td>14.2</td><td>75.0</td><td>23.9</td></tr><tr><td>SP-AEN (Chen et al., 2018)</td><td>59.2</td><td>24.9</td><td>38.6</td><td>30.3</td><td>55.4</td><td>34.7</td><td>70.6</td><td>46.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>58.5</td><td>23.3</td><td>90.9</td><td>37.1</td><td>24.1</td><td>13.7</td><td>63.4</td><td>22.6</td></tr><tr><td>CDL (Jiang et al., 2018)</td><td>-</td><td>21.5</td><td>34.7</td><td>26.5</td><td>-</td><td>23.5</td><td>55.2</td><td>32.9</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>19.8</td><td>48.6</td><td>28.1</td></tr><tr><td>PSR (Annadani and Biswas, 
2018)</td><td>61.4</td><td>20.8</td><td>37.2</td><td>26.7</td><td>56.0</td><td>24.6</td><td>54.3</td><td>33.9</td><td>-</td><td>-</td><td>-</td><td>-</td><td>63.8</td><td>20.7</td><td>73.8</td><td>32.2</td><td>38.4</td><td>13.5</td><td>51.4</td><td>21.4</td></tr><tr><td>RelNet (Sung et al., 2018)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>55.6</td><td>38.1</td><td>61.4</td><td>47.0</td><td>68.2</td><td>31.4</td><td>91.3</td><td>46.7</td><td>64.2</td><td>30.9</td><td>93.4</td><td>45.3</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>COSMO (Atzmon and Chechik, 2019)</td><td>-</td><td>44.9</td><td>37.7</td><td>41.0</td><td>-</td><td>44.4</td><td>57.8</td><td>50.2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>CRNet (Zhang and Shi, 2019)</td><td>-</td><td>34.1</td><td>36.5</td><td>35.3</td><td>-</td><td>45.5</td><td>56.8</td><td>50.5</td><td>-</td><td>52.6</td><td>78.8</td><td>63.1</td><td>-</td><td>58.1</td><td>74.7</td><td>65.4</td><td>-</td><td>32.4</td><td>68.4</td><td>44.0</td></tr><tr><td>MLSE (Ding and Liu, 2019)</td><td>-</td><td>20.7</td><td>36.4</td><td>26.4</td><td>-</td><td>22.3</td><td>71.6</td><td>34.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>23.8</td><td>83.2</td><td>37.0</td><td>-</td><td>12.7</td><td>74.3</td><td>21.7</td></tr><tr><td>DLFZRL (Tong et al., 2019)</td><td>-</td><td>-</td><td>24.6</td><td>-</td><td>-</td><td>-</td><td>37.1</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>45.1</td><td>-</td><td>-</td><td>-</td><td>31.0</td><td></td></tr><tr><td>Triplet (Cacheux et al., 2019)</td><td>-</td><td>47.9</td><td>30.4</td><td>36.8</td><td>-</td><td>55.8</td><td>52.3</td><td>53.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>48.5</td><td>83.2</td><td>61.3</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>CVC-ZSL (Li et al., 2019)</td><td>62.6</td><td>36.3</td><td>42.8</td><td>39.3</td><td>54.4</td><td>47.4</td><td>47.6</td><td>47.5</td><td>70.9</td><td>62.7</td><td>77.0</td><td>69.1</td><td>71.1</td><td>56.4</td><td>81.4</td><td>66.7</td><td>26.5</td><td>26.5</td><td>74.0</td><td>39.0</td></tr><tr><td>APNet (Liu et al., 2020)</td><td>62.3</td><td>35.4</td><td>40.6</td><td>37.8</td><td>57.7</td><td>48.1</td><td>55.9</td><td>51.7</td><td>68.0</td><td>59.7</td><td>76.6</td><td>67.1</td><td>68.0</td><td>54.8</td><td>83.9</td><td>66.4</td><td>41.3</td><td>32.7</td><td>74.7</td><td>45.5</td></tr><tr><td>DAZLE (Huynh and Elhamifar, 2020)</td><td>-</td><td>52.3</td><td>24.3</td><td>33.2</td><td>-</td><td>56.7</td><td>59.6</td><td>58.1</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>60.3</td><td>75.7</td><td>67.1</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>DVBE (Min et al., 2020)</td><td>-</td><td>45.0</td><td>37.2</td><td>40.7</td><td>-</td><td>53.2</td><td>60.2</td><td>56.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>63.6</td><td>70.8</td><td>67.0</td><td>-</td><td>32.6</td><td>58.3</td><td>41.8</td></tr><tr><td>CNZSL (Skorokhodov and Elhoseiny, 2021)</td><td>-</td><td>44.7</td><td>41.6</td><td>43.1</td><td>-</td><td>49.9</td><td>50.7</td><td>50.3</td><td>-</td><td>63.1</td><td>73.4</td><td>67.8</td><td>-</td><td>60.2</td><td>77.1</td><td>67.6</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>HSVA (Chen et al., 
2021c)</td><td>63.8</td><td>48.6</td><td>39.0</td><td>43.3</td><td>62.8</td><td>52.7</td><td>58.3</td><td>55.3</td><td>70.6</td><td>59.3</td><td>76.6</td><td>66.8</td><td>-</td><td>56.7</td><td>79.8</td><td>66.3</td><td>-</td><td>-</td><td>-</td><td></td></tr><tr><td>CARNet (Ours)</td><td>63.1</td><td>49.4</td><td>40.5</td><td>44.5</td><td>73.1</td><td>65.0</td><td>59.6</td><td>62.2</td><td>75.0</td><td>69.5</td><td>74.7</td><td>72.0</td><td>73.7</td><td>65.7</td><td>79.7</td><td>72.0</td><td>45.3</td><td>39.9</td><td>65.9</td><td>49.7</td></tr></table>
|
| 208 |
+
|
| 209 |
+
Table 1: ZSL and GZSL results (%) on the ZSL benchmark datasets. The best, second-best, and third-best results are shown in bold, with the best results underlined. '-' denotes that the result is not reported in the corresponding paper.
|
| 210 |
+
|
| 211 |
+
# 5.1 Comparison with Baseline Methods
|
| 212 |
+
|
| 213 |
+
In this section, the performance of CARNet is evaluated against strong baseline models in the three ZSL settings. Results for the conventional ZSL and GZSL settings are provided in Table 1.
|
| 214 |
+
|
| 215 |
+
Conventional ZSL: Table 1 shows that the proposed CARNet outperforms all non-generative ZSL methods in the conventional ZSL setting, with absolute gains of $10.3\%$, $4.1\%$, $2.6\%$, $1.7\%$, and $1.3\%$ on the CUB (Welinder et al., 2010), AWA1 (Lampert et al., 2009), AWA2 (Lampert et al., 2013), aPY (Farhadi et al., 2009), and SUN (Patterson and Hays, 2012) datasets, respectively. In comparison to generative ZSL methods, the proposed approach achieves absolute gains of $6\%$ and $0.4\%$ on the CUB and AWA1 datasets, respectively.
|
| 216 |
+
|
| 217 |
+
On the remaining datasets, the model outperforms all other generative models and shows competitive performance against IZF (Shen et al., 2020).
|
| 218 |
+
|
| 219 |
+
Generalized zero-shot learning: In this setting, the proposed non-generative CARNet model outperforms all non-generative ZSL methods with absolute gains of $1.2\%$, $4.1\%$, $2.9\%$, $4.4\%$, and $4.2\%$ on the SUN, CUB, AWA1, AWA2, and aPY datasets, respectively. Moreover, CARNet yields the best $HM$ among all generative and non-generative methods on the CUB, AWA1, and AWA2 datasets and a better or similar $HM$ on the aPY dataset. CARNet is outperformed by IZF (Shen et al., 2020) on the SUN dataset. It should be noted that IZF is an invertible flow-based generative model that learns a bidirectional mapping between the visual and the
|
| 220 |
+
|
| 221 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="3">CUB</td><td colspan="3">SUN</td></tr><tr><td>mUA</td><td>mSA</td><td>mHM</td><td>mUA</td><td>mSA</td><td>mHM</td></tr><tr><td>Seq-CARNet</td><td>4.5</td><td>11.3</td><td>5.9</td><td>4.3</td><td>11.8</td><td>6.1</td></tr><tr><td>Seq-CNZSL (Skorokhodov and Elhoseiny, 2021)</td><td>-</td><td>-</td><td>23</td><td>-</td><td>-</td><td>14</td></tr><tr><td>Seq-CVAE (Mishra et al., 2018)</td><td>8.6</td><td>24.7</td><td>12.2</td><td>11.4</td><td>16.9</td><td>13.4</td></tr><tr><td>Seq-CADA-VAE (Schonfeld et al., 2019)</td><td>14.4</td><td>40.8</td><td>21.1</td><td>16.2</td><td>25.9</td><td>20.1</td></tr><tr><td>CNZSL-AGEM (Skorokhodov and Elhoseiny, 2021)</td><td>-</td><td>-</td><td>23.8</td><td>-</td><td>-</td><td>14.2</td></tr><tr><td>CNZSL-EWC-online (Skorokhodov and Elhoseiny, 2021)</td><td>-</td><td>-</td><td>23.3</td><td>-</td><td>-</td><td>14.3</td></tr><tr><td>CNZSL-MAS-online (Skorokhodov and Elhoseiny, 2021)</td><td>-</td><td>-</td><td>23.8</td><td>-</td><td>-</td><td>14.2</td></tr><tr><td>GRCZSL (Gautam et al., 2021a)</td><td>14.1</td><td>41.9</td><td>20.5</td><td>11.5</td><td>17.7</td><td>13.7</td></tr><tr><td>CZSL-CV+res (Gautam et al., 2020)</td><td>13.5</td><td>44.9</td><td>20.2</td><td>14.1</td><td>24.0</td><td>17.6</td></tr><tr><td>CZSL-CA+res (Gautam et al., 2020)</td><td>32.8</td><td>44.0</td><td>36.1</td><td>21.7</td><td>27.1</td><td>22.9</td></tr><tr><td>Tf-GCZSL (Gautam et al., 2021b)</td><td>32.4</td><td>46.6</td><td>36.3</td><td>24.7</td><td>28.1</td><td>24.8</td></tr><tr><td>CARNet-ER (Ours)</td><td>43.0</td><td>45.8</td><td>43.4</td><td>23.3</td><td>30.3</td><td>25.6</td></tr><tr><td>CARNet-ER+CBR (Ours)</td><td>43.4</td><td>47.4</td><td>44.2</td><td>23.6</td><td>30.9</td><td>26.0</td></tr></table>
|
| 222 |
+
|
| 223 |
+
Table 2: Continual Generalized Zero-shot Learning Results
|
| 224 |
+
|
| 225 |
+
attribute space, enabling it to perform better than other generative ZSL methods. However, CARNet outperforms IZF on CUB, AWA1, and AWA2 by significant margins of $2.8\%$, $2.4\%$, and $3.7\%$, respectively, and yields a similar result on the aPY dataset (a difference of only $0.1\%$). In addition to these performance gains, the proposed CARNet has a characteristic advantage over generative ZSL approaches: CARNet uses only the attribute vectors of seen classes during training, whereas generative ZSL methods use the attribute vectors of both seen and unseen classes during training, which is not a realistic scenario in a dynamic environment.
|
| 226 |
+
|
| 227 |
+
Table 3 presents the computational time required to train various ZSL methods. CARNet is at least $68\times$, $68\times$, $21\times$, and $31\times$ faster than the generative methods on SUN, CUB, AWA1, and AWA2, respectively. This can be attributed to the fact that CARNet only needs to process class attribute vectors through the ARN and the AE.
|
| 228 |
+
|
| 229 |
+
Thus, the proposed CARNet is a desirable candidate for conventional and generalized ZSL, owing to its performance, data requirements and computational speed.
|
| 230 |
+
|
| 231 |
+
Continual generalized zero-shot learning (CGZSL): While ZSL assumes data for all tasks to be available apriori, data may arrive in a sequential manner in real-world, and collecting all the data in memory is cumbersome. Hence, we further evaluate the performance of CARNet for the highly challenging CGZSL setting proposed in Skorokhodov and Elhoseiny (2021). This setting assumes that the data arrives in a sequence of
|
| 232 |
+
|
| 233 |
+
tasks, and only the current task's data is available for training. Thus, after training on a sequence of $[1,\dots,t]$ tasks, all classes in tasks $[1,\dots,t]$ are considered seen classes and classes from task $(t+1)$ onward are considered unseen classes. As experience replay-based methods generally outperform regularization-based methods in the literature (Delange et al., 2021), CARNet is equipped for CGZSL with an experience replay (ER) (Chaudhry et al., 2019b) strategy using class-balanced reservoir (CBR) sampling (Chrysakis and Moens, 2020). We measure the performance of a CGZSL method using SA, UA, and HM at each task, and report their means over all tasks, denoted mSA, mUA, and mHM (Skorokhodov and Elhoseiny, 2021). We present the CGZSL results in Table 2, along with the state-of-the-art CGZSL methods. Our method outperforms all existing methods by absolute gains of $7.1\%$ and $0.4\%$ on the CUB and SUN datasets, respectively. We also report the performance of CARNet with CBR sampling (CARNet-ER+CBR) and without CBR sampling (CARNet-ER) in Table 2.
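A rough sketch of experience replay with a class-balanced buffer is given below. This is a simplified variant of the class-balanced reservoir idea of Chrysakis and Moens (2020): the fixed capacity and the evict-from-largest-class rule are illustrative assumptions, not the exact algorithm used in our experiments.

```python
import random
from collections import defaultdict

class ClassBalancedReplayBuffer:
    """Simplified class-balanced replay buffer: when full, evict from the largest class."""

    def __init__(self, capacity: int):
        self.capacity = capacity
        self.buffer = []                     # list of (feature, label) pairs
        self.per_class = defaultdict(list)   # label -> indices into self.buffer

    def add(self, feature, label):
        if len(self.buffer) < self.capacity:
            self.per_class[label].append(len(self.buffer))
            self.buffer.append((feature, label))
            return
        # Evict a random sample from the currently largest class to keep classes balanced.
        largest = max(self.per_class, key=lambda c: len(self.per_class[c]))
        idx = random.choice(self.per_class[largest])
        self.per_class[largest].remove(idx)
        self.per_class[label].append(idx)
        self.buffer[idx] = (feature, label)

    def sample(self, batch_size: int):
        """Draw a replay mini-batch to mix with the current task's data."""
        return random.sample(self.buffer, min(batch_size, len(self.buffer)))
```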
|
| 234 |
+
|
| 235 |
+
# 5.2 Ablation Study: Significance of Individual Components in CARNet
|
| 236 |
+
|
| 237 |
+
In this section, to emphasize the significance of individual components of CARNet, we perform an extensive ablation study over all the components and hyperparameters.
|
| 238 |
+
|
| 239 |
+
We study the effect of the individual components of CARNet, namely, (i) the ARN, (ii) the AE, (iii) the circle loss, (iv) the softmax cross-entropy loss, and (v) the scaled cosine similarity versus the dot product. We present the
|
| 240 |
+
|
| 241 |
+
<table><tr><td>Methods</td><td>SUN</td><td>CUB</td><td>AWA1</td><td>AWA2</td></tr><tr><td>RelNet (Sung et al., 2018)</td><td>-</td><td>25 min</td><td>40 min</td><td>40 min</td></tr><tr><td>DCN (Liu et al., 2018)</td><td>40 min</td><td>50 min</td><td>-</td><td>55 min</td></tr><tr><td>CIZSL (Elhoseiny and Elfeki, 2019)</td><td>3 Hr</td><td>2 Hr</td><td>3 Hr</td><td>3 Hr</td></tr><tr><td>CVC-ZSL (Li et al., 2019)</td><td>3 Hr</td><td>3 Hr</td><td>1.5 Hr</td><td>1.5 Hr</td></tr><tr><td>LsrGAN (Vyas et al., 2020)</td><td>1.1 Hr</td><td>1.25 Hr</td><td>-</td><td>1.5 Hr</td></tr><tr><td>TF-VAEGAN (Narayan et al., 2020)</td><td>1.5 Hr</td><td>1.75 Hr</td><td>-</td><td>2 Hr</td></tr><tr><td>CNZSL (Skorokhodov and Elhoseiny, 2021)</td><td>20 sec</td><td>20 sec</td><td>30 sec</td><td>30 sec</td></tr><tr><td>CARNet (Ours)</td><td>35 sec</td><td>22 sec</td><td>110 sec</td><td>77 sec</td></tr></table>
|
| 242 |
+
|
| 243 |
+
results of this study in Table 4. The softmax cross-entropy loss is indispensable, as CARNet has to perform classification; therefore, we keep it in all configurations of the component analysis in Table 4. The results make it evident that attribute refinement significantly boosts the performance of CARNet. Moreover, the scaled cosine similarity is another important component: it helps the model outperform the variant that uses the dot product by a large margin.
|
| 244 |
+
|
| 245 |
+
Table 3: Training time comparison. CNZSL (Skorokhodov and Elhoseiny, 2021) is a non-generative model; the remaining methods are generative models.
|
| 246 |
+
|
| 247 |
+
<table><tr><td>\( \mathcal{L}_{Soft-ce} \)</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>ARN</td><td></td><td>✓</td><td></td><td>✓</td><td>✓</td></tr><tr><td>\( \mathcal{L}_{Circle} \)</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Scaled-Cosine Similarity</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>✓</td></tr><tr><td>Dot-product</td><td></td><td></td><td></td><td>✓</td><td></td></tr><tr><td>SUN</td><td>43.1</td><td>43.9</td><td>43.4</td><td>37.8</td><td>44.5</td></tr><tr><td>CUB</td><td>58.1</td><td>58.8</td><td>59.4</td><td>42.7</td><td>62.2</td></tr><tr><td>AWA1</td><td>67.4</td><td>71.2</td><td>70.1</td><td>27.4</td><td>72.0</td></tr><tr><td>AWA2</td><td>69.6</td><td>70.5</td><td>70.0</td><td>17.3</td><td>72.0</td></tr><tr><td>APY</td><td>43.9</td><td>45.2</td><td>47.3</td><td>17.3</td><td>49.7</td></tr></table>
|
| 248 |
+
|
| 249 |
+
Table 4: Component Analysis
|
| 250 |
+
|
| 251 |
+
# 6 Conclusion
|
| 252 |
+
|
| 253 |
+
In this work, we developed the circle loss guided gating-based attribute-refinement network for handling ZSL, GZSL, and continual-GZSL tasks. CARNet refines the attribute through a gating unit
|
| 254 |
+
|
| 255 |
+
where it improves the attribute representation by learning a self-weight for each attribute dimension in a projected space. These refined attributes improve the embedding, which helps overcome the model bias towards the seen classes. The whole model is guided by the circle loss together with the standard softmax cross-entropy loss, which maximizes the inter-class separability and intra-class similarity. Also, unlike generative methods, CARNet does not require the attribute vectors of the unseen classes during training. The proposed method is fast, as the attribute refinement network and the attribute embedder only need to process the class attribute vectors during training. This work shows that a simple MLP-based architecture can outperform various computationally expensive ZSL methods. In future work, this approach can be explored with generative methods and in other ZSL applications, such as zero-shot sketch-based image retrieval, action recognition, and natural language processing.
|
| 256 |
+
|
| 257 |
+
# 7 Limitations
|
| 258 |
+
|
| 259 |
+
One major limitation is that the inference data must come from the same domain as the training data, as the proposed model cannot handle data from domains on which it was not trained. Another limitation is that the proposed method requires task IDs during training in the CGZSL setting, without which CARNet cannot be optimized properly. However, in realistic scenarios, data need not arrive with well-defined task boundaries. Hence, the requirement of task IDs during training is a drawback of our proposed model.
|
| 260 |
+
|
| 261 |
+
# Acknowledgements
|
| 262 |
+
|
| 263 |
+
We would like to thank the Wipro IISc Research and Innovation Network (WIRIN, Grant No-99325T) and National Research Foundation, Singapore under its AI Singapore Programme (AISG Award No: AISG2-RP-2021-027) for funding this research.
|
| 264 |
+
|
| 265 |
+
# References
|
| 266 |
+
|
| 267 |
+
Zeynep Akata, Mateusz Malinowski, Mario Fritz, and Bernt Schiele. 2016. Multi-cue zero-shot learning with strong supervision. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 59-68.
|
| 268 |
+
Zeynep Akata, Scott Reed, Daniel Walter, Honglak Lee, and Bernt Schiele. 2015. Evaluation of output embeddings for fine-grained image classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2927-2936.
|
| 269 |
+
Yashas Annadani and Soma Biswas. 2018. Preserving semantic relations for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7603-7612.
|
| 270 |
+
Yuval Atzmon and Gal Chechik. 2019. Adaptive confidence smoothing for generalized zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11671-11680.
|
| 271 |
+
Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. 2016. Layer normalization. arXiv preprint arXiv:1607.06450.
|
| 272 |
+
Yannick Le Cacheux, Herve Le Borgne, and Michel Crucianu. 2019. Modeling inter and intra-class relations in the triplet loss for zero-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10333-10342.
|
| 273 |
+
Soravit Changpinyo, Wei-Lun Chao, Boqing Gong, and Fei Sha. 2016. Synthesized classifiers for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5327-5336.
|
| 274 |
+
Arslan Chaudhry, Marc'Aurelio Ranzato, Marcus Rohrbach, and Mohamed Elhoseiny. 2019a. Efficient lifelong learning with a-gem. In International Conference on Learning Representations.
|
| 275 |
+
Arslan Chaudhry, Marcus Rohrbach, Mohamed Elhoseiny, Thalaiyasingam Ajanthan, Puneet K Dokania, Philip HS Torr, and Marc'Aurelio Ranzato. 2019b. On tiny episodic memories in continual learning. arXiv preprint arXiv:1902.10486.
|
| 276 |
+
Jiaoyan Chen, Yuxia Geng, Zhuo Chen, Ian Horrocks, Jeff Z. Pan, and Huajun Chen. 2021a. Knowledge-aware zero-shot learning: Survey and perspective. In
|
| 277 |
+
|
| 278 |
+
Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, pages 4366-4373.
|
| 279 |
+
Long Chen, Hanwang Zhang, Jun Xiao, Wei Liu, and Shih-Fu Chang. 2018. Zero-shot visual recognition using semantics-preserving adversarial embedding networks. In CVPR, pages 1043-1052.
|
| 280 |
+
Shiming Chen, Wenjie Wang, Beihao Xia, Qinmu Peng, Xinge You, Feng Zheng, and Ling Shao. 2021b. Free: Feature refinement for generalized zero-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 122-131.
|
| 281 |
+
Shiming Chen, Guosen Xie, Yang Liu, Qinmu Peng, Baigui Sun, Hao Li, Xinge You, and Ling Shao. 2021c. Hsva: Hierarchical semantic-visual adaptation for zero-shot learning. Advances in Neural Information Processing Systems, 34.
|
| 282 |
+
Aristotelis Chrysakis and Marie-Francine Moens. 2020. Online continual learning from imbalanced data. In International Conference on Machine Learning, pages 1952-1961. PMLR.
|
| 283 |
+
Yann N Dauphin, Angela Fan, Michael Auli, and David Grangier. 2017. Language modeling with gated convolutional networks. In International conference on machine learning, pages 933-941. PMLR.
|
| 284 |
+
Matthias Delange, Rahaf Aljundi, Marc Masana, Sarah Parisot, Xu Jia, Ales Leonardis, Greg Slabaugh, and Tinne Tuytelaars. 2021. A continual learning survey: Defying forgetting in classification tasks. IEEE Transactions on Pattern Analysis and Machine Intelligence.
|
| 285 |
+
Zhengming Ding and Hongfu Liu. 2019. Marginalized latent semantic encoder for zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6191-6199.
|
| 286 |
+
Georgiana Dinu, Angeliki Lazaridou, and Marco Baroni. 2014. Improving zero-shot learning by mitigating the hubness problem. arXiv preprint arXiv:1412.6568.
|
| 287 |
+
Mohamed Elhoseiny and Mohamed Elfeki. 2019. Creativity inspired zero-shot learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5784-5793.
|
| 288 |
+
Ali Farhadi, Ian Endres, Derek Hoiem, and David Forsyth. 2009. Describing objects by their attributes. In 2009 IEEE Conference on Computer Vision and Pattern Recognition, pages 1778-1785. IEEE.
|
| 289 |
+
Rafael Felix, Vijay BG Kumar, Ian Reid, and Gustavo Carneiro. 2018. Multi-modal cycle-consistent generalized zero-shot learning. In Proceedings of the European Conference on Computer Vision (ECCV), pages 21-37.
|
| 290 |
+
|
| 291 |
+
Andrea Frome, Greg S Corrado, Jon Shlens, Samy Bengio, Jeff Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. 2013. Devise: A deep visual-semantic embedding model. In Advances in neural information processing systems, pages 2121-2129.
|
| 292 |
+
Yanwei Fu, Timothy M Hospedales, Tao Xiang, Zhenyong Fu, and Shaogang Gong. 2014. Transductive multi-view embedding for zero-shot recognition and annotation. In European Conference on Computer Vision, pages 584-599. Springer.
|
| 293 |
+
Yanwei Fu and Leonid Sigal. 2016. Semi-supervised vocabulary-informed learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5337-5346.
|
| 294 |
+
Chandan Gautam, Sethupathy Parameswaran, Ashish Mishra, and Suresh Sundaram. 2020. Generalized continual zero-shot learning. arXiv preprint arXiv:2011.08508.
|
| 295 |
+
Chandan Gautam, Sethupathy Parameswaran, Ashish Mishra, and Suresh Sundaram. 2021a. Generative replay-based continual zero-shot learning. arXiv preprint arXiv:2101.08894.
|
| 296 |
+
Chandan Gautam, Sethupathy Parameswaran, Ashish Mishra, and Suresh Sundaram. 2021b. Online lifelong generalized zero-shot learning. arXiv preprint arXiv:2103.10741.
|
| 297 |
+
Dan Hendrycks and Kevin Gimpel. 2016. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415.
|
| 298 |
+
Dat Huynh and Ehsan Elhamifar. 2020. Fine-grained generalized zero-shot learning via dense attribute-based attention. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4483-4493.
|
| 299 |
+
Huajie Jiang, Ruiping Wang, Shiguang Shan, and Xilin Chen. 2018. Learning class prototypes via structure alignment for zero-shot recognition. In Proceedings of the European conference on computer vision (ECCV), pages 118-134.
|
| 300 |
+
Rohit Keshari, Richa Singh, and Mayank Vatsa. 2020. Generalized zero-shot learning via over-complete distribution. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13300-13308.
|
| 301 |
+
Elyor Kodirov, Tao Xiang, and Shaogang Gong. 2017. Semantic autoencoder for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3174-3183.
|
| 302 |
+
Vinay Kumar Verma, Gundeep Arora, Ashish Mishra, and Piyush Rai. 2018. Generalized zero-shot learning via synthesized examples. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4281-4289.
|
| 303 |
+
|
| 304 |
+
Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. 2009. Learning to detect unseen object classes by between-class attribute transfer. In 2009 IEEE conference on computer vision and pattern recognition, pages 951-958. IEEE.
|
| 305 |
+
Christoph H Lampert, Hannes Nickisch, and Stefan Harmeling. 2013. Attribute-based classification for zero-shot visual object categorization. IEEE transactions on pattern analysis and machine intelligence, 36(3):453-465.
|
| 306 |
+
Hugo Larochelle, Dumitru Erhan, and Yoshua Bengio. 2008. Zero-data learning of new tasks. In AAAI, volume 1, page 3.
|
| 307 |
+
Jimmy Lei Ba, Kevin Swersky, Sanja Fidler, et al. 2015. Predicting deep zero-shot convolutional neural networks using textual descriptions. In Proceedings of the IEEE international conference on computer vision, pages 4247-4255.
|
| 308 |
+
Kai Li, Martin Renqiang Min, and Yun Fu. 2019. Rethinking zero-shot learning: A conditional visual classification perspective. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3583-3592.
|
| 309 |
+
Yan Li, Junge Zhang, Jianguo Zhang, and Kaiqi Huang. 2018. Discriminative learning of latent features for zero-shot recognition. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 7463-7471.
|
| 310 |
+
Hanxiao Liu, Zihang Dai, David So, and Quoc Le. 2021. Pay attention to mlps. Advances in Neural Information Processing Systems, 34.
|
| 311 |
+
Lu Liu, Tianyi Zhou, Guodong Long, Jing Jiang, and Chengqi Zhang. 2020. Attribute propagation network for graph zero-shot learning. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 4868-4875.
|
| 312 |
+
Shichen Liu, Mingsheng Long, Jianmin Wang, and Michael I Jordan. 2018. Generalized zero-shot learning with deep calibration network. In Advances in neural information processing systems, pages 2009-2019.
|
| 313 |
+
Shaobo Min, Hantao Yao, Hongtao Xie, Chaoqun Wang, Zheng-Jun Zha, and Yongdong Zhang. 2020. Domain-aware visual bias eliminating for generalized zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12664-12673.
|
| 314 |
+
Ashish Mishra, Shiva Krishna Reddy, Anurag Mittal, and Hema A Murthy. 2018. A generative model for zero shot learning using conditional variational autoencoders. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pages 2188-2196.
|
| 315 |
+
|
| 316 |
+
Sanath Narayan, Akshita Gupta, Fahad Shahbaz Khan, Cees GM Snoek, and Ling Shao. 2020. Latent embedding feedback and discriminative features for zero-shot classification. In Proceedings of the European Conference on Computer Vision (ECCV), pages 479-495. Springer.
|
| 317 |
+
Jian Ni, Shanghang Zhang, and Haiyong Xie. 2019. Dual adversarial semantics-consistent network for generalized zero-shot learning. Advances in Neural Information Processing Systems, 32:6146-6157.
|
| 318 |
+
Mark Palatucci, Dean Pomerleau, Geoffrey E Hinton, and Tom M Mitchell. 2009. Zero-shot learning with semantic output codes. Advances in neural information processing systems, 22.
|
| 319 |
+
Genevieve Patterson and James Hays. 2012. Sun attribute database: Discovering, annotating, and recognizing scene attributes. In 2012 IEEE Conference on Computer Vision and Pattern Recognition, pages 2751-2758. IEEE.
|
| 320 |
+
Scott Reed, Zeynep Akata, Honglak Lee, and Bernt Schiele. 2016. Learning deep representations of fine-grained visual descriptions. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 49-58.
|
| 321 |
+
Bernardino Romera-Paredes and Philip Torr. 2015. An embarrassingly simple approach to zero-shot learning. In International Conference on Machine Learning, pages 2152-2161.
|
| 322 |
+
Mark Sandler, Andrew Howard, Menglong Zhu, Andrey Zhmoginov, and Liang-Chieh Chen. 2018. Mobilenetv2: Inverted residuals and linear bottlenecks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4510-4520.
|
| 323 |
+
Edgar Schonfeld, Sayna Ebrahimi, Samarth Sinha, Trevor Darrell, and Zeynep Akata. 2019. Generalized zero-and few-shot learning via aligned variational autoencoders. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8247-8255.
|
| 324 |
+
Yuming Shen, Jie Qin, Lei Huang, Li Liu, Fan Zhu, and Ling Shao. 2020. Invertible zero-shot recognition flows. In European Conference on Computer Vision, pages 614-631. Springer.
|
| 325 |
+
Ivan Skorokhodov and Mohamed Elhoseiny. 2021. Class normalization for (continual)? generalized zero-shot learning. In International Conference on Learning Representations.
|
| 326 |
+
Richard Socher, Milind Ganjoo, Christopher D Manning, and Andrew Ng. 2013. Zero-shot learning through cross-modal transfer. In Advances in neural information processing systems, pages 935-943.
|
| 327 |
+
Jie Song, Chengchao Shen, Yezhou Yang, Yang Liu, and Mingli Song. 2018. Transductive unbiased embedding for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 1024-1033.
|
| 328 |
+
|
| 329 |
+
Rupesh Kumar Srivastava, Klaus Greff, and Jürgen Schmidhuber. 2015. Highway networks. arXiv preprint arXiv:1505.00387.
|
| 330 |
+
Yifan Sun, Changmao Cheng, Yuhan Zhang, Chi Zhang, Liang Zheng, Zhongdao Wang, and Yichen Wei. 2020. Circle loss: A unified perspective of pair similarity optimization. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6398-6407.
|
| 331 |
+
Flood Sung, Yongxin Yang, Li Zhang, Tao Xiang, Philip HS Torr, and Timothy M Hospedales. 2018. Learning to compare: Relation network for few-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1199-1208.
|
| 332 |
+
Bin Tong, Chao Wang, Martin Klinkigt, Yoshiyuki Kobayashi, and Yuuichi Nonaka. 2019. Hierarchical disentanglement of discriminative latent features for zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11467-11476.
|
| 333 |
+
Vinay Kumar Verma, Dhanajit Brahma, and Piyush Rai. 2020. Meta-learning for generalized zero-shot learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 6062-6069.
|
| 334 |
+
Maunil R Vyas, Hemanth Venkateswara, and Sethuraman Panchanathan. 2020. Leveraging seen and unseen semantic relationships for generative zero-shot learning. In European Conference on Computer Vision, pages 70-86. Springer.
|
| 335 |
+
Wei Wang, Vincent W Zheng, Han Yu, and Chunyan Miao. 2019. A survey of zero-shot learning: Settings, methods, and applications. ACM Transactions on Intelligent Systems and Technology (TIST), 10(2):1-37.
|
| 336 |
+
Kun Wei, Cheng Deng, and Xu Yang. 2020. Lifelong zero-shot learning. In Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence, pages 551-557.
|
| 337 |
+
Peter Welinder, Steve Branson, Takeshi Mita, Catherine Wah, Florian Schroff, Serge Belongie, and Pietro Perona. 2010. Caltech-ucsd birds 200.
|
| 338 |
+
Felix Wu, Angela Fan, Alexei Baevski, Yann Dauphin, and Michael Auli. 2018. Pay less attention with lightweight and dynamic convolutions. In International Conference on Learning Representations.
|
| 339 |
+
Yongqin Xian, Zeynep Akata, Gaurav Sharma, Quynh Nguyen, Matthias Hein, and Bernt Schiele. 2016. Latent embeddings for zero-shot classification. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 69-77.
|
| 340 |
+
Yongqin Xian, Christoph H Lampert, Bernt Schiele, and Zeynep Akata. 2018a. Zero-shot learning—a comprehensive evaluation of the good, the bad and the ugly. IEEE transactions on pattern analysis and machine intelligence, 41(9):2251-2265.
|
| 341 |
+
|
| 342 |
+
Yongqin Xian, Tobias Lorenz, Bernt Schiele, and Zeynep Akata. 2018b. Feature generating networks for zero-shot learning. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 5542-5551.
|
| 343 |
+
Yongqin Xian, Bernt Schiele, and Zeynep Akata. 2017. Zero-shot learning-the good, the bad and the ugly. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 4582-4591.
|
| 344 |
+
Yongqin Xian, Saurabh Sharma, Bernt Schiele, and Zeynep Akata. 2019. f-vaegan-d2: A feature generating framework for any-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10275-10284.
|
| 345 |
+
Hyeonwoo Yu and Beomhee Lee. 2019. Zero-shot learning via simultaneous generating and learning. Advances in Neural Information Processing Systems, 32:46-56.
|
| 346 |
+
Yunlong Yu, Zhong Ji, Jungong Han, and Zhongfei Zhang. 2020. Episode-based prototype generating network for zero-shot learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14035-14044.
|
| 347 |
+
Fei Zhang and Guangming Shi. 2019. Co-representation network for generalized zero-shot learning. In International Conference on Machine Learning, pages 7434-7443. PMLR.
|
| 348 |
+
Hongguang Zhang and Piotr Koniusz. 2018. Zero-shot kernel learning. In CVPR, pages 7670-7679.
|
| 349 |
+
Li Zhang, Tao Xiang, and Shaogang Gong. 2017. Learning a deep embedding model for zero-shot learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 2021-2030.
|
| 350 |
+
Ziming Zhang and Venkatesh Saligrama. 2015. Zero-shot learning via semantic similarity embedding. In Proceedings of the IEEE international conference on computer vision, pages 4166-4174.
|
| 351 |
+
|
| 352 |
+
# A Appendix
|
| 353 |
+
|
| 354 |
+
# A.1 ZSL Datasets
|
| 355 |
+
|
| 356 |
+
Five common benchmark datasets used in ZSL are the Scene Understanding (SUN) dataset, the Caltech-UCSD Birds (CUB) dataset, the Animals with Attributes datasets (AWA1, AWA2), and the Attribute Pascal and Yahoo (aPY) dataset. A brief description of these datasets is provided in Table 5.
|
| 357 |
+
|
| 358 |
+
# A.2 Continual Learning Setup
|
| 359 |
+
|
| 360 |
+
In the continual learning setup, all the classes (both seen and unseen) of a given dataset are grouped together and split into $T$ tasks. All classes up to the current task $t$ are treated as seen classes, and all classes of the remaining tasks, namely $t+1$ to $T$, are treated as unseen classes. At task $t$, the input consists of training images for all classes in task $t$. During testing, the test data consists of images belonging to all classes of tasks 1 to $T$. The task splits for the CUB and SUN datasets are as follows (a small helper sketch follows the list):
|
| 361 |
+
|
| 362 |
+
1. The CUB dataset consists of 200 classes. The dataset is split into 20 tasks with 10 classes in each task.
|
| 363 |
+
2. The SUN dataset consists of 717 classes. The dataset is split into 15 tasks, with 47 classes in the first 3 tasks and 48 classes in the remaining 12 tasks.
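The task splits above can be reproduced with a small helper; the consecutive ordering of class indices within tasks is an assumption made for illustration.

```python
def split_into_tasks(num_classes: int, task_sizes: list) -> list:
    """Partition class indices 0..num_classes-1 into consecutive tasks of the given sizes."""
    assert sum(task_sizes) == num_classes
    tasks, start = [], 0
    for size in task_sizes:
        tasks.append(list(range(start, start + size)))
        start += size
    return tasks

cub_tasks = split_into_tasks(200, [10] * 20)             # 20 tasks of 10 classes each
sun_tasks = split_into_tasks(717, [47] * 3 + [48] * 12)  # 3 tasks of 47 + 12 tasks of 48 classes
```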
|
| 364 |
+
|
| 365 |
+
# A.3 Implementation Details
|
| 366 |
+
|
| 367 |
+
We use a ResNet-101 model pretrained on ImageNet as the backbone for visual feature extraction. CARNet is trained using the Adam optimizer with a learning rate of 0.001 for aPY and 0.0005 for all remaining datasets. Further, a weight decay of 0.001 is used for aPY and SUN, and 0.0001 for the other datasets. We set $\beta = 5$ across all datasets for the scaled cosine similarity, and $m$, $\gamma$, and $\lambda$ are set as shown in Table 6. All experiments are performed on an RTX 2080 GPU with an Intel i7 processor and 32 GB RAM.
|
| 368 |
+
|
| 369 |
+
# A.4 Impact of Hyper-parameters ( $\gamma$ and $m$ )
|
| 370 |
+
|
| 371 |
+
In Fig. 3, we provide three 3-D plots of $HM$, $SA$, and $UA$ on the AWA2 dataset to study the effect of $m$ and $\gamma$ on CARNet. From the figure, it can be seen that $HM$, $SA$, and $UA$ vary by only $3\%$, $2\%$, and $4\%$, respectively, with changes in $m$ and $\gamma$, and CARNet still outperforms most generative and all non-generative ZSL methods.
|
| 372 |
+
|
| 373 |
+
<table><tr><td>Dataset</td><td>Attribute Dimension</td><td>#Seen Classes</td><td>#Unseen Classes</td><td>Total Classes</td><td>Description</td></tr><tr><td>SUN</td><td>102</td><td>645</td><td>72</td><td>717</td><td>Fine-grained</td></tr><tr><td>CUB</td><td>1024</td><td>150</td><td>50</td><td>200</td><td>Fine-grained</td></tr><tr><td>AWA1</td><td>85</td><td>40</td><td>10</td><td>50</td><td>Coarse-grained</td></tr><tr><td>AWA2</td><td>85</td><td>40</td><td>10</td><td>50</td><td>Coarse-grained</td></tr><tr><td>aPY</td><td>64</td><td>20</td><td>12</td><td>32</td><td>Coarse-grained</td></tr></table>
|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
(a) HM
|
| 377 |
+
|
| 378 |
+

|
| 379 |
+
(b) SA
|
| 380 |
+
|
| 381 |
+

|
| 382 |
+
(c) UA
|
| 383 |
+
Figure 3: Impact of hyperparameters on the proposed CARNet model for the AWA2 dataset in the GZSL setting with $\lambda = 0.8$.
|
| 384 |
+
|
| 385 |
+
Thus, CARNet is robust to large changes in $\gamma$ and $m$.
|
| 386 |
+
|
| 387 |
+
Table 5: Zero-shot learning benchmark datasets
|
| 388 |
+
|
| 389 |
+
<table><tr><td>Hyperparameter</td><td>SUN</td><td>CUB</td><td>AWA1</td><td>AWA2</td><td>APY</td></tr><tr><td>m</td><td>0.4</td><td>0.1</td><td>0.3</td><td>0.4</td><td>0.2</td></tr><tr><td>γ</td><td>0.5</td><td>0.9</td><td>1.0</td><td>0.5</td><td>1.0</td></tr><tr><td>λ</td><td>1.2</td><td>0.7</td><td>0.2</td><td>0.5</td><td>1.0</td></tr></table>
|
| 390 |
+
|
| 391 |
+
Table 6: Hyperparameter values for the different datasets.
|
refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eb9dd0185d657d3afc72e406a750511fc9296dae24996e3aee1cb779b5509f8e
|
| 3 |
+
size 817626
|
refinementmatterstextualdescriptionneedstoberefinedforzeroshotlearning/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:42c56cee1620197137806229996f1519e2ac32bc84f6bf81d6d078046f16a709
|
| 3 |
+
size 525964
|
representationlearningforresourceconstrainedkeyphrasegeneration/bf57ab26-286d-4d72-ad69-0a0bea7817dc_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cae1b9762d37f4b5bab31ff28b75e933e79782e6f8e05abe5eca914956721ce6
|
| 3 |
+
size 124957
|
representationlearningforresourceconstrainedkeyphrasegeneration/bf57ab26-286d-4d72-ad69-0a0bea7817dc_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7a00ecddd730904240dc80250fa88f6cec35b526e92ea3c72868a85a0b61dd79
|
| 3 |
+
size 150102
|
representationlearningforresourceconstrainedkeyphrasegeneration/bf57ab26-286d-4d72-ad69-0a0bea7817dc_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:88258a4ab06dd5534eeaeb2adcac358be861dca88274cb440cba1efa05548836
|
| 3 |
+
size 495196
|
representationlearningforresourceconstrainedkeyphrasegeneration/full.md
ADDED
|
@@ -0,0 +1,494 @@
| 1 |
+
# Representation Learning for Resource-Constrained Keyphrase Generation
|
| 2 |
+
|
| 3 |
+
Di Wu $^{1}$ , Wasi Uddin Ahmad $^{2*}$ , Sunipa Dev $^{1}$ , Kai-Wei Chang $^{1}$
|
| 4 |
+
|
| 5 |
+
University of California, Los Angeles<sup>1</sup>, AWS AI Labs<sup>2</sup>
|
| 6 |
+
|
| 7 |
+
{diwu,sunipa,kwchang}@cs.ucla.edu, wasiahmad@ucla.edu
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
State-of-the-art keyphrase generation methods generally depend on large annotated datasets, limiting their performance in domains with limited annotated data. To overcome this challenge, we design a data-oriented approach that first identifies salient information using retrieval-based corpus-level statistics, and then learns a task-specific intermediate representation based on a pre-trained language model using large-scale unlabeled documents. We introduce salient span recovery and salient span prediction as denoising training objectives that condense the intra-article and inter-article knowledge essential for keyphrase generation. Through experiments on multiple keyphrase generation benchmarks, we show the effectiveness of the proposed approach for facilitating low-resource keyphrase generation and zero-shot domain adaptation. Our method especially benefits the generation of absent keyphrases, approaching the performance of models trained with large training sets.
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
Keyphrases of a document are the phrases that summarize the most important information. In the keyphrase generation task, given a document, a model is required to generate a set of keyphrases, each of which can be classified as a present keyphrase if it appears as a contiguous text span in the document or an absent keyphrase otherwise. The generated keyphrases can facilitate a wide range of applications, such as document clustering (Hammouda et al., 2005), recommendation systems (Wu and Bolivar, 2008; Dave and Varma, 2010), information retrieval tasks (Jones and Staveley, 1999; Kim et al., 2013; Tang et al., 2017; Boudin et al., 2020), text summarization (Zhang et al., 2004), and text classification (Hulth and Megyesi, 2006; Wilson et al., 2005; Berend, 2011).
|
| 16 |
+
|
| 17 |
+
Input: localization and regularization behavior of mixed finite elements for 2d structural problems with damaging material. <sep> a class of lagrangian mixed finite elements is presented for applications to 2d structural problems based on a damage constitutive model. attention is on localization and regularization issues as compared with the correspondent behavior of lagrangian displacement based elements.
|
| 18 |
+
|
| 19 |
+
Present Keyphrases: localization; regularization; mixed finite elements; damage
|
| 20 |
+
|
| 21 |
+
Absent Keyphrases: hybrid formulations; plasticity
|
| 22 |
+
|
| 23 |
+
Figure 1: An example keyphrase generation case. The input document contains a title and some body text, separated by a separator token $<\mathsf{sep}>$ .
|
| 24 |
+
|
| 25 |
+
Recent years have seen promising results of neural keyphrase generation approaches as more large-scale annotated training datasets become available (Meng et al., 2017; Chan et al., 2019; Chen et al., 2020; Yuan et al., 2020; Ahmad et al., 2021; Ye et al., 2021). For instance, KP20k (Meng et al., 2017), a popular scientific keyphrase generation dataset, contains over 500,000 documents in its training set. Recent datasets in the news, science, or social media domains are often of a similar scale (Gallina et al., 2019; Cano and Bojar, 2019; Yuan et al., 2020). On the other hand, the poor out-of-distribution generalization ability of keyphrase generation models is often observed (Chen et al., 2018). This brings the challenge of training neural keyphrase generation models in the domains where gathering labeled data is difficult (e.g., due to privacy concerns) or domains that evolve as time goes by (e.g., with the creation of new concepts).
|
| 26 |
+
|
| 27 |
+
In this paper, we focus on improving the keyphrase generation performance in such "low-resource" scenarios where annotated data is limited. Pre-trained language models (PLMs), task-specific pre-training, and domain-specific pre-training have successfully driven low-resource NLP applications (Zhang et al., 2020a,b; Gururangan et al., 2020; Hedderich et al., 2021; Zou et al., 2021; Yu et al., 2021). These approaches often rely on objectives
|
| 28 |
+
|
| 29 |
+
such as masked language modeling (Devlin et al., 2019) or text infilling (Lewis et al., 2020) to provide self-supervised learning signals. Can we find similar self-supervision signals for keyphrase generation to make the downstream supervised fine-tuning more data-efficient?
|
| 30 |
+
|
| 31 |
+
To fulfill this goal, language modeling based on random masking or infilling may not be optimal. Intuitively, training to recover from random masking via maximum likelihood estimation (MLE) teaches the model to generate probable and coherent output but does not encourage the model to generate key information. For example, given the instance "A(n) approach is what we need", based on the context, a general language model may fill in with general words such as "creative" or "reliable". By contrast, a model that is better equipped for keyphrase generation may fill in with more specific and salient information, such as "multimodal" or "object detection". In other words, we hypothesize that keyphrase generation benefits from pretraining signals that help the model induce the key information from the context.
|
| 32 |
+
|
| 33 |
+
Observing that keyphrases are often snippets or synonyms of salient in-text spans (which we call salient spans), we propose to derive learning signals from them for task-specific pre-training using PLMs. We posit that a span carries salient information if it can effectively identify the associated document. Based on this assumption, we design a retrieval-based salient span mining procedure that finds spans that are domain-wise salient and functionally similar to keyphrases. Using these spans, we design salient span recovery (SSR) and salient span prediction (SSP) as objectives to further pretrain BART (Lewis et al., 2020) with unlabeled in-domain data. By corrupting salient spans from the document and asking the model to predict them back within or without the original context, SSR and SSP encourage the model to learn knowledge conducive to downstream keyphrase generation.
|
| 34 |
+
|
| 35 |
+
We design low-resource benchmarks in the scientific domain and extensively compare our method with supervised and unsupervised keyphrase generation baselines. The results establish that the proposed method can outperform the BART fine-tuning baseline and various supervised keyphrase generation models trained from scratch in the low-resource setting. Moreover, we show that one variant of SSR is superior to other in-domain pre-
|
| 36 |
+
|
| 37 |
+
training objectives, such as text infilling and title generation. Finally, we show that our method improves the performance of zero-shot domain transfer. We conclude by observing that manually annotated present keyphrases align with the assumptions of our retrieval-based span selection method.
|
| 38 |
+
|
| 39 |
+
In summary, the main innovation of the paper is the strategy to select information from unlabeled data for effective learning of PLM-based low-resource keyphrase generation. We do not aim at designing masking strategies, as literature has explored closely related ones (Joshi et al., 2020; Guu et al., 2020), or performing large-scale pre-training with annotated keyphrase data, as explored in the concurrent work Kulkarni et al. (2022). Instead, we (1) observe that phrase saliency can be defined from the perspective of information retrieval, (2) design a procedure to mine salient spans automatically from large in-domain unlabeled data, (3) use these spans for domain-adaptive pre-training that teaches the model to induce essential information, and (4) demonstrate the resulting gains on low-resource keyphrase generation and zero-shot domain transfer. We release our experiment code and model outputs at https://github.com/xiaowu0162/low-resource-kpgen to facilitate future research.
|
| 40 |
+
|
| 41 |
+
# 2 Methods
|
| 42 |
+
|
| 43 |
+
Problem Definition Let $D_{kp}$ denote a keyphrase generation dataset, which is a set of tuples $(\mathbf{x}^{\mathrm{i}},\mathbf{p}^{\mathrm{i}})$ , where $\mathbf{x}^{\mathrm{i}}$ is an input document and $\mathbf{p}^{\mathrm{i}} = \{p_1^i,p_2^i,\dots,p_{|\mathbf{p}^{\mathrm{i}}|}^i\}$ is the corresponding set of keyphrases (each of which is a sequence of tokens). Following Yuan et al. (2020), we define keyphrase generation as generating the sequence $\mathbf{y}^{\mathrm{i}} = (p_1^i [\mathrm{sep}] p_2^i [\mathrm{sep}] \dots [\mathrm{sep}] p_{|\mathbf{p}^{\mathrm{i}}|}^i)$<sup>1</sup> based on the source text $\mathbf{x}^{\mathrm{i}}$ . In addition, let $D_{aux}$ be a set of unlabeled documents from $D_{kp}$ 's domain.
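As a minimal sketch of this One2Seq-style linearization (the literal string of the separator token is an assumption):

```python
SEP = " [sep] "  # assumed literal form of the separator token

def build_target(keyphrases):
    """Join a document's keyphrases into one target sequence y."""
    return SEP.join(keyphrases)

# Example with the keyphrases from Figure 1:
# build_target(["localization", "regularization", "mixed finite elements", "damage"])
# -> "localization [sep] regularization [sep] mixed finite elements [sep] damage"
```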
|
| 44 |
+
|
| 45 |
+
A typical way to train BART-like sequence-to-sequence PLMs for keyphrase generation is to directly fine-tune using the formulation above. Designed for small $D_{kp}$ , our method first extracts salient spans using $D_{aux}$ and further trains BART using these spans. The resulting model with task-specific and domain-specific knowledge is then fine-tuned in the same way. Section 2.1 introduces the salient span extraction method, and section 2.2 details the intermediate learning objectives.
|
| 46 |
+
|
| 47 |
+

|
| 48 |
+
Figure 2: An illustration of the proposed framework. A PLM is first pre-trained on large in-domain data $D_{aux}$ using one of the proposed objectives, and then fine-tuned on keyphrase generation using $D_{kp}$ . In the example on the right, the salient span "event trigger words" and a random span "text" are corrupted, while "biomedical" is not.
|
| 49 |
+
|
| 50 |
+
# 2.1 Retrieval for Salient Spans
|
| 51 |
+
|
| 52 |
+
Inspired by previous works that identify retrieval as an important usage of keyphrases and a possible way to evaluate keyphrases (Kim et al., 2013; Boudin et al., 2020; Boudin and Gallina, 2021), we use retrieval as a tool to define and extract salient spans. Concretely, we define a salient span of a document as a contiguous sequence of tokens (an n-gram) that can retrieve the document from $D_{aux}$ via the BM25 retrieval (Robertson and Walker, 1994). For each document $\mathbf{x}^{\mathrm{i}} \in D_{aux}$ , let $Q^{i} = \{q_{1}^{i},\dots,q_{n}^{i}\}$ be a set of candidate n-grams. Let $BM25(x,q)$ be the BM25 score between a document $x$ and a query $q$ in $D_{aux}$ . Then, define
|
| 53 |
+
|
| 54 |
+
$$
\mathrm{rank}(q_j^i) = \bigl|\, \mathbf{x}' \in D_{aux} : BM25(\mathbf{x}', q_j^i) > BM25(\mathbf{x}^{\mathrm{i}}, q_j^i) \,\bigr|.
$$
|
| 57 |
+
|
| 58 |
+
We then choose the set of salient spans $S^i$ from $Q^i$ by applying a filtering function to $\text{rank}(q_j^i)$ .
|
| 59 |
+
|
| 60 |
+
$$
S^i = \left\{ q_j^i \in Q^i : \mathrm{rank}(q_j^i) \leq \mathrm{threshold}\left(\left|q_j^i\right|\right) \right\},
$$
|
| 63 |
+
|
| 64 |
+
where threshold $(\cdot)$ is a function that specifies a maximum accepted rank based on the span length. We use variable thresholds for different lengths to reduce BM25's bias towards longer phrases.
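A minimal sketch of this selection rule is given below. The `bm25_score` callable stands in for the corpus-level BM25 scorer, and the threshold values are the ones reported later in Section 3.4; this is an illustration rather than the authors' implementation.

```python
def rank(corpus_ids, query, doc_id, bm25_score):
    """Number of documents scored higher than doc_id by BM25 for this query."""
    own_score = bm25_score(doc_id, query)
    return sum(1 for other in corpus_ids if bm25_score(other, query) > own_score)

def salient_spans(corpus_ids, doc_id, candidates, bm25_score,
                  threshold={1: 500, 2: 430, 3: 360}):  # length-adaptive rank cutoffs (Section 3.4)
    """Keep candidate n-grams whose retrieval rank is within the length-specific threshold."""
    selected = []
    for q in candidates:
        n = len(q.split())  # n-gram length, at most 3
        if n in threshold and rank(corpus_ids, q, doc_id, bm25_score) <= threshold[n]:
            selected.append(q)
    return selected
```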
|
| 65 |
+
|
| 66 |
+
Different from keyphrase extraction works that require the keyphrases to be noun phrases (Hulth, 2003a; Wan and Xiao, 2008; Bougouin et al., 2013), we allow any n-gram from the document that does not contain stop words to be a candidate. To match the length of keyphrases, we require $n$ to be at most 3. In addition, different from previous works that use retrieval methods to identify similar
|
| 67 |
+
|
| 68 |
+
documents and use their keyphrase annotations as external knowledge (Chen et al., 2019a; Kim et al., 2021), we use each candidate as a query and use the retrieved documents for calculating the rank.
|
| 69 |
+
|
| 70 |
+
Intuitively, our definition of salient spans reflects the idea that generating good keyphrases requires both intra-article and inter-article reasoning: while intra-article reasoning is used to find the most emphasized spans, inter-article knowledge is employed to determine whether a span can identify the article of interest in the sea of other articles.
|
| 71 |
+
|
| 72 |
+
# 2.2 In-domain Representation Learning
|
| 73 |
+
|
| 74 |
+
After extracting the spans containing document-wise and domain-wise salient information, we propose to facilitate the downstream fine-tuning on the small $D_{kp}$ by first training BART on $D_{aux}$ with the following objectives.
|
| 75 |
+
|
| 76 |
+
Salient Span Recovery We design salient span recovery as a variant of BART's pre-training objectives where the tokens for masking or deletion are strategically chosen. Let $S^i = \{s_1^i, \dots, s_n^i\}$ be the salient spans of $\mathbf{x}^i$ . During training, each occurrence of $s_j^i$ in $\mathbf{x}^i$ is corrupted with probability $k_s$ . In addition, we corrupt words in $\mathbf{x}^i \backslash (s_1^i \cup \dots \cup s_n^i)$ randomly with probability $k_o$ to obtain the final input $\mathbf{x}_{\mathrm{SSR}}^i$ . The model is trained to minimize the cross entropy loss $\mathcal{L}_{CE}(\mathbf{z}^i, \mathbf{x}^i)$ , where $\mathbf{z}^i$ is the model's reconstruction of the corrupted input $\mathbf{x}_{\mathrm{SSR}}^i$ .
|
| 77 |
+
|
| 78 |
+
We experiment with two corruption strategies: (1) replacing the salient spans or randomly selected words with a single [MASK] token in the input (denoted as SSR-M) or (2) deleting the salient spans or
|
| 79 |
+
|
| 80 |
+
randomly selected words from the input sequence (denoted as SSR-D).
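A sketch of the corruption step is shown below. Whitespace tokenization and the literal [MASK] symbol are simplifying assumptions; the actual model operates on BART subword tokens.

```python
import random

def corrupt_for_ssr(tokens, salient_spans, k_s=0.4, k_o=0.2, mode="mask"):
    """Corrupt a tokenized document for SSR.

    Each occurrence of a salient span is corrupted with probability k_s;
    every remaining token is corrupted independently with probability k_o.
    mode="mask" replaces a corrupted span/token with a single [MASK] (SSR-M),
    mode="delete" removes it from the sequence (SSR-D).
    """
    spans = [s.split() for s in salient_spans if s.strip()]
    out, i = [], 0
    while i < len(tokens):
        match = next((s for s in spans if tokens[i:i + len(s)] == s), None)
        if match is not None:
            if random.random() < k_s:
                if mode == "mask":
                    out.append("[MASK]")
                # mode == "delete": drop the span entirely
            else:
                out.extend(match)
            i += len(match)
        else:
            if random.random() < k_o:
                if mode == "mask":
                    out.append("[MASK]")
            else:
                out.append(tokens[i])
            i += 1
    return out
```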
|
| 81 |
+
|
| 82 |
+
Salient Span Prediction We design SSP to align with the keyphrase generation task explicitly. While the input in SSP is still $\mathbf{x}_{\mathbf{SSR}}^i$ , the target is the concatenation of the salient spans $\mathbf{x}_{\mathbf{SSP}}^i = (s_1^i[\mathrm{sep}] s_2^i[\mathrm{sep}]\ldots [\mathrm{sep}] s_n^i)$ , sorted by $\mathrm{rank}(s_j^i)$ in the ascending order. The model is trained to minimize the cross entropy loss $\mathcal{L}_{CE}(\mathbf{z}^i, \mathbf{x}_{\mathbf{SSP}}^i)$ , where $\mathbf{z}^i$ is the model's prediction of the salient spans based on $\mathbf{x}_{\mathbf{SSR}}^i$ .
|
| 83 |
+
|
| 84 |
+
Similar to SSR, we also experiment with two variants: SSP-M refers to replacing the salient spans or randomly selected words with a single [MASK] token and SSP-D means deleting the salient spans or randomly selected words from the input. Figure 2 demonstrates the four objectives. SSR-M uses the same input corruption strategy as SSP-M, and SSR-D uses the same input corruption strategy as SSP-D.
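A minimal sketch of the SSP target construction, reusing the rank values computed during span mining (variable names are hypothetical):

```python
def build_ssp_target(salient_spans, ranks, sep=" [sep] "):
    """Concatenate salient spans sorted by retrieval rank in ascending order,
    so that the most identifying spans come first."""
    ordered = sorted(salient_spans, key=lambda s: ranks[s])
    return sep.join(ordered)
```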
|
| 85 |
+
|
| 86 |
+
# 3 Experimental Setup
|
| 87 |
+
|
| 88 |
+
# 3.1 Datasets
|
| 89 |
+
|
| 90 |
+
We conduct evaluations on five scientific keyphrase generation datasets. We use KP20k (Meng et al., 2017) for training and evaluate on KP20k, Inspec (Hulth, 2003b), Krapivin (Krapivin et al., 2009), NUS (Nguyen and Kan, 2007), and SemEval (Kim et al., 2010). After removing articles overlapping with the validation or test set, the KP20k train set contains 509,818 instances. Following Meng et al. (2017), we lower-case the text and replace the digits with a <digit> symbol to preprocess all the datasets. Table 1 presents the statistics of the test datasets.
|
| 91 |
+
|
| 92 |
+
We use the KP20k train set to create $D_{kp}$ and $D_{aux}$ , while keeping the validation and test sets the same. For the major results presented in section 4, we set $|D_{kp}| = 20,000$ and we let $D_{aux}$ be the entire train set. In other words, only 20,000 annotated documents are available to the model.
|
| 93 |
+
|
| 94 |
+
# 3.2 Baselines
|
| 95 |
+
|
| 96 |
+
First, we consider the following unsupervised baselines. As most of these methods are keyphrase extraction methods except Shen et al. (2022), we only evaluate their present keyphrase performance.
|
| 97 |
+
|
| 98 |
+
TextRank (Mihalcea and Tarau, 2004) is a graph-based method that converts text to graphs and then uses PageRank to rank candidate phrases.
|
| 99 |
+
|
| 100 |
+
SIFRank and SIFRank+ (Sun et al., 2020) rank phrases by phrase-document cosine similarity with
|
| 101 |
+
|
| 102 |
+
<table><tr><td>Dataset</td><td>#Examples</td><td>#KP</td><td>|KP|</td><td>%AKP</td></tr><tr><td>KP20k</td><td>20,000</td><td>5.28</td><td>2.04</td><td>37.06</td></tr><tr><td>Inspec</td><td>500</td><td>9.83</td><td>2.48</td><td>26.38</td></tr><tr><td>Krapivin</td><td>400</td><td>5.85</td><td>2.21</td><td>44.34</td></tr><tr><td>NUS</td><td>211</td><td>11.65</td><td>2.22</td><td>45.61</td></tr><tr><td>SemEval</td><td>100</td><td>14.66</td><td>2.38</td><td>57.37</td></tr></table>
|
| 103 |
+
|
| 104 |
+
Table 1: Statistics of all the test sets we use. #KP: average number of keyphrases per document; |KP|: average length of each keyphrase; %AKP: the percentage of absent keyphrases.
|
| 105 |
+
|
| 106 |
+
PLM-based dense embeddings. SIFRank+ uses position information to better handle long documents.
|
| 107 |
+
|
| 108 |
+
Liang et al. (2021) is embedding-based and combines the global phrase-document similarity with the local boundary-aware degree centrality to calculate the score of each candidate phrase for ranking.
|
| 109 |
+
|
| 110 |
+
AutoKeyGen (Shen et al., 2022) performs keyphrase generation by constructing a phrase bank, predicting present keyphrases via partial matching, and training a model to generate absent keyphrases.
|
| 111 |
+
|
| 112 |
+
We also consider the following supervised baselines trained on the low-resource $D_{kp}$ .
|
| 113 |
+
|
| 114 |
+
ExHiRD-h (Chen et al., 2021) designs a hierarchical decoding framework combined with a hard exclusion algorithm for reducing duplication, applied to the CatSeq models (Yuan et al., 2020).
|
| 115 |
+
|
| 116 |
+
One2Set (Ye et al., 2021) proposes to train a transformer to predict keyphrases in parallel as a set based on learned control codes, which avoids the bias of generating keyphrases as a sequence.
|
| 117 |
+
|
| 118 |
+
BART. A fine-tuned BART-base (Lewis et al., 2020) model for keyphrase generation.
|
| 119 |
+
|
| 120 |
+
Transformer. A randomly initialized Transformer with BART's architecture and vocabulary.
|
| 121 |
+
|
| 122 |
+
We denote our methods as BART+SSR-M, BART+SSR-D, BART+SSP-M and BART+SSP-D. They train BART on $D_{aux}$ using SSR or SSP, and then train on $D_{kp}$ for keyphrase generation.
|
| 123 |
+
|
| 124 |
+
# 3.3 Evaluation
|
| 125 |
+
|
| 126 |
+
Following Chan et al. (2019), we use greedy decoding. We apply the Porter Stemmer (Porter, 1980) on the predictions and targets and then calculate the macro-averaged F1@5 and F1@M for present and absent keyphrases. While F1@k only considers the top $k$ predictions for evaluation, F1@M takes all predictions from the model (Yuan et al., 2020). We do not calculate F1@M for the unsupervised methods since they only predict the ranking of the candidates. Each experiment is repeated with three
|
| 127 |
+
|
| 128 |
+
randomly sampled $D_{kp}$ 's, and we report the averaged scores. Unless otherwise stated, we use the same script based on Chan et al. (2019)'s implementation to calculate the scores.
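A simplified sketch of the per-document F1@M computation after Porter stemming is shown below; the reported numbers come from the reference implementation of Chan et al. (2019), so treat this only as an illustration of the protocol.

```python
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()

def stem_phrase(phrase):
    """Lower-case and stem every word of a keyphrase."""
    return " ".join(stemmer.stem(w) for w in phrase.lower().split())

def f1_at_m(predictions, targets):
    """F1@M: all model predictions are scored against the gold keyphrases (after stemming)."""
    preds = {stem_phrase(p) for p in predictions}
    golds = {stem_phrase(t) for t in targets}
    if not preds or not golds:
        return 0.0
    correct = len(preds & golds)
    if correct == 0:
        return 0.0
    precision, recall = correct / len(preds), correct / len(golds)
    return 2 * precision * recall / (precision + recall)

# Macro-average f1_at_m over documents to obtain the reported scores.
```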
|
| 129 |
+
|
| 130 |
+
# 3.4 Implementation Details
|
| 131 |
+
|
| 132 |
+
In this section, we provide the implementation details. Further discussions on the baselines and the hyperparameters are provided in the appendix.
|
| 133 |
+
|
| 134 |
+
SSR and SSP We obtain the salient spans via BM25 retrieval. Using Elasticsearch $^2$ , we build a database containing documents from $D_{aux}$ . Then, for each document in $D_{aux}$ , we construct a boolean query to perform a fuzzy search for each of its candidates. We use BM25 as the search metric, using $k_{1} = 1.2$ and $b = 0.75$ . Our query code is based on the implementations of Asai et al. (2021). Then, we use the following threshold function:
|
| 135 |
+
|
| 136 |
+
$$
\mathrm{threshold} = \{1{:}\ 500,\; 2{:}\ 430,\; 3{:}\ 360\}
$$
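A minimal sketch of querying the index for one candidate phrase is given below. It assumes the Elasticsearch v8 Python client, a hypothetical index name and text field, and a simple `match_phrase` query instead of the boolean fuzzy query used in the paper (following Asai et al., 2021).

```python
from elasticsearch import Elasticsearch

es = Elasticsearch("http://localhost:9200")  # assumed local instance holding D_aux

def retrieval_rank(candidate, doc_id, index="kp20k_aux", top_k=1000):
    """Return the BM25 rank of doc_id when the candidate phrase is the query,
    or None if the document is outside the top_k hits (top_k > all thresholds)."""
    resp = es.search(index=index,
                     query={"match_phrase": {"text": candidate}},  # simplified query
                     size=top_k)
    for position, hit in enumerate(resp["hits"]["hits"]):
        if hit["_id"] == doc_id:
            return position
    return None
```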
|
| 139 |
+
|
| 140 |
+
We start training from the pre-trained BART-base checkpoint using Fairseq's translation task<sup>3</sup>. The input documents are truncated to 512 tokens. We set $k_{s} = 0.4$ and $k_{o} = 0.2$ . This gives a corruption rate of about $39\%$ tokens, and the [MASK] symbol takes up about $11\%$ of the resulting corrupted text (for SSR-M and SSP-M). For SSP-M and SSP-D, we remove phrases from the target that are substrings of longer salient spans. We use the Adam optimizer with $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ and polynomial decay with 6000 warmup steps. We use batch size 64, learning rate 3e-4, 0.1 dropout, and 0.01 weight decay.
|
| 141 |
+
|
| 142 |
+
Fine-tuning For fine-tuning on $D_{kp}$ , we use learning rate 1e-5, batch size 32, and 150 warmup steps. All experiments are run on two Nvidia GTX 1080Ti GPUs, and we use gradient accumulation to achieve the desired batch size.
|
| 143 |
+
|
| 144 |
+
# 4 Results and Analysis
|
| 145 |
+
|
| 146 |
+
We aim to address the following questions.
|
| 147 |
+
|
| 148 |
+
1. Does our method learn strong representations from unlabeled data, and thus has competitive performance in low-resource fine-tuning?
|
| 149 |
+
2. Can our method outperform training on $D_{aux}$ with other objectives such as text infilling?
|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
Figure 3: Present keyphrase generation performance of different methods as a function of train set size. Our in-domain annotation-free pre-training approach achieves the best performance in all resource schemes.
|
| 153 |
+
|
| 154 |
+
3. Can our representations benefit keyphrase generation in zero-shot settings?
|
| 155 |
+
4. Are present keyphrases effective for retrieval? How much do they overlap with salient spans?
|
| 156 |
+
|
| 157 |
+
# 4.1 Low-Resource Fine-tuning Performance
|
| 158 |
+
|
| 159 |
+
The scarcity of annotated data poses a significant challenge to supervised keyphrase generation models. Using $D_{kp}$ from KP20k with size 5k, 10k, 20k, 50k, and 100k, we train One2Set (Ye et al., 2021) and ExHiRD-h (Chen et al., 2021) from scratch and compare their performance with fine-tuning the pre-trained BART or our BART+SSR-D model trained on KP20k. The macro-averaged F1@M scores for the present keyphrases of the KP20k test set are shown in Figure 3. One2Set and ExHiRD-h perform poorly with less than 50k training data and have a similar performance as BART when the data size is as large as 100k. Nevertheless, in all resource regimes, our in-domain unsupervised SSR-D pre-training achieves the best performance.
|
| 160 |
+
|
| 161 |
+
Next, we focus on the scenario with $|D_{kp}| = 20,000$ and provide a more careful analysis. Table 2 and 3 show the performance of low-resource absent and present keyphrase generation on the scientific benchmarks. Additional qualitative results are presented in the appendix.
|
| 162 |
+
|
| 163 |
+
# Using pre-trained language models improves low-resource present keyphrase performance.
|
| 164 |
+
|
| 165 |
+
From Table 3, it is apparent that fine-tuning BART significantly outperforms the three supervised base-
|
| 166 |
+
|
| 167 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">KP20k</td><td colspan="2">Inspec</td><td colspan="2">Krapivin</td><td colspan="2">NUS</td><td colspan="2">SemEval</td></tr><tr><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td></tr><tr><td>ExHiRD-h</td><td>0.35</td><td>0.57</td><td>0.26</td><td>0.41</td><td>0.65</td><td>0.98</td><td>0.46</td><td>0.57</td><td>0.43</td><td>0.56</td></tr><tr><td>One2Set</td><td>0.54</td><td>0.98</td><td>0.10</td><td>0.15</td><td>0.71</td><td>1.32</td><td>0.69</td><td>1.01</td><td>0.66</td><td>0.94</td></tr><tr><td>Transformer</td><td>1.16</td><td>1.90</td><td>0.48</td><td>0.71</td><td>1.30</td><td>1.86</td><td>1.50</td><td>2.02</td><td>1.17</td><td>1.44</td></tr><tr><td>BART</td><td>0.93</td><td>1.87</td><td>0.89</td><td>1.58</td><td>1.37</td><td>2.52</td><td>1.06</td><td>1.70</td><td>0.87</td><td>1.24</td></tr><tr><td>BART+SSP-M</td><td>1.39</td><td>2.78</td><td>0.93</td><td>1.70</td><td>2.24</td><td>4.34</td><td>1.77</td><td>2.92</td><td>1.66</td><td>2.31</td></tr><tr><td>BART+SSP-D</td><td>1.35</td><td>2.73</td><td>0.91</td><td>1.63</td><td>2.19</td><td>4.06</td><td>1.86</td><td>2.79</td><td>1.28</td><td>1.78</td></tr><tr><td>BART+SSR-M</td><td>1.95</td><td>3.42</td><td>1.04</td><td>1.73</td><td>2.41</td><td>3.87</td><td>2.16</td><td>3.12</td><td>1.85</td><td>2.39</td></tr><tr><td>BART+SSR-D</td><td>1.95</td><td>3.76</td><td>1.22</td><td>2.07</td><td>2.55</td><td>4.63</td><td>3.11</td><td>5.31</td><td>2.15</td><td>2.89</td></tr></table>
|
| 168 |
+
|
| 169 |
+
Table 2: F1 scores of low-resource absent keyphrase generation on five scientific benchmarks ( $|D_{kp}| = 20,000$ ). Best result is boldfaced. BART+SSR-D outperforms the other approaches in all benchmarks. Meanwhile, all the proposed objectives improve over simple BART fine-tuning.
|
| 170 |
+
|
| 171 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">KP20k</td><td colspan="2">Inspec</td><td colspan="2">Krapivin</td><td colspan="2">NUS</td><td colspan="2">SemEval</td></tr><tr><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td></tr><tr><td>TextRank</td><td>9.24</td><td>-</td><td>32.07</td><td>-</td><td>11.56</td><td>-</td><td>8.99</td><td>-</td><td>9.24</td><td>-</td></tr><tr><td>SIFRank</td><td>14.09</td><td>-</td><td>38.22</td><td>-</td><td>15.94</td><td>-</td><td>13.97</td><td>-</td><td>16.43</td><td>-</td></tr><tr><td>SIFRank+</td><td>20.00</td><td>-</td><td>35.08</td><td>-</td><td>19.59</td><td>-</td><td>25.47</td><td>-</td><td>24.77</td><td>-</td></tr><tr><td>AutoKeyGen</td><td>23.4</td><td>-</td><td>30.3</td><td>-</td><td>17.1</td><td>-</td><td>21.8</td><td>-</td><td>18.7</td><td>-</td></tr><tr><td>Liang et al. (2021)</td><td>17.66</td><td>-</td><td>29.57</td><td>-</td><td>16.93</td><td>-</td><td>24.98</td><td>-</td><td>25.33</td><td>-</td></tr><tr><td>ExHiRD-h</td><td>24.01</td><td>29.92</td><td>22.41</td><td>25.21</td><td>22.83</td><td>29.32</td><td>28.26</td><td>33.75</td><td>22.23</td><td>26.71</td></tr><tr><td>One2Set</td><td>15.76</td><td>23.84</td><td>10.46</td><td>14.21</td><td>15.23</td><td>23.24</td><td>20.61</td><td>28.22</td><td>15.11</td><td>20.48</td></tr><tr><td>Transformer</td><td>11.06</td><td>18.04</td><td>6.63</td><td>9.91</td><td>10.05</td><td>17.12</td><td>14.51</td><td>20.72</td><td>8.77</td><td>12.13</td></tr><tr><td>BART</td><td>26.97</td><td>31.54</td><td>28.54</td><td>33.93</td><td>26.62</td><td>31.12</td><td>33.88</td><td>38.08</td><td>26.33</td><td>30.12</td></tr><tr><td>BART+SSP-M</td><td>28.04</td><td>32.30</td><td>27.39</td><td>32.25</td><td>27.51</td><td>33.59</td><td>34.35</td><td>39.21</td><td>24.49</td><td>27.72</td></tr><tr><td>BART+SSP-D</td><td>28.29</td><td>32.63</td><td>27.29</td><td>32.84</td><td>27.46</td><td>32.49</td><td>33.44</td><td>38.05</td><td>26.04</td><td>29.47</td></tr><tr><td>BART+SSR-M</td><td>25.83</td><td>33.00</td><td>22.57</td><td>28.09</td><td>23.18</td><td>30.01</td><td>31.13</td><td>36.86</td><td>22.60</td><td>27.28</td></tr><tr><td>BART+SSR-D</td><td>28.82</td><td>35.43</td><td>24.35</td><td>30.17</td><td>27.08</td><td>34.30</td><td>34.34</td><td>40.49</td><td>23.69</td><td>29.04</td></tr></table>
|
| 172 |
+
|
| 173 |
+
Table 3: F1 scores of low-resource present keyphrase generation on five benchmarks in the scientific domain $(|D_{kp}| = 20,000)$ . Best result is boldfaced. Pre-trained language models greatly outperform methods trained from scratch. Moreover, performing in-domain pre-training using the proposed objectives improves over the simple BART fine-tuning on three of the five benchmarks. Some example outputs are presented in the appendix.
|
| 174 |
+
|
| 175 |
+
lines trained from scratch. However, Table 2 indicates that the absent keyphrase generation follows a different pattern. Randomly initializing a Transformer with BART's architecture, we achieve better F1@5 and F1@M on KP20k, NUS, and SemEval. This shows that in the low-resource regime, BART pre-training mainly facilitates present keyphrase generation but does not give the model much additional capability to generate absent keyphrases.
|
| 176 |
+
|
| 177 |
+
SSR-D performs best among the proposed objectives. Among the proposed objectives, we find that SSR-D enables the best fine-tuning performance, achieving the best F1@5 and F1@M for absent keyphrase generation on all datasets and the best F1@M for present keyphrase generation on three of the five datasets. Our intuition is that
|
| 178 |
+
|
| 179 |
+
SSR-D is the most challenging objective because it requires the prediction of target spans at the correct positions in the context (rather than only predicting the salient spans in any order as in SSP), without being given [MASK] tokens as hints in the input (as in SSR-M or SSP-M). SSR-D's low-resource absent keyphrase performance is highly competitive. Its F1 scores on KP20k, Inspec, Krapivin, and SemEval even exceed those of ExHiRD-h trained on the complete KP20k train set (as reported in Chen et al. (2021) and Table 8 in the appendix).
|
| 180 |
+
|
| 181 |
+
At the same time, we find SSP-M and SSP-D have very similar performance, while SSR-D outperforms both on KP20k, Krapivin, and NUS. One possible reason is that they converge in a relatively short time, and thus their behaviors do not differ much.
|
| 182 |
+
|
| 183 |
+

|
| 184 |
+
Figure 4: KP20k fine-tuning validation loss with different initializations, using learning rate 1e-5 and $|D_{kp}| = 20,000$ . BART+SSR-D converges to the lowest loss and suffers the least from overfitting.
|
| 185 |
+
|
| 186 |
+
Also, they may be affected by the noise in the salient spans due to the lack of human annotation. We suspect that SSP-like objectives may have more advantages if the span quality is as good as manual annotations, as suggested by the observations made by Kulkarni et al. (2022).
|
| 187 |
+
|
| 188 |
+
# 4.2 In-domain Pre-training Objectives
|
| 189 |
+
|
| 190 |
+
In this section, we compare SSP and SSR with two baseline objectives that can be used to train BART on $D_{aux}$ before fine-tuning on $D_{kp}$ .
|
| 191 |
+
|
| 192 |
+
BART+TI Text infilling (TI) is one of the pretraining objectives of BART. In text infilling, spans with lengths following a Poisson distribution $(\lambda = 3)$ are randomly selected from $\mathbf{x}^{\mathrm{i}}$ and replaced with a single [MASK] token to obtain $\mathbf{x}_{\mathrm{Infilling}}^{\mathrm{i}}$ . The model is trained to minimize the cross entropy loss $\mathcal{L}_{CE}(\mathbf{z}^{\mathrm{i}},\mathbf{x}^{\mathrm{i}})$ , where $\mathbf{z}^{\mathrm{i}}$ is the model's reconstruction of the corrupted input $\mathbf{x}_{\mathrm{Infilling}}^{\mathrm{i}}$ .
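A sketch of this corruption, assuming whitespace tokenization and BART's original masking budget of roughly 30% of the tokens (the exact ratio is an assumption here):

```python
import numpy as np

def text_infilling(tokens, mask_ratio=0.3, poisson_lam=3.0, seed=0):
    """Replace randomly chosen spans (lengths ~ Poisson(lam), floored at 1)
    with a single [MASK] until roughly mask_ratio of the tokens are corrupted."""
    rng = np.random.default_rng(seed)
    tokens = list(tokens)
    budget = int(mask_ratio * len(tokens))
    while budget > 0 and len(tokens) > 1:
        length = int(min(max(rng.poisson(poisson_lam), 1), budget))
        start = int(rng.integers(0, len(tokens) - length + 1))
        tokens[start:start + length] = ["[MASK]"]
        budget -= length
    return tokens
```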
|
| 193 |
+
|
| 194 |
+
BART+TG Ye and Wang (2018) showed that learning signals from title generation can benefit low-resource keyphrase generation. We remove the titles from $\mathbf{x}^{\mathrm{i}}$ and further pre-train BART for generating the titles using cross-entropy loss.
|
| 195 |
+
|
| 196 |
+
Results Table 4 compares BART+TI, BART+TG, and BART+SSR-D. Fine-tuning via BART+SSR-D achieves the best F1@5 and F1@M for absent keyphrases and the best F1@M for present keyphrases. This indicates that SSR is more tailored for identifying keyphrases than TI. Also, TG
|
| 197 |
+
|
| 198 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Present</td><td colspan="2">Absent</td></tr><tr><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td></tr><tr><td>BART+TI</td><td>29.27</td><td>34.58</td><td>1.33</td><td>2.67</td></tr><tr><td>BART+TG</td><td>29.77</td><td>33.86</td><td>1.28</td><td>2.55</td></tr><tr><td>BART+SSR-D</td><td>28.82</td><td>35.43</td><td>1.95</td><td>3.76</td></tr></table>
|
| 199 |
+
|
| 200 |
+
Table 4: F1 scores of low-resource keyphrase generation on KP20k ( $|D_{kp}| = 20,000$ ) based on in-domain models pre-trained with different methods. BART+SSR-D achieves the best F1@5 and F1@M for absent keyphrases, and the best F1@M for present keyphrases.
|
| 201 |
+
|
| 202 |
+
contributes better to present keyphrases since the information in titles is likely to be extractive.
|
| 203 |
+
|
| 204 |
+
In Figure 4, we plot the validation loss for low-resource fine-tuning. We observe that all in-domain pre-training methods outperform the BART fine-tuning baseline. Initializing with BART+SSR-D converges to the best validation loss and seems less susceptible to overfitting on the small data.
|
| 205 |
+
|
| 206 |
+
# 4.3 Zero-shot Cross-domain Generalization
|
| 207 |
+
|
| 208 |
+
Although we mainly focus on the low-resource scheme, it is also helpful to investigate the zero-shot generalization ability. Using the in-domain models trained with KP20k as $D_{aux}$ , we fine-tune the models on keyphrase generation using KPTimes (Gallina et al., 2019) and evaluate on the scientific benchmarks. In this setting, while KPTimes provides comprehensive task-wise information, the final performance also highly depends on how much domain-wise information the model extracts from $D_{aux}$ .
|
| 209 |
+
|
| 210 |
+
We compare the performance of BART+TI, BART+TG, and BART+SSR-D. The results are presented in Table 5. Although the intermediate training does not use manual keyphrase labels, the learned representation condenses domain-specific knowledge. It results in better zero-shot transfer performance compared to the BART directly fine-tuned on KPTimes. SSR-D achieves the best cross-domain transfer performance, outperforming the other methods by a large margin, especially in present keyphrase generation and F1@5 for absent keyphrase generation. We also directly report the score of the intermediate SSP-D model. Despite a somewhat competitive performance on present keyphrases, its absent keyphrase performance is worse than the baselines. Considering the poor performance of BART fine-tuned on KPTimes, we conclude that training with in-domain annotated data is crucial for absent keyphrase generation.
|
| 211 |
+
|
| 212 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Present</td><td colspan="2">Absent</td></tr><tr><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td></tr><tr><td>SSP-D-only</td><td>4.21</td><td>5.63</td><td>0.08</td><td>0.11</td></tr><tr><td>BART</td><td>3.01</td><td>5.51</td><td>0.13</td><td>0.23</td></tr><tr><td>BART+TI</td><td>6.51</td><td>11.13</td><td>0.22</td><td>0.40</td></tr><tr><td>BART+TG</td><td>7.20</td><td>12.37</td><td>0.27</td><td>0.50</td></tr><tr><td>BART+SSR-D</td><td>10.81</td><td>16.87</td><td>0.82</td><td>0.47</td></tr></table>
|
| 213 |
+
|
| 214 |
+
# 4.4 Analysis of BM25 Retrieval
|
| 215 |
+
|
| 216 |
+
In this section, we address questions about our retrieval-based definition of salient spans.
|
| 217 |
+
|
| 218 |
+
Can present keyphrases retrieve well? We construct a document pool with the train and validation set of KP20k and the five test datasets. For each document, we perform BM25 retrieval using each of its present keyphrases. If the document is retrieved in the top 1000 documents, then we consider the retrieval as successful. Table 6 presents the resulting success rates. SemEval is excluded because all of its keyphrases are stemmed. We observe that the overall success rate is high for all datasets. This shows that the properties of present keyphrases align with our retrieval-based definition of salient spans. Moreover, shorter keyphrases retrieve worse due to their higher frequency in the corpus. This justifies our design of the length-adaptive threshold function to compensate for the bias.
|
| 219 |
+
|
| 220 |
+
How do present keyphrases overlap with salient spans? We compute the overlap between the salient spans and the actual present keyphrases. For each document, we define phrase recall as the proportion of present keyphrases that appear among the salient spans, word recall as the proportion of all words in present keyphrases that are also in any salient span, and word precision as the proportion of words in salient spans that are included in any keyphrase of the same document. Table 7 presents the measures evaluated on the KP20k train set. The columns labeled "len k" only consider keyphrases and salient spans of length k. We observe that the salient spans can cover about $36\%$ of the present keyphrases and $85\%$ of the words in the present keyphrases. Meanwhile, the $13\%$ precision indicates that the salient spans also contain many words that do not belong to any keyphrase.
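These per-document measures can be sketched as follows; exact string matching between phrases is an assumption, and macro-averaging over documents yields the reported numbers.

```python
def overlap_measures(present_keyphrases, salient_spans):
    """Phrase recall, word recall, and word precision between the gold present
    keyphrases and the mined salient spans of one document."""
    kp_set, span_set = set(present_keyphrases), set(salient_spans)
    kp_words = {w for p in present_keyphrases for w in p.split()}
    span_words = {w for s in salient_spans for w in s.split()}
    phrase_recall = len(kp_set & span_set) / max(len(kp_set), 1)
    word_recall = len(kp_words & span_words) / max(len(kp_words), 1)
    word_precision = len(kp_words & span_words) / max(len(span_words), 1)
    return phrase_recall, word_recall, word_precision
```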
|
| 221 |
+
|
| 222 |
+
Table 5: F1 scores of zero-shot keyphrase generation on KP20k. Best result is boldfaced. "SSP-D-only" = BART trained on SSP-D using KP20k. BART+SSR-D significantly outperforms other methods.
|
| 223 |
+
|
| 224 |
+
<table><tr><td>Dataset</td><td>len 1</td><td>len 2</td><td>len 3</td><td>overall</td></tr><tr><td>KP20k</td><td>39.4%</td><td>83.5%</td><td>91.7%</td><td>80.5%</td></tr><tr><td>Inspec</td><td>67.8%</td><td>89.9%</td><td>97.8%</td><td>90.4%</td></tr><tr><td>Krapivin</td><td>52.2%</td><td>82.7%</td><td>94.9%</td><td>81.1%</td></tr><tr><td>NUS</td><td>52.4%</td><td>77.5%</td><td>93.7%</td><td>76.1%</td></tr></table>
|
| 225 |
+
|
| 226 |
+
Table 6: Retrieval success rates of manually annotated present keyphrases. The success rate is high for all the datasets overall, while at the same time exhibiting a positive correlation with keyphrase length.
|
| 227 |
+
|
| 228 |
+
<table><tr><td>Measure</td><td>len 1</td><td>len 2</td><td>len 3</td><td>overall</td></tr><tr><td>Phrase Recall</td><td>0.188</td><td>0.376</td><td>0.380</td><td>0.364</td></tr><tr><td>Word Recall</td><td>0.376</td><td>0.857</td><td>0.864</td><td>0.849</td></tr><tr><td>Word Precision</td><td>0.039</td><td>0.069</td><td>0.051</td><td>0.128</td></tr></table>
|
| 229 |
+
|
| 230 |
+
Table 7: Overlap between salient spans and the present keyphrases of the KP20k training set. Salient spans obtained using BM25 have high word-level coverage but lower phrase-level coverage.
|
| 231 |
+
|
| 232 |
+
In addition, although we tune the threshold function to benefit short phrases, the overlap between the salient single-word spans and the present single-word keyphrases is still small. Also, the overall word precision is much higher than that obtained by considering the phrase lengths separately. This suggests that our method tends to ignore the boundaries of keyphrases.
|
| 233 |
+
|
| 234 |
+
# 5 Related Work
|
| 235 |
+
|
| 236 |
+
Low-resource Keyphrase Generation Prior works in keyphrase identification are broadly divided into keyphrase extraction and keyphrase generation. While keyphrase extraction only extracts present keyphrases as spans of the document (Hulth, 2003a; Mihalcea and Tarau, 2004; Wan and Xiao, 2008; Bougouin et al., 2013; Zhang et al., 2016; Sun et al., 2020; Liang et al., 2021), keyphrase generation directly predicts both present and absent keyphrases (Meng et al., 2017; Chen et al., 2018, 2019b; Zhao and Zhang, 2019; Chan et al., 2019; Yuan et al., 2020; Swaminathan et al., 2020; Ahmad et al., 2021; Ye et al., 2021; Kim et al., 2021). One solution to the "low-resource" problem is unsupervised keyphrase extraction or generation, which does not require annotations. However, they either cannot predict absent keyphrases or require the construction of large phrase banks and may have inferior performance compared to supervised methods. Alternatively, other previous studies have considered solving
|
| 237 |
+
|
| 238 |
+
low-resource keyphrase generation via synthetic labeling and semi-supervised multi-task learning to leverage $D_{aux}$ (Ye and Wang, 2018) or using reinforcement learning to exploit learning signals from a pre-trained discriminator in the setting of Generative Adversarial Networks (Lancioni et al., 2020). In contrast, our innovation is the retrieval-based task-specific pre-training of PLMs.
|
| 239 |
+
|
| 240 |
+
Retrieval-Augmented Keyphrase Generation Retrieval methods have been used to investigate keyphrases' role or to enhance the performance of keyphrase generation models. Kim et al. (2013) and Boudin et al. (2020) verify that keyphrases can significantly enhance retrieval performance. Boudin and Gallina (2021) provide a finer-grained analysis of absent keyphrases and conclude that a subset of them contributes to information retrieval by adding in new information via document expansion. Chen et al. (2019a) design a retriever to find similar documents from the training corpus, whose phrases are used as keyphrase candidates and encoded as a continuous vector to augment the input. Kim et al. (2021) propose to augment the document's structure with keyphrases from similar documents and obtain a structure-aware representation of the augmented text.
|
| 241 |
+
|
| 242 |
+
Language modeling and keyphrase generation Recent studies have successfully used PLMs for rich-resource keyphrase generation (Liu et al., 2021) and keyphrase extraction (Sahrawat et al., 2020). For other tasks, studies explored continued domain-adaptive pre-training of the autoencoding (Gururangan et al., 2020; Lee et al., 2019) and encoder-decoder PLMs (Yu et al., 2021). Kulkarni et al. (2022) is a concurrent work that explores a similar objective for representation learning using supervised data. In comparison, our work focuses on unsupervised learning to facilitate low-resource keyphrase generation. It thus leads to different conclusions from that in Kulkarni et al. (2022).
|
| 243 |
+
|
| 244 |
+
# 6 Conclusion
|
| 245 |
+
|
| 246 |
+
This paper considers the problem of low-resource keyphrase generation. We design an innovative retrieval-based method to extract salient information from unlabeled documents and perform continued BART pre-training. We verify that the method facilitates low-resource keyphrase generation and zero-shot cross-domain generalization. Our method consistently outperforms the baselines
|
| 247 |
+
|
| 248 |
+
in a range of resource schemes. Future work may investigate dense embeddings for extracting salient spans, composing the proposed objectives, or designing specialized methods for fine-tuning on small datasets.
|
| 249 |
+
|
| 250 |
+
# Limitations
|
| 251 |
+
|
| 252 |
+
In this work, although we conduct experiments in a variety of settings and on several datasets, most of them are in the scientific domain only. In addition, we only experiment with BART. We use BART because it is pre-trained with a denoising autoencoding objective, which is closer to salient span recovery and prediction than other PLMs such as T5 (Raffel et al., 2020). Finally, we acknowledge that the proposed large-scale intermediate representation learning incurs energy costs and emissions. As a trade-off, we obtain strong representations that better solve the challenging low-resource problem and can be reused for fine-tuning on different datasets.
|
| 253 |
+
|
| 254 |
+
# Ethical Statement
|
| 255 |
+
|
| 256 |
+
We use the KP20k dataset distributed by their original host, and we have verified that our preprocessing methods do not introduce external biases or sensitive information. However, our self-supervised representation learning method may propagate the bias that lies in the unlabeled external data it uses. As our approach can be easily integrated into BART-based keyphrase generation services, we encourage potential users to monitor for biases closely and apply corresponding mitigation measures when necessary.
|
| 257 |
+
|
| 258 |
+
# Acknowledgment
|
| 259 |
+
|
| 260 |
+
The research is supported in part by Taboola and an Amazon AWS credit award. We thank the Taboola team for helpful discussions and feedback. We also thank the anonymous reviewers and the members of UCLA-NLP for providing their valuable feedback.
|
| 261 |
+
|
| 262 |
+
# References
|
| 263 |
+
|
| 264 |
+
Wasi Ahmad, Xiao Bai, Soomin Lee, and Kai-Wei Chang. 2021. Select, extract and generate: Neural keyphrase generation with layer-wise coverage attention. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1389–1404, Online. Association for Computational Linguistics.
|
| 265 |
+
|
| 266 |
+
Akari Asai, Jungo Kasai, Jonathan H. Clark, Kenton Lee, Eunsol Choi, and Hannaneh Hajishirzi. 2021. XOR QA: Cross-lingual open-retrieval question answering. In NAACL-HLT.
|
| 267 |
+
Gábor Berend. 2011. Opinion expression mining by exploiting keyphrase extraction. In Proceedings of 5th International Joint Conference on Natural Language Processing, pages 1162-1170, Chiang Mai, Thailand. Asian Federation of Natural Language Processing.
|
| 268 |
+
Florian Boudin and Ygor Gallina. 2021. Redefining absent keyphrases and their effect on retrieval effectiveness. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4185-4193, Online. Association for Computational Linguistics.
|
| 269 |
+
Florian Boudin, Ygor Gallina, and Akiko Aizawa. 2020. Keyphrase generation for scientific document retrieval. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1118-1126, Online. Association for Computational Linguistics.
|
| 270 |
+
Adrien Bougouin, Florian Boudin, and Beatrice Daille. 2013. TopicRank: Graph-based topic ranking for keyphrase extraction. In Proceedings of the Sixth International Joint Conference on Natural Language Processing, pages 543-551, Nagoya, Japan. Asian Federation of Natural Language Processing.
|
| 271 |
+
Erion Cano and Ondrej Bojar. 2019. Keyphrase generation: A text summarization struggle. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 666-672, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 272 |
+
Hou Pong Chan, Wang Chen, Lu Wang, and Irwin King. 2019. Neural keyphrase generation via reinforcement learning with adaptive rewards. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2163-2174, Florence, Italy. Association for Computational Linguistics.
|
| 273 |
+
Jun Chen, Xiaoming Zhang, Yu Wu, Zhao Yan, and Zhoujun Li. 2018. Keyphrase generation with correlation constraints. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4057-4066, Brussels, Belgium. Association for Computational Linguistics.
|
| 274 |
+
Wang Chen, Hou Pong Chan, Piji Li, Lidong Bing, and Irwin King. 2019a. An integrated approach for keyphrase generation via exploring the power of retrieval and extraction. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2846-2856, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 275 |
+
|
| 276 |
+
Wang Chen, Hou Pong Chan, Piji Li, and Irwin King. 2020. Exclusive hierarchical decoding for deep keyphrase generation. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 1095-1105, Online. Association for Computational Linguistics.
|
| 277 |
+
Wang Chen, Yifan Gao, Jiani Zhang, Irwin King, and Michael R. Lyu. 2019b. Title-guided encoding for keyphrase generation. In AAAI.
|
| 278 |
+
Wang Chen, Piji Li, and Irwin King. 2021. A training-free and reference-free summarization evaluation metric via centrality-weighted relevance and self-referenced redundancy. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 404-414, Online. Association for Computational Linguistics.
|
| 279 |
+
Kushal S. Dave and Vasudeva Varma. 2010. Pattern based keyword extraction for contextual advertising. In Proceedings of the 19th ACM International Conference on Information and Knowledge Management, CIKM '10, page 1885-1888, New York, NY, USA. Association for Computing Machinery.
|
| 280 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 281 |
+
Ygor Gallina, Florian Boudin, and Beatrice Daille. 2019. KPTimes: A large-scale dataset for keyphrase generation on news documents. In Proceedings of the 12th International Conference on Natural Language Generation, pages 130-135, Tokyo, Japan. Association for Computational Linguistics.
|
| 282 |
+
Suchin Gururangan, Ana Marasović, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A. Smith. 2020. Don't stop pretraining: Adapt language models to domains and tasks. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8342-8360, Online. Association for Computational Linguistics.
|
| 283 |
+
Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Ming-Wei Chang. 2020. REALM: Retrieval-augmented language model pre-training.
|
| 284 |
+
Khaled Hammouda, Diego Munate, and Mohamed S. Kamel. 2005. Corephrase: Keyphrase extraction for document clustering. In International workshop on machine learning and data mining in pattern recognition, pages 265-274.
|
| 285 |
+
|
| 286 |
+
Michael A. Hedderich, Lukas Lange, Heike Adel, Jannik Strötgen, and Dietrich Klakow. 2021. A survey on recent approaches for natural language processing in low-resource scenarios. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2545-2568, Online. Association for Computational Linguistics.
|
| 287 |
+
Rada Mihalcea and Paul Tarau. 2004. TextRank: Bringing order into text. In Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing, EMNLP '04, pages 404-411. Association for Computational Linguistics.
|
| 288 |
+
Anette Hulth. 2003a. Improved automatic keyword extraction given more linguistic knowledge. In Proceedings of the 2003 Conference on Empirical Methods in Natural Language Processing, pages 216-223.
|
| 289 |
+
Anette Hulth. 2003b. Improved automatic keyword extraction given more linguistic knowledge. In Proceedings of the 2003 Conference on Empirical Methods in Natural Language Processing, EMNLP '03, page 216-223, USA. Association for Computational Linguistics.
|
| 290 |
+
Anette Hulth and Beata B. Megyesi. 2006. A study on automatically extracted keywords in text categorization. In Proceedings of the 21st International Conference on Computational Linguistics and the 44th Annual Meeting of the Association for Computational Linguistics, ACL-44, page 537-544, USA. Association for Computational Linguistics.
|
| 291 |
+
Karen Sparck Jones. 1972. A statistical interpretation of term specificity and its application in retrieval. Journal of Documentation, 28(1):11-21.
|
| 292 |
+
Steve Jones and Mark S. Staveley. 1999. Phrasier: A system for interactive document retrieval using keyphrases. In Proceedings of the 22nd Annual International ACM SIGIR Conference on Research and Development in Information Retrieval, SIGIR '99, page 160-167, New York, NY, USA. Association for Computing Machinery.
|
| 293 |
+
Mandar Joshi, Danqi Chen, Yinhan Liu, Daniel S. Weld, Luke Zettlemoyer, and Omer Levy. 2020. SpanBERT: Improving pre-training by representing and predicting spans. Transactions of the Association for Computational Linguistics, 8:64-77.
|
| 294 |
+
Jihyuk Kim, Myeongho Jeong, Seungtaek Choi, and Seung-won Hwang. 2021. Structure-augmented keyphrase generation. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 2657-2667, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 295 |
+
Su Nam Kim, Olena Medelyan, Min-Yen Kan, and Timothy Baldwin. 2010. SemEval-2010 task 5: Automatic keyphrase extraction from scientific articles. In Proceedings of the 5th International Workshop on Semantic Evaluation, pages 21-26, Uppsala, Sweden. Association for Computational Linguistics.
|
| 299 |
+
Youngsam Kim, Munhyong Kim, Andrew Cattle, Julia Otmakhova, Suzi Park, and Hyopil Shin. 2013. Applying graph-based keyword extraction to document retrieval. In Proceedings of the Sixth International Joint Conference on Natural Language Processing, pages 864-868, Nagoya, Japan. Asian Federation of Natural Language Processing.
|
| 300 |
+
Mikalai Krapivin, Aliaksandr Autaeu, and Maurizio Marchese. 2009. Large dataset for keyphrases extraction. Technical report, University of Trento.
|
| 301 |
+
Mayank Kulkarni, Debanjan Mahata, Ravneet Arora, and Rajarshi Bhowmik. 2022. Learning rich representation of keyphrases from text. In *Findings of the Association for Computational Linguistics: NAACL* 2022, pages 891–906, Seattle, United States. Association for Computational Linguistics.
|
| 302 |
+
Giuseppe Lancioni, Saida S. Mohamed, Beatrice Portelli, Giuseppe Serra, and Carlo Tasso. 2020. Keyphrase generation with GANs in low-resources scenarios. In Proceedings of SustainNLP: Workshop on Simple and Efficient Natural Language Processing, pages 89-96, Online. Association for Computational Linguistics.
|
| 303 |
+
Jinhyuk Lee, Wonjin Yoon, Sungdong Kim, Donghyeon Kim, Sunkyu Kim, Chan Ho So, and Jaewoo Kang. 2019. BioBERT: a pre-trained biomedical language representation model for biomedical text mining. CoRR, abs/1901.08746.
|
| 304 |
+
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
|
| 305 |
+
Xinnian Liang, Shuangzhi Wu, Mu Li, and Zhoujun Li. 2021. Unsupervised keyphrase extraction by jointly modeling local and global context. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 155-164, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 306 |
+
Rui Liu, Zheng Lin, and Weiping Wang. 2021. Addressing extraction and generation separately: Keyphrase prediction with pre-trained language models. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 29:3180-3191.
|
| 307 |
+
Rui Meng, Sanqiang Zhao, Shuguang Han, Daqing He, Peter Brusilovsky, and Yu Chi. 2017. Deep keyphrase generation. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 582-592, Vancouver, Canada. Association for Computational Linguistics.
|
| 308 |
+
|
| 309 |
+
Rada Mihalcea and Paul Tarau. 2004. TextRank: Bringing order into text. In Proceedings of the 2004 Conference on Empirical Methods in Natural Language Processing, pages 404-411, Barcelona, Spain. Association for Computational Linguistics.
|
| 310 |
+
Thuy Dung Nguyen and Min-Yen Kan. 2007. Keyphrase extraction in scientific publications. In Asian Digital Libraries. Looking Back 10 Years and Forging New Frontiers, pages 317-326, Berlin, Heidelberg. Springer Berlin Heidelberg.
|
| 311 |
+
Martin F. Porter. 1980. An algorithm for suffix stripping. Program, 40:211-218.
|
| 312 |
+
Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J Liu, et al. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. J. Mach. Learn. Res., 21(140):1-67.
|
| 313 |
+
S. E. Robertson and S. Walker. 1994. Some simple effective approximations to the 2-poisson model for probabilistic weighted retrieval. In SIGIR '94, pages 232-241, London. Springer London.
|
| 314 |
+
Dhruva Sahrawat, Debanjan Mahata, Haimin Zhang, Mayank Kulkarni, Agniv Sharma, Rakesh Gosangi, Amanda Stent, Yaman Kumar, Rajiv Ratn Shah, and Roger Zimmermann. 2020. Keyphrase extraction as sequence labeling using contextualized embeddings. In Advances in Information Retrieval, pages 328-335, Cham. Springer International Publishing.
|
| 315 |
+
Xianjie Shen, Yinghan Wang, Rui Meng, and Jingbo Shang. 2022. Unsupervised deep keyphrase generation. Proceedings of the AAAI Conference on Artificial Intelligence, 36(10):11303-11311.
|
| 316 |
+
Yi Sun, Hangping Qiu, Yu Zheng, Zhongwei Wang, and Chaoran Zhang. 2020. SIFRank: A new baseline for unsupervised keyphrase extraction based on pre-trained language model. IEEE Access, 8:10896-10906.
|
| 317 |
+
Avinash Swaminathan, Haimin Zhang, Debanjan Mahata, Rakesh Gosangi, Rajiv Ratn Shah, and Amanda Stent. 2020. A preliminary exploration of GANs for keyphrase generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8021-8030, Online. Association for Computational Linguistics.
|
| 318 |
+
Yixuan Tang, Weilong Huang, Qi Liu, Anthony K. H. Tung, Xiaoli Wang, Jisong Yang, and Beibei Zhang. 2017. QALink: Enriching text documents with relevant Q&A site contents. In Proceedings of the 2017 ACM on Conference on Information and Knowledge Management.
|
| 319 |
+
Xiaojun Wan and Jianguo Xiao. 2008. Single document keyphrase extraction using neighborhood knowledge. In Proceedings of the 23rd National Conference on Artificial Intelligence - Volume 2, AAAI'08, page 855-860. AAAI Press.
|
| 320 |
+
|
| 321 |
+
Theresa Wilson, Janyce Wiebe, and Paul Hoffmann. 2005. Recognizing contextual polarity in phrase-level sentiment analysis. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 347-354, Vancouver, British Columbia, Canada. Association for Computational Linguistics.
|
| 322 |
+
Xiaoyuan Wu and Alvaro Bolivar. 2008. Keyword extraction for contextual advertisement. In Proceedings of the 17th International Conference on World Wide Web, WWW '08, page 1195-1196, New York, NY, USA. Association for Computing Machinery.
|
| 323 |
+
Hai Ye and Lu Wang. 2018. Semi-supervised learning for neural keyphrase generation. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 4142-4153, Brussels, Belgium. Association for Computational Linguistics.
|
| 324 |
+
Jiacheng Ye, Tao Gui, Yichao Luo, Yige Xu, and Qi Zhang. 2021. One2Set: Generating diverse keyphrases as a set. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 4598-4608, Online. Association for Computational Linguistics.
|
| 325 |
+
Tiezheng Yu, Zihan Liu, and Pascale Fung. 2021. AdaptSum: Towards low-resource domain adaptation for abstractive summarization. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5892-5904, Online. Association for Computational Linguistics.
|
| 326 |
+
Xingdi Yuan, Tong Wang, Rui Meng, Khushboo Thaker, Peter Brusilovsky, Daqing He, and Adam Trischler. 2020. One size does not fit all: Generating and evaluating variable number of keyphrases. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7961-7975, Online. Association for Computational Linguistics.
|
| 327 |
+
Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Peter Liu. 2020a. PEGASUS: Pre-training with extracted gap-sentences for abstractive summarization. In Proceedings of the 37th International Conference on Machine Learning, volume 119 of Proceedings of Machine Learning Research, pages 11328-11339. PMLR.
|
| 328 |
+
Qi Zhang, Yang Wang, Yeyun Gong, and Xuanjing Huang. 2016. Keyphrase extraction using deep recurrent neural networks on Twitter. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 836-845, Austin, Texas. Association for Computational Linguistics.
|
| 329 |
+
Rong Zhang, Revanth Gangi Reddy, Md Arafat Sultan, Vittorio Castelli, Anthony Ferritto, Radu Florian, Efsun Sarioglu Kayi, Salim Roukos, Avi Sil, and Todd Ward. 2020b. Multi-stage pre-training for low-resource domain adaptation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5461-5468, Online. Association for Computational Linguistics.
|
| 332 |
+
Yongzheng Zhang, Nur Zincir-Heywood, and Evangelos Milios. 2004. World wide web site summarization. Web Intelli. and Agent Sys., 2(1):39-53.
|
| 333 |
+
Jing Zhao and Yuxiang Zhang. 2019. Incorporating linguistic constraints into keyphrase generation. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5224-5233, Florence, Italy. Association for Computational Linguistics.
|
| 334 |
+
Yicheng Zou, Bolin Zhu, Xingwu Hu, Tao Gui, and Qi Zhang. 2021. Low-resource dialogue summarization with domain-agnostic multi-source pretraining. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 80–91, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 335 |
+
|
| 336 |
+
# Supplementary Material: Appendices
|
| 337 |
+
|
| 338 |
+
# A Rich-resource Results
|
| 339 |
+
|
| 340 |
+
In Table 8, we compare BART, ExHiRD-s (Chen et al., 2021), ExHiRD-h (Chen et al., 2021), and One2Set (Ye et al., 2021) trained on the entire KP20k train set. In this rich-resource scenario, BART fine-tuning outperforms ExHiRD on the scientific benchmarks while performing worse than One2Set on most datasets.
|
| 341 |
+
|
| 342 |
+
# B Hyperparameter Optimization
|
| 343 |
+
|
| 344 |
+
For SSP and SSR, we search over $\{\{1:500,2:430,3:360\},\{1:500,2:400,3:300\},\{1:300,2:300,3:300\}\}$ for the threshold, $\{0.3,0.35,0.4,0.45\}$ for $k_{s}$, and $\{0.2,0.3,0.4\}$ for $k_{o}$. We also search over $\{3e{-}4, 1e{-}4, 3e{-}5\}$ for the learning rate. We prepare the validation set using the same method for each experiment and use the validation loss as the stopping criterion during training. We choose the hyperparameters that yield the best validation performance during downstream fine-tuning on $D_{kp}$.
|
| 345 |
+
|
| 346 |
+
For fine-tuning, we perform a grid search over $\{1e{-}4, 6e{-}5, 3e{-}5, 1e{-}5\}$ for the learning rate, $\{32,64\}$ for the batch size, and $\{50,150,400,1000\}$ for the number of warmup steps. We choose the hyperparameters based on validation performance.
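This fine-tuning search is a plain grid search; as an illustration only, the sketch below enumerates the listed configurations and keeps the one with the best validation score. The `train_and_validate` function is a hypothetical placeholder, not part of the released code.

```python
from itertools import product

def train_and_validate(lr, batch_size, warmup_steps):
    """Hypothetical placeholder: fine-tune with this configuration and
    return a validation score (higher is better)."""
    return 0.0  # replace with an actual fine-tuning run

learning_rates = [1e-4, 6e-5, 3e-5, 1e-5]
batch_sizes = [32, 64]
warmup_steps_options = [50, 150, 400, 1000]

best_score, best_config = float("-inf"), None
for lr, bs, warmup in product(learning_rates, batch_sizes, warmup_steps_options):
    score = train_and_validate(lr, bs, warmup)
    if score > best_score:
        best_score, best_config = score, (lr, bs, warmup)

print("Best (learning rate, batch size, warmup steps):", best_config)
```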
|
| 347 |
+
|
| 348 |
+
In Table 9, we present all the hyperparameters for training our SSR/SSP model and fine-tuning on low-resource keyphrase generation.
|
| 349 |
+
|
| 350 |
+
# C Implementation Details of the Baselines
|
| 351 |
+
|
| 352 |
+
We use the publicly available implementations to reproduce ExHiRD-h, One2Set, TextRank, SIFRank, and SIFRank+. For AutoKeyGen, we use the scores reported by the authors. For ExHiRD-h, we use the hyperparameters recommended in Chen et al. (2021). For One2Set, we use the hyperparameters recommended in the authors' implementation, except for removing dropout after tuning on the KP20k validation set. For SIFRank and SIFRank+, we use the L1 layer of ELMo and set $\lambda = 0.6$. We write our own implementation of Liang et al. (2021), where we follow the method in SIFRank to generate candidate phrases and use BERT-base-uncased (Devlin et al., 2019) to obtain contextual embeddings. Through a hyperparameter search on the KP20k validation set, we determine the hyperparameters $\{\alpha = 1.2, \beta = 0.0, \lambda = 0.8\}$.
|
| 353 |
+
|
| 354 |
+
# D Characteristics of Salient Spans
|
| 355 |
+
|
| 356 |
+
How many salient spans do we get? In our BM25 retrieval setting, where the KP20k train set is used as $D_{aux}$, several spans can accurately retrieve the original document. On average, each document has 9.83 spans that retrieve the document as the top result. Among these spans, $12\%$ are unigrams, $30\%$ are bigrams, and $58\%$ are trigrams. If exact matching is specified in Elasticsearch, the salient spans have few hits, indicating that they tend to be rare.
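As an illustration of this retrieval criterion, the sketch below keeps only those candidate n-gram spans that retrieve their source document at rank 1. It uses the `rank_bm25` package and a three-document toy corpus, both of which are assumptions for illustration; the paper's own pipeline uses Elasticsearch over the full $D_{aux}$.

```python
from rank_bm25 import BM25Okapi

# Toy stand-in for D_aux; in the paper this is the KP20k training set.
corpus = [
    "we propose a neural model for keyphrase generation from scientific text".split(),
    "graph based ranking algorithms for unsupervised keyword extraction".split(),
    "pre trained language models improve low resource summarization".split(),
]
bm25 = BM25Okapi(corpus)

def salient_spans(doc_id, max_n=3):
    """Return n-gram spans (n <= max_n) of document `doc_id` that retrieve it back at rank 1."""
    doc = corpus[doc_id]
    spans = set()
    for n in range(1, max_n + 1):
        for i in range(len(doc) - n + 1):
            span = doc[i:i + n]
            scores = bm25.get_scores(span)               # BM25 score of the span against every document
            if max(range(len(corpus)), key=lambda j: scores[j]) == doc_id:
                spans.add(" ".join(span))
    return spans

print(salient_spans(0))
```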
|
| 357 |
+
|
| 358 |
+
Is BM25 indispensable? We considered the TF-IDF score (Jones, 1972) as an alternative phrase-document similarity measure. We observed that it also gives good salient span predictions when the document lengths are similar. On KP20k, we find that using the retrieval scheme is more important than choosing between TF-IDF and BM25 as the scoring function. We finally chose BM25 because it is designed for information retrieval, adapts better to long documents, and enables better keyphrase generation performance. It is worth noting that the scoring function might be further improved by incorporating dense embeddings; we leave this to future work.
|
| 359 |
+
|
| 360 |
+
# E Further Discussions
|
| 361 |
+
|
| 362 |
+
Failed Attempts To explore the possibility of extending or unifying our proposed objectives, we ran several preliminary experiments on (1) combining span masking with span deletion and (2) combining SSR and SSP via multi-task learning or multi-step adaptation. However, the results were not as good as BART+SSR-D.
|
| 363 |
+
|
| 364 |
+
Computational Budget All experiments are run on a local GPU server. SSP and SSR take 20 and 120 GPU hours, respectively, on a dataset of a size similar to KP20k, and the final fine-tuning takes 1 GPU hour on a dataset with 20,000 examples.
|
| 365 |
+
|
| 366 |
+
# F Qualitative Results
|
| 367 |
+
|
| 368 |
+
We present two sets of outputs in Figure 5 and Figure 6. Figure 5 presents the predictions of the low-resource models on the scientific benchmark datasets (corresponding to Tables 2 and 3). Figure 6 presents the predictions of the zero-shot models on KP20k (corresponding to Table 5). We find that
|
| 369 |
+
|
| 370 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">KP20k</td><td colspan="2">Inspec</td><td colspan="2">Krapivin</td><td colspan="2">NUS</td><td colspan="2">SemEval</td></tr><tr><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td><td>F1@5</td><td>F1@M</td></tr><tr><td colspan="11">Present Keyphrase Generation</td></tr><tr><td>ExHiRD-h</td><td>31.07</td><td>37.38</td><td>25.35</td><td>29.13</td><td>28.56</td><td>30.75</td><td>-</td><td>-</td><td>30.40</td><td>28.21</td></tr><tr><td>ExHiRD-s</td><td>30.75</td><td>37.20</td><td>23.53</td><td>27.81</td><td>27.84</td><td>33.84</td><td>-</td><td>-</td><td>26.71</td><td>31.41</td></tr><tr><td>One2Set</td><td>35.57</td><td>39.14</td><td>29.13</td><td>32.77</td><td>33.46</td><td>37.47</td><td>39.94</td><td>44.58</td><td>32.17</td><td>34.18</td></tr><tr><td>BART</td><td>32.21</td><td>39.03</td><td>27.31</td><td>33.01</td><td>26.42</td><td>33.11</td><td>36.66</td><td>43.09</td><td>28.32</td><td>34.53</td></tr><tr><td colspan="11">Absent Keyphrase Generation</td></tr><tr><td>ExHiRD-h</td><td>1.57</td><td>2.47</td><td>1.09</td><td>1.64</td><td>2.19</td><td>3.31</td><td>-</td><td>-</td><td>1.58</td><td>2.05</td></tr><tr><td>ExHiRD-s</td><td>1.36</td><td>2.22</td><td>0.95</td><td>1.56</td><td>1.63</td><td>2.59</td><td>-</td><td>-</td><td>1.24</td><td>1.87</td></tr><tr><td>One2Set</td><td>3.54</td><td>5.82</td><td>1.91</td><td>2.99</td><td>4.49</td><td>7.16</td><td>3.74</td><td>5.52</td><td>2.24</td><td>2.87</td></tr><tr><td>BART</td><td>2.06</td><td>3.96</td><td>0.86</td><td>1.53</td><td>2.82</td><td>4.95</td><td>2.52</td><td>4.14</td><td>1.50</td><td>2.03</td></tr></table>
|
| 371 |
+
|
| 372 |
+
Table 8: Rich-resource keyphrase generation results. All reported scores are macro-averaged F1 scores across runs with three different seeds. The best results are boldfaced. We run our evaluation script on the predictions provided by Chen et al. (2021) to obtain the scores for ExHiRD-h and ExHiRD-s. Although BART does not achieve SOTA performance, it is a competitive model for both present and absent keyphrase generation.
|
| 373 |
+
|
| 374 |
+
<table><tr><td>Parameter</td><td>SSR</td><td>SSP</td><td>Fine-tuning</td></tr><tr><td>vocabulary size</td><td>51,200</td><td>51,200</td><td>51,200</td></tr><tr><td># parameters</td><td>140M</td><td>140M</td><td>140M</td></tr><tr><td>ks, ko</td><td>0.4, 0.2</td><td>0.4, 0.2</td><td>-</td></tr><tr><td>total epochs</td><td>60</td><td>10</td><td>15</td></tr><tr><td>batch size</td><td>64</td><td>64</td><td>32</td></tr><tr><td>learning rate</td><td>3e-4</td><td>3e-4</td><td>1e-5</td></tr><tr><td>lr schedule</td><td>polynomial</td><td>polynomial</td><td>polynomial</td></tr><tr><td>warmup steps</td><td>6000</td><td>6000</td><td>150</td></tr><tr><td>optimizer</td><td>Adam</td><td>Adam</td><td>Adam</td></tr><tr><td>weight decay</td><td>0.01</td><td>0.01</td><td>0.01</td></tr><tr><td>dropout</td><td>0.1</td><td>0.1</td><td>0.1</td></tr><tr><td>max. grad. norm</td><td>0.1</td><td>0.1</td><td>0.1</td></tr></table>
|
| 375 |
+
|
| 376 |
+
Table 9: Hyperparameters for pre-training using SSR or SSP and fine-tuning on low-resource keyphrase generation. "polynomial" means the polynomial decay learning rate schedule. "max. grad. norm" means the maximum norm allowed for the gradient.
|
| 377 |
+
|
| 378 |
+
BART+SSR-D predicts more correct keyphrases and generally has a more diverse output.
|
| 379 |
+
|
| 380 |
+
# G Artifact Release
|
| 381 |
+
|
| 382 |
+
The KP20k dataset and the Fairseq library we use are MIT licensed. While commercial use is allowed for these artifacts, we only use them for research. For reproducibility, we release the three small KP20k subsets that we use as $D_{kp}$ and the code to reproduce our experiments. We refer to their original hosts for the entire training, validation, and testing datasets. In addition, we release the raw predictions of our BART+SSR-D model trained on 20k data from KP20k. Our code, data, and model outputs are released at https://github.com/xiaowu0162/low-resource-kpgen.
|
| 383 |
+
|
| 384 |
+
Title: short signatures from the weil pairing.
|
| 385 |
+
|
| 386 |
+
Abstract: we introduce a short signature scheme based on the computational diffiehellman assumption on certain elliptic and hyperelliptic curves. for standard security parameters, the signature length is about half that of a dsa signature with a similar level of security . our short signature scheme is designed for systems where signatures are typed in by a human or are sent over a low bandwidth channel . we survey a number of properties of our signature scheme such as signature aggregation and batch verification .
|
| 387 |
+
|
| 388 |
+
Ground Truth: short signatures, pairings, bilinear maps, digital signatures, elliptic curves
|
| 389 |
+
|
| 390 |
+
ExHiRD-h: short signature, weil pairing, signature aggregation, elliptic security
|
| 391 |
+
|
| 392 |
+
One2Set: weil pairing, security, hyperelliptic signature, weil signature
|
| 393 |
+
|
| 394 |
+
BART+SSR-D: short signatures, pairing, hyperelliptic curve, elliptic curve, digital signatures
|
| 395 |
+
|
| 396 |
+
Title: computing smallest singular triplets with implicitly restarted lanczos bidiagonalization.
|
| 397 |
+
|
| 398 |
+
Abstract: a matrix free algorithm, for the efficient computation of the smallest singular triplets of large and possibly sparse matrices is described. key characteristics of the approach are its use of lanczos bidiagonalization, implicit restarting, and harmonic ritz values. the algorithm also uses a deflation strategy that can be applied directly on lanczos bidiagonalization. a refinement postprocessing phase is applied to the converged singular vectors . the computational costs of the above techniques are kept small as they make direct use of the bidiagonal form obtained in the course of the lanczos factorization several numerical experiments with the method are presented that illustrate its effectiveness and indicate that it performs well compared to existing codes.
|
| 399 |
+
|
| 400 |
+
Ground Truth: lanczos bidiagonalization, implicit restarting, harmonic ritz values, deflation, pseudospectrum, refined singular vectors
|
| 401 |
+
|
| 402 |
+
ExHiRD-h: singular triplets, implicitly restarted lanczos bidiagonalization, refinement postprocessing, bidiagonalization bidiagonalization
|
| 403 |
+
|
| 404 |
+
One2Set: singular computing, matrix triplets
|
| 405 |
+
|
| 406 |
+
BART+SSR-D: lanczos bidiagonalization, lanczos factorization, deflation, matrix free algorithms, matrix eigenvalue problems
|
| 407 |
+
|
| 408 |
+
Title: self stabilizing clock synchronization in the presence of byzantine faults.
|
| 409 |
+
|
| 410 |
+
Abstract: we initiate a study of bounded clock synchronization under a more severe fault model than that proposed by lamport and melliar smith [digit]. realistic aspects of the problem of synchronizing clocks in the presence of faults are considered. one aspect is that clock synchronization is an on going task, thus the assumption that some of the processors never fail is too optimistic. to cope with this reality, we suggest self stabilizing protocols that stabilize in any ( long enough ) period in which less than a third of the processors are faulty . another aspect is that the clock value of each processor is bounded . a single transient fault may cause the clock to reach the upper bound . therefore , we suggest a bounded clock that wraps around when appropriate . we present two randomized self stabilizing protocols for synchronizing bounded clocks in the presence of byzantine processor failures . the first protocol assumes that processors have a common pulse , while the second protocol does not . a new type of distributed counter based on the chinese remainder theorem is used as part of the first protocol .
|
| 411 |
+
|
| 412 |
+
Ground Truth: self stabilization, clock synchronization, byzantine failures
|
| 413 |
+
|
| 414 |
+
ExHiRD-h: self stabilizing, clock synchronization, chinese remainder theorem
|
| 415 |
+
|
| 416 |
+
One2Set: fault synchronization, synchronization, fault tolerance, bounded presence, clock reality
|
| 417 |
+
|
| 418 |
+
BART+SSR-D: self stabilization, clock synchronization, byzantine faults, distributed algorithms
|
| 419 |
+
|
| 420 |
+
Title: distributed representations , simple recurrent networks , and grammatical structure .
|
| 421 |
+
|
| 422 |
+
Abstract: in this paper three problems for a connectionist account of language are considered [digit]. what is the nature of linguistic representations [digit] . how can complex structural relationships such as constituent structure be represented [digit] . how can the apparently open ended nature of language be accommodated by a fixed resource system using a prediction task , a simple recurrent network ( srn ) is trained on<unk> sentences which contain multiply embedded relative clauses . principal component analysis of the hidden unit activation patterns reveals that the network solves the task by developing complex distributed representations which encode the relevant grammatical relations and hierarchical constituent structure . differences between the srn state representations and the more traditional pushdown store are discussed in the final section .
|
| 423 |
+
|
| 424 |
+
Ground Truth: distributed representations, simple recurrent networks, grammatical structure
|
| 425 |
+
|
| 426 |
+
ExHiRD-h: distributed representations, recurrent networks, grammatical relations, hierarchical constituent structure, hierarchical representations
|
| 427 |
+
|
| 428 |
+
One2Set: recurrent networks, linguistic structure
|
| 429 |
+
|
| 430 |
+
BART+SSR-D: distributed representations, simple recurrent networks, grammatical structure, language, recurrent networks, hidden units, connectionist systems
|
| 431 |
+
|
| 432 |
+
Title: random walks in weyl chambers and the decomposition of tensor powers.
|
| 433 |
+
|
| 434 |
+
Abstract: we consider a class of random walks on a lattice, introduced by gessel and zeilberger, for which the reflection principle can be used to count the number of k step walks between two points which stay within a chamber of a weyl group. We prove three independent results about such reflectable walks first, a classification of all such walks semi second, many determinant formulas for walk numbers and their generating functions semi third, an equality between the walk numbers and the multiplicities of irreducibles in the kth tensor power of certain lie group representations associated to the walk types. Our results apply to the defining representations of the classical groups, as well as some spin representations of the orthogonal groups.
|
| 435 |
+
|
| 436 |
+
Ground Truth: random walk, tensor power, weyl group, hyperbolic bessel function, representation of lie group
|
| 437 |
+
|
| 438 |
+
ExHiRD-h: random walks, weyl chambers, tensor powers, weyl
|
| 439 |
+
|
| 440 |
+
One2Set: lattice chambers
|
| 441 |
+
|
| 442 |
+
BART+SSR-D: random walks, reflection principle, tensor powers, lie groups, weyl groups, determinant formulas, orthogonal groups, group representations, tensor product, group integrals
|
| 443 |
+
|
| 444 |
+
Figure 5: Example outputs from low-resource models on the scientific benchmarks. The models are trained on a training set of size 20,000. Correct keyphrases are colored in blue. We observe that BART+SSR-D has significantly more correct outputs and is able to predict more diverse keyphrases.
|
| 445 |
+
|
| 446 |
+
Title: shot change detection using scene based constraint.
|
| 447 |
+
|
| 448 |
+
Abstract: a key step for managing a large video database is to partition the video sequences into shots. past approaches to this problem tend to confuse gradual shot changes with changes caused by smooth camera motions. this is in part due to the fact that camera motion has not been dealt with in a more fundamental way. we propose an approach that is based on a physical constraint used in optical flow analysis, namely, the total brightness of a scene point across two frames should remain constant if the change across two frames is a result of smooth camera motion. since the brightness constraint would be violated across a shot change, the detection can be based on detecting the violation of this constraint. it is robust because it uses only the qualitative aspect of the brightness constraint detecting a scene change rather than estimating the scene itself. moreover, by tapping on the significant know how in using this constraint, the algorithm's robustness is further enhanced. experimental results are presented to demonstrate the performance of various algorithms . it was shown that our algorithm is less likely to interpret gradual camera motions as shot changes , resulting in a significantly better precision performance than most other algorithms .
|
| 449 |
+
|
| 450 |
+
Ground Truth: shot change detection, optical flow, video segmentation
|
| 451 |
+
|
| 452 |
+
BART+TI: cameras, computers and the internet
|
| 453 |
+
|
| 454 |
+
BART+TG: video, computers and the internet
|
| 455 |
+
|
| 456 |
+
BART+SSR-D: camera, optical flow, video shot change detection
|
| 457 |
+
|
| 458 |
+
Title: a generic sampling framework for improving anomaly detection in the next generation network
|
| 459 |
+
|
| 460 |
+
Abstract: the heterogeneous nature of network traffic in next generation networks (ngns) may impose scalability issue to traffic monitoring applications. while this issue can be well addressed by existing sampling approaches, owing to their inherent 'lossy' characteristic and data reduction principle, traditional sampling techniques suffer from incomplete traffic statistics, which can lead to inaccurate inferences of the network traffic. by focusing on two distinct traffic monitoring applications, namely, anomaly detection and traffic measurement, we highlight the possibility of addressing the accuracy of both applications without having to sacrifice one for the sake of the other. in light of this, we propose a generic sampling framework, which is capable of providing credible network traffic statistics for accurate anomaly detection in the non, while at the same time preserves the principal purpose of sampling ( i.e., to sample dominant traffic flows for accurate traffic measurement ), and thus addressing the accuracy of both applications concurrently . with the emphasize on the accuracy of anomaly detection and the scalability of monitoring devices, the performance evaluation over real network traces demonstrates the superiority of the proposed framework over traditional sampling techniques. copyright ( c ) [digit] john wiley sons , Ltd .
|
| 461 |
+
|
| 462 |
+
Ground Truth: sampling framework, anomaly detection, next generation network, scalability, traffic measurement, accuracy
|
| 463 |
+
|
| 464 |
+
BART+TI: ngs, computers and the internet, tech industry
|
| 465 |
+
|
| 466 |
+
BART+TG: computers and the internet, wireless communications
|
| 467 |
+
|
| 468 |
+
BART+SSR-D: ngn, anomaly detection, traffic measurement, wireless, nsa
|
| 469 |
+
|
| 470 |
+
Title: recent developments in high level synthesis
|
| 471 |
+
|
| 472 |
+
Abstract: we survey recent developments in high level synthesis technology for vlsi design . the need for higher level design automation tools are discussed first . we then describe some basic techniques for various subtasks of high level synthesis. techniques that have been proposed in the past few years ( since [digit ] ) for various subtasks of high level synthesis are surveyed . we also survey some new synthesis objectives including testability , power efficiency , and reliability .
|
| 473 |
+
|
| 474 |
+
Ground Truth: high level synthesis, vlsi design, design automation, design methodology
|
| 475 |
+
|
| 476 |
+
BART+TI: design, computers and the internet
|
| 477 |
+
|
| 478 |
+
BART+TG: design, computers and the internet
|
| 479 |
+
|
| 480 |
+
BART+SSR-D: high level synthesis, vlsi
|
| 481 |
+
|
| 482 |
+
Title: asynchronous parallel finite automaton a new mechanism for deep packet inspection in cloud computing
|
| 483 |
+
|
| 484 |
+
Abstract: security is quite an important issue in cloud computing. the general security mechanisms applied in the cloud are always passive defense methods such as encryption. besides these, it's necessary to utilize real time active monitoring, detection and defense technologies. according to the published researches, deep packets inspection (dpi) is the most effective technology to realize active inspection and defense. however, most of the works on dpi focus on its performance in general application scenarios and make improvement for space reduction, which could not meet the demands of high speed and stability in the cloud. therefore it is meaningful to improve the common mechanisms of dpi, making it more suitable for cloud computing. in this paper, an asynchronous parallel finite automaton (fa) is proposed. the applying of asynchronous parallelization and heuristic forecast mechanism decreases the time consumed in matching significantly, while still reduces the memory required. moreover, it is immune to overlapping problem, also enhancing the stability. the final evaluation results show that asynchronous parallel fa has higher stability, better performance on both time and memory, and is more suitable for cloud computing.
|
| 485 |
+
|
| 486 |
+
Ground Truth: asynchronous parallel finite automaton, deep packet inspection, cloud computing, lock free ffo
|
| 487 |
+
|
| 488 |
+
BART+TI: cloud computing, computer security
|
| 489 |
+
|
| 490 |
+
BART+TG: cloud computing, dpi
|
| 491 |
+
|
| 492 |
+
BART+SSR-D: cloud computing, parallel finite automaton, deep packets inspection ( dpi ), computer security
|
| 493 |
+
|
| 494 |
+
Figure 6: Example zero-shot cross-domain transfer outputs on the scientific benchmarks. We train the models with KP20k as $D_{aux}$ and KPTimes as $D_{kp}$ . Correct keyphrases are colored in blue. We observe that the KPTimes model fine-tuned on BART+SSR-D is able to predict significantly more diverse and relevant keyphrases. It also makes some correct predictions, while BART+TI and BART+TG barely make any.
|
representationlearningforresourceconstrainedkeyphrasegeneration/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f18a66ac6dd1d16aa84e2a6d996a6e28290db7b2d55207de9f30bef84e58cbb6
|
| 3 |
+
size 631873
|
representationlearningforresourceconstrainedkeyphrasegeneration/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:218ddfc671c02ef96c7a7fb32820d2c7c60ff3c9df50745b1c8f0c57736d5dc9
|
| 3 |
+
size 548022
|
residuallearningofneuraltextgenerationwithngramlanguagemodel/ca51a554-0f1e-4834-b6d8-cbab18356468_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e0430659cb206bdac872a718ecd48be57b5dfa165f54662454d85d7e7daae21
|
| 3 |
+
size 78196
|
residuallearningofneuraltextgenerationwithngramlanguagemodel/ca51a554-0f1e-4834-b6d8-cbab18356468_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8ff63dc43d4ae9153e42055df8767df9ca2a49e6c00868833e5583b12a1d7079
|
| 3 |
+
size 96635
|
residuallearningofneuraltextgenerationwithngramlanguagemodel/ca51a554-0f1e-4834-b6d8-cbab18356468_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e2109ae4c24f3ae45e9ef1fa491739bededcba8a2a84f093049d4c4183915680
|
| 3 |
+
size 337278
|
residuallearningofneuraltextgenerationwithngramlanguagemodel/full.md
ADDED
|
@@ -0,0 +1,307 @@
|
|
| 1 |
+
# N-gram Is Back: Residual Learning of Neural Text Generation with $n$ -gram Language Model
|
| 2 |
+
|
| 3 |
+
Huayang Li\* Deng Cai\* Jin Xu\* Taro Watanabe\*
|
| 4 |
+
|
| 5 |
+
Nara Institute of Science and Technology $\text{♥}$ The Chinese University of Hong Kong
|
| 6 |
+
|
| 7 |
+
$\diamond$ Institute for Interdisciplinary Information Sciences, Tsinghua University {li.huayang.lh6, taro}@is.naist.jp thisisjcykcd@gmail.com xujin21@mails.tsinghua.edu.cn
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
$N$ -gram language models (LM) have been largely superseded by neural LMs as the latter exhibits better performance. However, we find that $n$ -gram models can achieve satisfactory performance on a large proportion of testing cases, indicating they have already captured abundant knowledge of the language with relatively low computational cost. With this observation, we propose to learn a neural LM that fits the residual between an $n$ -gram LM and the real-data distribution. The combination of $n$ -gram and neural LMs not only allows the neural part to focus on the deeper understanding of language but also provides a flexible way to customize an LM by switching the underlying $n$ -gram model without changing the neural model. Experimental results on three typical language tasks (i.e., language modeling, machine translation, and summarization) demonstrate that our approach attains additional performance gains over popular standalone neural models consistently. We also show that our approach allows for effective domain adaptation by simply switching to a domain-specific $n$ -gram model, without any extra training. Our code is released at https://github.com/ghrua/NgramRes.
|
| 12 |
+
|
| 13 |
+
# 1 Introduction
|
| 14 |
+
|
| 15 |
+
$N$ -gram language models (LM) were widely adopted in a broad range of natural language processing (NLP) applications, such as input methods (Chen et al., 2019), statistical machine translation (Brown et al., 1990), and automatic speech recognition (Bahl et al., 1983). However, with the development of deep learning, neural LMs have gradually taken the place of $n$ -gram LMs and become the new standard in recent literature (Merity et al., 2017; Vaswani et al., 2017; Radford et al., 2019). One critical reason is the superior performance of neural LMs, e.g., the GPT-2 model (Radford et al., 2019) can generate text near the human level, outperforming $n$ -gram LMs by large margins.
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Figure 1: Sentence-level perplexity (PPL) of 5-gram LM and GPT-2 LM on the validation dataset of wikitext-103. We sort sentences in the validation dataset according to their 5-gram PPL scores, and collect them into 5 bins with an equal number of sentences. The reported PPL score of each bin is the average over the sentences in it, and the y-axis uses a logarithmic scale. Details of the dataset and LMs are shown in section 5.1.
|
| 19 |
+
|
| 20 |
+
Although neural LMs have surpassed $n$ -gram models at the macro level, we find that $n$ -gram LMs are still attractive: they achieve satisfactory performance on a large proportion of testing cases at a much lower cost than neural LMs. As shown in Figure 1, our preliminary experiments show that the performance of a 5-gram LM is close to that of a GPT-2 model trained from scratch on 3 out of 5 bins (1, 2, and 5). Moreover, the performance of the 5-gram LM on the first bin is slightly better than GPT-2. Because training a neural LM is much more expensive, spending effort on learning knowledge that can be cheaply captured by the $n$ -gram model seems wasteful.
|
| 21 |
+
|
| 22 |
+
Inspired by the above observation, we propose to learn a neural LM that focuses on the information gap that has not been captured by an $n$ -gram model: $\mathcal{F} \coloneqq \mathcal{G} - \mathcal{Q}$ , where $\mathcal{G}$ and $\mathcal{Q}$ are the real-data distribution and the $n$ -gram prediction distribution respectively, which is in a similar spirit to residual learning (He et al., 2016). More concretely, we combine the logits (the unnormalized
|
| 23 |
+
|
| 24 |
+
probability scores before the softmax layer) of a neural model and those derived from an $n$ -gram model. The joint neuro-symbolic system brings at least two appealing characteristics. First, since the neural model stands on the shoulders of the shallow $n$ -gram LM, it can concentrate on deeper understanding. Second, the underlying $n$ -gram LM can be purposefully switched without changing the neural model, which offers great flexibility in scenarios such as domain adaptation. That is, we can adapt the model to a specific domain by changing the underlying $n$ -gram LM in a plug-and-play manner, without changing any parameters of the neural model.
|
| 25 |
+
|
| 26 |
+
We conduct extensive experiments to evaluate the proposed approach. Experiments on the standard benchmarks of three typical language tasks, including language modeling, machine translation, and summarization, show that our approach can improve the performance of recent state-of-the-art neural models consistently and considerably. For example, our approach outperforms popular baseline models by at least 0.7 PPL scores on the wikitext-103 dataset for language modeling, 0.65 BLEU scores on average on IWSLT datasets for machine translation, and 0.36 ROUGE-L scores on the CNN/DailyMail dataset for summarization. Moreover, on the language modeling task, when switching the underlying $n$ -gram LM to a particular domain-specific one (e.g., IT, Koran, Law, Medical, and Subtitles) in a plug-and-play manner, our model can reduce the PPL by 5.4 points on average without any domain-specific training of the neural part. Remarkably, the performance of our approach is even close to fine-tuning the whole model on domain-specific corpora.
|
| 27 |
+
|
| 28 |
+
Our contributions are three-fold:
|
| 29 |
+
|
| 30 |
+
- We propose a residual learning approach for two heterogeneous structures, i.e., $n$ -gram and neural LMs, which forces the neural LM to approximate the information gap that has not been captured by $n$ -gram LM.
|
| 31 |
+
- Our approach is able to improve the performance of recent state-of-the-art neural models consistently and considerably on language modeling, machine translation, and summarization.
|
| 32 |
+
- Experiments on domain adaptation demonstrate that our approach can effectively and cheaply adapt the model to a specific domain
|
| 33 |
+
|
| 34 |
+
by changing the used $n$ -gram LM in a plug-and-play manner, without changing any parameters of the neural model.
|
| 35 |
+
|
| 36 |
+
# 2 Related Work
|
| 37 |
+
|
| 38 |
+
Language Model The $n$ -gram language model (LM) has long been widely used in many natural language processing (NLP) applications (Jurafsky, 2000). The emergence of advanced smoothing techniques enables the $n$ -gram model to provide a better estimation of human language (Kneser and Ney, 1995; Chen and Goodman, 1996; Heafield et al., 2013). In statistical machine translation (Brown et al., 1990) and automatic speech recognition (Bahl et al., 1983), the decoder-side $n$ -gram model is critical for estimating the quality of generated candidates. In the recent literature on input methods, the $n$ -gram LM is still the most popular choice for providing word suggestions (Huang et al., 2015; Chen et al., 2019) because of its low cost and low latency.
|
| 39 |
+
|
| 40 |
+
However, with the development of deep neural networks, the macro-level performance of neural LMs has surpassed that of $n$ -gram LMs by a large margin. Compared with the $n$ -gram LM, one big advantage of neural LMs based on recurrent neural networks (Hochreiter and Schmidhuber, 1997; Chung et al., 2014) and attention networks (Vaswani et al., 2017; Radford et al., 2019) is their ability to model long-distance dependencies (Grave et al., 2017). The success of neural LMs can also be observed in the large improvements achieved on many downstream tasks, e.g., text generation (Holtzman et al., 2020; Welleck et al., 2020; Su et al., 2022; Xu et al., 2022; Li et al., 2022; Cai et al., 2022), machine translation (Bahdanau et al., 2015; Luong and Manning, 2015; Vaswani et al., 2017; Cai et al., 2021), and summarization (Li et al., 2017; See et al., 2017; Bi et al., 2020).
|
| 41 |
+
|
| 42 |
+
Although neural LMs have outperformed $n$ -gram LMs at the macro level, we find that an $n$ -gram LM can achieve satisfactory performance on a large portion of testing cases. Since training a neural LM is much more expensive and its model capacity is fixed, we hypothesize that it is unnecessary to train the neural LM to learn knowledge that an $n$ -gram LM can capture at a much lower cost. Therefore, we propose a residual learning method that lets the neural LM learn the knowledge gap that has not been captured by the $n$ -gram LM.
|
| 43 |
+
|
| 44 |
+
Residual Learning Residual learning is a useful technique for many neural networks in computer vision (CV) and natural language processing (NLP). He et al. (2016) propose deep residual learning to alleviate the training difficulties of deep models, which has become the backbone of many CV tasks. In NLP, Wang and Tian (2016) and Prakash et al. (2016) use residual learning to train deep recurrent neural networks for text generation. Different from previous works that conduct residual learning over different layers, Werlen et al. (2018) propose to aggregate the information of historical predictions using residual learning. He et al. (2021) use residual learning to propagate attention scores across different layers of a Transformer-based model.
|
| 45 |
+
|
| 46 |
+
Most of these works conduct residual learning over homogeneous model structures, e.g., stacked identical layers of the same model. In our work, we use residual learning to combine the neural and symbolic models, i.e., learn a neural LM that approximates the information that has not been captured by the $n$ -gram model.
|
| 47 |
+
|
| 48 |
+
# 3 Background
|
| 49 |
+
|
| 50 |
+
Models that estimate the probabilities of sequences of words are called language models (LMs) (Jurafsky, 2000). Let $\pmb{x} = \{x_{1}, x_{2}, \dots, x_{L}\}$ be a sequence of words with length $L$. The probability $P(\pmb{x})$ can be factorized according to the chain rule of probability:
|
| 51 |
+
|
| 52 |
+
$$
|
| 53 |
+
\begin{array}{l} P (\boldsymbol {x}) = P (x _ {1}) P \left(x _ {2} \mid x _ {1}\right) \dots P \left(x _ {L} \mid \boldsymbol {x} _ {1} ^ {L - 1}\right) \\ = \prod_ {k = 1} ^ {L} P \left(x _ {k} \mid \boldsymbol {x} _ {1} ^ {k - 1}\right), \tag {1} \\ \end{array}
|
| 54 |
+
$$
|
| 55 |
+
|
| 56 |
+
where $x_1^{k-1}$ is called the prefix or context of $x_k$ . In this section we will briefly introduce two kinds of language models, the $n$ -gram and neural language models, to compute the probability in Eq. (1).
|
| 57 |
+
|
| 58 |
+
# 3.1 $N$ -gram Language Model
|
| 59 |
+
|
| 60 |
+
Among lots of variants of $n$ -gram LMs, the $n$ -gram LM with modified Kneser-Ney smoothing is widely adopted in lots of related tasks, because of its low perplexity and efficiency (Kneser and Ney, 1995; Chen and Goodman, 1996; Heafield et al., 2013). Like most $n$ -gram LMs, the Kneser-Ney approximates the entire context $x_{1}^{k - 1}$ in Eq. (1) by the last
|
| 61 |
+
|
| 62 |
+
$n - 1$ words in the context:
|
| 63 |
+
|
| 64 |
+
$$
|
| 65 |
+
P (x _ {k} | \boldsymbol {x} _ {1} ^ {k - 1}) \approx P _ {N G} (x _ {k} | \boldsymbol {x} _ {k - n + 1} ^ {k - 1}). \qquad (2)
|
| 66 |
+
$$
|
| 67 |
+
|
| 68 |
+
In Kneser-Ney algorithm, the estimation of $P_{NG}(x_k|\pmb{x}_{k - n + 1}^{k - 1})$ is defined according to a recursive equation:
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\begin{array}{l} P _ {N G} (x _ {k} | \pmb {x} _ {k - n + 1} ^ {k - 1}) = U (x _ {k} | \pmb {x} _ {k - n + 1} ^ {k - 1}) + \\ b \left(\boldsymbol {x} _ {k - n + 1} ^ {k - 1}\right) P _ {N G} \left(x _ {k} \mid \boldsymbol {x} _ {k - n + 2} ^ {k - 1}\right), \tag {3} \\ \end{array}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
$$
|
| 75 |
+
U (x _ {k} | \boldsymbol {x} _ {k - n + 1} ^ {k - 1}) = \frac {c (\boldsymbol {x} _ {k - n + 1} ^ {k}) - d}{\sum_ {w} c (\boldsymbol {x} _ {k - n + 1} ^ {k - 1} w)},
|
| 76 |
+
$$
|
| 77 |
+
|
| 78 |
+
where $w$ denotes a word that appears after $\pmb{x}_{k - n + 1}^{k - 1}$, $b(\cdot)$ is the backoff weight for the lower-order estimate, $c(\cdot)$ gives the adjusted counts, and $d$ is the discount for smoothing (Jurafsky, 2000; Heafield et al., 2013). According to Eq. (3), Kneser-Ney allows us to assign probabilities to unseen $n$ -grams (e.g., 5-grams) using lower-order information (e.g., 4-grams, 3-grams, or even unigrams).
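To make the recursion in Eq. (3) concrete, here is a minimal sketch that evaluates the interpolated estimate, assuming the discounted probabilities $U$ and the backoff weights $b$ have already been computed. The tables below are made-up toy numbers, not a trained model, and the sketch does not reproduce the exact on-disk query format of toolkits such as KenLM.

```python
# U maps (context tuple, word) to its discounted probability mass;
# b maps a context tuple to its backoff weight.  Unigram probabilities
# live under the empty context ().
U = {
    ((), "cat"): 0.2, ((), "sat"): 0.2, ((), "the"): 0.5,
    (("the",), "cat"): 0.6,
    (("the", "cat"), "sat"): 0.7,
}
b = {(): 0.0, ("the",): 0.3, ("the", "cat"): 0.2}

def p_ng(word, context):
    """P_NG(word | context) via discounted mass plus backoff to a shorter context (Eq. 3)."""
    if not context:
        return U.get(((), word), 1e-7)   # unigram base case, with a tiny floor for unseen words
    # Unseen contexts get backoff weight 1.0, i.e., they defer entirely to the shorter context.
    return U.get((context, word), 0.0) + b.get(context, 1.0) * p_ng(word, context[1:])

print(p_ng("sat", ("the", "cat")))       # 0.7 + 0.2 * (0.0 + 1.0 * 0.2) = 0.74
```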
|
| 79 |
+
|
| 80 |
+
# 3.2 Neural Language Model
|
| 81 |
+
|
| 82 |
+
A neural LM typically estimates the probability of $x_{k}$ based on the whole context $\pmb{x}_{1}^{k - 1}$. The parameters $\theta$ of a neural LM are optimized with the following MLE objective:
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
\mathcal {L} _ {N U} = \sum_ {\boldsymbol {x} \in \mathcal {D}} \sum_ {k = 1} ^ {L} \log P _ {N U} \left(x _ {k} \mid \boldsymbol {x} _ {1} ^ {k - 1}; \theta\right) \tag {4}
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
where $\mathcal{D}$ is the training dataset. The probability of $P_{NU}(x_k|\cdot)$ is computed by:
|
| 89 |
+
|
| 90 |
+
$$
|
| 91 |
+
P _ {N U} \left(x _ {k} \mid \boldsymbol {x} _ {1} ^ {k - 1}; \theta\right) = \operatorname {s o f t m a x} \left(\phi \left(\boldsymbol {h} _ {k}\right)\right) \left[ x _ {k} \right], \tag {5}
|
| 92 |
+
$$
|
| 93 |
+
|
| 94 |
+
where $h_k$ is the hidden vector output by the last layer of a neural LM, e.g., a GPT-2 model (Radford et al., 2019) or an LSTM model (Grave et al., 2017). The notation $[x_k]$ denotes taking the component corresponding to $x_k$ from a vector, i.e., from the probability distribution produced by the softmax in this equation. $\phi(\cdot)$ is a linear layer that transforms the hidden vector $h_k$ into a vector in the vocabulary space, which is also called the logits.
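As a minimal illustration of Eq. (5), the next-token distribution is simply a softmax over a linear projection of the last hidden state; the shapes and token id below are illustrative assumptions, not the authors' code.

```python
import torch

vocab_size, hidden_dim = 100, 16
phi = torch.nn.Linear(hidden_dim, vocab_size)   # the output projection phi(.)

h_k = torch.randn(hidden_dim)                   # hidden vector from the LM's last layer
logits = phi(h_k)                               # phi(h_k): a vector in the vocabulary space
probs = torch.softmax(logits, dim=-1)           # P_NU(. | x_1^{k-1})

x_k = 42                                        # an arbitrary next-token id
print(float(probs[x_k]))                        # the [x_k] component of the distribution
```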
|
| 95 |
+
|
| 96 |
+
# 4 Methodology
|
| 97 |
+
|
| 98 |
+
# 4.1 Motivation
|
| 99 |
+
|
| 100 |
+
The main idea of our work is to use the neural LM to approximate a residual function. Given the context $\pmb{x}_1^{k-1}$ in the language modeling task, let us consider $\mathcal{G}(\pmb{x}_1^{k-1})$ as the golden-truth distribution of the next word, and
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
\mathcal {Q} \left(\boldsymbol {x} _ {1} ^ {k - 1}\right) = P _ {N G} \left(X \mid \boldsymbol {x} _ {k - n + 1} ^ {k - 1}\right) \tag {6}
|
| 104 |
+
$$
|
| 105 |
+
|
| 106 |
+
as the prediction distribution of the $n$ -gram LM, where $X$ is the random variable and the probability $P_{NG}(X = x_k | \boldsymbol{x}_{k-n+1}^{k-1})$ is calculated according to Eq. (3). Since the $n$ -gram distribution $\mathcal{Q}(\boldsymbol{x}_1^{k-1})$ has already captured abundant information about the language, as discussed in the introduction, one interesting question is: can we use a neural LM to approximate the residual function $\mathcal{F}(\boldsymbol{x}_1^{k-1}) := \mathcal{G}(\boldsymbol{x}_1^{k-1}) - \mathcal{Q}(\boldsymbol{x}_1^{k-1})$? This is similar in spirit to the residual learning of He et al. (2016). If it is possible, we can relieve the neural LM of the burden of learning information that has already been captured by the $n$ -gram LM, e.g., short-distance dependencies, and provide a flexible way to customize an LM by switching the underlying $n$ -gram model without changing the neural model.
|
| 107 |
+
|
| 108 |
+
# 4.2 Learning Objective
|
| 109 |
+
|
| 110 |
+
Ideally, to train a neural LM that approximates the residual function, one way is to re-define the $P_{NU}(x_k|\cdot)$ in Eq. (5) as follows:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\begin{array}{l} P _ {N U} \left(x _ {k} \mid \boldsymbol {x} _ {1} ^ {k - 1}; \theta\right) = \mathcal {F} \left(\boldsymbol {x} _ {1} ^ {k - 1}\right) \left[ x _ {k} \right] + \\ P _ {N G} (x _ {k} | \pmb {x} _ {k - n + 1} ^ {k - 1}), \\ \end{array}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where $\mathcal{F}(\cdot)$ is parameterized by the neural model $\theta$, and $P_{NG}(x_k|\cdot)$ is defined in Eq. (3). Then we can optimize the MLE loss in Eq. (4) based on the new $P_{NU}(x_k|\cdot)$, which is equivalent to approximating the real-data distribution $\mathcal{G}$ by $\mathcal{F} + \mathcal{Q}$. However, directly optimizing this objective is problematic. If $\mathcal{F}(\cdot)$ is unbounded, the $P_{NU}$ defined in this equation is not guaranteed to be a valid probability distribution. In contrast, if $\mathcal{F}(\cdot)$ is constrained to be a valid distribution, this objective reduces to an ensemble of a neural LM and an $n$ -gram LM. Since the $n$ -gram model is the weaker of the two, their ensemble is likely to perform worse than the vanilla neural LM, as shown in the experimental results of Section 5.1.
|
| 117 |
+
|
| 118 |
+
To address these issues, we propose to define residual approximation at the logits level. In the
|
| 119 |
+
|
| 120 |
+
language modeling task, we can map the probabilistic distribution back to its logits and conduct residual learning as follows:
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\begin{array}{l} \mathcal {F} ^ {\prime} \left(\boldsymbol {x} _ {1} ^ {k - 1}\right): = \operatorname {s o f t m a x} ^ {- 1} \left(\mathcal {G} \left(\boldsymbol {x} _ {1} ^ {k - 1}\right)\right) - \\ \operatorname {s o f t m a x} ^ {- 1} \left(\mathcal {Q} \left(\boldsymbol {x} _ {1} ^ {k - 1}\right)\right) \tag {7} \\ \end{array}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\operatorname {s o f t m a x} ^ {- 1} (\boldsymbol {p}) = \log \boldsymbol {p} + C, \tag {8}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
where $\mathcal{F}'(\cdot)$ is the residual function at the logits level, $\mathrm{softmax}^{-1}(\pmb{p})$ is the inverse of the softmax function that maps the probability distribution $\pmb{p}$ back to its logits, and $C$ is a constant. One reason we conduct residual learning at the logits level is that the logits are highly correlated with the final distribution. Moreover, since logits take values in the real number space, training the neural LM to make its logits close to $\mathcal{F}'(\pmb{x}_1^{k-1})$ is more tractable. Therefore, the final $P_{NU}(x_k|\cdot)$ used in our work is:
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\begin{array}{l} P _ {N U} \left(x _ {k} \mid \boldsymbol {x} _ {1} ^ {k - 1}; \theta\right) = \operatorname {s o f t m a x} \left(\mathcal {F} ^ {\prime} \left(\boldsymbol {x} _ {1} ^ {k - 1}\right) + \alpha \times \right. \\ \operatorname {s o f t m a x} ^ {- 1} \left(\mathcal {Q} \left(\boldsymbol {x} _ {1} ^ {k - 1}\right)\right) \bigg) [ x _ {k} ] \tag {9} \\ \end{array}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
where $\alpha$ is a hyper-parameter to control the smoothness of the logits of the $n$ -gram distribution $\mathcal{Q}(\pmb{x}_1^{k-1})$ , and $\mathcal{F}'(\cdot)$ is approximated by the logits $\phi(\pmb{h}_k)$ of a neural LM. We can use the definition in Eq. (9) to optimize the MLE loss in Eq. (4).
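A minimal PyTorch sketch of this training objective follows: the neural logits play the role of $\mathcal{F}'$, the log of the $n$-gram distribution (up to the constant $C$, which cancels inside the softmax) supplies $\mathrm{softmax}^{-1}(\mathcal{Q})$, and cross-entropy on the combined logits gives the MLE loss of Eq. (4). Tensor names, shapes, and the $\alpha$ value are illustrative assumptions rather than the authors' implementation.

```python
import torch
import torch.nn.functional as F

def residual_lm_loss(neural_logits, ngram_probs, targets, alpha=1.0, eps=1e-10):
    """
    neural_logits: (batch, seq_len, vocab)  -- phi(h_k), approximating F'
    ngram_probs:   (batch, seq_len, vocab)  -- Q(x_1^{k-1}) from the n-gram LM, Eq. (6)
    targets:       (batch, seq_len)         -- gold next tokens x_k
    """
    # softmax^{-1}(Q) = log Q + C; the constant C cancels inside the softmax, so it is dropped.
    combined_logits = neural_logits + alpha * torch.log(ngram_probs + eps)
    return F.cross_entropy(
        combined_logits.reshape(-1, combined_logits.size(-1)),
        targets.reshape(-1),
    )

# Toy shapes for illustration only.
B, L, V = 2, 8, 100
neural_logits = torch.randn(B, L, V, requires_grad=True)
ngram_probs = torch.softmax(torch.randn(B, L, V), dim=-1)  # stand-in for the n-gram distribution
targets = torch.randint(V, (B, L))
loss = residual_lm_loss(neural_logits, ngram_probs, targets, alpha=1.0)
loss.backward()
print(float(loss))
```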
|
| 137 |
+
|
| 138 |
+
# 4.3 Relation to Re-weighting
|
| 139 |
+
|
| 140 |
+
To better understand our approach, we can dive into the details of Eq. (9). For simplicity, let us omit the condition $\pmb{x}_1^{k-1}$ in this section:
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
\begin{array}{l} P_{NU}(x_{k} \mid \cdot) = \operatorname{softmax}\left(\phi(\boldsymbol{h}_{k}) + \alpha \times \left(\log P_{NG}(X \mid \cdot) + C\right)\right)[x_{k}] \tag{10} \\ = \frac{\left(\mathrm{e}^{C}\right)^{\alpha} \left(\mathrm{e}^{\log P_{NG}(x_{k} \mid \cdot)}\right)^{\alpha} \mathrm{e}^{\phi(\boldsymbol{h}_{k})[x_{k}]}}{Z}, \tag{11} \\ \end{array}
|
| 144 |
+
$$
We apply Eqs. (6) and (8) to obtain the explicit form of the logits of the $n$-gram LM in Eq. (10), and the definition of $\phi(h_k)$ is the same as in Eq. (5). In Eq. (11), we expand the softmax function, where $Z$ is the normalization term. The numerator of Eq. (11) has three terms. The first term $(\mathrm{e}^{C})^{\alpha}$ is a constant shared by all logit values and thus does not affect the distribution. The middle term $(\mathrm{e}^{\log P_{NG}(x_k|\cdot)})^{\alpha}$ equals $P_{NG}(x_k|\cdot)^{\alpha}$ , so it acts as a weight on the last term, $\mathrm{e}^{\phi (h_k)[x_k]}$ , which comes from the logits of the neural LM. Compared with the vanilla neural LM, the ground-truth words are therefore not equally important during learning in our approach. For ground-truth words that are already well estimated by the $n$-gram LM, our approach obtains high probabilities after the softmax, leading to a small loss for the neural module. As a result, the neural model can spend more effort on difficult cases, such as predictions relying on long-distance dependencies, which are hard for the $n$-gram LM to estimate.
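To make the re-weighting reading of Eq. (11) concrete, the small NumPy check below (our own illustration, not code from the paper) verifies that applying softmax to $\phi(\boldsymbol{h}_k) + \alpha \log P_{NG}$ is identical to re-weighting each $\mathrm{e}^{\phi(\boldsymbol{h}_k)[x]}$ by $P_{NG}(x|\cdot)^{\alpha}$ and renormalizing.

```python
import numpy as np

rng = np.random.default_rng(0)
vocab, alpha = 5, 0.3
logits = rng.normal(size=vocab)            # phi(h_k)
ngram = rng.dirichlet(np.ones(vocab))      # P_NG(. | x_1^{k-1})

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

# Left-hand side: softmax over the residual-combined logits (Eqs. 9-10).
p_combined = softmax(logits + alpha * np.log(ngram))

# Right-hand side: re-weight exp(logits) by P_NG^alpha and renormalize (Eq. 11).
weighted = (ngram ** alpha) * np.exp(logits)
p_reweighted = weighted / weighted.sum()

assert np.allclose(p_combined, p_reweighted)  # the two views coincide
```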
# 4.4 Discussion
In this section, we propose a method to conduct residual learning between the neural and symbolic models, i.e., the neural LM and the $n$-gram LM. One of our expectations for the joint neuro-symbolic system is a better understanding of language. To evaluate this hypothesis, we can test our approach on standard language tasks, such as language modeling, machine translation, and summarization. The other expectation is the plug-and-play property of our approach. For instance, if the testing data come from different domains, we can change the $\mathcal{Q}$ in Eq. (9) by simply switching the $n$-gram model in use.
# 5 Experiments
In our work, we consider three kinds of natural language generation tasks: language modeling, machine translation, and summarization. For the language modeling task, we first evaluate the performance of our approach on the standard setting of the language modeling task. Then we turn to a domain adaptation setting.
# 5.1 Language Modeling
Setup We use the wikitext-103 benchmark $^2$ to evaluate the performance of our approach in the standard setting. The training set contains around 101M tokens. Following Merity et al. (2017), tokens with a frequency lower than 3 are replaced by the special token $\langle \mathsf{unk} \rangle$ in the training data, and the number of remaining unique words is around 260k. For wikitext-103, we train models at both the word and subword levels. The subword-level data is preprocessed using subword-nmt<sup>3</sup> (Sennrich et al., 2016), where the number of merge operations is set to $32k$ .
We use $\text{fairseq}^4$ (Ott et al., 2019) as the code base for our neural modules. We implement our approach on two popular neural language models, GPT-2 base (Radford et al., 2019) and Adaptive Input (ADP) (Baevski and Auli, 2019). For the ADP model, we follow the original hyper-parameters and use the code released by Baevski and Auli (2019) in $\text{fairseq}^5$ to train the model on the word-level data. Since the vocabulary of the word-level data is too large, we train the GPT-2 base model on the subword-level data. For these neural models, we mostly use the default hyper-parameters reported in their papers (Baevski and Auli, 2019; Radford et al., 2019) and train the models from random initialization. Regarding the $n$-gram model, we use $\text{KenLM}^6$ (Heafield, 2011) to train $n$-gram models on both the word-level and subword-level data of wikitext-103. The $n$ is set to 5 in our work. To make the perplexities of different models comparable, we report all perplexity scores at the word level. For subword-level data, the word-level probability is the product of the probabilities of its subword tokens, following Baevski and Auli (2019).
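As an illustration of how per-token $n$-gram probabilities and word-level perplexity can be obtained, the sketch below uses the `kenlm` Python bindings. The model file name and the way scores are collected are our own assumptions; note that KenLM returns $\log_{10}$ probabilities, so a conversion to natural log is needed before plugging them into Eq. (9).

```python
import numpy as np
import kenlm  # Python bindings for KenLM

model = kenlm.Model("wikitext103.5gram.bin")  # hypothetical path to a trained 5-gram model

sentence = "the city is located on the banks of the river"
# full_scores yields (log10 prob, ngram order used, is_oov) for every token plus </s>.
log10_probs = [score for score, _, _ in model.full_scores(sentence)]

ln_probs = np.array(log10_probs) * np.log(10.0)   # convert log10 -> natural log
perplexity = np.exp(-ln_probs.mean())             # word-level perplexity of this sentence
print(perplexity)
```

For subword-level models, the same conversion applies after summing the log-probabilities of the subwords that make up each word.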
When training our approach NGRAMRES, we combine the KENLM-5GRAM model and the neural model, i.e., GPT-2 or ADP, using the residual learning method discussed in section 4. The hyper-parameter $\alpha$ in Eq. (9) is tuned according to the performance on the validation set.
Results As shown in Table 2, we evaluate our approach on the wikitext-103 benchmark. Although the standalone performance of KENLM-5GRAM (Line 6) on the test set is poor, it is still able to improve our approach. When comparing our approach (Lines 8 and 11) with the vanilla neural models (Lines 7 and 9), our approach steadily outperforms ADP-FAIRSEQ $^{7}$ and GPT-2 by 0.7 and 0.9 PPL, respectively. According to these results, NGRAMRES improves model performance without changing the architecture or the number of parameters.
<table><tr><td>#</td><td></td><td>IT</td><td>Koran</td><td>Law</td><td>Medical</td><td>Subtitles</td><td>AVG.</td></tr><tr><td>1</td><td>#SENT</td><td>222,927</td><td>17,982</td><td>467,309</td><td>248,099</td><td>500,000</td><td>-</td></tr><tr><td>2</td><td>#WORD</td><td>2,585,965</td><td>4,512,266</td><td>15,348,052</td><td>4,512,266</td><td>5,125,239</td><td>-</td></tr><tr><td>3</td><td>KENLM-5GRAM</td><td>95.89</td><td>35.51</td><td>15.74</td><td>24.00</td><td>101.99</td><td>54.63</td></tr><tr><td>4</td><td>GPT-2</td><td>66.49</td><td>35.34</td><td>9.93</td><td>15.18</td><td>77.34</td><td>40.86</td></tr><tr><td>5</td><td>+ FINETUNE</td><td>53.69</td><td>26.77</td><td>9.43</td><td>12.96</td><td>69.33</td><td>34.44</td></tr><tr><td>6</td><td>+NGRAMRES</td><td>54.29</td><td>28.08</td><td>8.93</td><td>13.29</td><td>71.80</td><td>35.28</td></tr></table>
Table 1: Test perplexity on five domains. Lines 1-2 give the statistics of each domain. Lines 3-6 report the perplexity scores of the different approaches on the five domains. GPT-2 and NGRAMRES (Lines 4 and 6) each train a single unified model for all five domains, while the FINETUNE method (Line 5) trains a domain-specific model for each domain.
<table><tr><td>#</td><td>Model</td><td>#Param</td><td>PPL</td></tr><tr><td>1</td><td>(Grave et al., 2017) - LSTM</td><td>-</td><td>40.8</td></tr><tr><td>2</td><td>(Dauphin et al., 2017) - GCNN-8</td><td>229M</td><td>37.2</td></tr><tr><td>3</td><td>(Merity et al., 2018) - QRNN</td><td>151M</td><td>33.0</td></tr><tr><td>4</td><td>(Rae et al., 2018) - HEBBIAN + Cache</td><td>-</td><td>29.2</td></tr><tr><td>5</td><td>(Baevski and Auli, 2019) - ADP</td><td>247M</td><td>18.7</td></tr><tr><td>6</td><td>KENLM-5GRAM</td><td>-</td><td>116.4</td></tr><tr><td>7</td><td>ADP-FAIRSEQ</td><td>247M</td><td>18.9</td></tr><tr><td>8</td><td>+ NGRAMRES</td><td>247M</td><td>18.2</td></tr><tr><td>9</td><td>GPT-2 (BPE)</td><td>185M</td><td>22.2</td></tr><tr><td>10</td><td>+ PROB-INTER</td><td>185M</td><td>60.2</td></tr><tr><td>11</td><td>+ NGRAMRES</td><td>185M</td><td>21.3</td></tr></table>
Table 2: Test perplexity on wikitext-103. Results in lines 1-5 are reported in previous works, and results in lines 6-11 are run by us. The NGRAMRES is our approach discussed in section 4.
Moreover, we also compare our method with a straightforward baseline, PROB-INTER, as discussed in section 4. The PROB-INTER baseline directly interpolates the probabilistic distributions of KENLM-5GRAM and GPT-2. Its performance is better than KENLM-5GRAM but worse than the vanilla GPT-2, which is consistent with it behaving like an ensemble of the two models, as we discussed in section 4.
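For clarity, here is how the PROB-INTER baseline differs from the residual combination in code form; this is a minimal sketch under the same assumptions as the earlier snippet, with the interpolation weight `lam` being a hypothetical hyper-parameter.

```python
import torch
import torch.nn.functional as F

def prob_inter(neural_logits, ngram_probs, lam=0.5):
    """PROB-INTER: interpolate the two distributions at the probability level."""
    neural_probs = F.softmax(neural_logits, dim=-1)
    return lam * neural_probs + (1.0 - lam) * ngram_probs

def ngram_res(neural_logits, ngram_probs, alpha=0.1, eps=1e-10):
    """NGRAMRES: combine at the logits level as in Eq. (9)."""
    return F.softmax(neural_logits + alpha * torch.log(ngram_probs + eps), dim=-1)
```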
# 5.2 Language Modeling: Multi-Domain
In this setting, we will evaluate the performance of adapting our approach to a specific domain by changing the used $n$ -gram model.
Setup In the multi-domain setting, we use the English side of a bilingual dataset with 5 domains (Aharoni and Goldberg, 2020), i.e., IT, Koran, Law, Medical, and Subtitles. The statistics of this dataset are shown in Table 1. We apply subword-nmt on the joint training data of the five domains, and the number of merge operations is also $32k$ .
Following the standard setting of the language modeling task, we use GPT-2 base (Radford et al., 2019) as the neural model. We train and select the GPT-2 model on the mixed data from the five domains, and report the word-level perplexity on the test data of each domain independently. The GPT-2 + FINETUNE method adapts the parameters of the GPT-2 model on the corresponding domain before testing. For our approach NGRAMRES, we train a 5-gram LM for each specific domain and switch to the 5-gram model of the corresponding domain during training and testing. It is worth noting that the neural parameters of NGRAMRES are fixed at test time.
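The plug-and-play adaptation amounts to nothing more than swapping the file behind $\mathcal{Q}$; the sketch below is our own illustration with hypothetical file names, not part of the released code.

```python
import kenlm

# One 5-gram model per domain; the neural LM parameters stay untouched.
DOMAIN_LMS = {
    "it":        "ngrams/it.5gram.bin",        # hypothetical paths
    "koran":     "ngrams/koran.5gram.bin",
    "law":       "ngrams/law.5gram.bin",
    "medical":   "ngrams/medical.5gram.bin",
    "subtitles": "ngrams/subtitles.5gram.bin",
}

def load_domain_ngram(domain):
    """Swap Q in Eq. (9) by loading the n-gram LM of the target domain."""
    return kenlm.Model(DOMAIN_LMS[domain])

ngram_lm = load_domain_ngram("law")  # adaptation cost: loading one file
```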
Results The experimental results are shown in Table 1. For GPT-2 and NGRAMRES (Line 4 and 6), we train unified neural models on mixed data of five domains and evaluate their performances on the test data of five domains one by one. Results show that our approach can outperform the vanilla neural model GPT-2 by a large margin. Since the NGRAMRES approach stores a lot of domain-specific information in the 5-gram LM, we hypothesize that the neural module is able to learn useful and complementary knowledge during training, leading to the performance gain.
In the $+$ FINETUNE line, we also report the results of fine-tuning the GPT-2 model on each testing domain. Surprisingly, the performance of our approach is very close to that of the FINETUNE method, and NGRAMRES even slightly outperforms FINETUNE on the Law domain. Moreover, compared with FINETUNE, one advantage of our approach is the low cost of adapting the model to the testing domain, since we only need to replace the 5-gram model in a plug-and-play manner.
<table><tr><td>Model</td><td>En ⇒ Fr</td><td>En ⇒ Es</td><td>En ⇒ Vi</td><td>En ⇒ De</td><td>AVG.</td></tr><tr><td>TRANSFORMER</td><td>39.96</td><td>36.99</td><td>28.55</td><td>27.79</td><td>33.32</td></tr><tr><td>+ NGRAMRES</td><td>40.27</td><td>37.27</td><td>29.60</td><td>28.05</td><td>33.79</td></tr><tr><td>+ NGRAMRES-ANNEAL</td><td>40.49</td><td>37.07</td><td>29.92</td><td>28.41</td><td>33.97</td></tr></table>
Table 3: BLEU scores on IWSLT. The TRANSFORMER model is the baseline, and NGRAMRES and NGRAMRES-ANNEAL are two variants of our approach. Compared with NGRAMRES, NGRAMRES-ANNEAL decreases the value of $\alpha$ in Eq. (9) linearly over the first 10k steps of model training.
<table><tr><td>Model</td><td>ROUGE-1</td><td>ROUGE-2</td><td>ROUGE-L</td></tr><tr><td>Pointer-generator + Coverage (See et al., 2017)</td><td>39.53</td><td>17.28</td><td>36.38</td></tr><tr><td>Mask Attention Network (Fan et al., 2021)</td><td>40.98</td><td>18.29</td><td>37.88</td></tr><tr><td>BertSum (Liu and Lapata, 2019)</td><td>42.13</td><td>19.60</td><td>39.18</td></tr><tr><td>UniLM (Dong et al., 2019)</td><td>43.08</td><td>20.43</td><td>40.34</td></tr><tr><td>UniLM V2 (Bao et al., 2020)</td><td>43.16</td><td>20.42</td><td>40.14</td></tr><tr><td>ERNIE-GEN-large (Xiao et al., 2021)</td><td>44.02</td><td>21.17</td><td>41.26</td></tr><tr><td>PEGASUS (Zhang et al., 2020)</td><td>44.17</td><td>21.47</td><td>41.11</td></tr><tr><td>ProphetNet (Qi et al., 2020)</td><td>44.20</td><td>21.17</td><td>41.30</td></tr><tr><td>PALM (Bi et al., 2020)</td><td>44.30</td><td>21.12</td><td>41.14</td></tr><tr><td>BART-LARGE (Lewis et al., 2020)</td><td>44.11</td><td>21.21</td><td>40.83</td></tr><tr><td>+ NGRAMRES</td><td>44.41</td><td>21.36</td><td>41.19</td></tr></table>
Table 4: ROUGE scores on the test set of CNN/DailyMail dataset.
# 5.3 Machine Translation
Next, we evaluate our approach on a popular sequence-to-sequence task, namely, machine translation. Note that we only integrate our approach into the decoder side of the encoder-decoder model.
Setup We conduct the machine translation experiments on IWSLT14 (En $\Rightarrow$ Fr, Es, De) and IWSLT15 (En $\Rightarrow$ Vi). The IWSLT14 datasets<sup>8</sup> of the three language pairs are preprocessed following the script provided by fairseq<sup>9</sup>, where the evaluation data is sampled from the whole dataset and the test data is the concatenation of dev2011 and tst2012. There is no overlap between the train, validation, and test sets. For IWSLT15, we use the train, evaluation, and test data preprocessed and released by Stanford<sup>10</sup> (Luong and Manning, 2015). The results are reported using tokenized SacreBLEU<sup>11</sup> (Post, 2018).
We use fairseq as our code base and the Transformer model as the architecture $^{12}$ for all translation models. The Transformer model has 6 encoder layers and 6 decoder layers. Since the IWSLT datasets are small, the hidden size of the FFN sublayers is set to 1024, the number of attention heads is set to 4, the dropout rate is set to 0.3, and the weight decay rate is set to 0.001. Other hyper-parameters follow the default setting of Vaswani et al. (2017). All translation models are trained for 30 epochs from random initialization.
The implementation details of the $n$-gram model and our approach are similar to those in the language modeling task. For the translation task, we only use the target-side data, i.e., the X side of the $\mathrm{En} \Rightarrow \mathrm{X}$ data, to train the KENLM-5GRAM LM.
Results The results of machine translation are shown in Table 3. We implement two variants of our approach, namely, NGRAMRES and NGRAMRES-ANNEAL. The NGRAMRES system only uses the 5-gram information on the decoder side, as discussed in section 4. The difference between NGRAMRES and NGRAMRES-ANNEAL is that the latter decreases the value of $\alpha$ linearly after each update; the $\alpha$ value becomes zero after 10k steps.
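A linear annealing schedule of this kind can be written in a few lines; the snippet below is a sketch under the assumption that the schedule starts from the tuned $\alpha$ and reaches zero exactly at 10k updates.

```python
def annealed_alpha(step, alpha0=0.1, anneal_steps=10_000):
    """Linearly decay alpha from alpha0 to 0 over the first `anneal_steps` updates."""
    if step >= anneal_steps:
        return 0.0
    return alpha0 * (1.0 - step / anneal_steps)

# Example: at update 2500 the n-gram logits are weighted by 0.075 (for alpha0 = 0.1).
```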
We find that both variants of our approach outperform the TRANSFORMER model. NGRAMRES-ANNEAL achieves the best results on each language pair, which suggests that the $n$-gram model is most helpful during the early phase of training and may hurt translation performance afterwards. According to Voita et al. (2021), the training of neural machine translation (NMT) systems undergoes three stages: target-side language modeling, learning word-by-word translation, and learning to reorder. We therefore hypothesize that using the $n$-gram model throughout the whole training procedure over-emphasizes target-side language modeling in NMT, which negatively affects the next two stages.
# 5.4 Abstractive Summarization
Lastly, we evaluate our approach on another popular sequence-to-sequence task, namely, abstractive summarization. Like machine translation, our approach is applied to the decoder side of the encoder-decoder model.
Setup For the abstractive summarization task, we preprocess the CNN/DailyMail dataset following the script provided by fairseq<sup>13</sup>. The evaluation metrics of the summarization task are ROUGE scores, i.e., ROUGE-1, ROUGE-2, and ROUGE-L (Lin, 2004)<sup>14</sup>.
We follow the setting of previous works and fine-tune the pre-trained BART-LARGE model (Lewis et al., 2020) on the CNN/DailyMail dataset for 20k updates. We train the KENLM-5GRAM LM on the concatenation of the source documents and their reference summaries.
Results The summarization task is also a sequence-to-sequence task, where the source text and the summary are in the same language and share similar semantics. As shown in Table 4, our approach is still able to improve the performance of the strong BART-LARGE baseline on this task, without any change to the model architecture.
Different from the machine translation task, we find that using a fixed $\alpha$ value achieves better performance than annealing it. The reason may be that the target-side language modeling plays a more important role in the summarization task because summarization is more like monolingual text generation in a constrained context.
# 6 Conclusion and Future Work
This work aims to learn a neural LM that approximates the information that has not been captured by an $n$-gram LM. To achieve this goal, we propose a residual learning approach that forces the two models, i.e., the neural LM and the $n$-gram LM, to learn complementary information. We conduct extensive experiments to evaluate the proposed approach. We find that our neuro-symbolic system not only improves the performance of recent state-of-the-art neural models consistently and considerably on three typical language tasks (language modeling, machine translation, and summarization) but also exhibits a good plug-and-play property on the multi-domain language modeling task.
The $n$ -gram LM has lots of attractive properties that we have not explored in this work. First, the $n$ -gram model has good interpretability. The behavior of $n$ -gram LM is easier to understand than the weights of neurons from the perspective of humans. In the future, we want to leverage the property of the $n$ -gram model to better understand the decision-making process of the neural LM. Second, controlling the system predictions through the $n$ -gram model may have a big potential. As observed in our multi-domain experiments, we are able to customize an LM by switching the underlying $n$ -gram model without changing the neural part. It is also interesting to explore how to control the model output at a fine-grained level using the $n$ -gram LM.
# Limitations
We believe there are two limitations in our approach. First, since the prediction distribution of $n$-gram models is estimated on the CPU, the estimation may be slow when using a very large batch size (e.g., $\gg 8192 \times 8$ ). Second, the performance gain of our current approach on high-resource datasets is limited. For instance, when evaluating TRANSFORMER + NGRAMRES on WMT14 En-De (Vaswani et al., 2017), the improvement is only 0.15 BLEU. These limitations urge us to propose more efficient and effective approaches in future work.
# Acknowledgement
We are particularly grateful for the help from Xiaojiang Liu, because this project would never have been conceived and completed without his generous and selfless support. We also want to thank the insightful discussions with Yixuan Su and the valuable comments from our anonymous reviewers, area chairs, and senior area chairs.
# References
Roi Aharoni and Yoav Goldberg. 2020. Unsupervised domain clusters in pretrained language models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7747-7763, Online. Association for Computational Linguistics.
Alexei Baevski and Michael Auli. 2019. Adaptive input representations for neural language modeling. In 7th International Conference on Learning Representations, ICLR 2019, New Orleans, LA, USA, May 6-9, 2019. OpenReview.net.
Dzmitry Bahdanau, Kyunghyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
Lalit R. Bahl, Frederick Jelinek, and Robert L. Mercer. 1983. A maximum likelihood approach to continuous speech recognition. IEEE Trans. Pattern Anal. Mach. Intell., 5(2):179-190.
Hangbo Bao, Li Dong, Furu Wei, Wenhui Wang, Nan Yang, Xiaodong Liu, Yu Wang, Jianfeng Gao, Songhao Piao, Ming Zhou, et al. 2020. Unilmv2: Pseudomasked language models for unified language model pre-training. In International Conference on Machine Learning, pages 642-652. PMLR.
Bin Bi, Chenliang Li, Chen Wu, Ming Yan, Wei Wang, Songfang Huang, Fei Huang, and Luo Si. 2020. Palm: Pre-training an autoencoding&autoregressive language model for context-conditioned generation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8681-8691.
Peter F. Brown, John Cocke, Stephen Della Pietra, Vincent J. Della Pietra, Frederick Jelinek, John D. Lafferty, Robert L. Mercer, and Paul S. Roossin. 1990. A statistical approach to machine translation. Comput. Linguistics, 16(2):79-85.
Deng Cai, Yan Wang, Huayang Li, Wai Lam, and Lemao Liu. 2021. Neural machine translation with monolingual translation memory. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 7307-7318.
Deng Cai, Yan Wang, Lemao Liu, and Shuming Shi. 2022. Recent advances in retrieval-augmented text generation. In Proceedings of the 45th International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 3417-3419.
Mingqing Chen, Ananda Theertha Suresh, Rajiv Mathews, Adeline Wong, Cyril Allauzen, Françoise Beaufays, and Michael Riley. 2019. Federated learning of n-gram language models. In Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 121-130, Hong Kong, China. Association for Computational Linguistics.
Stanley F. Chen and Joshua Goodman. 1996. An empirical study of smoothing techniques for language modeling. In 34th Annual Meeting of the Association for Computational Linguistics, 24-27 June 1996, University of California, Santa Cruz, California, USA, Proceedings, pages 310-318. Morgan Kaufmann Publishers / ACL.
Junyoung Chung, Caglar Gülcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence modeling. CoRR, abs/1412.3555.
Yann N. Dauphin, Angela Fan, Michael Auli, and David Grangier. 2017. Language modeling with gated convolutional networks. In Proceedings of the 34th International Conference on Machine Learning, ICML 2017, Sydney, NSW, Australia, 6-11 August 2017, volume 70 of Proceedings of Machine Learning Research, pages 933-941. PMLR.
Li Dong, Nan Yang, Wenhui Wang, Furu Wei, Xiaodong Liu, Yu Wang, Jianfeng Gao, Ming Zhou, and Hsiao-Wuen Hon. 2019. Unified language model pre-training for natural language understanding and generation. Advances in Neural Information Processing Systems, 32.
Zhihao Fan, Yeyun Gong, Dayiheng Liu, Zhongyu Wei, Siyuan Wang, Jian Jiao, Nan Duan, Ruofei Zhang, and Xuan-Jing Huang. 2021. Mask attention networks: Rethinking and strengthen transformer. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1692-1701.
Edouard Grave, Armand Joulin, and Nicolas Usunier. 2017. Improving neural language models with a continuous cache. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net.
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In 2016 IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2016, Las Vegas, NV, USA, June 27-30, 2016, pages 770-778. IEEE Computer Society.
Ruining He, Anirudh Ravula, Bhargav Kanagal, and Joshua Ainslie. 2021. Realformer: Transformer likes residual attention. In Findings of the Association for Computational Linguistics: ACL/IJCNLP 2021, Online Event, August 1-6, 2021, volume ACL/IJCNLP 2021 of Findings of ACL, pages 929-943. Association for Computational Linguistics.
Kenneth Heafield. 2011. KenLM: Faster and smaller language model queries. In Proceedings of the Sixth Workshop on Statistical Machine Translation, pages 187-197, Edinburgh, Scotland. Association for Computational Linguistics.
Kenneth Heafield, Ivan Pouzyrevsky, Jonathan H. Clark, and Philipp Koehn. 2013. Scalable modified Kneser-Ney language model estimation. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 690-696, Sofia, Bulgaria. Association for Computational Linguistics.
Sepp Hochreiter and Jürgen Schmidhuber. 1997. Long short-term memory. Neural Comput., 9(8):1735-1780.
Ari Holtzman, Jan Buys, Li Du, Maxwell Forbes, and Yejin Choi. 2020. The curious case of neural text degeneration. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.
Guoping Huang, Jiajun Zhang, Yu Zhou, and Chengqing Zong. 2015. A new input method for human translators: Integrating machine translation effectively and imperceptibly. In Proceedings of the Twenty-Fourth International Joint Conference on Artificial Intelligence, IJCAI 2015, Buenos Aires, Argentina, July 25-31, 2015, pages 1163-1169. AAAI Press.
Dan Jurafsky. 2000. Speech & language processing. Pearson Education India.
Reinhard Kneser and Hermann Ney. 1995. Improved backing-off for m-gram language modeling. In 1995 International Conference on Acoustics, Speech, and Signal Processing, ICASSP '95, Detroit, Michigan, USA, May 08-12, 1995, pages 181-184. IEEE Computer Society.
Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
Huayang Li, Yixuan Su, Deng Cai, Yan Wang, and Lemao Liu. 2022. A survey on retrieval-augmented text generation. arXiv preprint arXiv:2202.01110.
Piji Li, Wai Lam, Lidong Bing, and Zihao Wang. 2017. Deep recurrent generative decoder for abstractive text summarization. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing, pages 2091-2100, Copenhagen, Denmark. Association for Computational Linguistics.
Chin-Yew Lin. 2004. ROUGE: A package for automatic evaluation of summaries. In Text Summarization Branches Out, pages 74-81, Barcelona, Spain. Association for Computational Linguistics.
Yang Liu and Mirella Lapata. 2019. Text summarization with pretrained encoders. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3730-3740.
Minh-Thang Luong and Christopher D. Manning. 2015. Stanford neural machine translation systems for spoken language domain. In International Workshop on Spoken Language Translation, Da Nang, Vietnam.
Stephen Merity, Nitish Shirish Keskar, and Richard Socher. 2018. An analysis of neural language modeling at multiple scales. CoRR, abs/1803.08240.
Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. 2017. Pointer sentinel mixture models. In 5th International Conference on Learning Representations, ICLR 2017, Toulon, France, April 24-26, 2017, Conference Track Proceedings. OpenReview.net.
Myle Ott, Sergey Edunov, Alexei Baevski, Angela Fan, Sam Gross, Nathan Ng, David Grangier, and Michael Auli. 2019. fairseq: A fast, extensible toolkit for sequence modeling. In Proceedings of NAACL-HLT 2019: Demonstrations.
Matt Post. 2018. A call for clarity in reporting BLEU scores. In Proceedings of the Third Conference on Machine Translation: Research Papers, pages 186-191, Belgium, Brussels. Association for Computational Linguistics.
Aaditya Prakash, Sadid A. Hasan, Kathy Lee, Vivek V. Datla, Ashequl Qadir, Joey Liu, and Oladimeji Farri. 2016. Neural paraphrase generation with stacked residual LSTM networks. In COLING 2016, 26th International Conference on Computational Linguistics, Proceedings of the Conference: Technical Papers, December 11-16, 2016, Osaka, Japan, pages 2923-2934. ACL.
Weizhen Qi, Yu Yan, Yeyun Gong, Dayiheng Liu, Nan Duan, Jiusheng Chen, Ruofei Zhang, and Ming Zhou. 2020. Prophetnet: Predicting future n-gram for sequence-to-sequence pre-training. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 2401-2410.
Alec Radford, Jeff Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners.
Jack W. Rae, Chris Dyer, Peter Dayan, and Timothy P. Lillicrap. 2018. Fast parametric learning with activation memorization. In Proceedings of the 35th International Conference on Machine Learning, ICML 2018, Stockholm, Sweden, July 10-15, 2018, volume 80 of Proceedings of Machine Learning Research, pages 4225-4234. PMLR.
Abigail See, Peter J Liu, and Christopher D Manning. 2017. Get to the point: Summarization with pointer-generator networks. In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1073-1083.
Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715-1725, Berlin, Germany. Association for Computational Linguistics.
Yixuan Su, Tian Lan, Yan Wang, Dani Yogatama, Lingpeng Kong, and Nigel Collier. 2022. A contrastive framework for neural text generation. arXiv preprint arXiv:2202.06417.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008.
Elena Voita, Rico Sennrich, and Ivan Titov. 2021. Language modeling, lexical translation, reordering: The training process of NMT through the lens of classical SMT. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 8478-8491, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
Yiren Wang and Fei Tian. 2016. Recurrent residual learning for sequence classification. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, EMNLP 2016, Austin, Texas, USA, November 1-4, 2016, pages 938-943. The Association for Computational Linguistics.
Sean Welleck, Ilia Kulikov, Stephen Roller, Emily Dinan, Kyunghyun Cho, and Jason Weston. 2020. Neural text generation with unlikelihood training. In 8th International Conference on Learning Representations, ICLR 2020, Addis Ababa, Ethiopia, April 26-30, 2020. OpenReview.net.
Lesly Miculicich Werlen, Nikolaos Pappas, Dhananjay Ram, and Andrei Popescu-Belis. 2018. Self-attentive residual decoder for neural machine translation. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL-HLT 2018, New Orleans, Louisiana, USA, June 1-6, 2018, Volume 1 (Long Papers), pages 1366-1379. Association for Computational Linguistics.
Dongling Xiao, Han Zhang, Yukun Li, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. 2021. Ernie-gen: an enhanced multi-flow pre-training and fine-tuning framework for natural language generation. In Proceedings of the Twenty-Ninth International Conference on International Joint Conferences on Artificial Intelligence, pages 3997-4003.
Jin Xu, Xiaojiang Liu, Jianhao Yan, Deng Cai, Huayang Li, and Jian Li. 2022. Learning to break the loop: Analyzing and mitigating repetitions for neural text generation. CoRR, abs/2206.02369.
Jingqing Zhang, Yao Zhao, Mohammad Saleh, and Peter Liu. 2020. Pegasus: Pre-training with extracted gap-sentences for abstractive summarization. In International Conference on Machine Learning, pages 11328-11339. PMLR.
residuallearningofneuraltextgenerationwithngramlanguagemodel/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:aecf5e20790550b2b760c6d6cea462cf021ff2c2ee442df46c713a1a9fe66b03
size 300759
residuallearningofneuraltextgenerationwithngramlanguagemodel/layout.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:14ca6adbacd16c0e0861a20563681dbb9e4f54f805ab84eaedee2badf9920a51
size 425485
rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/d1f6ba3e-d3d1-4fd1-aa85-df92519933af_content_list.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:55ade1c587d19ffa6ad6ecfca9e80b5401f5eed853ebfb76b682f740fc4c8a96
size 79260
rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/d1f6ba3e-d3d1-4fd1-aa85-df92519933af_model.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79bb87e9bba4c4e52d80a9bc9c62f0526b699c4fe4710aba87b11903eb1e05b4
size 96245
rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/d1f6ba3e-d3d1-4fd1-aa85-df92519933af_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d08665d02d5ab56dc237d7cbc720c69b1f4f5d8e251cf35482da8148c50ef41
size 1107131
rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/full.md
ADDED
@@ -0,0 +1,317 @@
# Rethinking the Video Sampling and Reasoning Strategies for Temporal Sentence Grounding
Jiahao Zhu $^{1*}$ , Daizong Liu $^{2\dagger*}$ , Pan Zhou $^{1\dagger}$ , Xing Di $^{3}$ , Yu Cheng $^{4}$ , Song Yang $^{5}$ , Wenzheng Xu $^{6}$ , Zichuan Xu $^{7}$ , Yao Wan $^{8}$ , Lichao Sun $^{9}$ , Zeyu Xiong $^{1}$
<sup>1</sup>Hubei Key Laboratory of Distributed System Security, Hubei Engineering Research Center on Big Data Security, School of Cyber Science and Engineering, Huazhong University of Science and Technology <sup>3</sup>ProtagoLabs Inc <sup>4</sup>Microsoft Research
$^{2}$ Wangxuan Institute of Computer Technology, Peking University
$^{5}$ Beijing Institute of Technology $^{6}$ School of Sichuan University
$^{7}$ Dalian University of Technology $^{9}$ Lehigh University
$^{8}$ School of Computer Sci. & Tech., Huazhong University of Science and Technology
{jiahaozhu, panzhou, wanyao, zeyuxiong}@hust.edu.cn, dzliu@stu.pku.edu.cn
xing.di@protagolabs.com, yu.cheng@microsoft.com, S.Yang@bit.edu.cn
wenzheng.xu@scu.edu.cn, z.xu@dlut.edu.cn, lis221@lehigh.edu
# Abstract
Temporal sentence grounding (TSG) aims to identify the temporal boundary of a specific segment from an untrimmed video given a sentence query. All existing works first utilize a sparse sampling strategy to extract a fixed number of video frames and then conduct multi-modal interactions with the query sentence for reasoning. However, we argue that these methods have overlooked two crucial issues: 1) Boundary-bias: The annotated target segment generally refers to two specific frames as the corresponding start and end timestamps. The video downsampling process may lose these two frames and take the adjacent irrelevant frames as new boundaries. 2) Reasoning-bias: Such incorrect new boundary frames also lead to reasoning bias during frame-query interaction, reducing the generalization ability of the model. To alleviate the above limitations, in this paper, we propose a novel Siamese Sampling and Reasoning Network (SSRN) for TSG, which introduces a siamese sampling mechanism to generate additional contextual frames to enrich and refine the new boundaries. Specifically, a reasoning strategy is developed to learn the inter-relationship among these frames and generate soft labels on the boundaries for more accurate frame-query reasoning. Such a mechanism is also able to supplement the absent consecutive visual semantics to the sparsely sampled frames for fine-grained activity understanding. Extensive experiments demonstrate the effectiveness of SSRN on three challenging datasets.
# 1 Introduction
Sentence Query: The woman then adds ginger ale, and shakes the drink in a tumbler.

(c) An overview of our siamese sampling strategy.

Figure 1: (a) An example of the temporal sentence grounding task. (b) All existing TSG methods generally utilize a downsampling process to evenly extract a fixed number of frames from a long video. However, the new target segment is obtained by a rounding operation and may introduce boundary bias since some original boundary frames are lost. (c) We propose a siamese sampling strategy to extract additional adjacent frames to enrich and refine the information of the sampled frames for generating a more accurate boundary of the new segment.

Temporal sentence grounding (TSG) is an important yet challenging task in natural language processing, which has drawn increasing attention over the last few years due to its vast potential applications in information retrieval (Dong et al., 2019; Yang et al., 2020) and human-computer interaction (Singha et al., 2018). It aims to ground the most relevant video segment according to a given sentence query. As shown in Figure 1 (a), video and query information need to be deeply incorporated to distinguish the fine-grained details of adjacent frames for determining accurate boundary timestamps.
Previous TSG methods (Gao et al., 2017; Chen et al., 2018; Zhang et al., 2019b; Yuan et al., 2019a; Zhang et al., 2020b; Liu et al., 2018a; Zhang et al., 2019a; Liu et al., 2018b, 2021a) generally follow an encoding-then-interaction framework that first extracts both video and query features and then conducts multi-modal interactions for reasoning. Since many videos are overlong while the corresponding target segments are short, these methods simply utilize the sparse sampling strategy shown in Figure 1 (b), which samples a fixed number of frames from each video to reconstruct a shorter video, and then learn frame-query relations for segment inference. We argue that this learning paradigm suffers from two obvious limitations: 1) Boundary-bias: Each video has a query-related segment, which refers to two specific frames as its start and end timestamps. The traditional sparse downsampling strategy extracts frames from videos at a fixed interval. A rounding operation is then applied to map the annotated segment to the sampled frames by keeping the same proportional length in both the original and the new video. As a result, the ground-truth boundary frames may be filtered out and query-irrelevant frames will be regarded as the actual boundaries, generating wrong labels for later training. 2) Reasoning-bias: The query-irrelevant boundary frames in the newly reconstructed segment also lead to incorrect frame-query interaction and reasoning during training, reducing the generalization ability of the model.
To alleviate these two issues, a straightforward idea is to filter out the sampled boundary frames in the new segment if they are query-irrelevant. However, this would destroy the true segment length when we transfer the downsampled segment back to the original one during inference. Another straightforward idea is to directly keep the appropriate segment length (as float values) in the newly reconstructed video and then reason over the query content at the new boundary to determine what fraction of this boundary frame is correct. However, the query-irrelevant boundaries lack sufficient query-related information for boundary reasoning. Based on the above considerations, we aim to extract additional frames adjacent to the sampled frames to enrich and refine their information, supplementing the missing consecutive visual semantics. In this way, the new boundary frames are well correlated semantically with their original adjacent boundaries. Based on the refined boundary frames, we can keep and learn the appropriate segment length of the downsampled video for query reasoning. Moreover, other inner frames are also enriched by their neighbors, capturing more consecutive visual appearances for fully understanding the entire activity.
Therefore, in this paper, we propose a novel Siamese Sampling and Reasoning Network (SSRN) for the temporal sentence grounding task, which generates additional contextual frames to enrich and refine the new boundaries. Specifically, we treat the sparsely sampled video frames as anchor frames, and additionally extract several frames adjacent to each anchor frame as siamese frames for semantic sharing and enrichment. A siamese knowledge aggregation module is designed to explore internal relationships and aggregate contextual information among these frames. Then, a siamese reasoning module supplements the fine-grained contexts of the siamese frames into the anchor frames to enrich their semantics. In this way, query-related information is added into the new boundaries, so we can utilize an appropriate float value to represent the new segment length for query reasoning, addressing both the boundary- and reasoning-bias. Moreover, the other sampled frames are also equipped with more consecutive visual semantics from their original neighbors, which further benefits the fine-grained learning process.
Our contributions are summarized as follows:
- We propose a novel SSRN model which can sparsely extract multiple relevant frames from original videos to enrich the anchor frames for more accurate boundary prediction. To the best of our knowledge, we are the first to propose and address both boundary-bias and reasoning-bias in TSG task.
- We propose an effective siamese aggregation and reasoning method to correlate and integrate the contextual information of siamese frames to refine the anchor frames.
- Extensive experiments are conducted on three challenging public benchmarks, including ActivityNet Captions, TACoS and CharadesSTA, demonstrating the effectiveness of our proposed SSRN method.
# 2 Related Work
Temporal sentence grounding (TSG) is a task introduced recently (Gao et al., 2017; Anne Hendricks et al., 2017), which aims to localize the most relevant video segment from a video given a sentence description. All existing methods follow an encoding-then-interaction framework that first extracts video/query features and then conducts multi-modal interactions for segment inference.
Based on the interacted multi-modal features, traditional methods follow a propose-and-rank paradigm to make predictions. Most of them (Anne Hendricks et al., 2017; Liu et al., 2018a; Chen et al., 2018; Liu et al., 2018b; Ge et al., 2019; Zhang et al., 2019a; Qu et al., 2020; Xiao et al., 2021; Liu et al., 2021a,c, 2020a) typically utilize a proposal-based grounding head that first generates multiple candidate segments as proposals, and then ranks them according to their similarity with the query semantic to select the best matching one. Some of them (Gao et al., 2017; Anne Hendricks et al., 2017) directly utilize multi-scale sliding windows to produce the proposals and subsequently integrate the query with segment representations via a matrix operation. To improve the quality of the proposals, latest works (Wang et al., 2020; Yuan et al., 2019a; Zhang et al., 2019b; Xiao et al., 2021; Cao et al., 2021; Liu et al., 2021b, 2020b, 2022b,c) integrate sentence information with each fine-grained video clip unit, and predict the scores of candidate segments by gradually merging the fusion feature sequence over time.
Recently, some proposal-free works (Yuan et al., 2019b; Wang et al., 2019; Rodriguez et al., 2020; Chen et al., 2020; Mun et al., 2020; Zeng et al., 2020; Zhang et al., 2020a, 2021; Nan et al., 2021) directly predict the temporal locations of the target segment without generating complex proposals. These works directly select the starting and ending frames by leveraging cross-modal interactions between video and query. Specifically, they either regress the start/end timestamps based on the entire video representation (Yuan et al., 2019b; Mun et al., 2020), or predict at each frame to determine whether this frame is a start or end boundary (Rodriguez et al., 2020; Chen et al., 2020; Zeng et al., 2020; Zhang et al., 2020a, 2021).
Although the above two types of methods have achieved great performance, their video sampling strategy in the encoding part is problematic and can lead to both boundary and reasoning bias. Specifically, the boundary bias is the incorrect boundary of the new segment reconstructed by sparse video sampling. The reasoning bias is the incorrect correlation learning between query-irrelevant frames and the query. In this paper, we aim to reduce these biases by proposing a new siamese sampling and reasoning strategy to enrich the sampled frames and further refine the reconstructed segment boundary.
# 3 The Proposed Method
Given an untrimmed video and a sentence query, we represent the video as $\mathcal{V}$ with a frame number of $T$ . Similarly, the query with $N$ words is denoted as $\mathcal{Q}$ . Temporal sentence grounding (TSG) aims to localize a segment $(\tau_s,\tau_e)$ starting at timestamp $\tau_{s}$ and ending at timestamp $\tau_{e}$ in video $\mathcal{V}$ , which corresponds to the same semantic as query $\mathcal{Q}$ .
The overall architecture of the proposed Siamese Sampling and Reasoning Network (SSRN) is illustrated in Figure 2. The SSRN framework contains four main components: (1) Siamese sampling and encoding: We sparsely downsample each long video into anchor frames, and a new siamese sampling strategy additionally samples their adjacent frames as siamese frames. A video/query encoder then extracts visual/query features from all sampled video frames and the query sentence, respectively. (2) Multi-modal interaction: After that, we interact the query features with the visual features for cross-modal interaction. (3) Multi-modal reasoning: Next, to supplement the knowledge of the siamese frames into the anchor frames, a siamese knowledge aggregation module is developed to determine how much information from the siamese frames should be injected into the anchor ones. Then, a reasoning module is utilized to enrich the anchor frames with the aggregated semantic knowledge. In this way, the contexts of both the new boundaries and the other sparse frames are enriched and can better represent the full and consecutive visual semantics. (4) Grounding heads with soft labels: At last, we employ grounding heads with soft labels to predict more accurate boundaries via float values that keep the appropriate segment length. We detail each component in the following subsections.
# 3.1 Siamese Sampling and Encoding
Given the dense video input $\mathcal{V}$, previous works generally downsample each video into a new video of fixed length to address the problem of overlong videos. Considering the existing boundary-bias, we propose a siamese sampling strategy to additionally extract contextual frames adjacent to each sampled frame to enrich its query-related information for better determining the accurate new boundary. Here, we call the downsampled frames and their contextual frames anchor frames and siamese frames, respectively. Specifically, as shown in Figure 1 (c), following previous works, we directly construct the anchor video $\mathcal{V}^a$ by sparsely and evenly sampling $M$ frames from the dense video of length $T$ ( $T$ is usually much greater than $M$ ). The siamese videos are then captured at different beginning indices in the original video but next to the frames of the anchor video. The same sampling interval is utilized for all frames. After siamese sampling, we obtain multiple siamese videos with the same length and similar global semantics as the anchor video. We denote the new siamese videos as $\{\mathcal{V}^{s,k}\}_{k=1}^{K}$ where $K$ is the siamese sample number.

Figure 2: Overview of our Siamese Sampling and Reasoning Network. Given a dense video, the anchor frames and siamese frames are first extracted by sparse sampling and siamese sampling, respectively. Then a video/query encoder and a multimodal interaction module are utilized to generate multimodal features. Next, a siamese knowledge generation module is proposed to model the contextual relationship between anchor frames and siamese frames from the same video. After that, the siamese knowledge reasoning module exploits the siamese knowledge to enrich the information of the anchor frames for more accurate boundary prediction. At last, in the grounding heads, we utilize a soft label to learn more fine-grained boundaries of float value in addition to the rounded one.
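To make the sampling concrete, the following sketch (our own illustration; the variable names and the choice of offsets are assumptions) generates the anchor indices and the $K$ siamese index sets that sit next to each anchor frame with the same sampling interval.

```python
import numpy as np

def siamese_sampling(T, M, K):
    """Return anchor frame indices and K siamese index sets for a video of T frames.

    T: number of frames in the dense video (T >> M).
    M: number of frames kept after sparse, even sampling.
    K: number of siamese videos, each shifted by a small offset from the anchors.
    """
    stride = T / M
    anchors = np.floor(np.arange(M) * stride).astype(int)                # anchor video
    siamese = [np.clip(anchors + (k + 1), 0, T - 1) for k in range(K)]   # adjacent frames
    return anchors, siamese

anchors, siamese = siamese_sampling(T=1200, M=64, K=2)
```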
Since we utilize a sampling strategy to process the dense video frames, the start/end time of the target segment in the original video sequence needs to be accurately mapped to the corresponding boundaries in the new video sequence of $M$ frames. Following almost all previous TSG methods (Zhang et al., 2019b, 2020a; Liu et al., 2021a), the new start/end index is generally calculated by $\hat{\tau}_{s(e)} = \lfloor \tau_{s(e)} / T \times M \rfloor$ , where $\lfloor \cdot \rfloor$ denotes the rounding operator. During inference, the predicted segment boundary index can be easily converted back to the corresponding time in the dense video via $\tau_{s(e)} = \hat{\tau}_{s(e)} / M \times T$ . However, the rounding operation may produce boundary bias, since the new boundary frames are not necessarily semantically correlated with the query. Therefore, we further generate a soft label $\tilde{\tau}_{s(e)} = \langle \tau_{s(e)} / T\times M\rangle$ as an additional supervision to keep the appropriate segment length during training, where $\langle \cdot \rangle$ denotes the float result.
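A short sketch of how the hard (rounded) and soft (float) boundary labels could be derived from the original annotation; it follows the formulas above, with the function name being our own.

```python
def boundary_labels(tau_s, tau_e, T, M):
    """Map a ground-truth segment (tau_s, tau_e) in a T-frame video to the M-frame video.

    Returns both the rounded indices used by previous methods and the float
    "soft" labels that preserve the exact proportional boundary positions.
    """
    hard = (int(tau_s / T * M), int(tau_e / T * M))   # rounded labels (source of boundary bias)
    soft = (tau_s / T * M, tau_e / T * M)             # float labels used as extra supervision
    return hard, soft

# Example: with T=1200 and M=64, frame 700 maps to hard index 37 and soft label 37.33.
hard, soft = boundary_labels(tau_s=700, tau_e=900, T=1200, M=64)
```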
Video encoder For video encoding, we first extract frame features by a pre-trained C3D network (Tran et al., 2015), and then add a positional encoding (Vaswani et al., 2017) to provide positional knowledge. Such position encoding plays a crucial role in distinguishing semantics at diverse temporal locations. Considering the sequential characteristic in videos, a Bi-GRU (Chung et al., 2014) is further applied to incorporate the contextual information along time series. We denote the extracted video features of both anchor video and siamese video as $V^{a}, \{V^{s,k}\}_{k=1}^{K} \in \mathbb{R}^{M \times D}$ , respectively.
Query encoder For query encoding, we first extract word embeddings by the Glove model (Pennington et al., 2014). We also apply positional encoding and Bi-GRU to integrate the sequential information within the sentence. The final feature of the query is denoted as $Q \in \mathbb{R}^{N \times D}$ .
# 3.2 Multi-Modal Interaction
After obtaining the video features $V^{a},\{V^{s,k}\}_{k = 1}^{K}$ and query feature $\pmb{Q}$ , we utilize a co-attention mechanism (Lu et al., 2019) to capture the cross-modal interactions between them. Specifically, for each video feature $\mathbf{V} \in \{\mathbf{V}^a\} \cup \{\mathbf{V}^{s,k}\}_{k=1}^K$ , we first calculate the similarity between $\mathbf{V}$ and $\mathbf{Q}$ as:
$$
\boldsymbol{S} = \boldsymbol{V}\left(\boldsymbol{Q}\boldsymbol{W}_S\right)^{\top} \in \mathbb{R}^{M \times N}, \tag{1}
$$
where $\mathbf{W}_S \in \mathbb{R}^{D \times D}$ projects the query features into the same latent space as the video. Then, we compute two attention weights as:
$$
\begin{array}{l} \boldsymbol{A} = \boldsymbol{S}_r (\boldsymbol{Q}\boldsymbol{W}_S) \in \mathbb{R}^{M \times D}, \\ \boldsymbol{B} = \boldsymbol{S}_r \boldsymbol{S}_c^{\top} \boldsymbol{V} \in \mathbb{R}^{M \times D}, \tag{2} \end{array}
$$
where $S_{r}$ and $S_{c}$ are the row- and column-wise softmax results of $S$ , respectively. We compose the final query-guided video representation by learning its sequential features as follows:
$$
\boldsymbol{F} = \mathrm{Bi\text{-}GRU}([\boldsymbol{V}; \boldsymbol{A}; \boldsymbol{V} \odot \boldsymbol{A}; \boldsymbol{V} \odot \boldsymbol{B}]) \in \mathbb{R}^{M \times D}, \tag{3}
$$
where $\mathrm{Bi\text{-}GRU}(\cdot)$ denotes the Bi-GRU layers, $[;]$ is the concatenation operation, and $\odot$ is the element-wise multiplication. The output $F \in \{\pmb{F}^a\} \cup \{\pmb{F}^{s,k}\}_{k=1}^{K}$ encodes visual features with query-guided attention.
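The interaction of Eqs. (1)-(3) can be sketched in PyTorch as follows; this is our own reading of the formulas (module and variable names are assumptions), not the authors' released code.

```python
import torch
import torch.nn as nn

class CoAttention(nn.Module):
    """Query-guided video representation following Eqs. (1)-(3)."""

    def __init__(self, dim):
        super().__init__()
        self.w_s = nn.Linear(dim, dim, bias=False)    # W_S
        self.gru = nn.GRU(4 * dim, dim // 2, batch_first=True, bidirectional=True)

    def forward(self, v, q):
        # v: (B, M, D) video features; q: (B, N, D) query features.
        qw = self.w_s(q)                                   # Q W_S
        s = torch.bmm(v, qw.transpose(1, 2))               # Eq. (1): (B, M, N)
        s_r = s.softmax(dim=-1)                            # row-wise softmax
        s_c = s.softmax(dim=1)                             # column-wise softmax
        a = torch.bmm(s_r, qw)                             # Eq. (2): (B, M, D)
        b = torch.bmm(torch.bmm(s_r, s_c.transpose(1, 2)), v)
        f, _ = self.gru(torch.cat([v, a, v * a, v * b], dim=-1))  # Eq. (3)
        return f                                           # (B, M, D)
```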
|
| 114 |
+
|
| 115 |
+
# 3.3 Multi-Modal Reasoning Strategy
|
| 116 |
+
|
| 117 |
+
Note that the query-irrelevant new boundary frames encoded in the anchor video feature $F^a$ carry insufficient query-guided visual information for the later boundary prediction. To address this issue, we propose a new multi-modal reasoning strategy that enriches the query-related knowledge in the anchor features $F^a$ by referring to the contextual information in the siamese features $\{F^{s,k}\}_{k=1}^K$ . In detail, the multi-modal reasoning strategy consists of two components: a siamese knowledge aggregation module and a siamese knowledge reasoning module.
|
| 118 |
+
|
| 119 |
+
Siamese knowledge aggregation Intuitively, features with close visual-query correlation are expected to generate more consistent predictions of segment probabilities. To this end, we utilize a siamese knowledge aggregation module that transfers interdependent knowledge from the siamese features to the anchor ones, enriching the contexts of the anchor features and refining the prediction.
|
| 120 |
+
|
| 121 |
+
We propose to propagate and integrate knowledge between the query-guided visual features $\mathbf{F}^a$ and $\{\mathbf{F}^{s,k}\}_{k = 1}^{K}$ . Specifically, we first obtain their semantic similarities by calculating their pairwise cosine similarity scores as:
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\boldsymbol{C}(i, k) = \frac{\left(\boldsymbol{F}_{i}^{a}\right)\left(\boldsymbol{F}_{i}^{s,k}\right)^{\top}}{\|\boldsymbol{F}_{i}^{a}\|_{2}\,\|\boldsymbol{F}_{i}^{s,k}\|_{2}}, \tag{4}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
where $C \in \mathbb{R}^{M \times K}$ is the interdependent similarity matrix, $\| \cdot \|_2$ is the $l_2$ -norm, $i \in \{1, 2, \dots, M\}$ indexes the frames, and $k \in \{1, 2, \dots, K\}$ indexes the siamese videos. Here, each anchor frame only needs to be enriched by its own siamese frames. We therefore apply a softmax function to each row of the similarity matrix $C$ as:
|
| 128 |
+
|
| 129 |
+
$$
|
| 130 |
+
\boldsymbol{C}(i, k) = \frac{\exp(\boldsymbol{C}(i, k))}{\sum_{k'=1}^{K}\exp(\boldsymbol{C}(i, k'))}, \tag{5}
|
| 131 |
+
$$
|
| 132 |
+
|
| 133 |
+
where the new $C$ indicates the contextual affinities between each anchor feature and its corresponding siamese features.
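As a sketch, the per-frame cosine similarities and their row-wise normalization in Eqs. (4)–(5) can be computed as follows; the tensor names are ours, with `F_a` denoting the anchor features and `F_s` stacking the $K$ siamese features frame-aligned with the anchor.

```python
import torch
import torch.nn.functional as F

def siamese_affinities(F_a, F_s):
    """Eq. (4)-(5): cosine similarity between each anchor frame and its K siamese
    frames, normalized with a row-wise softmax (illustrative sketch).

    F_a: (M, D) anchor features; F_s: (K, M, D) siamese features aligned per frame.
    Returns C: (M, K) contextual affinities.
    """
    a = F.normalize(F_a, dim=-1)                  # (M, D)
    s = F.normalize(F_s, dim=-1)                  # (K, M, D)
    C = torch.einsum("md,kmd->mk", a, s)          # cosine similarity, Eq. (4)
    return torch.softmax(C, dim=-1)               # row-wise softmax, Eq. (5)

C = siamese_affinities(torch.randn(200, 512), torch.randn(4, 200, 512))
```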
|
| 134 |
+
|
| 135 |
+
Siamese knowledge reasoning After that, we adaptively propagate and merge the siamese knowledge into the anchor features to enrich the query-aware information. This is especially helpful for determining more accurate boundaries of the downsampled video. Specifically, the integration process can be formulated as:
|
| 136 |
+
|
| 137 |
+
$$
|
| 138 |
+
\widetilde{\boldsymbol{F}}^{a} = \sum_{k=1}^{K}\boldsymbol{C}(:, k)\cdot\left(\boldsymbol{F}^{s,k}\boldsymbol{W}_{1}\right) \in \mathbb{R}^{M \times D}, \tag{6}
|
| 139 |
+
$$
|
| 140 |
+
|
| 141 |
+
where $\widetilde{F}^a$ is the propagated semantic representation of the anchor video. To avoid over-propagation and the introduction of irrelevant noisy information, we further exploit a residual design with a weighting factor to enrich the anchor video as:
|
| 142 |
+
|
| 143 |
+
$$
|
| 144 |
+
\widetilde{\boldsymbol{F}}^{a} = \alpha\sum_{k=1}^{K}\boldsymbol{C}(:, k)\cdot\left(\boldsymbol{F}^{s,k}\boldsymbol{W}_{1}\right) + (1 - \alpha)\,\boldsymbol{F}^{a}\boldsymbol{W}_{2}, \tag{7}
|
| 145 |
+
$$
|
| 146 |
+
|
| 147 |
+
where $W_{1}, W_{2} \in \mathbb{R}^{D \times D}$ are projection matrices and the weighting factor $\alpha \in [0,1]$ is a hyper-parameter. With the above formulations, the knowledge of the siamese samples within the same video is propagated and integrated into the anchor one.
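A hedged sketch of this residual integration (Eqs. (6)–(7)), reusing the affinity matrix $C$ from the aggregation step, might look as follows; the projection layers and the value of $\alpha$ are placeholders rather than the authors' settings.

```python
import torch
import torch.nn as nn

class SiameseKnowledgeReasoning(nn.Module):
    """Eq. (6)-(7): residual integration of siamese knowledge into the anchor
    features (illustrative sketch, not the authors' implementation)."""
    def __init__(self, dim=512, alpha=0.5):
        super().__init__()
        self.w1 = nn.Linear(dim, dim, bias=False)   # W_1
        self.w2 = nn.Linear(dim, dim, bias=False)   # W_2
        self.alpha = alpha                           # weighting hyper-parameter

    def forward(self, F_a, F_s, C):
        # F_a: (M, D) anchor, F_s: (K, M, D) siamese, C: (M, K) affinities
        propagated = torch.einsum("mk,kmd->md", C, self.w1(F_s))            # Eq. (6)
        return self.alpha * propagated + (1 - self.alpha) * self.w2(F_a)    # Eq. (7)

skr = SiameseKnowledgeReasoning()
F_tilde = skr(torch.randn(200, 512), torch.randn(4, 200, 512),
              torch.softmax(torch.randn(200, 4), dim=-1))
```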
|
| 148 |
+
|
| 149 |
+
# 3.4 Grounding Heads with Soft Label
|
| 150 |
+
|
| 151 |
+
For the final segment boundary prediction, we first follow the span predictor in (Zhang et al., 2020a) and utilize two stacked LSTMs with two corresponding feed-forward layers to predict the start/end scores of each frame. In detail, we feed the contextual multi-modal feature $\widetilde{\pmb{F}}^a\in \mathbb{R}^{M\times D}$ into this span predictor and apply the softmax function on its two outputs to produce the probability distributions $P_{s},P_{e}\in \mathbb{R}^{M}$ of the start and end boundaries. We utilize the rounded boundary $\hat{\tau}_{s(e)}$ to generate the coarse label vectors $Y_{s(e)}$ and supervise $P_{s},P_{e}$ as:
|
| 152 |
+
|
| 153 |
+
$$
|
| 154 |
+
\mathcal{L}_{1} = f_{CE}\left(P_{s}, Y_{s}\right) + f_{CE}\left(P_{e}, Y_{e}\right), \tag{8}
|
| 155 |
+
$$
|
| 156 |
+
|
| 157 |
+
where $f_{CE}$ denotes the cross-entropy loss function. The predicted boundary indices $(\hat{\tau}_s',\hat{\tau}_e')$ are obtained from the maximum joint score of the start and end predictions $P_{s(e)}$ over frames as:
|
| 158 |
+
|
| 159 |
+
$$
|
| 160 |
+
\left(\hat{\tau}_{s}^{\prime}, \hat{\tau}_{e}^{\prime}\right) = \arg\max_{\hat{\tau}_{s}^{\prime}, \hat{\tau}_{e}^{\prime}} P_{s}\left(\hat{\tau}_{s}^{\prime}\right)P_{e}\left(\hat{\tau}_{e}^{\prime}\right), \tag{9}
|
| 161 |
+
$$
|
| 162 |
+
|
| 163 |
+
where $0 \leq \hat{\tau}_s' \leq \hat{\tau}_e' \leq M$ .
|
| 164 |
+
|
| 165 |
+
Since the above predictions give only coarse segment boundaries that still carry the boundary bias, we further utilize a parallel prediction head on $\widetilde{F}^a$ to predict more fine-grained float boundaries for the downsampled boundary frames. Specifically, we utilize the float boundary $\tilde{\tau}_{s(e)}$ to generate the soft labels $Y_{s(e)}'$ , and $\widetilde{F}^a$ is fed into a single feed-forward layer to predict the float boundaries $O_{s(e)}$ supervised by our designed soft labels $Y_{s(e)}'$ as follows:
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\mathcal{L}_{2} = \mathcal{R}_{1}\left(O_{s(e)} - Y_{s(e)}^{\prime}\right), \tag{10}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
where $\mathcal{R}_1$ is the smooth L1 loss. The final predicted segment is calculated by:
|
| 172 |
+
|
| 173 |
+
$$
|
| 174 |
+
\left(\tilde{\tau}_{s}^{\prime}, \tilde{\tau}_{e}^{\prime}\right) = \left(\hat{\tau}_{s}^{\prime} + 1 - O_{s}\left(\hat{\tau}_{s}^{\prime}\right),\ \hat{\tau}_{e}^{\prime} - 1 + O_{e}\left(\hat{\tau}_{e}^{\prime}\right)\right). \tag{11}
|
| 175 |
+
$$
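To make the two-head prediction concrete, a small sketch of the inference step (the joint argmax of Eq. (9) followed by the float refinement of Eq. (11)) is shown below; it assumes `P_s`, `P_e`, `O_s`, `O_e` have already been produced by the span predictor and the parallel head.

```python
import torch

def predict_segment(P_s, P_e, O_s, O_e):
    """Inference sketch for Eqs. (9) and (11).

    P_s, P_e: (M,) start/end probability distributions over sampled frames.
    O_s, O_e: (M,) per-frame float boundary predictions from the parallel head.
    """
    M = P_s.size(0)
    # Joint score of every (start, end) pair, keeping start <= end, Eq. (9)
    joint = torch.triu(P_s.unsqueeze(1) * P_e.unsqueeze(0))      # (M, M)
    s_hat, e_hat = divmod(torch.argmax(joint).item(), M)
    # Float refinement of the coarse indices, Eq. (11)
    s_tilde = s_hat + 1 - O_s[s_hat].item()
    e_tilde = e_hat - 1 + O_e[e_hat].item()
    return s_tilde, e_tilde

seg = predict_segment(torch.softmax(torch.randn(200), 0),
                      torch.softmax(torch.randn(200), 0),
                      torch.rand(200), torch.rand(200))
```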
|
| 176 |
+
|
| 177 |
+
# 4 Experiments
|
| 178 |
+
|
| 179 |
+
# 4.1 Datasets and Evaluation
|
| 180 |
+
|
| 181 |
+
ActivityNet Captions This dataset (Krishna et al., 2017) contains 20000 untrimmed YouTube videos with 100000 textual descriptions. The videos are 2 minutes long on average, and the annotated clips vary greatly in length, ranging from several seconds to over 3 minutes. Following the public split, we use 37417, 17505, and 17031 sentence-video pairs for training, validation, and testing.
|
| 182 |
+
|
| 183 |
+
TACoS TACoS (Regneri et al., 2013) contains 127 videos collected from cooking scenarios, and thus lacks diversity. The videos are around 7 minutes long on average. We use the same split as (Gao et al., 2017), which includes 10146, 4589, and 4083 query-segment pairs for training, validation, and testing.
|
| 184 |
+
|
| 185 |
+
Charades-STA Charades-STA is built on the Charades dataset (Sigurdsson et al., 2016), which focuses on indoor activities. The videos of Charades-STA are 30 seconds long on average, and there are 12408 and 3720 moment-query pairs in the training and testing sets, respectively.
|
| 186 |
+
|
| 187 |
+
Evaluation Following previous works (Gao et al., 2017; Liu et al., 2021a), we adopt “R@n, IoU=m”
|
| 188 |
+
|
| 189 |
+
<table><tr><td>Method</td><td>Feature</td><td>R@1, IoU=0.5</td><td>R@1, IoU=0.7</td><td>R@5, IoU=0.5</td><td>R@5, IoU=0.7</td></tr><tr><td>TGN</td><td>C3D</td><td>28.47</td><td>-</td><td>43.33</td><td>-</td></tr><tr><td>CTRL</td><td>C3D</td><td>29.01</td><td>10.34</td><td>59.17</td><td>37.54</td></tr><tr><td>QSPN</td><td>C3D</td><td>33.26</td><td>13.43</td><td>62.39</td><td>40.78</td></tr><tr><td>CBP</td><td>C3D</td><td>35.76</td><td>17.80</td><td>65.89</td><td>46.20</td></tr><tr><td>GDP</td><td>C3D</td><td>39.27</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VSLNet</td><td>C3D</td><td>43.22</td><td>26.16</td><td>-</td><td>-</td></tr><tr><td>CMIN</td><td>C3D</td><td>43.40</td><td>23.88</td><td>67.95</td><td>50.73</td></tr><tr><td>DRN</td><td>C3D</td><td>45.45</td><td>24.36</td><td>77.97</td><td>50.30</td></tr><tr><td>2DTAN</td><td>C3D</td><td>44.51</td><td>26.54</td><td>77.13</td><td>61.96</td></tr><tr><td>APGN</td><td>C3D</td><td>48.92</td><td>28.64</td><td>78.87</td><td>63.19</td></tr><tr><td>MGSL</td><td>C3D</td><td>51.87</td><td>31.42</td><td>82.60</td><td>66.71</td></tr><tr><td>SSRN</td><td>C3D</td><td>54.49</td><td>33.15</td><td>84.72</td><td>68.48</td></tr></table>
|
| 190 |
+
|
| 191 |
+
Table 1: Performance comparison with the state-of-the-art TSG models on the ActivityNet Captions dataset.
|
| 192 |
+
|
| 193 |
+
<table><tr><td>Method</td><td>Feature</td><td>R@1, IoU=0.3</td><td>R@1, IoU=0.5</td><td>R@5, IoU=0.3</td><td>R@5, IoU=0.5</td></tr><tr><td>TGN</td><td>C3D</td><td>21.77</td><td>18.90</td><td>39.06</td><td>31.02</td></tr><tr><td>CTRL</td><td>C3D</td><td>18.32</td><td>13.30</td><td>36.69</td><td>25.42</td></tr><tr><td>QSPN</td><td>C3D</td><td>20.15</td><td>15.23</td><td>36.72</td><td>25.30</td></tr><tr><td>CBP</td><td>C3D</td><td>27.31</td><td>24.79</td><td>43.64</td><td>37.40</td></tr><tr><td>GDP</td><td>C3D</td><td>24.14</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VSLNet</td><td>C3D</td><td>29.61</td><td>24.27</td><td>-</td><td>-</td></tr><tr><td>CMIN</td><td>C3D</td><td>24.64</td><td>18.05</td><td>38.46</td><td>27.02</td></tr><tr><td>DRN</td><td>C3D</td><td>-</td><td>23.17</td><td>-</td><td>33.36</td></tr><tr><td>2DTAN</td><td>C3D</td><td>37.29</td><td>25.32</td><td>57.81</td><td>45.04</td></tr><tr><td>APGN</td><td>C3D</td><td>40.47</td><td>27.86</td><td>59.98</td><td>47.12</td></tr><tr><td>MGSL</td><td>C3D</td><td>42.54</td><td>32.27</td><td>63.39</td><td>50.13</td></tr><tr><td>SSRN</td><td>C3D</td><td>45.10</td><td>34.33</td><td>65.26</td><td>51.85</td></tr></table>
|
| 194 |
+
|
| 195 |
+
Table 2: Performance comparison with the state-of-the-art TSG models on the TACoS dataset.
|
| 196 |
+
|
| 197 |
+
<table><tr><td>Method</td><td>Feature</td><td>R@1, IoU=0.5</td><td>R@1, IoU=0.7</td><td>R@5, IoU=0.5</td><td>R@5, IoU=0.7</td></tr><tr><td>2DTAN</td><td>VGG</td><td>39.81</td><td>23.25</td><td>79.33</td><td>51.15</td></tr><tr><td>APGN</td><td>VGG</td><td>44.23</td><td>25.64</td><td>89.51</td><td>57.87</td></tr><tr><td>SSRN</td><td>VGG</td><td>46.72</td><td>27.98</td><td>91.37</td><td>59.64</td></tr><tr><td>CTRL</td><td>C3D</td><td>23.63</td><td>8.89</td><td>58.92</td><td>29.57</td></tr><tr><td>QSPN</td><td>C3D</td><td>35.60</td><td>15.80</td><td>79.40</td><td>45.40</td></tr><tr><td>CBP</td><td>C3D</td><td>36.80</td><td>18.87</td><td>70.94</td><td>50.19</td></tr><tr><td>GDP</td><td>C3D</td><td>39.47</td><td>18.49</td><td>-</td><td>-</td></tr><tr><td>APGN</td><td>C3D</td><td>48.20</td><td>29.37</td><td>89.05</td><td>58.49</td></tr><tr><td>SSRN</td><td>C3D</td><td>50.39</td><td>31.42</td><td>90.68</td><td>59.94</td></tr><tr><td>DRN</td><td>I3D</td><td>53.09</td><td>31.75</td><td>89.06</td><td>60.05</td></tr><tr><td>APGN</td><td>I3D</td><td>62.58</td><td>38.86</td><td>91.24</td><td>62.11</td></tr><tr><td>MGSL</td><td>I3D</td><td>63.98</td><td>41.03</td><td>93.21</td><td>63.85</td></tr><tr><td>SSRN</td><td>I3D</td><td>65.59</td><td>42.65</td><td>94.76</td><td>65.48</td></tr></table>
|
| 198 |
+
|
| 199 |
+
Table 3: Performance comparison with the state-of-the-art TSG models on the Charades-STA dataset.
|
| 200 |
+
|
| 201 |
+
as our evaluation metrics. "R@n, IoU=m" is defined as the percentage of queries for which at least one of the top-n retrieved moments has an IoU with the ground truth larger than m; higher is better.
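For reference, a simple sketch of how "R@n, IoU=m" can be computed over a set of predictions is given below; this is our own illustration, not the authors' evaluation script.

```python
def temporal_iou(pred, gt):
    """IoU between two (start, end) segments in seconds."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = max(pred[1], gt[1]) - min(pred[0], gt[0])
    return inter / union if union > 0 else 0.0

def recall_at_n(all_preds, all_gts, n=1, m=0.5):
    """R@n, IoU=m: fraction of queries whose top-n predictions contain at least
    one segment with IoU > m against the ground truth (illustrative sketch)."""
    hits = sum(
        any(temporal_iou(p, gt) > m for p in preds[:n])
        for preds, gt in zip(all_preds, all_gts)
    )
    return 100.0 * hits / len(all_gts)

# Example: two queries, each with two ranked predictions
preds = [[(5.0, 12.0), (4.0, 20.0)], [(30.0, 42.0), (10.0, 15.0)]]
gts = [(6.0, 13.0), (11.0, 16.0)]
print(recall_at_n(preds, gts, n=1, m=0.5))   # 50.0
```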
|
| 202 |
+
|
| 203 |
+
<table><tr><td></td><td>CTRL</td><td>TGN</td><td>2DTAN</td><td>CMIN</td><td>DRN</td><td>APGN</td><td>SSRN</td></tr><tr><td>VPS ↑</td><td>0.45</td><td>1.09</td><td>1.75</td><td>81.29</td><td>133.38</td><td>146.67</td><td>158.12</td></tr><tr><td>Para. ↓</td><td>22</td><td>166</td><td>363</td><td>78</td><td>214</td><td>91</td><td>184</td></tr></table>
|
| 204 |
+
|
| 205 |
+
Table 4: Efficiency comparison in terms of videos per second (VPS) and parameters (Para.).
|
| 206 |
+
|
| 207 |
+
# 4.2 Implementation Details
|
| 208 |
+
|
| 209 |
+
For video encoding, we apply C3D (Tran et al., 2015) to encode the videos on all three datasets, and also extract I3D (Carreira and Zisserman, 2017) and VGG (Simonyan and Zisserman, 2014) features on the Charades-STA dataset for fair comparison with other methods. Following previous works, we set the length $M$ of the sampled anchor video sequence to 200 for the ActivityNet Captions and TACoS datasets, and 64 for the Charades-STA dataset. For sentence encoding, we utilize GloVe word2vec (Pennington et al., 2014) to embed each word into a 300-dimensional feature. The hidden state dimensions of the Bi-GRU and Bi-LSTM are set to 512. The number $K$ of sampled siamese frames for each anchor frame is set to 4. We train our model with the Adam optimizer with learning rates of $8 \times 10^{-4}$ , $3 \times 10^{-4}$ , and $4 \times 10^{-4}$ for the ActivityNet Captions, TACoS, and Charades-STA datasets, respectively. The batch size is set to 64.
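These reported hyper-parameters could be collected into a small per-dataset configuration, for example as below; the dictionary layout is only an illustration of the settings listed above.

```python
import torch

# Hyper-parameters reported in Section 4.2, grouped per dataset (illustrative only).
CONFIGS = {
    "activitynet": {"M": 200, "K": 4, "lr": 8e-4, "batch_size": 64, "hidden": 512},
    "tacos":       {"M": 200, "K": 4, "lr": 3e-4, "batch_size": 64, "hidden": 512},
    "charades":    {"M": 64,  "K": 4, "lr": 4e-4, "batch_size": 64, "hidden": 512},
}

def build_optimizer(model_params, dataset):
    """Adam optimizer with the per-dataset learning rate (sketch)."""
    return torch.optim.Adam(model_params, lr=CONFIGS[dataset]["lr"])
```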
|
| 210 |
+
|
| 211 |
+
# 4.3 Comparison with State-of-the-Arts
|
| 212 |
+
|
| 213 |
+
Compared methods We compare our SSRN with state-of-the-art methods, including: (1) propose-and-rank methods: TGN (Chen et al., 2018), CTRL (Gao et al., 2017), QSPN (Xu et al., 2019), CBP (Wang et al., 2020), CMIN (Zhang et al., 2019b), 2DTAN (Zhang et al., 2020b), APGN (Liu et al., 2021a), and MGSL (Liu et al., 2022a); (2) proposal-free methods: GDP (Chen et al., 2020), VSLNet (Zhang et al., 2020a), and DRN (Zeng et al., 2020).
|
| 214 |
+
|
| 215 |
+
Quantitative comparison As shown in Tables 1, 2 and 3, our SSRN outperforms all the existing methods by a large margin. Specifically, on the ActivityNet Captions dataset, compared to the previous best method MGSL, we outperform it by $2.62\%$ , $1.73\%$ , $2.12\%$ , and $1.77\%$ on the four metrics, respectively. Although the TACoS dataset suffers from similar kitchen backgrounds and cooking objects among the videos, it is worth noting that our SSRN still achieves significant improvements. Compared to the previous best method MGSL, our method brings significant improvements of $2.06\%$ and $1.72\%$ in the strict “R@1, IoU=0.5” and “R@5, IoU=0.5” metrics, respectively.
|
| 216 |
+
|
| 217 |
+
<table><tr><td>Model</td><td>Anchor</td><td>Siamese</td><td>SKA</td><td>SKR</td><td>SL</td><td>R@1, IoU=0.5</td><td>R@1, IoU=0.7</td></tr><tr><td>①</td><td>✓</td><td>×</td><td>×</td><td>×</td><td>×</td><td>42.78</td><td>26.35</td></tr><tr><td>②</td><td>✓</td><td>×</td><td>×</td><td>×</td><td>✓</td><td>43.64</td><td>26.81</td></tr><tr><td>③</td><td>✓</td><td>✓</td><td>×</td><td>×</td><td>×</td><td>45.50</td><td>27.93</td></tr><tr><td>④</td><td>✓</td><td>✓</td><td>×</td><td>✓</td><td>×</td><td>48.97</td><td>29.36</td></tr><tr><td>⑤</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>×</td><td>51.26</td><td>31.02</td></tr><tr><td>⑥</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>54.49</td><td>33.15</td></tr></table>
|
| 218 |
+
|
| 219 |
+
Table 5: Main ablation studies on ActivityNet Captions dataset, where "Anchor" and "Siamese" denote the anchor and siamese frames, "SKA" and "SKR" denote the siamese knowledge aggregation and siamese knowledge reasoning, "SL" denotes the usage of soft label.
|
| 220 |
+
|
| 221 |
+
On the Charades-STA dataset, for fair comparison with other methods, we perform experiments with the same features (i.e., VGG, C3D, and I3D) reported in their papers. Our SSRN reaches the highest results over all metrics.
|
| 222 |
+
|
| 223 |
+
Efficiency comparison To compare the efficiency of our SSRN with previous methods, we make a fair comparison on a single Nvidia Titan XP GPU on the TACoS dataset. As shown in Table 4, we achieve a much faster processing speed with a competitive model size.
|
| 224 |
+
|
| 225 |
+
# 4.4 Ablation Study
|
| 226 |
+
|
| 227 |
+
Effect of the siamese learning strategy As shown in Table 5, we set the network without siamese sampling/reasoning and without soft-label training as the baseline (model ①). Compared with the baseline, model ③ additionally extracts siamese frames for contextual learning and clearly improves the accuracy. It directly uses an average operation to aggregate siamese knowledge and concatenation for knowledge reasoning, which validates that multiple frames from the same video can bring strong knowledge to enhance the network. When the SKR module is further applied to model ③, model ④ performs better, demonstrating the effectiveness of our SKR module. When we further add the SKA module, model ⑤ reaches a higher performance, which demonstrates the effectiveness of building the interdependent knowledge (i.e., siamese knowledge) for integrating the samples. It also shows that adaptive reasoning with our siamese knowledge is better than the plain average operation. We believe the siamese knowledge not only serves as a knowledge-routed representation, but also implicitly constrains the semantic consistency of frames in the frame-text feature space.
|
| 228 |
+
|
| 229 |
+

|
| 230 |
+
Figure 3: Visualization examples showing the benefits brought by the siamese frames. Due to the boundary bias during the sparse sampling process, the previous VSLNet method filters out the true-positive boundary frames and fails to predict accurate boundaries. In contrast, our siamese learning strategy supplements the query-related information of the adjacent frames into the ambiguous downsampled boundary frames to predict more precise boundaries.
|
| 231 |
+
|
| 232 |
+

|
| 233 |
+
|
| 234 |
+
<table><tr><td>Number</td><td>K=1</td><td>K=2</td><td>K=4</td><td>K=8</td></tr><tr><td>R@1, IoU=0.5</td><td>50.45</td><td>52.10</td><td>54.49</td><td>54.62</td></tr><tr><td>R@1, IoU=0.7</td><td>29.64</td><td>30.78</td><td>33.15</td><td>33.27</td></tr></table>

Table 6: The effect of the number $K$ of sampled siamese frames on the ActivityNet Captions dataset.
|
| 235 |
+
|
| 236 |
+
Effect of the usage of soft label We also investigate whether our soft label (float value) of the segment boundary contributes to the performance of our model. As shown in Table 5, directly applying soft-label learning to the baseline does not bring a significant performance improvement (model ②). This is mainly because the boundary frame may be query-irrelevant, so its feature cannot be accurately matched with the query. In contrast, comparing model ⑥ with model ⑤, model ⑥ enriches the boundary frames with siamese contexts and supplements them with the neighboring query-related visual information. Therefore, using the soft label during training brings a significant improvement.
|
| 237 |
+
|
| 238 |
+
Effect of the number of siamese frames We compare our method with various numbers of siamese frames in Table 6. When increasing the number of siamese samples $K$ from 1 to 8, the accuracy improves accordingly. This demonstrates that more siamese samples bring richer knowledge from which our network benefits. Although the accuracy increases with the number of siamese frames, the improvement from 4 to 8 is slight. We attribute this to knowledge saturation, i.e., the model already has enough knowledge to learn the task on this dataset, so simply adding more siamese frames is of little use. To balance training time and accuracy, we set $K = 4$ in our final version.
|
| 239 |
+
|
| 240 |
+
Plug-and-Play Our proposed siamese learning strategy is flexible and can be applied to other
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
|
| 244 |
+
<table><tr><td>Methods</td><td>Variant</td><td>R@1, IoU=0.5</td><td>R@1, IoU=0.7</td></tr><tr><td rowspan="2">VSLNet</td><td>Origin</td><td>43.22</td><td>26.16</td></tr><tr><td>+siamese</td><td>50.38</td><td>30.06</td></tr><tr><td rowspan="2">CBLN</td><td>Origin</td><td>48.12</td><td>27.60</td></tr><tr><td>+siamese</td><td>56.86</td><td>30.79</td></tr><tr><td rowspan="2">MGSL</td><td>Origin</td><td>51.87</td><td>31.42</td></tr><tr><td>+siamese</td><td>58.77</td><td>33.41</td></tr></table>
|
| 245 |
+
|
| 246 |
+
Table 7: We apply our siamese learning strategy to existing TSG models on the ActivityNet Captions dataset.
|
| 247 |
+
|
| 248 |
+
TSG methods for anchor feature enhancement. As shown in Table 7, we directly apply the siamese learning strategy to existing models to enrich their anchor features, without using soft-label training. The results show that our siamese learning strategy provides more contextual and fine-grained information for anchor feature encoding, bringing large improvements.
|
| 249 |
+
|
| 250 |
+
# 4.5 Qualitative Results
|
| 251 |
+
|
| 252 |
+
In Figure 3, we show two visualization examples to qualitatively analyze what kind of knowledge the siamese frames bring to the anchor frames. It is unavoidable to lose some visual content when sparsely sampling from the video. In particular, for boundary frames that are easily filtered out by sampling, the visual content of the newly sampled boundary may lose query-relevant information (e.g., the brown words in the figure). However, we can recover the absent content from their siamese frames thanks to the different sampling indices and durations. Hence, our siamese frames enrich and supplement the sampled frames with more consecutive query-related visual semantics for fine-grained video comprehension, keeping the appropriate segment length of the sampled video for more accurate boundary prediction.
|
| 253 |
+
|
| 254 |
+
# 5 Conclusion
|
| 255 |
+
|
| 256 |
+
In this paper, we propose a novel Siamese Sampling and Reasoning Network (SSRN) to alleviate the limitations of both boundary bias and reasoning bias in existing TSG methods. In addition to the original anchor frames, our model samples a certain number of siamese frames from the same video to enrich and refine the visual semantics of the anchor frames. A soft label is further exploited to supervise the enhanced anchor features for predicting more accurate segment boundaries. Experimental results show both the effectiveness and the efficiency of our SSRN on three challenging datasets.
|
| 257 |
+
|
| 258 |
+
# Limitations
|
| 259 |
+
|
| 260 |
+
This work analyzes an interesting problem of learning from inside the video to address the limitation of boundary bias in temporal sentence grounding. Since our method targets the issue of long-video sampling, it may not be helpful for short-video processing, although it can still improve contextual representation learning for short videos. Besides, our sampled siamese frames bring extra overhead (e.g., computation, memory, and parameters) during training and testing. Therefore, a more lightweight way to perform siamese knowledge extraction is a promising future direction.
|
| 261 |
+
|
| 262 |
+
# 6 Acknowledgments
|
| 263 |
+
|
| 264 |
+
This work was supported by National Natural Science Foundation of China (No.61972448, No.62272328, No.62172038 and No.62172068).
|
| 265 |
+
|
| 266 |
+
# References
|
| 267 |
+
|
| 268 |
+
Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. 2017. Localizing moments in video with natural language. In Proceedings of the IEEE International Conference on Computer Vision (ICCV).
|
| 269 |
+
Meng Cao, Long Chen, Mike Zheng Shou, Can Zhang, and Yuexian Zou. 2021. On pursuit of designing multi-modal transformer for video grounding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9810-9823.
|
| 270 |
+
Joao Carreira and Andrew Zisserman. 2017. Quo vadis, action recognition? a new model and the kinetics dataset. In proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 6299-6308.
|
| 271 |
+
|
| 272 |
+
Jingyuan Chen, Xinpeng Chen, Lin Ma, Zequn Jie, and Tat-Seng Chua. 2018. Temporally grounding natural sentence in video. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 162-171.
|
| 273 |
+
Long Chen, Chujie Lu, Siliang Tang, Jun Xiao, Dong Zhang, Chilie Tan, and Xiaolin Li. 2020. Rethinking the bottom-up framework for query-based video localization. In Proceedings of the AAAI Conference on Artificial Intelligence.
|
| 274 |
+
Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence modeling. In Advances in Neural Information Processing Systems (NIPS).
|
| 275 |
+
Jianfeng Dong, Xirong Li, Chaoxi Xu, Shouling Ji, and Xun Wang. 2019. Dual encoding for zero-example video retrieval. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR).
|
| 276 |
+
Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. 2017. Tall: Temporal activity localization via language query. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 5267-5275.
|
| 277 |
+
Runzhou Ge, Jiyang Gao, Kan Chen, and Ram Nevatia. 2019. Mac: Mining activity concepts for language-based temporal localization. In IEEE Winter Conference on Applications of Computer Vision (WACV), pages 245-253.
|
| 278 |
+
Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. 2017. Dense-captioning events in videos. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 706-715.
|
| 279 |
+
Daizong Liu, Xiaoye Qu, Xing Di, Yu Cheng, Zichuan Xu, and Pan Zhou. 2022a. Memory-guided semantic learning network for temporal sentence grounding. In Proceedings of the AAAI Conference on Artificial Intelligence.
|
| 280 |
+
Daizong Liu, Xiaoye Qu, Jianfeng Dong, and Pan Zhou. 2020a. Reasoning step-by-step: Temporal sentence localization in videos via deep rectification-modulation network. In Proceedings of the 28th International Conference on Computational Linguistics, pages 1841-1851.
|
| 281 |
+
Daizong Liu, Xiaoye Qu, Jianfeng Dong, and Pan Zhou. 2021a. Adaptive proposal generation network for temporal sentence localization in videos. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9292-9301.
|
| 282 |
+
Daizong Liu, Xiaoye Qu, Jianfeng Dong, Pan Zhou, Yu Cheng, Wei Wei, Zichuan Xu, and Yulai Xie. 2021b. Context-aware biaffine localizing network for temporal sentence grounding. In Proceedings of
|
| 283 |
+
|
| 284 |
+
the IEEE/CVF Conference on Computer Vision and Pattern Recognition.
|
| 285 |
+
Daizong Liu, Xiaoye Qu, Xiao-Yang Liu, Jianfeng Dong, Pan Zhou, and Zichuan Xu. 2020b. Jointly cross- and self-modal graph attention network for query-based moment localization. In Proceedings of the 28th ACM International Conference on Multimedia, pages 4070-4078.
|
| 286 |
+
Daizong Liu, Xiaoye Qu, Yinzhen Wang, Xing Di, Kai Zou, Yu Cheng, Zichuan Xu, and Pan Zhou. 2022b. Unsupervised temporal video grounding with deep semantic clustering. In Proceedings of the AAAI Conference on Artificial Intelligence.
|
| 287 |
+
Daizong Liu, Xiaoye Qu, and Pan Zhou. 2021c. Progressively guide to attend: An iterative alignment framework for temporal sentence grounding. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 9302-9311.
|
| 288 |
+
Daizong Liu, Xiaoye Qu, Pan Zhou, and Yang Liu. 2022c. Exploring motion and appearance information for temporal sentence grounding. In Proceedings of the AAAI Conference on Artificial Intelligence.
|
| 289 |
+
Meng Liu, Xiang Wang, Liqiang Nie, Xiangnan He, Baoquan Chen, and Tat-Seng Chua. 2018a. Attentive moment retrieval in videos. In Proceedings of the 41st International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR), pages 15-24.
|
| 290 |
+
Meng Liu, Xiang Wang, Liqiang Nie, Qi Tian, Baoquan Chen, and Tat-Seng Chua. 2018b. Cross-modal moment localization in videos. In Proceedings of the 26th ACM international conference on Multimedia, pages 843-851.
|
| 291 |
+
Chujie Lu, Long Chen, Chilie Tan, Xiaolin Li, and Jun Xiao. 2019. DEBUG: A dense bottom-up grounding approach for natural language video localization. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP).
|
| 292 |
+
Jonghwan Mun, Minsu Cho, and Bohyung Han. 2020. Local-global video-text interactions for temporal grounding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 10810-10819.
|
| 293 |
+
Guoshun Nan, Rui Qiao, Yao Xiao, Jun Liu, Sicong Leng, Hao Zhang, and Wei Lu. 2021. Interventional video grounding with dual contrastive learning. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR).
|
| 294 |
+
Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word representation. In Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543.
|
| 295 |
+
|
| 296 |
+
Xiaoye Qu, Pengwei Tang, Zhikang Zou, Yu Cheng, Jianfeng Dong, Pan Zhou, and Zichuan Xu. 2020. Fine-grained iterative attention network for temporal language localization in videos. In Proceedings of the 28th ACM International Conference on Multimedia, pages 4280-4288.
|
| 297 |
+
Michaela Regneri, Marcus Rohrbach, Dominikus Wetzel, Stefan Thater, Bernt Schiele, and Manfred Pinkal. 2013. Grounding action descriptions in videos. Transactions of the Association for Computational Linguistics, 1:25-36.
|
| 298 |
+
Cristian Rodriguez, Edison Marrese-Taylor, Fatemeh Sadat Saleh, Hongdong Li, and Stephen Gould. 2020. Proposal-free temporal moment localization of a natural-language query in video using guided attention. In The IEEE Winter Conference on Applications of Computer Vision (WACV), pages 2464-2473.
|
| 299 |
+
Gunnar A Sigurdsson, Gül Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. 2016. Hollywood in homes: Crowdsourcing data collection for activity understanding. In European Conference on Computer Vision (ECCV), pages 510-526.
|
| 300 |
+
Karen Simonyan and Andrew Zisserman. 2014. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556.
|
| 301 |
+
Joyeeta Singha, Amarjit Roy, and Rahul Hussain Laskar. 2018. Dynamic hand gesture recognition using vision-based approach for human-computer interaction. Neural Computing and Applications.
|
| 302 |
+
Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. 2015. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE International Conference on Computer Vision (ICCV), pages 4489-4497.
|
| 303 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Processing Systems (NIPS), pages 5998-6008.
|
| 304 |
+
Jingwen Wang, Lin Ma, and Wenhao Jiang. 2020. Temporally grounding language queries in videos by contextual boundary-aware prediction. In Proceedings of the AAAI Conference on Artificial Intelligence.
|
| 305 |
+
Weining Wang, Yan Huang, and Liang Wang. 2019. Language-driven temporal activity localization: A semantic matching reinforcement learning model. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 334-343.
|
| 306 |
+
Shaoning Xiao, Long Chen, Jian Shao, Yueting Zhuang, and Jun Xiao. 2021. Natural language video localization with learnable moment proposals. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 4008-4017.
|
| 307 |
+
|
| 308 |
+
Huijuan Xu, Kun He, Bryan A Plummer, Leonid Sigal, Stan Sclaroff, and Kate Saenko. 2019. Multilevel language and vision integration for text-to-clip retrieval. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 9062–9069.
|
| 309 |
+
Xun Yang, Jianfeng Dong, Yixin Cao, Xun Wang, Meng Wang, and Tat-Seng Chua. 2020. Tree-augmented cross-modal encoding for complex-query video retrieval. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR), pages 1339-1348.
|
| 310 |
+
Yitian Yuan, Lin Ma, Jingwen Wang, Wei Liu, and Wenwu Zhu. 2019a. Semantic conditioned dynamic modulation for temporal sentence grounding in videos. In Advances in Neural Information Processing Systems (NIPS), pages 534-544.
|
| 311 |
+
Yitian Yuan, Tao Mei, and Wenwu Zhu. 2019b. To find where you talk: Temporal sentence localization in video with attention based location regression. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 9159-9166.
|
| 312 |
+
Runhao Zeng, Haoming Xu, Wenbing Huang, Peihao Chen, Mingkui Tan, and Chuang Gan. 2020. Dense regression network for video grounding. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 10287-10296.
|
| 313 |
+
Da Zhang, Xiyang Dai, Xin Wang, Yuan-Fang Wang, and Larry S Davis. 2019a. Man: Moment alignment network for natural language moment retrieval via iterative graph adjustment. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1247-1257.
|
| 314 |
+
Hao Zhang, Aixin Sun, Wei Jing, Liangli Zhen, and Rick Siow Mong Goh. 2021. Parallel attention network with sequence matching for video grounding. In Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021.
|
| 315 |
+
Hao Zhang, Aixin Sun, Wei Jing, and Joey Tianyi Zhou. 2020a. Span-based localizing network for natural language video localization. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6543-6554.
|
| 316 |
+
Songyang Zhang, Houwen Peng, Jianlong Fu, and Jiebo Luo. 2020b. Learning 2d temporal adjacent networks for moment localization with natural language. In Proceedings of the AAAI Conference on Artificial Intelligence.
|
| 317 |
+
Zhu Zhang, Zhijie Lin, Zhou Zhao, and Zhenxin Xiao. 2019b. Cross-modal interaction networks for query-based moment retrieval in videos. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval (SIGIR), pages 655-664.
|
rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c4b13d94af2ab6ac2c5f7b3bff33820176ebd589aec73198970b0a9869e54a58
|
| 3 |
+
size 513259
|
rethinkingthevideosamplingandreasoningstrategiesfortemporalsentencegrounding/layout.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e3378ffbf91aa792c8cc366395210538162d329e266a10ed025823735268eb7d
|
| 3 |
+
size 378441
|
revisitingtherolesoftextintextgames/d883147c-abf3-45e2-930a-abfa9c21b9ca_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:17793011291c89f4002defa9769d19e59b9c6cada1c8c2bc7e6724b6b5496fba
|
| 3 |
+
size 81209
|
revisitingtherolesoftextintextgames/d883147c-abf3-45e2-930a-abfa9c21b9ca_model.json
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:458a0cc673dc9fc88b9913b85f02fa260b814379ba8d25623e49e3b41690cd74
|
| 3 |
+
size 93088
|
revisitingtherolesoftextintextgames/d883147c-abf3-45e2-930a-abfa9c21b9ca_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:dca34164927e6802d8e9caad2c3f767be43ac85b408509e9b4e9a7481b3315b5
|
| 3 |
+
size 545441
|
revisitingtherolesoftextintextgames/full.md
ADDED
|
@@ -0,0 +1,381 @@
| 1 |
+
# Revisiting the Roles of "Text" in Text Games
|
| 2 |
+
|
| 3 |
+
Yi Gu\*1 Shunyu Yao\*2 Chuang Gan3,4 Joshua B. Tenenbaum5 Mo Yu6
|
| 4 |
+
|
| 5 |
+
$^{1}$ UC San Diego $^{2}$ Princeton University $^{3}$ MIT-IBM Watson AI Lab
|
| 6 |
+
|
| 7 |
+
$^{4}$ UMass Amherst $^{5}$ MIT $^{6}$ WeChat AI
|
| 8 |
+
|
| 9 |
+
yig025@ucsd.edu, shunyuy@princeton.edu, moyumyu@tencent.com
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Text games present opportunities for natural language understanding (NLU) methods to tackle reinforcement learning (RL) challenges. However, recent work has questioned the necessity of NLU by showing random text hashes could perform decently. In this paper, we pursue a fine-grained investigation into the roles of text in the face of different RL challenges, and reconcile that semantic and non-semantic language representations could be complementary rather than contrasting. Concretely, we propose a simple scheme to extract relevant contextual information into an approximate state hash as extra input for an RNN-based text agent. Such a lightweight plug-in achieves competitive performance with state-of-the-art text agents using advanced NLU techniques such as knowledge graph and passage retrieval, suggesting non-NLU methods might suffice to tackle the challenge of partial observability. However, if we remove RNN encoders and use approximate or even ground-truth state hash alone, the model performs miserably, which confirms the importance of semantic function approximation to tackle the challenge of combinatorially large observation and action spaces. Our findings and analysis provide new insights for designing better text game task setups and agents.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
In text-based games (Narasimhan et al., 2015; He et al., 2016; Hausknecht et al., 2019; Côté et al., 2018), players read text observation, command text actions to interact with a simulated world, and gain rewards as they progress through the story. From a reinforcement learning (RL) viewpoint, they are partially observable Markov decision processes (POMDP)—the current observation does not carry the full information of the game progress. In our Figure 1 example, visiting the Living Room before or after the dark place puzzle may yield
|
| 18 |
+
|
| 19 |
+
Observation: Kitchen. You are in the kitchen of the white house... There is a brass lantern (battery-powered) here...
|
| 20 |
+
|
| 21 |
+
Look: <Same as Observation>
|
| 22 |
+
|
| 23 |
+
Inventory: A glass bottle containing water.
|
| 24 |
+
|
| 25 |
+
# Action: Go west
|
| 26 |
+
|
| 27 |
+
# Several steps later ...
|
| 28 |
+
|
| 29 |
+
Observation: Living Room. Above the trophy case hangs an elvish sword of great antiquity.
|
| 30 |
+
|
| 31 |
+
Look: Living Room. There is a doorway to the east, ..., and a rug lying beside an open trap door.
|
| 32 |
+
|
| 33 |
+
Inventory: A glass bottle containing water. A brown sack.
|
| 34 |
+
|
| 35 |
+
# Action: Go down
|
| 36 |
+
|
| 37 |
+
# +25 reward
|
| 38 |
+
|
| 39 |
+
Observation: You have moved into a dark place.
|
| 40 |
+
|
| 41 |
+
The trap door crashes shut, and you hear someone barring it. It is pitch black. You are likely to be eaten by a grue.
|
| 42 |
+
|
| 43 |
+
Look: It is pitch black. You are likely to be eaten by a grue.
|
| 44 |
+
|
| 45 |
+
Inventory: A glass bottle containing water. A brown sack.
|
| 46 |
+
|
| 47 |
+
# Action: North
|
| 48 |
+
|
| 49 |
+
# -10 reward
|
| 50 |
+
|
| 51 |
+
Observation: Oh, no! A lurking grue slithered into the room and devoured you!
|
| 52 |
+
|
| 53 |
+
**** You have died ****
|
| 54 |
+
|
| 55 |
+
Figure 1: A game trajectory from Zork I.
|
| 56 |
+
|
| 57 |
+
the same observation, but only when informed by the game history can the player decide whether go down is the right action.
|
| 58 |
+
|
| 59 |
+
Recent work has proposed to incorporate RL agents with natural language understanding (NLU) capabilities for better text game performance. For example, pre-trained language models support combinatorial action generation (Yao et al., 2020); commonsense reasoning (Murugesan et al., 2021), information extraction (Ammanabrolu and Hausknecht, 2020), and reading comprehension (Guo et al., 2020) provide priors for exploration with sparse reward and long horizon; and knowledge graph (Ammanabrolu and Hausknecht, 2020) and document retrieval (Guo et al., 2020) techniques help alleviate partial observability.
|
| 60 |
+
|
| 61 |
+
Nevertheless, Yao et al. (2021) doubts the need of NLU for RL agents trained and evaluated on the same game. They found that a text game agent, DRRN (He et al., 2016), performs even slightly better when RNN-based language representations
|
| 62 |
+
|
| 63 |
+
are replaced with non-semantic hash codes. Intuitively, hash serves to memorize state-action pairs and ignore text similarities, which is sometimes useful — consider the second-to-last observation in Figure 1 and a counterfactual observation where a "lantern" is added into "Inventory", RNNs might encode them very similarly though they lead to antipodal consequences (die or explore the underground). How do we reconcile this with recent NLU-augmented text agents with improved performances? Where are semantic representations useful, and where would a hash approach suffice?
|
| 64 |
+
|
| 65 |
+
In this paper, we present initial findings that semantic and non-semantic language representations could work hand-in-hand better than each alone by targeting different RL challenges. Concretely, we show the hash idea could help DRRN tackle partial observability: returning to the Figure 1 example, to get the lantern and avoid death, it is vital to know where the lantern is, which is revealed in a previous rather than the current observation. Based on such intuition, we propose a simple algorithm that tracks the current location and the up-to-date descriptions of all locations, then encodes them into a single approximate state hash vector as extra DRRN input. Though lightweight and easy to implement, such a representation plug-in improves DRRN scores by $29\%$ across games, with competitive performance against state-of-the-art text agents using advanced NLU techniques and pre-trained Transformer models. The effectiveness is further confirmed by comparing to models that plug in groundtruth state or location hash codes, where we find very little gap between our performance and these upper bounds. These results suggest that the current partial observability bottlenecks might not require advanced NLU models or semantic representations to conquer.
|
| 66 |
+
|
| 67 |
+
However, such a message is qualified by the ablations showing that the approximate state hash alone only achieves $58\%$ of the full performance, as it fails to handle other RL challenges such as the combinatorial state and action spaces. In conclusion, we find the role of NLU in text games is not black-or-white as indicated by prior work, but rather differs for different RL challenges, and agents could benefit from combining semantic and non-semantic language representations that target different functionalities. Our results and insights contribute to future research in designing better tasks and models toward autonomous agents with grounded language abilities.
|
| 68 |
+
|
| 69 |
+
# 2 Preliminaries
|
| 70 |
+
|
| 71 |
+
# 2.1 Problem Formulation
|
| 72 |
+
|
| 73 |
+
A text game can be formulated as a partially observable Markov decision process (POMDP) $\langle S, A, T, O, \Omega, R, \gamma \rangle$ , where at the $t$ -th turn the agent reads a textual observation $o_t = \Omega(s_t) \in O$ as a partial reflection of underlying world state $s_t \in S$ , issues a textual command $a_t \in A$ in response, and receives a sparse scalar reward $r_t = R(s_t, a_t)$ in light of game progress. The state transition $s_{t+1} = T(s_t, a_t)$ is hidden to the agent. The goal is to maximize the expected cumulative discounted rewards $\mathbf{E}[\sum_t \gamma^t r_t]$ .
|
| 74 |
+
|
| 75 |
+
Observations and States Following prior practice in the Jericho benchmark (Hausknecht et al., 2019), we augment the direct observation $o_t$ with the inventory $i_t$ and the location description $l_t$ obtained by issuing the actions "inventory" and "look" respectively. But even this may not reveal the complete $s_t$ (Section 1), which in Jericho includes an object tree and a large simulator RAM array hidden from players. As $s_t$ is large and lacks interpretability, more often used is the state hash $\mathrm{h}(s_t)$ , where $\mathrm{h} : S \to \mathbb{N}$ maps each state to an integer that can be used to probe whether states are identical, but cannot provide semantic information about state differences. Access to $s_t$ or $\mathrm{h}(s_t)$ is a handicap in Jericho.
|
| 76 |
+
|
| 77 |
+
# 2.2 The DRRN Baseline and its Hash Variant
|
| 78 |
+
|
| 79 |
+
Denote $c_{t} = (o_{1}, a_{1}, \dots, o_{t})$ as the game context up to $o_{t}$ , and for convenience we omit the subscript $t$ when no confusion is caused. Our baseline RL model, Deep Reinforcement Relevance Network (DRRN) (He et al., 2016), learns a Q-network
|
| 80 |
+
|
| 81 |
+
$$
|
| 82 |
+
Q\left(c_{t}, a_{t}\right) = \operatorname{MLP}(sr, ar) \tag{1}
|
| 83 |
+
$$
|
| 84 |
+
|
| 85 |
+
where the state and action representations
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
sr_{\mathrm{drrn}} = \left[\mathrm{GRU}_{1}\left(o_{t}\right), \mathrm{GRU}_{2}\left(i_{t}\right), \mathrm{GRU}_{3}\left(l_{t}\right)\right] \tag{2}
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
$$
|
| 92 |
+
ar_{\mathrm{drrn}} = \mathrm{GRU}_{4}(a_{t})
|
| 93 |
+
$$
|
| 94 |
+
|
| 95 |
+
are encoded by gated recurrent units (GRU) (Cho et al., 2014). The temporal difference (TD) loss and Boltzmann exploration are used for RL.
|
| 96 |
+
|
| 97 |
+
In Yao et al. (2021), Eq. 2 is replaced by random, fixed, non-semantic hash representations
|
| 98 |
+
|
| 99 |
+
$$
|
| 100 |
+
sr_{\mathrm{hash}} = \left[H\left(o_{t}\right), H\left(i_{t}\right), H\left(l_{t}\right)\right] \tag{3}
|
| 101 |
+
$$
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
ar_{\mathrm{hash}} = H\left(a_{t}\right)
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
where the hash vector function $H = \mathrm{vec} \circ \mathrm{h}$ first maps inputs to integers (via the Python built-in hash) and then to
|
| 108 |
+
|
| 109 |
+
random normal vectors (by using the integer as the generator seed). However, neither of the models addresses partial observability by using the context $c_{t}$ beyond the current observation $o_{t}$ .
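A minimal sketch of such a hash vector function is shown below; the exact hashing and vector dimension are our assumptions, not necessarily those of Yao et al. (2021).

```python
import numpy as np

def hash_vector(text, dim=128):
    """Map a text string to a fixed random normal vector (illustrative sketch).

    The string is hashed to an integer that seeds a random generator drawing
    the vector, so identical strings map to identical vectors within a process
    (str hashing is salted across runs unless PYTHONHASHSEED is fixed),
    while similar strings get unrelated representations.
    """
    seed = hash(text) % (2 ** 32)          # fold Python's built-in hash into a valid seed
    rng = np.random.default_rng(seed)
    return rng.standard_normal(dim)

obs = "Kitchen. You are in the kitchen of the white house..."
assert np.allclose(hash_vector(obs), hash_vector(obs))   # deterministic per string
```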
|
| 110 |
+
|
| 111 |
+
# 3 Method
|
| 112 |
+
|
| 113 |
+
The key to handling partial observability is to extract the appropriate state-distinguishing information from the context $c_{t}$ : under-extraction maps different states to the same representation, while over-extraction gives the same state diverging representations under different history paths. To approximate the state hash, we first obtain and maintain a location map by exploration with limited depth $d$ , collecting the names of adjacent rooms:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
po_{1} = \left\{\left(p, \operatorname{loc}\left(c_{t}, p\right)\right) \mid p \in A^{d}\right\} \tag{4}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $p$ is a sequence of navigation actions, and $\operatorname{loc}$ is the location reached after following $p$ from $c_t$ . Essentially, $po_1(c_t)$ serves to distinguish different locations that share the same name.
|
| 120 |
+
|
| 121 |
+
Secondly, we collect the most-recent location descriptions for all locations, so that we may know, for example, the whereabouts of the lantern when needed (Section 1).
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
po_{2} = \left\{\left(loc, \operatorname{LastLook}(loc)\right) \mid loc \in \operatorname{Map}\right\} \tag{5}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
Together, our model DRRN-LocationGraph (LoG) takes the state representation
|
| 128 |
+
|
| 129 |
+
$$
|
| 130 |
+
sr_{\mathrm{LoG}} = \left[sr_{\mathrm{drrn}}, H\left(po_{1}\right), H\left(po_{2}\right)\right] \tag{6}
|
| 131 |
+
$$
|
| 132 |
+
|
| 133 |
+
The algorithm details are in Appendix A.
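A rough sketch of how such an approximate state hash could be assembled is given below; the bookkeeping structures, helper names, and the reuse of the hash-vector idea from Eq. 3 are our own illustration of the description above, not the authors' released code.

```python
import numpy as np

def hash_vec(text, dim=128):
    """Text -> fixed random normal vector via an integer hash seed (as in Eq. 3)."""
    rng = np.random.default_rng(hash(text) % (2 ** 32))
    return rng.standard_normal(dim)

def approximate_state_hash(current_loc, adjacent_rooms, last_looks):
    """Build the extra state representation [H(po1), H(po2)] (illustrative sketch).

    current_loc:    name of the room the agent is currently in.
    adjacent_rooms: navigation action -> room name, from exploration at depth d=1.
    last_looks:     room name -> most recent "look" description for that room.
    """
    po1 = (current_loc, tuple(sorted(adjacent_rooms.items())))   # distinguishes same-name rooms
    po2 = tuple(sorted(last_looks.items()))                      # map-wide up-to-date descriptions
    return hash_vec(repr(po1)), hash_vec(repr(po2))

h1, h2 = approximate_state_hash(
    "Living Room",
    {"east": "Kitchen", "down": "Cellar"},
    {"Kitchen": "There is a brass lantern here...",
     "Living Room": "...a rug lying beside an open trap door."},
)
```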
|
| 134 |
+
|
| 135 |
+
# 4 Experiments
|
| 136 |
+
|
| 137 |
+
Implementation Details We adopt the DRRN hyperparameters from Yao et al. (2021) to train our model. Following previous work, we implement the BiDAF (Seo et al., 2016) attention mechanism and the inverse dynamics auxiliary objective (Yao et al., 2021) for better text encoding. The episode limit is 100 steps and training lasts 1,000 episodes over 8 parallel game environments. For $po_{1}$ , we use $d = 1$ as the depth limit. We train three independent runs for each game. More details are in Appendix B.
|
| 138 |
+
|
| 139 |
+
Baselines Our approach builds on the backbone DRRN agent, thus we provide fair comparisons to the original DRRN and its hash and inverse dynamics variants from Yao et al. (2021). We also
|
| 140 |
+
|
| 141 |
+
compare with more complex state-of-the-art agents that are designed to deal with partial observability via NLU:
|
| 142 |
+
|
| 143 |
+
- MPRC-DQN (Guo et al., 2020), which retrieves the relevant history to enhance the current observation, and formulates the action prediction as a multi-passage reading comprehension problem.
|
| 144 |
+
- KG-A2C (Ammanabrolu and Hausknecht, 2020; Ammanabrolu et al., 2020), which extracts an object graph with OpenIE (Angeli et al., 2015) or a BERT-based QA model (Devlin et al., 2019), and embeds the graph to a single vector as the state representation. We compare with the better result from the two papers for each game.
|
| 145 |
+
|
| 146 |
+
Evaluating Games We select 6 games from Jericho (Hausknecht et al., 2019) on which MPRC-DQN or KG-A2C exhibits performance boosts and which are thus more likely to suffer from partial observability.
|
| 147 |
+
|
| 148 |
+
# 4.1 Game Results
|
| 149 |
+
|
| 150 |
+
Table 1 shows game scores for all models. Among DRRN and its variants, DRRN-LoG performs best on 4 of the 6 games. More impressively, our agent is competitive against MPRC-DQN (better or equal scores on 3/6 games) and KG-A2C (better scores on 4/6 games) in terms of per-game comparisons. Overall, our DRRN-LoG achieves the second best average normalized score of $36\%$ , only behind the $41\%$ of MPRC-DQN (which is largely attributed to Zork III). Considering that we explicitly chose the six games in favor of these two state-of-the-art baselines, such a result indicates that advanced NLU techniques might not be a must for solving partial observability, at least in the scoring ranges of current text game agents (i.e. average normalized score less than $50\%$ ).
|
| 151 |
+
|
| 152 |
+
# 4.2 Oracle Analysis with Groundtruth States
|
| 153 |
+
|
| 154 |
+
Next, we aim to study the performance gap between our model and an oracle version that replaces our approximate state hash with the groundtruth state hash (GT-State) from Jericho. The GT-State hash perfectly distinguishes different states, with $sr_{gt} = (sr_{\mathrm{drrn}}, H(s))$ .
|
| 155 |
+
|
| 156 |
+
As shown in Table 2, the scores of DRRN-LoG and the GT-State version are very close across different games, meaning our approximation is close to perfect within the state hashing scheme. Notably, even GT-State fails to fully surpass MPRC-DQN or KG-A2C, suggesting NLU techniques might
|
| 157 |
+
|
| 158 |
+
<table><tr><td rowspan="3">Game</td><td colspan="2">DRRN</td><td colspan="6">DRRN and Variants</td><td colspan="5">Agents with advanced NLU</td></tr><tr><td rowspan="2">Avg</td><td rowspan="2">Max</td><td>Obs</td><td>Hash</td><td>+</td><td>Inv-Dy</td><td colspan="2">LoG (ours)</td><td colspan="2">MPRC-DQN</td><td colspan="2">KG-A2C</td><td rowspan="2">Max</td></tr><tr><td>Avg</td><td>Max</td><td>Avg</td><td>Max</td><td>Avg</td><td>Max</td><td>Avg</td><td>Max</td><td>Avg</td><td>Max</td></tr><tr><td>zork1</td><td>39.4</td><td>53</td><td>35.5</td><td>50</td><td>43.1</td><td>87</td><td>51.2</td><td>107</td><td>38.3</td><td>-</td><td>33.6</td><td>35</td><td>350</td></tr><tr><td>zork3</td><td>0.4</td><td>4.5</td><td>0.4</td><td>4</td><td>0.4</td><td>4</td><td>1.33</td><td>5</td><td>3.0</td><td>5.0</td><td>0.1</td><td>-</td><td>7</td></tr><tr><td>pentari</td><td>26.5</td><td>45</td><td>51.9</td><td>60</td><td>37.2</td><td>50</td><td>44.4</td><td>60</td><td>44.4</td><td>-</td><td>48.2</td><td>56</td><td>70</td></tr><tr><td>detective</td><td>290</td><td>337</td><td>290</td><td>317</td><td>290</td><td>323</td><td>288.8</td><td>313.3</td><td>317.7</td><td>-</td><td>246.1</td><td>274</td><td>360</td></tr><tr><td>ludicorp</td><td>12.7</td><td>23</td><td>14.8</td><td>23</td><td>13.5</td><td>23</td><td>16.7</td><td>23</td><td>10.9</td><td>40.7</td><td>17.6</td><td>19</td><td>150</td></tr><tr><td>inhumane</td><td>21.1</td><td>45</td><td>21.9</td><td>45</td><td>19.6</td><td>45</td><td>25.7</td><td>56.7</td><td>29.8</td><td>53.3</td><td>3</td><td>-</td><td>90</td></tr><tr><td>avg norm</td><td>.28</td><td>.52</td><td>.34</td><td>.52</td><td>.30</td><td>.51</td><td>.36</td><td>.59</td><td>.41</td><td>-</td><td>.27</td><td>-</td><td></td></tr></table>
|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
Figure 2: Ablation results on Zork I.
|
| 162 |
+
|
| 163 |
+
help these agents with RL challenges other than partial observability. Finally, we also show in Appendix C the performance of replacing our state approximation with the groundtruth room IDs (GT-Room), where our agent achieves on-par or better results on all the games. This confirms that our state approximation not only effectively identifies player locations by Eq. 4, but also brings richer state information thanks to Eq. 5.
|
| 164 |
+
|
| 165 |
+
# 4.3 Ablation Studies
|
| 166 |
+
|
| 167 |
+
In light of the good scores of DRRN-LoG, are distinguishing states and memorizing trajectories all it takes to solve a text game? To answer this question, we conduct ablation experiments to remove the text encoder (i.e. all GRUs) in our agent as well as the GT-State version (- Text Enc). Intuitively, this renders the text game into a large, deterministic MDP (instead of POMDP), where even very close states (e.g. same except a window is ajar or open) have completely different representations.
|
| 168 |
+
|
| 169 |
+
The results in Table 2 show a huge performance drop for DRRN-LoG and its GT-State version once the text encoders are removed; in other words, learning the text game as a tabular MDP without language semantics leads to much worse sample complexity, even when partial observability is solved. To explain why DRRN with the GT-State hash is much worse than DRRN with the observation hash
|
| 170 |
+
|
| 171 |
+
Table 1: Final episodic/maximum explored scores for different games. MPRC-DQN numbers with max scores correspond to a version change of the games, so we re-run their model and report the new results. Average normalized score (avg norm) is the model score divided by the maximum game score, averaged across games.
|
| 172 |
+
|
| 173 |
+
<table><tr><td rowspan="3">Game</td><td colspan="4">Ours</td><td colspan="4">Ours w/ GT-State</td></tr><tr><td colspan="2">Full Model</td><td colspan="2">- Text Enc.</td><td colspan="2">Full Model</td><td colspan="2">- Text Enc.</td></tr><tr><td>Avg</td><td>Max</td><td>Avg</td><td>Max</td><td>Avg</td><td>Max</td><td>Avg</td><td>Max</td></tr><tr><td>zork1</td><td>51.2</td><td>107</td><td>4.13</td><td>36.3</td><td>53.6</td><td>111</td><td>6.25</td><td>39.3</td></tr><tr><td>zork3</td><td>1.33</td><td>5</td><td>0.85</td><td>3</td><td>1.50</td><td>4.7</td><td>1.02</td><td>4</td></tr><tr><td>pentari</td><td>44.4</td><td>60</td><td>20.3</td><td>45</td><td>46.1</td><td>60</td><td>20</td><td>45</td></tr><tr><td>detective</td><td>288.8</td><td>313.3</td><td>281.3</td><td>313.3</td><td>289.9</td><td>310</td><td>280</td><td>290</td></tr><tr><td>ludicorp</td><td>16.7</td><td>23</td><td>10.15</td><td>22</td><td>15.9</td><td>23</td><td>9.5</td><td>21</td></tr><tr><td>inhumane</td><td>25.7</td><td>56.7</td><td>1.9</td><td>23</td><td>24.1</td><td>60</td><td>1.1</td><td>20</td></tr><tr><td>Avg. Norm</td><td>.36</td><td>.59</td><td>.21</td><td>.40</td><td>.37</td><td>.59</td><td>.22</td><td>.42</td></tr></table>
|
| 174 |
+
|
| 175 |
+
Table 2: The results of replacing our state representations with groundtruth state IDs (GT-State Full Model), as well as removing the text encoder (- Text Enc).
|
| 176 |
+
|
| 177 |
+
proposed in Yao et al. (2021), note that Eq. 3 still leverages the compositional structure of $(o_t, i_t, l_t)$ , e.g., two states with the same $i_t$ still share part of the state representation. This result confirms the importance of language for the RL challenge of large observation and action spaces: semantics-preserving function approximation (e.g., an RNN instead of a hash) could be key to interpolation (smooth value estimation for similar states) as well as extrapolation (efficient exploration based on language and commonsense priors).
|
| 178 |
+
|
| 179 |
+
Finally, we ablate individual components of DRRN-LoG on Zork I. Figure 2 shows that removing the language-learning auxiliary task of inverse dynamics (w/o invdy) or the language attention (w/o att) leads to worse scores, reconfirming that semantic language representations are vital for DRRN-LoG's success. On the other hand, removing the current whereabouts (w/o cur_room) hurts performance much more than removing location descriptions across the map (w/o last.Look), suggesting that location identification (Eq. 4) might be more important for solving partial observability.
|
| 180 |
+
|
| 181 |
+
# 5 Discussion
|
| 182 |
+
|
| 183 |
+
We propose a simple approach to deal with partial observability in text games, which could serve as a competitive baseline for future research, and also inspire similar investigations for other RL challenges to test the limits of memorization and the necessity
|
| 184 |
+
|
| 185 |
+
of NLU in different dimensions, which would in turn help identify flaws of current setups and propose better ones. We also hope our idea of best combining semantic and non-semantic language representations could be useful for building next-generation text game agents, as well as for other language applications with memorization needs such as closed-domain QA or goal-oriented dialog.
|
| 186 |
+
|
| 187 |
+
# Limitations
|
| 188 |
+
|
| 189 |
+
Our approach to retrieving the global state focuses on different locations. The simplicity of our method helps demonstrate the value and importance of involving non-semantic representations in text-based games. Moreover, our hash-based non-semantic representation abstracts away the differences among global state retrieval methods, as long as they can successfully distinguish different states. However, we acknowledge that more detailed designs are needed in order to generalize our method to other TBGs.
|
| 190 |
+
|
| 191 |
+
Another limitation is that our method focuses on text fictions, a specific type of text-based game. Most games of this type have many locations to explore, so our location-based approach can successfully distinguish different states. Although the direct applicability of our approach is limited, we believe the idea of combining semantic and non-semantic representations can help in other text-based games and NLP tasks.
|
| 192 |
+
|
| 193 |
+
# References
|
| 194 |
+
|
| 195 |
+
Prithviraj Ammanabrolu and Matthew Hausknecht. 2020. Graph constrained reinforcement learning for natural language action spaces. In International Conference on Learning Representations (ICLR).
|
| 196 |
+
Prithviraj Ammanabrolu, Ethan Tien, Matthew Hausknecht, and Mark O Riedl. 2020. How to avoid being eaten by a grue: Structured exploration strategies for textual worlds. arXiv preprint arXiv:2006.07409.
|
| 197 |
+
Gabor Angeli, Melvin Jose Johnson Premkumar, and Christopher D Manning. 2015. Leveraging linguistic structure for open domain information extraction. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 344-354.
|
| 198 |
+
Kyunghyun Cho, Bart Van Merrienboer, Dzmitry Bahdanau, and Yoshua Bengio. 2014. On the properties of neural machine translation: Encoder-decoder approaches. arXiv preprint arXiv:1409.1259.
|
| 199 |
+
|
| 200 |
+
Marc-Alexandre Côté, Ákos Kádár, Xingdi Yuan, Ben Kybartas, Tavian Barnes, Emery Fine, James Moore, Matthew Hausknecht, Layla El Asri, Mahmoud Adada, et al. 2018. Textworld: A learning environment for text-based games. In Workshop on Computer Games. Springer.
|
| 201 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of NAACL-HLT 2019, pages 4171-4186.
|
| 202 |
+
Xiaoxiao Guo, Mo Yu, Yupeng Gao, Chuang Gan, Murray Campbell, and Shiyu Chang. 2020. Interactive fiction game playing as multi-paragraph reading comprehension with reinforcement learning. arXiv preprint arXiv:2010.02386.
|
| 203 |
+
Matthew Hausknecht, Prithviraj Ammanabrolu, Marc-Alexandre Côté, and Xingdi Yuan. 2019. Interactive fiction games: A colossal adventure. arXiv preprint arXiv:1909.05398.
|
| 204 |
+
Ji He, Jianshu Chen, Xiaodong He, Jianfeng Gao, Lihong Li, Li Deng, and Mari Ostendorf. 2016. Deep reinforcement learning with a natural language action space. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1621-1630.
|
| 205 |
+
Keerthiram Murugesan, Mattia Atzeni, Pavan Kapanipathi, Kartik Talamadupula, Mrinmaya Sachan, and Murray Campbell. 2021. Efficient text-based reinforcement learning by jointly leveraging state and commonsense graph representations. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 719-725.
|
| 206 |
+
Karthik Narasimhan, Tejas Kulkarni, and Regina Barzilay. 2015. Language understanding for text-based games using deep reinforcement learning. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1-11.
|
| 207 |
+
Minjoon Seo, Aniruddha Kembhavi, Ali Farhadi, and Hannaneh Hajishirzi. 2016. Bidirectional attention flow for machine comprehension. arXiv preprint arXiv:1611.01603.
|
| 208 |
+
Shunyu Yao, Karthik Narasimhan, and Matthew Hausknecht. 2021. Reading and acting while blindfolded: The need for semantics in text game agents. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 3097-3102.
|
| 209 |
+
Shunyu Yao, Rohan Rao, Matthew Hausknecht, and Karthik Narasimhan. 2020. Keep calm and explore: Language models for action generation in text-based games. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8736-8754.
|
| 210 |
+
|
| 211 |
+
# A Algorithm of Our Approximated State Representation Construction
|
| 212 |
+
|
| 213 |
+
Algorithm 1 Infer the current location from nearby room names using depth-first search with limited depth. This helps distinguish different rooms that share the same name in most cases. We use depth = 1 in our runs.
|
| 214 |
+
|
| 215 |
+
<table><tr><td colspan="2">1: function LOCATE(env, depth)</td></tr><tr><td colspan="2">2: state ← current state of env</td></tr><tr><td colspan="2">3: room ← current room name in env</td></tr><tr><td colspan="2">4: nearby ← []</td></tr><tr><td colspan="2">5: for all direction do</td></tr><tr><td colspan="2">6: step env with action direction</td></tr><tr><td colspan="2">7: if env changed in last action then</td></tr><tr><td colspan="2">8: if depth > 1 then</td></tr><tr><td colspan="2">9: d ← LOCATE(env, depth - 1)</td></tr><tr><td colspan="2">10: else</td></tr><tr><td colspan="2">11: d ← current room name in env</td></tr><tr><td colspan="2">12: append (direction, d) to nearby</td></tr><tr><td colspan="2">13: set env with state</td></tr><tr><td colspan="2">14: return nearby</td></tr></table>
|
| 216 |
+
|
| 217 |
+
Algorithm 2 Maintain the state approximation with the description from the last time we visited each room. The result is then hashed to serve as the state hash in our model. For runs provided with the groundtruth room ID, we replace line 3 with the ground truth.
|
| 218 |
+
|
| 219 |
+
<table><tr><td>1:</td><td>state←{}</td></tr><tr><td>2:</td><td>function UPDATEANDGETSTATE(env)</td></tr><tr><td>3:</td><td>room←LOCATE(env,depth)</td></tr><tr><td>4:</td><td>look←look of the current state in env</td></tr><tr><td>5:</td><td>state[room]←look</td></tr><tr><td>6:</td><td>return state</td></tr></table>
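A companion sketch of Algorithm 2, together with the hashing step described in Appendix B, is shown below; the helper names are illustrative rather than an exact implementation.

```python
# Remember the latest "look" description of every identified room, then hash
# the whole mapping (Python's built-in hash) to obtain the non-semantic state code.
state = {}  # room identifier (from locate) -> latest location description

def update_and_get_state(env, depth=1):
    room = tuple(locate(env, depth))     # or the groundtruth room ID in oracle runs
    saved_state = env.get_state()
    look, _, _, _ = env.step("look")     # description of the current location
    env.set_state(saved_state)           # "look" itself should not advance the game
    state[room] = look
    return state

def state_hash(current_state):
    # Order-independent hash over (room, description) pairs.
    return hash(tuple(sorted((str(room), desc) for room, desc in current_state.items())))
```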
|
| 220 |
+
|
| 221 |
+
# B More Details of Our Model and Implementation
|
| 222 |
+
|
| 223 |
+
We follow the hyperparameters of Yao et al. (2021). For the state approximation, we use Python's built-in hash function. We train our model for $10^{5}$ steps, which takes about 40 hours on a Titan X or GeForce GTX 1080.
|
| 224 |
+
|
| 225 |
+
We use the latest Jericho version 3.1.0. Due to a bug in Zork I, we add a timeout in the library to filter out valid actions that cause the emulator to hang.
|
| 226 |
+
|
| 227 |
+
# B.1 Details of Our BiDAF Observation Encoder
|
| 228 |
+
|
| 229 |
+
In DRRN, the GRU is responsible both for memorizing high-scoring trajectories and for generalizing to unseen observations. In our method, the memorization power is provided by our hash codes of local graphs, which are better at distinguishing states. We thus aim to strengthen the generalization ability of the neural network and propose an attentive extension of the observation embedding.
|
| 230 |
+
|
| 231 |
+
Our key idea is based on the insight that the Q-value in DRRN is computed by matching the textual observation to a textual action. Since observations are usually much longer than actions, the effect of an action can usually be determined by its interaction with a local context in the observation. This can be naturally modeled with an attention mechanism. Specifically, we apply BiDAF (Seo et al., 2016) to match each observation component to the action.
|
| 232 |
+
|
| 233 |
+
BiDAF takes the observation and action embeddings and outputs an action-attended observation embedding. We denote the GRU embeddings for observation word $i$ and action word $j$ as $\pmb{o}_i$ and $\pmb{a}_j$ . The attention score from an observation word to an action word is $\alpha_{ij} = \exp (a_{ij}) / \sum_j\exp (a_{ij})$ , where $a_{ij} = \pmb{o}_i^{\top}\pmb{a}_j$ . We then compute the "action-to-observation" summary vector for the $i$ -th observation word as $\pmb{c}_i = \sum_j\alpha_{ij}\pmb{a}_j$ . We concatenate and project the output vectors as $\left[\pmb{o}_i,\pmb {c}_i,\pmb {o}_i\odot \pmb {c}_i,|\pmb {o}_i - \pmb {c}_i|\right]$ , followed by a linear layer with leaky ReLU activations. We apply the same steps to the inventory $i$ and location appearance $l$ . Finally, we have
|
| 234 |
+
|
| 235 |
+
$$
|
| 236 |
+
\begin{array}{rl} sr_{\mathrm{LoG}} = & \big[\operatorname{BiDAF}(\boldsymbol{o},\boldsymbol{a}),\ \operatorname{BiDAF}(\boldsymbol{i},\boldsymbol{a}),\ \operatorname{BiDAF}(\boldsymbol{l},\boldsymbol{a}), \\ & \;\operatorname{H}\!\left(po_{1}(c)\right),\ \operatorname{H}\!\left(po_{2}(po_{1}(c))\right)\big] \end{array} \tag{7}
|
| 237 |
+
$$
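For concreteness, a PyTorch sketch of this action-attended observation encoding (one BiDAF call in Eq. 7) is shown below; the module is an illustrative reimplementation, not the released code.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ActionAttendedObs(nn.Module):
    """BiDAF-style matching of one observation component to the action."""
    def __init__(self, hidden_dim):
        super().__init__()
        self.proj = nn.Linear(4 * hidden_dim, hidden_dim)

    def forward(self, o, a):
        # o: (obs_len, d) GRU embeddings of observation words
        # a: (act_len, d) GRU embeddings of action words
        scores = o @ a.t()                           # a_ij = o_i^T a_j
        alpha = F.softmax(scores, dim=-1)            # attention over action words
        c = alpha @ a                                # action-to-observation summaries c_i
        fused = torch.cat([o, c, o * c, (o - c).abs()], dim=-1)
        return F.leaky_relu(self.proj(fused))        # (obs_len, d)
```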
|
| 238 |
+
|
| 239 |
+
# C Additional Experiments with Oracle State Information
|
| 240 |
+
|
| 241 |
+
We investigate the performance of replacing our state approximation with the groundtruth room IDs (GT-Room).
|
| 242 |
+
|
| 243 |
+
Specifically, instead of using our state representation or the groundtruth state ID, we use the groundtruth ID of the room the player is located in. The room ID has a much lower dimensionality than the state ID and is a simple yet strong signal for game playing. However, it is only a shortcut for some game states. For fundamentally dealing
|
| 244 |
+
|
| 245 |
+
<table><tr><td rowspan="2">Game</td><td colspan="2">Ours</td><td colspan="2">Ours w/ GT-State</td><td rowspan="2">Ours w/ GT-Room</td></tr><tr><td>Full Model</td><td>- Text Enc.</td><td>Full Model</td><td>- Text Enc.</td></tr><tr><td>zork1</td><td>51.2/107</td><td>4.13/36.3</td><td>53.6/111</td><td>6.25/39.3</td><td>52.0/110</td></tr><tr><td>zork3</td><td>1.33/5</td><td>0.85/3</td><td>1.50/4.7</td><td>1.02/4</td><td>1.31/5</td></tr><tr><td>pentari</td><td>44.4/60</td><td>20.3/45</td><td>46.1/60</td><td>20/45</td><td>44.8/58</td></tr><tr><td>detective</td><td>288.8/313.3</td><td>281.3/313.3</td><td>289.9/310</td><td>280/290</td><td>289.6/300</td></tr><tr><td>ludicorp</td><td>16.7/23</td><td>10.15/22</td><td>15.9/23</td><td>9.5/21</td><td>16.9/23</td></tr><tr><td>inhumane</td><td>25.7/56.7</td><td>1.9/23</td><td>24.1/60</td><td>1.1/20</td><td>25.7/56.7</td></tr><tr><td>Avg. Norm</td><td>.36/.59</td><td>.21/.40</td><td>.37/.59</td><td>.22/.42</td><td>.36/.58</td></tr></table>
|
| 246 |
+
|
| 247 |
+
Table 3: Results (average/maximum scores) of replacing our state representation with groundtruth state IDs (GT-State), removing the text encoder (- Text Enc.), and replacing it with groundtruth room IDs (GT-Room).
|
| 248 |
+
|
| 249 |
+
with partial observability, the state representation should be able to capture more information beyond room IDs.
|
| 250 |
+
|
| 251 |
+
The experiment here is designed to verify whether our proposed representation learns more than room IDs. Intuitively, if a model only learns to mimic room IDs, its performance will be strictly below the oracle GT-Room results. Our results in Table 3 show, however, that our model is on par in terms of the average measure and even higher in terms of the maximum measure across the games. This confirms that our state approximation not only identifies the true location of the player, but also brings richer state information.
|
| 252 |
+
|
| 253 |
+
Together with the GT-State results, this demonstrates the advantage of our proposed state representation: it is on par with or better than both the sufficient but sparse state-ID oracle and the less informative but smoother room-ID oracle.
|
| 254 |
+
|
| 255 |
+
# D Zork I Trajectory of DRRN-LoG
|
| 256 |
+
|
| 257 |
+
Obs0: Copyright (c) 1981, 1982, 1983 Infocom, Inc. All rights reserved. ZORK is a registered trademark of Infocom, Inc. Revision 88 / Serial number 840726 West of House You are standing in an open field west of a white house, with a boarded front door. There is a small mailbox here. Inv: you are empty handed. Desc: west of house you are standing in an open field west of a white house, with a boarded front door. there is a small mailbox here.
|
| 258 |
+
|
| 259 |
+
Action0: north, Score 0
|
| 260 |
+
|
| 261 |
+
Obs1: North of House You are facing the north side of a white house. There is no door here, and all the windows are boarded up. To the north a narrow path winds through the trees. Inv: you are empty handed. Desc: north of house you are facing the north side of a white house. there is no door here, and all the windows are boarded up. to the north a narrow path winds through the trees.
|
| 262 |
+
|
| 263 |
+
Action1: east, Score 0
|
| 264 |
+
|
| 265 |
+
Obs2: Behind House You are behind the white house. A path leads into the forest to the east. In one corner of the house there is a small window which is slightly ajar. Inv: you are empty handed. Desc: behind house you are behind the white house. a path leads into the forest to the east. in one corner of the house there is a small window which is slightly ajar.
|
| 266 |
+
|
| 267 |
+
Action2: open window, Score 0
|
| 268 |
+
|
| 269 |
+
Obs3: With great effort, you open the window far enough to allow entry. Inv: you are empty handed. Desc: behind house you are behind the white house. a path leads into the forest to the east. in one corner of the house there is a small window which is open.
|
| 270 |
+
|
| 271 |
+
Action3: west, Score 10
|
| 272 |
+
|
| 273 |
+
Obs4: Kitchen You are in the kitchen of the white house. A table seems to have been used recently for the preparation of food. A passage leads to the west and a dark staircase can be seen leading upward. A dark chimney leads down and to the east is a small window which is open. On the table is an elongated brown sack, smelling of hot peppers. A bottle is sitting on the table. The glass bottle contains: A quantity of water Inv: you are empty handed. Desc: kitchen you are in the kitchen of the white house. a table seems to have been used recently for the preparation of food. a passage leads to the west and a dark staircase can be seen leading upward. a dark chimney leads down and to the east is a small window which is open. on the table is an elongated brown sack, smelling of hot peppers. a bottle is sitting on the table. the glass bottle contains: a quantity of water Action4: west, Score 10
|
| 274 |
+
|
| 275 |
+
Obs5: Living Room You are in the living
|
| 276 |
+
|
| 277 |
+
room. There is a doorway to the east, a wooden door with strange gothic lettering to the west, which appears to be nailed shut, a trophy case, and a large oriental rug in the center of the room. Above the trophy case hangs an elvish sword of great antiquity. A battery powered brass lantern is on the trophy case. Inv: you are empty handed. Desc: living room you are in the living room. there is a doorway to the east, a wooden door with strange gothic lettering to the west, which appears to be nailed shut, a trophy case, and a large oriental rug in the center of the room. above the trophy case hangs an elvish sword of great antiquity. a battery powered brass lantern is on the trophy case. Action5: push rug, Score 10
|
| 278 |
+
|
| 279 |
+
Obs6: With a great effort, the rug is moved to one side of the room, revealing the dusty cover of a closed trap door. Inv: you are empty handed. Desc: living room you are in the living room. there is a doorway to the east, a wooden door with strange gothic lettering to the west, which appears to be nailed shut, a trophy case, and a closed trap door at your feet. above the trophy case hangs an elvish sword of great antiquity. a battery powered brass lantern is on the trophy case.
|
| 280 |
+
|
| 281 |
+
Action6: take all, Score 10
|
| 282 |
+
|
| 283 |
+
Obs7: trophy case: The trophy case is securely fastened to the wall. sword: Taken. brass lantern: Taken. carpet: The rug is extremely heavy and cannot be carried. Inv: you are carrying: a brass lantern a sword Desc: living room you are in the living room. there is a doorway to the east, a wooden door with strange gothic lettering to the west, which appears to be nailed shut, a trophy case, and a closed trap door at your feet.
|
| 284 |
+
|
| 285 |
+
Action7: open trap, Score 10
|
| 286 |
+
|
| 287 |
+
Obs8: The door reluctantly opens to reveal a rickety staircase descending into darkness. Inv: you are carrying: a brass lantern a sword Desc: living room you are in the living room. there is a doorway to the east, a wooden door with strange gothic lettering to the west, which appears to be nailed shut, a trophy case, and a rug lying beside an open trap door.
|
| 288 |
+
|
| 289 |
+
Action8: open case, Score 10
|
| 290 |
+
|
| 291 |
+
Obs9: Opened. Inv: you are carrying: a brass lantern a sword Desc: living room you are in
|
| 292 |
+
|
| 293 |
+
the living room. there is a doorway to the east, a wooden door with strange gothic lettering to the west, which appears to be nailed shut, a trophy case, and a rug lying beside an open trap door.
|
| 294 |
+
|
| 295 |
+
Action9: down, Score 35
|
| 296 |
+
|
| 297 |
+
Obs10: You have moved into a dark place. The trap door crashes shut, and you hear someone barring it. It is pitch black. You are likely to be eaten by a grue. Your sword is glowing with a faint blue glow. Inv: you are carrying: a brass lantern a sword Desc: it is pitch black. you are likely to be eaten by a grue.
|
| 298 |
+
|
| 299 |
+
Action10: turn on lantern, Score 35
|
| 300 |
+
|
| 301 |
+
Obs11: The brass lantern is now on. Cellar You are in a dark and damp cellar with a narrow passageway leading north, and a crawl way to the south. On the west is the bottom of a steep metal ramp which is unclimbable. Inv: you are carrying: a brass lantern (providing light) a sword Desc: cellar you are in a dark and damp cellar with a narrow passageway leading north, and a crawl way to the south. on the west is the bottom of a steep metal ramp which is unclimbable.
|
| 302 |
+
|
| 303 |
+
Action11: north, Score 35
|
| 304 |
+
|
| 305 |
+
Obs12: The Troll Room This is a small room with passages to the east and south and a forbidding hole leading west. Bloodstains and deep scratches (perhaps made by an axe) mar the walls. A nasty looking troll, brandishing a bloody axe, blocks all passages out of the room. Your sword has begun to glow very brightly. Inv: you are carrying: a brass lantern (providing light) a sword the axe crashes against the rock, throwing sparks! Desc: the troll room this is a small room with passages to the east and south and a forbidding hole leading west. bloodstains and deep scratches (perhaps made by an axe) mar the walls. a nasty looking troll, brandishing a bloody axe, blocks all passages out of the room. the axe crashes against the rock, throwing sparks!
|
| 306 |
+
|
| 307 |
+
Action12: hit troll with sword, Score 35
|
| 308 |
+
|
| 309 |
+
Obs13: The haft of your sword knocks out the troll. Inv: you are carrying: a brass lantern (providing light) a sword Desc: the troll room this is a small room with passages to the east and south and a forbidding hole leading west. bloodstains and deep scratches (perhaps made by
|
| 310 |
+
|
| 311 |
+
an axe) mar the walls. there is a bloody axe here. an unconscious troll is sprawled on the floor. all passages out of the room are open.
|
| 312 |
+
|
| 313 |
+
Action13: west, Score 35
|
| 314 |
+
|
| 315 |
+
Obs14: Maze This is part of a maze of twisty little passages, all alike. Your sword is glowing with a faint blue glow. Inv: you are carrying: a brass lantern (providing light) a sword Desc: maze this is part of a maze of twisty little passages, all alike. Action14: west, Score 35
|
| 316 |
+
|
| 317 |
+
Obs15: Maze This is part of a maze of twisty little passages, all alike. Your sword is no longer glowing. Inv: you are carrying: a brass lantern (providing light) a sword Desc: maze this is part of a maze of twisty little passages, all alike.
|
| 318 |
+
|
| 319 |
+
Action15: west, Score 35
|
| 320 |
+
|
| 321 |
+
Obs16: Maze This is part of a maze of twisty little passages, all alike. Inv: you are carrying: a brass lantern (providing light) a sword Desc: maze this is part of a maze of twisty little passages, all alike. Action16: up, Score 35
|
| 322 |
+
|
| 323 |
+
Obs17: Maze This is part of a maze of twisty little passages, all alike. A skeleton, probably the remains of a luckless adventurer, lies here. Beside the skeleton is a rusty knife. The deceased adventurer's useless lantern is here. There is a skeleton key here. An old leather bag, bulging with coins, is here. Inv: you are carrying: a brass lantern (providing light) a sword Desc: maze this is part of a maze of twisty little passages, all alike. a skeleton, probably the remains of a luckless adventurer, lies here. beside the skeleton is a rusty knife. the deceased adventurer's useless lantern is here. there is a skeleton key here. an old leather bag, bulging with coins, is here.
|
| 324 |
+
|
| 325 |
+
Action17: put down sword, Score 35
|
| 326 |
+
|
| 327 |
+
Obs18: Dropped. Inv: you are carrying: a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike. a skeleton, probably the remains of a luckless adventurer, lies here. beside the skeleton is a rusty knife. the deceased adventurer's useless lantern is here. there is a sword here. there is a skeleton key here. an old leather bag, bulging with coins, is here.
|
| 328 |
+
|
| 329 |
+
Action18: take coins, Score 45
|
| 330 |
+
|
| 331 |
+
Obs19: Taken. Inv: you are carrying: a leather bag of coins a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike. a skeleton, probably the remains of a luckless adventurer, lies here. beside the skeleton is a rusty knife. the deceased adventurer's useless lantern is here. there is a sword here. there is a skeleton key here.
|
| 332 |
+
|
| 333 |
+
Action19: southwest, Score 45
|
| 334 |
+
|
| 335 |
+
Obs20: Maze This is part of a maze of twisty little passages, all alike. Inv: you are carrying: a leather bag of coins a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike.
|
| 336 |
+
|
| 337 |
+
Action20: put down coins, Score 45
|
| 338 |
+
|
| 339 |
+
Obs21: Dropped. Inv: you are carrying: a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike. an old leather bag, bulging with coins, is here.
|
| 340 |
+
|
| 341 |
+
Action21: take coins, Score 45
|
| 342 |
+
|
| 343 |
+
Obs22: Taken. Inv: you are carrying: a leather bag of coins a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike.
|
| 344 |
+
|
| 345 |
+
Action22: east, Score 45
|
| 346 |
+
|
| 347 |
+
Obs23: Maze This is part of a maze of twisty little passages, all alike. Inv: you are carrying: a leather bag of coins a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike.
|
| 348 |
+
|
| 349 |
+
Action23: put down coins, Score 45
|
| 350 |
+
|
| 351 |
+
Obs24: Dropped. Inv: you are carrying: a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike. an old leather bag, bulging with coins, is here.
|
| 352 |
+
|
| 353 |
+
Action24: up, Score 45
|
| 354 |
+
|
| 355 |
+
Obs25: Maze This is part of a maze of twisty little passages, all alike. Inv: you are carrying: a brass lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike.
|
| 356 |
+
|
| 357 |
+
Action25: west, Score 45
|
| 358 |
+
|
| 359 |
+
Obs26: Maze This is part of a maze of twisty little passages, all alike. Inv: you are carrying: a brass
|
| 360 |
+
|
| 361 |
+
lantern (providing light) Desc: maze this is part of a maze of twisty little passages, all alike.
|
| 362 |
+
|
| 363 |
+
Action26: southeast, Score 45
|
| 364 |
+
|
| 365 |
+
Obs27: Cyclops Room This room has an exit on the northwest, and a staircase leading up. A cyclops, who looks prepared to eat horses (much less mere adventurers), blocks the staircase. From his state of health, and the bloodstains on the walls, you gather that he is not very friendly, though he likes people. Inv: you are carrying: a brass lantern (providing light) Desc: cyclops room this room has an exit on the northwest, and a staircase leading up. a cyclops, who looks prepared to eat horses (much less mere adventurers), blocks the staircase. from his state of health, and the bloodstains on the walls, you gather that he is not very friendly, though he likes people.
|
| 366 |
+
|
| 367 |
+
Action27: odyssey, Score 45
|
| 368 |
+
|
| 369 |
+
Obs28: The cyclops, hearing the name of his father's deadly nemesis, flees the room by knocking down the wall on the east of the room. Inv: you are carrying: a brass lantern (providing light) Desc: cyclops room this room has an exit on the northwest, and a staircase leading up. the east wall, previously solid, now has a cyclops sized opening in it.
|
| 370 |
+
|
| 371 |
+
Action28: up, Score 70
|
| 372 |
+
|
| 373 |
+
Obs29: You hear a scream of anguish as you violate the robber's hideaway. Using passages unknown to you, he rushes to its defense. The thief gestures mysteriously, and the treasures in the room suddenly vanish. Treasure Room This is a large room, whose east wall is solid granite. A number of discarded bags, which crumble at your touch, are scattered about on the floor. There is an exit down a staircase. There is a suspicious looking individual, holding a large bag, leaning against one wall. He is armed with a deadly stiletto. There is a silver chalice, intricately engraved, here. The thief draws blood, raking his stiletto across your arm. Inv: you are carrying: a brass lantern (providing light) you dodge as the thief comes in low. Desc: treasure room this is a large room, whose east wall is solid granite. a number of discarded bags, which crumble at your touch, are scattered about on the floor. there is an exit down a staircase. there is a suspicious looking individual, holding a large bag, leaning against one wall. he is armed with a
|
| 374 |
+
|
| 375 |
+
deadly stiletto. there is a silver chalice, intricately engraved, here. you dodge as the thief comes in low.
|
| 376 |
+
|
| 377 |
+
Action29: temple, Score 70
|
| 378 |
+
|
| 379 |
+
Obs30: Temple This is the north end of a large temple. On the east wall is an ancient inscription, probably a prayer in a long forgotten language. Below the prayer is a staircase leading down. The west wall is solid granite. The exit to the north end of the room is through huge marble pillars. There is a brass bell here. Inv: you are carrying: a brass lantern (providing light) Desc: temple this is the north end of a large temple. on the east wall is an ancient inscription, probably a prayer in a long forgotten language. below the prayer is a staircase leading down. the west wall is solid granite. the exit to the north end of the room is through huge marble pillars. there is a brass bell here.
|
| 380 |
+
|
| 381 |
+
Action30: throw lantern at prayer, Score 70
|
revisitingtherolesoftextintextgames/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:db6e36ab6739f20a5334c0cde216edb909736083368f02a1ca68d8dc09e95d32
|
| 3 |
+
size 257614
|
revisitingtherolesoftextintextgames/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:da3a807582607165afe4d9c4e09a1089a4154cb7dbec407905c77080192a0ab9
|
| 3 |
+
size 338803
|
revisitingtransformerbasedmodelsforlongdocumentclassification/4a5cfe1e-a844-47bf-b5de-92c2d02317b6_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4241397a8f64288e4d32f31dbfca0b4114a08dc6918a53674b905783e39ee641
|
| 3 |
+
size 113264
|
revisitingtransformerbasedmodelsforlongdocumentclassification/4a5cfe1e-a844-47bf-b5de-92c2d02317b6_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c35d2f70bbf39c38f89e281ac33e27800b74673d0a12d1c4c0c8910c2d3dea4a
|
| 3 |
+
size 135395
|
revisitingtransformerbasedmodelsforlongdocumentclassification/4a5cfe1e-a844-47bf-b5de-92c2d02317b6_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5702d6c142d6c70984a1892407f012eeb990e66b9802ecf733a1617d351e245f
|
| 3 |
+
size 625693
|
revisitingtransformerbasedmodelsforlongdocumentclassification/full.md
ADDED
|
@@ -0,0 +1,442 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Revisiting Transformer-based Models for Long Document Classification
|
| 2 |
+
|
| 3 |
+
Xiang Dai *
|
| 4 |
+
|
| 5 |
+
CSIRO Data61
|
| 6 |
+
|
| 7 |
+
dai.dai@csiro.au
|
| 8 |
+
|
| 9 |
+
Sune Darkner
|
| 10 |
+
|
| 11 |
+
University of Copenhagen
|
| 12 |
+
|
| 13 |
+
darkner@di.ku.dk
|
| 14 |
+
|
| 15 |
+
Ilias Chalkidis
|
| 16 |
+
|
| 17 |
+
University of Copenhagen
|
| 18 |
+
|
| 19 |
+
ilias.chalkidis@di.ku.dk
|
| 20 |
+
|
| 21 |
+
Desmond Elliott
|
| 22 |
+
|
| 23 |
+
University of Copenhagen
|
| 24 |
+
|
| 25 |
+
Pioneer Centre for AI
|
| 26 |
+
|
| 27 |
+
de@di.ku.dk
|
| 28 |
+
|
| 29 |
+
# Abstract
|
| 30 |
+
|
| 31 |
+
The recent literature in text classification is biased towards short text sequences (e.g., sentences or paragraphs). In real-world applications, multi-page multi-paragraph documents are common and they cannot be efficiently encoded by vanilla Transformer-based models. We compare different Transformer-based Long Document Classification (TrLDC) approaches that aim to mitigate the computational overhead of vanilla transformers to encode much longer text, namely sparse attention and hierarchical encoding methods. We examine several aspects of sparse attention (e.g., size of the local attention window, use of global attention) and hierarchical transformers (e.g., document splitting strategy) on four document classification datasets covering different domains. We observe a clear benefit from being able to process longer text, and, based on our results, we derive practical advice on applying Transformer-based models to long document classification tasks.<sup>1</sup>
|
| 32 |
+
|
| 33 |
+
# 1 Introduction
|
| 34 |
+
|
| 35 |
+
Natural language processing has been revolutionised by the large scale self-supervised pretraining of language encoders (Devlin et al., 2019; Liu et al., 2019), which are fine-tuned in order to solve a wide variety of downstream classification tasks. However, the recent literature in text classification mostly focuses on short sequences, such as sentences or paragraphs (Sun et al., 2019; Adhikari et al., 2019; Mosbach et al., 2021), which are sometimes misleadingly named as documents.
|
| 36 |
+
|
| 37 |
+
The transition from short-to-long document classification is non-trivial. One challenge is that BERT and most of its variants are pre-trained on
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
Figure 1: The effectiveness of Longformer, a long-document Transformer, on the MIMIC-III development set. There is a clear benefit from being able to process longer text.
|
| 41 |
+
|
| 42 |
+
sequences containing up to 512 tokens, which is hardly a long document. A common practice is to truncate actually long documents to the first 512 tokens, which allows the immediate application of these pre-trained models (Adhikari et al., 2019; Chalkidis et al., 2020). We believe this is an insufficient approach for long document classification because truncating the text may omit important information, leading to poor classification performance (Figure 1). Another challenge comes from the computational overhead of the vanilla Transformer: in the multi-head self-attention operation (Vaswani et al., 2017), each token in a sequence of $n$ tokens attends to all other tokens. This results in $O(n^2)$ time and memory complexity, which makes it challenging to efficiently process long documents.
|
| 43 |
+
|
| 44 |
+
In response to the second challenge, long-document Transformers have emerged to deal with long sequences (Beltagy et al., 2020; Zaheer et al., 2020). However, they experiment and report results on non-ideal long document classification datasets: documents in the IMDB dataset are not really long (fewer than $15\%$ of examples exceed 512 tokens), while the Hyperpartisan
|
| 45 |
+
|
| 46 |
+
dataset only has very few (645 in total) documents. On datasets with longer documents, such as the MIMIC-III dataset (Johnson et al., 2016) with an average length of 2,000 words, it has been shown that multiple variants of BERT perform worse than a CNN or RNN-based model (Chalkidis et al., 2020; Vu et al., 2020; Dong et al., 2021; Ji et al., 2021a; Gao et al., 2021; Pascual et al., 2021). We believe there is a need to understand the performance of Transformer-based models on classifying documents that are actually long.
|
| 47 |
+
|
| 48 |
+
In this work, we aim to transfer the success of the pre-train-fine-tune paradigm to long document classification. Our main contributions are:
|
| 49 |
+
|
| 50 |
+
- We compare different long document classification approaches based on the transformer architecture, namely sparse attention and hierarchical methods. Our results show that processing more tokens brings drastic improvements compared to processing up to 512 tokens.
|
| 51 |
+
- We conduct careful analyses to understand the impact of several design options on both the effectiveness and efficiency of different approaches. Our results show that some design choices (e.g., the size of the local attention window in the sparse attention method) can be adjusted to improve efficiency without sacrificing effectiveness, whereas others (e.g., the document splitting strategy in the hierarchical method) vastly affect effectiveness.
|
| 52 |
+
- Last but not least, our results show that, contrary to previous claims, Transformer-based models can outperform former state-of-the-art CNN-based models on the MIMIC-III dataset.
|
| 53 |
+
|
| 54 |
+
# 2 Problem Formulation and Datasets
|
| 55 |
+
|
| 56 |
+
We divide the document classification model into two components: (1) a document encoder, which builds a vector representation of a given document; and (2) a classifier that predicts a single label or multiple labels given the encoded vector. In this work, we mainly focus on the first component: we use Transformer-based encoders to build a document representation, and then feed the encoded document representation to a classifier. For the second component, we use a TANH-activated hidden layer followed by the output layer. Output probabilities are obtained by applying a SIGMOID
|
| 57 |
+
|
| 58 |
+

|
| 59 |
+
Figure 2: The distribution of document lengths. A log-10 scale is used for the X axis.
|
| 60 |
+
|
| 61 |
+
(multi-label) or SOFTMAX (multi-class) function to the output logits. $^3$
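For illustration, a minimal PyTorch sketch of such a classification head is given below; the hidden size and label count are placeholders.

```python
import torch
import torch.nn as nn

class ClassificationHead(nn.Module):
    """Sketch of the classifier: a TANH-activated hidden layer, then the output layer."""
    def __init__(self, hidden_size=768, num_labels=50, multilabel=True):
        super().__init__()
        self.hidden = nn.Linear(hidden_size, hidden_size)
        self.out = nn.Linear(hidden_size, num_labels)
        self.multilabel = multilabel

    def forward(self, doc_vector):
        logits = self.out(torch.tanh(self.hidden(doc_vector)))
        # SIGMOID for multi-label outputs, SOFTMAX for multi-class outputs.
        return torch.sigmoid(logits) if self.multilabel else torch.softmax(logits, dim=-1)
```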
|
| 62 |
+
|
| 63 |
+
We mainly conduct our experiments on the MIMIC-III dataset (Johnson et al., 2016), where researchers still fail to transfer "the Magic of BERT" to medical code assignment tasks (Ji et al., 2021a; Pascual et al., 2021).
|
| 64 |
+
|
| 65 |
+
MIMIC-III contains Intensive Care Unit (ICU) discharge summaries, each of which is annotated with multiple labels—diagnoses and procedures—using the ICD-9 (The International Classification of Diseases, Ninth Revision) hierarchy. Following Mullenbach et al. (2018), we conduct experiments using the top 50 frequent labels. $^{4}$
|
| 66 |
+
|
| 67 |
+
To address the generalisation concern, we also use three datasets from other domains: ECtHR (Chalkidis et al., 2022) sourced from legal cases, Hyperpartisan (Kiesel et al., 2019) and 20 News (Joachims, 1997), both from news articles.
|
| 68 |
+
|
| 69 |
+
ECtHR contains legal cases from The European Court of Human Rights' public database. The court hears allegations that a state has breached human rights provisions of the European Convention of Human Rights, and each case is mapped to one or more articles of the convention that were allegedly violated.[5]
|
| 70 |
+
|
| 71 |
+
Hyperpartisan contains news articles which are
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
Figure 3: A comparison of three types of attention operations. The example sequence contains 7 tokens; we set the local attention window size to 2, and only the first token uses global attention. Note that these connections are bidirectional, i.e., tokens can attend to each other.
|
| 75 |
+
|
| 76 |
+
manually labelled as hyperpartisan (taking an extreme left or right standpoint) or not.6
|
| 77 |
+
|
| 78 |
+
20 News contains newsgroup posts which are categorised into 20 topics.<sup>7</sup>
|
| 79 |
+
|
| 80 |
+
We note that documents in MIMIC-III and ECtHR are much longer than those in Hyperpartisan and 20 News (Table 5 in Appendix and Figure 2).
|
| 81 |
+
|
| 82 |
+
# 3 Approaches
|
| 83 |
+
|
| 84 |
+
In the era of Transformer-based models, we identify two representative approaches for processing long documents in the literature: those that act as an inexpensive drop-in replacement for vanilla self-attention (i.e., sparse attention) and those that build a task-specific architecture (i.e., hierarchical Transformers).
|
| 85 |
+
|
| 86 |
+
# 3.1 Sparse-Attention Transformers
|
| 87 |
+
|
| 88 |
+
The vanilla transformer relies on the multi-head self-attention mechanism, which scales poorly with the length of the input sequence, requiring quadratic computation time and memory to store all scores that are used to compute the gradients during back-propagation (Qiu et al., 2020). Several Transformer-based models (Kitaev et al., 2020; Tay et al., 2020; Choromanski et al., 2021) have been proposed that explore efficient alternatives for processing long sequences.
|
| 89 |
+
|
| 90 |
+
Longformer (Beltagy et al., 2020) combines local (window-based) attention with global attention, which reduces the computational complexity of the model so that it can process up to 4096 tokens. Local attention is computed
|
| 91 |
+
|
| 92 |
+

|
| 93 |
+
Figure 4: A high-level illustration of hierarchical Transformers. A shared pre-trained RoBERTa is used to encode each segment, and two transformer layers are used to capture the interaction between different segments. Finally, contextual segment representations are aggregated into a document representation.
|
| 94 |
+
|
| 95 |
+
within a window of neighbouring (consecutive) tokens. Global attention relies on the idea of global tokens that can attend to, and be attended by, any other token in the sequence (Figure 3). BigBird (Zaheer et al., 2020) is another sparse-attention Transformer that uses a combination of local, global and random attention, i.e., all tokens also attend to a number of random tokens on top of those in the same neighbourhood. Both models are warm-started from the public RoBERTa checkpoint and are further pre-trained on masked language modelling. They have been reported to outperform RoBERTa on a range of tasks that require modelling long sequences.
|
| 96 |
+
|
| 97 |
+
We choose Longformer (Beltagy et al., 2020) in this study and refer readers to Xiong et al. (2021) for a systematic comparison of recent proposed efficient attention variants.
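For concreteness, the sketch below shows how local plus global attention is typically specified with the Hugging Face Longformer implementation; the checkpoint name and preprocessing are illustrative and not necessarily the exact configuration used in these experiments.

```python
import torch
from transformers import LongformerModel, LongformerTokenizer

tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")
model = LongformerModel.from_pretrained("allenai/longformer-base-4096")

document_text = "..."  # a long discharge summary, legal case, or news article
inputs = tokenizer(document_text, max_length=4096, truncation=True, return_tensors="pt")

# Local attention is applied everywhere; global attention is marked explicitly.
global_attention_mask = torch.zeros_like(inputs["input_ids"])
global_attention_mask[:, 0] = 1  # [CLS] attends to, and is attended by, all tokens

outputs = model(**inputs, global_attention_mask=global_attention_mask)
doc_vector = outputs.last_hidden_state[:, 0]  # [CLS] representation fed to the classifier
```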
|
| 98 |
+
|
| 99 |
+
# 3.2 Hierarchical Transformers
|
| 100 |
+
|
| 101 |
+
Instead of modifying the multi-head self-attention mechanism to efficiently model long sequences, hierarchical Transformers build on top of the vanilla transformer architecture.
|
| 102 |
+
|
| 103 |
+
A document, $\mathcal{D} = \{t_0,t_1,\dots ,t_{|\mathcal{D}|}\}$ , is first split into segments, each of which has fewer than 512 tokens. These segments can be independently encoded by any pre-trained Transformer-based encoder (e.g., RoBERTa in Figure 4). We sum the contextual representation of the first token of each segment with a segment position embedding to obtain the segment representation (i.e., $n_i$ in Figure 4). Then the segment encoder, consisting of two transformer blocks (Zhang et al., 2019), is used to
|
| 104 |
+
|
| 105 |
+
capture the interaction between segments and output a list of contextual segment representations (i.e., $s_i$ in Figure 4), which are finally aggregated into a document representation. By default, the aggregator is the max-pooling operation unless otherwise specified.
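A condensed PyTorch sketch of this hierarchical encoder is shown below; it assumes a RoBERTa segment encoder and treats the exact dimensions and pooling details as placeholders.

```python
import torch
import torch.nn as nn
from transformers import AutoModel

class HierarchicalEncoder(nn.Module):
    def __init__(self, max_segments=32, hidden=768):
        super().__init__()
        self.segment_encoder = AutoModel.from_pretrained("roberta-base")  # shared across segments
        self.segment_pos = nn.Embedding(max_segments, hidden)
        layer = nn.TransformerEncoderLayer(d_model=hidden, nhead=12, batch_first=True)
        self.cross_segment = nn.TransformerEncoder(layer, num_layers=2)   # two transformer blocks

    def forward(self, input_ids, attention_mask):
        # input_ids / attention_mask: (num_segments, segment_len) for one document
        out = self.segment_encoder(input_ids=input_ids, attention_mask=attention_mask)
        seg_repr = out.last_hidden_state[:, 0]                  # first-token vector per segment
        positions = torch.arange(seg_repr.size(0), device=seg_repr.device)
        seg_repr = seg_repr + self.segment_pos(positions)       # add segment position embeddings
        contextual = self.cross_segment(seg_repr.unsqueeze(0))  # (1, num_segments, hidden)
        return contextual.max(dim=1).values                     # max-pool into a document vector
```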
|
| 106 |
+
|
| 107 |
+
# 4 Experimental Setup
|
| 108 |
+
|
| 109 |
+
Backbone Models We mainly consider two models in our experiments: Longformer-base (Beltagy et al., 2020), and RoBERTa-base (Liu et al., 2019) which is used in hierarchical Transformers.
|
| 110 |
+
|
| 111 |
+
Evaluation metrics For the MIMIC-III (multilabel) dataset, we follow previous work (Mullenbach et al., 2018; Cao et al., 2020) and use micro-averaged AUC (Area Under the receiver operating characteristic Curve), macro-averaged AUC, micro-averaged $F_{1}$ , macro-averaged $F_{1}$ and Precision@5—the proportion of the ground truth labels in the top-5 predicted labels—as the metrics. We report micro and macro averaged $F_{1}$ for the ECtHR (multilabel) dataset, and accuracy for both Hyperpartisan (binary) and 20 News (multiclass) datasets.
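As a concrete reading of the Precision@5 definition above, a small sketch (with placeholder array names) is:

```python
import numpy as np

def precision_at_5(y_true, y_scores):
    """P@5: fraction of the five highest-scoring predicted labels that are gold labels."""
    top5 = np.argsort(-y_scores, axis=1)[:, :5]             # indices of the top-5 labels per document
    hits = np.take_along_axis(y_true, top5, axis=1).sum(1)  # how many of them are ground truth
    return float((hits / 5.0).mean())
```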
|
| 112 |
+
|
| 113 |
+
# 5 Experiments
|
| 114 |
+
|
| 115 |
+
We conduct a series of controlled experiments to understand the impact of design choices in different TrLDC models. Bringing these optimal choices together, we compare TrLDC against the state of the art, as well as against baselines that only process up to 512 tokens. Finally, based on our investigation, we derive practical advice on applying transformer-based models to long document classification regarding both effectiveness and efficiency.
|
| 116 |
+
|
| 117 |
+
Task-adaptive pre-training is a promising first step. Domain-adaptive pre-training (DAPT), the continued pre-training of a language model on a large corpus of domain-specific text, is known to improve downstream task performance (Gururangan et al., 2020; Kær Jørgensen et al., 2021). However, task-adaptive pre-training (TAPT), which continues unsupervised pre-training on the task's own data, is comparatively less studied, mainly because most benchmarking corpora are small and thus the benefit of TAPT seems less obvious than that of DAPT.
|
| 118 |
+
|
| 119 |
+
We believe document classification datasets, due to their relatively large size, can benefit from TAPT. On both MIMIC-III and ECtHR, we continue to pre-train Longformer and RoBERTa
|
| 120 |
+
|
| 121 |
+

|
| 122 |
+
(a) Longformer on MIMIC-III
|
| 123 |
+
|
| 124 |
+

|
| 125 |
+
(b) RoBERTa on MIMIC-III
|
| 126 |
+
|
| 127 |
+

|
| 128 |
+
(c) Longformer on ECtHR
|
| 129 |
+
|
| 130 |
+

|
| 131 |
+
(d) RoBERTa on ECtHR
|
| 132 |
+
Figure 5: Task-adaptive pre-training (right side in each plot) can improve the effectiveness (measured on the development sets) of pre-trained models by a large margin on MIMIC-III, but only slightly on ECtHR. $\Delta$ : the difference between mean values of compared experiments.
|
| 133 |
+
|
| 134 |
+
using the masked language modelling pre-training objective (details about pre-training can be found in Appendix 9.3). We find that task-adaptive pre-trained models substantially improve performance on MIMIC-III (Figure 5 (a) and (b)), but there are smaller improvements on ECtHR (Figure 5 (c) and (d)). We suspect this difference arises because legal cases (i.e., ECtHR) are publicly available and have been covered by the pre-training data used for Longformer and RoBERTa, whereas clinical notes (i.e., MIMIC-III) have not (Dodge et al., 2021). See Appendix 9.5 for a short analysis on this matter.
|
| 135 |
+
|
| 136 |
+
We also compare our TAPT-RoBERTa against publicly available domain-specific RoBERTa models trained from scratch on biomedical articles and clinical notes. Results (Figure 8 in the Appendix) show that TAPT-RoBERTa outperforms the domain-specific base model but underperforms the larger model.
|
| 137 |
+
|
| 138 |
+
# 5.1 Longformer
|
| 139 |
+
|
| 140 |
+
Small local attention windows are effective and efficient. Beltagy et al. (2020) observe that many tasks do not require reasoning over the entire context. For example, they find that the distance between any two mentions in a coreference resolution dataset (i.e., OntoNotes) is small, and it is possible to achieve competitive performance by processing small segments containing these mentions.
|
| 141 |
+
|
| 142 |
+
Inspired by this observation, we investigate the impact of local context size on document classification, regarding both effectiveness and efficiency.
|
| 143 |
+
|
| 144 |
+
<table><tr><td rowspan="2">Size</td><td rowspan="2">Micro F1</td><td colspan="2">Speed</td></tr><tr><td>Train</td><td>Test</td></tr><tr><td>32</td><td>67.9 ±0.3</td><td>9.9 (2.9x)</td><td>45.6 (2.8x)</td></tr><tr><td>64</td><td>68.1 ±0.1</td><td>8.8 (2.6x)</td><td>41.4 (2.5x)</td></tr><tr><td>128</td><td>68.3 ±0.3</td><td>7.4 (2.1x)</td><td>34.1 (2.1x)</td></tr><tr><td>256</td><td>68.4 ±0.3</td><td>5.5 (1.6x)</td><td>25.4 (1.6x)</td></tr><tr><td>512</td><td>68.5 ±0.3</td><td>3.5 (1.0x)</td><td>16.3 (1.0x)</td></tr></table>
|
| 145 |
+
|
| 146 |
+
Table 1: The impact of local attention window size in Longformer on MIMIC-III development set. Speed is measured using 'processed samples per second', and numbers in parenthesis are the relative speedup.
|
| 147 |
+
|
| 148 |
+

|
| 149 |
+
Figure 6: The effect of applying global attention on more tokens, which are evenly chosen based on their positions. In the baseline model (first column), only the [CLS] token uses global attention.
|
| 150 |
+
|
| 151 |
+
We hypothesise that long document classification, which is usually paired with a large label space, can be performed by models that only attend over short sequences instead of the entire document (Gao et al., 2021). In this experiment, we vary the local attention window around each token.
|
| 152 |
+
|
| 153 |
+
Table 1 shows that even with a small window size, the micro $F_{1}$ score on the MIMIC-III development set remains close to that of a larger window size. We observe the same pattern on ECtHR and 20 News (see Table 11 in the Appendix). A major advantage of smaller local attention windows is faster training and evaluation.
|
| 154 |
+
|
| 155 |
+
Considering a small number of tokens for global attention improves the stability of the training process. Longformer relies heavily on the [CLS] token, which is the only token with global attention, attending to all other tokens and being attended by all of them. We investigate whether allowing more tokens to use global attention can improve model performance, and if so, how to choose which tokens should use global attention.
|
| 156 |
+
|
| 157 |
+
Figure 6 shows that adding more tokens with global attention does not improve the $F_{1}$ score, while a
|
| 158 |
+
|
| 159 |
+

|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
Figure 7: The effect of varying the segment length and whether allowing segments to overlap in the hierarchical Transformers. $\Delta$ : improvement due to overlap.
|
| 163 |
+
|
| 164 |
+
small number of additional global attention tokens can make the training more stable.
|
| 165 |
+
|
| 166 |
+
Equally distributing global tokens across the sequence is better than content-based attribution. We consider two approaches for choosing the additional tokens that use global attention: position based or content based. In the position-based approach, we distribute $n$ additional tokens at equal distances. For example, if $n = 4$ and the sequence length is 4096, global attention is applied to the tokens at positions 0, 1024, 2048 and 3072. In the content-based approach, we identify informative tokens using TF-IDF (Term Frequency-Inverse Document Frequency) within each document, and we apply global attention to the top-$K$ informative tokens, together with the [CLS] token. Results show that the position-based approach is more effective than the content-based one (see Table 13 in the Appendix).
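A small sketch of the position-based assignment is shown below; it marks [CLS] plus $n$ evenly spaced tokens in a Longformer-style global attention mask.

```python
import torch

def position_based_global_mask(seq_len=4096, n_extra=4):
    """Mark [CLS] plus n_extra evenly spaced tokens for global attention."""
    mask = torch.zeros(1, seq_len, dtype=torch.long)
    mask[:, 0] = 1                               # [CLS] always uses global attention
    step = seq_len // n_extra
    mask[:, torch.arange(0, seq_len, step)] = 1  # e.g. positions 0, 1024, 2048, 3072
    return mask
```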
|
| 167 |
+
|
| 168 |
+
# 5.2 Hierarchical Transformers
|
| 169 |
+
|
| 170 |
+
The optimal segment length is dataset dependent. Ji et al. (2021a) and Gao et al. (2021) reported negative results with a hierarchical Transformer with a segment length of 512 tokens on the
|
| 171 |
+
|
| 172 |
+
MIMIC-III dataset. Their methods involved splitting a document into equally sized segments, which were processed using a shared BERT encoder. Instead of splitting the documents into such large segments, we investigate the impact of segment length and preventing context fragmentation.
|
| 173 |
+
|
| 174 |
+
Figure 7 (left side of each violin plot) shows that there is no single optimal segment length across MIMIC-III and ECtHR. Small segment lengths work well on MIMIC-III, and segment lengths greater than 128 start to decrease performance. In contrast, the ECtHR dataset benefits from larger segment lengths. The best-performing segment lengths on 20 News and Hyperpartisan are 256 and 128, respectively (see Table 14 in the Appendix).
|
| 175 |
+
|
| 176 |
+
Splitting documents into overlapping segments can alleviate the context fragmentation problem. Splitting a long document into smaller segments may cause context fragmentation, where a model lacks the information it needs to make a prediction (Dai et al., 2019; Ding et al., 2021). Although the hierarchical model uses a second-order transformer to fuse and contextualise information across segments, we investigate a simple way to alleviate context fragmentation: allowing segments to overlap when we split a document. That is, except for the first segment, the first $\frac{1}{4} n$ tokens of each segment are taken from the previous segment, where $n$ is the segment length. Figure 7 (right side of each violin plot) shows that this simple strategy can easily improve the effectiveness of the model.
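A sketch of this overlapping split is given below; except for the first segment, each segment re-reads the last quarter of its predecessor.

```python
def split_with_overlap(token_ids, segment_len):
    """Split a token sequence into segments of segment_len with 1/4 overlap."""
    stride = segment_len - segment_len // 4   # the first n/4 tokens repeat the previous segment
    segments = []
    for start in range(0, len(token_ids), stride):
        segments.append(token_ids[start:start + segment_len])
        if start + segment_len >= len(token_ids):
            break
    return segments
```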
|
| 177 |
+
|
| 178 |
+
Splitting based on document structure. Chalkidis et al. (2022) argue that we should follow the structure of a document when splitting it into segments (Tang et al., 2015; Yang et al., 2016). They propose a hierarchical Transformer for the ECtHR dataset that splits a document at the paragraph level, reading up to 64 paragraphs of 128 tokens each (8192 tokens in total).
|
| 179 |
+
|
| 180 |
+
We investigate whether splitting based on document structure is better than splitting a long document into segments of the same length. Similar to their model, we consider each paragraph as a segment, and all segments are then truncated or padded to the same segment length. We follow Chalkidis et al. (2022) and use a segment length ($l$) of 128 on ECtHR, and tune $l\in \{32,64,128\}$ on MIMIC-III.
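For reference, a minimal sketch of this structure-based splitting (our own illustrative code; the padding token and the 64-paragraph cap follow the setup described above) treats each paragraph as one segment and truncates or pads it to length $l$.

```python
# Sketch: one segment per paragraph, truncated or padded to segment length l.
def split_by_paragraphs(paragraphs, l, pad_token="<pad>", max_segments=64):
    segments = []
    for para in paragraphs[:max_segments]:
        seg = para[:l] + [pad_token] * max(0, l - len(para))
        segments.append(seg)
    return segments
```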
|
| 181 |
+
|
| 182 |
+
Results show that splitting by the paragraph-level document structure does not improve performance on the ECtHR dataset. On MIMIC-III, splitting based on document structure substantially underperforms evenly splitting the document (Figure 9 in the Appendix).
|
| 183 |
+
|
| 184 |
+
# 5.3 Label-wise Attention Network
|
| 185 |
+
|
| 186 |
+
Recall from Section 3 that our models form a single document vector which is used for the final prediction. That is, in Longformer, we use the hidden state of the [CLS] token; in hierarchical models, we use a max pooling operation to aggregate the list of contextual segment representations into a document vector. The Label-Wise Attention Network (LWAN) (Mullenbach et al., 2018; Xiao et al., 2019; Chalkidis et al., 2020) is an alternative that allows the model to learn distinct document representations for each label. Given a sequence of hidden representations (e.g., contextual token representations in Longformer or contextual segment representations in hierarchical models, $S = [s_0, s_1, \dots, s_m]$), LWAN allows each label to learn to attend to different positions via:
|
| 187 |
+
|
| 188 |
+
$$
|
| 189 |
+
\boldsymbol{a}_{\ell} = \operatorname{SoftMax}\left(\boldsymbol{S}^{\top} \boldsymbol{u}_{\ell}\right) \tag{1}
|
| 190 |
+
$$
|
| 191 |
+
|
| 192 |
+
$$
|
| 193 |
+
\boldsymbol{v}_{\ell} = \sum_{i=1}^{m} \boldsymbol{a}_{\ell, i} \, \boldsymbol{s}_{i} \tag{2}
|
| 194 |
+
$$
|
| 195 |
+
|
| 196 |
+
$$
|
| 197 |
+
\hat{\boldsymbol{y}}_{\ell} = \sigma\left(\boldsymbol{\beta}_{\ell}^{\top} \boldsymbol{v}_{\ell}\right) \tag{3}
|
| 198 |
+
$$
|
| 199 |
+
|
| 200 |
+
where $\boldsymbol{u}_{\ell}$ and $\boldsymbol{\beta}_{\ell}$ are vector parameters for label $\ell$.
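Equations (1)-(3) correspond to a small attention head over the sequence of representations. The following PyTorch sketch (our own minimal implementation, not the released code; names are ours) computes the label-wise attention, the label-specific document vectors, and the per-label probabilities for a single document.

```python
# Minimal LWAN head: S has shape (m, d); there are L labels.
import torch
import torch.nn as nn

class LabelWiseAttention(nn.Module):
    def __init__(self, hidden_dim, num_labels):
        super().__init__()
        self.u = nn.Parameter(torch.randn(num_labels, hidden_dim))     # u_l in Eq. (1)
        self.beta = nn.Parameter(torch.randn(num_labels, hidden_dim))  # beta_l in Eq. (3)

    def forward(self, S):
        a = torch.softmax(S @ self.u.T, dim=0)          # (m, L): attention over positions, Eq. (1)
        v = a.T @ S                                      # (L, d): label-specific vectors, Eq. (2)
        y_hat = torch.sigmoid((self.beta * v).sum(-1))   # (L,): per-label probabilities, Eq. (3)
        return y_hat
```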
|
| 201 |
+
|
| 202 |
+
Results show that adding an LWAN improves performance on MIMIC-III (an improvement of 1.1 points in Micro $F_{1}$ with Longformer and 1.8 with hierarchical models), where on average each document is assigned 6 labels out of the 50 available labels (classes). The improvement is smaller on ECtHR (0.4 with Longformer and 0.1 with hierarchical models), where the average number of labels per document is 1.5 out of 10 labels (classes) in total (Table 16 in the Appendix).
|
| 203 |
+
|
| 204 |
+
# 5.4 Comparison with the State of the Art
|
| 205 |
+
|
| 206 |
+
We compare TrLDC models against recently published results on MIMIC-III, as well as baseline models that process up to 512 tokens. In addition to the common practice of truncating long documents (i.e., using the first 512 tokens), we consider two alternatives that either randomly choose 512 tokens from the document as input or take as input the most informative 512 tokens, identified using TF-IDF scores.
|
| 207 |
+
|
| 208 |
+
<table><tr><td></td><td></td><td>Macro AUC</td><td>Micro AUC</td><td>Macro F1</td><td>Micro F1</td><td>P@5</td></tr><tr><td>CAML (Mullenbach et al., 2018)</td><td>C</td><td>88.4</td><td>91.6</td><td>57.6</td><td>63.3</td><td>61.8</td></tr><tr><td>PubMedBERT (Ji et al., 2021a)</td><td>T</td><td>88.6</td><td>90.8</td><td>63.3</td><td>68.1</td><td>64.4</td></tr><tr><td>GatedCNN-NCI (Ji et al., 2021b)</td><td>C</td><td>91.5</td><td>93.8</td><td>62.9</td><td>68.6</td><td>65.3</td></tr><tr><td>LAAT (Vu et al., 2020)</td><td>R</td><td>92.5</td><td>94.6</td><td>66.6</td><td>71.5</td><td>67.5</td></tr><tr><td>MSMN (Yuan et al., 2022)</td><td>R</td><td>92.8</td><td>94.7</td><td>68.3</td><td>72.5</td><td>68.0</td></tr><tr><td colspan="7">Baselines processing up to 512 tokens</td></tr><tr><td>First</td><td>T</td><td>83.0 ±0.1</td><td>86.0 ±0.1</td><td>47.0 ±0.4</td><td>56.1 ±0.2</td><td>55.4 ±0.2</td></tr><tr><td>Random</td><td>T</td><td>82.5 ±0.2</td><td>85.4 ±0.1</td><td>42.7 ±0.4</td><td>51.1 ±0.2</td><td>52.3 ±0.2</td></tr><tr><td>Informative</td><td>T</td><td>82.7 ±0.1</td><td>85.8 ±0.1</td><td>46.4 ±0.5</td><td>55.2 ±0.3</td><td>54.8 ±0.2</td></tr><tr><td colspan="7">Long document models</td></tr><tr><td>Longformer (4096 + LWAN)</td><td>T</td><td>90.0 ±0.1</td><td>92.6 ±0.2</td><td>60.7 ±0.6</td><td>68.2 ±0.2</td><td>64.8 ±0.2</td></tr><tr><td>Hierarchical (4096 + LWAN)</td><td>T</td><td>91.1 ±0.1</td><td>93.6 ±0.0</td><td>62.9 ±0.1</td><td>69.5 ±0.1</td><td>65.7 ±0.2</td></tr><tr><td>Hierarchical (4096 + LWAN + L*)</td><td>T</td><td>91.7 ±0.1</td><td>94.1 ±0.0</td><td>65.2 ±0.2</td><td>71.0 ±0.1</td><td>66.2 ±0.1</td></tr><tr><td>Hierarchical (8192 + LWAN)</td><td>T</td><td>91.4 ±0.0</td><td>93.7 ±0.1</td><td>63.8 ±0.3</td><td>70.1 ±0.1</td><td>65.9 ±0.1</td></tr><tr><td>Hierarchical (8192 + LWAN + L*)</td><td>T</td><td>91.9 ±0.2</td><td>94.1 ±0.2</td><td>65.5 ±0.7</td><td>71.1 ±0.4</td><td>66.4 ±0.3</td></tr></table>
|
| 209 |
+
|
| 210 |
+
Table 2: Comparison of TrLDC against the state of the art on the MIMIC-III test set. C: CNN-based models; R: RNN-based models; T: Transformer-based models. Models marked with an asterisk (*) use domain-specific RoBERTa-Large (Lewis et al., 2020), whereas Longformer and the other RoBERTa models are task-adaptively pre-trained base versions.
|
| 211 |
+
|
| 212 |
+
<table><tr><td></td><td>ECtHR</td><td>20 News</td><td>Hyper</td></tr><tr><td>First (512)</td><td>73.5 ± 0.2</td><td>86.1 ± 0.3</td><td>92.9 ± 3.2</td></tr><tr><td>Random (512)</td><td>79.0 ± 0.6</td><td>85.3 ± 0.4</td><td>88.9 ± 2.5</td></tr><tr><td>Informative (512)</td><td>72.4 ± 0.2</td><td>86.2 ± 0.3</td><td>91.7 ± 3.2</td></tr><tr><td>Longformer (4096)</td><td>81.0 ± 0.5</td><td>86.3 ± 0.5</td><td>97.9 ± 0.7</td></tr><tr><td>Hierarchical (4096)</td><td>81.1 ± 0.2</td><td>86.3 ± 0.2</td><td>95.4 ± 1.3</td></tr></table>
|
| 213 |
+
|
| 214 |
+
Table 3: Comparison of TrLDC against baselines processing up to 512 tokens. We report Micro $F_{1}$ on ECtHR, Accuracy on 20 News and Hyperpartisan datasets.
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
Results in Tables 2 and 3 show that there is a clear benefit from being able to process longer text. Both Longformer and hierarchical Transformers outperform baselines that process up to 512 tokens by a large margin on MIMIC-III and ECtHR, whereas the improvements on 20 News and Hyperpartisan are relatively small. It is also worth noting that, among these baselines, there is no single best strategy for choosing which 512 tokens to process. Using the first 512 tokens works well on the MIMIC-III and Hyperpartisan datasets, but it performs much worse than 512 random tokens on ECtHR.
|
| 219 |
+
|
| 220 |
+
Finally, Longformer, which can process up to 4096 tokens, achieves results competitive with the best performing CNN-based model (Ji et al., 2021b) on MIMIC-III. By processing longer text and using the RoBERTa-Large model, the hierarchical models further improve the performance, leading to results comparable to RNN-based models (Vu et al., 2020; Yuan et al., 2022). We hypothesise that further improvements can be obtained when TrLDC models are enhanced with a better hierarchy-aware classifier as in Vu et al. (2020) or when code synonyms are used for training as in Yuan et al. (2022).
|
| 221 |
+
|
| 222 |
+
# 5.5 Comparison in terms of GPU memory consumption
|
| 223 |
+
|
| 224 |
+
GPU memory becomes a major constraint when Transformer-based models are trained on long text. Table 4 shows a comparison between Longformer and hierarchical models regarding the number of parameters and their GPU consumption. We use a batch size of 2 in these experiments, and measure the impact of the attention window size and segment length on the memory footprint. We find that hierarchical models generally require less GPU memory than Longformer, and that it is possible to reduce the local window size in Longformer or the segment length in hierarchical models to fit the model into less GPU memory. Recall that small local attention windows are effective in Longformer, and the optimal segment length in hierarchical models is dataset dependent.
|
| 225 |
+
|
| 226 |
+
<table><tr><td>Size</td><td>Longformer (148.6M)</td><td>Hierarchical (139.0M)</td></tr><tr><td colspan="3">Maximum sequence length: 1024</td></tr><tr><td>64</td><td>4.8G</td><td>3.6G</td></tr><tr><td>128</td><td>5.0G</td><td>3.8G</td></tr><tr><td>256</td><td>5.5G</td><td>4.1G</td></tr><tr><td>512</td><td>6.6G</td><td>4.7G</td></tr><tr><td colspan="3">Maximum sequence length: 4096</td></tr><tr><td>64</td><td>11.8G</td><td>7.8G</td></tr><tr><td>128</td><td>12.8G</td><td>8.4G</td></tr><tr><td>256</td><td>14.9G</td><td>9.6G</td></tr><tr><td>512</td><td>19.4G</td><td>12.2G</td></tr></table>
|
| 227 |
+
|
| 228 |
+
Table 4: A comparison between Longformer and hierarchical models regarding their GPU memory consumption. The number of parameters is listed in the table header. Size refers to the local attention window size in Longformer and the segment length in the hierarchical model, respectively.
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
# 6 Practical Advice
|
| 233 |
+
|
| 234 |
+
We compile several questions that practitioners may ask regarding long document classification and provide answers based on our results:
|
| 235 |
+
|
| 236 |
+
Q1 When should I start to consider using long document classification models?
|
| 237 |
+
|
| 238 |
+
A We suggest using TrLDC models if you work with datasets consisting of long documents (e.g., 2K tokens on average). We notice that on the 20 News dataset, the gap between baselines that process 512 tokens and long document models is negligible.
|
| 239 |
+
|
| 240 |
+
Q2 Which model should I choose? Longformer or hierarchical Transformers?
|
| 241 |
+
|
| 242 |
+
A We suggest Longformer as the starting point if you do not plan on extensively tuning hyperparameters. We find the default configuration of Longformer is robust, although it is possible to use a moderate local attention window size (64-128) to improve efficiency without sacrificing effectiveness, and a small number of additional global attention tokens to make the training more stable. On the other hand, hierarchical Transformers may benefit from careful hyperparameter tuning (e.g., of the document splitting strategy or the use of LWAN). When employing hierarchical Transformers, we suggest splitting a document into small overlapping segments (e.g., 128 tokens) that are not derived from document structure as a starting point.
|
| 245 |
+
|
| 246 |
+
We also note that the publicly available Longformer models can process sequences of up to 4096 tokens, whereas hierarchical Transformers can easily be extended to process much longer sequences.
|
| 247 |
+
|
| 248 |
+
# 7 Related Work
|
| 249 |
+
|
| 250 |
+
Long document classification Document length was not a point of controversy in the pre-neural era of NLP, where documents were encoded with bag-of-words representations, e.g., TF-IDF scores. The issue arises with the introduction of deep neural networks. Tang et al. (2015) use CNN- and BiLSTM-based hierarchical networks in a bottom-up fashion, i.e., first encode sentences into vectors, then combine those vectors into a single document vector. Similarly, Yang et al. (2016) incorporate the attention mechanism when constructing the sentence and document representations. Hierarchical variants of BERT have also been explored for document classification (Mulyar et al., 2019; Chalkidis et al., 2022), abstractive summarization (Zhang et al., 2019), and semantic matching (Yang et al., 2020). Both Zhang et al. and Yang et al. also propose specialised pre-training tasks to explicitly capture sentence relations within a document. A very recent work by Park et al. (2022) shows that TrLDC models do not perform consistently well across datasets whose documents contain around 700 tokens on average.
|
| 251 |
+
|
| 252 |
+
Methods of modifying the Transformer architecture for long documents can be categorised into two approaches: recurrent Transformers and sparse attention Transformers. The recurrent approach processes segments moving from left to right (Dai et al., 2019). To capture bidirectional context, Ding et al. (2021) propose a retrospective mechanism in which segments from a document are fed twice as input. Sparse attention Transformers have been explored to reduce the complexity of self-attention, using dilated sliding windows (Child et al., 2019) and locality-sensitive hashing attention (Kitaev et al., 2020). Recently, the combination of local (window) and global attention was proposed by Beltagy et al. (2020) and Zaheer et al. (2020), which we have detailed in Section 3.
|
| 255 |
+
|
| 256 |
+
ICD Coding The task of assigning the most relevant ICD codes to a document as a whole, e.g., a radiology report (Pestian et al., 2007), death certificate (Koopman et al., 2015) or discharge summary (Johnson et al., 2016), has a long history of development (Farkas and Szarvas, 2008). Most existing methods simplify this task to a text classification problem and build classifiers using CNNs (Karimi et al., 2017) or LSTMs (Xie et al., 2018). Since the number of unique ICD codes is very large, methods have been proposed to exploit relations between codes based on label co-occurrence (Dong et al., 2021), label counts (Du et al., 2019), knowledge graphs (Xie et al., 2019; Cao et al., 2020; Lu et al., 2020), and the codes' textual descriptions (Mullenbach et al., 2018; Rios and Kavuluru, 2018). More recently, Ji et al. (2021a) and Gao et al. (2021) investigate various methods of applying BERT to ICD coding. Different from our work, they mainly focus on comparing domain-specific BERT models that are pre-trained on various types of corpora. Ji et al. show that PubMedBERT, pre-trained from scratch on PubMed abstracts, outperforms other variants pre-trained on clinical notes or health-related posts; Gao et al. show that BlueBERT, pre-trained on PubMed and clinical notes, performs best. However, both report that Transformer-based models perform worse than CNN-based ones.
|
| 257 |
+
|
| 258 |
+
# 8 Conclusions
|
| 259 |
+
|
| 260 |
+
Transformers have previously been criticised for being incapable of long document classification. In this paper, we carefully study the role of different components of Transformer-based long document classification models. By conducting experiments on MIMIC-III and three other datasets (i.e., ECtHR, 20 News and Hyperpartisan), we observe clear improvements in performance when a model is able to process more text. Firstly, Longformer, a sparse attention model which can process up to 4096 tokens, achieves competitive results with CNN-based models on MIMIC-III; its performance is relatively robust, and a moderate local attention window size (e.g., 128) together with a small number (e.g., 16) of evenly chosen tokens with global attention can improve efficiency and stability without sacrificing effectiveness. Secondly, hierarchical Transformers outperform all CNN-based models by a large margin; the key design choice is how to split a document into segments that can be encoded by pre-trained models; although the best performing segment length is dataset dependent, we find that splitting a document into small overlapping segments (e.g., 128 tokens) is an effective strategy. Taken together, these experiments rebut the criticisms of Transformers for long document classification.
|
| 263 |
+
|
| 264 |
+
# Acknowledgments
|
| 265 |
+
|
| 266 |
+
This work is funded by the Innovation Fund Denmark under the AI4Xray project. Xiang Dai is funded by CSIRO Precision Health Future Science Platform. Ilias Chalkidis is funded by the Innovation Fund Denmark under File No. 0175-00011A. This project was also undertaken with the assistance of resources and services from the National Computational Infrastructure (NCI), which is supported by the Australian Government.
|
| 267 |
+
|
| 268 |
+
# References
|
| 269 |
+
|
| 270 |
+
Ashutosh Adhikari, Achyudh Ram, Raphael Tang, and Jimmy Lin. 2019. DocBERT: BERT for Document Classification. arXiv, 1904.08398.
|
| 271 |
+
Iz Beltagy, Matthew E Peters, and Arman Cohan. 2020. Longformer: The Long-Document Transformer. arXiv, 2004.05150.
|
| 272 |
+
Pengfei Cao, Yubo Chen, Kang Liu, Jun Zhao, Shengping Liu, and Weifeng Chong. 2020. HyperCore: Hyperbolic and Co-graph Representation for Automatic ICD Coding. In ACL.
|
| 273 |
+
Ilias Chalkidis, Manos Fergadiotis, Sotiris Kotitsas, Prodromos Malakasiotis, Nikolaos Aletras, and Ion Androutsopoulos. 2020. An Empirical Study on Large-Scale Multi-Label Text Classification Including Few and Zero-Shot Labels. In EMNLP.
|
| 274 |
+
Ilias Chalkidis, Abhik Jana, Dirk Hartung, Michael J Bommarito II, Ion Androutsopoulos, Daniel Martin Katz, and Nikolaos Aletras. 2022. LexGLUE: A Benchmark Dataset for Legal Language Understanding in English. In ACL.
|
| 275 |
+
Rewon Child, Scott Gray, Alec Radford, and Ilya Sutskever. 2019. Generating long sequences with sparse transformers. arXiv, 1904.10509.
|
| 276 |
+
Krzysztof Marcin Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamás Sarlós, Peter Hawkins, Jared Quincy Davis, Afroz Mohiuddin, Lukasz Kaiser, David Benjamin Belanger, Lucy J Colwell, and Adrian Weller. 2021. Rethinking Attention with Performers. In ICLR.
|
| 277 |
+
Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc Le, and Ruslan Salakhutdinov. 2019. Transformer-XL: Attentive Language Models beyond a Fixed-Length Context. In ACL.
|
| 278 |
+
|
| 279 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In NAACL.
|
| 280 |
+
SiYu Ding, Junyuan Shang, Shuohuan Wang, Yu Sun, Hao Tian, Hua Wu, and Haifeng Wang. 2021. ERNIE-Doc: A Retrospective Long-Document Modeling Transformer. In ACL-IJCNLP.
|
| 281 |
+
Jesse Dodge, Maarten Sap, Ana Marasovic, William Agnew, Gabriel Ilharco, Dirk Groeneveld, Margaret Mitchell, and Matt Gardner. 2021. Documenting Large Webtext Corpora: A Case Study on the Colossal Clean Crawled Corpus. In EMNLP.
|
| 282 |
+
Hang Dong, Víctor Suárez-Paniagua, William Whiteley, and Honghan Wu. 2021. Explainable automated coding of clinical notes using hierarchical label-wise attention networks and label embedding initialisation. JBI, 116.
|
| 283 |
+
Jingcheng Du, Qingyu Chen, Yifan Peng, Yang Xiang, Cui Tao, and Zhiyong Lu. 2019. ML-Net: multi-label classification of biomedical texts with deep neural networks. JAMIA, 26.
|
| 284 |
+
Richard Farkas and György Szarvas. 2008. Automatic construction of rule-based ICD-9-CM coding systems. BMC Bioinform., 9.
|
| 285 |
+
Shang Gao, Mohammed Alawad, M. Todd Young, John Gounley, Noah Schaefferkoetter, Hong Jun Yoon, Xiao-Cheng Wu, Eric B. Durbin, Jennifer Doherty, Antoinette Stroup, Linda Coyle, and Georgia Tourassi. 2021. Limitations of Transformers on Clinical Text Classification. IEEE J. Biomed. Health Inform., 25.
|
| 286 |
+
Suchin Gururangan, Ana Marasovic, Swabha Swayamdipta, Kyle Lo, Iz Beltagy, Doug Downey, and Noah A Smith. 2020. Don't Stop Pretraining: Adapt Language Models to Domains and Tasks. In ACL.
|
| 287 |
+
Shaoxiong Ji, Matti Hölttä, and Pekka Marttinen. 2021a. Does the Magic of BERT Apply to Medical Code Assignment? A Quantitative Study. Comput. Biol. Med., 139.
|
| 288 |
+
Shaoxiong Ji, Shirui Pan, and Pekka Marttinen. 2021b. Medical Code Assignment with Gated Convolution and Note-Code Interaction. In Findings of ACL-IJCNLP.
|
| 289 |
+
Thorsten Joachims. 1997. A Probabilistic Analysis of the Rocchio Algorithm with TFIDF for Text Categorization. In ICML.
|
| 290 |
+
Alistair E W Johnson, Tom J Pollard, Lu Shen, H Lehman Li-Wei, Mengling Feng, Mohammad Ghassemi, Benjamin Moody, Peter Szolovits, Leo Anthony Celi, and Roger G Mark. 2016. MIMIC-III, a freely accessible critical care database. Sci. Data, 3.
|
| 291 |
+
|
| 292 |
+
Sarvnaz Karimi, Xiang Dai, Hamed Hassanzadeh, and Anthony Nguyen. 2017. Automatic Diagnosis Coding of Radiology Reports: A Comparison of Deep Learning and Conventional Classification Methods. In BioNLP@ACL.
|
| 293 |
+
Johannes Kiesel, Maria Mestre, Rishabh Shukla, Emmanuel Vincent, Payam Adineh, David Corney, Benno Stein, and Martin Potthast. 2019. SemEval-2019 Task 4: Hyperpartisan News Detection. In SemEval@NAACL.
|
| 294 |
+
Nikita Kitaev, Lukasz Kaiser, and Anselm Levskaya. 2020. Reformer: The efficient transformer. In ICLR.
|
| 295 |
+
Bevan Koopman, Sarvnaz Karimi, Anthony Nguyen, Rhydwyn McGuire, David Muscatello, Madonna Kemp, Donna Truran, Ming Zhang, and Sarah Thackway. 2015. Automatic classification of diseases from free-text death certificates for real-time surveillance. BMC Medical Inform. Decis. Mak., 15.
|
| 296 |
+
Rasmus Kær Jørgensen, Mareike Hartmann, Xiang Dai, and Desmond Elliott. 2021. mDAPT: Multilingual Domain Adaptive Pretraining in a Single Model. In Findings of EMNLP.
|
| 297 |
+
Patrick Lewis, Myle Ott, Jingfei Du, and Veselin Stoyanov. 2020. Pretrained Language Models for Biomedical and Clinical Tasks: Understanding and Extending the State-of-the-Art. In ClinicalNLP@EMNLP.
|
| 298 |
+
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized bert pretraining approach. arXiv, 1907.11692.
|
| 299 |
+
Jueqing Lu, Lan Du, Ming Liu, and Joanna Dipnall. 2020. Multi-label Few/Zero-shot Learning with Knowledge Aggregated from Multiple Label Graphs. In EMNLP.
|
| 300 |
+
Marius Mosbach, Maksym Andriushchenko, and Dietrich Klakow. 2021. On the Stability of Fine-tuning BERT: Misconceptions, Explanations, and Strong Baselines. In ICLR.
|
| 301 |
+
James Mullenbach, Sarah Wiegreffe, Jon Duke, Jimeng Sun, and Jacob Eisenstein. 2018. Explainable Prediction of Medical Codes from Clinical Text. In NAACL.
|
| 302 |
+
Andriy Mulyar, Elliot Schumacher, Masoud Rouhizadeh, and Mark Dredze. 2019. Phenotyping of Clinical Notes with Improved Document Classification Models Using Contextualized Neural Language Models. arXiv, 1910.13664.
|
| 303 |
+
Hyunji Park, Yogarshi Vyas, and Kashif Shah. 2022. Efficient Classification of Long Documents Using Transformers. In ACL.
|
| 304 |
+
Damian Pascual, Sandro Luck, and Roger Wattenhofer. 2021. Towards BERT-based Automatic ICD Coding: Limitations and Opportunities. In BioNLP@NAACL.
|
| 305 |
+
|
| 306 |
+
John Pestian, Chris Brew, Pawel Matykiewicz, Dj J Hovermale, Neil Johnson, K Bretonnel Cohen, and Wlodzislaw Duch. 2007. A shared task involving multi-label classification of clinical free text. In BioNLP@ACL.
|
| 307 |
+
Jiezhong Qiu, Hao Ma, Omer Levy, Wen-tau Yih, Sinong Wang, and Jie Tang. 2020. Blockwise Self-Attention for Long Document Understanding. In Findings of EMNLP.
|
| 308 |
+
Alan Ramponi and Barbara Plank. 2020. Neural Unsupervised Domain Adaptation in NLP—A Survey. In COLING.
|
| 309 |
+
Anthony Rios and Ramakanth Kavuluru. 2018. Few-Shot and Zero-Shot Multi-Label Learning for Structured Label Spaces. In EMNLP.
|
| 310 |
+
Chi Sun, Xipeng Qiu, Yige Xu, and Xuanjing Huang. 2019. How to Fine-Tune BERT for Text Classification? In CCL.
|
| 311 |
+
Duyu Tang, Bing Qin, and Ting Liu. 2015. Document modeling with gated recurrent neural network for sentiment classification. In EMNLP.
|
| 312 |
+
Yi Tay, Mostafa Dehghani, Dara Bahri, and Donald Metzler. 2020. Efficient Transformers: A Survey. arXiv, 2009.06732.
|
| 313 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In NeurIPS.
|
| 314 |
+
Thanh Vu, Dat Quoc Nguyen, and Anthony Nguyen. 2020. A label attention model for ICD coding from clinical text. In IJCAI.
|
| 315 |
+
Lin Xiao, Xin Huang, Boli Chen, and Liping Jing. 2019. Label-Specific Document Representation for Multi-Label Text Classification. In EMNLP-IJCNLP.
|
| 316 |
+
Pengtao Xie, Haoran Shi, Ming Zhang, and Eric Xing. 2018. A Neural Architecture for Automated ICD Coding. In ACL.
|
| 317 |
+
Xiancheng Xie, Yun Xiong, Philip S Yu, and Yangyong Zhu. 2019. EHR Coding with Multi-scale Feature Attention and Structured Knowledge Graph Propagation. In CIKM.
|
| 318 |
+
Wenhan Xiong, Barlas Oğuz, Anchit Gupta, Xilun Chen, Diana Liskovich, Omer Levy, Wen-tau Yih, and Yashar Mehdad. 2021. Simple Local Attentions Remain Competitive for Long-Context Tasks. arXiv, 2112.07210.
|
| 319 |
+
Liu Yang, Mingyang Zhang, Cheng Li, Michael Bendersky, and Marc Najork. 2020. Beyond 512 Tokens: Siamese Multi-depth Transformer-based Hierarchical Encoder for Long-Form Document Matching. In CIKM.
|
| 320 |
+
|
| 321 |
+
Zichao Yang, Diyi Yang, Chris Dyer, Xiaodong He, Alex Smola, and Eduard Hovy. 2016. Hierarchical Attention Networks for Document Classification. In NAACL.
|
| 322 |
+
Zheng Yuan, Chuanqi Tan, and Songfang Huang. 2022. Code Synonyms Do Matter: Multiple Synonyms Matching Network for Automatic ICD Coding. In ACL.
|
| 323 |
+
Manzil Zaheer, Guru Guruganesh, Avinava Dubey, Joshua Ainslie, Chris Alberti, Santiago Ontanon, Philip Pham, Anirudh Ravula, Qifan Wang, and Li Yang. 2020. Big Bird: Transformers for Longer Sequences. In NeurIPS.
|
| 324 |
+
Xingxing Zhang, Furu Wei, and Ming Zhou. 2019. HIBERT: Document Level Pre-training of Hierarchical Bidirectional Transformers for Document Summarization. In ACL.
|
| 325 |
+
|
| 326 |
+
# 9 Appendix
|
| 327 |
+
|
| 328 |
+
# 9.1 Limitations
|
| 329 |
+
|
| 330 |
+
Long document classification datasets are usually annotated using a large number of labels. For example, the complete MIMIC-III dataset contains 8,692 unique labels. As we mentioned in Section 2, we focus on building document representation and leave the challenge of learning with a large target label set for future work. Therefore, in this paper, we follow previous work (Mullenbach et al., 2018; Chalkidis et al., 2022) and consider a subset of frequent labels in MIMIC-III and ECtHR.
|
| 331 |
+
|
| 332 |
+
# 9.2 Dataset statistics
|
| 333 |
+
|
| 334 |
+
Table 5 shows the descriptive statistics of the four datasets we use.
|
| 335 |
+
|
| 336 |
+
<table><tr><td></td><td>Train</td><td>Dev</td><td>Test</td></tr><tr><td colspan="4">MIMIC-III</td></tr><tr><td>Documents</td><td>8,066</td><td>1,573</td><td>1,729</td></tr><tr><td>Unique labels</td><td>50</td><td>50</td><td>50</td></tr><tr><td>Avg. tokens</td><td>2,260</td><td>2,693</td><td>2,737</td></tr><tr><td colspan="4">ECtHR</td></tr><tr><td>Documents</td><td>8,866</td><td>973</td><td>986</td></tr><tr><td>Unique labels</td><td>10</td><td>10</td><td>10</td></tr><tr><td>Avg. tokens</td><td>2,140</td><td>2,345</td><td>2,532</td></tr><tr><td colspan="4">Hyperpartisan</td></tr><tr><td>Documents</td><td>516</td><td>64</td><td>65</td></tr><tr><td>Unique labels</td><td>2</td><td>2</td><td>2</td></tr><tr><td>Avg. tokens</td><td>741</td><td>707</td><td>845</td></tr><tr><td colspan="4">20 News</td></tr><tr><td>Documents</td><td>10,183</td><td>1,131</td><td>7,532</td></tr><tr><td>Unique labels</td><td>20</td><td>20</td><td>20</td></tr><tr><td>Avg. tokens</td><td>613</td><td>627</td><td>551</td></tr></table>
|
| 337 |
+
|
| 338 |
+
# 9.3 Details of task-adaptive pre-training
|
| 339 |
+
|
| 340 |
+
Hyperparameters and training time for task-adaptive pre-training can be found in Table 6.
|
| 341 |
+
|
| 342 |
+
# 9.4 Details of classification experiments
|
| 343 |
+
|
| 344 |
+
Preprocessing We mainly follow Mullenbach et al. (2018) to preprocess the MIMIC-III dataset. That is, we lowercase the text, remove all punctuation marks and tokenize the text by whitespace. The only change we make is that we normalise numbers (e.g., convert ‘2021’ to ‘0000’) instead of deleting numeric-only tokens as in Mullenbach et al. (2018). We did not apply additional preprocessing to ECtHR and 20 News. We follow Beltagy et al. (2020) to preprocess the Hyperpartisan dataset. $^{10}$
|
| 345 |
+
|
| 346 |
+
Table 5: Statistics of the datasets. The number of tokens is calculated using RoBERTa tokenizer.
|
| 347 |
+
|
| 348 |
+
<table><tr><td></td><td>Longformer</td><td>RoBERTa</td></tr><tr><td>Max sequence</td><td>4096</td><td>128</td></tr><tr><td>Batch size</td><td>8</td><td>128</td></tr><tr><td>Learning rate</td><td>5e-5</td><td>5e-5</td></tr><tr><td>Training epochs</td><td>6</td><td>15</td></tr><tr><td>Training time (GPU-hours)</td><td>≈ 130</td><td>≈ 40</td></tr></table>
|
| 349 |
+
|
| 350 |
+
Table 6: Hyperparameters and training time (measured on MIMIC-III dataset) for task-adaptive pre-training Longformer and RoBERTa. Batch size = batch size per GPU × num. GPUs × gradient accumulation steps.
|
| 351 |
+
|
| 352 |
+
|
| 353 |
+
|
| 354 |
+
Training We fine-tune the multilabel classification model using a binary cross entropy loss. That is, given a training example whose ground truth and predicted probability for the $i$-th label are $y_{i}$ (0 or 1) and $\hat{y}_i$, we calculate its loss, over the $C$ unique classification labels, as:
|
| 355 |
+
|
| 356 |
+
$$
|
| 357 |
+
\mathcal{L} = \sum_{i=1}^{C} -y_i \log(\hat{y}_i) - (1 - y_i) \log(1 - \hat{y}_i).
|
| 358 |
+
$$
|
| 359 |
+
|
| 360 |
+
For the multiclass and binary classification tasks, we fine-tune using the cross entropy loss, where $\hat{y}_g$ is the predicted probability for the gold label:
|
| 361 |
+
|
| 362 |
+
$$
|
| 363 |
+
\mathcal{L} = -\log(\hat{y}_g).
|
| 364 |
+
$$
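Both objectives map onto standard library calls; the snippet below is a minimal PyTorch sketch (our own, with assumed tensor shapes), where the binary-cross-entropy call applies the sigmoid internally.

```python
import torch
import torch.nn.functional as F

logits = torch.randn(4, 50)                     # 4 documents, C = 50 labels (MIMIC-III top-50)
targets = torch.randint(0, 2, (4, 50)).float()  # multi-hot ground-truth labels
multilabel_loss = F.binary_cross_entropy_with_logits(logits, targets)

class_logits = torch.randn(4, 20)               # e.g., 20 News has 20 classes
gold = torch.randint(0, 20, (4,))
multiclass_loss = F.cross_entropy(class_logits, gold)  # equals -log p(gold label)
```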
|
| 365 |
+
|
| 366 |
+
We use the same effective batch size (16), learning rate (2e-5), and maximum number of training epochs (30) with early stopping patience (5) in all experiments. We also follow Longformer (Beltagy et al., 2020) and set the maximum sequence length to 4096 in most experiments unless otherwise specified. We fine-tune all classification models on Quadro RTX 6000 (24 GB GPU memory) or Tesla V100 (32 GB GPU memory) GPUs. If one batch of data is too large to fit into GPU memory, we use gradient accumulation so that the effective batch sizes (batch size per GPU $\times$ gradient accumulation steps) remain the same.
|
| 367 |
+
|
| 368 |
+
We repeat all experiments five times with different random seeds. The model which is most effective on the development set, measured using the micro $F_{1}$ score (multilabel) or accuracy (multiclass and binary), is used for the final evaluation.
|
| 369 |
+
|
| 370 |
+
# 9.5 A comparison between clinical notes and legal cases
|
| 371 |
+
|
| 372 |
+
Although we usually use the term domain to indicate that texts talk about a narrow set of related concepts (e.g., clinical concepts or legal concepts), text can vary along different dimensions (Ramponi and Plank, 2020).
|
| 373 |
+
|
| 374 |
+
In addition to the statistical differences between MIMIC-III and ECtHR, which we show in Table 5, there is another difference worth considering: clinical notes are private as they contain protected health information. Even de-identified clinical notes are usually not publicly available (e.g., downloadable using a web crawler). In contrast, legal cases have generally been allowed, and even encouraged, to be shared with the public, and thus form a large portion of crawled pre-training data (Dodge et al., 2021). Dodge et al. find that legal documents, especially U.S. case law, are a significant part of the C4 corpus, a cleansed version of CommonCrawl used to pre-train RoBERTa models. The ECtHR proceedings are also publicly available via HUDOC, the court's database.
|
| 375 |
+
|
| 376 |
+
We suspect that the greater usefulness of task-adaptive pre-training on MIMIC-III than on ECtHR (Figure 5) may relate to this difference. Therefore, we evaluate the vanilla RoBERTa on MIMIC-III and ECtHR regarding tokenization and language modelling. A comparison of the fragmentation ratio using the tokenizer and the perplexity using the language model can be found in Table 7.
|
| 377 |
+
|
| 378 |
+
<table><tr><td></td><td>MIMIC-III</td><td>ECtHR</td></tr><tr><td>Fragmentation ratio</td><td>1.233</td><td>1.118</td></tr><tr><td>Perplexity</td><td>1.351</td><td>1.079</td></tr></table>
|
| 379 |
+
|
| 380 |
+
# 9.6 A comparison between TAPT and the publicly available RoBERTa by Lewis et al. (2020)
|
| 381 |
+
|
| 382 |
+
We compare our TAPT-RoBERTa against publicly available domain-specific RoBERTa models (Lewis et al., 2020), which are trained from scratch on biomedical articles and clinical notes, in hierarchical models. In these experiments, we split long documents into overlapping segments of 64 tokens. Results in Figure 8 show that TAPT-RoBERTa outperforms the domain-specific base model, but underperforms the larger model.
|
| 385 |
+
|
| 386 |
+
# 9.7 Results on ECtHR test set
|
| 387 |
+
|
| 388 |
+
Results in Table 8 show that our results are higher than those reported in Chalkidis et al. (2022). Chalkidis et al. compare different BERT variants including domain-specific models, whereas we use task-adaptively pre-trained models. Regarding the hierarchical method, we split a document into overlapping segments, each of which has 512 tokens. We use the default setting for Longformer as in Beltagy et al. (2020).
|
| 389 |
+
|
| 390 |
+
Table 7: Evaluating vanilla RoBERTa on MIMIC-III and ECtHR. Lower fragmentation ratio and perplexity indicate that the test data have a higher similarity with the RoBERTa pre-training data.
|
| 391 |
+
|
| 392 |
+
<table><tr><td></td><td>Macro F1</td><td>Micro F1</td></tr><tr><td>RoBERTa</td><td>68.9</td><td>77.3</td></tr><tr><td>CaseLaw-BERT</td><td>70.3</td><td>78.8</td></tr><tr><td>BigBird</td><td>70.9</td><td>78.8</td></tr><tr><td>DeBERTa</td><td>71.0</td><td>78.8</td></tr><tr><td>Longformer</td><td>71.7</td><td>79.4</td></tr><tr><td>BERT</td><td>73.4</td><td>79.7</td></tr><tr><td>Legal-BERT</td><td>74.7</td><td>80.4</td></tr><tr><td>Longformer (4096)</td><td>76.0 ± 1.4</td><td>80.7 ± 0.3</td></tr><tr><td>Hierarchical (4096)</td><td>76.6 ± 0.7</td><td>81.0 ± 0.3</td></tr></table>
|
| 393 |
+
|
| 394 |
+
Table 8: Comparison of our results against the results reported in (Chalkidis et al., 2022) on the ECtHR test set. Results are sorted by Micro $F_{1}$ .
|
| 395 |
+
|
| 396 |
+
# 9.8 A comparison between evenly splitting and splitting based on document structure
|
| 397 |
+
|
| 398 |
+
Figure 9 shows that splitting by the paragraph-level document structure does not improve performance on the ECtHR dataset. On MIMIC-III, splitting based on document structure substantially underperforms evenly splitting the document.
|
| 399 |
+
|
| 400 |
+

|
| 401 |
+
Figure 8: A comparison of task-adaptive pre-trained RoBERTa against publicly available domain-specific RoBERTa. Both the Base and Large RoBERTa models are trained from scratch on biomedical articles and clinical notes (Lewis et al., 2020).
|
| 402 |
+
|
| 403 |
+

|
| 404 |
+
Figure 9: A comparison between evenly splitting and splitting based on document structure.
|
| 405 |
+
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
# 9.9 Detailed results on the development sets
|
| 409 |
+
|
| 410 |
+
For the sake of brevity, we use only micro $F_{1}$ score in most of our illustrations, and we detail results of other metrics in this section.
|
| 411 |
+
|
| 412 |
+
<table><tr><td rowspan="2">Max sequence length</td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td></tr><tr><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td>512</td><td>81.4 ±0.1</td><td>85.1 ±0.2</td><td>39.2 ±0.9</td><td>52.2 ±0.3</td><td>53.3 ±0.3</td></tr><tr><td>1024</td><td>83.6 ±0.2</td><td>87.3 ±0.3</td><td>43.2 ±0.6</td><td>56.3 ±0.5</td><td>56.5 ±0.2</td></tr><tr><td>2048</td><td>86.5 ±0.2</td><td>89.8 ±0.1</td><td>48.2 ±1.1</td><td>60.5 ±0.4</td><td>59.4 ±0.3</td></tr><tr><td>4096</td><td>88.4 ±0.1</td><td>91.5 ±0.1</td><td>53.1 ±0.5</td><td>64.0 ±0.3</td><td>62.0 ±0.4</td></tr></table>
|
| 413 |
+
|
| 414 |
+
Table 9: Detailed results of Figure 1: the effectiveness of Longformer on the MIMIC-III development set.
|
| 415 |
+
|
| 416 |
+
<table><tr><td></td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td></tr><tr><td></td><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td colspan="6">Longformer on MIMIC-III</td></tr><tr><td>Vanilla</td><td>88.4 ± 0.1</td><td>91.5 ± 0.1</td><td>53.1 ± 0.5</td><td>64.0 ± 0.3</td><td>62.0 ± 0.4</td></tr><tr><td>TAPT</td><td>90.3 ± 0.2</td><td>92.7 ± 0.1</td><td>60.8 ± 0.4</td><td>68.5 ± 0.3</td><td>64.8 ± 0.3</td></tr><tr><td colspan="6">RoBERTa on MIMIC-III</td></tr><tr><td>Vanilla</td><td>81.6 ± 0.2</td><td>85.0 ± 0.3</td><td>43.2 ± 1.7</td><td>53.9 ± 0.4</td><td>54.0 ± 0.2</td></tr><tr><td>TAPT</td><td>82.3 ± 0.4</td><td>85.5 ± 0.3</td><td>48.8 ± 0.4</td><td>56.7 ± 0.2</td><td>55.3 ± 0.2</td></tr><tr><td colspan="6">Longformer on ECtHR</td></tr><tr><td>Vanilla</td><td>—</td><td>—</td><td>77.4 ± 2.3</td><td>81.3 ± 0.3</td><td>—</td></tr><tr><td>TAPT</td><td>—</td><td>—</td><td>78.5 ± 2.2</td><td>82.1 ± 0.6</td><td>—</td></tr><tr><td colspan="6">RoBERTa on ECtHR</td></tr><tr><td>Vanilla</td><td>—</td><td>—</td><td>72.2 ± 1.5</td><td>74.8 ± 0.4</td><td>—</td></tr><tr><td>TAPT</td><td>—</td><td>—</td><td>72.7 ± 0.7</td><td>75.1 ± 0.4</td><td>—</td></tr></table>
|
| 417 |
+
|
| 418 |
+
Table 10: Detailed results of Figure 5: the impact of task-adaptive pre-training. Note that we use maximum sequence length 512 for RoBERTa and 4096 for Longformer in these experiments.
|
| 419 |
+
|
| 420 |
+
<table><tr><td rowspan="2">Size</td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td><td rowspan="2">Accuracy</td></tr><tr><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td colspan="7">MIMIC-III</td></tr><tr><td>32</td><td>89.8 ±0.1</td><td>92.3 ±0.1</td><td>59.6 ±0.6</td><td>67.9 ±0.3</td><td>64.2 ±0.3</td><td>—</td></tr><tr><td>64</td><td>90.0 ±0.1</td><td>92.5 ±0.1</td><td>60.3 ±0.3</td><td>68.1 ±0.1</td><td>64.5 ±0.1</td><td>—</td></tr><tr><td>128</td><td>90.1 ±0.1</td><td>92.6 ±0.1</td><td>60.5 ±0.7</td><td>68.3 ±0.3</td><td>64.7 ±0.3</td><td>—</td></tr><tr><td>256</td><td>90.2 ±0.0</td><td>92.6 ±0.1</td><td>60.7 ±0.6</td><td>68.4 ±0.3</td><td>64.6 ±0.2</td><td>—</td></tr><tr><td>512</td><td>90.3 ±0.2</td><td>92.7 ±0.1</td><td>60.8 ±0.4</td><td>68.5 ±0.3</td><td>64.8 ±0.3</td><td>—</td></tr><tr><td colspan="7">ECtHR</td></tr><tr><td>32</td><td>—</td><td>—</td><td>78.2 ±1.2</td><td>81.2 ±0.3</td><td>—</td><td>—</td></tr><tr><td>64</td><td>—</td><td>—</td><td>78.6 ±1.7</td><td>81.4 ±0.1</td><td>—</td><td>—</td></tr><tr><td>128</td><td>—</td><td>—</td><td>79.9 ±1.6</td><td>82.1 ±0.5</td><td>—</td><td>—</td></tr><tr><td>256</td><td>—</td><td>—</td><td>78.5 ±2.1</td><td>81.8 ±0.4</td><td>—</td><td>—</td></tr><tr><td>512</td><td>—</td><td>—</td><td>78.5 ±2.2</td><td>82.1 ±0.6</td><td>—</td><td>—</td></tr><tr><td colspan="7">Hyperpartisan</td></tr><tr><td>32</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>83.9 ±0.7</td></tr><tr><td>64</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>83.3 ±1.9</td></tr><tr><td>128</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>83.9 ±0.7</td></tr><tr><td>256</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>88.0 ±0.7</td></tr><tr><td>512</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>85.9 ±2.2</td></tr><tr><td colspan="7">20 News</td></tr><tr><td>32</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>92.8 ±0.6</td></tr><tr><td>64</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>94.0 ±0.5</td></tr><tr><td>128</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>93.8 ±0.3</td></tr><tr><td>256</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>93.5 ±0.1</td></tr><tr><td>512</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>94.0 ±0.1</td></tr></table>
|
| 421 |
+
|
| 422 |
+
Table 11: The impact of local attention window size in Longformer, measured on the development sets.
|
| 423 |
+
|
| 424 |
+
<table><tr><td rowspan="2"># tokens</td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td></tr><tr><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td colspan="6">MIMIC-III</td></tr><tr><td>1</td><td>90.1 ± 0.2</td><td>92.6 ± 0.1</td><td>60.5 ± 0.9</td><td>68.2 ± 0.3</td><td>64.7 ± 0.3</td></tr><tr><td>8</td><td>90.0 ± 0.1</td><td>92.5 ± 0.1</td><td>60.5 ± 0.7</td><td>68.2 ± 0.3</td><td>64.6 ± 0.2</td></tr><tr><td>16</td><td>90.0 ± 0.2</td><td>92.5 ± 0.1</td><td>60.0 ± 0.2</td><td>68.1 ± 0.2</td><td>64.3 ± 0.3</td></tr><tr><td>32</td><td>90.0 ± 0.2</td><td>92.4 ± 0.1</td><td>60.1 ± 0.5</td><td>67.9 ± 0.1</td><td>64.4 ± 0.2</td></tr><tr><td>64</td><td>89.9 ± 0.2</td><td>92.4 ± 0.1</td><td>59.9 ± 1.0</td><td>67.9 ± 0.4</td><td>64.4 ± 0.3</td></tr><tr><td colspan="6">ECtHR</td></tr><tr><td>1</td><td>—</td><td>—</td><td>78.5 ± 1.8</td><td>80.8 ± 0.4</td><td>—</td></tr><tr><td>8</td><td>—</td><td>—</td><td>77.2 ± 2.0</td><td>80.8 ± 0.4</td><td>—</td></tr><tr><td>16</td><td>—</td><td>—</td><td>77.7 ± 0.4</td><td>80.7 ± 0.3</td><td>—</td></tr><tr><td>32</td><td>—</td><td>—</td><td>78.2 ± 1.4</td><td>80.6 ± 0.4</td><td>—</td></tr><tr><td>64</td><td>—</td><td>—</td><td>77.7 ± 2.3</td><td>80.7 ± 0.5</td><td>—</td></tr></table>
|
| 425 |
+
|
| 426 |
+
Table 12: Detailed results of Figure 6: the effect of applying global attention on more tokens, which are evenly chosen based on their positions.
|
| 427 |
+
|
| 428 |
+
<table><tr><td rowspan="2"># tokens</td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td></tr><tr><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td colspan="6">MIMIC-III</td></tr><tr><td>1</td><td>90.1 ±0.2</td><td>92.6 ±0.1</td><td>60.5 ±0.9</td><td>68.2 ±0.3</td><td>64.7 ±0.3</td></tr><tr><td>8</td><td>89.7 ±0.2</td><td>92.0 ±0.1</td><td>61.0 ±1.3</td><td>66.9 ±0.4</td><td>64.0 ±0.4</td></tr><tr><td>16</td><td>89.4 ±0.2</td><td>91.9 ±0.1</td><td>60.1 ±1.2</td><td>66.5 ±0.3</td><td>63.9 ±0.5</td></tr><tr><td>32</td><td>89.4 ±0.4</td><td>91.9 ±0.2</td><td>60.3 ±1.6</td><td>66.4 ±0.6</td><td>63.7 ±0.7</td></tr><tr><td>64</td><td>89.1 ±0.4</td><td>91.7 ±0.2</td><td>59.4 ±2.0</td><td>66.2 ±0.7</td><td>63.4 ±0.7</td></tr><tr><td colspan="6">ECtHR</td></tr><tr><td>1</td><td>—</td><td>—</td><td>78.5 ±1.8</td><td>80.8 ±0.4</td><td>—</td></tr><tr><td>8</td><td>—</td><td>—</td><td>79.2 ±0.3</td><td>80.9 ±0.2</td><td>—</td></tr><tr><td>16</td><td>—</td><td>—</td><td>77.6 ±1.2</td><td>80.4 ±0.4</td><td>—</td></tr><tr><td>32</td><td>—</td><td>—</td><td>77.1 ±0.7</td><td>80.0 ±0.2</td><td>—</td></tr><tr><td>64</td><td>—</td><td>—</td><td>76.6 ±1.1</td><td>79.9 ±0.5</td><td>—</td></tr></table>
|
| 429 |
+
|
| 430 |
+
Table 13: The effect of applying global attention on more informative tokens, which are identified based on TF-IDF.
|
| 431 |
+
|
| 432 |
+
<table><tr><td rowspan="2">Size</td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td><td rowspan="2">Accuracy</td></tr><tr><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td colspan="7">Disjoint segments on MIMIC-III</td></tr><tr><td>64</td><td>89.4 ± 0.1</td><td>92.0 ± 0.1</td><td>60.8 ± 1.1</td><td>67.9 ± 0.3</td><td>63.5 ± 0.3</td><td>—</td></tr><tr><td>128</td><td>89.5 ± 0.1</td><td>92.1 ± 0.1</td><td>61.2 ± 0.6</td><td>68.0 ± 0.3</td><td>63.5 ± 0.3</td><td>—</td></tr><tr><td>256</td><td>89.6 ± 0.1</td><td>92.1 ± 0.1</td><td>61.0 ± 0.4</td><td>67.6 ± 0.2</td><td>63.6 ± 0.2</td><td>—</td></tr><tr><td>512</td><td>89.2 ± 0.2</td><td>91.8 ± 0.2</td><td>59.4 ± 0.5</td><td>66.7 ± 0.3</td><td>63.4 ± 0.4</td><td>—</td></tr><tr><td colspan="7">Overlapping segments on MIMIC-III</td></tr><tr><td>64</td><td>89.7 ± 0.1</td><td>92.3 ± 0.1</td><td>62.3 ± 0.2</td><td>68.7 ± 0.1</td><td>64.1 ± 0.1</td><td>—</td></tr><tr><td>128</td><td>89.7 ± 0.2</td><td>92.3 ± 0.1</td><td>61.8 ± 0.9</td><td>68.5 ± 0.3</td><td>64.0 ± 0.2</td><td>—</td></tr><tr><td>256</td><td>89.5 ± 0.1</td><td>92.1 ± 0.1</td><td>61.4 ± 0.3</td><td>68.1 ± 0.2</td><td>63.8 ± 0.1</td><td>—</td></tr><tr><td>512</td><td>89.4 ± 0.1</td><td>92.0 ± 0.0</td><td>60.3 ± 0.3</td><td>67.2 ± 0.2</td><td>63.6 ± 0.3</td><td>—</td></tr><tr><td colspan="7">Disjoint segments on ECtHR</td></tr><tr><td>64</td><td>—</td><td>—</td><td>76.6 ± 1.2</td><td>79.7 ± 0.2</td><td>—</td><td>—</td></tr><tr><td>128</td><td>—</td><td>—</td><td>77.6 ± 2.3</td><td>80.8 ± 0.4</td><td>—</td><td>—</td></tr><tr><td>256</td><td>—</td><td>—</td><td>77.7 ± 1.4</td><td>81.2 ± 0.4</td><td>—</td><td>—</td></tr><tr><td>512</td><td>—</td><td>—</td><td>78.3 ± 1.3</td><td>81.7 ± 0.3</td><td>—</td><td>—</td></tr><tr><td colspan="7">Overlapping segments on ECtHR</td></tr><tr><td>64</td><td>—</td><td>—</td><td>76.9 ± 1.7</td><td>80.5 ± 0.5</td><td>—</td><td>—</td></tr><tr><td>128</td><td>—</td><td>—</td><td>77.5 ± 1.7</td><td>81.2 ± 0.5</td><td>—</td><td>—</td></tr><tr><td>256</td><td>—</td><td>—</td><td>78.1 ± 1.4</td><td>81.5 ± 0.2</td><td>—</td><td>—</td></tr><tr><td>512</td><td>—</td><td>—</td><td>78.4 ± 1.5</td><td>81.4 ± 0.4</td><td>—</td><td>—</td></tr><tr><td colspan="7">Disjoint segments on Hyperpartisan</td></tr><tr><td>64</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>88.8 ± 1.8</td></tr><tr><td>128</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>89.1 ± 1.4</td></tr><tr><td>256</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>87.8 ± 1.8</td></tr><tr><td>512</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>86.2 ± 1.8</td></tr><tr><td colspan="7">Overlapping segments on Hyperpartisan</td></tr><tr><td>64</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>87.5 ± 1.4</td></tr><tr><td>128</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>88.4 ± 1.2</td></tr><tr><td>256</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>88.1 ± 2.1</td></tr><tr><td>512</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>88.4 ± 0.8</td></tr><tr><td colspan="7">Disjoint segments on 20 News</td></tr><tr><td>64</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>93.3 ± 0.2</td></tr><tr><td>128</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>93.5 ± 0.3</td></tr><tr><td>256</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>94.4 ± 0.4</td></tr><tr><td>512</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>94.0 ± 0.3</td></tr><tr><td colspan="7">Overlapping segments on 20 
News</td></tr><tr><td>64</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>93.8 ± 0.4</td></tr><tr><td>128</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>93.4 ± 0.3</td></tr><tr><td>256</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>94.5 ± 0.2</td></tr><tr><td>512</td><td>—</td><td>—</td><td>—</td><td>—</td><td>—</td><td>93.9 ± 0.3</td></tr></table>
|
| 433 |
+
|
| 434 |
+
Table 14: The effect of varying the segment length and whether allowing segments to overlap in the hierarchical transformers.
|
| 435 |
+
|
| 436 |
+
<table><tr><td></td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td></tr><tr><td></td><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td colspan="6">MIMIC-III</td></tr><tr><td>E (4096)</td><td>89.7 ± 0.2</td><td>92.3 ± 0.1</td><td>61.8 ± 0.9</td><td>68.5 ± 0.3</td><td>64.0 ± 0.2</td></tr><tr><td>S (4096)</td><td>87.2 ± 0.2</td><td>90.1 ± 0.2</td><td>55.2 ± 0.4</td><td>62.9 ± 0.2</td><td>59.9 ± 0.2</td></tr><tr><td>S (6144)</td><td>88.2 ± 0.2</td><td>91.0 ± 0.2</td><td>57.8 ± 0.3</td><td>65.4 ± 0.3</td><td>61.7 ± 0.3</td></tr><tr><td>S (8192)</td><td>88.5 ± 0.3</td><td>91.2 ± 0.2</td><td>58.8 ± 0.2</td><td>66.0 ± 0.4</td><td>62.4 ± 0.1</td></tr><tr><td colspan="6">ECtHR</td></tr><tr><td>E (4096)</td><td>—</td><td>—</td><td>77.5 ± 1.7</td><td>81.2 ± 0.5</td><td>—</td></tr><tr><td>S (4096)</td><td>—</td><td>—</td><td>75.3 ± 1.3</td><td>80.1 ± 0.4</td><td>—</td></tr><tr><td>S (6144)</td><td>—</td><td>—</td><td>77.1 ± 1.8</td><td>80.5 ± 0.5</td><td>—</td></tr><tr><td>S (8192)</td><td>—</td><td>—</td><td>77.7 ± 1.9</td><td>81.3 ± 0.5</td><td>—</td></tr></table>
|
| 437 |
+
|
| 438 |
+
Table 15: Detailed results of Figure 9: a comparison between evenly splitting and splitting based on document structure. E: evenly splitting; S: splitting based on document structure.
|
| 439 |
+
|
| 440 |
+
<table><tr><td></td><td colspan="2">AUC</td><td colspan="2">F1</td><td rowspan="2">P@5</td></tr><tr><td></td><td>Macro</td><td>Micro</td><td>Macro</td><td>Micro</td></tr><tr><td colspan="6">MIMIC-III</td></tr><tr><td>Longformer</td><td>90.0 ± 0.2</td><td>92.5 ± 0.1</td><td>60.0 ± 0.2</td><td>68.1 ± 0.2</td><td>64.3 ± 0.3</td></tr><tr><td>+ LWAN</td><td>90.5 ± 0.2</td><td>92.9 ± 0.2</td><td>62.2 ± 0.7</td><td>69.2 ± 0.3</td><td>65.1 ± 0.1</td></tr><tr><td>Hierarchical</td><td>89.7 ± 0.2</td><td>92.3 ± 0.1</td><td>61.8 ± 0.9</td><td>68.5 ± 0.3</td><td>64.0 ± 0.2</td></tr><tr><td>+ LWAN</td><td>91.4 ± 0.1</td><td>93.7 ± 0.1</td><td>64.2 ± 0.4</td><td>70.3 ± 0.1</td><td>65.3 ± 0.1</td></tr><tr><td colspan="6">ECtHR</td></tr><tr><td>Longformer</td><td>—</td><td>—</td><td>77.7 ± 0.4</td><td>80.7 ± 0.3</td><td>—</td></tr><tr><td>+ LWAN</td><td>—</td><td>—</td><td>79.5 ± 0.8</td><td>81.1 ± 0.3</td><td>—</td></tr><tr><td>Hierarchical</td><td>—</td><td>—</td><td>77.5 ± 1.7</td><td>81.2 ± 0.5</td><td>—</td></tr><tr><td>+ LWAN</td><td>—</td><td>—</td><td>79.7 ± 0.9</td><td>81.3 ± 0.3</td><td>—</td></tr></table>
|
| 441 |
+
|
| 442 |
+
Table 16: The effect of label-wise attention network.
|
revisitingtransformerbasedmodelsforlongdocumentclassification/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:76ef80134bb06170aa4c42a5f6731e95f173650e45a01613d2e16dbadabe6770
|
| 3 |
+
size 1082822
|
revisitingtransformerbasedmodelsforlongdocumentclassification/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6ff82c4f1efc84be18ed679734b70580cb3d159f653735f32ed929abcf123c74
|
| 3 |
+
size 464863
|
robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/d0c305d6-88a5-4b7c-a7a3-0915e07c5604_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c40602733f5e8e9f97bd8444b95b07e5df6eb61b6ce0f8d0303e42380300d028
|
| 3 |
+
size 99923
|
robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/d0c305d6-88a5-4b7c-a7a3-0915e07c5604_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f5b1aa2f6a84dd054a0c1daa5ffa605a790d96c54ae5f9657bb1d42935075945
|
| 3 |
+
size 120717
|
robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/d0c305d6-88a5-4b7c-a7a3-0915e07c5604_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e7f7a65a65a081f3b1c7ff5178b4eacb7b7bc683b490a4069dc4daeedf7139c0
|
| 3 |
+
size 800802
|
robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/full.md
ADDED
|
@@ -0,0 +1,411 @@
|
| 1 |
+
# Robust Question Answering against Distribution Shifts with Test-Time Adaptation: An Empirical Study
|
| 2 |
+
|
| 3 |
+
Hai Ye† Yuyang Ding† Juntao Li‡ Hwee Tou Ng†
|
| 4 |
+
|
| 5 |
+
$^{\dagger}$ Department of Computer Science, National University of Singapore
|
| 6 |
+
|
| 7 |
+
$^{\ddagger}$ Soochow University, China
|
| 8 |
+
|
| 9 |
+
{yeh,nght}@comp.nus.edu.sg yyding.me@gmail.com ljt@suda.edu.cn
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
A deployed question answering (QA) model can easily fail when the test data has a distribution shift compared to the training data. Robustness tuning (RT) methods have been widely studied to enhance model robustness against distribution shifts before model deployment. However, can we improve a model after deployment? To answer this question, we evaluate test-time adaptation (TTA) to improve a model after deployment. We first introduce COLDQA, a unified evaluation benchmark for robust QA against text corruption and changes in language and domain. We then evaluate previous TTA methods on COLDQA and compare them to RT methods. We also propose a novel TTA method called online imitation learning (OIL). Through extensive experiments, we find that TTA is comparable to RT methods, and applying TTA after RT can significantly boost the performance on COLDQA. Our proposed OIL improves TTA to be more robust to variation in hyper-parameters and test distributions over time<sup>1</sup>.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
How to build a trustworthy NLP system that is robust to distribution shifts is important, since the real world is changing dynamically and a system can easily fail when the test data has a distribution shift compared to the training data (Ribeiro et al., 2020; Wang et al., 2022). Much previous work on robustness evaluation has found model failures on shifted test data. For example, question answering (QA) models are brittle when dealing with paraphrased questions (Gan and Ng, 2019), models for task-oriented dialogues fail to understand corrupted input (Liu et al., 2021a; Peng et al., 2021), and neural machine translation degrades on noisy text input (Belinkov and Bisk, 2018). In this work, we study the robustness of QA models to out-of-distribution (OOD) test-time data.
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Figure 1: Illustration of test-time adaptation. $\pi$ represents the model being adapted. $x_{t}$ is the test data at time $t$ and $y_{t}$ is the returned predictions for $x_{t}$ .
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
To build a model that is robust against distribution shifts, most previous work focuses on robustness tuning (RT) methods that improve model generalization pre-deployment, such as adversarial training (Madry et al., 2018). However, can we continually enhance a model post-deployment? To answer this question, we study and evaluate test-time adaptation (TTA) for robust QA after model deployment. TTA generalizes a model by continually updating the model with test-time data (Sun et al., 2020). As shown in Fig. 1, in this work, we focus on test-time adaptation in real time, where the model predicts and updates over a data stream on the fly. For each test data instance, the model first returns its prediction and then updates itself with the test data. Unlike unsupervised domain adaptation (Ramponi and Plank, 2020) studied in NLP, TTA is suitable for domain generalization, since it makes no assumption about the target distribution and could adapt the model to any arbitrary distribution at test time.
We discuss TTA methods in §3, where we first present previous popular TTA baselines, and then introduce our newly proposed TTA method, online imitation learning (OIL). OIL is inspired by imitation learning, where the adapted model learns

Figure 2: The average results of RT, TTA, and $\mathrm{RT + TTA}$ on COLDQA. $\mathrm{RT + TTA}$ significantly improves over RT and TTA.
to clone the actions made by the source model, and the source model aims to reduce overfitting to noisy pseudo-labels in the adapted model. We further adopt causal inference to control model bias from the source model. Next, to compare to TTA methods, we briefly discuss previous robustness tuning (RT) methods such as adversarial training in §4.
To study and analyze TTA for robust QA post-deployment, we introduce COLDQA in §5 which is a unified evaluation benchmark for robust QA against distribution shifts from text corruption, language change, and domain change. It differs from previous benchmarks that only study one type of distribution shifts (Ravichander et al., 2021; Hu et al., 2020; Fisch et al., 2019). COLDQA expects a QA model to generalize well to all three types of distribution shifts.
Our contributions in this work include:

- We are the first to study test-time adaptation for QA tasks with extensive experiments.
- We propose a novel TTA method, OIL, which outperforms previous TTA baselines.
- We propose a new benchmark, COLDQA, which unifies the evaluation of robust QA against distribution shifts.
- We evaluate previous robustness tuning methods on the new benchmark.

Based on the experimental results in §6, we report the following findings:

- COLDQA is challenging, and not all RT methods are effective on it (§6.2);
- Overall, as Fig. 2 shows, TTA is comparable to RT, and applying TTA after RT can further boost model performance (§6.2);
- Compared to previous TTA baselines, OIL is more robust to changes in hyper-parameters and test distributions over time (§6.3).

# 2 Related Work
Robust QA Much previous work on model robustness evaluation has shown that NLP models fail on test data with distribution shifts (Rychalska et al., 2019; Ribeiro et al., 2020; Wang et al., 2022) compared to the training data. For QA tasks, Ravichander et al. (2021) study how text corruption affects QA performance. Lewis et al. (2020) and Artetxe et al. (2020) analyze cross-lingual transfer of a QA system. Fisch et al. (2019) benchmark the generalization of QA models to data with domain shift. In this work, we jointly study distribution shifts due to corruption, language change, and domain change. Adversarial samples cause another type of distribution shifts (Jia and Liang, 2017) which is not studied in this work. Hard samples (Ye et al., 2022), dataset bias (Tu et al., 2020), and other robustness issues are not the focus of this work.
Test-Time Adaptation TTA adapts a source model with test-time data from a target distribution. TTA has been verified to be very effective in image recognition (Sun et al., 2020; Wang et al., 2021b; Liu et al., 2021b; Bartler et al., 2022). In NLP, Wang et al. (2021d) learn to combine adapters on low-resource languages at test time to improve sequence labeling tasks. Gao et al. (2022) and Li et al. (2022) keep adapting a QA model after model deployment using user feedback, which is different from our work which requires no human involvement when adapting the model. Ben-David et al. (2022) study test-time adaptation for text classification and sequence labeling, but they focus on example-based prompt learning which needs expert knowledge to design prompts. Banerjee et al. (2021) explore test-time learning for QA tasks, but their work concerns how to train a QA model from scratch by using unlabeled test data, instead of adapting to out-of-distribution test data.
Domain Adaptation Different from test-time adaptation, unsupervised domain adaptation (UDA) needs to know the target domain when performing adaptation pre-deployment (Ben-David et al., 2010; Li et al., 2020; Ye et al., 2020; Karouzos et al., 2021). UDA tries to minimize the gap between the source and target domain. Recent work studies UDA without knowing the source domain (Liang et al., 2020; Su et al., 2022), which means the model can be adapted to any unseen target domain on the fly. However, these methods assume that all target data is available when performing adaptation, unlike online adaptation.
<table><tr><td rowspan="2">Settings</td><td colspan="2">Training Data</td><td colspan="2">Training Loss</td></tr><tr><td>Training time</td><td>Test time</td><td>Training time</td><td>Test time</td></tr><tr><td>Unsupervised domain adaptation</td><td>xs, ys; Xt</td><td>None</td><td>L(xs, ys) + L(xt)</td><td>None</td></tr><tr><td>Robustness tuning</td><td>xs, ys</td><td>None</td><td>L(xs, ys)</td><td>None</td></tr><tr><td>Test-time adaptation (online)</td><td>xs, ys</td><td>Xt</td><td>None</td><td>L(xt)</td></tr></table>
Table 1: Compared settings. $\mathcal{X}^s$ and $\mathcal{Y}^s$ are drawn from a source distribution and $\mathcal{X}^t$ from a target distribution. $x^s \in \mathcal{X}^s, y^s \in \mathcal{Y}^s, x^t \in \mathcal{X}^t$ .
Robustness Tuning Robustness tuning (RT) is another family of methods that tries to train a more generalized model pre-deployment at training time instead of test time. Adversarial training is a well-studied method to enhance model robustness (Miyato et al., 2017; Madry et al., 2018; Zhu et al., 2020; Wang et al., 2021a). Some work also uses regularization to improve model generalization (Wang et al., 2021c; Zheng et al., 2021; Cheng et al., 2021; Jiang et al., 2020). Prompt tuning (Lester et al., 2021) and adapter-based tuning (He et al., 2021) can also enhance model generalization to unseen test distributions.
Life-long Learning Similar to TTA, life-long learning (LLL) can also continually improve a model post-deployment (Parisi et al., 2019). However, LLL requires the model to remember previously learned knowledge, and training data in the target distribution is labeled. TTA only focuses on the distribution to be adapted to and the test data is unlabeled. Lin et al. (2022) also adapt QA models with test-time data but in a LLL setting.
# 3 Test-Time Adaptation
Problem Definition Given a source model $\pi_0$ trained on a source distribution $S$ , test-time adaptation (TTA) adapts the model to the test distribution $\mathcal{T}$ with the test data, which enhances the model post-deployment. In the setting of online adaptation, test-time data comes in a stream<sup>2</sup>. As shown in Fig. 1, at time $t$ , for the test data $x_{t} \sim \mathcal{T}$ , the model $\pi_t$ will first predict its labels $y_{t}$ to return to the end user. Next, $\pi_t$ adapts itself with a TTA method and the adapted model will be carried forward to time $t + 1$ . The process can proceed without stopping as more test data arrive. There is no access to the gold labels of test data in the whole process. We compare the setting studied in this work, which is online test-time adaptation, with unsupervised domain adaptation and robustness tuning in Table 1.
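
To make this protocol concrete, the following minimal sketch shows the predict-then-update loop described above; `source_model`, `adapt_step`, and `test_stream` are illustrative names rather than released code.

```python
# Minimal sketch of online test-time adaptation (illustrative; names are hypothetical).
def run_tta(source_model, test_stream, adapt_step):
    model = source_model                 # pi_0: the deployed source model
    for x_t in test_stream:              # unlabeled test batches arrive one by one
        y_t = model.predict(x_t)         # first return predictions to the end user
        model = adapt_step(model, x_t)   # then update the model with the same batch
        yield y_t                        # the adapted model is carried forward to time t + 1
```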
# 3.1 TTA with Tent and PL
We first discuss two prior TTA methods, Tent (Wang et al., 2021b) and PL (Lee, 2013). Tent adapts the model by entropy minimization, in which the model predicts the outputs over test-time data and calculates the entropy loss for optimization. Similarly, PL is a pseudo-labeling method, predicting the pseudo-labels on test-time data and calculating the cross-entropy loss. Tent is simple yet it achieves SOTA performance on computer vision (CV) tasks such as image classification, compared to other more complex methods, such as TTT (Sun et al., 2020) which needs to modify the training process by introducing extra self-supervised losses. Other TTA methods improve over Tent (Bartler et al., 2022; Liu et al., 2021b), but they are much more complex.
Formally, Tent and PL start from the source model $\pi_0$ . At time $t$ , the model $\pi_t$ updates itself with the test data $x_t$ . The loss for optimization is denoted as $l_t(\pi_t)$ :

$$
l_{t}(\pi_{t}) = H(p_{t})_{\mathrm{Tent}} \;\; \text{or} \;\; H(p_{t}, y_{t})_{\mathrm{PL}} \tag{1}
$$

where $p_t$ is the predicted probabilities over the output classes of $x_t$ from the model $\pi_t$, and $y_t = \arg \max_i p_t[i]$. $H(\cdot)$ and $H(\cdot,\cdot)$ denote the entropy and cross-entropy loss respectively. On the data $x_t$, the model is optimized with only one gradient step to obtain $\pi_t'$ from $\pi_t$, and the updated model $\pi_t'$ is carried forward to time $t + 1$: $\pi_{t+1} \gets \pi_t'$.
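
As an illustration, the per-batch loss in Eq. 1 for an extractive QA head could be computed as in the sketch below (assuming PyTorch; this is not the authors' released implementation). The averaging over the start and end positions anticipates §3.3.

```python
import torch
import torch.nn.functional as F

def tent_or_pl_loss(start_logits, end_logits, method="tent"):
    """Sketch of Eq. 1 for a batch of QA logits of shape [batch, seq_len]."""
    losses = []
    for logits in (start_logits, end_logits):   # treat start/end positions independently
        if method == "tent":
            probs = F.softmax(logits, dim=-1)
            entropy = -(probs * torch.log(probs + 1e-12)).sum(dim=-1)   # H(p_t)
            losses.append(entropy.mean())
        else:  # "pl"
            pseudo = logits.argmax(dim=-1).detach()                     # y_t = argmax_i p_t[i]
            losses.append(F.cross_entropy(logits, pseudo))              # H(p_t, y_t)
    return sum(losses) / len(losses)
```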
# 3.2 Online Imitation Learning
Adapting by the model alone, Tent and PL may easily lose the ability to predict correct labels, since the labels predicted by them are not verified to be correct and learning with such noisy signals may degrade the model. The model may not recover again once it starts to deteriorate. To overcome such an issue, inspired by imitation learning (Ross et al., 2011), we propose online imitation learning (OIL) in this work. OIL aims to train a learner (or model) $\pi$ by the supervision of an expert $\pi_{e}$ in a data stream. The expert can help the model to be more robust throughout model adaptation, since the expert is stable and the learner clones the behavior of the expert.
Formally, at each time $t$ , the expert $\pi_e$ takes an action (makes a prediction) $\hat{y}_t \sim \pi_e$ on $x_t \sim \mathcal{T}$ . The learner $\pi_t$ then learns to clone such an action by optimizing a surrogate objective $l_t$ :

$$
l_{t}(\pi_{t}) = \mathbb{E}_{x_{t} \sim \mathcal{T}} \, \mathbb{E}_{\langle y_{t}, \hat{y}_{t} \rangle \sim \langle \pi, \pi_{e} \rangle} \, \mathcal{L}(y_{t}, \hat{y}_{t}; x_{t}) \tag{2}
$$

where $y_{t}$ is the action taken by the learner at time $t$ and $\mathcal{L}(\cdot, \cdot; \cdot)$ measures the distance between the two actions. Formally, at time $T$, with a sequence of online loss functions $\{l_t\}_{t = 1}^{T}$ and the learners $\Pi = \{\pi_t\}_{t = 1}^{T}$, the regret $R(T)$ is defined as:

$$
R(T) = \sum_{t = 1}^{T} l_{t}(\pi_{t}) - \min_{\pi \in \Pi} \sum_{t = 1}^{T} l_{t}(\pi) \tag{3}
$$

where we aim to minimize this regret during adaptation, which amounts to optimizing the loss function $l_{t}(\pi_{t})$ at each time $t$ (Ross et al., 2011).
# 3.2.1 Instantiation of TTA with OIL
At time 0, both the learner $\pi$ and the expert $\pi_{e}$ are initialized by the source model $\pi_0$ . At time $t$ , the loss function $l_{t}(\pi_{t})$ for optimization is:

$$
l_{t}(\pi_{t}) = H(p_{t}, \hat{y}_{t}) \tag{4}
$$

where $p_t$ is the predicted probabilities over the output classes of $x_t$ from the learner $\pi_t$ . $\hat{y}_t = \arg \max_i \hat{p}_t[i]$ in which $\hat{p}_t$ is the corresponding predicted probabilities of the expert $\pi_e$ . Same as Tent and PL, the model is also optimized with one gradient step to get $\pi_t'$ : $\pi_t' \gets \pi_t$ , and the model $\pi_t'$ is carried forward to time $t + 1$ : $\pi_{t+1} \gets \pi_t'$ .
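
Concretely, the imitation loss of Eq. 4 can be written as a cross-entropy against the expert's argmax action, as in the sketch below (PyTorch-style, illustrative only).

```python
import torch.nn.functional as F

def oil_basic_loss(learner_logits, expert_logits):
    """Sketch of Eq. 4: the learner clones the expert's action."""
    y_hat = expert_logits.argmax(dim=-1).detach()    # expert action \hat{y}_t
    return F.cross_entropy(learner_logits, y_hat)    # H(p_t, \hat{y}_t)
```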
For the expert, we can also update it by using the model parameters of the learner. At time $t$ , we update the expert as follows:

$$
\theta_{\pi_{e}} \leftarrow \alpha \cdot \theta_{\pi_{e}} + (1 - \alpha) \cdot \theta_{\pi_{t}'} \tag{5}
$$

where $\theta$ represents the model parameters and $\alpha$ is a hyper-parameter to control the updating of the expert. $\alpha$ is set to a high value such as 0.99 or 1, so the expert stays close to the source model $\pi_0$ in the adaptation process. Here, the expert is also similar to the mean teacher (Tarvainen and Valpola, 2017).
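
A sketch of the exponential moving average update in Eq. 5, assuming the expert and the learner share the same architecture (PyTorch-style, illustrative only):

```python
import torch

@torch.no_grad()
def update_expert(expert, learner, alpha=0.99):
    """Sketch of Eq. 5: EMA update of the expert parameters."""
    for p_e, p_l in zip(expert.parameters(), learner.parameters()):
        # theta_e <- alpha * theta_e + (1 - alpha) * theta_learner
        p_e.mul_(alpha).add_(p_l, alpha=1.0 - alpha)
```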
Furthermore, since the expert is initialized by the source model and because of distribution shift, the actions taken by the expert may be noisy. We can filter and try not to learn these noisy actions. Then the loss function in Eq. 4 becomes:

$$
l_{t}(\pi_{t}) = \mathbb{I}\left(H(p_{t}, \hat{y}_{t}) < \gamma\right) \cdot H(p_{t}, \hat{y}_{t}) \tag{6}
$$

where the cross-entropy loss $H(p_{t},\hat{y}_{t})$ is used to identify the noisy actions, and $\gamma$ is a hyper-parameter serving as a threshold.

Figure 3: (a) The proposed causal graph. (b) The calculation of total direct effect as in Eq. 7.
# 3.2.2 Enhancing OIL with Causal Inference
Since the expert is initialized by the source model, when it predicts labels on the test data, its behavior will be affected by the knowledge that it has learned from the source distribution, which is what we call model bias in this work. Since the test distribution is different from the source distribution, and the expert provides instructions to the learner to clone, such model bias will have a negative effect on the learning of the learner. Here, we further use causal inference (Pearl, 2009) to reduce such effect caused by model bias.
Causal Graph We assume that the model output of the learner $\pi$ is affected by direct and indirect effect from the input, as shown in the causal graph in Fig. 3a. The causal graph includes the variables which are the input $X$ , the output $Y$ , and the potential model bias $M$ from the expert. $X \rightarrow Y$ is the direct effect. $X \rightarrow M \rightarrow Y$ represents the indirect effect, where $M$ is a mediator between $X$ and $Y$ . $M$ is determined by the input $X$ , which can come from in-distribution or out-of-distribution data.
Causal Effects Our goal in causal inference is to keep the direct effect but control or remove the indirect effect. As shown in Fig. 3b, we calculate the total direct effect (TDE) along $X \rightarrow Y$ as follows:

$$
\begin{aligned}
TDE(y) &= Y_{y \mid do(X = x)} - Y_{y \mid do(X = x_{0})} \\
&= Y_{y \mid X = x} - Y_{y \mid X = x_{0}}
\end{aligned} \tag{7}
$$

where the $do$ operation is the causal intervention (Glymour et al., 2016), which removes the confounders of $X$. However, since there is no confounder of $X$ under our assumption, we simply omit it.
Model Training Given the total direct effect in Eq. 7, we first have to learn the left term $Y_{y|X = x}$, which is the combination of the direct and indirect effects along $X \to Y$ and $X \to M \to Y$ respectively. We use the learner $\pi$ to learn the direct effect. For the indirect effect, the model bias of $M$ exhibits different behaviors on data from different distributions.
# Algorithm 1 Online Imitation Learning

Require: Source model $\pi_0$; memory bank size $K$; $\alpha$ for expert updating; $\gamma$ for filtering noisy actions; $\beta$ for controlling the indirect effect.

1: Initialize the expert $\pi_e \gets \pi_0$
2: for $t = 1, 2, \cdots$ do
3: Return predictions on $x_{t}$ using Eq. 9;
4: Enqueue $x_{t}$ and dequeue $x_{t - K}$;
5: for $x_{k} \in \{x_{t - K}, \dots, x_{t}\}$ do
6: Use $x_{k}$ to update the learner $\pi_{t}$ as in Eq. 8;
7: Update the expert $\pi_e$ as in Eq. 5;
8: end for
9: end for
Since the learner $\pi$ and the expert $\pi_e$ capture the test and source distribution respectively, we use the discrepancy in their outputs to represent the model bias. Considering the model bias, the loss function $l_{t}$ in Eq. 6 becomes:

$$
l_{t}(\pi_{t}) = \mathbb{I}\left(H(p_{t}, \hat{y}_{t}) < \gamma\right) \cdot H\left(p_{t} + (p_{t} - \hat{p}_{t}), \hat{y}_{t}\right) \tag{8}
$$

where $p_t$ and $\hat{p}_t$ are the predicted probabilities over the output classes of the learner $\pi_t$ and the expert $\pi_e$ respectively. $p_t$ captures the direct effect and $p_t - \hat{p}_t$ learns the indirect effect.
Inference When performing inference, we take the action $y$ which has the largest TDE value. Based on Eq. 7 for TDE calculation, we obtain the prediction over the input $x_{t}$ using the learner $\pi_t$ as:

$$
y_{t} = \arg \max_{i} TDE(i), \quad TDE(i) = p_{t}[i] + (1 - \beta) \cdot (p_{t} - \hat{p}_{t})[i] \tag{9}
$$

where $\beta$ controls the contribution of the indirect effect. Here, when calculating the TDE score, we assume the model output is zero when given the null input $x_0$ , since we assume the model cannot make predictions without the given input. We set $\beta$ to 1 throughout the experiments, which completely eliminates the effect of model bias.
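
Putting Eqs. 6, 8, and 9 together, one gradient step of OIL on a single output position might look like the sketch below (PyTorch-style, illustrative only; the clamping is a numerical safeguard we add for the sketch, not a detail from the method).

```python
import torch
import torch.nn.functional as F

def oil_loss_and_prediction(learner_logits, expert_logits, gamma=float("inf"), beta=1.0):
    """Sketch of the filtered, causally adjusted loss (Eq. 8) and TDE inference (Eq. 9)."""
    p = F.softmax(learner_logits, dim=-1)          # learner probabilities p_t
    p_hat = F.softmax(expert_logits, dim=-1)       # expert probabilities \hat{p}_t
    y_hat = p_hat.argmax(dim=-1)                   # expert action \hat{y}_t

    # Eq. 6 indicator: keep only instances whose plain imitation loss is below gamma.
    plain_ce = F.nll_loss(torch.log(p + 1e-12), y_hat, reduction="none")
    keep = (plain_ce < gamma).float()

    # Eq. 8: cross-entropy of the debiased distribution p_t + (p_t - \hat{p}_t).
    debiased = (p + (p - p_hat)).clamp_min(1e-12)  # clamp is a numerical safeguard (assumption)
    adjusted_ce = F.nll_loss(torch.log(debiased), y_hat, reduction="none")
    loss = (keep * adjusted_ce).sum() / keep.sum().clamp_min(1.0)

    # Eq. 9: predict the class with the largest total direct effect.
    tde = p + (1.0 - beta) * (p - p_hat)
    return loss, tde.argmax(dim=-1)
```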
# 3.3 Implementation of TTA for the QA Task
For extractive question answering, the model needs to predict the start and end positions of the answer span. The above TTA methods treat the two positions independently, apply the same loss, i.e., $l_{t}(\pi_{t})$, to each of them separately, and take the average of the two as the final loss. We present the pseudocode of OIL in Algorithm 1, where Tent and PL follow the same procedure but with different losses for the update. The data $x_{t}$ at each time $t$ is a batch of instances. We preserve a memory bank of size $K$ to store the data from time $t - K$ to $t$, which more fully exploits test-time data for model adaptation. At each time $t$, we enqueue $x_{t}$ and dequeue $x_{t - K}$ from the memory bank. Then each batch of data from the memory bank is used to optimize the online loss in Eq. 8. The expert for OIL is updated accordingly.
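
The full online loop of Algorithm 1, including the memory bank, is sketched below; `predict_tde` and `oil_step` are hypothetical helpers wrapping Eq. 9 and Eq. 8, and `update_expert` is the EMA update sketched earlier. This is an illustration of the procedure, not the released implementation.

```python
from collections import deque

def adapt_online(learner, expert, optimizer, test_stream,
                 K=5, alpha=0.99, gamma=float("inf"), beta=1.0):
    """Sketch of Algorithm 1 with a memory bank of the last K test batches."""
    memory = deque(maxlen=K)                                 # enqueue x_t; the oldest batch drops out
    for x_t in test_stream:
        y_t = predict_tde(learner, expert, x_t, beta=beta)   # return predictions first (Eq. 9)
        memory.append(x_t)
        for x_k in memory:                                   # one gradient step per stored batch
            loss = oil_step(learner, expert, x_k, gamma=gamma)  # filtered, adjusted loss (Eq. 8)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            update_expert(expert, learner, alpha=alpha)         # Eq. 5
        yield y_t
```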
# 4 Robustness Tuning
In contrast to improving the model post-deployment with TTA, robustness tuning (RT) enhances the model pre-deployment. RT has been studied in NLP to improve model generalization (Wang et al., 2022). RT methods are applied at training time when training the source model. We also benchmark RT methods on COLDQA to compare with TTA methods.
First, we compare with adversarial training methods: FGM (Miyato et al., 2017), PGD (Madry et al., 2018), FreeLB (Zhu et al., 2020), and InfoBERT (Wang et al., 2021a). Next, we further evaluate robustness tuning methods proposed for cross-lingual transfer: MVR (Wang et al., 2021c) and xTune (Zheng et al., 2021). These two methods use regularization to enhance model robustness. None of these methods has previously been comprehensively evaluated on distribution shifts arising from text corruption, language change, and domain change.
Combination of RT and TTA. Finally, we also study combining RT and TTA methods. The source model is tuned with an RT method, and this model is then adapted to the test distribution with a TTA method.
# 5 COLDQA
To study robust QA under distribution shifts, in this work we introduce COLDQA, a unified evaluation benchmark against text corruption, language change, and domain change. As shown in Table 2, we collect some existing QA datasets to construct the source and target distributions for COLDQA.
Source Distribution The training data for the source distribution is SQuAD v1.1 (Rajpurkar et al., 2016). To evaluate model generalization on COLDQA, we first need to train a source model with the source training data. Next, we evaluate the model on each subset of each target dataset. For test-time adaptation, the model needs to be adapted with the test data on the fly.
<table><tr><td>Source</td><td>|Train|</td><td>|Dev|</td><td>Distribution Shift</td><td>Target</td><td>|Subset|</td><td>|Test|</td><td>Metric</td></tr><tr><td rowspan="9">SQuAD</td><td rowspan="9">87,599</td><td rowspan="9">34,726</td><td rowspan="2">Text corruption</td><td>NoiseQA-syn</td><td>3</td><td>1,190</td><td rowspan="9">EM/F1</td></tr><tr><td>NoiseQA-na</td><td>3</td><td>1,190</td></tr><tr><td rowspan="2">Language change</td><td>XQuAD</td><td>11</td><td>1,190</td></tr><tr><td>MLQA</td><td>7</td><td>4,517-11,590</td></tr><tr><td rowspan="5">Domain change</td><td>HotpotQA</td><td>1</td><td>5,901</td></tr><tr><td>NaturalQA</td><td>1</td><td>12,836</td></tr><tr><td>NewsQA</td><td>1</td><td>4,212</td></tr><tr><td>SearchQA</td><td>1</td><td>16,980</td></tr><tr><td>TriviaQA</td><td>1</td><td>7,785</td></tr></table>
Table 2: Detailed characteristics of COLDQA. To perform evaluation on COLDQA, a model is first trained on the source distribution. Next, the trained model is tested on each subset of each target dataset.
<table><tr><td></td><td></td><td colspan="2">ColdQA</td><td colspan="4">Text Corruption</td><td colspan="4">Language Change</td><td colspan="2">Domain Change</td></tr><tr><td></td><td></td><td colspan="2">Average</td><td colspan="2">NoiseQA-syn</td><td colspan="2">NoiseQA-na</td><td colspan="2">XQuAD</td><td colspan="2">MLQA</td><td colspan="2">MRQA</td></tr><tr><td></td><td>Metric</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td></td><td>xlmr-base</td><td>55.11</td><td>69.21</td><td>66.64</td><td>78.67</td><td>66.05</td><td>77.91</td><td>55.59</td><td>71.42</td><td>47.14</td><td>65.27</td><td>40.11</td><td>52.78</td></tr><tr><td rowspan="2">RT</td><td>MVR</td><td>56.93</td><td>70.51</td><td>68.85</td><td>80.10</td><td>67.87</td><td>78.91</td><td>58.08</td><td>73.34</td><td>48.45</td><td>66.33</td><td>41.40</td><td>53.84</td></tr><tr><td>xTune</td><td>58.54</td><td>71.94</td><td>70.95</td><td>81.52</td><td>69.75</td><td>80.66</td><td>58.78</td><td>73.75</td><td>49.87</td><td>67.76</td><td>43.36</td><td>56.03</td></tr><tr><td rowspan="3">TTA</td><td>Tent</td><td>56.24</td><td>69.68</td><td>68.02</td><td>79.37</td><td>67.79</td><td>78.91</td><td>57.40</td><td>72.56</td><td>47.59</td><td>65.13</td><td>40.39</td><td>52.43</td></tr><tr><td>PL</td><td>56.45</td><td>69.78</td><td>68.51</td><td>79.59</td><td>68.15</td><td>79.23</td><td>57.91</td><td>72.69</td><td>47.75</td><td>65.17</td><td>39.94</td><td>52.20</td></tr><tr><td>OIL</td><td>57.06</td><td>70.38</td><td>68.75</td><td>79.86</td><td>68.40</td><td>79.40</td><td>57.96</td><td>72.64</td><td>48.39</td><td>66.08</td><td>41.80</td><td>53.92</td></tr><tr><td rowspan="2">RT+TTA</td><td>xTune + PL</td><td>58.86</td><td>71.89</td><td>71.73</td><td>82.12</td><td>70.87</td><td>81.10</td><td>60.23</td><td>74.56</td><td>50.33</td><td>68.10</td><td>41.14</td><td>53.58</td></tr><tr><td>xTune + OIL</td><td>59.63</td><td>72.68</td><td>71.90</td><td>82.24</td><td>70.81</td><td>81.15</td><td>60.13</td><td>74.46</td><td>50.67</td><td>68.53</td><td>44.65</td><td>57.00</td></tr><tr><td></td><td>xlmr-large</td><td>58.58</td><td>73.82</td><td>65.55</td><td>79.91</td><td>64.17</td><td>78.37</td><td>63.15</td><td>78.77</td><td>53.87</td><td>72.58</td><td>46.18</td><td>59.46</td></tr><tr><td rowspan="6">RT</td><td>FGM</td><td>58.06</td><td>73.56</td><td>64.93</td><td>79.71</td><td>62.94</td><td>77.96</td><td>63.21</td><td>78.74</td><td>54.14</td><td>72.72</td><td>45.09</td><td>58.65</td></tr><tr><td>PGD</td><td>58.80</td><td>74.02</td><td>65.91</td><td>80.16</td><td>63.75</td><td>78.05</td><td>63.80</td><td>78.91</td><td>54.28</td><td>72.82</td><td>46.25</td><td>60.19</td></tr><tr><td>FreeLB</td><td>58.79</td><td>73.83</td><td>66.22</td><td>79.97</td><td>64.37</td><td>77.88</td><td>63.34</td><td>78.79</td><td>53.93</td><td>72.51</td><td>46.07</td><td>59.99</td></tr><tr><td>InfoBERT</td><td>57.72</td><td>73.39</td><td>64.59</td><td>79.52</td><td>62.66</td><td>77.46</td><td>62.31</td><td>78.28</td><td>53.98</td><td>72.52</td><td>45.05</td><td>59.14</td></tr><tr><td>MVR</td><td>59.52</td><td>74.51</td><td>67.06</td><td>80.96</td><td>64.76</td><td>78.59</td><td>63.35</td><td>78.61</td><td>54.47</td><td>72.80</td><td>47.97</td><td>61.60</td></tr><tr><td>xTune</td><td>61.51</td><td>76.06</td><td>70.11</td><td>83.17</td><td>67.20</td><td>80.54</td><td>65.00</td><td>79.91</td><td>56.30</td><td>74.33</td><td>48.94</td><td>62.37</td></tr><tr><td 
rowspan="3">TTA</td><td>Tent</td><td>54.56</td><td>70.34</td><td>52.91</td><td>69.01</td><td>54.29</td><td>69.87</td><td>63.22</td><td>78.91</td><td>52.72</td><td>70.96</td><td>49.65</td><td>62.95</td></tr><tr><td>PL</td><td>61.80</td><td>76.05</td><td>71.26</td><td>83.60</td><td>69.32</td><td>81.67</td><td>64.05</td><td>79.21</td><td>54.27</td><td>72.57</td><td>50.12</td><td>63.21</td></tr><tr><td>OIL</td><td>62.04</td><td>76.19</td><td>71.57</td><td>83.93</td><td>70.11</td><td>82.22</td><td>64.19</td><td>79.37</td><td>54.41</td><td>72.90</td><td>49.93</td><td>62.53</td></tr><tr><td rowspan="2">RT+TTA</td><td>xTune + PL</td><td>63.73</td><td>77.01</td><td>76.01</td><td>86.60</td><td>73.83</td><td>84.55</td><td>65.74</td><td>80.15</td><td>55.78</td><td>73.92</td><td>47.29</td><td>59.81</td></tr><tr><td>xTune + OIL</td><td>64.57</td><td>77.93</td><td>76.13</td><td>86.72</td><td>73.69</td><td>84.61</td><td>65.83</td><td>80.12</td><td>56.24</td><td>74.34</td><td>51.00</td><td>63.86</td></tr></table>
Table 3: Benchmarking results (%) on COLDQA for XLMR-base and XLMR-large. Each TTA method is run three times with random seeds and the average results are reported. Bold: the best results.
To evaluate performance under all kinds of distribution shifts, we use a multilingual pre-trained language model as the base model, since it maps different languages into a shared representation space.
Target Distributions We study the following target distribution shifts at test time.
- Text Corruption We use NoiseQA to evaluate model robustness to text corruption. NoiseQA (Ravichander et al., 2021) studies noises from real-world interfaces, i.e., speech recognizers, keyboards, and translation systems. When humans use these interfaces, the questions asked may contain noises, which degrade the QA system's performance. NoiseQA includes two subsets, NoiseQA-na and NoiseQA-syn. NoiseQA-na has real-world noises annotated by human annotators, while NoiseQA-syn is synthetically generated.
- Language Change A robust QA system should also perform well when the inputs are in other languages. We use the datasets XQuAD (Artetxe et al., 2020) and MLQA (Lewis et al., 2020), designed for cross-lingual transfer, to evaluate change of language in the test data.
- Domain Change The test data may come from a domain different from the source domain used for model training. Here, the training and test domains are in the same language without any text corruption. We use the datasets from MRQA (Fisch et al., 2019) for evaluation, which include HotpotQA (Yang et al., 2018), NaturalQA (Kwiatkowski et al., 2019), NewsQA (Trischler et al., 2017), SearchQA (Dunn et al., 2017), and TriviaQA (Joshi et al., 2017). The development sets of these datasets are used.
<table><tr><td>MRQA</td><td>HotpotQA</td><td>NaturalQA</td><td>NewsQA</td><td>TriviaQA</td><td>SearchQA</td><td>Average</td></tr><tr><td>xlmr-large</td><td>53.13 / 68.19</td><td>44.38 / 61.26</td><td>45.56 / 63.35</td><td>58.25 / 67.16</td><td>29.56 / 37.34</td><td>46.18 / 59.46</td></tr><tr><td>xTune</td><td>55.41 / 70.85</td><td>47.44 / 63.39</td><td>47.98 / 65.58</td><td>60.95 / 70.19</td><td>32.92 / 41.82</td><td>48.94 / 62.37</td></tr><tr><td>Tent</td><td>53.95 / 69.20</td><td>48.14 / 64.34</td><td>45.61 / 63.36</td><td>58.95 / 67.77</td><td>41.61 / 50.09</td><td>49.65 / 62.95</td></tr><tr><td>PL</td><td>53.83 / 69.03</td><td>50.88 / 66.17</td><td>46.05 / 63.68</td><td>58.56 / 67.31</td><td>41.26 / 49.84</td><td>50.12 / 63.21</td></tr><tr><td>OIL</td><td>56.65 / 71.92</td><td>54.49 / 68.16</td><td>46.77 / 64.12</td><td>59.18 / 67.99</td><td>32.54 / 40.45</td><td>49.93 / 62.53</td></tr><tr><td>xTune+PL</td><td>55.95 / 71.30</td><td>53.90 / 68.11</td><td>48.50 / 65.72</td><td>59.75 / 68.78</td><td>18.33 / 25.13</td><td>47.29 / 59.81</td></tr><tr><td>xTune+OIL</td><td>58.46 / 73.99</td><td>54.27 / 68.06</td><td>47.45 / 64.80</td><td>61.05 / 70.21</td><td>33.74 / 42.23</td><td>51.00 / 63.86</td></tr></table>
Table 4: Results (EM / F1) on each subset of MRQA. Bold: the best results. Underlined: the second best results.
Comparison to Existing Benchmarks To the best of our knowledge, COLDQA is the first benchmark that unifies robustness evaluation over text corruption, language change, and domain change. Previous benchmarks for robust QA usually only study one type of these distribution shifts, e.g., NoiseQA (Ravichander et al., 2021), XTREME (Hu et al., 2020), and MRQA (Fisch et al., 2019) study text corruption, language change, and domain change respectively, where the methods proposed on these benchmarks are tested only on one type of distribution shifts. So it is unclear if prior proposed methods generalize well to other types of distribution shifts. In contrast, COLDQA evaluates a method on all types of distribution shifts mentioned above, a more challenging task to tackle.
# 6 Experiments
# 6.1 Setup
To carry out comprehensive evaluation on all types of distribution shifts, we use a multilingual pretrained language model as the base model, specifically XLMR-base and XLMR-large (Conneau et al., 2020). To train the source model on SQuAD with vanilla fine-tuning, we use the default training setup from Hu et al. (2020). For robustness tuning, we use the hyper-parameter values suggested by Wang et al. (2021a) to train FreeLB and InfoBERT. For MVR and xTune, the default settings from the original work are used<sup>3</sup>.
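
For reference, the multilingual backbones can be loaded as standard Hugging Face QA models; the snippet below only sketches the starting point of the setup, not the full fine-tuning script.

```python
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

# XLMR-base shown here; "xlm-roberta-large" is the large variant used in our experiments.
tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
model = AutoModelForQuestionAnswering.from_pretrained("xlm-roberta-base")
```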
For test-time adaptation, the details of setting the hyper-parameter values for learning rate, batch size, $\alpha$, $\gamma$, and $K$ are given in Appendix A and shown in Table 10. All model parameters are updated during adaptation. Dropout is turned off for Tent and PL when generating the model outputs or pseudo-labels. The adaptation time of OIL on each dataset from COLDQA is shown in Tables 6, 7, and 8. All experiments were performed on one NVIDIA A100 GPU.
# 6.2 Main Results
Table 3 shows the benchmarking results of TTA, RT, and their combination on COLDQA. The detailed results on each subset of MRQA are reported in Table 4. We have the following observations.
COLDQA is challenging, and not all RT methods are effective on it. In Fig. 7 in the appendix, we report the gains of RT baselines over vanilla fine-tuning on the development set of SQuAD. Not surprisingly, each RT baseline improves the model results on the in-distribution set. However, after re-benchmarking the RT baselines on COLDQA, we see that xTune and MVR are more effective than the adversarial training baselines. Among the adversarial training methods, only PGD and FreeLB improve the average results, and the improvements are marginal. Overall, COLDQA introduces new challenges to the existing RT methods.
OIL is stronger than PL and Tent. Tent is much less effective than OIL and PL on COLDQA though it is a very strong baseline on CV tasks (Wang et al., 2021b). This shows the necessity of re-analyzing TTA methods on QA tasks. OIL is consistently better than PL based on the average results in Table 3. OIL mostly outperforms Tent and PL based on the detailed results of MRQA in Table 4.
TTA and RT are both effective and they are comparable to each other. On XLMR-base and XLMR-large, both TTA (OIL and PL) and RT (xTune and MVR) can significantly improve the average results by around 1-3 absolute points. More specifically, on XLMR-large, the best TTA method, OIL, outperforms xTune on the average results; OIL is better than xTune on NoiseQA and MRQA, but lags behind xTune on XQuAD and MLQA. However, on XLMR-base, TTA does not outperform RT.

Figure 4: Robustness of PL and OIL to variance of hyper-parameter values. With OIL and PL, we adapt XLMR-base tuned by xTune by using various value combinations of hyper-parameters, which are learning rate and memory size. Learning rate is selected from $\{5\mathrm{e} - 6,1\mathrm{e} - 6,5\mathrm{e} - 7,1\mathrm{e} - 7\}$ and memory size from $\{1,3,5\}$ . For OIL, different values of $\alpha$ are tested.



Figure 5: Robustness of PL and OIL to changes of test distribution over time. $y$ -axis shows relative gains on each language over XLMR-base tuned by xTune. $x$ -axis shows test sets from MLQA. mem: memory size $K$ . OIL sets $\alpha$ to 0.999.
We think the reason is that the effectiveness of TTA depends on the source model, since TTA starts from the source model and the source model decides the accuracy of predicted pseudo-labels on the test data.
Applying TTA after RT significantly boosts the performance and achieves SOTA results. On both XLMR-base and XLMR-large, xTune+OIL achieves the best average performance compared to all other methods. On XLMR-large, xTune+OIL improves xTune by 3 points on EM score. Among the three types of distribution shifts, xTune+OIL is more effective on text corruption and domain change than language change. Finally, xTune+OIL improves over the baseline EM score by more than 4 points on XLMR-base and 6 points on XLMR-large, significantly improving QA robustness against distribution shifts.
# 6.3 Further Analysis
Compared to PL, OIL is more robust to varying hyper-parameter values. OIL utilizes an expert model to perform model adaptation, and $\alpha$ controls the updating of the expert model. In Fig. 4, we fix the value of $\alpha$, adapt the model with various combinations of hyper-parameter values, and report the absolute gains on EM score after adaptation. We observe that for OIL, a higher $\alpha$ value such as 0.99 or 1 achieves positive gains under varying hyper-parameter values. However, PL is less stable than OIL under varying hyper-parameter values. Robustness to varying hyper-parameter values is important for TTA, since tuning hyper-parameters on unknown test data is difficult.
OIL is better than PL when dealing with changes in test distribution. We further evaluate TTA methods in the setting of continual adaptation, where the test distribution changes over time (rather than staying fixed as in Table 3), and the model needs to be adapted continually without stopping. In Fig. 5, we adapt the source model from the test language of es to the language hi without stopping. On each test distribution, we report the relative gain over the source model without adaptation. We find that PL is less robust in such a setting and often has negative gains, especially in the last few adaptations. However, our proposed method OIL achieves positive gains among nearly all adaptations, which demonstrates the robustness of OIL in continual adaptation.
Effects of Causal Inference. Table 5 shows the effects of removing causal inference in OIL. Without causal inference, adaptation performance consistently drops on the test sets. In Fig. 6, we further show how $\beta$ affects causal inference. $\beta$ does affect the final results and the optimal $\beta$ value varies with different datasets. To avoid tuning $\beta$ , model bias is completely removed by setting $\beta$ to 1 in all our experiments.
# 7 Conclusion
We study test-time adaptation (TTA) for robust question answering under distribution shifts. A unified evaluation benchmark, COLDQA, over text corruption, language change, and domain change is provided. A novel TTA method, OIL, is proposed that achieves good performance when combined with a robustness tuning method.
<table><tr><td>EM / F1</td><td>Avg.</td><td>NoiseQA-syn</td><td>NoiseQA-na</td><td>XQuAD</td><td>MLQA</td><td>MRQA</td></tr><tr><td>OIL</td><td>57.06 / 70.38</td><td>68.75 / 79.86</td><td>68.40 / 79.40</td><td>57.96 / 72.64</td><td>48.39 / 66.08</td><td>41.80 / 53.92</td></tr><tr><td>w/o CI</td><td>56.68 / 69.85</td><td>68.41 / 79.48</td><td>68.11 / 79.14</td><td>57.98 / 72.43</td><td>48.08 / 65.65</td><td>40.82 / 52.57</td></tr><tr><td>xTune + OIL</td><td>59.63 / 72.68</td><td>71.90 / 82.24</td><td>70.81 / 81.15</td><td>60.13 / 74.46</td><td>50.67 / 68.53</td><td>44.65 / 57.00</td></tr><tr><td>w/o CI</td><td>59.06 / 72.11</td><td>71.13 / 81.76</td><td>70.18 / 80.64</td><td>59.73 / 73.93</td><td>50.49 / 68.31</td><td>43.75 / 55.92</td></tr></table>
Table 5: Effects of causal inference on XLMR-base. CI: causal inference

Figure 6: The effects of $\beta$ used in causal inference. Results are evaluated on NoiseQA with XLMR-base tuned by xTune.

# Acknowledgements
This research is supported by the National Research Foundation, Singapore under its AI Singapore Programme (AISG Award No: AISG-RP-2018-007 and AISG2-PhD-2021-08-016[T]). The computational work for this article was partially performed on resources of the National Supercomputing Centre, Singapore (https://www.nssc.sg).
# Limitations
Though test-time adaptation shows strong improvements for robust QA under distribution shifts, it still has some issues that need to be addressed in the future. First, model updating is costly. TTA needs to update the model online. However, the cost of updating should be controlled especially for large pre-trained language models. Second, how to choose suitable hyper-parameter values for adaptation is also important. The test data is usually not available and we cannot tune the hyperparameters before adaptation, so how to effectively select hyper-parameter values is important. In our work, we did not perform hyper-parameter search for OIL. We have also demonstrated in Fig 4 that OIL is robust to various combinations of hyperparameter values with the help of the expert model.
# References
Mikel Artetxe, Sebastian Ruder, and Dani Yogatama. 2020. On the cross-lingual transferability of monolingual representations. In Proceedings of the Annual Meeting of the Association for Computational Linguistics.
Pratyay Banerjee, Tejas Gokhale, and Chitta Baral. 2021. Self-supervised test-time learning for reading comprehension. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.
Alexander Bartler, Andre Bühler, Felix Wiewel, Mario Döbler, and Bin Yang. 2022. MT3: Meta test-time training for self-supervised test-time adaption. In Proceedings of the International Conference on Artificial Intelligence and Statistics.
Yonatan Belinkov and Yonatan Bisk. 2018. Synthetic and natural noise both break neural machine translation. In International Conference on Learning Representations.
Eyal Ben-David, Nadav Oved, and Roi Reichart. 2022. PADA: Example-based prompt learning for on-the-fly adaptation to unseen domains. Transactions of the Association for Computational Linguistics.
Shai Ben-David, John Blitzer, Koby Crammer, Alex Kulesza, Fernando Pereira, and Jennifer Wortman Vaughan. 2010. A theory of learning from different domains. Machine Learning.
Hao Cheng, Xiaodong Liu, Lis Pereira, Yaoliang Yu, and Jianfeng Gao. 2021. Posterior differential regularization with f-divergence for improving model robustness. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.
Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the Annual Meeting of the Association for Computational Linguistics.
Matthew Dunn, Levent Sagun, Mike Higgins, V. Ugur Güney, Volkan Cirik, and Kyunghyun Cho. 2017. SearchQA: A new Q&A dataset augmented with context from a search engine. ArXiv preprint, abs/1704.05179.
Adam Fisch, Alon Talmor, Robin Jia, Minjoon Seo, Eunsol Choi, and Danqi Chen. 2019. MRQA 2019 shared task: Evaluating generalization in reading comprehension. In Proceedings of the 2nd Workshop on Machine Reading for Question Answering.
Wee Chung Gan and Hwee Tou Ng. 2019. Improving the robustness of question answering systems to question paraphrasing. In Proceedings of the Annual Meeting of the Association for Computational Linguistics.
Ge Gao, Eunsol Choi, and Yoav Artzi. 2022. Simulating bandit learning from user feedback for extractive question answering. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers).
Madelyn Glymour, Judea Pearl, and Nicholas P Jewell. 2016. Causal Inference in Statistics: A Primer. John Wiley & Sons.
Ruidan He, Linlin Liu, Hai Ye, Qingyu Tan, Bosheng Ding, Liying Cheng, Jiawei Low, Lidong Bing, and Luo Si. 2021. On the effectiveness of adapter-based tuning for pretrained language model adaptation. In Proceedings of the Annual Meeting of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (Volume 1: Long Papers).
Junjie Hu, Sebastian Ruder, Aditya Siddhant, Graham Neubig, Orhan Firat, and Melvin Johnson. 2020. XTREME: A massively multilingual multi-task benchmark for evaluating cross-lingual generalisation. In Proceedings of the International Conference on Machine Learning.
Robin Jia and Percy Liang. 2017. Adversarial examples for evaluating reading comprehension systems. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.
Haoming Jiang, Pengcheng He, Weizhu Chen, Xiaodong Liu, Jianfeng Gao, and Tuo Zhao. 2020. SMART: Robust and efficient fine-tuning for pretrained natural language models through principled regularized optimization. In Proceedings of the Annual Meeting of the Association for Computational Linguistics.
Mandar Joshi, Eunsol Choi, Daniel Weld, and Luke Zettlemoyer. 2017. TriviaQA: A large scale distantly supervised challenge dataset for reading comprehension. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers).
Constantinos Karouzos, Georgios Paraskevopoulos, and Alexandros Potamianos. 2021. UDALM: Unsupervised domain adaptation through language modeling. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.
Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natural Questions: A benchmark for question answering research. Transactions of the Association for Computational Linguistics.
Dong-Hyun Lee. 2013. Pseudo-Label: the simple and efficient semi-supervised learning method for deep neural networks. In ICML Workshop on Challenges in Representation Learning.
Brian Lester, Rami Al-Rfou, and Noah Constant. 2021. The power of scale for parameter-efficient prompt tuning. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.
Patrick Lewis, Barlas Oguz, Rudy Rinott, Sebastian Riedel, and Holger Schwenk. 2020. MLQA: Evaluating cross-lingual extractive question answering. In Proceedings of the Annual Meeting of the Association for Computational Linguistics.
Juntao Li, Ruidan He, Hai Ye, Hwee Tou Ng, Lidong Bing, and Rui Yan. 2020. Unsupervised domain adaptation of a pretrained cross-lingual language model. In Proceedings of the Twenty-Ninth International Joint Conference on Artificial Intelligence.
Zichao Li, Prakhar Sharma, Xing Han Lu, Jackie Chi Kit Cheung, and Siva Reddy. 2022. Using interactive feedback to improve the accuracy and explainability of question answering systems post-deployment. In Findings of the Association for Computational Linguistics.
Jian Liang, Dapeng Hu, and Jiashi Feng. 2020. Do we really need to access the source data? source hypothesis transfer for unsupervised domain adaptation. In Proceedings of the International Conference on Machine Learning.
Bill Yuchen Lin, Sida Wang, Xi Lin, Robin Jia, Lin Xiao, Xiang Ren, and Scott Yih. 2022. On continual model refinement in out-of-distribution data streams. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers).
Jiexi Liu, Ryuichi Takanobu, Jiaxin Wen, Dazhen Wan, Hongguang Li, Weiran Nie, Cheng Li, Wei Peng, and Minlie Huang. 2021a. Robustness testing of language understanding in task-oriented dialog. In Proceedings of the Annual Meeting of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (Volume 1: Long Papers).
Yuejiang Liu, Parth Kothari, Bastien van Delft, Baptiste Bellot-Gurlet, Taylor Mordan, and Alexandre Alahi. 2021b. TTT++: When does self-supervised test-time training fail or thrive? In Advances in Neural Information Processing Systems: Annual Conference on Neural Information Processing Systems.
Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu. 2018. Towards deep learning models resistant to adversarial attacks. In International Conference on Learning Representations.
Takeru Miyato, Andrew M. Dai, and Ian J. Goodfellow. 2017. Adversarial training methods for semi-supervised text classification. In International Conference on Learning Representations.
German I Parisi, Ronald Kemker, Jose L Part, Christopher Kanan, and Stefan Wermter. 2019. Continual lifelong learning with neural networks: A review. Neural Networks, 113.
Judea Pearl. 2009. Causal inference in statistics: An overview. Statistics Surveys, 3.
Baolin Peng, Chunyuan Li, Zhu Zhang, Chenguang Zhu, Jinchao Li, and Jianfeng Gao. 2021. RADDLE: An evaluation benchmark and analysis platform for robust task-oriented dialog systems. In Proceedings of the Annual Meeting of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (Volume 1: Long Papers).
Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.
Alan Ramponi and Barbara Plank. 2020. Neural unsupervised domain adaptation in NLP—A survey. In Proceedings of the 28th International Conference on Computational Linguistics.
Abhilasha Ravichander, Siddharth Dalmia, Maria Ryskina, Florian Metze, Eduard Hovy, and Alan W Black. 2021. NoiseQA: Challenge set evaluation for user-centric question answering. In Proceedings of the Conference of the European Chapter of the Association for Computational Linguistics: Main Volume.
Marco Tulio Ribeiro, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. 2020. Beyond accuracy: Behavioral testing of NLP models with CheckList. In Proceedings of the Annual Meeting of the Association for Computational Linguistics.
Stéphane Ross, Geoffrey Gordon, and Drew Bagnell. 2011. A reduction of imitation learning and structured prediction to no-regret online learning. In Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics.
Barbara Rychalska, Dominika Basaj, Alicja Gosiewska, and Przemysław Biecek. 2019. Models in the wild: On corruption robustness of neural NLP systems. In Proceedings of the International Conference on Neural Information Processing.
Xin Su, Yiyun Zhao, and Steven Bethard. 2022. A comparison of strategies for source-free domain adaptation. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers).
Yu Sun, Xiaolong Wang, Zhuang Liu, John Miller, Alexei A. Efros, and Moritz Hardt. 2020. Test-time training with self-supervision for generalization under distribution shifts. In Proceedings of the International Conference on Machine Learning.
Antti Tarvainen and Harri Valpola. 2017. Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. In Advances in Neural Information Processing Systems: Annual Conference on Neural Information Processing Systems.
Adam Trischler, Tong Wang, Xingdi Yuan, Justin Harris, Alessandro Sordoni, Philip Bachman, and Kaheer Suleman. 2017. NewsQA: A machine comprehension dataset. In Proceedings of the 2nd Workshop on Representation Learning for NLP.
Lifu Tu, Garima Lalwani, Spandana Gella, and He He. 2020. An empirical study on robustness to spurious correlations using pre-trained language models. Transactions of the Association for Computational Linguistics.
Boxin Wang, Shuohang Wang, Yu Cheng, Zhe Gan, Ruoxi Jia, Bo Li, and Jingjing Liu. 2021a. InfoBERT: improving robustness of language models from an information theoretic perspective. In International Conference on Learning Representations.
Dequan Wang, Evan Shelhamer, Shaoteng Liu, Bruno A. Olshausen, and Trevor Darrell. 2021b. Tent: Fully test-time adaptation by entropy minimization. In International Conference on Learning Representations.
Xinyi Wang, Sebastian Ruder, and Graham Neubig. 2021c. Multi-view subword regularization. In Proceedings of the Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.
Xinyi Wang, Yulia Tsvetkov, Sebastian Ruder, and Graham Neubig. 2021d. Efficient test time adapter assembling for low-resource language varieties. In *Findings of the Association for Computational Linguistics*.
Xuezhi Wang, Haohan Wang, and Diyi Yang. 2022. Measure and improve robustness in NLP models: A survey. In Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies.
Zhilin Yang, Peng Qi, Saizheng Zhang, Yoshua Bengio, William Cohen, Ruslan Salakhutdinov, and Christopher D. Manning. 2018. HotpotQA: A dataset for diverse, explainable multi-hop question answering. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.
Hai Ye, Hwee Tou Ng, and Wenjuan Han. 2022. On the robustness of question rewriting systems to questions of varying hardness. In Proceedings of the Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers).
Hai Ye, Qingyu Tan, Ruidan He, Juntao Li, Hwee Tou Ng, and Lidong Bing. 2020. Feature adaptation of pre-trained language models across languages and domains with robust self-training. In Proceedings of the Conference on Empirical Methods in Natural Language Processing.
Bo Zheng, Li Dong, Shaohan Huang, Wenhui Wang, Zewen Chi, Saksham Singhal, Wanxiang Che, Ting Liu, Xia Song, and Furu Wei. 2021. Consistency regularization for cross-lingual fine-tuning. In Proceedings of the Annual Meeting of the Association for Computational Linguistics and the International Joint Conference on Natural Language Processing (Volume 1: Long Papers).
Chen Zhu, Yu Cheng, Zhe Gan, Siqi Sun, Tom Goldstein, and Jingjing Liu. 2020. FreeLB: Enhanced adversarial training for natural language understanding. In International Conference on Learning Representations.
<table><tr><td>OIL</td><td>HotpotQA</td><td>NaturalQA</td><td>NewsQA</td><td>SearchQA</td><td>TriviaQA</td></tr><tr><td>xlmr-base</td><td>247</td><td>771</td><td>502</td><td>1265</td><td>579</td></tr><tr><td>xlmr-large</td><td>1116</td><td>3450</td><td>1441</td><td>6779</td><td>4369</td></tr></table>
Table 6: Adaptation time (in seconds) on MRQA.
<table><tr><td colspan="8">MLQA</td></tr><tr><td>OIL</td><td>en</td><td>es</td><td>de</td><td>ar</td><td>hi</td><td>vi</td><td>zh</td></tr><tr><td>xlmr-base</td><td>489</td><td>159</td><td>164</td><td>218</td><td>203</td><td>239</td><td>172</td></tr><tr><td>xlmr-large</td><td>1635</td><td>533</td><td>548</td><td>724</td><td>679</td><td>802</td><td>574</td></tr></table>
Table 7: Adaptation time (in seconds) on MLQA.
<table><tr><td>OIL</td><td>XQuAD</td><td>NoiseQA</td></tr><tr><td>xlmr-base</td><td>124</td><td>162</td></tr><tr><td>xlmr-large</td><td>187</td><td>193</td></tr></table>
Table 8: Adaptation time (in seconds) on each subset of XQuAD and NoiseQA.

Figure 7: Gains over the results of XLMR-large with vanilla fine-tuning on the development set of SQuAD. abs.: absolute.
# A Appendix
# A.1 Hyper-parameters
We provide the values of hyper-parameters for test-time adaptation. (1) For learning rate, we select a value smaller than the one used for training the source model. We set the learning rate to 1e-6. (2) For batch size, for smaller test sets, we set the batch size to 8. For larger test sets, we set the batch size to 16. (3) For $\alpha$ used in updating the expert model, if the test set is large, we set $\alpha$ to a larger value such as 1. Otherwise, we set $\alpha$ to a smaller value such as 0.99. (4) For $\gamma$ used in filtering the noisy labels, $\gamma = \infty$ works well for most of the test sets, except the datasets NoiseQA, XQuAD, and NaturalQA, where we set $\gamma$ to 0.5. (5) For memory size $K$ , we set $K$ to a smaller value for large sets but to a larger value for small sets. The specific hyper-parameters used for TTA baselines are presented in Table 10.
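
The guidance above can be summarized as an illustrative default configuration (per-dataset values are listed in Table 10; this dictionary is only a reading aid, not an official config file).

```python
# Illustrative TTA defaults distilled from the description above (assumption, not released config).
TTA_DEFAULTS = {
    "learning_rate": 1e-6,     # smaller than the learning rate used to train the source model
    "batch_size": 8,           # 16 for larger test sets
    "alpha": 0.99,             # expert EMA coefficient; 1.0 for very large test sets
    "gamma": float("inf"),     # 0.5 for NoiseQA, XQuAD, and NaturalQA
    "memory_size_K": 5,        # smaller K for large test sets, larger K for small ones
    "beta": 1.0,               # fixed in all experiments
}
```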
# B Effects of Denoising in OIL
Table 9 shows the effects of denoising in OIL. For NoiseQA and XQuAD, we set $\gamma$ to 0.5 to filter out the noisy labels. When using XLMR-large as the base model for NoiseQA-na, the average performance drops substantially if noisy labels are not removed.
|
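As a rough illustration of the filtering step, the snippet below keeps a pseudo label only when an associated noise score falls below $\gamma$, so that $\gamma = \infty$ disables denoising entirely. The function name and the notion of a per-example "noise score" are our own simplification; the quantity actually thresholded by $\gamma$ in OIL is defined in the main text.

```python
def filter_pseudo_labels(noise_scores, gamma):
    """Return a keep/drop mask for pseudo labels (illustrative sketch).

    A pseudo label is kept only when its noise score is below gamma;
    gamma = float("inf") keeps everything, i.e. no denoising.
    """
    return [score < gamma for score in noise_scores]

scores = [0.12, 0.48, 0.73, 1.90]
print(filter_pseudo_labels(scores, gamma=0.5))           # [True, True, False, False]
print(filter_pseudo_labels(scores, gamma=float("inf")))  # [True, True, True, True]
```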
| 393 |
+
|
| 394 |
+
<table><tr><td>NoiseQA-na</td><td colspan="2">OIL</td><td colspan="2">w/o denoise</td></tr><tr><td>XLMR-base</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>asr</td><td>63.59</td><td>74.74</td><td>62.35</td><td>74.05</td></tr><tr><td>keyboard</td><td>72.04</td><td>82.90</td><td>72.18</td><td>83.21</td></tr><tr><td>translation</td><td>69.58</td><td>80.55</td><td>70.08</td><td>81.18</td></tr><tr><td>avg.</td><td>68.40</td><td>79.40</td><td>68.21</td><td>79.48</td></tr><tr><td>XLMR-large</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>asr</td><td>62.10</td><td>75.34</td><td>47.98</td><td>66.36</td></tr><tr><td>keyboard</td><td>74.96</td><td>86.61</td><td>74.43</td><td>86.24</td></tr><tr><td>translation</td><td>73.28</td><td>84.72</td><td>73.14</td><td>84.70</td></tr><tr><td>avg.</td><td>70.11</td><td>82.22</td><td>65.18</td><td>79.10</td></tr><tr><td>NoiseQA-syn</td><td colspan="2">OIL</td><td colspan="2">w/o denoise</td></tr><tr><td>XLMR-base</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>asr</td><td>70.14</td><td>81.97</td><td>69.55</td><td>81.59</td></tr><tr><td>keyboard</td><td>66.97</td><td>77.29</td><td>66.72</td><td>77.41</td></tr><tr><td>translation</td><td>69.13</td><td>80.31</td><td>69.16</td><td>80.19</td></tr><tr><td>avg.</td><td>68.75</td><td>79.86</td><td>68.48</td><td>79.73</td></tr><tr><td>XQuAD</td><td colspan="2">OIL</td><td colspan="2">w/o denoise</td></tr><tr><td>XLMR-base</td><td>EM</td><td>F1</td><td>EM</td><td>F1</td></tr><tr><td>en</td><td>72.83</td><td>83.75</td><td>72.91</td><td>83.89</td></tr><tr><td>es</td><td>60.14</td><td>77.37</td><td>59.55</td><td>77.33</td></tr><tr><td>de</td><td>58.18</td><td>74.25</td><td>58.63</td><td>74.48</td></tr><tr><td>el</td><td>56.58</td><td>73.05</td><td>56.13</td><td>73.15</td></tr><tr><td>ru</td><td>58.26</td><td>74.28</td><td>58.18</td><td>74.51</td></tr><tr><td>tr</td><td>52.38</td><td>67.89</td><td>51.57</td><td>68.01</td></tr><tr><td>ar</td><td>49.89</td><td>66.40</td><td>49.83</td><td>66.72</td></tr><tr><td>vi</td><td>54.99</td><td>73.83</td><td>55.41</td><td>74.11</td></tr><tr><td>th</td><td>63.59</td><td>72.22</td><td>62.21</td><td>71.58</td></tr><tr><td>zh</td><td>59.66</td><td>68.59</td><td>58.15</td><td>67.85</td></tr><tr><td>hi</td><td>51.04</td><td>67.38</td><td>51.09</td><td>68.17</td></tr><tr><td>avg.</td><td>57.96</td><td>72.64</td><td>57.61</td><td>72.71</td></tr></table>
|
| 395 |
+
|
| 396 |
+
Table 9: Effects of not filtering noisy labels in OIL for NoiseQA and XQuAD.
|
| 397 |
+
|
| 398 |
+

|
| 399 |
+
Figure 8: Effects of memory size $K$ on NoiseQA-na. The source model is XLMR-base tuned by xTune.
|
| 400 |
+
|
| 401 |
+
# C Results of RT methods on the Development Set of SQuAD
|
| 402 |
+
|
| 403 |
+
Fig. 7 shows the gains of each RT baseline over vanilla fine-tuning on the development set of SQuAD, demonstrating the effectiveness of the RT methods.
|
| 404 |
+
|
| 405 |
+
# D Effects of Memory Size $K$
|
| 406 |
+
|
| 407 |
+
Fig. 8 shows the effects of memory size $K$. We observe that a larger memory size improves the adaptation results when the learning rate is not too large, whereas with a large learning rate, a larger memory size can worsen the results.
|
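For intuition, the sketch below shows one simple way a memory of size $K$ could be realized: a FIFO buffer holding the $K$ most recent test batches. This is an assumption made only for illustration; how OIL actually stores and consumes the memory is specified in the main text.

```python
from collections import deque

# Illustrative FIFO memory of the K most recent test batches (assumption for
# exposition only; not the authors' implementation).
class TestTimeMemory:
    def __init__(self, k: int):
        self.buffer = deque(maxlen=k)  # once full, the oldest batch is evicted

    def add(self, batch) -> None:
        self.buffer.append(batch)

    def batches(self) -> list:
        return list(self.buffer)

memory = TestTimeMemory(k=5)
for step in range(7):
    memory.add({"step": step})
print([b["step"] for b in memory.batches()])  # [2, 3, 4, 5, 6]: only the last K = 5 batches are kept
```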
| 408 |
+
|
| 409 |
+
<table><tr><td>Dataset</td><td>NoiseQA</td><td>XQuAD</td><td>MLQA</td><td>HotpotQA</td><td>NaturalQA</td><td>NewsQA</td><td>SearchQA</td><td>TriviaQA</td></tr><tr><td>|Size|</td><td>1,190</td><td>1,190</td><td>4,517-11,590</td><td>5,901</td><td>12,836</td><td>4,212</td><td>16,980</td><td>7,785</td></tr><tr><td rowspan="6">OIL</td><td>K: 5</td><td>K: 5</td><td>K: 3</td><td>K: 3</td><td>K: 3</td><td>K: 3</td><td>K: 1</td><td>K: 1</td></tr><tr><td>γ: 0.5</td><td>γ: 0.5</td><td>γ: ∞</td><td>γ: ∞</td><td>γ: 0.5</td><td>γ: ∞</td><td>γ: ∞</td><td>γ: ∞</td></tr><tr><td>α: 0.99</td><td>α: 0.99</td><td>α: 0.99</td><td>α: 0.99</td><td>α: 0.99</td><td>α: 0.99</td><td>α: 1</td><td>α: 1</td></tr><tr><td>β: 1</td><td>β: 1</td><td>β: 1</td><td>β: 1</td><td>β: 1</td><td>β: 1</td><td>β: 1</td><td>β: 1</td></tr><tr><td>BATCH_SIZE: 8</td><td>BATCH_SIZE: 8</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td></tr><tr><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td></tr><tr><td rowspan="3">PL</td><td>K: 5</td><td>K: 5</td><td>K: 3</td><td>K: 1</td><td>K: 1</td><td>K: 1</td><td>K: 1</td><td>K: 1</td></tr><tr><td>BATCH_SIZE: 8</td><td>BATCH_SIZE: 8</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td></tr><tr><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 2e-7</td><td>LR: 1e-7</td><td>LR: 1e-7</td><td>LR: 1e-7</td><td>LR: 1e-7</td></tr><tr><td rowspan="3">Tent</td><td>K: 5</td><td>K: 5</td><td>K: 3</td><td>K: 1</td><td>K: 1</td><td>K: 1</td><td>K: 1</td><td>K: 1</td></tr><tr><td>BATCH_SIZE: 8</td><td>BATCH_SIZE: 8</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td><td>BATCH_SIZE: 16</td></tr><tr><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-6</td><td>LR: 1e-7</td><td>LR: 1e-7</td><td>LR: 1e-7</td><td>LR: 1e-7</td><td>LR: 1e-7</td></tr></table>
|
| 410 |
+
|
| 411 |
+
Table 10: Hyper-parameters for TTA baselines. For MRQA, we find that when PL and Tent use the same learning rate and batch size as OIL, the final results are poor, so we choose better hyper-parameters for PL and Tent, as shown in the table.
|
robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b629ceefb58fb30c2b6d2cedcda8f9b6a7dfa49279fd9eece9d41cde68db5577
|
| 3 |
+
size 747065
|
robustquestionansweringagainstdistributionshiftswithtesttimeadaptionanempiricalstudy/layout.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:725001830575904c5b0072f64058bc4a044fa97d5eff3711d1e2e8d99735e487
|
| 3 |
+
size 529174
|
robusttaskorienteddialoguegenerationwithcontrastivepretrainingandadversarialfiltering/a15e73ef-1aec-4cf2-ab25-137f1fe5c70c_content_list.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3b363ad197b2659cf4c100d5b214450b0009a8794f7e7922882fc764589f86a5
|
| 3 |
+
size 92268
|
robusttaskorienteddialoguegenerationwithcontrastivepretrainingandadversarialfiltering/a15e73ef-1aec-4cf2-ab25-137f1fe5c70c_model.json
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0e7f58403fd19dab4ad5852ab341c91fb79fa1f8b759a7fac08a036e93f754db
|
| 3 |
+
size 116494
|