SlowGuess committed
Commit c043936 (verified) · 1 parent: b0eab43

Add Batch c2e10b0e-8b80-47ac-8f9a-4ee55aa6bcba

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. abstractionnotmemorybertandtheenglisharticlesystem/f4e75b11-1b68-4891-86ae-b2154ac66500_content_list.json +3 -0
  2. abstractionnotmemorybertandtheenglisharticlesystem/f4e75b11-1b68-4891-86ae-b2154ac66500_model.json +3 -0
  3. abstractionnotmemorybertandtheenglisharticlesystem/f4e75b11-1b68-4891-86ae-b2154ac66500_origin.pdf +3 -0
  4. abstractionnotmemorybertandtheenglisharticlesystem/full.md +249 -0
  5. abstractionnotmemorybertandtheenglisharticlesystem/images.zip +3 -0
  6. abstractionnotmemorybertandtheenglisharticlesystem/layout.json +3 -0
  7. actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/4b553e45-b416-4cf8-a184-922c736ba3fe_content_list.json +3 -0
  8. actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/4b553e45-b416-4cf8-a184-922c736ba3fe_model.json +3 -0
  9. actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/4b553e45-b416-4cf8-a184-922c736ba3fe_origin.pdf +3 -0
  10. actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/full.md +469 -0
  11. actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/images.zip +3 -0
  12. actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/layout.json +3 -0
  13. adaptableadapters/1d1ffb58-64c7-4e7f-b5d1-fa9254c75cca_content_list.json +3 -0
  14. adaptableadapters/1d1ffb58-64c7-4e7f-b5d1-fa9254c75cca_model.json +3 -0
  15. adaptableadapters/1d1ffb58-64c7-4e7f-b5d1-fa9254c75cca_origin.pdf +3 -0
  16. adaptableadapters/full.md +260 -0
  17. adaptableadapters/images.zip +3 -0
  18. adaptableadapters/layout.json +3 -0
  19. aisfgabundantinformationslotfillinggenerator/3a5c13e9-f446-4491-8fd5-5f416433df0f_content_list.json +3 -0
  20. aisfgabundantinformationslotfillinggenerator/3a5c13e9-f446-4491-8fd5-5f416433df0f_model.json +3 -0
  21. aisfgabundantinformationslotfillinggenerator/3a5c13e9-f446-4491-8fd5-5f416433df0f_origin.pdf +3 -0
  22. aisfgabundantinformationslotfillinggenerator/full.md +202 -0
  23. aisfgabundantinformationslotfillinggenerator/images.zip +3 -0
  24. aisfgabundantinformationslotfillinggenerator/layout.json +3 -0
  25. aligningtosocialnormsandvaluesininteractivenarratives/801313c5-da51-4f59-a1c1-c34f96e38cf0_content_list.json +3 -0
  26. aligningtosocialnormsandvaluesininteractivenarratives/801313c5-da51-4f59-a1c1-c34f96e38cf0_model.json +3 -0
  27. aligningtosocialnormsandvaluesininteractivenarratives/801313c5-da51-4f59-a1c1-c34f96e38cf0_origin.pdf +3 -0
  28. aligningtosocialnormsandvaluesininteractivenarratives/full.md +0 -0
  29. aligningtosocialnormsandvaluesininteractivenarratives/images.zip +3 -0
  30. aligningtosocialnormsandvaluesininteractivenarratives/layout.json +3 -0
  31. allyoumayneedforvqaareimagecaptions/070b1b0d-8876-49d0-b66a-1d68d5dd2c92_content_list.json +3 -0
  32. allyoumayneedforvqaareimagecaptions/070b1b0d-8876-49d0-b66a-1d68d5dd2c92_model.json +3 -0
  33. allyoumayneedforvqaareimagecaptions/070b1b0d-8876-49d0-b66a-1d68d5dd2c92_origin.pdf +3 -0
  34. allyoumayneedforvqaareimagecaptions/full.md +459 -0
  35. allyoumayneedforvqaareimagecaptions/images.zip +3 -0
  36. allyoumayneedforvqaareimagecaptions/layout.json +3 -0
  37. ambipungeneratinghumorouspunswithambiguouscontext/4bcece5c-c02f-4b98-96cb-a24dd1fa23c3_content_list.json +3 -0
  38. ambipungeneratinghumorouspunswithambiguouscontext/4bcece5c-c02f-4b98-96cb-a24dd1fa23c3_model.json +3 -0
  39. ambipungeneratinghumorouspunswithambiguouscontext/4bcece5c-c02f-4b98-96cb-a24dd1fa23c3_origin.pdf +3 -0
  40. ambipungeneratinghumorouspunswithambiguouscontext/full.md +262 -0
  41. ambipungeneratinghumorouspunswithambiguouscontext/images.zip +3 -0
  42. ambipungeneratinghumorouspunswithambiguouscontext/layout.json +3 -0
  43. analyzingencodedconceptsintransformerlanguagemodels/acb8ed2b-8d9a-4c87-ab61-70550ef0179b_content_list.json +3 -0
  44. analyzingencodedconceptsintransformerlanguagemodels/acb8ed2b-8d9a-4c87-ab61-70550ef0179b_model.json +3 -0
  45. analyzingencodedconceptsintransformerlanguagemodels/acb8ed2b-8d9a-4c87-ab61-70550ef0179b_origin.pdf +3 -0
  46. analyzingencodedconceptsintransformerlanguagemodels/full.md +533 -0
  47. analyzingencodedconceptsintransformerlanguagemodels/images.zip +3 -0
  48. analyzingencodedconceptsintransformerlanguagemodels/layout.json +3 -0
  49. analyzingmodalityrobustnessinmultimodalsentimentanalysis/093a9f0e-2204-426d-b42c-001378b0e186_content_list.json +3 -0
  50. analyzingmodalityrobustnessinmultimodalsentimentanalysis/093a9f0e-2204-426d-b42c-001378b0e186_model.json +3 -0
abstractionnotmemorybertandtheenglisharticlesystem/f4e75b11-1b68-4891-86ae-b2154ac66500_content_list.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:eed30fc9a640e369ef238454c16a67b4709a697493ac9c567ea005176a9d1f87
size 61162
abstractionnotmemorybertandtheenglisharticlesystem/f4e75b11-1b68-4891-86ae-b2154ac66500_model.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a30ca37b21fdbea9036c355e9fa8ce3db9d8f7184655506512559315ef441670
size 72583
abstractionnotmemorybertandtheenglisharticlesystem/f4e75b11-1b68-4891-86ae-b2154ac66500_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb014ee4157a9266922289435067860a46dfb13d316530bab0b5b7d90520203e
size 178779
abstractionnotmemorybertandtheenglisharticlesystem/full.md ADDED
@@ -0,0 +1,249 @@
# Abstraction not Memory: BERT and the English Article System

# Harish Tayyar Madabushi $^{1,2}$ , Dagmar Divjak $^{3,4}$ and Petar Milin $^{3}$

$^{1}$ Department of Computer Science, University of Sheffield

$^{2}$ School of Computer Science, University of Birmingham

$^{3}$ Department of Modern Languages, University of Birmingham

$^{4}$ Department of English Language and Linguistics, University of Birmingham

H.TayyarMadabushi@sheffield.ac.uk

(D.Divjak, P.Milin)@bham.ac.uk

# Abstract

Article prediction is a task that has long defied accurate linguistic description. As such, it is ideally suited to evaluating models on their ability to emulate native-speaker intuition. To this end, we compare the performance of native English speakers and pre-trained models on the task of article prediction, set up as a three-way choice (a/an, the, zero). Our experiments show that BERT outperforms humans on this task across all articles. In particular, BERT is far superior to humans at detecting the zero article, possibly because zero articles are inserted using rules that the deep neural model can easily pick up. More interestingly, we find that BERT tends to agree more with annotators than with the corpus when inter-annotator agreement is high, but switches to agreeing more with the corpus as inter-annotator agreement drops. We contend that this alignment with annotators, despite BERT being trained on the corpus, suggests that BERT is not memorising article use but captures a high-level generalisation of article use akin to human intuition.

# 1 Introduction and Motivation

Pre-trained models, such as BERT (Devlin et al., 2018), RoBERTa (Liu et al., 2019) and, more recently, T5 (Raffel et al., 2020), are the state of the art across several tasks in computational linguistics. In addition, transformer-based models are known to have access to information as varied as part-of-speech information (Chrupała and Alishahi, 2019; Tenney et al., 2019b), parse trees (Hewitt and Manning, 2019), the NLP pipeline (Tenney et al., 2019a), and constructional information (Tayyar Madabushi et al., 2020). These models tend to perform so well that, on certain tasks, they outperform human baselines (Zhang et al., 2020).

In this work, we investigate how well language models, specifically BERT Large, perform on the linguistically interesting task of article prediction.

English article prediction, further discussed in Section 2, is a phenomenon that native speakers of English find almost trivial. At the same time, linguists find it particularly difficult to formulate the rules that govern article usage: article use cannot be captured by local co-occurrence but depends on the wider context, and often there is no one "right" article; multiple options are possible, albeit with slight differences in the meaning conveyed. Grammar correction systems prior to BERT struggled to reach acceptable levels of performance on article selection (detailed in Section 2). As we will show, BERT's performance on this task is superior to that of humans. Given this, it is interesting to investigate how BERT attains this level of accuracy and what the implications are for the system: does BERT manage to go beyond the local vicinity into the larger context to track the referent?

The current study compares the performance of transformer-based pre-trained models and humans in an attempt to explore how language models handle what is, in essence, a creative task, with an emphasis on how model performance changes with inter-annotator agreement. We also explicitly incorporate the plural indefinite or zero (Ø) article (detailed in Section 2), as in the sentence There are Ø merchant bankers who find it convenient to stir up Ø apprehension with a view to drumming up Ø business for themselves.

The flexibility that is inherent in article usage requires us to explore methods of evaluation that do not rely solely on accuracy. While the shortcomings of relying too heavily on accuracy-based metrics have been highlighted in prior work (see Section 3), these difficulties are accentuated by the presence of flexibility. Clearly, there is little need to require a model to output one specific class if people are comfortable with multiple options. As such, we evaluate performance based on the Matthews correlation coefficient between human annotators and model outputs at each level of inter-annotator agreement.

To this end, this work aims to answer the following questions: a) How well do language models perform on a task for which humans rely on intuition rather than deliberate reasoning, specifically article prediction? and b) How does this performance vary with increased flexibility in the article that can be used, as measured by inter-annotator agreement? So as to ensure reproducibility and to aid future research in this direction, we make our scripts freely available and our dataset, built from the British National Corpus (BNC) (BNC Consortium, 2007), available under the required licence<sup>1</sup>. Further details on the BNC are presented in Appendix A.

# 2 The English Article System

There are three articles in English: a) the definite article, the; b) the indefinite article, $a/an$; and c) the absence of an article, or the zero $(\emptyset)$ article (Swan and Walter, 1997).

There have been several sets of guidelines for the use of articles, starting with the early works by Huebner (1983, 1985) and Thomas (1989). The most general ones rely on a few parameters only, such as Hearer Knowledge (whether the interlocutor can be considered able to identify the referent) and Referent Specificity (whether a specific referent is identified), augmented with Number and Countability, while the more specific ones offer numerous semantic types and subtypes, bordering on the idiosyncratic; see the work by Swan and Walter (1997) for an overview. Although none of these variables, individually or in conjunction, can accurately predict article usage, recent work on the classification of a large, manually annotated sample has found that a hierarchical ordering of these same parameters, with Hearer Knowledge at the top, predicts article usage correctly in 93 percent of all cases that allow variation (about $15\%$ of all instances can be considered a set phrase in that only one article can be used, e.g., "one at a time") (Divjak et al., 2022).

However, deciding whether the interlocutor can be considered able to identify the referent involves world knowledge, including cultural knowledge; although both Sheffield and Birmingham are home to many universities, when we refer to the University of Sheffield/Birmingham we have one particular one in mind, which our interlocutor only knows if they are familiar with the local landscape. In addition, article usage appears to be a matter of what cognitive linguists would call construal, or the freedom to present a situation linguistically in different ways. Analysing three-alternative forced-choice data from 181 native speakers of English who were asked to insert articles that had been removed from longer (200-300 word) texts, Romain et al. (2022) relied on entropy to quantify the restrictiveness of the context and to identify types of contexts in which choice is allowed versus inhibited. They found that some contextual properties, such as Referent Specificity, are rather restrictive, leaving the speaker with little choice in terms of which article to use, while other contextual properties, such as Hearer Knowledge, are such that several articles are possible, albeit with slightly different semantic implications. In other words, only in situations where the referent is specific do native speakers tend to converge on the same article.

The English article system thus finds itself in the awkward position of its strongest predictor being open to interpretation. The freedom regarding the interpretation of the top predictor, and the semantic differences it entails, is possibly why second language learners whose first language does not include an article system find the article system notoriously difficult to master. The same can be expected to apply to computational systems, which tend to struggle to capture fine-grained meaning nuances, even though they have acquired world knowledge.

# 3 Related Work

Automatic article prediction has been the focus of study for several decades, starting with rule-based systems aimed at improving machine translation (Murata, 1993; Bond et al., 1994). Subsequent machine learning models for article prediction included work by Knight and Chander (1994), who use decision trees, and Han et al. (2006), who use a maximum entropy classifier to select among a/an, the, or the zero article.

Article prediction was then grouped with similar phenomena, such as prepositions and noun numbers, as part of the shared tasks on Grammatical Error Correction at CoNLL-2013 (Ng et al., 2013) and CoNLL-2014 (Ng et al., 2014). These shared tasks, and their associated datasets, significantly increased interest in article prediction, albeit as part of the broader problem of grammatical error correction. More recent methods, such as work by Lichtarge et al. (2020), make use of advances in neural machine translation for grammatical error correction. For an up-to-date and extensive treatment of grammatical error correction, including article prediction, we direct readers to the tutorial by Grundkiewicz et al. (2020).

Of relevance to the second question we aim to answer, that of how annotator agreement affects model performance, is the work by Lee et al. (2009), who study the various factors that influence the level of human agreement. Additionally, Ribeiro et al. (2020) show that state-of-the-art models are better evaluated using a checklist as opposed to traditional metrics, a notion that we build on in our experimental procedure (Section 4).

# 4 Methodology

As mentioned in Section 1, our goal is to understand how language models perform on the task of article prediction and how their performance varies with inter-annotator agreement. Our overall methodology for answering these questions involved the following steps:

1. We start by explicitly adding the null article $(\emptyset)$ to the British National Corpus (BNC).
2. We then set up the task of classifying articles as a token classification (sequence labelling) task and train a BERT Base model, using 150,000 examples as the training set.
3. Using the results of this model, we construct a set of around 2,500 examples, about $30\%$ of which are selected to be incorrectly tagged by BERT Base. This ensures that the evaluation set contains examples at different levels of difficulty. These 2,500 examples are annotated by paid annotators, thus providing us with an evaluation set.
4. We compare the performance of human annotators to that of BERT Large, trained on the training set of 150,000 examples from the BNC.

These results are presented in Section 5 along with an analysis. The following sections detail the steps listed above.

# 4.1 Data Preparation and Zero Article Tagging

Table 1 provides examples of when the zero article is used, and we include the scripts used to add zero articles to sentences in the code released with this work.

| Referent Specificity | Noun Count | Example |
| --- | --- | --- |
| Not specific, known to the hearer | Uncountable | Ø Pasta is an Italian commodity. |
| Not specific, known to the hearer | Plural | Ø Tigers are magnificent animals. |
| Not specific, not known to the hearer | Uncountable | Can I order Ø rice? |
| Not specific, not known to the hearer | Plural | I would like Ø better shoes. |
| Specific, not known to the hearer | Uncountable | Ø Soup was served with the meal. |
| Specific, not known to the hearer | Plural | Ø Engineers were called to the scene. |

Table 1: Examples of some occurrences of the zero article, also known as the plural indefinite article.

All training and evaluation examples are created to consist of three sentences: the target sentence, with one article blanked out, and one preceding and one succeeding sentence with no words blanked out. We provide this context to ensure that there is sufficient information available to correctly predict an article. Example 1 illustrates one element of the data used.

(1) It is a local landmark which received $\varnothing$ national and international recognition and helped turn the tide against the thoughtless demolition of the Sixties. Still with Booth Shaw, Denison produced ______ radical proposal for $\varnothing$ flats for $\varnothing$ single people in the heart of the city centre. The site was a rambling and derelict pub, the Royal Hotel, which was originally a Georgian coaching inn.
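
To make the construction concrete, the following is a minimal sketch of how one such example might be built. It is our illustration, not the released scripts; `sentences` is a hypothetical list of corpus sentences in which zero articles have already been marked with "Ø" by the rule-based tagging step:

```python
import random

ARTICLES = {"a", "an", "the"}

def make_cloze_example(sentences, i):
    """Blank one article in sentence i, keeping one sentence of context on each side."""
    target = sentences[i].split()
    # Candidate slots: overt articles plus explicitly tagged zero articles.
    slots = [j for j, tok in enumerate(target)
             if tok.lower() in ARTICLES or tok == "Ø"]
    if not slots:
        return None  # nothing to blank in this sentence
    j = random.choice(slots)
    tok = target[j]
    label = "a/an" if tok.lower() in {"a", "an"} else ("zero" if tok == "Ø" else "the")
    blanked = " ".join(target[:j] + ["______"] + target[j + 1:])
    return {"text": (sentences[i - 1], blanked, sentences[i + 1]), "label": label}
```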

# 4.2 Model Selection and Training

Although masked language modelling, which involves "filling in the blanks", is most similar to the task at hand, the introduction of the zero article makes this impractical, as pre-trained models are not trained on the zero article. Given these limitations, we model this as a token classification (sequence labelling) task where, as is typical of such tasks, the output sequence is required to consist of the token 'A', 'The' or 'Zero' based on the corresponding article, or the token 'O' otherwise. As such, the model makes a prediction associated with every input token, not just the one that is masked.
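
As an illustration of this encoding, here is a short sketch using the Hugging Face Transformers tokenizer. The tag names follow the description above, while the alignment convention (ignoring special tokens and continuation wordpieces in the loss) is our assumption rather than the authors' released code:

```python
from transformers import AutoTokenizer

LABELS = {"O": 0, "A": 1, "The": 2, "Zero": 3}
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

def encode(words, word_tags):
    """Align per-word article tags to BERT wordpieces for token classification."""
    enc = tokenizer(words, is_split_into_words=True,
                    truncation=True, max_length=150)
    labels, prev = [], None
    for word_id in enc.word_ids():
        if word_id is None or word_id == prev:
            labels.append(-100)  # special tokens / continuation pieces: ignored in the loss
        else:
            labels.append(LABELS[word_tags[word_id]])
        prev = word_id
    enc["labels"] = labels
    return enc

words = ["The", "site", "was", "a", "rambling", "and", "derelict", "pub"]
tags  = ["The", "O",    "O",   "A", "O",        "O",   "O",        "O"]
print(encode(words, tags)["labels"])
```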

Based on initial experimentation with different models and hyperparameters (i.e., manual tuning), we settled on the use of BERT fine-tuned on a training set consisting of 150,000 examples for one epoch, based on model performance on a development set (consisting of 30,000 examples). More epochs quickly lead to overfitting. RoBERTa (trained for 6 epochs), despite being considered a more optimised version of BERT, surprisingly does not perform as well as BERT.

We first use BERT Base, trained on 150,000 examples for 1 epoch, to predict all articles in the target (central) sentence. Based on this initial classification, we pick 2,500 examples for manual tagging, such that approximately $30\%$ of the examples were incorrectly tagged by BERT Base. We perform this additional step to ensure that we pick some examples that are 'difficult', as determined by BERT Base's inability to get them right. Finally, BERT Large, trained on the same set of examples, is used to predict the articles presented to human annotators. In both cases, we use the models implemented by Wolf et al. (2020). These results and an analysis are presented in Section 5. Model and hyperparameters are presented in Appendix B.
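
The 'difficult example' selection step can be sketched as follows; `predict_article` is a hypothetical wrapper around the fine-tuned BERT Base model, and only the pool size and the roughly 30/70 split come from the text:

```python
import random

def build_annotation_pool(examples, predict_article, n_total=2500, frac_wrong=0.3):
    """Pick examples for human annotation, ~30% of which BERT Base got wrong."""
    wrong, right = [], []
    for ex in examples:
        (wrong if predict_article(ex) != ex["label"] else right).append(ex)
    n_wrong = int(n_total * frac_wrong)
    pool = random.sample(wrong, n_wrong) + random.sample(right, n_total - n_wrong)
    random.shuffle(pool)
    return pool
```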

# 4.3 Human Annotation

Manual annotation took the format of an online survey modelled after a cloze test. Participants were presented with individual examples consisting of three sentences each, wherein the central sentence had exactly one article omitted and replaced with a blank space, as illustrated in Example 1 above. Participants were required to select which article had been omitted from a multiple-choice list presented below the sentences.

A total of 2,500 sentences were tagged, with each participant tagging 160 randomly selected items. The aim was for each sentence to be tagged by five different participants. Further details on the process, including instructions, recruitment, payment and approvals, are provided in Appendix C.

# 5 Empirical Evaluation and Discussion

The results presented in this section were obtained by evaluating $\mathrm{BERT_L}$ on the same gap-filling exercise that was presented to humans. $\mathrm{BERT_L}$ was fine-tuned 5 times on 150,000 training examples and evaluated on a development set which, like the training set, was extracted from the corpus and not human annotated. The training data consisted of 150,000 examples, of which about 135,000 were "the", 60,000 "a" and 146,000 "zero". The development set consisted of 30,000 examples, of which about 25,000 were "the", 12,000 were "a" and 25,000 were "zero".

The best performing run on this development set was used for the human-annotated test set. Of the 2,500 examples picked for manual annotation, 2,383 were annotated by the required five annotators, and this subset was used for evaluation. This evaluation set consists of about 1,200 sentences that were annotated by the majority of annotators with "the", 500 with "a", and about 550 with "zero". A further 108 sentences had multiple labels receiving the same number of votes and were thus tied. The complete evaluation set consists of about 150,000 tokens.

| Subset | Comparison | The | A/An | Zero (Ø) |
| --- | --- | --- | --- | --- |
| All Data (2384) | $\mathrm{BERT_L}$ vs 4 Human | 0.580 | 0.659 | 0.589 |
| | $\mathrm{BERT_L}$ vs Corpus | 0.631 | 0.658 | 0.731 |
| | 4 Human vs Corpus | 0.553 | 0.589 | 0.590 |
| | $\mathrm{BERT_L}$ vs Control | 0.488 | 0.573 | 0.514 |
| | 4 Human vs Control | 0.490 | 0.578 | 0.515 |
| | Corpus vs Control | 0.440 | 0.519 | 0.501 |

Table 2: Phi coefficient $(\phi)$ of correlation between four human annotators (4 Human), BERT Large, a fifth annotator used as a human baseline (Control) and the corpus, for each article. Number of examples in parentheses.

Tables 2 and 3 present the Phi coefficients (Matthews correlation coefficient) between four human annotators (4 Human), different models, a fifth human used as a control (Control) and the corpus. Table 2 presents the Phi coefficients across all of the data. Each block in Table 3 presents Phi correlations between subsets of examples on which either all four annotators agree (4 Agree), exactly three agree (3 Agree), or exactly two agree (2 Agree). In all cases other than where all data (Table 2) is presented, we exclude from our analysis those examples where there is a tie between different articles. Importantly, this results in a different number of examples at each level of agreement (example counts listed in parentheses). Finally, the last three rows in each block, which provide the correlations with the fifth annotator, serve as a baseline or control for comparison.

Across all data, $\mathrm{BERT_L}$ has a higher correlation with the corpus ($\mathrm{BERT_L}$ vs Corpus) than do the four human annotators (4 Human vs Corpus) across all articles. While this can be ascribed to the fact that BERT was fine-tuned on a fairly large training set of 150,000 examples, BERT Large also has a higher correlation with the four annotators ($\mathrm{BERT_L}$ vs 4 Human) than they do with the corpus (4 Human vs Corpus) across all but one of the articles, on which it misses out by an insignificant margin.

| Agreement | Comparison | The | A/An | Zero (Ø) |
| --- | --- | --- | --- | --- |
| 4 Agree (984) | $\mathrm{BERT_L}$ vs 4 Human | 0.810 | 0.869 | 0.792 |
| | $\mathrm{BERT_L}$ vs Corpus | 0.738 | 0.777 | 0.755 |
| | 4 Human vs Corpus | 0.787 | 0.822 | 0.767 |
| | $\mathrm{BERT_L}$ vs Control | 0.645 | 0.721 | 0.621 |
| | 4 Human vs Control | 0.713 | 0.770 | 0.667 |
| | Corpus vs Control | 0.600 | 0.665 | 0.592 |
| 3 Agree (886) | $\mathrm{BERT_L}$ vs 4 Human | 0.545 | 0.617 | 0.626 |
| | $\mathrm{BERT_L}$ vs Corpus | 0.605 | 0.639 | 0.719 |
| | 4 Human vs Corpus | 0.469 | 0.554 | 0.639 |
| | $\mathrm{BERT_L}$ vs Control | 0.427 | 0.525 | 0.511 |
| | 4 Human vs Control | 0.456 | 0.581 | 0.542 |
| | Corpus vs Control | 0.374 | 0.489 | 0.524 |
| 2 Agree (168) | $\mathrm{BERT_L}$ vs 4 Human | 0.227 | 0.468 | 0.390 |
| | $\mathrm{BERT_L}$ vs Corpus | 0.501 | 0.549 | 0.692 |
| | 4 Human vs Corpus | 0.280 | 0.344 | 0.403 |
| | $\mathrm{BERT_L}$ vs Control | 0.269 | 0.338 | 0.283 |
| | 4 Human vs Control | 0.204 | 0.256 | 0.323 |
| | Corpus vs Control | 0.295 | 0.334 | 0.200 |

Table 3: Phi coefficients $(\phi)$ at different levels of inter-annotator agreement. See text for details.

Although BERT has a high correlation with the corpus across all data, a fine-grained analysis based on the possible level of flexibility in article use, as determined by inter-annotator agreement (Table 3), shows that this is not always the case. Surprisingly, when there is least flexibility (i.e. when all four annotators agree), BERT agrees more with human annotators than with the corpus. In fact, in this case ('4 Agree' in Table 3) the agreement between BERT and the four annotators is higher than between any other pair. Also interesting is the fact that BERT switches back to being more highly correlated with the corpus when there is any possibility of flexibility (i.e. inter-annotator agreement is not perfect). This is contrary to what we expect: as BERT is trained on the corpus, we expect to see a higher correlation between BERT and the corpus across all cases. This behaviour suggests that BERT has access to a high-level generalised representation of article use that cannot be ascribed to memory.

BERT also has a significantly higher correlation with the corpus on the null article than do either the four human annotators or the fifth control annotator, except in the case where there is complete agreement between the four annotators (4 Agree). We believe that this is a result of the fact that we insert the null article using a fixed set of rules that deep neural models can easily pick up. Human annotators, on the other hand, seem to find it harder to identify this addition to the article system, except in the more obvious cases.

# 6 Conclusions and Future Work

In this work, we aimed to study the capabilities of pre-trained language models, specifically BERT, on the linguistically relevant task of article prediction, which native speakers are intuitively good at but linguists have been unable to formalise adequately, while focusing on how these abilities change with increased flexibility in article use. Our results show that BERT has a very high correlation with human annotators when there is least flexibility, as measured by inter-annotator agreement, but switches to agreeing with the corpus when there is flexibility in article use. These results, we contend, point to BERT having access to a high-level generalised representation of article use that is distinct from memorisation.

We intend to focus future work on better understanding the specifics of this high-level representation of article use contained within BERT. Also, the current study is limited in the languages explored, and we intend to address this limitation by studying similar intuitive phenomena that evade linguistic description in languages other than English; an example would be aspect in Slavonic languages. Finally, we intend to extend our analysis by comparing BERT's output 'confidence' with annotator agreement, similar to methods presented by Divjak et al. (2016).

# Acknowledgements

We would like to thank Christian Adam, who developed the script for null article tagging, and Daisy Collins for help with setting up and collecting annotations on Qualtrics.

The manual annotation presented in this work was made possible by the research grant awarded to Harish Tayyar Madabushi by the Paul and Yuanbi Ramsay Research Fund (School of Computer Science, The University of Birmingham).

This work was also partially supported by the UK EPSRC grant EP/T02450X/1.

# References

BNC Consortium. 2007. British National Corpus. Oxford Text Archive Core Collection.

Francis Bond, Kentaro Ogura, and Satoru Ikehara. 1994. Countability and number in Japanese to English machine translation. In COLING 1994 Volume 1: The 15th International Conference on Computational Linguistics.

Grzegorz Chrupała and Afra Alishahi. 2019. Correlating neural and symbolic representations of language. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 2952-2962, Florence, Italy. Association for Computational Linguistics.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. BERT: Pre-training of deep bidirectional transformers for language understanding. CoRR, abs/1810.04805.

Dagmar Divjak, Ewa Dabrowska, and Antti Arppe. 2016. Machine meets man: Evaluating the psychological reality of corpus-based probabilistic models. Cognitive Linguistics, 27(1):1-33.

Dagmar Divjak, Laurence Romain, and Petar Milin. 2022. From their point of view: The article category as a hierarchically structured referent tracking system. Under revision, Linguistics: An Interdisciplinary Journal of the Language Sciences.

Roman Grundkiewicz, Christopher Bryant, and Mariano Felice. 2020. A crash course in automatic grammatical error correction. In Proceedings of the 28th International Conference on Computational Linguistics: Tutorial Abstracts, pages 33-38, Barcelona, Spain (Online). International Committee for Computational Linguistics.

Na-Rae Han, Martin Chodorow, and Claudia Leacock. 2006. Detecting errors in English article usage by non-native speakers. Natural Language Engineering, 12(2):115-129.

John Hewitt and Christopher D. Manning. 2019. A structural probe for finding syntax in word representations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4129-4138, Minneapolis, Minnesota. Association for Computational Linguistics.

Thomas G. Huebner. 1983. A longitudinal analysis of the acquisition of English by an adult Hmong speaker. Ph.D. thesis, The University of Hawaii at Manoa.

Thom Huebner. 1985. System and variability in interlanguage syntax. Language Learning, 35(2):141-163.

Kevin Knight and Ishwar Chander. 1994. Automated postediting of documents. In Proceedings of the Twelfth AAAI National Conference on Artificial Intelligence, AAAI'94, pages 779-784. AAAI Press.

John Lee, Joel Tetreault, and Martin Chodorow. 2009. Human evaluation of article and noun number usage: Influences of context and construction variability. In Proceedings of the Third Linguistic Annotation Workshop (LAW III), pages 60-63, Suntec, Singapore. Association for Computational Linguistics.

Jared Lichtarge, Chris Alberti, and Shankar Kumar. 2020. Data weighted training strategies for grammatical error correction. Transactions of the Association for Computational Linguistics, 8:634-646.

Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. CoRR, abs/1907.11692.

M. Murata. 1993. Determination of referential property and number of nouns in Japanese sentences for machine translation into English. In Proceedings of the 5th International Conference on Theoretical and Methodological Issues in Machine Translation, Kyoto, Japan, pages 218-225.

Hwee Tou Ng, Siew Mei Wu, Ted Briscoe, Christian Hadiwinoto, Raymond Hendy Susanto, and Christopher Bryant. 2014. The CoNLL-2014 shared task on grammatical error correction. In Proceedings of the Eighteenth Conference on Computational Natural Language Learning: Shared Task, pages 1-14, Baltimore, Maryland. Association for Computational Linguistics.

Hwee Tou Ng, Siew Mei Wu, Yuanbin Wu, Christian Hadiwinoto, and Joel Tetreault. 2013. The CoNLL-2013 shared task on grammatical error correction. In Proceedings of the Seventeenth Conference on Computational Natural Language Learning: Shared Task, pages 1-12, Sofia, Bulgaria. Association for Computational Linguistics.

Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21(140):1-67.

Marco Tulio Ribeiro, Tongshuang Wu, Carlos Guestrin, and Sameer Singh. 2020. Beyond accuracy: Behavioral testing of NLP models with CheckList. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4902-4912, Online. Association for Computational Linguistics.

Laurence Romain, Petar Milin, and Dagmar Divjak. 2022. Ruled by construal? Framing article choice in English. Submitted.

M. Swan and C. Walter. 1997. How English Works: A Grammar Practice Book; with Answers. Oxford English. Oxford University Press.

Harish Tayyar Madabushi, Laurence Romain, Dagmar Divjak, and Petar Milin. 2020. CxGBERT: BERT meets construction grammar. In Proceedings of the 28th International Conference on Computational Linguistics, pages 4020-4032, Barcelona, Spain (Online). International Committee on Computational Linguistics.

Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019a. BERT rediscovers the classical NLP pipeline. CoRR, abs/1905.05950.

Ian Tenney, Patrick Xia, Berlin Chen, Alex Wang, Adam Poliak, R. Thomas McCoy, Najoung Kim, Benjamin Van Durme, Sam Bowman, Dipanjan Das, and Ellie Pavlick. 2019b. What do you learn from context? Probing for sentence structure in contextualized word representations. In International Conference on Learning Representations.

Margaret Thomas. 1989. The acquisition of English articles by first- and second-language learners. Applied Psycholinguistics, 10(3):335-355.

Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.

Zhuosheng Zhang, Junjie Yang, and Hai Zhao. 2020. Retrospective reader for machine reading comprehension.

# A The BNC

The dataset used in the experiments presented in this work is extracted from the British National Corpus (BNC), distributed by the University of Oxford on behalf of the BNC Consortium, and is consistent with its intended use. We extract sentences from both the spoken (BNC 2014 release) and the written (BNC 1994 release) versions of the BNC. Examples cited within the paper have been extracted from the BNC and all rights in the texts cited are reserved. We make use of the BNC to ensure that we use a well-balanced data source that does not uniquely identify individuals or include offensive content. Detailed statistics pertaining to the BNC are available on the BNC website<sup>2</sup>.

The BNC is available under the BNC User Licence<sup>3</sup>, and given that we build our dataset from the BNC, access to our dataset is subject to access to the BNC.

# B Model, Training, Hyperparameter and Hardware Details

For our experiments, we make use of BERT Base, which consists of 110 million parameters, and BERT Large, which consists of 340 million parameters. We use the default hyperparameters for both models, except in changing the number of epochs to 1 and the maximum input sequence length to 150. This was based on our initial experimentation, wherein we found that more epochs quickly lead to overfitting. In particular, we run our experiments using the Hugging Face Transformers implementation available online<sup>4</sup>.
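
A sketch of the corresponding fine-tuning setup with the Hugging Face Trainer follows. The toy dataset is a hypothetical stand-in for the BNC-derived data; only the single epoch, the four-label scheme and the choice of models come from the text above:

```python
import torch
from torch.utils.data import Dataset
from transformers import (AutoModelForTokenClassification, Trainer,
                          TrainingArguments)

class ToyArticleDataset(Dataset):
    """Hypothetical stand-in for the BNC-derived token-classification data."""
    def __init__(self, n=8):
        self.items = [{
            "input_ids": torch.tensor([101, 1996, 2609, 102]),  # toy wordpiece ids
            "attention_mask": torch.tensor([1, 1, 1, 1]),
            "labels": torch.tensor([-100, 2, 0, -100]),         # O=0, A=1, The=2, Zero=3
        } for _ in range(n)]

    def __len__(self):
        return len(self.items)

    def __getitem__(self, i):
        return self.items[i]

model = AutoModelForTokenClassification.from_pretrained(
    "bert-large-uncased", num_labels=4)
args = TrainingArguments(output_dir="article-prediction",
                         num_train_epochs=1)  # one epoch, as in Appendix B
trainer = Trainer(model=model, args=args, train_dataset=ToyArticleDataset())
trainer.train()
```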

Models were trained using a Tesla V100 GPU, and the entire training and optimisation process took approximately forty hours.

Models were run multiple times, each with a different random seed, so as to avoid local minima. In each case, models were evaluated on the development set which, like the training set, was extracted from the corpus and not manually annotated. The best performing model on the development set was used for subsequent experiments. The results over 10 different random seeds on the development set for BERT Base are presented in Table 4.

| Run No. | Dev F1 |
| --- | --- |
| 1 | 0.8940 |
| 2 | 0.8936 |
| 3 | 0.8953 |
| 4 | 0.8942 |
| 5 | 0.8957 |
| 6 | 0.8930 |
| 7 | 0.8941 |
| 8 | 0.8947 |
| 9 | 0.8936 |
| 10 | 0.8944 |

Table 4: Results over 10 different random seeds on the development set for BERT Base, used to pick the best run for subsequent experiments. We note that the variation in results across random seeds is not significant, owing to the large training set used.

We calculate the Phi coefficients $(\phi)$ in R (version 4.0.3) using the psych package (version 2.0.9).
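
For readers working in Python, an equivalent per-article computation is sketched below with made-up label arrays; the φ coefficient of two binary variables is the Matthews correlation coefficient:

```python
from sklearn.metrics import matthews_corrcoef

# Hypothetical article labels from two sources,
# e.g., the annotator majority vote and BERT Large.
human = ["the", "a/an", "zero", "the", "zero", "a/an", "the"]
bert  = ["the", "a/an", "the",  "the", "zero", "zero", "the"]

# Per article: binarise "is this article vs. is not" for both
# sources, then compute the MCC between the two binary series.
for article in ("the", "a/an", "zero"):
    h = [lab == article for lab in human]
    b = [lab == article for lab in bert]
    print(article, round(matthews_corrcoef(h, b), 3))
```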

# C Annotation Details

The annotation was done using Qualtrics, and participants were recruited through Prolific. Each participant was compensated £3.75 for annotating approximately 160 examples, which took participants an average of 42 minutes, a little over the 30 minutes we estimated it would take. We recruited a total of 108 annotators, of whom 68 were female and 40 were male. Most annotators had a Bachelor's degree or had attended some college, and close to $65\%$ of them were between the ages of 20 and 40.

Participants, who were all native speakers of British English residing in the UK or Ireland (due to the use of the BNC), were instructed to read all three sentences before choosing which article they would fill the gap with. Four quality control questions were included in order to make sure that participants were paying attention.

The exact quality control questions were chosen following a pilot study run on 15 participants: a manual analysis of these results by linguists indicated that those who failed to correctly answer any one of these quality control questions, considered to be relatively straightforward, seemed to do little better than chance overall. If any one of the quality control questions was answered incorrectly, participants were not allowed to continue with the survey.

The risks associated with annotation are twofold. The first is the risk of annotators not being representative of the general population. As such, we placed no restrictions on the demographics of our annotators except as required by the study. That is, we recruited fluent English speakers from the UK and Ireland, to ensure that they speak British English, consistent with our use of the BNC. The second risk is that of annotators not being treated fairly. To ensure that this was not the case, we paid annotators a sum of £3.75 for what we estimated, based on our internal trials, would constitute 30 minutes of work. In addition, data collection was run with the approval of the ethics committee at the University.

# C.1 Instructions to Annotators

Thank you for agreeing to take part in this study. For participating in the study you will earn £3.75. This study is run with the approval of the ethics committee at the University.

If you have any questions about the survey please contact me, Dr Harish Tayyar Madabushi, at: H.TayyarMadabushi.1@bham.ac.uk.

# Instructions

Please read these instructions carefully before continuing to fill in this survey.

In this study you will be presented with three sentences on each trial. In the middle sentence, one word is missing and it is your task to provide it; it can be either a(n), the or ZERO. In the first and last sentence, all words are provided. Please read all three sentences before filling the gap.

# Example

Consider the following example, where the special character $\varnothing$ represents locations where an article could have occurred, but, in this particular case, does not:

But there is no escape for $\varnothing$ non-runners, who are required to sign up for $\varnothing$ light duties. That takes ______ care of Sunday. We cannot refuse, because we are in $\varnothing$ awe of the formidable women running the PTA.

You are required to fill in the ______ with one of:

1. a/an
2. the
3. Zero $(\emptyset)$

In the example above, the correct answer is Zero $(\emptyset)$.

# Instructions

This survey consists of approximately 170 questions and should take you about 30 minutes to complete.

IMPORTANT: Some of these questions - the quality check questions - will be used to perform a quality check and will be presented at random points in this survey. If you get too many of the quality check questions incorrect, your submission may be rejected. Please pay attention to the answers you provide, as rejected submissions are not eligible for payment.

Thank you very much for taking the time to participate in this study. You will first need to answer some questions about your background, followed by a few benchmark questions, before you start on the bulk of the survey.
abstractionnotmemorybertandtheenglisharticlesystem/images.zip ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b76449baec11ff79dba2ba71bd17a660f88e3b55efe6994fcfebcc921752b5cc
size 166854
abstractionnotmemorybertandtheenglisharticlesystem/layout.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:438a46f32a72b14d6f687e57483f1e6732d4cf2956383f3033f36d5a5ddaf6cb
size 243349
actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/4b553e45-b416-4cf8-a184-922c736ba3fe_content_list.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af0c202b4b7db8a3caff7d36cb5cf04e20a04cb5f1e49e1492ed1674c1834c7b
size 105071
actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/4b553e45-b416-4cf8-a184-922c736ba3fe_model.json ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4bfec093523e593ca80d14a0bc120b783bc1339c73d23a3fa2a1a0d3650d9bf3
size 134215
actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/4b553e45-b416-4cf8-a184-922c736ba3fe_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a8859f7702384436f83692ea617d689c2ad8f63774c7eae7eedfa677ea6dd9a
size 9932073
actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/full.md ADDED
@@ -0,0 +1,469 @@
# ACTUNE: Uncertainty-Based Active Self-Training for Active Fine-Tuning of Pretrained Language Models

# Yue Yu $^{1}$ , Lingkai Kong $^{1}$ , Jieyu Zhang $^{2}$ , Rongzhi Zhang $^{1}$ , Chao Zhang $^{1}$

$^{1}$ Georgia Institute of Technology, Atlanta, GA $^{2}$ University of Washington, Seattle, WA

{yueyu, lkkong, rongzhi.zhang, chaozhang}@gatech.edu, jieyuz2@cs.washington.edu

# Abstract

While pre-trained language model (PLM) fine-tuning has achieved strong performance in many NLP tasks, the fine-tuning stage can still be demanding in terms of labeled data. Recent works have resorted to active fine-tuning to improve the label efficiency of PLM fine-tuning, but none of them investigate the potential of unlabeled data. We propose ACTUNE, a new framework that leverages unlabeled data to improve the label efficiency of active PLM fine-tuning. ACTUNE switches between data annotation and model self-training based on uncertainty: it selects high-uncertainty unlabeled samples for active annotation and low-uncertainty ones for model self-training. Under this framework, we design (1) a region-aware sampling strategy that reduces redundancy when actively querying for annotations and (2) a momentum-based memory bank that dynamically aggregates the model's pseudo-labels to suppress label noise in self-training. Experiments on 6 text classification datasets show that ACTUNE outperforms the strongest active learning and self-training baselines and improves the label efficiency of PLM fine-tuning by $56.2\%$ on average. Our implementation is available at https://github.com/yueyu1030/actune.

# 1 Introduction

Fine-tuning pre-trained language models (PLMs) has achieved much success in natural language processing (NLP) (Devlin et al., 2019; Liu et al., 2019; Brown et al., 2020). One benefit of PLM fine-tuning is the promising performance it offers when consuming only a few labeled data (Bansal et al., 2020; Gao et al., 2021). However, there are still significant gaps between few-shot and fully-supervised PLM fine-tuning in many tasks. Moreover, the performance of few-shot PLM fine-tuning can be sensitive to the choice of training data (Bragg et al., 2021). There is therefore a crucial need for approaches that make PLM fine-tuning more label-efficient and robust to the selection of training data, especially for applications where labeled data are scarce and expensive to obtain.

Towards this goal, researchers have recently resorted to active fine-tuning of PLMs and achieved performance comparable to fully-supervised methods with far fewer annotated samples (Ein-Dor et al., 2020; Margatina et al., 2021a,b; Yuan et al., 2020). Nevertheless, they usually neglect unlabeled data, which can be useful for improving the label efficiency of PLM fine-tuning (Du et al., 2021). To incorporate unlabeled data into active learning, efforts have been made in the semi-supervised active learning literature (Wang et al., 2016; Rottmann et al., 2018; Simeoni et al., 2020). However, the query strategies proposed in these works can return highly redundant samples due to limited representation power, resulting in suboptimal label efficiency. Moreover, they usually rely on pseudo-labeling to utilize unlabeled data, which requires greater (yet often absent) care to denoise the pseudo-labels; otherwise, the errors could accumulate and hurt model performance. This issue can be even more severe for PLMs, as the fine-tuning process is often sensitive to different weight initializations and data orderings (Dodge et al., 2020). Thus, it remains open and challenging to design a robust and label-efficient method for active PLM fine-tuning.

To tackle the above challenges, we propose ACTUNE, a new method that improves the label efficiency and robustness of active PLM fine-tuning. Based on the estimated model uncertainty, ACTUNE tightly couples active learning with self-training in each learning round: (1) when the average uncertainty of a region is low, we trust the model's predictions and select its most certain predictions within the region for self-training; (2) when the average uncertainty of a region is high, indicating inadequate observations for parameter learning, we actively annotate its most uncertain samples within the region to improve model performance. Different from existing AL methods that only leverage uncertainty for querying labels, our uncertainty-driven self-training paradigm gradually leverages unlabeled data with low uncertainty via self-training, while reducing the chance of error propagation triggered by highly-uncertain mislabeled data.

To further boost model performance for ACTUNE, we design two techniques to improve the query strategy and suppress label noise, namely region-aware sampling (RS) and a momentum-based memory bank (MMB). Inspired by the fact that existing uncertainty-based AL methods often end up choosing uncertain yet repetitive data (Ein-Dor et al., 2020; Margatina et al., 2021b), we design the region-aware sampling technique to promote both diversity and representativeness by leveraging the representation power of PLMs. Specifically, we first estimate the uncertainties of the unlabeled data with PLMs, then cluster the data using their PLM representations and weigh the data by the corresponding uncertainty. Such a clustering scheme partitions the embedding space into small sub-regions with an emphasis on highly-uncertain samples. Finally, by sampling over multiple high-uncertainty regions, our strategy selects data with high uncertainty and low redundancy.

To rectify the erroneous pseudo-labels derived by self-training, we design a simple but effective way to select low-uncertainty data for self-training. Our method is motivated by the fact that fine-tuning PLMs suffers from instability issues: different initializations and data orders can lead to large variance in model performance (Dodge et al., 2020; Zhang et al., 2020b; Mosbach et al., 2021). However, previous approaches only select pseudo-labeled data based on the prediction of the current round and are thus less reliable. In contrast, we maintain a dynamic memory bank to save the predictions of unlabeled samples for later use. We propose a momentum updating method that dynamically aggregates the predictions from preceding rounds (Laine and Aila, 2016) and selects low-uncertainty samples based on the aggregated prediction. As a result, only the samples with high prediction confidence over multiple rounds will be used for self-training, which mitigates the issue of label noise. We highlight that our active self-training approach is an efficient substitute for existing AL methods, requiring little extra computational cost.
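
As a rough illustration, a minimal sketch of such a momentum update follows; the momentum value, the confidence threshold and the array shapes are our assumptions, in the spirit of temporal ensembling (Laine and Aila, 2016):

```python
import numpy as np

def update_memory_bank(bank, probs, momentum=0.9):
    """Aggregate the current round's class probabilities into the memory bank."""
    return momentum * bank + (1.0 - momentum) * probs

def confident_indices(bank, threshold=0.5):
    """Samples whose aggregated prediction is confident enough for self-training."""
    return np.where(bank.max(axis=1) >= threshold)[0]

# Example: 4 unlabeled samples, 3 classes; the bank starts from uniform predictions.
bank = np.full((4, 3), 1.0 / 3)
round_probs = np.array([[0.97, 0.02, 0.01],
                        [0.40, 0.35, 0.25],   # persistently uncertain sample
                        [0.05, 0.90, 0.05],
                        [0.98, 0.01, 0.01]])
for _ in range(5):  # several active self-training rounds with similar outputs
    bank = update_memory_bank(bank, round_probs)
print(confident_indices(bank))  # [0 2 3]: the uncertain sample is filtered out
```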

Our key contributions are: (1) an active self-training paradigm, ACTUNE, that integrates self-training and active learning to minimize the labeling cost of fine-tuning PLMs; (2) a region-aware querying strategy that enforces both the informativeness and the diversity of queried samples during AL; (3) a simple and effective momentum-based method that leverages the predictions of preceding rounds to alleviate label noise in self-training; and (4) experiments on 6 benchmarks demonstrating that ACTUNE improves label efficiency over existing self-training and active learning baselines by $56.2\%$.

# 2 Uncertainty-aware Active Self-training

# 2.1 Problem Formulation

We study active fine-tuning of pre-trained language models for text classification, formulated as follows. Given a small number of labeled samples $\mathcal{X}_l = \{(x_i,y_i)\}_{i = 1}^L$ and unlabeled samples $\mathcal{X}_u = \{x_j\}_{j = 1}^U$ $(|\mathcal{X}_l| \ll |\mathcal{X}_u|)$, we aim to fine-tune a pre-trained language model $f(\pmb{x};\theta): \mathcal{X} \to \mathcal{Y}$ in an interactive way: we perform active self-training for $T$ rounds with a total labeling budget $b$. In each round, we query $B = b / T$ samples, denoted $\mathcal{B}$, from $\mathcal{X}_u$, and fine-tune the model $f(\pmb{x};\theta)$ with $\mathcal{X}_l$, $\mathcal{B}$ and $\mathcal{X}_u$ to maximize performance on downstream text classification tasks. Here $\mathcal{X} = \mathcal{X}_l \cup \mathcal{X}_u$ denotes all samples, and $\mathcal{Y} = \{1,2,\dots,C\}$ is the label set, where $C$ is the number of classes.

# 2.2 Overview of the ACTUNE Framework

We now present our active self-training paradigm ACTUNE, underpinned by estimated uncertainty. We begin the active self-training loop by fine-tuning a BERT model $f(\theta^{(0)})$ on the initial labeled data $\mathcal{X}_L$. Formally, we solve the following optimization problem:

$$
\min_{\theta} \frac{1}{|\mathcal{X}_L|} \sum_{(\boldsymbol{x}_i, y_i) \in \mathcal{X}_L} \ell_{\mathrm{CE}}\left(f(\boldsymbol{x}_i; \theta^{(0)}), y_i\right). \tag{1}
$$
40
+
41
+ In round $t$ $(1\leq t\leq T)$ of active self-training, we first calculate the uncertainty score based on a given function $a_{i}^{(t)} = a(\pmb{x}_{i},\theta^{(t)})^{1}$ for all $\pmb{x}_i\in \mathcal{X}_u$ . Then, we query labeled samples and generate pseudolabels for unlabeled data $\mathcal{X}_u$ simultaneously to facilitate self-training. For each sample $\pmb{x}_i$ , the pseudo-label $\widetilde{y}$ is calculated based on the current
42
+
43
+ # Algorithm 1: Training Procedures of ACTUNE.
44
+
45
+ Input: Initial labeled samples $\mathcal{X}_l$ ; Unlabeled samples $\mathcal{X}_u$ ; Pre-trained LM $f(\cdot; \theta)$ , number of active self-training rounds $T$ .
46
+ // Fine-tune the LM with initial labeled data.
47
+ 1. Calculate $\theta^{(0)}$ based on Eq. (1).
48
+ 2. Initialize the memory bank $g(\pmb{x}; \theta^{(0)})$ with the current model predictions.
49
+ // Conduct active self-training with all data.
50
+ for $t = 1,2,\dots ,T$ do
51
+ 1. Run weighted K-Means (Eq. (3), (4)) until convergence.
52
+ 2. Select the query set $\mathcal{Q}^{(t)}$ for AL (Eq. (8)) and the set $\mathcal{S}^{(t)}$ for self-training (Eq. (11) or (13)) from $\mathcal{X}_u$.
53
+ 3. Augment the labeled set $\mathcal{X}_L = \mathcal{X}_L\cup \mathcal{Q}^{(t)}$.
54
+ 4. Obtain $\theta^{(t)}$ by fine-tuning $f(\cdot;\theta)$ with $\mathcal{L}_{\mathrm{ST}}$ (Eq. (14)) using AdamW.
55
+ 5. Update the memory bank $g(\pmb{x};\theta^{(t)})$ with Eq. (10) or (12).
56
+
57
+ Output: The final fine-tuned model $f(\cdot;\theta^{(T)})$.
58
+
59
+ model's output:
60
+
61
+ $$
62
+ \tilde{y} = \underset{j \in \mathcal{Y}}{\operatorname{argmax}} \left[ f(\boldsymbol{x}; \theta^{(t)}) \right]_{j}, \tag{2}
63
+ $$
64
+
65
+ where $f(\pmb {x};\theta^{(t)})\in \mathbb{R}^C$ lies on the probability simplex and $[f(\pmb {x};\theta^{(t)})]_j$ is its $j$-th entry. The procedure of ACTUNE is summarized in Algorithm 1.
66
+
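+ Concretely, the pseudo-labeling step of Eq. (2), paired with predictive entropy as one possible choice of the uncertainty function $a(\pmb{x}, \theta)$, can be sketched as follows (the helper below is hypothetical, not part of any released API):
+
+ ```python
+ import torch
+
+ @torch.no_grad()
+ def pseudo_labels_and_uncertainty(model, input_ids, attention_mask):
+     """Return argmax pseudo-labels (Eq. 2) and per-sample predictive entropy."""
+     logits = model(input_ids=input_ids, attention_mask=attention_mask).logits
+     probs = torch.softmax(logits, dim=-1)
+     pseudo = probs.argmax(dim=-1)  # Eq. (2)
+     entropy = -(probs * probs.clamp_min(1e-12).log()).sum(dim=-1)  # one choice of a(x, theta)
+     return pseudo, entropy
+ ```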
67
+ # 2.3 Region-aware Sampling for Active Learning on High-uncertainty Data
68
+
69
+ After obtaining the uncertainty of the unlabeled data, we aim to query annotations for high-uncertainty samples. However, directly sampling the most uncertain samples gives suboptimal results, as it tends to query repetitive data (Ein-Dor et al., 2020) that represent the overall data distribution poorly.
70
+
71
+ To tackle this issue, we propose region-aware sampling to capture both uncertainty and diversity during active self-training. Specifically, in the $t$ -th round, we first conduct the weighted K-means clustering (Huang et al., 2005), which weights samples based on their uncertainty. Denote by $K$ the number of clusters and $\pmb{v}_i^{(t)} = \mathrm{BERT}(\pmb{x}_i)$ the representation of $\pmb{x}_i$ from the penultimate layer of BERT. The weighted K-means process first initializes the center of each cluster $\pmb{\mu}_i(1 \leq i \leq K)$ via K-Means++ (Arthur and Vassilvitskii, 2007). Then, it jointly updates the centroid of each cluster and assigns each sample to cluster $c_i$ as
72
+
73
+ $$
74
+ c_{i}^{(t)} = \underset{k = 1, \dots, K}{\operatorname{argmin}} \left\| \boldsymbol{v}_{i} - \boldsymbol{\mu}_{k} \right\|^{2}, \tag{3}
75
+ $$
76
+
77
+ $$
78
+ \boldsymbol{\mu}_{k}^{(t)} = \frac{\sum_{\boldsymbol{x}_{i} \in \mathcal{C}_{k}^{(t)}} a\left(\boldsymbol{x}_{i}, \theta^{(t)}\right) \cdot \boldsymbol{v}_{i}^{(t)}}{\sum_{\boldsymbol{x}_{i} \in \mathcal{C}_{k}^{(t)}} a\left(\boldsymbol{x}_{i}, \theta^{(t)}\right)} \tag{4}
79
+ $$
80
+
81
+ where $\mathcal{C}_k^{(t)} = \{\pmb{x}_i^{(t)}|c_i^{(t)} = k\}$ $(k = 1,\dots ,K)$ stands for the $k$-th cluster. The two steps in Eq. (3) and (4) are repeated until convergence. Compared with the vanilla K-Means method, the weighting scheme increases the density of samples with high uncertainty, enabling K-Means to discover high-uncertainty clusters. After obtaining $K$ regions with the corresponding data $\mathcal{C}_k^{(t)}$, we calculate the uncertainty of each region as
82
+
83
+ $$
84
+ u _ {k} ^ {(t)} = U \left(\mathcal {C} _ {k} ^ {(t)}\right) + \beta I \left(\mathcal {C} _ {k} ^ {(t)}\right) \tag {5}
85
+ $$
86
+
87
+ where
88
+
89
+ $$
90
+ U \left(\mathcal {C} _ {k} ^ {(t)}\right) = \frac {1}{\left| \mathcal {C} _ {k} ^ {(t)} \right|} \sum_ {\boldsymbol {x} _ {i} \in \mathcal {C} _ {k} ^ {(t)}} a \left(\boldsymbol {x} _ {i}, \theta^ {(t)}\right), \tag {6}
91
+ $$
92
+
93
+ is the average uncertainty of samples and
94
+
95
+ $$
96
+ I \left(\mathcal {C} _ {k} ^ {(t)}\right) = - \sum_ {j \in C} f _ {j} ^ {(t)} \log f _ {j} ^ {(t)} \tag {7}
97
+ $$
98
+
99
+ is the inter-class diversity within cluster $k$, and $f_{j}^{(t)} = \frac{\sum_{i}\mathbb{1}\{\widetilde{y}_{i} = j\}}{|\mathcal{C}_{k}^{(t)}|}$ is the frequency of class $j$ in cluster $k$. Notably, the term $U(\mathcal{C}_k^{(t)})$ assigns higher scores to clusters with more uncertain samples, and $I(\mathcal{C}_k^{(t)})$ grants higher scores to clusters whose samples have more diverse pseudo-labeled classes, since such clusters lie closer to the decision boundary.
100
+
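+ To make these steps concrete, below is a minimal NumPy sketch of the uncertainty-weighted K-Means update (Eqs. (3)-(4)) and the region score of Eqs. (5)-(7); for brevity, the K-Means++ initialization is replaced by random sampling, and the function names are ours.
+
+ ```python
+ import numpy as np
+
+ def weighted_kmeans(V, a, K, iters=20, seed=0):
+     """V: (N, d) BERT features; a: (N,) uncertainty scores. Returns cluster ids."""
+     rng = np.random.default_rng(seed)
+     mu = V[rng.choice(len(V), K, replace=False)]  # random init (paper uses K-Means++)
+     for _ in range(iters):
+         c = np.linalg.norm(V[:, None] - mu[None], axis=-1).argmin(axis=1)  # Eq. (3)
+         for k in range(K):  # uncertainty-weighted centroid update, Eq. (4)
+             m = c == k
+             if m.any():
+                 mu[k] = (a[m, None] * V[m]).sum(axis=0) / (a[m].sum() + 1e-12)
+     return c
+
+ def region_scores(c, a, pseudo, K, C, beta=0.5):
+     """Eq. (5): average uncertainty plus beta times inter-class diversity per cluster."""
+     u = np.zeros(K)
+     for k in range(K):
+         m = c == k
+         if not m.any():
+             continue  # empty clusters keep score 0 in this sketch
+         freq = np.bincount(pseudo[m], minlength=C) / m.sum()  # class frequencies in cluster k
+         ent = -(freq[freq > 0] * np.log(freq[freq > 0])).sum()  # Eq. (7)
+         u[k] = a[m].mean() + beta * ent  # Eqs. (5)-(6)
+     return u
+ ```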
101
+ Then, we rank the clusters by $u_{k}^{(t)}$. A high score indicates high model uncertainty in a region, and we need to actively annotate its member instances to reduce the uncertainty and improve the model's performance. We adopt a hierarchical sampling strategy: we first select the $M$ clusters with the highest uncertainty, and then from each of them sample the $b' = \lfloor \frac{B}{M} \rfloor$ data points with the highest uncertainty to form the batch $\mathcal{Q}^{(t)}$:
102
+
103
+ $$
104
+ \mathcal{K}_{a}^{(t)} = \underset{k \in \{1, \ldots, K\}}{\operatorname{top-}M} \; u_{k}^{(t)},
105
+ $$
106
+
107
+ $$
108
+ \mathcal{Q}^{(t)} = \bigcup_{k \in \mathcal{K}_{a}^{(t)}} \mathcal{C}_{a,k}^{(t)} \quad \text{where} \quad \mathcal{C}_{a,k}^{(t)} = \underset{\boldsymbol{x}_{i} \in \mathcal{C}_{k}^{(t)}}{\operatorname{top-}b'} \; a(\boldsymbol{x}_{i}, \theta^{(t)}). \tag{8}
109
+ $$
110
+
111
+ We remark that such a hierarchical sampling strategy queries the most uncertain samples from different regions, so that both the uncertainty and the diversity of the queried samples are achieved.
112
+
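+ Given the region scores, the hierarchical query step of Eq. (8) reduces to two nested top-k selections; a sketch reusing the arrays from the snippet above:
+
+ ```python
+ import numpy as np
+
+ def query_batch(u, c, a, M, B):
+     """Pick the b' = B // M most uncertain samples from each of the M most uncertain regions."""
+     top_regions = np.argsort(u)[-M:]  # top-M clusters by u_k
+     b_prime, query = B // M, []
+     for k in top_regions:
+         idx = np.flatnonzero(c == k)
+         query.extend(idx[np.argsort(a[idx])[-b_prime:]])  # top-b' by uncertainty, Eq. (8)
+     return np.asarray(query)
+ ```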
113
+ # 2.4 Self-training over Confident Samples from Low-uncertainty Regions
114
+
115
+ For self-training, we aim to select the unlabeled samples that are most likely to have been correctly classified by the current model, which requires them to have low uncertainty. Therefore, we select the $k$ lowest-uncertainty samples from the $M$ lowest-uncertainty regions to form the acquired batch $\mathcal{S}^{(t)}$:
116
+
117
+ $$
118
+ \begin{array}{l} \mathcal{C}_{s}^{(t)} = \bigcup_{k \in \mathcal{K}_{s}^{(t)}} \mathcal{C}_{k}^{(t)} \quad \text{where} \quad \mathcal{K}_{s}^{(t)} = \underset{k \in \{1, \ldots, K\}}{\operatorname{bottom-}M} \; u_{k}^{(t)}, \tag{9} \\ \mathcal{S}^{(t)} = \underset{\boldsymbol{x}_{i} \in \mathcal{C}_{s}^{(t)}}{\operatorname{bottom-}k} \; a(\boldsymbol{x}_{i}, \theta^{(t)}). \end{array}
119
+ $$
120
+
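+ This selection is symmetric to the query step above; a sketch with the same arrays:
+
+ ```python
+ import numpy as np
+
+ def self_training_batch(u, c, a, M, k_st):
+     """Pool the bottom-M regions by u_k, then keep the k_st lowest-uncertainty samples (Eq. 9)."""
+     low_regions = np.argsort(u)[:M]  # bottom-M clusters
+     pool = np.flatnonzero(np.isin(c, low_regions))  # the pooled region C_s^(t)
+     return pool[np.argsort(a[pool])[:k_st]]  # bottom-k by uncertainty
+ ```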
121
+ Momentum-based Memory Bank for Self-training. As PLMs are sensitive to the stochasticity involved in fine-tuning, the model suffers from an instability issue: different weight initializations and data orders may result in different predictions on the same dataset (Dodge et al., 2020). Additionally, if the model gives inconsistent predictions for a specific sample in different rounds, then it is potentially uncertain about that sample, and adding it to the training set may harm the active self-training process. For example, for a two-class classification problem, suppose we obtain $f(\pmb{x}; \theta^{(t-1)}) = [0.65, 0.35]$ for sample $\pmb{x}$ in round $(t-1)$ and $f(\pmb{x}; \theta^{(t)}) = [0.05, 0.95]$ in round $t$. Although the model is quite 'confident' about the class of $\pmb{x}$ when we only consider the result of round $t$, it gives contradictory predictions over these two consecutive rounds, which indicates that the model is actually uncertain about which class $\pmb{x}$ belongs to.
122
+
123
+ To effectively mitigate the noise and stabilize the active self-training process, we maintain a dynamic memory bank to save the results from previous rounds, and use a momentum update (He et al., 2020; Laine and Aila, 2016) to aggregate the results from both the previous and current rounds. Then, during active self-training, we select the samples with the most confident aggregated predictions. In this way, only those samples that the model is certain about over all previous rounds are selected for self-training. We design two variants of the memory bank, namely prediction-based and value-based aggregation.
124
+
125
+ Prediction-based Momentum Update. We adopt an exponential moving average approach to aggregate the prediction $g(\pmb{x}; \theta^{(t)})$ in round $t$ as
126
+
127
+ $$
128
+ g (\boldsymbol {x}; \theta^ {(t)}) = m _ {t} f (\boldsymbol {x}; \theta^ {(t)}) + (1 - m _ {t}) g (\boldsymbol {x}; \theta^ {(t - 1)}), \tag {10}
129
+ $$
130
+
131
+ where $m_t = (1 - \frac{t}{T})m_L + \frac{t}{T} m_H$ $(0 < m_L \leq m_H \leq 1)$ is a momentum coefficient. We gradually increase the weight given to the models of later rounds,
132
+
133
+ since they are trained with more labeled data and can thus provide more reliable predictions. Then, we calculate the uncertainty based on $g(\pmb{x};\theta^{(t)})$ and rewrite Eq. (9) and (2) as
134
+
135
+ $$
136
+ \begin{array}{l} \mathcal{S}^{(t)} = \underset{\boldsymbol{x}_{i} \in \mathcal{C}_{s}^{(t)}}{\operatorname{bottom-}k} \; a\left(\boldsymbol{x}_{i}, g(\boldsymbol{x}; \theta^{(t)}), \theta^{(t)}\right) \tag{11} \\ \widetilde{y} = \underset{j \in \mathcal{Y}}{\operatorname{argmax}} \left[ g(\boldsymbol{x}; \theta^{(t)}) \right]_{j}. \end{array}
137
+ $$
138
+
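+ A minimal sketch of this prediction-based memory bank, assuming the bank stores one probability vector per unlabeled sample and using the linear schedule for $m_t$ defined above:
+
+ ```python
+ import torch
+
+ def momentum_coefficient(t, T, m_low=0.8, m_high=0.9):
+     """m_t = (1 - t/T) * m_L + (t/T) * m_H, increasing toward later rounds."""
+     return (1 - t / T) * m_low + (t / T) * m_high
+
+ @torch.no_grad()
+ def update_memory_bank(bank, probs, t, T):
+     """Eq. (10): g <- m_t * f + (1 - m_t) * g, applied in place to the (N, C) bank."""
+     m_t = momentum_coefficient(t, T)
+     bank.mul_(1 - m_t).add_(m_t * probs)
+     return bank
+ ```
+
+ Pseudo-labels are then read off as `bank.argmax(dim=-1)`, matching the second line of Eq. (11).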
139
+ Value-based Momentum Update. For methods that do not directly use the model prediction for uncertainty estimation, we aggregate the uncertainty value as
140
+
141
+ $$
142
+ g (\boldsymbol {x}; \theta^ {(t)}) = m _ {t} a (\boldsymbol {x}; \theta^ {(t)}) + (1 - m _ {t}) g (\boldsymbol {x}; \theta^ {(t - 1)}). \tag {12}
143
+ $$
144
+
145
+ Then, we use Eq. (12) to sample low-uncertainty data for self-training as<sup>3</sup>
146
+
147
+ $$
148
+ \begin{array}{l} \mathcal{S}^{(t)} = \underset{\boldsymbol{x}_{i} \in \mathcal{C}_{s}^{(t)}}{\operatorname{bottom-}k} \; g(\boldsymbol{x}_{i}; \theta^{(t)}), \\ \widetilde{y} = \underset{j \in \mathcal{Y}}{\operatorname{argmax}} \left[ f(\boldsymbol{x}; \theta^{(t)}) \right]_{j}. \tag{13} \end{array}
149
+ $$
150
+
151
+ By aggregating the prediction results over previous rounds, we filter out samples with inconsistent predictions to suppress noisy labels.
152
+
153
+ # 2.5 Model Learning and Update
154
+
155
+ After obtaining both the labeled data and the pseudo-labeled data, we fine-tune a new pre-trained BERT model $\theta^{(t + 1)}$ on them. Although we only include low-uncertainty samples during self-training, it is difficult to eliminate all wrong pseudo-labels, and such mislabeled samples can still hurt model performance. To suppress such label noise, we use a threshold-based strategy that further removes noisy labels by keeping only samples whose current predictions agree with the corresponding pseudo-labels with high confidence. The loss objective for optimizing $\theta^{(t + 1)}$ is
156
+
157
+ $$
158
+ \begin{array}{l} \mathcal {L} _ {\mathrm {S T}} = \frac {1}{| \mathcal {L} ^ {(t)} |} \sum_ {\boldsymbol {x} _ {i} \in \mathcal {L} ^ {(t)}} \ell_ {\mathrm {C E}} \left(f \left(\boldsymbol {x} _ {i}; \theta^ {(t + 1)}\right), y _ {i}\right) \tag {14} \\ + \frac {\lambda}{| \mathcal {S} ^ {(t)} |} \sum_ {\widetilde {\boldsymbol {x}} _ {i} \in \mathcal {S} ^ {(t)}} \omega_ {i} \ell_ {\mathrm {C E}} \left(f (\widetilde {\boldsymbol {x}} _ {i}; \theta^ {(t + 1)}), \widetilde {y} _ {i}\right), \\ \end{array}
159
+ $$
160
+
161
+ where $\mathcal{L}^{(t)} = \mathcal{X}_L\cup \mathcal{Q}^{(t)}$ is the labeled set, $\lambda$ is a hyper-parameter balancing the weight between clean and pseudo labels, and $\omega_{i} = \mathbb{1}\{\left[f(\pmb{x}_{i};\theta^{(t + 1)})\right]_{\widetilde{y}_{i}} > \gamma \}$ is the thresholding function.
162
+
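+ A PyTorch sketch of this objective, with $\lambda$ and the threshold $\gamma$ as hyper-parameters (the function and argument names are ours):
+
+ ```python
+ import torch
+ import torch.nn.functional as F
+
+ def st_loss(logits_lab, y_lab, logits_st, y_pseudo, lam=1.0, gamma=0.6):
+     """Eq. (14): supervised CE plus thresholded CE on the pseudo-labeled batch."""
+     sup = F.cross_entropy(logits_lab, y_lab)
+     probs = torch.softmax(logits_st, dim=-1)
+     conf = probs.gather(1, y_pseudo.unsqueeze(1)).squeeze(1)  # [f(x)] at the pseudo-label
+     omega = (conf > gamma).float()  # thresholding function omega_i
+     per_sample = F.cross_entropy(logits_st, y_pseudo, reduction="none")
+     return sup + lam * (omega * per_sample).mean()
+ ```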
163
+ Complexity Analysis. The running time of ACTUNE mainly consists of two parts: the inference time $O(|\mathcal{X}_u|)$ and the time for K-Means clustering $O(dK|\mathcal{X}_u|)$, where $d$ is the dimension of the BERT feature $\pmb{v}$. For self-training, the size
164
+
165
+ <table><tr><td>Dataset</td><td>Label Type</td><td># Class</td><td># Train</td><td># Dev</td><td>#Test</td></tr><tr><td>SST-2</td><td>Sentiment</td><td>2</td><td>60.6k</td><td>0.8k</td><td>1.8k</td></tr><tr><td>AG News</td><td>News Topic</td><td>4</td><td>119k</td><td>1k</td><td>7.6k</td></tr><tr><td>Pubmed</td><td>Medical Abstract</td><td>5</td><td>180k</td><td>1k</td><td>30.1k</td></tr><tr><td>DBPedia</td><td>Wikipedia Topic</td><td>14</td><td>280k</td><td>1k</td><td>70k</td></tr><tr><td>TREC</td><td>Question</td><td>6</td><td>5.0k</td><td>0.5k</td><td>0.5k</td></tr><tr><td>Chemprot</td><td>Medical Abstract</td><td>10</td><td>12.8k</td><td>0.5k</td><td>1.6k</td></tr></table>
166
+
167
+ Table 1: Dataset statistics. For DBPedia, we randomly sample $20\mathrm{k}$ samples from each class due to limited computational resources.
168
+
169
+ of the memory bank $g(\pmb{x};\theta)$ is proportional to $|\mathcal{X}_u|$, while the extra computation for maintaining this memory bank is negligible, since we do not run inference over the unlabeled data multiple times in each round as BALD (Gal et al., 2017) does. The running time of ACTUNE is reported in Appendix C.
170
+
171
+ # 3 Experiments
172
+
173
+ # 3.1 Experiment Setup
174
+
175
+ Tasks and Datasets. In our main experiments, we use 4 datasets: SST-2 (Socher et al., 2013) for sentiment analysis, AGNews (Zhang et al., 2015) for news topic classification, Pubmed-RCT (Dernoncourt and Lee, 2017) for medical abstract classification, and DBPedia (Zhang et al., 2015) for Wikipedia topic classification. For weakly-supervised text classification, we choose 2 datasets, namely TREC (Li and Roth, 2002) and Chemprot (Krallinger et al., 2017), from the WRENCH benchmark (Zhang et al., 2021) for evaluation. The statistics are shown in Table 1.
176
+
177
+ Active Learning Setups. Following Yuan et al. (2020), we set the number of rounds $T = 10$, the overall budget $b = 1000$ for all datasets, and the initial size of the labeled set $|\mathcal{X}_l|$ to 100. In each AL round, we sample a batch of 100 samples from the unlabeled set $\mathcal{X}_u$ and query their labels. Since large development sets are impractical in low-resource settings (Kann et al., 2019), we keep the size of the development set at 1000, the same as the labeling budget<sup>4</sup>. For weakly-supervised text classification, since the datasets are much smaller, we set both the labeling budget $b$ and the size of the development set to 500.
178
+
179
+ Implementation Details. We choose RoBERTa-base (Liu et al., 2019) from the HuggingFace codebase (Wolf et al., 2020) as the backbone for ACTUNE and all baselines, except for Pubmed and Chemprot, where we use SciBERT (Beltagy et al., 2019), a BERT model pre-trained on scientific corpora.
180
+
181
+ In each round, we train from scratch to avoid overfitting the data collected in earlier rounds, as observed by Hu et al. (2019). More details are in Appendix B.
182
+
183
+ Hyperparameters. The hyperparameter settings are given in Appendix B.5. In the $t$-th round of active self-training, we increase the number of pseudo-labeled samples by $k$, where $k$ is 500 for TREC and Chemprot, 3000 for SST-2 and Pubmed-RCT, and 5000 for the others. For the momentum factor, we tune $m_{L}$ over $[0.6, 0.7, 0.8]$ and $m_{H}$ over $[0.8, 0.9, 1.0]$, and report the best $\{m_{L}, m_{H}\}$ based on the performance on the development set.
184
+
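+ This search over the momentum bounds can be expressed as a simple grid search; a sketch in which `evaluate` is a hypothetical helper that runs active self-training with the given bounds and returns development-set accuracy:
+
+ ```python
+ from itertools import product
+
+ def tune_momentum(evaluate, grid_low=(0.6, 0.7, 0.8), grid_high=(0.8, 0.9, 1.0)):
+     """Pick the (m_L, m_H) pair with the best development-set accuracy."""
+     best, best_acc = None, -1.0
+     for m_low, m_high in product(grid_low, grid_high):
+         acc = evaluate(m_low, m_high)
+         if acc > best_acc:
+             best, best_acc = (m_low, m_high), acc
+     return best
+ ```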
185
+ # Baselines.
186
+
187
+ Self-training Methods: (1) Self-training (ST, Lee (2013)): the vanilla self-training method that generates pseudo-labels for unlabeled data. (2) UST (Mukherjee and Awadallah, 2020; Rizve et al., 2021): an uncertainty-based self-training method that only uses low-uncertainty data for self-training. (3) COSINE (Yu et al., 2021): it uses self-training to fine-tune LMs with weakly-labeled data and achieves SOTA performance on various text datasets in the WRENCH benchmark (Zhang et al., 2021). Note that for these baselines, we randomly sample $b$ labeled data as the initialization.
188
+
189
+ Active Learning Methods: (1) Random: acquires annotations randomly and serves as a baseline for all methods. (2) Entropy (Holub et al., 2008): an uncertainty-based method that acquires annotations for the samples with the highest predictive entropy. (3) BALD (Gal et al., 2017): also an uncertainty-based method, which estimates model uncertainty using MC Dropout (Gal and Ghahramani, 2015). (4) BADGE (Ash et al., 2020): first selects high-uncertainty samples and then applies K-Means++ over the gradient embeddings to sample data. (5) ALPS (Yuan et al., 2020): uses the masked language model (MLM) loss of BERT to query labels for samples. (6) CAL (Margatina et al., 2021b): the most recent AL method for pre-trained LMs; it calculates the uncertainty of each sample based on the KL divergence between its own prediction and its neighbors' predictions.
190
+
191
+ Semi-supervised Active Learning (SSAL) Methods: (1) ASST (Tomanek and Hahn, 2009; Simeoni et al., 2020): an active semi-supervised learning method that jointly queries labels for AL and samples pseudo-labels for self-training. (2) CEAL (Wang et al., 2016): acquires annotations for informative samples and uses high-confidence
192
+
193
+ samples with predicted pseudo-labels for weight updates. (3) BASS (Rottmann et al., 2018): similar to CEAL, but uses MC dropout for querying labeled samples. (4) REVIVAL (Guo et al., 2021): the most recent SSAL method, which uses an adversarial loss to query samples and leverages label propagation to exploit adversarial examples.
194
+
195
+ Our Method: We experiment with both Entropy and CAL as uncertainty measures for ACTUNE. Note that when comparing with active learning baselines, we do not augment the training set with pseudo-labeled data (Eq. (9)) to ensure fair comparisons.
196
+
197
+ # 3.2 Main Result
198
+
199
+ Figure 1 reports the performance of ACTUNE and the baselines on 4 benchmarks. From the results, we have the following observations:
200
+
201
+ - ACTUNE consistently outperforms the baselines in most cases. Different from studies in the computer vision (CV) domain (Siméoni et al., 2020), where the model does not perform well in the low-data regime, pre-trained LMs achieve competitive performance with only a few labeled examples, which makes further improvement over vanilla fine-tuning challenging. Nevertheless, ACTUNE surpasses the baselines in more than $90\%$ of the rounds and achieves $0.4\% - 0.7\%$ and $0.3\% - 1.5\%$ absolute gains at the end of AL and SSAL, respectively. Figure 3 quantitatively measures the number of labels needed for the most advanced active learning model and self-training model (UST) to outperform ACTUNE with 1000 labels. These baselines need $>2000$ clean labeled samples to match our performance. On average, ACTUNE saves $56.2\%$ and $57.0\%$ of the labeled samples compared with the most advanced active learning and self-training baselines, respectively, which justifies its promising performance under low-resource scenarios. Such improvements show the merits of two key designs in our active self-training framework: the region-aware sampling for active learning and the momentum-based memory bank for robust self-training, which are discussed in Section 3.5.
202
+
203
+ - Compared with the previous AL baselines, ACTUNE brings consistent performance gains, while previous semi-supervised active learning methods do not. For instance, BASS builds on BALD for active learning, but it sometimes performs even worse than BALD with the same number of labeled data (see Fig. 1(b) and Fig. 1(f)). This is mainly because previous methods simply combine noisy pseudo-labels with clean labels for training
204
+
205
+ without explicitly rectifying the wrongly-labeled data, which causes the LM to overfit these hazardous labels. Moreover, previous methods do not exploit momentum updates to stabilize the learning process, as evidenced by the oscillations in the early rounds. In contrast, ACTUNE achieves a more stable learning process and enables active self-training to benefit from more labeled data.
206
+
207
+ - The self-training methods (ST & UST) achieve superior performance with limited labels. However, they mainly focus on leveraging unlabeled data to improve performance, while our results demonstrate that adaptively selecting the most useful data for fine-tuning is also important. With a powerful querying policy, ACTUNE improves these self-training baselines by $1.05\%$ in terms of accuracy on average.
208
+
209
+ # 3.3 Weakly-supervised Learning
210
+
211
+ ACTUNE can be naturally used for weakly-supervised classification, where $\mathcal{X}_l$ is a set of noisy labels derived from linguistic patterns or rules. Since the initial label set is noisy, the model trained with Eq. (1) can overfit the label noise (Zhang et al., 2022a), and we actively query labeled data to refine the model. We conduct experiments on the TREC and Chemprot datasets<sup>5</sup>, where we first use Snorkel (Ratner et al., 2017) to obtain the weak label set $\mathcal{X}_l$, then fine-tune the pre-trained LM $f(\theta^{(0)})$ on $\mathcal{X}_l$. After that, we adopt ACTUNE for active self-training.
212
+
213
+ Fig. 2 shows the results on these two datasets. When combining ACTUNE with CAL, the performance is unsatisfactory. We believe this is because CAL requires clean labels to calculate uncertainties; when the labels are inaccurate, it prevents ACTUNE from querying informative samples. In contrast, ACTUNE achieves the best performance over the baselines when using Entropy as the uncertainty measure. The performance gain is more notable on the TREC dataset, where we achieve $96.68\%$ accuracy, close to the fully supervised performance $(96.80\%)$ with only $\sim 6\%$ of the clean labels.
214
+
215
+ # 3.4 Combination with Other AL Methods
216
+
217
+ Fig. 5(a) demonstrates the performance of ACTUNE combined with other AL methods (e.g., BADGE, ALPS) on the SST-2 dataset. It is clear that even if the AL methods are not uncertainty-based
218
+
219
+ ![](images/c1caae3389ced359fc3c8625f362451fe1020e75055fbfc09801e6d0f1c66af2.jpg)
220
+
221
+ ![](images/f217067ad0a87a2b9e5dc476d53b5f420c9ab4f9b65e525e728c8dc4e8cba4be.jpg)
222
+
223
+ ![](images/f3a9f72b961a17c848e59cf9c1caf0b62402329437707efe06be9cae73eb9282.jpg)
224
+
225
+ ![](images/288f05084d0e8c760eda57f6c86db6b0d3ef3bbbb97cb5b767fb0d87e7b60435.jpg)
226
+
227
+ ![](images/695d426b89d7f3220e13a7ded51fe7596dfc3182e8dc0bcf3c0fa23dd0c88383.jpg)
228
+ (a) SST-2, AL
229
+ (e) SST-2, SSAL
230
+
231
+ ![](images/949e8b90932679f1f2074b7c076a0aab14d8675aae035e7e19fc2cb1aeb35f8b.jpg)
232
+ (b) AG News, AL
233
+ (f) AG News, SSAL
234
+
235
+ ![](images/a13d8c941c3076a0cec132e9266798951a08183e6e3ad257dc170b0d86b9b7ea.jpg)
236
+ (c) Pubmed, AL
237
+ (g) Pubmed, SSAL
238
+
239
+ ![](images/fcd9cffd7a374c616b37c7496c393127a042fe0d63b81abf3a5e6b74d2224971.jpg)
240
+ (d) DBPedia, AL
241
+ (h) DBPedia, SSAL†
242
+
243
+ ![](images/f362932b3a2c0ed2acc0b39a06be472faa96625785b53c68cd8cfd258eea8cf6.jpg)
244
+ Figure 1: Comparison of ACTUNE with active learning, semi-supervised active learning, and self-training baselines. The first row shows results under the active learning (AL) setting (i.e., no unlabeled data is used); the second row shows results under the semi-supervised active learning (SSAL) setting. The metric is accuracy. $\dagger$: REVIVAL causes an OOM error on the DBPedia dataset.
245
+ (a) TREC
246
+
247
+ ![](images/4f091b2b6d567f313650cb33a315e252fae75aa50d06812bc6f4a4f3b209c8de.jpg)
248
+ (b) Chemprot
249
+
250
+ ![](images/9d411ff0d8e76add8242d34dd4fcafd77787611b06cf3f7574e47eee5ececdfa.jpg)
251
+ Figure 2: The comparison of ACTUNE and baselines on weakly-supervised classification tasks.
252
+ Figure 3: The label-efficiency of ACTUNE compared with AL and self-training baselines. According to Fig. 1, the best AL method is Entropy for DBPedia and CAL for others.
253
+
254
+ (e.g., BADGE), when using entropy as an uncertainty measure to select pseudo-labeled data for self-training, ACTUNE can further boost the performance. This indicates that ACTUNE is a general active self-training approach, as it can serve as an efficient plug-in module for existing AL methods.
255
+
256
+ # 3.5 Ablation and Hyperparameter Study
257
+
258
+ The Effect of Different Components in ACTUNE. We inspect different components of ACTUNE, including region-aware sampling (RS), the momentum-based memory bank (MMB), and
259
+
260
+ weighted clustering (WClus)<sup>7</sup>. Experimental results (Fig. 5(b)) show that all three components contribute to the final performance, as removing any of them hurts the classification accuracy. We also find that removing MMB hurts the performance most in the early rounds, which indicates that MMB effectively suppresses label noise when the model's capacity is still weak. Conversely, removing WClus hurts the performance in later rounds, as weighted clustering enables the model to select the most informative samples.
261
+
262
+ Hyperparameter Study. We study two hyperparameters used in querying labels, namely $\beta$ and $K$. Figures 4(a) and 4(b) show the results. In general, the model is insensitive to $\beta$, as the performance difference is less than $0.6\%$. The model cannot perform well with smaller $K$, since it cannot pinpoint high-uncertainty regions. For larger $K$, the performance also drops, as some of the high-uncertainty regions can be outliers, and sampling from them hurts the model performance (Karamcheti et al., 2021).
263
+
264
+ A Closer Look at the Momentum-based Memory Bank. To examine the role of MMB, we show the overall accuracy of pseudo-labels on the AG News dataset in Fig. 4(c). From the result, it is clear that the momentum-based memory bank stabilizes the active self-training process, as the accuracy of the pseudo-labels increases by around $1\%$, especially in
265
+
266
+ ![](images/bae0af47f3ada903bcb9e7bc06923fd1ce144001deea26f54f51e8630df94dd3.jpg)
267
+ (a) Effect of $\beta$
268
+
269
+ ![](images/dc743d19e621b2f471452705bcd02335f90711b8202546ade790970f538eee46.jpg)
270
+ (b) Effect of $K$
271
+
272
+ ![](images/bce39ee684f6bb25d1190882333b95e7c37ec2ded875fee385117a9c82094ff6.jpg)
273
+ (c) Acc. of PL
274
+
275
+ ![](images/b58f0b572e9f7e896f9280c7c6ff95b2ec1d280e841bd4ac51aac49c8565719e.jpg)
276
+ (d) Entropy
277
+
278
+ ![](images/d981894ac1ea040c2f1c9ed78d5c63804a6b210e05635567f0703583dd2dd867.jpg)
279
+ (e) CAL
280
+
281
+ ![](images/bea648232b5d27117f19f41b6d90237195dd661af311b106542c9b8cc520cd43.jpg)
282
+ Figure 4: Parameter study. Note that the study of different $m_{L}$ and $m_{H}$ is conducted on the AG News dataset.
283
+ Figure 5: Results of ACTUNE with different AL methods (SST-2) and the ablation study (SST-2 with ACTUNE+Entropy).
284
+
285
+ ![](images/3812b1d5d6fd66cb3d7e14fb57ed99b038500a3dac55df21aeae4f65ef66768d.jpg)
286
+ (a) Combining w/ AL Methods
287
+ (b) Ablation Study
288
+
289
+ later rounds. Figs. 4(d) and 4(e) illustrate the model performance with different $m_{L}$ and $m_{H}$. Overall, we find that our model is robust to different choices, as ACTUNE consistently outperforms the baseline without momentum updates. Moreover, we find that a larger $m_{H}$ generally leads to better performance in later rounds. This is mainly because in later rounds the model's predictions are more reliable. Conversely, at the beginning of training, the model's predictions on unlabeled data may oscillate. In this case, using a smaller $m_{L}$ favors samples with consistent predictions and improves the robustness of active self-training.
290
+
291
+ Another finding is that the optimal memory bank can differ across AL methods. For Entropy, the prediction-based memory bank leads to better results, while for CAL, simply aggregating the uncertainty scores achieves better performance. This is mainly because the uncertainty measure used in CAL is more complicated, and using the prediction-based memory bank may interfere with its uncertainty calculation.
292
+
293
+ # 3.6 Case Study
294
+
295
+ We give an example of our querying strategy on the AG News dataset for the first round of the active self-training process in Figure 6. Note that we use the t-SNE algorithm (Van der Maaten and Hinton, 2008) for dimension reduction; the black triangles stand for the queried samples, while the other circles stand for unlabeled data. We can see that existing uncertainty-based methods such as Entropy and CAL suffer from limited diversity. However, when combined with ACTUNE, the diversity is much improved. Such results, com
296
+
297
+ pared with the main results in Figure 1, empirically demonstrate the efficacy of ACTUNE.
298
+
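+ A visualization of this kind can be reproduced with standard tooling; a sketch assuming precomputed BERT embeddings and the indices of the queried samples:
+
+ ```python
+ import matplotlib.pyplot as plt
+ from sklearn.manifold import TSNE
+
+ def plot_query(embeddings, pseudo_labels, queried_idx):
+     """Project features to 2-D with t-SNE and mark the queried samples as black triangles."""
+     z = TSNE(n_components=2, init="pca", random_state=0).fit_transform(embeddings)
+     plt.scatter(z[:, 0], z[:, 1], c=pseudo_labels, s=5, alpha=0.5)
+     plt.scatter(z[queried_idx, 0], z[queried_idx, 1], c="black", marker="^", s=40)
+     plt.show()
+ ```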
299
+ # 4 Related Work
300
+
301
+ Active Learning. Active learning has been widely applied to various NLP tasks (Yuan et al., 2020; Shelmanov et al., 2021; Karamcheti et al., 2021). So far, AL methods can be categorized into uncertainty-based methods (Gal et al., 2017; Margatina et al., 2021a,b), diversity-based methods (Ru et al., 2020; Sener and Savarese, 2018), and hybrid methods (Yuan et al., 2020; Ash et al., 2020). Ein-Dor et al. (2020) offer an empirical study of active learning with PLMs. Very recently, several works have also attempted to query labeling functions for weakly-supervised learning (Boecking et al., 2020; Hsieh et al., 2022; Zhang et al., 2022b). In our study, we leverage the power of unlabeled instances via self-training to further promote the performance of AL.
302
+
303
+ Semi-supervised Active Learning (SSAL). Gao et al. (2020); Guo et al. (2021) design query strategies for specific semi-supervised methods, Zhang et al. (2020a); Jiang et al. (2020) combine active learning with data augmentation, and Tomanek and Hahn (2009); Rottmann et al. (2018); Simeoni et al. (2020) exploit the most certain unlabeled samples via pseudo-labeling to augment the training set. So far, most SSAL approaches are designed for the CV domain, and it remains unknown how this paradigm performs with PLMs on NLP tasks. In contrast, we propose ACTUNE to effectively leverage unlabeled data when fine-tuning PLMs for NLP tasks.
304
+
305
+ Self-training. Self-training is one of the earliest and simplest approaches to semi-supervised learning (Lee, 2013). It first generates pseudo-labels for high-confidence samples, then fits a new model on the pseudo-labeled data to improve generalization. However, it is known to be vulnerable to error propagation (Arazo et al., 2020; Rizve et al., 2021; Zuo et al., 2021). To alleviate this, we adopt a simple momentum-based method to select high-confidence samples, effectively reducing
306
+
307
+ ![](images/34895fb5e14694e0f1074d9993efae380bbcf459e7575109e2246ce46ab70980.jpg)
308
+ (a) AG News, Entropy
309
+
310
+ ![](images/1517555ff90bd23246725c886283811655ca5a56b1a24c9d9ce5b2028cc2675f.jpg)
311
+ (b) AG News, ACTUNE w/ Entropy
312
+
313
+ ![](images/1dcbf0ebb7057b486dbe1f2c15b70faec667b9bca4c0008a221d0197655c9cc1.jpg)
314
+ (c) AG News, CAL
315
+
316
+ ![](images/d9ee8694e0610b6de9517b23b024b5179956a48734e47664be8d9989f91c09e8.jpg)
317
+ (d) AG News, ACTUNE w/ CAL
318
+ Figure 6: Visualization of the querying strategy of ACTUNE. Black dots stand for the queried data points. Different colors indicate different categories.
319
+
320
+ the pseudo-label noise for active learning. Note that although Mukherjee and Awadallah (2020); Rizve et al. (2021) also leverage uncertainty information for self-training, their focus is on developing better self-training methods, while we aim to jointly query high-uncertainty samples and generate pseudo-labels for low-uncertainty samples. The experiments in Sec. 3 show that with appropriate querying methods, ACTUNE can further improve the performance of self-training.
321
+
322
+ # 5 Conclusion and Discussion
323
+
324
+ In this paper, we develop ACTUNE, a general active self-training framework for enhancing both label efficiency and model performance when fine-tuning pre-trained language models (PLMs). We propose a region-aware sampling approach to guarantee both the uncertainty and the diversity of queried labels. To combat the propagation of label noise, we design a momentum-based memory bank to effectively utilize the model predictions from preceding AL rounds. Empirical results on 6 public text classification benchmarks demonstrate the superiority of ACTUNE over conventional active learning and semi-supervised active learning methods for fine-tuning PLMs with limited resources.
325
+
326
+ There are several directions to improve ACTUNE. First, since our focus is on fine-tuning pre-trained language models, we use the representation of the [CLS] token for classification. In future work, we can consider prompt tuning (Gao et al., 2021; Schick and Schütze, 2021), a more data-efficient method for adapting pre-trained language models to classification tasks, to further promote label efficiency. Also, due to computational resource constraints, we do not use larger pre-trained language models such as RoBERTa-large (Liu et al., 2019), which shows even better performance with only a few labels (Du et al., 2021). Moreover, we can integrate more advanced uncertainty estimation approaches (Kong et al., 2020) into ACTUNE to further improve performance. Last, apart from text classification, we can also extend our work to other tasks such as sequence labeling and natural language inference (NLI).
327
+
328
+ # Acknowledgements
329
+
330
+ We thank the anonymous reviewers for their feedback. This work was supported in part by NSF IIS-2008334, IIS-2106961, CAREER IIS-2144338, and ONR MURI N00014-17-1-2656.
331
+
332
+ # References
333
+
334
+ Eric Arazo, Diego Ortego, Paul Albert, Noel E O'Connor, and Kevin McGuinness. 2020. Pseudolabeling and confirmation bias in deep semi-supervised learning. In 2020 International Joint Conference on Neural Networks (IJCNN), pages 1-8. IEEE.
335
+ David Arthur and Sergei Vassilvitskii. 2007. K-means++: The advantages of careful seeding. In Proceedings of the Eighteenth Annual ACM-SIAM Symposium on Discrete Algorithms, page 1027-1035, USA.
336
+ Jordan T. Ash, Chicheng Zhang, Akshay Krishna-murthy, John Langford, and Alekh Agarwal. 2020. Deep batch active learning by diverse, uncertain gradient lower bounds. In International Conference on Learning Representations.
337
+ Trapit Bansal, Rishikesh Jha, Tsendsuren Munkhdalai, and Andrew McCallum. 2020. Self-supervised meta-learning for few-shot natural language classification tasks. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 522-534, Online. Association for Computational Linguistics.
338
+ Iz Beltagy, Kyle Lo, and Arman Cohan. 2019. SciBERT: A pretrained language model for scientific text. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3615-3620.
339
+ Benedikt Boecking, Willie Neiswanger, Eric Xing, and Artur Dubrawski. 2020. Interactive weak supervision: Learning useful heuristics for data labeling. arXiv preprint arXiv:2012.06046.
340
+ Jonathan Bragg, Arman Cohan, Kyle Lo, and Iz Beltagy. 2021. Flex: Unifying evaluation for few-shot nlp. Advances in Neural Information Processing Systems, 34.
341
+ Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, et al. 2020. Language models are few-shot learners. In Advances in Neural Information Processing Systems, volume 33, pages 1877-1901.
342
+ Franck Dernoncourt and Ji Young Lee. 2017. PubMed 200k RCT: a dataset for sequential sentence classification in medical abstracts. In Proceedings of the Eighth International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 308-313, Taipei, Taiwan. Asian Federation of Natural Language Processing.
343
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association
344
+
345
+ for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
346
+ Jesse Dodge, Gabriel Ilharco, Roy Schwartz, Ali Farhadi, Hannaneh Hajishirzi, and Noah A. Smith. 2020. Fine-tuning pretrained language models: Weight initializations, data orders, and early stopping. CoRR, abs/2002.06305.
347
+ Rotem Dror, Gili Baumer, Segev Shlomov, and Roi Reichart. 2018. The hitchhiker's guide to testing statistical significance in natural language processing. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1383-1392.
348
+ Jingfei Du, Edouard Grave, Beliz Gunel, Vishrav Chaudhary, Onur Celebi, Michael Auli, Veselin Stoyanov, and Alexis Conneau. 2021. Self-training improves pre-training for natural language understanding. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 5408-5418. Association for Computational Linguistics.
349
+ Liat Ein-Dor, Alon Halfon, Ariel Gera, Eyal Shnarch, Lena Dankin, Leshem Choshen, Marina Danilevsky, Ranit Aharonov, Yoav Katz, and Noam Slonim. 2020. Active Learning for BERT: An Empirical Study. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7949-7962. Association for Computational Linguistics.
350
+ Yarin Gal and Zoubin Ghahramani. 2015. Bayesian convolutional neural networks with bernoulli approximate variational inference. CoRR, abs/1506.02158.
351
+ Yarin Gal, Riashat Islam, and Zoubin Ghahramani. 2017. Deep bayesian active learning with image data. In International Conference on Machine Learning, pages 1183-1192. PMLR.
352
+ Mingfei Gao, Zizhao Zhang, Guo Yu, Sercan Ö Arik, Larry S Davis, and Tomas Pfister. 2020. Consistency-based semi-supervised active learning: Towards minimizing labeling cost. In European Conference on Computer Vision, pages 510-526. Springer.
353
+ Tianyu Gao, Adam Fisch, and Danqi Chen. 2021. Making pre-trained language models better few-shot learners. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 3816-3830, Online. Association for Computational Linguistics.
354
+ Jiannan Guo, Haochen Shi, Yangyang Kang, Kun Kuang, Siliang Tang, Zhuoren Jiang, Changlong
355
+
356
+ Sun, Fei Wu, and Yueting Zhuang. 2021. Semi-supervised active learning for semi-supervised models: Exploit adversarial examples with graph-based virtual labels. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 2896-2905.
357
+ Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. 2020. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR).
358
+ Alex Holub, Pietro Perona, and Michael C Burl. 2008. Entropy-based active learning for object recognition. In 2008 IEEE Computer Society Conference on Computer Vision and Pattern Recognition Workshops, pages 1-8. IEEE.
359
+ Cheng-Yu Hsieh, Jieyu Zhang, and Alexander Ratner. 2022. Nemo: Guiding and contextualizing weak supervision for interactive data programming. arXiv preprint arXiv:2203.01382.
360
+ Peiyun Hu, Zack Lipton, Anima Anandkumar, and Deva Ramanan. 2019. Active learning with partial feedback. In International Conference on Learning Representations.
361
+ Joshua Zhexue Huang, Michael K Ng, Hongqiang Rong, and Zichen Li. 2005. Automated variable weighting in k-means type clustering. IEEE transactions on pattern analysis and machine intelligence, 27(5):657-668.
362
+ Zhuoren Jiang, Zhe Gao, Yu Duan, Yangyang Kang, Changlong Sun, Qiong Zhang, and Xiaozhong Liu. 2020. Camouflaged Chinese spam content detection with semi-supervised generative active learning. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 3080-3085, Online. Association for Computational Linguistics.
363
+ Katharina Kann, Kyunghyun Cho, and Samuel R. Bowman. 2019. Towards realistic practices in low-resource natural language processing: The development set. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3342-3349, Hong Kong, China. Association for Computational Linguistics.
364
+ Siddharth Karamcheti, Ranjay Krishna, Li Fei-Fei, and Christopher Manning. 2021. Mind your outliers! investigating the negative impact of outliers on active learning for visual question answering. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 7265-7281, Online. Association for Computational Linguistics.
365
+
366
+ Lingkai Kong, Jimeng Sun, and Chao Zhang. 2020. Sde-net: Equipping deep neural networks with uncertainty estimates. In International Conference on Machine Learning, pages 5405-5415. PMLR.
367
+ Martin Krallinger, Obdulia Rabal, Saber A Akhondi, et al. 2017. Overview of the BioCreative VI chemical-protein interaction track. In BioCreative Evaluation Workshop, volume 1, pages 141-146.
368
+ Samuli Laine and Timo Aila. 2016. Temporal ensembling for semi-supervised learning. arXiv preprint arXiv:1610.02242.
369
+ Dong-Hyun Lee. 2013. Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In ICML Workshop on challenges in representation learning, volume 3, page 896.
370
+ Xin Li and Dan Roth. 2002. Learning question classifiers. In The 19th International Conference on Computational Linguistics.
371
+ Chen Liang, Yue Yu, Haoming Jiang, Siawpeng Er, Ruijia Wang, Tuo Zhao, and Chao Zhang. 2020. Bond: Bert-assisted open-domain named entity recognition with distant supervision. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 1054-1064.
372
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.
373
+ Katerina Margatina, Loic Barrault, and Nikolaos Aletras. 2021a. Bayesian active learning with pretrained language models. arXiv preprint arXiv:2104.08320.
374
+ Katerina Margatina, Giorgos Vernikos, Loic Barrault, and Nikolaos Aletras. 2021b. Active learning by acquiring contrastive examples. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 650-663, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
375
+ Marius Mosbach, Maksym Andriushchenko, and Dietrich Klakow. 2021. On the stability of fine-tuning $\{\text{bert}\}$ : Misconceptions, explanations, and strong baselines. In International Conference on Learning Representations.
376
+ Subhabrata Mukherjee and Ahmed Awadallah. 2020. Uncertainty-aware self-training for few-shot text classification. Advances in Neural Information Processing Systems, 33.
377
+ Alexander Ratner, Stephen H Bach, Henry Ehrenberg, Jason Fries, Sen Wu, and Christopher Ré. 2017. Snorkel: Rapid training data creation with weak supervision. In Proceedings of the VLDB Endowment, volume 11, page 269.
378
+
379
+ Mamshad Nayeem Rizve, Kevin Duarte, Yogesh S Rawat, and Mubarak Shah. 2021. In defense of pseudo-labeling: An uncertainty-aware pseudolabel selection framework for semi-supervised learning. In International Conference on Learning Representations.
380
+ Matthias Rottmann, Karsten Kahl, and Hanno Gottschalk. 2018. Deep bayesian active semi-supervised learning. In 2018 17th IEEE International Conference on Machine Learning and Applications (ICMLA), pages 158-164. IEEE.
381
+ Dongyu Ru, Jiangtao Feng, Lin Qiu, Hao Zhou, Mingxuan Wang, Weinan Zhang, Yong Yu, and Lei Li. 2020. Active sentence learning by adversarial uncertainty sampling in discrete space. In Findings of the Association for Computational Linguistics: EMNLP 2020, pages 4908-4917, Online. Association for Computational Linguistics.
382
+ Timo Schick and Hinrich Schütze. 2021. Exploiting cloze-questions for few-shot text classification and natural language inference. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 255–269, Online. Association for Computational Linguistics.
383
+ Ozan Sener and Silvio Savarese. 2018. Active learning for convolutional neural networks: A core-set approach. In International Conference on Learning Representations.
384
+ Artem Shelmanov, Dmitri Puzyrev, Lyubov Kupriyanova, Denis Belyakov, Daniil Larionov, Nikita Khromov, Olga Kozlova, Ekaterina Artemova, Dmitry V. Dylov, and Alexander Panchenko. 2021. Active learning for sequence tagging with deep pre-trained models and Bayesian uncertainty estimates. In Proceedings of the 16th Conference of the European Chapter of the Association for Computational Linguistics: Main Volume, pages 1698-1712, Online. Association for Computational Linguistics.
385
+ Oriane Simeoni, Mateusz Budnik, Yannis Avrithis, and Guillaume Gravier. 2020. Rethinking deep active learning: Using unlabeled data at model training. In the 25th International Conference on Pattern Recognition (ICPR), pages 1220-1227. IEEE.
386
+ Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1631-1642. Association for Computational Linguistics.
387
+ Katrin Tomanek and Udo Hahn. 2009. Semi-supervised active learning for sequence labeling. In Proceedings of the Joint Conference of the 47th Annual Meeting of the ACL and the 4th International
388
+
389
+ Joint Conference on Natural Language Processing of the AFNLP, pages 1039-1047.
390
+ Laurens Van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-sne. Journal of machine learning research, 9(11).
391
+ Keze Wang, Dongyu Zhang, Ya Li, Ruimao Zhang, and Liang Lin. 2016. Cost-effective active learning for deep image classification. IEEE Transactions on Circuits and Systems for Video Technology, 27(12):2591-2600.
392
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Remi Louf, Morgan Funtowicz, and others. 2020. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online. Association for Computational Linguistics.
393
+ Qizhe Xie, Zihang Dai, Eduard Hovy, Thang Luong, and Quoc Le. 2020. Unsupervised data augmentation for consistency training. Advances in Neural Information Processing Systems, 33.
394
+ Yue Yu, Simiao Zuo, Haoming Jiang, Wendi Ren, Tuo Zhao, and Chao Zhang. 2021. Fine-tuning pretrained language model with weak supervision: A contrastive-regularized self-training approach. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1063–1077. Association for Computational Linguistics.
395
+ Michelle Yuan, Hsuan-Tien Lin, and Jordan Boyd-Graber. 2020. Cold-start active learning through self-supervised language modeling. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 7935-7948, Online. Association for Computational Linguistics.
396
+ Jieyu Zhang, Cheng-Yu Hsieh, Yue Yu, Chao Zhang, and Alexander Ratner. 2022a. A survey on programmatic weak supervision. arXiv preprint arXiv:2202.05433.
397
+ Jieyu Zhang, Yue Yu, Yinghao Li, Yujing Wang, Yaming Yang, Mao Yang, and Alexander Ratner. 2021. WRENCH: A comprehensive benchmark for weak supervision. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track.
398
+ Rongzhi Zhang, Yue Yu, Pranav Shetty, Le Song, and Chao Zhang. 2022b. Prboost: Prompt-based rule discovery and boosting for interactive weakly-supervised learning. arXiv preprint arXiv:2203.09735.
399
+
400
+ Rongzhi Zhang, Yue Yu, and Chao Zhang. 2020a. SeqMix: Augmenting active sequence labeling via sequence mixup. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 8566-8579, Online. Association for Computational Linguistics.
401
+ Tianyi Zhang, Felix Wu, Arzoo Katiyar, Kilian Q Weinberger, and Yoav Artzi. 2020b. Revisiting few-sample bert fine-tuning. arXiv preprint arXiv:2006.05987.
402
+ Xiang Zhang, Junbo Zhao, and Yann LeCun. 2015. Character-level convolutional networks for text classification. Advances in neural information processing systems, 28:649-657.
403
+ Simiao Zuo, Yue Yu, Chen Liang, Haoming Jiang, Siawpeng Er, Chao Zhang, Tuo Zhao, and Hongyuan Zha. 2021. Self-training with differentiable teacher. arXiv preprint arXiv:2109.07049.
404
+
405
+ # A Datasets Details
406
+
407
+ # A.1 Data Source
408
+
409
+ The six benchmarks in our experiments are all publicly available. Below are links to downloadable versions of these datasets.
410
+
411
+ $\diamond$ SST-2: We use the datasets from https://huggingface.co/datasets/glue.
412
+ $\diamond$ AGNews: We use the datasets from https://huggingface.co/datasets/ag_news.
413
+ $\diamond$ Pubmed-RCT: Dataset is available at https://github.com/Franck-Dernoncourt/pubmed-rct.
414
$\diamond$ DBPedia: Dataset is available at https://huggingface.co/datasets/dbpedia_14.
415
+
416
+ For the two weakly-supervised classification tasks, we use the data from the WRENCH benchmark (Zhang et al., 2021).
417
+
418
$\diamond$ TREC: Dataset is available at https://drive.google.com/drive/u/1/folders/1v55IKG2JN9fMtKJWU48B_5_DcPWGnpTq.
419
+ $\diamond$ ChemProt: The raw dataset is available at http://www.cbs.dtu.dk/services/ChemProt/ChemProt-2.0/.
420
+
421
+ The preprocessed dataset is available at https://drive.google.com/drive/u/1/folders/1v55IKG2JN9fMtKJWU48B_5_DcPWGnpTq.
422
+
423
+ # A.2 Train/Test Split
424
+
425
+ For all the datasets, we use the original train/dev/test splits. To keep the size of the development set small, we randomly sample 1000 samples for SST-2, AGNews, Pubmed-RCT, and DBPedia, and 500 samples for TREC and ChemProt.
426
+
427
+ # B Details on Implementation and Experiment Setups
428
+
429
+ # B.1 Computing Infrastructure
430
+
431
+ System: Ubuntu 18.04.3 LTS; Python 3.6; Pytorch 1.6.
432
+
433
+ CPU: Intel(R) Core(TM) i7-5930K CPU @ 3.50GHz.
434
+
435
+ GPU: NVIDIA 2080Ti.
436
+
437
+ # B.2 Number of Parameters
438
+
439
+ ACTUNE and all baselines use RoBERTa-base (Liu et al., 2019) with a task-specific classification head on top as the backbone, which contains 125M trainable parameters. We do not introduce any other parameters in our experiments.
440
+
441
+ # B.3 Experiment Setups
442
+
443
+ Following Ein-Dor et al. (2020); Yuan et al. (2020); Margatina et al. (2021b), all of our methods and baselines are run with 3 different random seeds, and the results are averaged over them. This creates 4 (the number of datasets) $\times 3$ (the number of random seeds) $\times$ 11 (the number of methods) $\times 10$ (the number of fine-tuning rounds in AL) $= 1320$ experiments for fine-tuning PLMs, which is close to the limit of our computational resources, not to mention the additional experiments on weakly-supervised text classification and hyper-parameter tuning. We show both the mean and the standard deviation of the performance in our experiment sections. All the results have passed a paired t-test with $p < 0.05$ (Dror et al., 2018).
444
+
445
+ # B.4 Hyper-parameters for General Experiments
446
+
447
+ We use AdamW as the optimizer, and the learning rate is chosen from $\{1 \times 10^{-5}, 2 \times 10^{-5}\}$. A linear learning rate decay schedule with warm-up ratio 0.1 is used, and the number of training epochs is 15 for fine-tuning. For active self-training and the SSAL baselines, we tune the model for 2000 steps and evaluate the performance on the development set every 50 steps. Finally, we use the model with the best performance on the development set for testing.
448
+
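+ For reference, this corresponds to the standard HuggingFace recipe; a sketch with our own function and variable names, where `model` is the RoBERTa classifier:
+
+ ```python
+ from torch.optim import AdamW
+ from transformers import get_linear_schedule_with_warmup
+
+ def build_optimizer(model, num_training_steps=2000, lr=2e-5):
+     """AdamW with a linear decay schedule and warm-up ratio 0.1, as described above."""
+     optimizer = AdamW(model.parameters(), lr=lr, weight_decay=1e-8)
+     scheduler = get_linear_schedule_with_warmup(
+         optimizer,
+         num_warmup_steps=int(0.1 * num_training_steps),
+         num_training_steps=num_training_steps,
+     )
+     return optimizer, scheduler
+ ```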
449
+ # B.5 Hyper-parameters for ACTUNE
450
+
451
+ Although ACTUNE introduces several hyperparameters, including $K$, $M$, $m_L$, $m_H$, $\beta$, $\gamma$, and $\lambda$, most of them are kept fixed during our experiments, so ACTUNE does not require heavy hyper-parameter tuning. All results are reported as the average over three runs.
452
+
453
+ In our experiments, we keep $\beta = 0.5$ and $\lambda = 1$ for all datasets. For the other parameters, we use a grid search to find the optimal setting for each dataset. Specifically, we search $\gamma$ from [0.5, 0.6, 0.7], $m_{L}$ from [0.6, 0.7, 0.8], and $m_{H}$ from [0.8, 0.9, 1]. For ACTUNE with Entropy, we use prediction-based aggregation, and for ACTUNE with CAL, we use value-
454
+
455
+ <table><tr><td>Hyper-parameter</td><td>SST-2</td><td>AG News</td><td>Pubmed</td><td>DBPedia</td><td>TREC</td><td>Chemprot</td></tr><tr><td>Dropout Ratio</td><td colspan="6">0.1</td></tr><tr><td>Maximum Tokens</td><td>32</td><td>96</td><td>96</td><td>64</td><td>64</td><td>128</td></tr><tr><td>Batch Size for Xl</td><td colspan="6">8</td></tr><tr><td>Batch Size for Xu in Self-training</td><td>32</td><td>48</td><td>48</td><td>32</td><td>16</td><td>24</td></tr><tr><td>Weight Decay</td><td colspan="6">10-8</td></tr><tr><td>Learning Rate</td><td colspan="6">2 × 10-5</td></tr><tr><td>β</td><td colspan="6">0.5</td></tr><tr><td>M</td><td>25</td><td>30</td><td>30</td><td>40</td><td>40</td><td>40</td></tr><tr><td>K</td><td>5</td><td colspan="5">10</td></tr><tr><td>γ</td><td>0.7</td><td colspan="5">0.6</td></tr><tr><td>mL</td><td>0.8</td><td>0.9</td><td>0.7</td><td>0.8</td><td>0.8</td><td>0.8</td></tr><tr><td>mH</td><td>0.9</td><td>0.9</td><td>0.8</td><td>0.9</td><td>0.9</td><td>1.0</td></tr><tr><td>λ</td><td colspan="6">1</td></tr></table>
456
+
457
+ Table 2: Hyper-parameter configurations. Note that each input is truncated to the listed maximum number of tokens.
458
+
459
+ <table><tr><td rowspan="2">Method</td><td colspan="2">Dataset</td></tr><tr><td>Pubmed</td><td>DBPedia</td></tr><tr><td>Finetune (Random)</td><td>&lt;0.1s</td><td>&lt;0.1s</td></tr><tr><td>Entropy (Holub et al., 2008)</td><td>461s</td><td>646s</td></tr><tr><td>BALD (Gal et al., 2017)</td><td>4595s</td><td>6451s</td></tr><tr><td>ALPS (Yuan et al., 2020)</td><td>488s</td><td>677s</td></tr><tr><td>BADGE (Ash et al., 2020)</td><td>554s</td><td>1140s</td></tr><tr><td>CAL (Margatina et al., 2021b)</td><td>493s</td><td>688s</td></tr><tr><td>REVIVAL (Guo et al., 2021)</td><td>3240s</td><td>OOM</td></tr><tr><td>ACTUNE + Entropy</td><td>477s</td><td>733s</td></tr><tr><td>w/ RS for Active Learning</td><td>15.8s</td><td>44.9s</td></tr><tr><td>w/ MMB for Self-training</td><td>0.12s</td><td>0.18s</td></tr><tr><td>ACTUNE + CAL</td><td>510s</td><td>735s</td></tr><tr><td>w/ RS for Active Learning</td><td>16.6s</td><td>46.4s</td></tr><tr><td>w/ MMB for Self-training</td><td>0.12s</td><td>0.18s</td></tr></table>
460
+
461
+ Table 3: The running time of different baselines. Note that ASST, CEAL, and BASS directly use existing active learning methods, so we do not list their running times here.
462
+
463
+ based aggregation by default.
464
+
465
+ # C Runtime Analysis
466
+
467
+ Table 3 shows the time for one active learning round of ACTUNE and the baselines. We highlight that the additional time for region-aware sampling and the momentum-based memory bank is small compared with the inference time. We also find that BALD and REVIVAL are not efficient. BALD estimates model uncertainty by passing the data through the model multiple times, which makes the total inference time for PLMs very long. For REVIVAL, calculating the adversarial gradient requires extra forward and backward passes, which is time-consuming for PLMs with
468
+
469
+ millions of parameters<sup>8</sup>.
actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f26077c83a173b148b60cef84b833749cf22359a0a8aeb8e0ec2b4f2465c6c07
3
+ size 676272
actuneuncertaintybasedactiveselftrainingforactivefinetuningofpretrainedlanguagemodels/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c03e21b1731eb7191f1c4f091c8e874b77196501e8d0c302d761371414a8099f
3
+ size 601530
adaptableadapters/1d1ffb58-64c7-4e7f-b5d1-fa9254c75cca_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e94dd1415afc45b5e511256d677c2cb9eb1c8145e9457dc978e6c8037b9c574
3
+ size 84716
adaptableadapters/1d1ffb58-64c7-4e7f-b5d1-fa9254c75cca_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:443cc0b2c63ca80bb611c25b4f01f87d32f24b002482cd5f55f4f4f67d59d7ba
3
+ size 99924
adaptableadapters/1d1ffb58-64c7-4e7f-b5d1-fa9254c75cca_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965ace31b997a4c0ddb0bf4c0f6668b17bd9a35990119e1c53814044e0fe45b7
3
+ size 402223
adaptableadapters/full.md ADDED
@@ -0,0 +1,260 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adaptable Adapters
2
+
3
+ Nafise Sadat Moosavi $^{1*}$ , Quentin Delfosse $^{3}$ , Kristian Kersting $^{2,3}$ , Iryna Gurevych $^{2,4}$
4
+
5
+ <sup>1</sup> Department of Computer Science, The University of Sheffield
6
+
7
+ 2 Hessian Center for AI (hessian.AI)
8
+
9
+ $^{3}$ AI & Machine Learning Lab, $^{4}$ Ubiquitous Knowledge Processing Lab (UKP Lab),
10
+
11
+ Department of Computer Science, Technical University of Darmstadt
12
+
13
+ https://www.ukp.tu-darmstadt.de
14
+
15
+ # Abstract
16
+
17
+ State-of-the-art pretrained NLP models contain from a hundred million to over a trillion parameters. Adapters provide a parameter-efficient alternative to full finetuning, in which we finetune only lightweight neural network layers on top of pretrained weights. Adapter layers are initialized randomly. However, existing work uses the same adapter architecture—i.e., the same adapter layer on top of each layer of the pretrained model—for every dataset, regardless of the properties of the dataset or the amount of available training data. In this work, we introduce adaptable adapters, which contain (1) learnable activation functions that can differ across layers and input data, and (2) a learnable switch to select and only use the beneficial adapter layers. We show that adaptable adapters achieve on-par performances with the standard adapter architecture while using a considerably smaller number of adapter layers. In addition, we show that the adapter architecture selected by adaptable adapters transfers well across different data settings and similar tasks. We propose to use adaptable adapters for designing efficient and effective adapter architectures. The resulting adapters (a) contain about $50\%$ of the learning parameters of the standard adapter, are therefore more efficient at training and inference, and require less storage space, and (b) achieve considerably higher performances in low-data settings. $^{1}$
18
+
19
+ # 1 Introduction
20
+
21
+ Recent improvements in NLP are heavily skewed towards using larger pretrained models (Roberts et al., 2020) and given their considerably better performances, using them is becoming unavoidable (Kaplan et al., 2020). Their improvements, however, come at the cost of significant computational resources at training and inference times.
22
+
23
+ For instance, the number of parameters in recent pretrained models can vary from 110M in BERT-base (Devlin et al., 2019) to 11 billion in T0 (Sanh et al., 2022) to over a trillion parameters in Switch Transformers (Fedus et al., 2021). Using such models for each downstream application requires a vast amount of storage, training, and inference computation budget that is not accessible to every user.
24
+
25
+ Instead of fine-tuning these massive numbers of parameters for each downstream task, we can use adapter architectures (Houlsby et al., 2019; Pfeiffer et al., 2020). Adapters are lightweight neural network layers that are added on top of each layer of the pretrained model. As opposed to the standard model fine-tuning, in which all layers are fine-tuned for the target task, adapter-based tuning freezes the transformer layers and only trains the newly added adapter layers. Since the majority of parameters—i.e., the layers of the large pretrained model—are shared between different downstream tasks, the use of adapters results in parameter-efficient transfer learning. In addition to their parameter-efficiency, He et al. (2021) show that training adapter-layers (a) outperforms fine-tuning the whole model on low-data and cross-lingual settings, and (b) is more robust to overfitting.
26
+
27
+ Existing work suggests that (a) different layers of the pretrained models may capture different aspects of the form, syntax, or meaning of the input text (Tenney et al., 2019; Clark et al., 2019), and (b) they may not all be needed for performing a given task (Houlsby et al., 2019; Fan et al., 2020; Rücklé et al., 2021). In addition, adapter layers are initialized randomly. Therefore, it is not necessary to use the same adapter architecture for different downstream tasks and given different amounts of annotated data. However, existing works use the same adapter architecture for all the different input data, i.e., (a) one adapter layer on top of all the pretrained layers, although using all the layers may not be necessary, and (b) the same activation
28
+
29
+ function for all the layers and different tasks, while the best activation function may vary for different tasks (Delfosse et al., 2021).
30
+
31
+ In this paper, we propose a systematic approach for designing more adequate and flexible adapter architectures by introducing the adaptable adapter (AA). Adaptable adapters (1) use a learnable activation function—called Rational activation (Molina et al., 2020)—instead of a constant activation in adapter layers allowing the adapter model to learn different activation functions at different adapter layers and for different tasks, and (2) consist of a learnable switch at each adapter layer to determine the beneficial adapter layers during training and to only use the selected layers during inference.
32
+
33
+ We evaluate adaptable adapters on the GLUE benchmark (Wang et al., 2018) that consists of various text classification tasks. We perform evaluations based on different data settings in which different amounts of annotated examples are available for training. Our results show that adaptable adapters achieve on-par performances with the full adapter architecture while using considerably fewer adapter layers at the inference.
34
+
35
+ We further propose to use adaptable adapters for designing efficient adapter architectures—i.e., to only add an adapter layer to the layers that are selected by the adaptable adapter. We show that while the selected adapter architecture by $AA$ , called $AA$ -focused, is considerably more efficient at both training and inference times and requires less storage, it achieves on-par performances with the full adapter architecture when trained on all available training data and considerably outperforms it on low-data settings. In addition, we show that the selected adapter architecture by $AA$ transfers well across similar tasks and different data settings. Therefore, we can train $AA$ using a limited amount of training data, and for one of the tasks, and then use the resulting $AA$ -focused architecture for different data settings and other similar tasks.
36
+
37
+ Overall, the contributions of this paper are as follows:
38
+
39
+ - We propose adaptable adapters that introduce flexibility in adapter architectures by (a) selecting the beneficial adapter layers to use, and (b) learning the suitable activation function for each layer and each task.
40
+ - We propose to use adaptable adapters to design efficient adapters that require less training time, inference time, and storage space.
41
+
42
+ - We show that using fewer adapter layers with a learnable activation function considerably improves the performance on low-data settings.
43
+
44
+ # 2 Related Work
45
+
46
+ # 2.1 Rational Activation
47
+
48
+ Rational activation functions, empirically introduced as Padé Activation Units (Molina et al., 2020), are learnable activation functions that can approximate common activation functions as well as learn new ones. The rational activation function $R(x)$ of order $m, n$ is defined as follows:
49
+
50
+ $$
51
+ R(x) = \frac{\sum_{j=0}^{m} a_{j} x^{j}}{1 + \left| \sum_{k=1}^{n} b_{k} x^{k} \right|} \tag{1}
52
+ $$
53
+
54
+ where $a_{j}$ and $b_{k}$ are learnable parameters. These rational functions use an absolute value in the denominator to avoid potential poles, which would make training unstable. Such rational activation functions provide stable training, as empirically shown in image classification and reinforcement learning (Molina et al., 2020; Delfosse et al., 2021). $R(x)$ can be initialized to approximate any of the known activation functions or constant functions. Molina et al. (2020) show that rationals outperform other commonly used activation functions in common image classification tasks. Rational activation functions have also been integrated into Generative Adversarial Networks (Boulle et al., 2020). Delfosse et al. (2021) show that some of the layers in very deep pretrained Residual Networks tend to approximate activation functions' behavior, and that we can achieve on-par or better performances than the full network by replacing some of the complete layers with rational activation functions. Similar to this observation, as we show in § 5, using rational activation functions instead of a constant activation (ReLU) in adapters allows them to achieve high accuracy with fewer adapter layers.
55
+
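+ As a concrete reference, the following is a minimal PyTorch sketch of Eq. 1 (not the authors' implementation), with coefficients initialized so that $R(x)$ starts out approximating the constant function $f(x) = 1$:
+ 
+ ```python
+ import torch
+ import torch.nn as nn
+ 
+ class RationalActivation(nn.Module):
+     """Rational activation R(x) of order (m, n) with learnable coefficients (Eq. 1)."""
+     def __init__(self, m=5, n=4):
+         super().__init__()
+         self.a = nn.Parameter(torch.zeros(m + 1))  # numerator coefficients a_0..a_m
+         self.b = nn.Parameter(torch.zeros(n))      # denominator coefficients b_1..b_n
+         with torch.no_grad():
+             self.a[0] = 1.0  # numerator ~ 1, denominator ~ 1, so R(x) ~ 1
+ 
+     def forward(self, x):
+         num = sum(self.a[j] * x.pow(j) for j in range(self.a.numel()))
+         # absolute value in the denominator avoids poles, as in Eq. 1
+         den = 1 + torch.abs(
+             sum(self.b[k - 1] * x.pow(k) for k in range(1, self.b.numel() + 1))
+         )
+         return num / den
+ ```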
56
+ # 2.2 Reducing Model's Size for Efficiency
57
+
58
+ Improving the efficiency of large pretrained models has received particular attention for the inference time. The argument is that the effect of the training cost is limited, i.e., the model is trained once but used many times, whereas the inference time has a wide impact on the everyday use of NLP models.
59
+
60
+ Existing approaches for improving the inference-time efficiency belong to two different categories:
61
+
62
+ (a) the distillation and pruning techniques that create a smaller model for inference but often require re-training or fine-tuning the smaller model (Tang et al., 2019; Sanh et al., 2019; Voita et al., 2019; Sun et al., 2020; Bai et al., 2021), and (b) on-demand network size reduction at the inference time. There are two different approaches in the second category, namely layer dropping and early exiting.
63
+
64
+ Fan et al. (2020) use layer dropping during training, which randomly drops the model's layers to make the model robust to inference-time layer selection. They show that it is possible to select sub-networks of any depth from large models at inference with limited impact on the performance and without the need for additional finetuning. Layer dropping was previously investigated by Huang et al. (2016), who propose to drop layers during training for regularizing the model and reducing the training time of deep convolutional networks. Rücklé et al. (2021) use layer dropping for adapter architectures. They show that by randomly dropping adapter layers during training, they can prune the adapter model on demand at inference time.
65
+
66
+ Schwartz et al. (2020) propose to add an output layer to each transformer layer. At inference time, while the model calculates the layer-wise representations from the bottom layer to the top layer, it also makes a prediction using the associated classification layer. They use the output labels' scores of the classification layers as confidence scores to decide whether to exit early, if the classifier is confident, or to proceed to process the input with the next layers. This hierarchical architecture offers an inference time-accuracy tradeoff by setting the confidence threshold. The early exiting approach is similar to layer dropping in which the dropped layers are always the top layers.
67
+
68
+ All these approaches select the number of layers to drop and the dropped layers heuristically at inference time, with the goal of improving the inference time. Instead, the adaptable adapter is a systematic approach for selecting the useful adapter layers for the given task during training. Besides layer selection, an adaptable adapter allows for learning the desired activation function for different inputs.
69
+
70
+ As we show, we can use adaptable adapters to design efficient adapter architectures with a considerably smaller number of training parameters and on-par or considerably higher performances, especially with larger models and in low-data settings.
71
+
72
+ # 3 Proposed Architecture
73
+
74
+ # 3.1 Learnable Activation
75
+
76
+ Empirical observations of performances have led experts in several fields to use different activation functions for different tasks. Functions from the ReLU family are usually used for neural network-based visual computing, Tanh has been used in PPO for reinforcement learning, while GeLU has progressively been adopted in transformers. With the growth of the models, and the complexity of the tasks they are applied to, choosing one fixed activation function to equip the complete architecture is suboptimal. By using rational (§ 2.1), we let the adapter layer learn the suitable activation function at each different adapter layer, task, and dataset. In adaptable adapters, we replace the constant activation function of each adapter layer—i.e., ReLU in the default configuration used in AdapterHub (Pfeiffer et al., 2020)—with rational.
77
+
78
+ Figure 1 shows a standard adapter layer as well as an adapter layer in adaptable adapters.
79
+
80
+ ![](images/ec6335241cfe3022dbd4a1214d10b0bcbcc8ecaae2326810ffec3e77935b90c9.jpg)
81
+ (a)
82
+ Figure 1: (a) a standard adapter layer with linear feedforward layers and a fixed activation, (b) an adapter layer in adaptable adapters with linear feedforward layers and a rational activation. Learnable parameters are shown within pink boxes.
83
+
84
+ ![](images/243bde8ad9a12e93900e8ce4076bc80cf378c3e3331758071390c8cbeb22a2a7.jpg)
85
+ (b)
86
+
87
+ # 3.2 Learnable Layer Selection
88
+
89
+ Houlsby et al. (2019) examined various choices of adapter architectures.
90
+
91
+ They report that using two feedforward linear layers—one down-project and one up-project layer—results in good performances while only introducing a few parameters. Assuming $d$ is the dimensionality of the input—i.e., the embedding size of the transformer layer—the down-project layer maps the input dimension to $n$ where $n < d$ , and the up-project layer maps the input dimension back to $d$ . $n$ is called the hidden size of the adapter. Each adapter contains a skip-connection that lets an adapter layer approximate an identity function, i.e., pass the input of a transformer layer unchanged to the next layer. The learnable switches in adaptable adapters explicitly model the selection between the feedforward adapter layer and the identity function. By examining the switch probabilities, we can determine the adapter layers that are beneficial for the overall performance of the model.
92
+
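+ For reference, a minimal sketch of the standard bottleneck adapter just described (down-project, nonlinearity, up-project, and a skip-connection); the dimensions are illustrative:
+ 
+ ```python
+ import torch
+ import torch.nn as nn
+ 
+ class StandardAdapterLayer(nn.Module):
+     """Down-project d -> n (n < d), nonlinearity, up-project n -> d, plus a
+     skip-connection that lets the layer approximate an identity function."""
+     def __init__(self, d=1024, n=64):
+         super().__init__()
+         self.down = nn.Linear(d, n)
+         self.up = nn.Linear(n, d)
+ 
+     def forward(self, x):
+         return x + self.up(torch.relu(self.down(x)))
+ ```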
93
+ As mentioned in § 1, existing work shows that different layers of the pretrained models capture different aspects of the input data, and not all of them are necessary for performing various tasks. Therefore, for different input data, different layers may be of different importance. Adding a learnable switch at each adapter layer provides a more systematic approach to determining the beneficial layers for each input task during training. We use the Gumbel Softmax $(\mathcal{G}\mathcal{S})$ estimator as an end-to-end differentiable switch (hard attention) to make the network attend to an element of a set. Assuming $\pi_{i}$ are the probabilities of selecting each element of the set, i.e., $\forall_{i}\pi_{i} \geq 0, \sum_{i}\pi_{i} = 1$ , $\mathcal{G}\mathcal{S}$ estimates the hard attention $y_{i}$ as follows:
94
+
95
+ $$
96
+ y_{i} = \frac{\exp\left(\left(\log(\pi_{i}) + g_{i}\right) / \tau\right)}{\sum_{j} \exp\left(\left(\log(\pi_{j}) + g_{j}\right) / \tau\right)} \tag{2}
97
+ $$
98
+
99
+ where $g_{i}$ are i.i.d. samples from a Gumbel distribution, and $\tau$ is a temperature parameter. Setting $\tau$ to small values results in distributions that are similar to categorical ones.
100
+
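+ For illustration, a minimal sketch of Eq. 2 using PyTorch's built-in estimator; the two logits encode $\log(\pi_i)$ for the two choices, and `hard=True` yields a one-hot, straight-through sample:
+ 
+ ```python
+ import torch
+ import torch.nn.functional as F
+ 
+ # Two choices with equal initial probability pi = (0.5, 0.5) and tau = 0.1.
+ logits = torch.log(torch.full((2,), 0.5))
+ y = F.gumbel_softmax(logits, tau=0.1, hard=True)
+ print(y)  # e.g. tensor([1., 0.]): take path 0; tensor([0., 1.]): take path 1
+ ```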
101
+ # 3.3 Adaptable Adapters
102
+
103
+ The adaptable adapter (AA) is the combination of the learnable layer selection and the learnable activation function. The learnable layer selection—i.e., a Gumbel Softmax estimator—selects between an adapter layer, with no skip connection, and an identity function with zero parameters that passes the input without any changes to the next layer. The adapter layers in adaptable adapters consist of two linear layers—a down-project and an up-project layer—with a rational activation function as the non-linearity between them.
104
+
105
+ ![](images/738695a4b257aea21037b7c5f359dc94bb3abe9f8f2eee14166f774a1b68f1b9.jpg)
106
+ Figure 2: The adaptable adapter layer, which consists of a Gumbel Softmax to choose between an adapter layer with a rational activation and an identity function.
107
+
108
+ The adaptable adapter allows learning different adapter architectures for different input data by (a) learning to use a subset of adapter layers, and (b) learning a potentially different activation function at each layer. Figure 2 shows the structure of an adapter layer in adaptable adapters.
109
+
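+ Putting the pieces together, here is a hedged sketch of one adaptable adapter layer (assuming the `RationalActivation` module sketched in § 2.1; the dimensions are illustrative): the Gumbel-Softmax gate selects between the rational adapter path, which has no skip connection, and a plain identity.
+ 
+ ```python
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ 
+ class AdaptableAdapterLayer(nn.Module):
+     def __init__(self, d_model=1024, hidden=64, tau=0.1):
+         super().__init__()
+         self.down = nn.Linear(d_model, hidden)             # down-project: d -> n
+         self.act = RationalActivation()                    # learnable activation (see the § 2.1 sketch)
+         self.up = nn.Linear(hidden, d_model)               # up-project: n -> d
+         self.switch_logits = nn.Parameter(torch.zeros(2))  # pi initialized to (0.5, 0.5)
+         self.tau = tau
+ 
+     def forward(self, x):
+         adapter_out = self.up(self.act(self.down(x)))      # adapter path, no skip connection
+         gate = F.gumbel_softmax(self.switch_logits, tau=self.tau, hard=True)
+         return gate[0] * adapter_out + gate[1] * x         # adapter vs. identity
+ ```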
110
+ # 4 Experimental Setup
111
+
112
+ # 4.1 Datasets
113
+
114
+ We use the English text classification datasets from the GLUE benchmark (Wang et al., 2019) including MNLI (Williams et al., 2018), $\mathrm{QQP}^3$ , QNLI (Rajpurkar et al., 2016), SST-2 (Socher et al., 2013), CoLA (Warstadt et al., 2019), STS-B (Cer et al., 2017), MRPC (Dolan and Brockett, 2005), RTE (Dagan et al., 2006), and WNLI (Levesque et al., 2011). Table 1 shows the number of training examples and the evaluation metric for each dataset.
115
+
116
+ <table><tr><td>Dataset</td><td>|Train|</td><td>Metric</td><td>Dataset</td><td>|Train|</td><td>Metric</td></tr><tr><td>MNLI</td><td>393k</td><td>acc.</td><td>STS-B</td><td>7k</td><td>Pearson/Spearman</td></tr><tr><td>QQP</td><td>364k</td><td>acc./F1</td><td>MRPC</td><td>3.7k</td><td>acc./F1</td></tr><tr><td>QNLI</td><td>105k</td><td>acc.</td><td>RTE</td><td>2.5k</td><td>acc.</td></tr><tr><td>SST-2</td><td>67k</td><td>acc.</td><td>WNLI</td><td>634</td><td>acc.</td></tr><tr><td>CoLA</td><td>8.5k</td><td>Matthews</td><td></td><td></td><td></td></tr></table>
117
+
118
+ Table 1: GLUE datasets with their number of training examples and the corresponding evaluation metric.
119
+
120
+ # 4.2 Transformer Model
121
+
122
+ As the base model, we use the BERT-large model (Devlin et al., 2019). BERT-large contains 24 layers, an embedding size of 1024, and a total number of 340M parameters.
123
+
124
+ # 4.3 Adapter Models
125
+
126
+ Baseline As a baseline adapter, we use the adapter layers with the pfeiffer configuration from AdapterHub (Pfeiffer et al., 2020). The adapter layers with the pfeiffer configuration are similar to the one in Figure 1, in which learnable parameters include two feedforward layers. For BERT-base, each pfeiffer layer consists of $73.7\mathrm{k}$ parameters, resulting in a total number of 884.7K. For BERT-large, the number of parameters for each adapter layer is 131K, and the total number of parameters is 3.1M. We see that as the underlying model gets larger, the number of parameters in adapters also increases notably. Therefore, adapter architecture selection using AA is a potential solution to control this increase to some extent.
127
+
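+ These per-layer counts can be sanity-checked with back-of-the-envelope arithmetic, assuming the pfeiffer configuration's default bottleneck of $d/16$ and counting weights only (biases add a few hundred more parameters per layer):
+ 
+ ```python
+ def adapter_weight_count(d, reduction=16):
+     n = d // reduction  # bottleneck (hidden) size of the adapter
+     return 2 * d * n    # down-project (d x n) + up-project (n x d)
+ 
+ print(adapter_weight_count(768))   # BERT-base:  73728  ~= 73.7k per layer
+ print(adapter_weight_count(1024))  # BERT-large: 131072 ~= 131k per layer; x24 ~= 3.1M
+ ```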
128
+ Adaptable Adapter (AA) For the rational activation, similar to Molina et al. (2020), we use order $m = 5$ and $n = 4$ for rational. Therefore, the rational activation function only consists of ten learnable parameters. The rational activation can be initialized to approximate an existing function. Based on our preliminary experiments, using $f(x) = 1$ for initializing $R(x)$ results in better performances on the GLUE benchmark.
129
+
130
+ For the Gumbel-Softmax switch, we set the temperature parameter $\tau$ to 0.1, and we initialize $\pi_{i}$ to 0.5 for both inputs—i.e., the same initial probability for the rational adapter and the identity function.
131
+
132
+ AA-focused We can use the architecture selected by AA for designing a new adapter architecture, i.e., to only include an adapter layer—with a rational function—at layers in which the switch has selected the adapter layer over the identity function. We call this architecture AA-focused. Note that compared to AA, AA-focused is more efficient both at training and inference time, as it includes fewer layers and no switch functions. It also requires less storage space for saving the new adapter weights.
133
+
134
+ Also, training $AA$ performs the architecture selection and the training of the randomly initialized adapter layers simultaneously. As a result, as we see in our evaluations, $AA$ -focused achieves higher performances, as its training is focused only on training the adapter layers.
135
+
136
+ AdapterDrop (Rücklé et al., 2021) During training, AdapterDrop randomly drops the first $n$ layers, where $n$ varies across iterations. At inference, $n$ can be set to any desired number of layers. In our experiments, we select $n$ based on the number of layers dropped by $AA$ , i.e., the number of layers that are not selected by the switch functions.
137
+
138
+ # 4.4 Experiments
139
+
140
+ We evaluate the models in different settings: (a) using full training data, and (b) low-data settings. For all the experiments, we consider $25\%$ of the training data as the development set and use the official development sets as the test data. We perform the low-data evaluations when 100, 300, and 500 annotated examples are available. $^{6}$ The test data is the same for all the evaluations. We run all the low-data experiments for 20 epochs and five different random seeds $^{7}$ . We report the average and standard deviation over the five different runs. When training on the full datasets, the experiments are computationally very expensive with BERT-large. Therefore, for this setting, we only report the results using the first random seed. All experiments are done on one A100 NVIDIA GPU. All implementations are based on AdapterHub (Pfeiffer et al., 2020).
141
+
142
+ # 5 Evaluation
143
+
144
+ Table 2 presents the results of Baseline, AdapterDrop, AA, and AA-focused. AA selects different layers for different tasks and different random seeds. We evaluate three configurations for AA-focused:
145
+
146
+ - AA-focused $^{spec}$ : for each task, we design the corresponding $AA$ -focused based on the architecture selected by $AA$ for that task and the first random seed (42). For instance, the $AA$ -focused architecture is the same for all
147
+
148
+ <table><tr><td></td><td>MNLI</td><td>QQP</td><td>QNLI</td><td>SST-2</td><td>CoLA</td><td>STS-B</td><td>MRPC</td><td>RTE</td><td>WNLI</td><td>Avg</td></tr><tr><td colspan="11">Low-data-100</td></tr><tr><td>Baseline</td><td>33.893.02</td><td>30.650.38</td><td>58.784.81</td><td>56.013.68</td><td>5.204.84</td><td>40.009.64</td><td>74.800.0</td><td>49.392.86</td><td>55.213.01</td><td>44.87</td></tr><tr><td>AA</td><td>33.642.66</td><td>30.880.39</td><td>59.616.19</td><td>51.282.52</td><td>-0.551.87</td><td>45.1814.17</td><td>74.800.0</td><td>50.113.44</td><td>55.482.46</td><td>44.49</td></tr><tr><td>AdapterDropAA</td><td>33.722.84</td><td>30.620.40</td><td>57.505.78</td><td>54.012.59</td><td>4.107.95</td><td>36.538.93</td><td>74.800.0</td><td>49.392.86</td><td>56.061.38</td><td>44.08</td></tr><tr><td>AdapterDrop13</td><td>33.712.76</td><td>30.610.4</td><td>58.394.27</td><td>53.442.56</td><td>3.917.6</td><td>36.238.68</td><td>74.800.0</td><td>49.462.81</td><td>55.761.91</td><td>44.04</td></tr><tr><td>AA-focusedspec</td><td>35.282.06</td><td>44.3716.31</td><td>63.754.39</td><td>52.944.64</td><td>5.6810.91</td><td>62.793.34</td><td>74.800.01</td><td>51.482.72</td><td>54.084.51</td><td>49.47</td></tr><tr><td>AA-focuseduni</td><td>36.362.61</td><td>44.3716.31</td><td>63.364.86</td><td>55.874.42</td><td>4.754.9</td><td>59.376.78</td><td>74.940.2</td><td>51.123.45</td><td>51.834.12</td><td>49.11</td></tr><tr><td>AA-focusedsim</td><td>34.773.18</td><td>45.7814.40</td><td>63.134.30</td><td>61.5810.95</td><td>17.5411.19</td><td>59.897.70</td><td>74.770.07</td><td>52.202.93</td><td>51.835.52</td><td>51.28</td></tr><tr><td>|Baseline|</td><td>24</td><td>24</td><td>24</td><td>24</td><td>24</td><td>24</td><td>24</td><td>24</td><td>24</td><td></td></tr><tr><td>|AA|</td><td>13.21.7</td><td>15.03.0</td><td>13.62.2</td><td>14.64.0</td><td>15.82.1</td><td>16.42.7</td><td>13.01.8</td><td>11.21.8</td><td>12.35.7</td><td></td></tr><tr><td>|AdapterDropAA|</td><td>14</td><td>13</td><td>15</td><td>16</td><td>16</td><td>14</td><td>15</td><td>13</td><td>16</td><td></td></tr><tr><td>|AdapterDrop13|</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td></td></tr><tr><td>|AA-focusedspec|</td><td>14</td><td>13</td><td>15</td><td>16</td><td>16</td><td>14</td><td>15</td><td>13</td><td>16</td><td></td></tr><tr><td>|AA-focuseduni|</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td></td></tr><tr><td>|AA-focusedsim|</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td>13</td><td></td></tr><tr><td 
colspan="11">Low-data-300</td></tr><tr><td>Baseline</td><td>36.554.76</td><td>61.508.66</td><td>69.621.24</td><td>79.8614.15</td><td>30.405.48</td><td>78.242.81</td><td>76.551.31</td><td>51.623.21</td><td>45.924.33</td><td>58.91</td></tr><tr><td>AA</td><td>37.142.49</td><td>66.070.38</td><td>71.331.82</td><td>72.5216.59</td><td>26.058.74</td><td>82.081.6</td><td>74.032.21</td><td>51.832.84</td><td>47.044.76</td><td>58.68</td></tr><tr><td>AdapterDropAA</td><td>38.865.93</td><td>62.984.85</td><td>66.712.91</td><td>79.2914.17</td><td>16.8912.06</td><td>78.51.99</td><td>75.740.67</td><td>51.193.35</td><td>46.764.02</td><td>57.44</td></tr><tr><td>AdapterDrop13</td><td>37.955.56</td><td>63.724.84</td><td>66.712.91</td><td>80.014.47</td><td>16.312.05</td><td>77.522.08</td><td>76.030.92</td><td>51.333.4</td><td>46.484.27</td><td>57.34</td></tr><tr><td>AA-focusedspec</td><td>44.624.11</td><td>66.831.06</td><td>73.721.09</td><td>85.872.94</td><td>34.518.3</td><td>81.162.04</td><td>76.721.06</td><td>54.584.72</td><td>46.203.92</td><td>62.69</td></tr><tr><td>AA-focuseduni</td><td>46.694.29</td><td>69.251.33</td><td>74.162.95</td><td>87.570.72</td><td>35.653.26</td><td>81.712.64</td><td>75.971.55</td><td>56.895.56</td><td>52.397.26</td><td>64.48</td></tr><tr><td>AA-focusedsim</td><td>45.972.08</td><td>68.361.36</td><td>73.982.68</td><td>86.831.90</td><td>37.433.10</td><td>78.813.58</td><td>76.661.30</td><td>55.962.81</td><td>48.445.53</td><td>63.61</td></tr><tr><td>|AA|</td><td>17.01.3</td><td>16.21.0</td><td>14.81.8</td><td>12.83.2</td><td>16.82.2</td><td>18.61.9</td><td>16.01.1</td><td>12.41.2</td><td>12.42.1</td><td></td></tr><tr><td>|AA-focusedspec|</td><td>18</td><td>16</td><td>13</td><td>9</td><td>17</td><td>16</td><td>16</td><td>13</td><td>13</td><td></td></tr><tr><td 
colspan="11">Low-data-500</td></tr><tr><td>Baseline</td><td>44.356.08</td><td>69.491.12</td><td>73.481.89</td><td>88.261.53</td><td>37.984.42</td><td>82.070.99</td><td>78.331.11</td><td>59.281.76</td><td>49.866.08</td><td>64.79</td></tr><tr><td>AA</td><td>47.335.11</td><td>67.522.99</td><td>75.02.3</td><td>84.933.06</td><td>39.964.87</td><td>84.560.87</td><td>78.381.0</td><td>59.283.18</td><td>50.135.16</td><td>65.23</td></tr><tr><td>AdapterDropAA</td><td>42.667.02</td><td>69.521.03</td><td>74.152.19</td><td>89.010.49</td><td>38.444.51</td><td>82.051.05</td><td>78.191.04</td><td>59.282.6</td><td>49.3636</td><td>64.73</td></tr><tr><td>AdapterDrop13</td><td>43.056.41</td><td>69.120.88</td><td>72.821.83</td><td>88.970.6</td><td>36.895.03</td><td>80.771.32</td><td>77.860.8</td><td>58.562.44</td><td>49.016.57</td><td>64.12</td></tr><tr><td>AA-focusedspec</td><td>54.962.66</td><td>69.521.14</td><td>77.301.27</td><td>87.941.10</td><td>39.513.47</td><td>84.300.69</td><td>78.921.70</td><td>59.202.58</td><td>48.736.27</td><td>66.71</td></tr><tr><td>AA-focuseduni</td><td>56.131.88</td><td>69.322.29</td><td>76.852.37</td><td>87.891.47</td><td>41.753.83</td><td>83.481.25</td><td>78.000.35</td><td>60.421.75</td><td>50.425.07</td><td>67.14</td></tr><tr><td>AA-focusedsim</td><td>55.852.62</td><td>69.862.56</td><td>77.301.93</td><td>87.571.69</td><td>39.791.42</td><td>83.231.61</td><td>78.751.26</td><td>60.071.62</td><td>49.586.75</td><td>66.89</td></tr><tr><td>|AA|</td><td>12.86.0</td><td>16.81.3</td><td>16.42.6</td><td>14.62.1</td><td>10.68.3</td><td>19.61.4</td><td>16.62.4</td><td>14.36.8</td><td>12.63.2</td><td></td></tr><tr><td>|AA-focusedspec|</td><td>14</td><td>17</td><td>18</td><td>15</td><td>17</td><td>18</td><td>14</td><td>16</td><td>14</td><td></td></tr><tr><td colspan="11">Full Data</td></tr><tr><td>Baseline</td><td>85.08</td><td>88.68</td><td>91.95</td><td>93.00</td><td>58.28</td><td>89.75</td><td>83.12</td><td>70.39</td><td>56.34</td><td>79.62</td></tr><tr><td>AA</td><td>84.73</td><td>88.38</td><td>91.01</td><td>92.55</td><td>57.60</td><td>90.11</td><td>82.36</td><td>63.18</td><td>53.52</td><td>78.16</td></tr><tr><td>AdapterDropAA</td><td>84.96</td><td>88.75</td><td>91.38</td><td>93.35</td><td>58.63</td><td>89.85</td><td>82.84</td><td>66.06</td><td>56.34</td><td>79.12</td></tr><tr><td>AdapterDrop13</td><td>84.73</td><td>87.15</td><td>90.92</td><td>92.78</td><td>57.42</td><td>88.84</td><td>83.34</td><td>64.25</td><td>56.34</td><td>78.42</td></tr><tr><td>AA-focusedspec</td><td>84.77</td><td>88.46</td><td>91.38</td><td>92.32</td><td>56.79</td><td>89.74</td><td>83.42</td><td>64.98</td><td>57.75</td><td>78.84</td></tr><tr><td>AA-focuseduni</td><td>85.41</td><td>88.61</td><td>91.51</td><td>92.66</td><td>54.62</td><td>89.34</td><td>84.88</td><td>67.15</td><td>56.34</td><td>78.94</td></tr><tr><td>AA-focusedsim</td><td>85.32</td><td>88.41</td><td>91.85</td><td>91.4</td><td>57.96</td><td>89.38</td><td>84.42</td><td>67.86</td><td>57.75</td><td>79.37</td></tr><tr><td>|AA|</td><td>14</td><td>18</td><td>17</td><td>18</td><td>20</td><td>20</td><td>18</td><td>16</td><td>15</td><td></td></tr><tr><td>|AA-focusedspec|</td><td>14</td><td>18</td><td>17</td><td>18</td><td>20</td><td>20</td><td>18</td><td>16</td><td>15</td><td></td></tr></table>
149
+
150
+ Table 2: Comparing the results of (a) the standard adapter model that includes an adapter layer on all the 24 BERT-large layers (Baseline), (b) the adaptable adapter (AA), (c) AdapterDrop, and (d) AA-focused adapters, in which the architecture of the adapter is selected based on the layers selected by AA. The architecture of $AA$-focused$^{spec}$ is selected based on the layers selected by AA for the corresponding task and data setting when the random seed is 42. The architecture of $AA$-focused$^{uni}$ is selected based on the layers selected by AA for the QQP task on the Low-data-100 setting and for random seed 42. $AA$-focused$^{sim}$ only contains an adapter layer with a rational activation function at the last 13 layers of BERT-large, i.e., the total number of adapter layers in $AA$-focused$^{uni}$. The number of layers at inference time for the AdapterDrop$^{AA}$ experiments is selected based on the number of layers in the corresponding $AA$-focused$^{spec}$ experiments. The number of inference-time layers for AdapterDrop$^{13}$ equals 13. Except for Full Data, the reported results are averaged over five random seeds. The subscript reports the corresponding standard deviation. The Full Data results are reported for one random seed. The $|AA|$ rows report the average number of adapter layers selected by AA using different random seeds. $|AA$-focused$^*|$ rows report the number of added adapter layers in the corresponding $AA$-focused$^*$ experiments. $|AA$-focused$^{uni}|$ and $|AA$-focused$^{sim}|$ are the same for all data settings. $|$AdapterDrop$^*|$ rows report the number of included adapter layers for the corresponding AdapterDrop experiment at inference time. $|$AdapterDrop$^{AA}|$ is always the same as the corresponding $AA$-focused$^{spec}$, and $|$AdapterDrop$^{13}|$ is always the same as $AA$-focused$^{sim}$. The test data is the same for all the experiments. The Avg column reports the average score across all datasets. The highest performances for each dataset and each data setting are boldfaced.
151
+
152
+ the experiments of RTE for Low-data-100—i.e., over the five different random seeds. However, it is different for the rest of the tasks and different data settings.
153
+
154
+ - AA-focused $^{uni}$ : we design this adapter architecture for all tasks and data settings based on a single random seed, a single task, and a single data regime, i.e., random seed 42, the QQP task, and low-data-100. We choose low-data-100 because the architecture selection process—i.e., training AA—is very fast in this setting. We use the architecture selected for QQP because AA selects the smallest number of layers for QQP when the random seed is 42. The selected layers are {2, 6, 10, 12, 14, 15, 16, 18, 19, 20, 21, 22, 23}, i.e., three layers from the first half of the original 24 layers and ten layers from the second half (see the sketch after this list). The results of AA-focused $^{uni}$ compared to AA-focused $^{spec}$ indicate whether the architecture selected by AA transfers between similar tasks and different data settings.
155
+
156
+ - AA-focused $^{sim}$ : we design a simplified adapter based on AA in which we only use the number of selected layers, instead of the specific layer indices, from a single random seed, single task, and single data setting. We use the number of selected layers when the random seed is 42 for the QQP task and the low-data-100 setting, i.e., 13. As investigated by Houlsby et al. (2019), the last adapter layers are in general more effective. As a result, we add adapter layers, with rational activation, to the last 13 transformer layers in AA-focused $^{sim}$ experiments. The results of AA-focused $^{sim}$ compared to AA-focused $^{uni}$ show whether only the number of layers selected by AA matters or whether it is also important to specify at which layers to add the adapters.
157
+
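+ As a concrete illustration of the AA-focused $^{uni}$ construction referenced in the list above, the following hypothetical sketch attaches a rational adapter only at the layer indices selected by AA and an identity everywhere else. `RationalActivation` is the module sketched in § 2.1, the bottleneck size is illustrative, and a 0-indexed reading of the listed layers is assumed (consistent with the three/ten split noted above):
+ 
+ ```python
+ import torch.nn as nn
+ 
+ SELECTED = {2, 6, 10, 12, 14, 15, 16, 18, 19, 20, 21, 22, 23}  # chosen by AA (QQP, seed 42)
+ D, N = 1024, 64  # BERT-large hidden size; illustrative adapter bottleneck
+ 
+ def adapter_or_identity(layer_idx):
+     if layer_idx not in SELECTED:
+         return nn.Identity()  # unselected layer: hidden states pass through unchanged
+     return nn.Sequential(nn.Linear(D, N), RationalActivation(), nn.Linear(N, D))
+ 
+ aa_focused_uni = nn.ModuleList(adapter_or_identity(i) for i in range(24))
+ ```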
158
+ The number of inference layers for AdapterDrop $^{AA}$ is equal to the number of layers in the AA-focused $^{spec}$ experiments for each task and data setting. The number of layers for AdapterDrop $^{13}$ is 13, which is the same as AA-focused $^{uni}$ and AA-focused $^{sim}$ . Note that the number of layers for AA-focused experiments is the same at both training and inference, while this is not the case for AdapterDrop.
159
+
160
+ The $|AA|$ rows in Table 2 show the average number of selected layers for each task over the five
161
+
162
+ different random seeds. $|AA\text{-focused}^*|$ rows report the number of added adapter layers in the corresponding $AA\text{-focused}^*$ experiments. $|\text{AdapterDrop}^*|$ rows report the number of included adapter layers for the corresponding AdapterDrop experiments at inference time.
163
+
164
+ We make the following observations from the results of Table 2:
165
+
166
+ - $AA$ achieves on-par performances with the Baseline, and on average it uses about 13-15 layers out of 24 layers. We can use this insight for designing efficient adapter architectures.
167
+
168
+ - All AA-focused architectures considerably outperform Baseline in all the tasks in low-data settings while using a considerably smaller number of parameters, and are therefore considerably more efficient. For instance, while AA-focused $^{uni}$ only uses 13 out of 24 layers—i.e., reducing the number of training parameters from 3M to 1.7M—it outperforms Baseline's Avg score by 4.24, 5.57, and 2.35 points in Low-data-100, Low-data-300, and Low-data-500, respectively.
169
+
170
+ - The high performances of $AA$ -focused<sup>uni</sup> show that the architecture selected by $AA$ for one task and one data setting transfers well to other data regimes and similar tasks.<sup>9</sup> Therefore, it is not necessary to design the adapter architecture separately for different amounts of available data or for similar tasks.
171
+
172
+ - $AA$ -focused $^{sim}$ and AdapterDrop $^{13}$ both use the last 13 adapter layers during inference, while the results of $AA$ -focused $^{sim}$ are considerably higher for all data regimes. This indicates the importance of rational activation in adaptable adapters. We further investigate the impact of rational activation in the next section.
173
+
174
+ - On average, AdapterDrop $^{AA}$ contains more inference layers than AdapterDrop $^{13}$ . However, there is no significant difference between their performances. They achieve on-par or lower results compared to Baseline.
175
+
176
+ <table><tr><td>Adap. layers</td><td>MNLI</td><td>QQP</td><td>QNLI</td><td>SST-2</td><td>CoLA</td><td>STS-B</td><td>MRPC</td><td>RTE</td><td>WNLI</td><td>Avg</td></tr><tr><td colspan="11">Low-data-300</td></tr><tr><td>13</td><td>45.972.08</td><td>68.361.36</td><td>73.982.68</td><td>86.831.9</td><td>37.433.1</td><td>78.813.58</td><td>76.661.3</td><td>55.962.81</td><td>48.445.53</td><td>63.61</td></tr><tr><td>12</td><td>36.845.51</td><td>62.435.65</td><td>65.773.43</td><td>84.633.64</td><td>13.2312.63</td><td>77.082.36</td><td>75.270.39</td><td>54.303.75</td><td>46.765.45</td><td>57.37</td></tr><tr><td>11</td><td>36.165.12</td><td>62.595.8</td><td>67.931.42</td><td>79.9514.16</td><td>16.3211.65</td><td>73.226.75</td><td>76.421.19</td><td>56.532.02</td><td>46.24.12</td><td>57.26</td></tr></table>
177
+
178
+ Table 3: Evaluating the impact of the number of adapter layers on the overall performance. The adapter layers are added to the top $n$ layers of the model for $n = 13, 12, 11$ . Adapter layers contain rational activation, i.e., $n = 13$ is equivalent to $AA$ -focused $^{sim}$ . Results are reported for the low-data-300 setting.
179
+
180
+ <table><tr><td></td><td>MNLI</td><td>QQP</td><td>QNLI</td><td>SST-2</td><td>CoLA</td><td>STS-B</td><td>MRPC</td><td>RTE</td><td>WNLI</td><td>Avg</td></tr><tr><td colspan="11">Low-data-300</td></tr><tr><td>Baseline</td><td>36.554.76</td><td>61.508.66</td><td>69.621.24</td><td>79.8614.15</td><td>30.405.48</td><td>78.242.81</td><td>76.551.31</td><td>51.623.21</td><td>45.924.33</td><td>58.91</td></tr><tr><td>AA</td><td>37.142.49</td><td>66.070.38</td><td>71.331.82</td><td>72.5216.59</td><td>26.058.74</td><td>82.081.6</td><td>74.032.21</td><td>51.832.84</td><td>47.044.76</td><td>58.68</td></tr><tr><td>Switch-Only</td><td>35.052.81</td><td>43.816.02</td><td>65.592.61</td><td>61.866.26</td><td>9.7712.86</td><td>75.413.29</td><td>75.370.7</td><td>50.183.44</td><td>45.923.03</td><td>51.44</td></tr><tr><td>Rational-Only</td><td>37.723.88</td><td>64.752.51</td><td>69.691.04</td><td>79.8614.15</td><td>23.208.33</td><td>78.581.94</td><td>75.841.07</td><td>52.273.11</td><td>46.483.88</td><td>58.70</td></tr><tr><td>Baseline<sup>13</sup></td><td>37.985.80</td><td>63.374.72</td><td>68.761.55</td><td>85.163.63</td><td>12.1112.69</td><td>77.962.23</td><td>75.250.71</td><td>54.442.06</td><td>45.353.72</td><td>57.80</td></tr><tr><td>AA-focused<sup>sim</sup></td><td>45.972.08</td><td>68.361.36</td><td>73.982.68</td><td>86.831.90</td><td>37.433.10</td><td>78.813.58</td><td>76.661.30</td><td>55.962.81</td><td>48.445.53</td><td>63.61</td></tr><tr><td>|AA|</td><td>17.01.3</td><td>16.21.0</td><td>14.81.8</td><td>12.83.2</td><td>16.82.2</td><td>18.61.9</td><td>16.01.1</td><td>12.41.2</td><td>12.42.1</td><td></td></tr><tr><td>|Switch-Only|</td><td>14.01.1</td><td>15.82.5</td><td>17.01.9</td><td>16.22.8</td><td>16.41.9</td><td>16.41.5</td><td>17.81.7</td><td>15.02.1</td><td>14.01.7</td><td></td></tr></table>
181
+
182
+ Table 4: Evaluating the impact of rational in adaptable adapters. Experiments are run for five different random seeds. Switch-Only shows the results when learnable switches are used with standard adapter layers, i.e., linear layers with the ReLU activation. Rational-Only shows the results when all the activation functions in the standard adapter are replaced with rational. Baseline<sup>13</sup> contains a standard adapter layer on the last 13 transformer layers. AA-focused<sup>sim</sup> contains adapter layers with rational activation on the last 13 layers.
183
+
184
+ Evaluating the impact of AA on selecting the number of beneficial layers. In the results of Table 2, we select the number of layers in $AA$ -focused $^{sim}$ , i.e., 13, based on the minimum number of layers selected by AA on the low-data-100 setting and for random seed 42. AA-focused $^{sim}$ is equivalent to an adapter architecture in which only the last 13 adapter layers are added to the model. To investigate whether the improvements of AA-focused $^{sim}$ over the baseline are only due to using fewer adapter layers, we report the results of an adapter architecture in which only the last $n$ adapter layers are added to the model, e.g., for $n = 13$ the resulting architecture is the same as AA-focused $^{sim}$ . Table 3 shows the results of this experiment for $n = 13, 12, 11$ . We observe that by decreasing the number of layers from 13 to 12, the overall performance drops notably from 63.61 to 57.37.
185
+
186
+ Evaluating the impact of rational activation. The results of the AA-focused experiments vs. Baseline in Table 2 mostly emphasize the impact of layer selection by the learnable switches in AA. In this section, we investigate the impact of learnable activation functions in more detail in the evaluations of Table 4.
187
+
188
+ First, we replace all rationals in $AA$ with ReLU. The results are reported in the Switch-Only row.
189
+
190
+ By comparing the results of $AA$ and Switch-Only, we observe that the use of rational activation considerably improves the performance of $AA$ , i.e., using rational is a key component to achieving higher performances with fewer layers.
191
+
192
+ Second, we replace the activation functions in the standard adapter with rational. The results are reported in the Rational-Only rows. The results of Baseline compared to Rational-Only show that the impact of rational is prominent when the model contains fewer parameters, and that using rational with an overparameterized model is not very effective, i.e., both layer selection and learnable activation play an important role.
193
+
194
+ Third, we add a standard adapter layer only at the last 13 layers of BERT-large (Baseline $^{13}$ ), which is the same number of adapter layers as in AA-focused $^{sim}$ . The difference is the activation function used in these 13 adapter layers: ReLU in Baseline $^{13}$ and rational in AA-focused $^{sim}$ . The considerably higher performances of AA-focused $^{sim}$ show that the higher performances of AA-focused are due to both layer selection and a learnable activation function.
195
+
196
+ Learned rational activation functions. Figure 3 shows the learned activation functions across different layers of the same trained adapter and different tasks. We see that the learned activation differs
197
+
198
+ ![](images/55d82dc69e62ddfe138511cfe0763ab41bf63004fe53c7d3257e08df677d0782.jpg)
199
+
200
+ Figure 3: Learned rational activation functions differ according to their place within the network and to the task they are trained for. Right: activation functions at different layers within adapters trained on the QNLI task. Left: activation functions trained at layer 2 of adapters trained on different tasks.
201
+
202
+ ![](images/3ce23d6a1aabe79d1c3bb744f4813241262239aad3dd6c22f6aece42e395bc9b.jpg)
203
+
204
+ for different layers of the same task as well as for different tasks.
205
+
206
+ # 6 Conclusion
207
+
208
+ In this paper, we propose adaptable adapters. They consist of a learnable switch to select a subset of beneficial adapter layers and a learnable activation function to learn the suitable activation at each adapter layer and for each input data. The results of adaptable adapters show that we can achieve on-par performances with the full adapter architecture by using a smaller subset of layers. We show that adaptable adapters are viable tools for designing efficient and effective adapter architectures that require less storage space and lower training and inference time while achieving high performances.
209
+
210
+ # Acknowledgements
211
+
212
+ The authors would like to thank Jorge Cardona for his valuable contribution to the implementation of adaptable adapters. We thank Michael Bugert, Ji-Ung Lee, and Soumya Sarkar for their constructive suggestions and feedback on this work. We would like to thank Jonas Pfeiffer and Clifton Poth for always being very helpful with all questions about adapters and AdapterHub. This research work has been funded by the German Federal Ministry of Education and Research and the Hessian Ministry of Higher Education, Research, Science and the Arts (HMWK) within their joint support of the National Research Center for Applied Cybersecurity ATHENE. It benefited from the HMWK cluster project "The Third Wave of AI".
213
+
214
+ # References
215
+
216
+ Haoli Bai, Wei Zhang, Lu Hou, Lifeng Shang, Jin Jin, Xin Jiang, Qun Liu, Michael Lyu, and Irwin King. 2021. BinaryBERT: Pushing the limit of BERT quantization. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 4334-4348, Online. Association for Computational Linguistics.
217
+ Nicolas Boullé, Yuji Nakatsukasa, and Alex Townsend. 2020. Rational neural networks. In Advances in Neural Information Processing Systems 33: Annual Conference on Neural Information Processing Systems (NeurIPS).
218
+ Daniel Cer, Mona Diab, Eneko Agirre, Inigo Lopez-Gazpio, and Lucia Specia. 2017. SemEval-2017 task 1: Semantic textual similarity multilingual and crosslingual focused evaluation. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 1-14, Vancouver, Canada. Association for Computational Linguistics.
219
+ Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? an analysis of BERT's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286, Florence, Italy. Association for Computational Linguistics.
220
+ Ido Dagan, Oren Glickman, and Bernardo Magnini. 2006. The PASCAL recognising textual entailment challenge. In Machine learning challenges. evaluating predictive uncertainty, visual object classification, and recognising textual entailment, pages 177-190. Springer.
221
+ Quentin Delfosse, Patrick Schramowski, Alejandro Molina, and Kristian Kersting. 2021. Recurrent rational networks. arXiv preprint arXiv:2102.09407.
222
+
223
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
224
+ William B. Dolan and Chris Brockett. 2005. Automatically constructing a corpus of sentential paraphrases. In Proceedings of the Third International Workshop on Paraphrasing (IWP2005).
225
+ Angela Fan, Edouard Grave, and Armand Joulin. 2020. Reducing transformer depth on demand with structured dropout. In International Conference on Learning Representations.
226
+ William Fedus, Barret Zoph, and Noam Shazeer. 2021. Switch transformers: Scaling to trillion parameter models with simple and efficient sparsity. arXiv preprint arXiv:2101.03961.
227
+ Ruidan He, Linlin Liu, Hai Ye, Qingyu Tan, Bosheng Ding, Liying Cheng, Jiawei Low, Lidong Bing, and Luo Si. 2021. On the effectiveness of adapter-based tuning for pretrained language model adaptation. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 2208-2222, Online. Association for Computational Linguistics.
228
+ Neil Houlsby, Andrei Giurgiu, Stanislaw Jastrzebski, Bruna Morrone, Quentin De Laroussilhe, Andrea Gesmundo, Mona Attariyan, and Sylvain Gelly. 2019. Parameter-efficient transfer learning for NLP. In International Conference on Machine Learning, pages 2790-2799. PMLR.
229
+ Gao Huang, Yu Sun, Zhuang Liu, Daniel Sedra, and Kilian Q Weinberger. 2016. Deep networks with stochastic depth. In European conference on computer vision, pages 646-661. Springer.
230
+ Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. 2020. Scaling laws for neural language models. arXiv preprint arXiv:2001.08361.
231
+ Hector J Levesque, Ernest Davis, and Leora Morgenstern. 2011. The Winograd schema challenge. In AAAI Spring Symposium: Logical Formalizations of Commonsense Reasoning, volume 46, page 47.
232
+ Alejandro Molina, Patrick Schramowski, and Kristian Kersting. 2020. Padé activation units: End-to-end learning of flexible activation functions in deep networks. In International Conference on Learning Representations.
233
+
234
+ Jonas Pfeiffer, Andreas Rückle, Clifton Poth, Aishwarya Kamath, Ivan Vulic, Sebastian Ruder, Kyunghyun Cho, and Iryna Gurevych. 2020. Adapterhub: A framework for adapting transformers. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP 2020): Systems Demonstrations, pages 46-54, Online. Association for Computational Linguistics.
235
+ Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: $100,000+$ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2383-2392, Austin, Texas. Association for Computational Linguistics.
236
+ Adam Roberts, Colin Raffel, and Noam Shazeer. 2020. How much knowledge can you pack into the parameters of a language model? In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5418-5426, Online. Association for Computational Linguistics.
237
+ Andreas Rückle, Gregor Geigle, Max Glockner, Tilman Beck, Jonas Pfeiffer, Nils Reimers, and Iryna Gurevych. 2021. AdapterDrop: On the efficiency of adapters in transformers. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7930-7946, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
238
+ Victor Sanh, Lysandre Debut, Julien Chaumond, and Thomas Wolf. 2019. DistilBERT, a distilled version of BERT: smaller, faster, cheaper and lighter. In Proceedings of the 5th Workshop on Energy Efficient Machine Learning and Cognitive Computing - NeurIPS 2019.
239
+ Victor Sanh, Albert Webson, Colin Raffel, Stephen Bach, Lintang Sutawika, Zaid Alyafeai, Antoine Chaffin, Arnaud Stiegler, Arun Raja, Manan Dey, M Saiful Bari, Canwen Xu, Urmish Thakker, Shanya Sharma Sharma, Eliza Szczechla, Taewoon Kim, Gunjan Chhablani, Nihal Nayak, Debajyoti Datta, Jonathan Chang, Mike Tian-Jian Jiang, Han Wang, Matteo Manica, Sheng Shen, Zheng Xin Yong, Harshit Pandey, Rachel Bawden, Thomas Wang, Trishala Neeraj, Jos Rozen, Abheesht Sharma, Andrea Santilli, Thibault Fevry, Jason Alan Fries, Ryan Teehan, Teven Le Scao, Stella Biderman, Leo Gao, Thomas Wolf, and Alexander M Rush. 2022. Multitask prompted training enables zero-shot task generalization. In International Conference on Learning Representations.
240
+ Roy Schwartz, Gabriel Stanovsky, Swabha Swayamdipta, Jesse Dodge, and Noah A. Smith. 2020. The right tool for the job: Matching model and instance complexities. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6640-6651, Online. Association for Computational Linguistics.
241
+ Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and
242
+
243
+ Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1631-1642, Seattle, Washington, USA. Association for Computational Linguistics.
244
+ Sainbayar Sukhbaatar, Edouard Grave, Piotr Bojanowski, and Armand Joulin. 2019. Adaptive attention span in transformers. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 331-335, Florence, Italy. Association for Computational Linguistics.
245
+ Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. 2020. MobileBERT: a compact task-agnostic BERT for resource-limited devices. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 2158-2170, Online. Association for Computational Linguistics.
246
+ Raphael Tang, Yao Lu, Linqing Liu, Lili Mou, Olga Vechtomova, and Jimmy Lin. 2019. Distilling task-specific knowledge from bert into simple neural networks. arXiv preprint arXiv:1903.12136.
247
+ Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019. BERT rediscovers the classical NLP pipeline. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4593-4601, Florence, Italy. Association for Computational Linguistics.
248
+ Elena Voita, David Talbot, Fedor Moiseev, Rico Sennrich, and Ivan Titov. 2019. Analyzing multi-head self-attention: Specialized heads do the heavy lifting, the rest can be pruned. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5797-5808, Florence, Italy. Association for Computational Linguistics.
249
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel Bowman. 2018. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 353-355, Brussels, Belgium. Association for Computational Linguistics.
250
+ Alex Wang, Amanpreet Singh, Julian Michael, Felix Hill, Omer Levy, and Samuel R. Bowman. 2019. GLUE: A multi-task benchmark and analysis platform for natural language understanding. In International Conference on Learning Representations.
251
+ Alex Warstadt, Amanpreet Singh, and Samuel R. Bowman. 2019. Neural network acceptability judgments. Transactions of the Association for Computational Linguistics, 7:625-641.
252
+ Adina Williams, Nikita Nangia, and Samuel Bowman. 2018. A broad-coverage challenge corpus for sentence understanding through inference. In Proceedings of the 2018 Conference of the North American
253
+
254
+ Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1112-1122, New Orleans, Louisiana. Association for Computational Linguistics.
255
+
256
+ # A BERT-base Results
257
+
258
+ <table><tr><td></td><td>MNLI</td><td>QQP</td><td>QNLI</td><td>SST-2</td><td>CoLA</td><td>STS-B</td><td>MRPC</td><td>RTE</td><td>WNLI</td><td>Avg</td></tr><tr><td>Baseline</td><td>83.530.19</td><td>88.120.14</td><td>90.630.26</td><td>91.740.36</td><td>56.510.84</td><td>88.480.14</td><td>84.810.07</td><td>63.831.4</td><td>54.086.64</td><td>77.97</td></tr><tr><td>AA</td><td>82.890.43</td><td>88.090.16</td><td>89.960.25</td><td>91.310.51</td><td>51.441.82</td><td>88.250.17</td><td>85.091.06</td><td>64.251.72</td><td>52.117.61</td><td>77.05</td></tr><tr><td>AA-Layers</td><td>9.80.3</td><td>11.20.7</td><td>10.61.0</td><td>9.81.1</td><td>8.62.1</td><td>11.40.4</td><td>9.00.6</td><td>9.40.7</td><td>8.01.4</td><td></td></tr><tr><td colspan="11">Low-data-100</td></tr><tr><td>Baseline</td><td>35.663.38</td><td>29.700.86</td><td>60.514.5</td><td>51.542.14</td><td>-1.273.56</td><td>41.525.93</td><td>74.860.12</td><td>50.42.98</td><td>54.935.84</td><td>44.21</td></tr><tr><td>AA</td><td>37.052.35</td><td>30.590.68</td><td>62.524.27</td><td>52.732.55</td><td>-0.080.16</td><td>48.7323.91</td><td>74.830.07</td><td>50.183.21</td><td>55.216.13</td><td>45.75</td></tr><tr><td>|AA|</td><td>6.41.8</td><td>8.62.1</td><td>8.81.7</td><td>8.61.6</td><td>7.42.4</td><td>10.80.7</td><td>9.41.4</td><td>9.41.4</td><td>8.20.9</td><td></td></tr><tr><td colspan="11">Low-data-300</td></tr><tr><td>Baseline</td><td>37.884.09</td><td>49.2410.32</td><td>68.172.9</td><td>75.533.49</td><td>3.408.59</td><td>69.3915.05</td><td>75.991.2</td><td>54.222.96</td><td>47.614.91</td><td>53.49</td></tr><tr><td>AA</td><td>40.274.78</td><td>66.311.86</td><td>74.032.03</td><td>76.426.07</td><td>3.565.49</td><td>82.062.24</td><td>76.120.89</td><td>54.733.09</td><td>47.045.46</td><td>57.84</td></tr><tr><td>|AA|</td><td>10.41.6</td><td>10.80.7</td><td>11.00.8</td><td>9.41.3</td><td>7.62.0</td><td>10.80.7</td><td>9.61.0</td><td>9.81.4</td><td>8.21.1</td><td></td></tr><tr><td colspan="11">Low-data-500</td></tr><tr><td>Baseline</td><td>42.822.4</td><td>67.631.44</td><td>72.71.31</td><td>83.460.64</td><td>20.94.14</td><td>81.970.89</td><td>76.510.95</td><td>57.112.93</td><td>52.116.96</td><td>61.69</td></tr><tr><td>AA</td><td>47.721.67</td><td>69.270.89</td><td>75.6491.9</td><td>84.521.18</td><td>19.1314.46</td><td>83.740.67</td><td>78.032.33</td><td>55.963.08</td><td>51.836.13</td><td>62.87</td></tr><tr><td>|AA|</td><td>9.81.1</td><td>10.41.3</td><td>10.00.8</td><td>9.20.7</td><td>9.41.8</td><td>10.61.4</td><td>9.81.6</td><td>9.61.0</td><td>8.01.5</td><td></td></tr></table>
259
+
260
+ Table 5: Comparing the results of (a) the baseline adapter model that includes an adapter layer on all BERT-base layers (Baseline), and (b) the adaptable adapter (AA). The reported results are averaged over five different random seeds; the value following ± is the corresponding standard deviation. |AA| reports the average number of adapter layers selected by the adaptable adapter over different runs. The full-data results show the performance when the model is trained on all the available training data. The Low-data-$X$ settings report the results when only $X$ examples are used for training the model. The test data is the same for all the experiments.
adaptableadapters/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55a4abaf300258172996d6b34ebc7496e38a4a8cb50d2fa65539053e5ba52baf
3
+ size 653239
adaptableadapters/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f6c822367becf5768ab2bf6d1adf8944d1ba8e283c765f4a7c9b06e8abb3e382
3
+ size 402589
aisfgabundantinformationslotfillinggenerator/3a5c13e9-f446-4491-8fd5-5f416433df0f_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ad730a422abf671c72c24f430358936d9624356c137155c03142fd30ff5e2621
3
+ size 56973
aisfgabundantinformationslotfillinggenerator/3a5c13e9-f446-4491-8fd5-5f416433df0f_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ebf9ac9692987a879cc68d2fec2744f861ff0a0894a1b9f03fa8c051b4b85c2a
3
+ size 66542
aisfgabundantinformationslotfillinggenerator/3a5c13e9-f446-4491-8fd5-5f416433df0f_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ba95d822c2c76c1666240687e2c88fc0e4746ee50ea5fd486adf4be9a2856e3
3
+ size 315881
aisfgabundantinformationslotfillinggenerator/full.md ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AISFG: Abundant Information Slot Filling Generator
2
+
3
+ Yang Yan\*, Junda Ye\*, Zhongbao Zhang, Liwen Wang
4
+
5
+ Beijing University of Posts and Telecommunications, Beijing, China {yanyang42, jundaye, zhongbaozb, w_liwen}@bupt.edu.cn
6
+
7
+ # Abstract
8
+
9
+ As an essential component of task-oriented dialogue systems, slot filling requires enormous amounts of labeled training data in a given domain. However, in most cases, little or no target-domain training data is available at training time. Thus, cross-domain slot filling has to cope with the data scarcity problem through zero/few-shot learning. Previous research on zero/few-shot cross-domain slot filling focuses on slot descriptions and examples while ignoring the slot type ambiguity and example ambiguity issues. To address these problems, we propose the Abundant Information Slot Filling Generator (AISFG), a generative model with a novel query template that incorporates domain descriptions, slot descriptions, and examples with context. Experimental results show that our model outperforms state-of-the-art approaches on the zero/few-shot slot filling task. $^{1}$
10
+
11
+ # 1 Introduction
12
+
13
+ Slot filling is a critical part of downstream tasks in natural language understanding (NLU) such as dialogue systems. Recently, supervised slot filling models based on deep learning have achieved state-of-the-art results (Mesnil et al., 2013; Yao et al., 2013; Louvan and Magnini, 2018; Kim et al., 2019). Nonetheless, these methods depend strongly on domain-specific labels and cannot transfer to new domains, which typically contain little or no data.
14
+
15
+ To alleviate the resource gap between source and target domains, zero-shot cross-domain transfer has become an important research direction in slot filling. However, most research on slot filling uses token-level classification frameworks, which either convert the task into BIO tag labeling (Shah et al., 2019; Bapna et al., 2017; Du et al., 2021) or predict start and end positions as in a QA task (Du et al., 2021; Yu et al., 2021a) to extract spans.
16
+
17
+ Recently, prompt learning methods reformulate downstream tasks into a form similar to the pre-training tasks, which fully utilizes the knowledge encoded in PLMs and improves downstream performance in data-scarce scenarios. Motivated by this, we consider making the slot filling task consistent with PLM pre-training tasks.
18
+
19
+ In this paper, we propose a generative template-based zero-shot slot filling framework named Abundant Information Slot Filling Generator (AISFG), which uses a pre-trained generative model as the backbone and generates responses in natural language style. Thus, the slot filling task is consistent with the pre-training task of the PLM. In particular, we notice that shared cross-domain slot types sometimes refer to totally different entities in different domains. For example, in the SNIPS dataset (Coucke et al., 2018), the domains RateBook and SearchScreeningEvent share a common slot type object_type, but it refers to a book type and a movie schedule, respectively. We call this the slot type ambiguity issue. Moreover, we argue that incorporating slot examples alone is far from fully utilizing the example information. For instance, given the sentence give 5 out of 6 stars to creatures of light and darkness and the slot description to find best rating with examples like 6 and 5, the model produces the wrong answer 5 instead of 6. We conjecture that, from these two examples, the model merely learns that predicting a number is satisfactory, but does not understand what best rating means. We call this the example ambiguity issue. Thus, we turn our attention to domain-specific descriptions and examples with context to alleviate these two issues. Specifically, we design the query template to incorporate domain descriptions, slot descriptions, and examples with context.
20
+
21
+ The contribution of this paper can be summarized in three aspects:
22
+
23
+ ![](images/a415d3cff1192d43a647edff43c3e2c323cbbd22e3ea4cd1f464a75d2e7d92c4.jpg)
24
+ Figure 1: Illustration of our proposed AISFG.
25
+
26
+ - We propose a generative template-based zero-shot slot filling framework. To the best of our knowledge, we are the first to apply the generative framework to perform the zero-shot cross-domain slot filling task.
27
+ - We focus on slot type ambiguity and example ambiguity, which are ignored by previous research. We incorporate domain-specific descriptions and examples with context into the query template to handle these two issues.
28
+ - The experimental results show that AISFG achieves better performance than the existing methods on the setting of zero/few-shot.
29
+
30
+ # 2 Methodology
31
+
32
+ # 2.1 Slot Filling as Generation
33
+
34
+ Given a sentence $x$ from domain $d \in \mathcal{D}$ , slot filling aims to predict a set of (slot type, span) pairs $(s, y)$ , where $s \in S$ is a specific entity type from a fixed set of slot types, and $y = \{y_1, y_2, \ldots, y_i\}$ is a set of spans in sentence $x$ . Most work on slot filling utilizes token-level classification frameworks, either converting the task into BIO tag labeling or predicting the start and end positions in the sentence to extract spans.
35
+
36
+ In contrast to this convention, we frame slot filling as a conditional sequence generation task and solve it in a sequence-to-sequence manner. As shown in Figure 1, given a sentence $x$ , domain $d$ , slot type $s$ and target $y$ , we construct natural language query $q = t_{q}(x,d,s)$ and response $r = t_{r}(y)$ based on the predefined template. The generative model takes $q$ as input and directly predicts $r$ in a generation manner.
37
+
38
+ Leveraging the rich knowledge in the pre-trained generative model is important for cross-domain slot filling, especially in the zero/few-shot setting. Thus, we construct queries and responses as natural language sentences to utilize this knowledge naturally.
39
+
40
+ Query Construction To solve the slot type ambiguity and example ambiguity issues, we synthesize the query by incorporating domain descriptions, slot descriptions, and examples with context. Specifically, the query construction template is formulated as:
41
+
42
+ in domain ①, find the ②, like ③ in ④, in sentence: $x$
43
+
44
+ where blanks ①, ②, ③, ④ are filled with the domain description, slot description, example entity, and example context, respectively. To avoid data leakage, we construct examples and contexts manually to ensure that the example entities do not appear in the dataset. The predefined mappings between domain/slot and description/example/context are reported in Appendices A.1 and A.3, and a query example is illustrated in Figure 1.
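+
+ To make the template concrete, below is a minimal Python sketch of the query construction. The lookup tables are hypothetical stand-ins for the predefined mappings in Appendices A.1 and A.3; the `RateBook`/`best_rating` entries are taken from Table 5.
+
+ ```python
+ # Minimal sketch of AISFG query construction. The lookup tables are
+ # stand-ins for the domain/slot mappings in Appendices A.1 and A.3.
+ DOMAIN_DESC = {"RateBook": "rate book"}
+ SLOT_DESC = {"best_rating": "best rating"}
+ SLOT_EXAMPLE = {"best_rating": ("10", "the highest rating for this book is 10")}
+
+ def build_query(domain: str, slot: str, sentence: str) -> str:
+     entity, context = SLOT_EXAMPLE[slot]
+     return (f"in domain {DOMAIN_DESC[domain]}, find the {SLOT_DESC[slot]}, "
+             f"like {entity} in {context}, in sentence: {sentence}")
+
+ print(build_query("RateBook", "best_rating",
+                   "give 5 out of 6 stars to creatures of light and darkness"))
+ ```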
45
+
46
+ Response Construction Ideally, the response should be as simple as possible to reduce generation difficulty. Besides, an explicit template is easily converted back to the original format for compatibility. Thus, we construct the response by concatenating the target entities with commas:
47
+
48
+ $$
49
+ y_1, y_2, \dots, y_i.
50
+ $$
51
+
52
+ where $y_{i}$ is the $i$ -th entity span in sentence $x$ . A response example is illustrated in Figure 1.
53
+
54
+ # 2.2 Train and Inference
55
+
56
+ The sequence-to-sequence model is instantiated as a pre-trained generative language model, such as BART (Lewis et al., 2020). In the training stage, for an input sentence $x$ with multiple slots, we build a training pair consisting of a query and a response based on the templates above for each slot. We then fine-tune the parameters on these training pairs to maximize the log likelihood of the gold responses, just as in an ordinary generation task.
57
+
58
+ <table><tr><td rowspan="2">Corpus</td><td>Training Setting</td><td colspan="5">Zero-shot</td><td colspan="4">Few-shot on 20 samples</td><td colspan="4">Few-shot on 50 samples</td></tr><tr><td>Domain↓ Model→</td><td>CT</td><td>RZT</td><td>Coach</td><td>QASF</td><td>AISFG</td><td>CT</td><td>RZT</td><td>Coach</td><td>AISFG</td><td>CT</td><td>RZT</td><td>Coach</td><td>AISFG</td></tr><tr><td rowspan="7">SNIPS</td><td>AddTo Playlist</td><td>38.82</td><td>42.77</td><td>50.90</td><td>57.57</td><td>56.20</td><td>58.36</td><td>63.18</td><td>62.76</td><td>81.64</td><td>68.69</td><td>74.89</td><td>74.68</td><td>83.51</td></tr><tr><td>BookRestaurant</td><td>27.54</td><td>30.68</td><td>34.01</td><td>48.75</td><td>65.94</td><td>45.65</td><td>50.54</td><td>65.97</td><td>78.06</td><td>54.22</td><td>54.49</td><td>74.82</td><td>84.60</td></tr><tr><td>GetWeather</td><td>46.45</td><td>50.28</td><td>50.47</td><td>61.27</td><td>67.66</td><td>54.22</td><td>58.86</td><td>67.89</td><td>82.68</td><td>63.23</td><td>58.87</td><td>79.64</td><td>83.73</td></tr><tr><td>PlayMusic</td><td>32.86</td><td>33.12</td><td>32.01</td><td>38.54</td><td>50.12</td><td>46.35</td><td>47.20</td><td>54.04</td><td>77.59</td><td>54.32</td><td>59.20</td><td>66.38</td><td>78.79</td></tr><tr><td>RateBook</td><td>14.54</td><td>16.43</td><td>22.06</td><td>36.51</td><td>41.05</td><td>64.37</td><td>63.33</td><td>74.68</td><td>79.06</td><td>76.45</td><td>76.87</td><td>84.62</td><td>92.85</td></tr><tr><td>SearchCreativeWork</td><td>39.79</td><td>44.45</td><td>46.65</td><td>60.82</td><td>67.46</td><td>57.83</td><td>63.39</td><td>57.19</td><td>71.95</td><td>66.38</td><td>67.81</td><td>64.56</td><td>76.00</td></tr><tr><td>SearchScreeningEvent</td><td>13.83</td><td>12.25</td><td>25.63</td><td>27.72</td><td>35.05</td><td>48.59</td><td>49.18</td><td>67.38</td><td>73.91</td><td>70.67</td><td>74.58</td><td>83.85</td><td>91.29</td></tr><tr><td></td><td>Average F1</td><td>30.55</td><td>32.85</td><td>37.39</td><td>47.31</td><td>54.78</td><td>53.62</td><td>56.53</td><td>64.27</td><td>77.84</td><td>64.85</td><td>66.67</td><td>75.51</td><td>84.39</td></tr><tr><td>ATIS</td><td>AirlineTravel</td><td>2.14</td><td>2.86</td><td>1.64</td><td>-</td><td>35.17</td><td>26.05</td><td>41.37</td><td>54.91</td><td>64.04</td><td>35.87</td><td>51.80</td><td>66.99</td><td>75.31</td></tr></table>
59
+
60
+ Table 1: F1-scores (%) on SNIPS and ATIS for different target domains under zero-shot and few-shot learning settings. Bold indicates the best results. $-$ indicates that the result is not reported in the published paper.
61
+
62
+ In the inference stage, we also build a query for each candidate slot, and the response $\hat{r}$ is generated in an auto-regressive manner, selecting at each time step the token with the highest probability over the vocabulary. Note that although we do not explicitly restrict the response tokens to originate from the input sentence $x$, AISFG almost always does so (we show exceptions in Section 3.4). Then, the response $\hat{r}$ is split by commas to recover the original format $\hat{y}$ for evaluation.
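+
+ As a rough illustration of this inference procedure, the sketch below runs a single query through a BART checkpoint with beam size 2 (the decoding setting reported in Appendix A.2) and splits the response on commas; `facebook/bart-base` is a stand-in for the actual fine-tuned AISFG checkpoint, which is not publicly named here.
+
+ ```python
+ # Sketch of AISFG-style inference; "facebook/bart-base" stands in for the
+ # fine-tuned checkpoint.
+ from transformers import BartForConditionalGeneration, BartTokenizer
+
+ tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
+ model = BartForConditionalGeneration.from_pretrained("facebook/bart-base")
+
+ def answer_query(query: str) -> list[str]:
+     inputs = tokenizer(query, return_tensors="pt")
+     # Beam size 2 follows the decoding setting in Appendix A.2.
+     output_ids = model.generate(**inputs, num_beams=2, max_length=32)
+     response = tokenizer.decode(output_ids[0], skip_special_tokens=True)
+     # Recover the original span format by splitting on commas.
+     return [span.strip() for span in response.split(",") if span.strip()]
+ ```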
63
+
64
+ # 3 Experiments
65
+
66
+ # 3.1 Setup
67
+
68
+ Dataset We evaluate our method on SNIPS (Coucke et al., 2018), a public spoken language understanding dataset that contains 7 domains and 39 slots. To simulate cross-domain scenarios, we choose one domain as the target domain for testing and the remaining six domains as the source domains for training, following the setup of Liu et al. (2020).
69
+
70
+ However, the domains in SNIPS are not completely independent of each other. To achieve a truly cross-domain scenario, we also use another commonly used dataset, ATIS (Price, 1990), as the target domain to test our model trained on SNIPS.
71
+
72
+ Baselines We compare our method against a number of representative baselines as follows:
73
+
74
+ - Concept Tagger (CT) A slot-filling framework proposed by Gobbi et al. (2018), which utilizes original slot descriptions to generalize to unseen slot types.
75
+ - Robust Zero-shot Tagger (RZT) Besides leveraging slot descriptions like CT, RZT (Shah et al., 2019) further introduces examples to improve the robustness of zero-shot slot filling.
76
+
77
+ - Coarse-to-fine Approach (Coach) Coach (Liu et al., 2020) is a two-step coarse-to-fine model for slot-filling, which performs coarse-grained BIO labeling task in the first step and performs fine-grained slot type classification task in the second step. It also encodes slot descriptions to help recognize unseen slot types.
78
+ - QA-driven slot filling (QASF) QASF (Du et al., 2021) uses a linguistically motivated question generation strategy for converting slot descriptions and example values into natural questions and solves the slot filling by extracting spans from utterances with a span-based QA model.
79
+
80
+ # 3.2 Main Results
81
+
82
+ Cross-Domain Slot Filling The cross-domain slot filling results are reported in Table 1. On the SNIPS dataset, AISFG outperforms the state-of-the-art models (QASF for zero-shot and Coach for few-shot) by $7.47\%$ average F1 under the zero-shot setting, $13.57\%$ under the 20-shot setting, and $8.88\%$ under the 50-shot setting, which demonstrates the superiority of our method. To simulate a truly cross-domain scenario, we train AISFG on SNIPS and test it on ATIS; results are reported at the bottom of Table 1. AISFG consistently outperforms the existing state-of-the-art approaches, especially in the zero-shot setting, where it achieves a $32.31\%$ F1 improvement. This significant gain suggests that utilizing rich domain information to prompt the knowledge in PLMs may be a shortcut for solving the data scarcity issue.
83
+
84
+ Analysis on Seen versus Unseen Slots We divide the samples of each target domain in the SNIPS dataset into "seen" and "unseen" categories to further understand the transfer ability of our model. Following Liu et al. (2020), an example is categorized as "unseen" as long as its slot does not exist in the remaining six source domains; otherwise, it is tagged as "seen".
85
+
86
+ <table><tr><td>Training Setting</td><td colspan="5">Zero-shot</td><td colspan="5">Few-shot on 50 samples</td></tr><tr><td>Domain↓ Model→</td><td>SD</td><td>SD+DD</td><td>SD+E</td><td>SD+EC</td><td>AISFG</td><td>SD</td><td>SD+DD</td><td>SD+E</td><td>SD+EC</td><td>AISFG</td></tr><tr><td>AddTo Playlist</td><td>54.57</td><td>54.94</td><td>55.03</td><td>56.22</td><td>56.20</td><td>80.84</td><td>83.49</td><td>82.84</td><td>83.23</td><td>83.51</td></tr><tr><td>BookRestaurant</td><td>63.48</td><td>63.85</td><td>62.82</td><td>65.93</td><td>65.94</td><td>81.11</td><td>83.15</td><td>83.16</td><td>83.86</td><td>84.60</td></tr><tr><td>GetWeather</td><td>61.09</td><td>61.35</td><td>64.73</td><td>65.56</td><td>67.66</td><td>83.35</td><td>83.50</td><td>83.17</td><td>83.66</td><td>83.73</td></tr><tr><td>PlayMusic</td><td>44.41</td><td>44.99</td><td>46.51</td><td>47.48</td><td>50.12</td><td>78.62</td><td>78.50</td><td>78.06</td><td>78.68</td><td>78.79</td></tr><tr><td>RateBook</td><td>30.67</td><td>30.61</td><td>33.38</td><td>41.44</td><td>41.05</td><td>92.04</td><td>92.08</td><td>91.23</td><td>92.22</td><td>92.85</td></tr><tr><td>SearchCreativeWork</td><td>65.60</td><td>64.02</td><td>66.14</td><td>66.45</td><td>67.46</td><td>75.78</td><td>76.36</td><td>74.19</td><td>74.89</td><td>76.00</td></tr><tr><td>SearchScreeningEvent</td><td>30.33</td><td>30.18</td><td>32.81</td><td>34.61</td><td>35.05</td><td>89.53</td><td>90.25</td><td>90.49</td><td>90.82</td><td>91.29</td></tr><tr><td>Average F1</td><td>50.02</td><td>49.99</td><td>51.63</td><td>53.95</td><td>54.78</td><td>83.03</td><td>83.90</td><td>83.30</td><td>83.90</td><td>84.39</td></tr></table>
87
+
88
+ Table 2: F1-scores (%) on SNIPS for different target domains under zero-shot and few-shot learning settings. Bold indicates the best results.
89
+
90
+ <table><tr><td rowspan="2">Setting→
91
+ Model↓</td><td colspan="2">0 sample</td><td colspan="2">50 samples</td></tr><tr><td>unseen</td><td>seen</td><td>unseen</td><td>seen</td></tr><tr><td>CT</td><td>3.38</td><td>37.23</td><td>52.65</td><td>65.66</td></tr><tr><td>RZT</td><td>2.19</td><td>40.99</td><td>50.28</td><td>61.63</td></tr><tr><td>Coach</td><td>9.31</td><td>46.22</td><td>68.59</td><td>74.55</td></tr><tr><td>QASF</td><td>41.73</td><td>56.23</td><td>-</td><td>-</td></tr><tr><td>AISFG</td><td>63.96</td><td>76.11</td><td>85.09</td><td>88.77</td></tr></table>
92
+
93
+ Table 3: Averaged F1-scores $(\%)$ over all target domains on the SNIPS dataset for "seen" and "unseen" slots. $-$ indicates that the result is not reported in the published paper.
94
+
95
96
+
97
+ Table 3 shows the average F1 results on seen and unseen slots in the target domains under the zero-shot and 50-shot settings. AISFG achieves the best results on both seen and unseen slots, especially on unseen slots in the zero-shot scenario, which indicates our method is more effective at transferring knowledge from the source to the target domain. Besides, approaches that leverage PLMs (i.e., QASF and AISFG) have significant advantages over the others, which demonstrates that utilizing the knowledge encoded in PLMs can enhance transfer ability.
98
+
99
+ # 3.3 Ablation Studies
100
+
101
+ To solve the slot type ambiguity issue and the example ambiguity issue, we incorporate the domain description and the example context into the query template, respectively. To understand the influence of these components, we perform ablation studies on the SNIPS dataset and report results in Table 2. In this table, SD, DD, E, and EC refer to Slot Description, Domain Description, Example, and Example with Context, respectively. For example, "SD+EC" represents the query template that
102
+
103
+ is built from the slot description plus an example with context.
104
+
105
+ The Effect of Domain Description Comparing SD versus SD+DD and SD+EC versus AISFG, we observe that incorporating domain descriptions improves performance under the few-shot setting but not the zero-shot setting. We conjecture the reason is that the domain descriptions we used (shown in Table 4) are simply converted from the domain names and contain limited information, which is not enough to provide domain knowledge. However, when some target-domain examples are given (i.e., few-shot), the meaning of the domain descriptions can be enhanced by these training examples. The domain description then becomes a domain indicator that can be used to distinguish slot types shared by different domains, resulting in improved performance.
106
+
107
+ The Effect of Context Example Comparing SD versus $\mathrm{SD + EC}$ and $\mathrm{SD + DD}$ versus AISFG, we observe that incorporating examples with contexts further boosts performance in both zero-shot and few-shot settings, which demonstrates that these examples with contexts are helpful for cross-domain slot filling. Moreover, $\mathrm{SD + EC}$ outperforms $\mathrm{SD + E}$ in both settings. In particular, for the zero-shot setting in the domain RateBook, which contains plenty of ambiguous examples, $\mathrm{SD + EC}$ achieves an $8.06\%$ improvement over $\mathrm{SD + E}$, which indicates that contexts are useful for alleviating example ambiguity.
108
+
109
+ # 3.4 Error Analysis and Case Study
110
+
111
+ To better understand our proposed model, we analyze the erroneous predictions in all cross-domain experiments on the SNIPS dataset (Table 1) and categorize them into three types: boundary mistakes, vocabulary mistakes, and others.
112
+
113
+ ![](images/c3a48e3cf7a0ad034969d51c4dd24179eae045d5db92130574297e959e7a09b3.jpg)
114
+ Figure 2: Number of samples against mistake types.
115
+ Figure 3: Examples for boundary mistake and vocabulary mistake.
116
+
117
+ <table><tr><td>Boundary mistake example
118
+ Sentence: i want the complete global albums collection.
119
+ Slot: object_name
120
+ Gold Label: the complete global albums collection.
121
+ Prediction: global albums collection.</td></tr><tr><td>Vocabulary mistake example (synonym)
122
+ Sentence: find a picture called blink of an eye.
123
+ Slot: object_name
124
+ Gold Label: blink of an eye.
125
+ Prediction: wink of an eye.</td></tr><tr><td>Vocabulary mistake example (tense)
126
+ Sentence: show schedule for loved by thousands.
127
+ Slot: object_name
128
+ Gold Label: loved by thousands.
129
+ Prediction: love by thousands.</td></tr></table>
130
+
131
+ Boundary mistakes indicate that AISFG predicts the main body of the gold label while missing or adding other words, which may lead to an inaccurate boundary (top example in Figure 3). Vocabulary mistakes refer to predictions in which i) some words in the gold label are replaced by synonyms (middle example in Figure 3); or ii) some words in the gold label are substituted with different tenses (bottom example in Figure 3).
132
+
133
+ As shown in Figure 2, the most common mistakes are boundary mistakes. In particular, in the 50-shot experiment, boundary-mistake predictions account for almost half of all mistakes. We find that the model often struggles to detect exactly the same span as the ground truth, as shown in the top example in Figure 3. When the shot number increases from 0 to 20 and from 20 to 50, the number of boundary mistakes drops by $36.4\%$ and $22.7\%$, respectively, indicating that increasing the shot number helps address boundary mistakes.
134
+
135
+ Vocabulary mistakes such as synonym or tense replacement are the least common.
136
+
137
+ Since the proposed AISFG is a generative model, we place no restriction on what it produces, so vocabulary mistakes can occur. However, most of these mistakes can be fixed by post-processing the model output (i.e., comparing the prediction with the original sentence and replacing the wrong words), which we leave as a future improvement to AISFG. Moreover, as shown in Figure 2, increasing the shot number does not help with vocabulary mistakes.
138
+
139
+ # 4 Related Work
140
+
141
+ The main challenge of cross-domain slot filling is to handle domain-specific slot types that have few or no supervision signals during training. To handle unseen slots, previous methods introduce slot descriptions (Lee and Jha, 2019; Liu et al., 2020; Bapna et al., 2017) and slot examples (Guerini et al., 2018; Shah et al., 2019) to capture the semantic relationship between unseen slots and input sentences. Building on this line of research, we incorporate more information, such as domain descriptions and example contexts, to boost performance on unseen slots.
142
+
143
+ Typically, traditional methods formulate the slot filling task as a token-level classification task (Liu et al., 2020; Bapna et al., 2017; Shah et al., 2019; Wang et al., 2021). To leverage the knowledge encoded in pre-trained language models, RCSF (Yu et al., 2021b) utilizes BERT (Devlin et al., 2019) to predict the start/end positions of the target slot entity. However, the downstream task (i.e., token/position prediction) is inconsistent with the pre-training task (i.e., Masked LM and NSP for BERT), which may limit the expressiveness of the PLM (Mehri and Eskenazi, 2021). To bridge the gap, we treat slot filling as a generative task and utilize a PLM whose pre-training task is also generative.
144
+
145
+ # 5 Conclusion & Discussion
146
+
147
+ In this paper, we introduce a novel zero-shot cross-domain slot filling model named AISFG, which adapts to unseen domains seamlessly with the help of domain and slot descriptions together with examples with full context. Experiments show that our model significantly outperforms existing zero-shot cross-domain slot filling approaches. Moreover, we conduct an error analysis and case study to better understand our proposed model, and leave solving boundary and vocabulary mistakes as future work.
148
+
149
+ # Acknowledgments
150
+
151
+ This work was partially supported by National Natural Science Foundation under Grants U1936103 and 61921003.
152
+
153
+ # References
154
+
155
+ Ankur Bapna, Gokhan Tur, Dilek Hakkani-Tur, and Larry P. Heck. 2017. Towards zero-shot frame semantic parsing for domain scaling. In Interspeech 2017, 18th Annual Conference of the International Speech Communication Association, Stockholm, Sweden, August 20-24, 2017, pages 2476-2480. ISCA.
156
+ Alice Coucke, Alaa Saade, Adrien Ball, Théodore Bluche, Alexandre Caulier, David Leroy, Clément Doumouro, Thibault Gisselbrecht, Francesco Caltagirone, Thibaut Lavril, Maël Primet, and Joseph Dureau. 2018. Snips voice platform: an embedded spoken language understanding system for private-by-design voice interfaces. CoRR, abs/1805.10190.
157
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
158
+ Xinya Du, Luheng He, Qi Li, Dian Yu, Panupong Pa-supat, and Yuan Zhang. 2021. QA-driven zero-shot slot filling with weak supervision pretraining. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 2: Short Papers), pages 654-664, Online. Association for Computational Linguistics.
159
+ Jacopo Gobbi, Evgeny A. Stepanov, and Giuseppe Riccardi. 2018. Concept tagging for natural language understanding: Two decadelong algorithm development. In Proceedings of the Fifth Italian Conference on Computational Linguistics (CLiC-it 2018), Torino, Italy, December 10-12, 2018, volume 2253 of CEUR Workshop Proceedings. CEUR-WS.org.
160
+ Marco Guerini, Simone Magnolini, Vevake Balaraman, and Bernardo Magnini. 2018. Toward zero-shot entity recognition in task-oriented conversational agents. In Proceedings of the 19th Annual SIGdial Meeting on Discourse and Dialogue, pages 317-326, Melbourne, Australia. Association for Computational Linguistics.
161
+ Kunho Kim, Rahul Jha, Kyle Williams, Alex Marin, and Imed Zitouni. 2019. Slot tagging for task oriented spoken language understanding in human-to-human conversation scenarios. In Proceedings of the 23rd Conference on Computational Natural Language Learning (CoNLL), pages 757-767, Hong
162
+
163
+ Kong, China. Association for Computational Linguistics.
164
+ Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
165
+ Sungjin Lee and Rahul Jha. 2019. Zero-shot adaptive transfer for conversational language understanding. In The Thirty-Third AAAI Conference on Artificial Intelligence, 2019, pages 6642-6649. AAAI Press.
166
+ Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. BART: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880, Online. Association for Computational Linguistics.
167
+ Zihan Liu, Genta Indra Winata, Peng Xu, and Pascale Fung. 2020. Coach: A coarse-to-fine approach for cross-domain slot filling. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 19-25, Online. Association for Computational Linguistics.
168
+ Samuel Louvan and Bernardo Magnini. 2018. Exploring named entity recognition as an auxiliary task for slot filling in conversational language understanding. In Proceedings of the 2018 EMNLP Workshop SCAI: The 2nd International Workshop on Search-Oriented Conversational AI, pages 74-80, Brussels, Belgium. Association for Computational Linguistics.
169
+ Shikib Mehri and Maxine Eskenazi. 2021. GenSF: Simultaneous adaptation of generative pre-trained models and slot filling. In Proceedings of the 22nd Annual Meeting of the Special Interest Group on Discourse and Dialogue, pages 489-498, Singapore and Online. Association for Computational Linguistics.
170
+ Grégoire Mesnil, Xiaodong He, Li Deng, and Yoshua Bengio. 2013. Investigation of recurrent-neural network architectures and learning methods for spoken language understanding. In INTERSPEECH 2013, 14th Annual Conference of the International Speech Communication Association, Lyon, France, August 25-29, 2013, pages 3771-3775. ISCA.
171
+ Patti J. Price. 1990. Evaluation of spoken language systems: the ATIS domain. In Speech and Natural Language: Proceedings of a Workshop Held at Hidden Valley, Pennsylvania, USA, June 24-27, 1990. Morgan Kaufmann.
172
+ Darsh Shah, Raghav Gupta, Amir Fayazi, and Dilek Hakkani-Tur. 2019. Robust zero-shot cross-domain slot filling with example values. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 5484-5490, Florence, Italy. Association for Computational Linguistics.
173
+
174
+ Liwen Wang, Xuefeng Li, Jiachi Liu, Keqing He, Yuanmeng Yan, and Weiran Xu. 2021. Bridge to target domain by prototypical contrastive learning and label confusion: Re-exlore zero-shot learning for slot filling. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 9474-9480, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
175
+
176
+ Kaisheng Yao, Geoffrey Zweig, Mei-Yuh Hwang, Yangyang Shi, and Dong Yu. 2013. Recurrent neural networks for language understanding. In IN-TERSPEECH 2013, 14th Annual Conference of the International Speech Communication Association, Lyon, France, August 25-29, 2013, pages 2524-2528. ISCA.
177
+
178
+ Mengshi Yu, Jian Liu, Yufeng Chen, Jinan Xu, and Yujie Zhang. 2021a. Cross-domain slot filling as machine reading comprehension. In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI-21, pages 3992-3998. International Joint Conferences on Artificial Intelligence Organization. Main Track.
179
+
180
+ Mengshi Yu, Jian Liu, Yufeng Chen, Jinan Xu, and Yujie Zhang. 2021b. Cross-domain slot filling as machine reading comprehension. In Proceedings of the Thirtieth International Joint Conference on Artificial Intelligence, IJCAI 2021, Virtual Event / Montreal, Canada, 19-27 August 2021, pages 3992-3998. ijcai.org.
181
+
182
+ # A Appendix
183
+
184
+ # A.1 Domain Descriptions
185
+
186
+ <table><tr><td>Domain</td><td>Domain Description</td></tr><tr><td>AddTo Playlist</td><td>add to playlist</td></tr><tr><td>BookRestaurant</td><td>book restaurant</td></tr><tr><td>GetWeather</td><td>get weather</td></tr><tr><td>PlayMusic</td><td>play music</td></tr><tr><td>RateBook</td><td>rate book</td></tr><tr><td>SearchCreativeWork</td><td>search creative work</td></tr><tr><td>SearchScreeningEvent</td><td>search screening event</td></tr></table>
187
+
188
+ Table 4: Domain descriptions for different domains.
189
+
190
+ # A.2 Implementation Details
191
+
192
+ For a fair comparison with QASF, which is based on BERT (Du et al., 2021), we instantiate the sequence-to-sequence model as BART-base, which has a parameter count similar to BERT. Following Liu et al. (2020), we reserve 500 samples in the target domain as the validation set and regard the rest as the test set. We use the Adam optimizer (Kingma and Ba, 2015) with a learning rate of 2e-5 to fine-tune all parameters. The batch size is set to 16, and the decoding beam search size for BART is set to 2.
193
+
194
+ An early stopping strategy with a patience of 5 is used to save the best checkpoint based on the validation set.
195
+
196
+ Similar to the baselines, we use the F1 score as the evaluation metric. For a given query, a prediction counts as a true positive only if its entity extraction boundaries match the gold span exactly.
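+
+ The following is a minimal sketch of this exact-match criterion, assuming the gold and predicted spans for each query are collected into sets.
+
+ ```python
+ # Minimal sketch of exact-match span F1: a predicted span is a true
+ # positive only if it appears verbatim among the gold spans for its query.
+ def span_f1(gold: list[set[str]], pred: list[set[str]]) -> float:
+     tp = fp = fn = 0
+     for g, p in zip(gold, pred):
+         tp += len(g & p)
+         fp += len(p - g)
+         fn += len(g - p)
+     precision = tp / (tp + fp) if (tp + fp) else 0.0
+     recall = tp / (tp + fn) if (tp + fn) else 0.0
+     if precision + recall == 0.0:
+         return 0.0
+     return 2 * precision * recall / (precision + recall)
+ ```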
197
+
198
+ # A.3 Illustration of Slot Description, Context and Example
199
+
200
+ <table><tr><td>Domain</td><td>Slot</td><td>Slot Description</td><td>Context</td><td>Example</td></tr><tr><td rowspan="5">AddToPlaylist</td><td>music_item</td><td>music item</td><td>funk outta here please add a piece to my playlist</td><td>piece</td></tr><tr><td>playlist Owner</td><td>playlist owner</td><td>add the album to sebastian s ejercicio playlist</td><td>sebastian</td></tr><tr><td>entity_name</td><td>entity name</td><td>i am m going to add love letter to my list of parties</td><td>love letter</td></tr><tr><td>playlist</td><td>playlist</td><td>add this song to my stay happy playlist</td><td>stay happy</td></tr><tr><td>artist</td><td>artist</td><td>i am going to include the taylor swift track in my bass gaming playlist</td><td>taylor swift</td></tr><tr><td rowspan="14">BookRestaurant</td><td>city</td><td>city</td><td>make a reservation at the best pub in shanghai</td><td>shanghai</td></tr><tr><td>facility</td><td>facility</td><td>in washington reserve a tavern with baby chair</td><td>baby chair</td></tr><tr><td>timeRange</td><td>time range</td><td>i need a reservation for the chinese food in cuba in 10 minutes</td><td>in 10 minutes</td></tr><tr><td>restaurant_name</td><td>restaurant name</td><td>i am going to kfc together with my friends</td><td>kfc</td></tr><tr><td>country</td><td>country</td><td>at 9 am reserve a restaurant in jerusalem for 8 persons</td><td>jerusalem</td></tr><tr><td>cuisine</td><td>cuisine</td><td>i d like to reserve a table for one at a spanish restaurant in wesley</td><td>spanish</td></tr><tr><td>restaurant_type</td><td>restaurant type</td><td>i d like to reserve a buffet for my family</td><td>buffet</td></tr><tr><td>served_dish</td><td>served dish</td><td>find a restaurant serves hot-pot and make a reservation</td><td>hot-pot</td></tr><tr><td>party_size_number</td><td>number</td><td>make a six-person reservation at an alpine wine bar</td><td>six-person</td></tr><tr><td>poi</td><td>position</td><td>i d want to reserve a restaurant near my hotel</td><td>near my hotel</td></tr><tr><td>sort</td><td>type</td><td>get a highly regarded sandwich shop in colombia</td><td>highly regarded</td></tr><tr><td>spatial Relation</td><td>spatial relation</td><td>book a restaurant for 2 that s 10 minutes walk from here</td><td>10 minutes walk</td></tr><tr><td>state</td><td>state</td><td>we d want to go to a brasserie in omaha that serves sicilian cuisine</td><td>omaha</td></tr><tr><td>party_size_description</td><td>person</td><td>book a table for sebastian perez and leclerc</td><td>sebastian perez and leclerc</td></tr><tr><td rowspan="9">GetWeather</td><td>city</td><td>city</td><td>is the temperature going down to 2 in shanghai</td><td>shanghai</td></tr><tr><td>state</td><td>state</td><td>check the weather in omaha</td><td>omaha</td></tr><tr><td>timeRange</td><td>time range</td><td>what is the forecast for haidian in next half an hour</td><td>in next half an hour</td></tr><tr><td>current_location</td><td>current location</td><td>will it rain in my present local street on 11/10/2023</td><td>present local street</td></tr><tr><td>country</td><td>country</td><td>what s the weather like in jerusalem right now</td><td>jerusalem</td></tr><tr><td>spatial_location</td><td>spatial relation</td><td>is it going to rain within 10 minutes bus distance</td><td>within 10 minutes bus distance</td></tr><tr><td>geographic poi</td><td>geographic position</td><td>in west lake park, how cold will it be tomorrow</td><td>west lake 
park</td></tr><tr><td>condition_temperature</td><td>temperature</td><td>will it get hotter in 2 hours</td><td>hotter</td></tr><tr><td>condition_description</td><td>weather</td><td>will israel be hit by a snow storm</td><td>snow storm</td></tr><tr><td rowspan="7">PlayMusic</td><td>genre</td><td>genre</td><td>find me a lullaby in netease cloud music</td><td>lullaby</td></tr><tr><td>music_item</td><td>music item</td><td>funk outta here please add a piece to my playlist</td><td>piece</td></tr><tr><td>service</td><td>service</td><td>find me a lullaby in netease cloud music</td><td>netease cloud music</td></tr><tr><td>year</td><td>year</td><td>play the most popular song in 2021</td><td>2021</td></tr><tr><td>album</td><td>album</td><td>add this song to my stay happy playlist</td><td>stay happy</td></tr><tr><td>sort</td><td>type</td><td>play shall we talk by eason chan</td><td>getting ready</td></tr><tr><td>artist</td><td>artist</td><td>i am going to include the taylor swift track in my bass gaming playlist</td><td>shall we talk</td></tr><tr><td rowspan="7">RateBook</td><td>object_part_of_series_type</td><td>series</td><td>i rate the sequel 0 point</td><td>sequel</td></tr><tr><td>object_select</td><td>this current</td><td>the book deserves a 5 star</td><td>the</td></tr><tr><td>rating_value</td><td>rating value</td><td>the book deserves a 5 star</td><td>5</td></tr><tr><td>object_name</td><td>object name</td><td>lessons from madame chic is a fine pick for anyone interested in fashion style</td><td>lessons from madame chic</td></tr><tr><td>object_type</td><td>object type</td><td>i am too timid to read horror literatures</td><td>horror</td></tr><tr><td>rating_unit</td><td>rating unit</td><td>this book deserves a 5 star</td><td>star</td></tr><tr><td>best_rating</td><td>best rating</td><td>the highest rating for this book is 10</td><td>10</td></tr><tr><td rowspan="2">SearchCreativeWork</td><td>object_name</td><td>object name</td><td>paris baguette is a bakery chain based in south korea owned by the spc group</td><td>paris baguette</td></tr><tr><td>object_type</td><td>object type</td><td>paris baguette is a bakery chain based in south korea owned by the spc group</td><td>bakery</td></tr><tr><td rowspan="7">SearchScreeningEvent</td><td>timeRange</td><td>time range</td><td>the movie starts at half past eight pm</td><td>half past eight pm</td></tr><tr><td>movie_type</td><td>movie type</td><td>i want to see a comedy like green book</td><td>comedy</td></tr><tr><td>object_location_type</td><td>location type</td><td>the cinema theatre is the closest movie house showing green book</td><td>movie house</td></tr><tr><td>object_type</td><td>object type</td><td>show me the movie poster of green book</td><td>movie poster</td></tr><tr><td>location_name</td><td>location name</td><td>the cinema theatre is the closest movie house showing green book</td><td>the cinema theatre</td></tr><tr><td>spatial_location</td><td>spatial relation</td><td>the cinema theatre is the closest movie house showing green book</td><td>closest</td></tr><tr><td>movie_name</td><td>movie name</td><td>the cinema theatre is the closest movie house showing green book</td><td>green book</td></tr></table>
201
+
202
+ Table 5: Illustration of slot description, context and example.
aisfgabundantinformationslotfillinggenerator/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7279edcf62a94d7f170b1e86b31c88514d1ee0bb34d145634108f894dfc28e1e
3
+ size 576410
aisfgabundantinformationslotfillinggenerator/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2639aa6a626d0c9585613aa1634767d7dbf8f2d6aa9387879a786315135a0224
3
+ size 222700
aligningtosocialnormsandvaluesininteractivenarratives/801313c5-da51-4f59-a1c1-c34f96e38cf0_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:361f9c34416fe8cc7342ad09e3f84793e7e76aa7db8fe2640b01c7f9749112b6
3
+ size 252393
aligningtosocialnormsandvaluesininteractivenarratives/801313c5-da51-4f59-a1c1-c34f96e38cf0_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ea37b3c0097937bc435a87f1338b836a24c140a5f2515dd405836182e915052
3
+ size 312841
aligningtosocialnormsandvaluesininteractivenarratives/801313c5-da51-4f59-a1c1-c34f96e38cf0_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c9fb6f63d6de65728fc6a4b759980cd64ed09ecbb00f713a406354e154904b3b
3
+ size 2373629
aligningtosocialnormsandvaluesininteractivenarratives/full.md ADDED
The diff for this file is too large to render. See raw diff
 
aligningtosocialnormsandvaluesininteractivenarratives/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6fd4c6f1d0d01978d9bbd9f6d83ad91c95a72e733979ce54d767171ad13367fb
3
+ size 809487
aligningtosocialnormsandvaluesininteractivenarratives/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:095b758bb4dce53b10d5691faaa8156ae276032d31fb70410fd1cc2747be4dbc
3
+ size 1268669
allyoumayneedforvqaareimagecaptions/070b1b0d-8876-49d0-b66a-1d68d5dd2c92_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:470f102b498032c0df166925decd58c1d12432a3286fd25001acecf9d08266d6
3
+ size 124068
allyoumayneedforvqaareimagecaptions/070b1b0d-8876-49d0-b66a-1d68d5dd2c92_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:254dafccc076dcc82cf5e4d66139170b73226c70f9f90f59e2101202e0505623
3
+ size 148124
allyoumayneedforvqaareimagecaptions/070b1b0d-8876-49d0-b66a-1d68d5dd2c92_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be302a4b5e44e52cb0c60948e46ebebefd40e65f87fbb892c120a032be5df46e
3
+ size 2117275
allyoumayneedforvqaareimagecaptions/full.md ADDED
@@ -0,0 +1,459 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # All You May Need for VQA are Image Captions
2
+
3
+ Soravit Changpinyo* Doron Kukliansky*
4
+ Idan Szpektor Xi Chen Nan Ding Radu Soricut
5
+ Google Research
6
+
7
+ {schangpi,doronk,szpektor,chillxichen,dingnan,rsoricut}@google.com
8
+
9
+ # Abstract
10
+
11
+ Visual Question Answering (VQA) has benefited from increasingly sophisticated models, but has not enjoyed the same level of engagement in terms of data creation. In this paper, we propose a method that automatically derives VQA examples at volume, by leveraging the abundance of existing image-caption annotations combined with neural models for textual question generation. We show that the resulting data is of high quality. VQA models trained on our data improve state-of-the-art zero-shot accuracy by double digits and achieve a level of robustness that the same model trained on human-annotated VQA data lacks.
12
+
13
+ # 1 Introduction
14
+
15
+ Visual Question Answering (VQA) is a complex multimodal task that, to be successfully modeled and evaluated, requires large amounts of annotations that are not naturally produced by existing business processes, the way translation-pair annotations (Guo et al., 2018) or image alt-text annotations (Sharma et al., 2018) are produced.
16
+
17
+ At present, a main bottleneck for developing robust VQA systems that are useful for downstream applications, such as for visually-impaired people and in the medical and education domains, appears to be the lack of large sets of image-question-answer training triplets (on the order of millions). Manual annotation of such triplets is costly, time-consuming, and prone to a variety of human biases that are difficult to account for (Yuan, 2021). In addition, the brittleness of VQA systems trained on such manual annotations is well understood and documented (Agrawal et al., 2018; Kafle and Kanan, 2017).
18
+
19
+ To address the data limitation, we turn to a potential source for creating VQA examples: image-English caption pairs (Chen et al., 2015; Sharma et al., 2018). Large-scale image caption datasets
20
+
21
+ ![](images/ed2c7fcb64650611180022d9b18d2ac884f4cd18ef417a0696fcdb2a7c341bdc.jpg)
22
+ Figure 1: Given an English caption (along with its corresponding image), our $\mathrm{VQ^2A}$ method generates high-quality question-answer pairs. These image-question-answer triplet data can be automatically produced at volume (millions of examples) and used to effectively train VQA systems.
23
+
24
+ exist with millions (Changpinyo et al., 2021), several hundred million (Radford et al., 2021), or even billions (Jia et al., 2021) of examples. Captions come mostly in the form of declarative sentences, e.g., "two bears are laying down on the ice". Yet, the task of converting declarative captions into VQA question/answer pairs is still largely unexplored. It requires automatically inducing candidate answers fitting the VQA task, along with their respective questions, based on the caption text (Fig. 1). We note that transforming the declarative form into an interrogative form plus answer(s) seems crucial, as there is evidence that a vision-and-language model trained on declarative-language data cannot be successfully adapted or transferred "out-of-the-box" for VQA (Wang et al., 2021).
25
+
26
+ In this paper, we explore the automatic creation of millions of VQA training examples using neural models for textual question generation and question answering. We refer to this method as $\mathrm{VQ^2A}$, for Visual Question Generation with Question Answering validation.
27
+
28
+ We demonstrate that VQA models trained on such data, with no exposure to human-annotated VQA data at all, exhibit high zero-shot performance. Our best models obtain $61.1\%$ accuracy on VQA2.0 and $52.1\%$ on GQA, around 15-17 points higher than previous zero-shot state-of-the-art results and close to fully-supervised performance. In addition, taking our generated examples as a test set, we provide further evidence for the brittleness of VQA systems built with human-annotated examples, as well as evidence for the robustness of VQA systems built with the automatically-induced $\mathrm{VQ}^2\mathrm{A}$ data.
29
+
30
+ # 2 Related Work
31
+
32
+ # 2.1 Question generation in NLP
33
+
34
+ Question Generation (QG) is an active research topic in NLP. It is explored as a standalone task (Heilman and Smith, 2009; Nema et al., 2019), as a pre-training task for language models (Narayan et al., 2020) and as a component in solutions for other textual tasks, such as question answering (Alberti et al., 2019; Puri et al., 2020), information retrieval (Mass et al., 2020; Gaur et al., 2021) and generation evaluation (Durmus et al., 2020; Wang et al., 2020; Honovich et al., 2021). There are two main directions to QG: template-based (Heilman and Smith, 2009; Lyu et al., 2021; Dhole and Manning, 2020) and neural-based, with the latter achieving state-of-the-art results (Alberti et al., 2019; Narayan et al., 2020).
35
+
36
+ # 2.2 Question generation in computer vision
37
+
38
+ Question generation in computer vision aims at generating visual questions about a given image (or video), either for generating questions without knowing the answer (Mostafazadeh et al., 2016; Zhang et al., 2017; Yang et al., 2018; Uehara et al., 2018; Krishna et al., 2019), e.g., for them to be answered by humans, or to help improving the VQA task (Kafle et al., 2017; Li et al., 2018; Shah et al., 2019; Xu et al., 2021; Kil et al., 2021; Akula et al., 2021), e.g., for additional evaluation and as means of data augmentation. Such QG models are typically based on VQA triplets as training data, whose language complexity is often limited, or require the collection of visual QG data (Mostafazadeh et al., 2016). We take a different approach by leveraging models trained on textual QA datasets instead.
39
+
40
+ Multiple works leverage image captions or video transcripts as training sources (Ren et al., 2015a; Banerjee et al., 2021; Yang et al., 2021a; Lee et al., 2021). In this approach, question-answer pairs are automatically generated from the text, ignoring the visual source, and are then combined with the related image/video to produce image-question-answer triplets. Banerjee et al. (2021) propose WeaQA, in which they generate questions from MSCOCO image captions (Chen et al., 2015) using an improved version of the template-based approach of COCOQA (Ren et al., 2015a) as well as QA-SRL methods, enhanced by paraphrasing and backtranslation for linguistic variation. Lee et al. (2021) similarly train a VQA model on question-answer pairs derived from MSCOCO Captions but only use noun phrases as candidate answers, focusing on verifying generated captions rather than on the VQA task itself. Yang et al. (2021a) generate question-answer pairs from instructional video ASR transcripts, which are then coupled with the related video.
41
+
42
+ In this work, we follow this direction, investigating what it takes to generate data with good coverage for the VQA task in the image domain. We show that our neural-based textual question generation approach with captions is much more effective than previous approaches. Further, unlike previous work, we also explore automatically-curated out-of-domain image-text data sources.
43
+
44
+ # 2.3 Transfer learning for and in VQA
45
+
46
+ Existing work also explores the relationship between the image captioning task and the VQA task without question generation (Section 2.2). Fisch et al. (2020) perform image captioning by anticipating visual questions (i.e., using VQA data as additional supervision and post-inference evaluation). Wu et al. (2019) generate question-relevant image captions to aid VQA. Yang et al. (2021b) prompt GPT-3 (Brown et al., 2020) to answer knowledge-based visual questions based on generated captions and tags and a few VQA examples.
47
+
48
+ Evidence suggests that image-text pre-training, especially when performed at scale, benefits vision-and-language tasks, including VQA (Lu et al., 2019; Li et al., 2019; Chen et al., 2020; Tan and Bansal, 2019; Su et al., 2020; Lu et al., 2020; Zhou et al., 2020; Li et al., 2020; Zhang et al., 2021; Cho et al., 2021; Wang et al., 2021; Yuan et al., 2021).
49
+
50
+ However, these approaches do not work well without fine-tuning on the downstream VQA data (Wang et al., 2021). Further, prompt-based learning and inference (Liu et al., 2021) from a pre-trained image-text model that works for VQA remains an open research problem. In contrast, our approach works directly with the training data, explicitly transforming it into the interrogative form of question-answer pairs.
51
+
52
+ Our focus is the zero-shot transfer setting in WeaQA (Banerjee et al., 2021) in which no manually-created VQA triplets are available during training. Note that the term zero-shot here is different from the one used in (Teney and Hengel, 2016), in which the model still has access to manually-created VQA triplets but is evaluated with unseen questions at test time. Similar to this, Chao et al. (2018b) explore cross-dataset VQA but they solely focus on human-annotated data along with approaches to transfer.
53
+
54
+ # 3 Textual Question Generation for VQA
55
+
56
+ We study whether automatically producing VQA annotations from existing image-text resources can alleviate or completely replace the need for manual data annotation. We only focus on English in this paper. To this end, we follow and improve upon some of the recent directions in Section 2.2 on automatic question-answer generation from text.
57
+
58
+ We start with a given dataset of image-caption pairs $D = \{\mathrm{img}_i, \mathrm{cap}_i\}_{i=1}^N$ . An important assumption we take is that the information conveyed by the caption is, in the vast majority of cases, present in the image, i.e., captions do not contain an excessive amount of external-world or personal knowledge (e.g., "my friend at my birthday party").
59
+
60
+ For each pair $\{\mathrm{img}_i,\mathrm{cap}_i\}$, an initial set of candidate answers $\{\mathrm{a}_{i,j}\}_{j = 1}^{M_i}$ is first automatically derived from $\mathrm{cap}_i$. For each such candidate answer, a question is generated by a neural model $\mathrm{q}_{i,j} = QG(\mathrm{a}_{i,j},\mathrm{cap}_i)$. Each generated question-answer pair undergoes a validation step, and, if validated, is coupled with the corresponding image $\mathrm{img}_i$ to induce a VQA example triplet $\{\mathrm{img}_i,\mathrm{q}_{i,j},\mathrm{a}_{i,j}\}$.
61
+
62
+ We refer to this method as $\mathrm{VQ^2A}$ (Visual Question Generation with Question Answering validation). Figure 2 provides an overview of our approach. We next detail the steps in $\mathrm{VQ^2A}$ .
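+
+ The overall flow can be summarized by the skeleton below; `extract_answers`, `generate_question`, and `qa_validate` are placeholders for the components described in Sections 3.1-3.3, not concrete implementations.
+
+ ```python
+ # Skeleton of the VQ^2A pipeline described above. The three helpers are
+ # placeholders for the components in Sections 3.1 (candidate answer
+ # extraction), 3.2 (question generation), and 3.3 (QA filtering).
+ def vq2a(image_caption_pairs):
+     """Turn (image, caption) pairs into (image, question, answer) triplets."""
+     triplets = []
+     for image, caption in image_caption_pairs:
+         for answer in extract_answers(caption):            # Section 3.1
+             question = generate_question(answer, caption)  # Section 3.2
+             if qa_validate(question, caption, answer):     # Section 3.3
+                 triplets.append((image, question, answer))
+     return triplets
+ ```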
63
+
64
+ # 3.1 Candidate Answer Extraction
65
+
66
+ The only prior work on neural question generation from captions that we are aware of, Lee et al. (2021), focuses on noun phrases as candidate answers.
67
+
68
+ Yet, noun phrases alone are not enough to cover the answer types included in typical VQA benchmarks such as VQA2.0 (as we will show in Section 5.1): boolean, attribute, and verb answers, to name a few, which are required for questions like "Is there...", "What color...", and "What is the dog doing". We present a method that covers all of these answer types.
69
+
70
+ To extract candidate answers from a given caption, we parse it using $\mathsf{spaCy}^1$ and then extract candidates based on the Part-of-Speech (POS) and dependency parse tree annotations, as follows:
71
+
72
+ Noun Phrases. We extract all noun phrases annotated by spaCy, including named entities.
73
+
74
+ POS Spans. We extract sequences that begin with an open-class POS (nouns, verbs, adjectives and adverbs), that end with an open-class POS or an adverbial particle, and that do not contain any other POS in between except closed-class POS for determiners, adpositions and conjunctions.
75
+
76
+ Parse Tree Spans. We consider all sub-trees that include at least one open-class POS and no more than 3 words altogether. We only extract maximal spans, i.e., not extracting sub-trees that are fully included in other extracted sub-trees.
+
+ Boolean. Boolean questions are frequent in VQA benchmarks (Goyal et al., 2017). Yet, 'yes' and 'no' are not found in captions, and so cannot be obtained by extracting text spans from captions. Therefore, we also add 'yes' and 'no' as candidate answers and generate one question per candidate (see Section 3.2).
+
+ How many? 0. Captions do not normally contain mentions of ‘zero’ object counts. Hence, marking spans in a caption does not generate questions with the answer ‘0’. Therefore, we randomly sample a generated “How many?” question (with a non-zero answer) from a different caption and add it with the answer changed to ‘zero’ to the candidate set of the target caption. This procedure is potentially noisy because the answer for the sampled question could be non-zero also for the target image. From a manual inspection of 200 such questions, we found this to happen infrequently – about $4.5\%$ .
+
+ ![](images/01d469ee63a1145365dc287c4864a625dcd753747c30dda980db80990c698f30.jpg)
+ Figure 2: Visual Question Generation with Question Answering validation $(\mathrm{VQ}^2\mathrm{A})$ has three main stages: Candidate Answer Extraction (Section 3.1), Question Generation (Section 3.2), and Question-Answering Filtering (Question Answering + Answer Validation, Section 3.3).
+
+ <table><tr><td>Candidate Answer</td><td>Noun Phrase</td><td>POS</td><td>Parse Tree</td><td>Boolean</td></tr><tr><td>‘two’</td><td>✓</td><td>✓</td><td></td><td></td></tr><tr><td>‘bears’</td><td></td><td>✓</td><td></td><td></td></tr><tr><td>‘two bears’</td><td>✓</td><td></td><td>✓</td><td></td></tr><tr><td>‘laying’</td><td></td><td>✓</td><td></td><td></td></tr><tr><td>‘laying down’</td><td></td><td>✓</td><td></td><td></td></tr><tr><td>‘ice’</td><td></td><td>✓</td><td></td><td></td></tr><tr><td>‘the ice’</td><td>✓</td><td></td><td></td><td></td></tr><tr><td>‘on the ice’</td><td></td><td></td><td>✓</td><td></td></tr><tr><td>‘no’</td><td></td><td></td><td></td><td>✓</td></tr><tr><td>‘yes’</td><td></td><td></td><td></td><td>✓</td></tr></table>
+
+ Table 1: Answer candidates extracted from the sentence "two bears are laying down on the ice" and the mechanism used to extract them.
+
+ Our extraction method covers various answer candidates: compound nouns, noun phrases, named entities, boolean answers, cardinal and ordinal numbers, verbs and their compounds, (multi-word) adjectives, and prepositional phrases. Table 1 provides an example of candidate answers of various types and the mechanism used to extract them.
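+
+ For concreteness, the following condensed sketch implements these extraction rules with spaCy and reproduces, approximately, the candidates of Table 1. The POS tag sets and the maximality bookkeeping are simplified relative to our full implementation, and the zero-count augmentation described above is omitted.
+
+ ```python
+ # Simplified sketch of the candidate answer extraction rules (Section 3.1).
+ import spacy
+
+ OPEN_CLASS = {"NOUN", "PROPN", "VERB", "ADJ", "ADV", "NUM"}
+ CONNECTORS = {"DET", "ADP", "CCONJ", "SCONJ"}  # allowed inside POS spans
+
+ nlp = spacy.load("en_core_web_sm")
+
+ def candidate_answers(caption, max_tree_words=3):
+     doc = nlp(caption)
+     cands = {chunk.text for chunk in doc.noun_chunks}  # noun phrases
+     # POS spans: start on an open-class POS, end on an open-class POS or
+     # an adverbial particle, with only closed-class connectors in between.
+     for i in range(len(doc)):
+         if doc[i].pos_ not in OPEN_CLASS:
+             continue
+         for j in range(i, len(doc)):
+             tok = doc[j]
+             if tok.pos_ in OPEN_CLASS or tok.dep_ == "prt":
+                 if all(t.pos_ in OPEN_CLASS | CONNECTORS or t.dep_ == "prt"
+                        for t in doc[i + 1:j]):
+                     cands.add(doc[i:j + 1].text)
+             elif tok.pos_ not in CONNECTORS:
+                 break
+     # Parse-tree spans: maximal sub-trees of <= 3 words containing at
+     # least one open-class token.
+     trees = []
+     for tok in doc:
+         sub = sorted(tok.subtree, key=lambda t: t.i)
+         if len(sub) <= max_tree_words and any(t.pos_ in OPEN_CLASS for t in sub):
+             trees.append((sub[0].i, sub[-1].i))
+     for s, e in trees:
+         if not any(s2 <= s <= e <= e2 and (s2, e2) != (s, e) for s2, e2 in trees):
+             cands.add(doc[s:e + 1].text)
+     cands |= {"yes", "no"}  # boolean candidates
+     return cands
+
+ print(sorted(candidate_answers("two bears are laying down on the ice")))
+ ```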
+
+ # 3.2 Question Generation
+
+ Our question generation model, $\mathrm{q} = QG(\mathrm{a},\mathrm{cap})$ , takes as input a caption, cap, and a candidate answer span within it, a, and generates a question q, whose answer given the input caption is the input answer span. Importantly, the answer a does not need to appear verbatim in the caption, enabling the generation of questions for answer types like boolean and zero counts (see Section 3.1).
+
+ Given the advances in neural text generation, including models like T5 (Raffel et al., 2020), we choose to use a neural generation model as $QG$ . Concretely, we use a T5-XXL model and further fine-tune it on SQuAD1.1 (Rajpurkar et al., 2016) for question generation. We take the top-scoring generated question for each caption-answer input. We note that our QG model is trained on a question answering dataset that is not caption-specific, and is therefore not optimized for caption inputs. Still, from manual inspection of hundreds of generated questions, we find that our QG model copes well with captions as input; see examples in Table 2 and Section 3.5.
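+
+ As a rough public analogue of this step, the snippet below drives a T5-style QG model through the Hugging Face transformers API. Note that "t5-base" is only a placeholder (a checkpoint actually fine-tuned for question generation is needed), and the "answer: ... context: ..." serialization is an illustrative convention, not our exact input format.
+
+ ```python
+ # Illustrative q = QG(a, cap) call with a placeholder T5 checkpoint.
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
+
+ tokenizer = T5Tokenizer.from_pretrained("t5-base")  # placeholder checkpoint
+ model = T5ForConditionalGeneration.from_pretrained("t5-base")
+
+ def generate_question(answer: str, caption: str) -> str:
+     # Condition the generator on both the candidate answer and the
+     # caption, and keep the top-scoring beam.
+     prompt = f"answer: {answer} context: {caption}"
+     inputs = tokenizer(prompt, return_tensors="pt")
+     outputs = model.generate(**inputs, num_beams=4, max_length=64)
+     return tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+ print(generate_question("two", "two bears are laying down on the ice"))
+ ```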
+
+ # 3.3 Question-Answer Filtering
+
+ Generative models may hallucinate, that is, generate content that is inconsistent with their input source (Alberti et al., 2019; Honovich et al., 2021). To mitigate this, we follow Alberti et al. (2019) and apply round-trip consistency: we answer the generated question over the caption text with a question answering model, and if the answer does not match the answer candidate offered as input to the question generation model, the generated question is discarded.
+
+ We use the token-level F1 score (Wang et al., 2020) to determine whether the candidate answer and the QA model's answer are a match; if the score is above a threshold (manually set to 0.54, exemplified in Table 2), the pair is a match. For question answering, we use a T5-XXL model and further fine-tune it on SQuAD2.0 (Rajpurkar et al., 2018) and Natural Questions (Kwiatkowski et al., 2019).
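+
+ The following sketch reproduces the filtering decision with a SQuAD-style token F1 (lowercasing, punctuation and article stripping). This normalization matches the scores in Table 2, though the exact normalization of (Wang et al., 2020) may differ in details.
+
+ ```python
+ # Round-trip filtering (Section 3.3): keep a generated pair only if the
+ # QA model's answer matches the candidate under token-level F1.
+ import re
+ import string
+ from collections import Counter
+
+ def normalize(text: str) -> str:
+     text = text.lower()
+     text = "".join(ch for ch in text if ch not in string.punctuation)
+     text = re.sub(r"\b(a|an|the)\b", " ", text)  # strip articles
+     return " ".join(text.split())
+
+ def token_f1(pred: str, gold: str) -> float:
+     pred_toks, gold_toks = normalize(pred).split(), normalize(gold).split()
+     overlap = sum((Counter(pred_toks) & Counter(gold_toks)).values())
+     if overlap == 0:
+         return 0.0
+     p, r = overlap / len(pred_toks), overlap / len(gold_toks)
+     return 2 * p * r / (p + r)
+
+ def keep_pair(candidate: str, qa_answer: str, threshold: float = 0.54) -> bool:
+     """Keep the generated question iff F1 >= threshold."""
+     return token_f1(qa_answer, candidate) >= threshold
+
+ # Mirrors Table 2: 'laying' fails (F1 = 0.4), 'laying down' passes (0.7).
+ assert not keep_pair("laying", "laying down on the ice")
+ assert keep_pair("laying down", "laying down on the ice")
+ ```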
+
+ # 3.4 Sources of Image/Caption Data
+
+ To gain insights into $\mathrm{VQ^2A}$ 's potential performance, we generate VQA triplets with $\mathrm{VQ^2A}$ from two sources of image captions: MSCOCO Captions (COCO-CAP) (Chen et al., 2015) and Conceptual Captions (CC3M) (Sharma et al., 2018). COCO-CAP contains 123,287 images from the COCO dataset (Lin et al., 2014), each with 5 gold captions manually created by raters following careful guidelines.
+
+ <table><tr><td>Candidate Answer</td><td>Generated Question</td><td>Validated Answer</td><td>Match Score &amp; Result</td></tr><tr><td>‘two’</td><td>‘How many bears are laying on the ice?’</td><td>‘two’</td><td>1.0 (Pass)</td></tr><tr><td>‘bears’</td><td>‘What are the two animals laying on the ice?’</td><td>‘bears’</td><td>1.0 (Pass)</td></tr><tr><td>‘two bears’</td><td>‘How many bears are laying on the ice?’</td><td>‘two’</td><td>1.0 (Pass)</td></tr><tr><td>‘laying’</td><td>‘What are the bears doing?’</td><td>‘laying down on the ice’</td><td>0.4 (Fail)</td></tr><tr><td>‘laying down’</td><td>‘What are the bears doing?’</td><td>‘laying down on the ice’</td><td>0.7 (Pass)</td></tr><tr><td>‘ice’</td><td>‘Two bears are laying down on what?’</td><td>‘the ice’</td><td>1.0 (Pass)</td></tr><tr><td>‘the ice’</td><td>‘Where are the bears laying?’</td><td>‘on the ice’</td><td>0.7 (Pass)</td></tr><tr><td>‘on the ice’</td><td>‘Where are the bears laying?’</td><td>‘on the ice’</td><td>1.0 (Pass)</td></tr><tr><td>‘no’</td><td>‘Are the bears sleeping?’</td><td>‘yes’</td><td>0.0 (Fail)</td></tr><tr><td>‘yes’</td><td>‘Are the bears on the ice?’</td><td>‘yes’</td><td>1.0 (Pass)</td></tr><tr><td>‘zero’</td><td>‘How many people are sitting down?’</td><td>-</td><td>Pass by definition</td></tr></table>
+
+ Table 2: Question/answer pairs generated from the sentence "two bears are laying down on the ice" and the filtering decision. For answer candidate 'zero', no validation is performed.
+
+ <table><tr><td rowspan="2">Dataset</td><td colspan="2">Image</td><td colspan="2">VQA examples</td></tr><tr><td>train</td><td>dev</td><td>train</td><td>dev</td></tr><tr><td>\( VQ^2A \) COCO</td><td>114.9K</td><td>8.4K</td><td>3.50M</td><td>257.5K</td></tr><tr><td>\( VQ^2A \) CC3M</td><td>3.32M</td><td>15.8K</td><td>13.29M</td><td>61.2K</td></tr><tr><td>COCOQA</td><td>64.5K</td><td>4.7K</td><td>108.7K</td><td>38.6K</td></tr><tr><td>VQA2.0</td><td>114.9K</td><td>8.4K</td><td>582K</td><td>65.1K</td></tr><tr><td>GQA</td><td>82.4K</td><td>0.4K</td><td>1.08M</td><td>12.6K</td></tr><tr><td>OKVQA</td><td>9K</td><td>5K</td><td>8.3K</td><td>4.7K</td></tr></table>
+
+ Table 3: Sizes of our generated $\mathrm{VQ^2A}$ data (top two rows) and VQA datasets used in our experiments.
+
+ CC3M contains 3.32M images automatically collected from the web, each with one associated alt-text, which we treat as a silver caption.
+
+ These datasets are quite different. CC3M is larger in both the number and the domain coverage of its images, and its captions are more likely to capture a larger set of object/attribute/action annotations. On the other hand, COCO-CAP's captions are cleaner and represent image content more adequately (see also Section 3.5). Thus, using COCO-CAP shows the potential of training a VQA model with $\mathrm{VQ^2A}$ in a "cleaner" zero-shot setup, where captions are human-curated, while using CC3M indicates the potential of training on noisy web image-alt-text pairs, where scaling up to billions of examples is possible.
+
+ To quantify the impact of our method, we focus on VQA classification for the VQA2.0 (Goyal et al., 2017), GQA (Hudson and Manning, 2019), and OKVQA (Marino et al., 2019) benchmarks (see Section 4.2). We thus restrict our classifier to the top 5,971 answers that are part of a unified answer vocabulary from these benchmarks (Appendix B.1). To this end, we remove triplets whose answers are not in the target answer vocabulary, and leave the study of using all generated triplets to future work. We then split our datasets into train/dev sets. In particular, since the images in VQA2.0 are taken from COCO, we split the COCO dataset based on the standard VQA2.0 train/dev splits of *train2014 and minival2014 (Jiang et al., 2018) $^{2}$ . For the CC3M dataset, we use the default CC3M train/dev splits (Sharma et al., 2018). For each unique image-question pair in the dev split, we construct an answer target of size 10, following VQA2.0, by reducing or expanding the set of seed answers that occur for this image-question pair. Additional details are in Appendix B.1.
+
+ Table 3 depicts the size of the induced datasets, named $\mathrm{VQ^2A}$-COCO and $\mathrm{VQ^2A}$-CC3M, as well as the VQA datasets used in our experiments.
+
+ # 3.5 Quality Analysis
+
+ To measure the quality of the generated datasets, we sampled 800 examples from each of the $\mathrm{VQ^2A}$-COCO and $\mathrm{VQ^2A}$-CC3M datasets. The sample was split between four authors, who assessed whether the answer to the question in an example is justified based on the example's image. For each dataset, 50 examples were rated by all raters, resulting in a free-marginal kappa (Randolph, 2005) of 0.71 for $\mathrm{VQ^2A}$-COCO and 0.59 for $\mathrm{VQ^2A}$-CC3M, corresponding to high inter-rater agreement. The measured percentage of valid triplets is $87.3\%$ for $\mathrm{VQ^2A}$-COCO and $66.0\%$ for $\mathrm{VQ^2A}$-CC3M. This shows the difference between the high-quality captions of COCO-CAP and the noisier web-based ones of CC3M.
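+
+ For reference, the free-marginal multirater kappa used above is $\kappa_{\mathrm{free}} = (\bar{P}_o - 1/q) / (1 - 1/q)$ , where $\bar{P}_o$ is the observed agreement and $q$ is the number of rating categories (2 here: valid/invalid). A small sketch of the computation (the example ratings are illustrative, not our actual annotations):
+
+ ```python
+ # Free-marginal multirater kappa (Randolph, 2005).
+ def free_marginal_kappa(ratings, q=2):
+     """ratings: one list of rater labels per item, e.g. [[1, 1, 0, 1], ...]."""
+     p_o = 0.0
+     for item in ratings:
+         n = len(item)
+         same = sum(item.count(c) * (item.count(c) - 1) for c in set(item))
+         p_o += same / (n * (n - 1))  # pairwise agreement on this item
+     p_o /= len(ratings)
+     return (p_o - 1.0 / q) / (1.0 - 1.0 / q)
+
+ # Example with 3 items rated by 4 raters (1 = valid, 0 = invalid).
+ print(free_marginal_kappa([[1, 1, 1, 1], [1, 1, 0, 1], [0, 0, 0, 1]]))
+ ```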
+
+ Fig. 3 demonstrates the diversity of questions generated in the $\mathrm{VQ^2A}$ datasets. One can see that a significant number of the questions generated by $\mathrm{VQ^2A}$ for shared VQA2.0/COCO images do not appear in VQA2.0. Additional analysis and examples are in Appendix A.
+
+ # 4 Visual Question Answering (VQA)
+
+ To assess the effectiveness of our automatic generation of VQA annotations, we perform extrinsic evaluations of the generated data by measuring its impact on a variety of established VQA benchmarks. We first describe the model, followed by the experimental setup and the results.
+
+ ![](images/c9d751e962b4e3df48c29121327e3da207a19a4a495d816c7b33e14b07cae520.jpg)
+
+ <table><tr><td>Question</td><td>Answers</td></tr><tr><td>How many pieces of fruit are in the bowl</td><td>“0”</td></tr><tr><td>Is there a refrigerator in the kitchen</td><td>“yes”</td></tr><tr><td>What color are the cabinets in the kitchen</td><td>“white”</td></tr><tr><td>Is the kitchen lit or dark</td><td>“lit”</td></tr><tr><td>Is there a stove in the kitchen</td><td>“no”</td></tr><tr><td>What color is the formula in the kitchen</td><td>“white”</td></tr><tr><td>What is on the door of the refrigerator</td><td>“papers”, “several papers”</td></tr><tr><td>Where are the papers on the refrigerator</td><td>“door”</td></tr><tr><td>What kind of kitchen does the house have</td><td>“small white formula kitchen”</td></tr></table>
+
+ ![](images/d8269d2870a85fec3dce43faca209a352cc14485c5d06181006d9401e599e02c.jpg)
+
+ <table><tr><td>Question</td><td>Answers</td></tr><tr><td>Is the tumbler dishwasher safe</td><td>“no”</td></tr><tr><td>Is the tumbler insulated</td><td>“yes”</td></tr><tr><td>What color is the tumbler</td><td>“blue”, “shiny blue”</td></tr><tr><td>What kind of blue is on the tumbler</td><td>“shiny”</td></tr><tr><td>What is the name of the blue drinkware item</td><td>“tumbler”</td></tr><tr><td>What is ceramic used for in the tumbler</td><td>“lining”</td></tr><tr><td>Which part of the tumbler is made of stainless steel</td><td>“exterior”</td></tr><tr><td>Which part of the tumbler is ceramic</td><td>“inner”</td></tr><tr><td>What is the purpose of the stainless steel exterior of the tumbler</td><td>“insulating”</td></tr><tr><td>What kind of steel is on the outside of the tumbler</td><td>“stainless”</td></tr><tr><td>What material is the inside of the tumbler made of</td><td>“ceramic”</td></tr><tr><td>What material is the outside of the tumbler made of</td><td>“steel”, “stainless steel”</td></tr></table>
+
+ ![](images/dd4e86266f19fab37be8ddea4423a8843ec35ae5ec476a14e877945bc8bd36e1.jpg)
+
+ <table><tr><td>Question</td><td>Answers</td></tr><tr><td>What is flying over the ocean</td><td>&quot;eagle&quot;, &quot;brown eagle&quot;</td></tr><tr><td>What color is the bird&#x27;s head</td><td>&quot;white&quot;</td></tr><tr><td>What color is the bird</td><td>&quot;brown&quot;</td></tr><tr><td>Is the brown eagle flying over land</td><td>&quot;No&quot;</td></tr><tr><td>Is the brown eagle flying over the ocean</td><td>&quot;Yes&quot;</td></tr><tr><td>What does the bird do over the water</td><td>&quot;glides&quot;</td></tr><tr><td>Which part of the bird is white</td><td>&quot;head&quot;</td></tr><tr><td>What color are the bird&#x27;s wings</td><td>&quot;brown&quot;, &quot;brown wings&quot;</td></tr><tr><td>What color is the eagle in the picture</td><td>&quot;brown&quot;</td></tr><tr><td>Why is the bird flying down to the water</td><td>&quot;To catch food&quot;</td></tr></table>
+
+ ![](images/40fe3812f9d94ac7682e7e985bcde44f9aff6ec166d69ee9fb74896db832a371.jpg)
+
+ <table><tr><td>Question</td><td>Answers</td></tr><tr><td>What type of art is this</td><td>“vector”</td></tr><tr><td>Aside from watches what else is included in the illustration</td><td>“clocks”</td></tr><tr><td>What is the illustration of</td><td>“clocks and watches”</td></tr><tr><td>Is this a hand drawn clock and watch set</td><td>“yes”</td></tr><tr><td>How were the clocks and watches drawn</td><td>“hand”</td></tr><tr><td>In addition to gold what color is used for the clocks and watches in this illustration</td><td>“gray”</td></tr><tr><td>What colors are the clocks and watches in the illustration</td><td>“gray and gold”</td></tr><tr><td>What kind of illustration is this</td><td>“vector art”</td></tr></table>
+
+ ![](images/c0454e2ae175df883391b9c9c7b7b931ac600568414ecca620dd98a2ebb68021.jpg)
+ Figure 3: Examples from $\mathrm{VQ^2A}$-COCO (top) and $\mathrm{VQ^2A}$-CC3M (bottom). Questions with the green background are also present in VQA2.0.
+ Figure 4: VQA model used in our experiments. The text encoder is initialized from a T5-base checkpoint, while the image-text encoder is initialized from scratch. The parameters of ResNet and Faster R-CNN are frozen during VQA training.
+
+ # 4.1 VQA Formulation and Model
+
+ Following the literature, we treat VQA as a classification task, i.e., vocab-based VQA. In particular, we treat our target answers as labels, where a label could be multi-token (e.g., "Christmas tree", "black and white", "play tennis"). We define our set of labels based on top answers in the training set of downstream VQA datasets, which allows for a fair comparison with most work in the VQA literature since Antol et al. (2015).
+
+ Since our work explores the impact of automatically-generated training data, we fix the VQA model architecture across all experimental conditions. Our model fuses the input image and question (Fig. 4). On the image side, we take global image features from ResNet-152 (He et al., 2016) pre-trained on ImageNet (Russakovsky et al., 2015) plus 16 region-of-interest image features from Faster R-CNN (Ren et al., 2015b) pre-trained on Visual Genome (Krishna et al., 2017), following the bottom-up-features paradigm (Anderson et al., 2018). On the question side, we use the encoder of a pre-trained T5-base checkpoint (Raffel et al., 2020). Given the image features and the output token embeddings of the question encoder, a Transformer (Vaswani et al., 2017) fuses the multi-modal intermediate representation and classifies it into the predefined answer space. We train the (randomly-initialized) fusing encoder and the text encoder end-to-end using standard cross-entropy loss. The parameters of both ResNet and Faster R-CNN are frozen during training. Additional details are given in Appendix B.2.
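+
+ A schematic PyTorch analogue of the architecture in Fig. 4 is sketched below. Our actual implementation is in Flaxformer with a genuine T5-base text encoder; here the text encoder is reduced to an embedding stub, only the shapes follow Appendix B.2 (6 blocks, 12 heads, embedding dimension 768, MLP dimension 2048), and the mean-pooled classification head is likewise an illustrative simplification.
+
+ ```python
+ # Schematic analogue of the VQA model in Fig. 4 (not the Flaxformer code).
+ import torch
+ import torch.nn as nn
+
+ class FusionVQA(nn.Module):
+     def __init__(self, vocab_size=32128, num_answers=5971, d_model=768,
+                  n_heads=12, n_layers=6, d_ff=2048, d_image=2048):
+         super().__init__()
+         self.text_embed = nn.Embedding(vocab_size, d_model)  # T5 encoder stub
+         self.image_proj = nn.Linear(d_image, d_model)  # frozen CNN feats in
+         layer = nn.TransformerEncoderLayer(d_model, n_heads, d_ff,
+                                            batch_first=True)
+         self.fusion = nn.TransformerEncoder(layer, n_layers)  # fusing encoder
+         self.classifier = nn.Linear(d_model, num_answers)
+
+     def forward(self, question_ids, image_feats):
+         # question_ids: (B, T) token ids; image_feats: (B, 17, 2048) from the
+         # frozen ResNet-152 (1 global) and Faster R-CNN (16 region) backbones.
+         tokens = torch.cat([self.image_proj(image_feats),
+                             self.text_embed(question_ids)], dim=1)
+         fused = self.fusion(tokens)
+         return self.classifier(fused.mean(dim=1))  # logits over the answers
+
+ model = FusionVQA()
+ logits = model(torch.randint(0, 32128, (2, 12)), torch.randn(2, 17, 2048))
+ loss = nn.CrossEntropyLoss()(logits, torch.tensor([3, 42]))
+ ```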
+
+ # 4.2 Experimental Setup
+
+ We consider three VQA benchmarks: VQA2.0 (Goyal et al., 2017), GQA (Hudson and Manning, 2019), and OKVQA (Marino et al., 2019). These datasets have their own characteristics and thus test different capabilities of VQA models. For instance, GQA puts emphasis on reasoning and OKVQA on external knowledge, whereas VQA2.0 is more general; VQA2.0 and GQA are orders of magnitude larger than OKVQA; GQA is generated using a question engine while VQA2.0 and OKVQA are human-annotated.
+
+ For training and evaluating on VQA2.0, we use the standard train/dev splits *train2014 and minival2014 (Jiang et al., 2018). For GQA, we use the balanced v1.2 version, combine the train and val splits for training, and use the testdev split for evaluation, following the official guideline<sup>3</sup> and Tan and Bansal (2019). For OKVQA, we use the train/val splits for training/evaluation. Table 3 summarizes the sizes of the different datasets.
+
+ Evaluation Settings and Baselines. The main goal of our experiments is to explore the utility of our $\mathrm{VQ^2A}$ data for transfer learning, as training or evaluation data.
178
+
179
+ Our main focus in this paper is on zero-shot evaluation. Still, fine-tuning would provide additional insight into using our induced data for pre-training. Therefore, following (Banerjee et al., 2021), we train VQA models on the generated $\mathrm{VQ^2A}$ data and then evaluate them in two settings: (i) zero-shot evaluation, in which we evaluate our models as-is on the dev split of VQA2.0, GQA, or OKVQA; and (ii) fully-supervised fine-tuning evaluation, in which we further fine-tune our models on the training split of VQA2.0, GQA, or OKVQA before evaluating them. When training on $\mathrm{VQ^2A}$ data, we explore training on $\mathrm{VQ^2A}$-COCO only, on $\mathrm{VQ^2A}$-CC3M only, and in two stages on $\mathrm{VQ^2A}$-CC3M followed by $\mathrm{VQ^2A}$-COCO ( $\mathrm{VQ^2A}$ CC3M $\rightarrow$ COCO).
180
+
181
+ Our baselines, which do not use $\mathrm{VQ^2A}$ data, include (i) our VQA model trained on the template-based question generation data COCOQA<sup>4</sup> (Ren et al., 2015a), (ii) the state-of-the-art zero-shot WeaQA (Banerjee et al., 2021) and its fully-supervised variants, and (iii) our VQA model trained in a supervised manner on each of the target benchmarks' training data.
182
+
183
+ Metrics. To be compatible with prior work, on VQA2.0 and OKVQA we measure the standard VQA Accuracy: the average score over the 9-answer subsets of the 10 ground-truth answers<sup>5</sup>, where each score is min(#answer occurrences/3, 1). On GQA, we measure Top-1 Accuracy against the single ground-truth answer.
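+
+ Concretely, the metric can be computed as below; answer-string normalization, which the official evaluation also applies, is omitted here for brevity.
+
+ ```python
+ # VQA Accuracy: average of min(#matches / 3, 1) over all 9-answer
+ # subsets of the 10 ground-truth answers.
+ from itertools import combinations
+
+ def vqa_accuracy(prediction: str, gt_answers: list) -> float:
+     assert len(gt_answers) == 10
+     scores = [min(sum(a == prediction for a in subset) / 3.0, 1.0)
+               for subset in combinations(gt_answers, 9)]
+     return sum(scores) / len(scores)
+
+ # An answer given by 4 of 10 annotators already gets full credit, since
+ # every 9-answer subset still contains it at least 3 times.
+ print(vqa_accuracy("yes", ["yes"] * 4 + ["no"] * 6))
+ ```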
184
+
185
+ # 5 Results
186
+
187
+ In this section, we report several sets of experimental results that shed light on both the accuracy and the robustness of VQA models trained on $\mathrm{VQ^2A}$ data; additional results, analysis, and ablation studies are in Appendix C.
188
+
189
+ # 5.1 Zero-Shot Setting
190
+
191
+ Table 4 summarizes the outcomes of our VQA experiments on various benchmarks. Our main result is that the $\mathrm{VQ^2A}$ models achieve new state-of-the-art results in the zero-shot transfer learning setting. The improvement in performance is large: to the best of our knowledge, the previous state-of-the-art zero-shot accuracy was $46.8\%$ on VQA2.0 and $33.7\%$ on GQA by WeaQA (Banerjee et al., 2021), which also induces its training VQA data from COCO Captions. Our $\mathrm{VQ^2A}$-COCO model reaches $60.0\%$ on VQA2.0 and $51.3\%$ on GQA, an absolute improvement of $+13.2\%$ and $+17.6\%$ , respectively. Even higher accuracy for the zero-shot setting – $61.1\%$ (VQA2.0) and $52.1\%$ (GQA) – is reached with the $\mathrm{VQ^2A}$ CC3M $\rightarrow$ COCO model (trained first on the CC3M-derived data and then fine-tuned on the COCO-derived data), establishing new state-of-the-art results.
+
+ <table><tr><td rowspan="2">Approach</td><td colspan="3">Evaluation Benchmark</td></tr><tr><td>VQA2.0</td><td>GQA</td><td>OKVQA</td></tr><tr><td colspan="4">Zero-shot</td></tr><tr><td>VQ2A COCO, nouns only</td><td>10.5</td><td>-</td><td>-</td></tr><tr><td>COCOQA</td><td>11.7</td><td>4.4</td><td>6.3</td></tr><tr><td>WeaQA ZSL</td><td>46.8</td><td>33.7</td><td>-</td></tr><tr><td>VQ2A COCO</td><td>60.0</td><td>51.3</td><td>18.0</td></tr><tr><td>VQ2A CC3M</td><td>56.5</td><td>49.9</td><td>19.1</td></tr><tr><td>VQ2A CC3M → COCO</td><td>61.1</td><td>52.1</td><td>19.7</td></tr><tr><td>VQ2A CC3M +D</td><td>57.9</td><td>50.0</td><td>19.8</td></tr><tr><td colspan="4">Fully-supervised</td></tr><tr><td>WeaQA FSL</td><td>65.3</td><td>55.2</td><td>-</td></tr><tr><td>w/o VQ2A data</td><td>68.8</td><td>61.8</td><td>22.1</td></tr><tr><td>w. VQ2A COCO</td><td>71.6</td><td>63.3</td><td>36.0</td></tr><tr><td>w. VQ2A CC3M</td><td>71.3</td><td>63.4</td><td>39.0</td></tr><tr><td>w. VQ2A CC3M → COCO</td><td>71.4</td><td>64.0</td><td>39.3</td></tr><tr><td>Human performance</td><td>82.4†</td><td>89.3‡</td><td>82.8†</td></tr></table>
+
+ † from the inter-annotator agreement of ground-truth answers.
+ ‡ from (Hudson and Manning, 2019).
+
+ Table 4: $\mathrm{VQ^2A}$ as training data. Accuracy in zero-shot and fully-supervised settings. All results use our architecture, except WeaQA ZSL and WeaQA FSL, which are the zero-shot (ZSL + Patches + Encoder) and fully-supervised (FSL + Patches + Encoder) models in (Banerjee et al., 2021), respectively. +D stands for recovered raw CC3M alt-texts with digits.
+
+ Training the same model architecture on the manually-constructed VQA2.0 and GQA training sets in a fully-supervised manner achieves $68.8\%$ and $61.8\%$ accuracy, respectively. Hence, our results significantly close the performance gap between automatically-generated and manually-constructed training sources, indicating that the $\mathrm{VQ^2A}$ method may reduce the need for human-curated VQA training examples.
+
+ The captions for COCO images are carefully annotated to be of high quality (Chen et al., 2015). Additionally, the VQA2.0 images are taken from COCO. To test the robustness of $\mathrm{VQ^2A}$ , we also evaluate a $\mathrm{VQ^2A}$-CC3M model. While CC3M contains more image-alt-text pairs than COCO (see Table 3), the images are from a different distribution and the text annotations are noisier and may represent a larger spectrum of discourse intents (Alikhani et al., 2020). In spite of these differences, the gap between COCO-based and CC3M-based $\mathrm{VQ^2A}$ models is not large: $60.0\%$ vs. $56.5\%$ on VQA2.0 and $51.3\%$ vs. $49.9\%$ on GQA. This result strengthens our previous observation, in that it does not seem to be crucial that the starting captions are manually-annotated; it appears that "silver" annotations such as the ones provided by CC3M are competitive in zero-shot VQA performance.
+
+ To cover the types of answers present in VQA benchmarks, there is a need for thorough extraction of various answer/question types (Section 3). The QACE model (Lee et al., 2021), for example, focuses only on noun phrases as answer types. By analyzing the VQA2.0 dev set, we find that only $32\%$ of its answers are nouns. As such, it makes sense that, when limiting extraction to only this answer type, the VQA Accuracy of $\mathrm{VQ^2A}$-COCO is $10.5\%$ , compared to the $60\%$ achieved with full coverage. As another example, our model trained on COCOQA (Ren et al., 2015a), which focuses on a few answer types and one-word answers, barely surpasses the accuracy of our COCO, nouns only baseline. For similar reasons, we want to be able to generate 'how many' questions from the CC3M data, even though the published annotations have been stripped of digits and numerals. To solve this problem, we recover the original captions from the CC3M URLs, generate questions of the type 'how many', and train an additional $\mathrm{VQ^2A}$-CC3M+D model. The results in Table 4 show a small but consistent improvement over vanilla $\mathrm{VQ^2A}$-CC3M, further closing the gap between $\mathrm{VQ^2A}$ models using curated "gold" captions and noisier "silver" captions.
+
+ To gain further insights, we provide a breakdown of VQA Accuracy per VQA2.0 question type in Table 5. Boolean questions are the easiest, and all models perform well on them. More challenging question types are 'How many?' and 'What is'. One reason could be the validity of various answers, like "several" for counts. 'What time?' is the most difficult, probably due to the lack of such information in captions.
+
+ Finally, we provide zero-shot results on the more difficult OKVQA benchmark. In this setting, a supervised model reaches $22.1\%$ accuracy, while $\mathrm{VQ^2A}$ models in the zero-shot setting come close to that: $18.0\%$ with COCO and $19.1\%$ with CC3M, while their combination reaches $19.7\%$ , only $2.4\%$ shy of the supervised level. This result also supports the conclusion that creating training data with the $\mathrm{VQ^2A}$ method is a good replacement for small-scale supervised training data.
+
+ <table><tr><td>Question Prefix</td><td>VQA2.0 Supervised</td><td>VQ2A-COCO Zero-shot</td><td>VQ2A-CC3M Zero-shot</td></tr><tr><td>Boolean</td><td>96.3</td><td>93.2</td><td>94.2</td></tr><tr><td>‘What color’</td><td>69.2</td><td>64.8</td><td>56.8</td></tr><tr><td>‘What kind/is’</td><td>52.6</td><td>36.9</td><td>32.1</td></tr><tr><td>‘How many’</td><td>49.3</td><td>29.4</td><td>19.5</td></tr><tr><td>‘Where are/is’</td><td>38.0</td><td>30.0</td><td>25.3</td></tr><tr><td>‘What does’</td><td>33.0</td><td>24.1</td><td>20.3</td></tr><tr><td>‘What time’</td><td>23.6</td><td>11.9</td><td>12.7</td></tr></table>
+
+ Table 5: Aggregated average Accuracy on VQA2.0 for the most common question types.
+
+ # 5.2 Fully-Supervised Setting
+
+ Another aspect of the $\mathrm{VQ^2A}$ method that we want to evaluate is whether it produces training data that is similar to the human-annotated data or complementary to it. To this end, we perform experiments in which we first train a model using the $\mathrm{VQ^2A}$ data, and then fine-tune it in a supervised manner using the human-annotated training data.
+
+ The results, in the Fully-supervised part of Table 4, tell two stories. For VQA2.0 and GQA, there is a small yet consistent improvement of the fine-tuned models over a model trained directly on the supervised data of each benchmark (labeled w/o $\mathrm{VQ^2A}$ ). This indicates that, at least for these two benchmarks, there is a high overlap in the nature of the signal between the human-annotated data and the $\mathrm{VQ^2A}$ data.
+
+ The results on OKVQA show a different trend. Here, training first with $\mathrm{VQ^2A}$ boosts performance by $+17.2\%$ compared to supervised training without $\mathrm{VQ^2A}$ $(22.1\% \rightarrow 39.3\%)$ . The small scale of the OKVQA training set (Table 3) certainly contributes to this effect, but it also points to another aspect: question-answer pairs that subsume world knowledge can only be made available at scale by means that are not bottlenecked by human-annotation processes.
+
+ # 5.3 Robustness of Existing VQA Training Sets
+
+ So far we have assessed the capability of models trained on $\mathrm{VQ^2A}$ data. As a complementary study, we use 500 manually-validated random samples (see Section 3.5) from the dev part of each $\mathrm{VQ^2A}$ dataset to assess VQA robustness for various training setups. We use the VQA Accuracy metric for the $\mathrm{VQ^2A}$ datasets (10 target answers, see Section 3.4), and Top-1 Accuracy on COCOQA (one target answer).
+
+ Table 6 shows the results.
+
+ <table><tr><td rowspan="2">Training data</td><td colspan="4">Evaluation Benchmark (Acc %)</td></tr><tr><td>COCOQA</td><td>VQA2.0</td><td>VQ2A COCO</td><td>VQ2A CC3M</td></tr><tr><td>COCOQA</td><td>70.3</td><td>11.7</td><td>13.2</td><td>5.8</td></tr><tr><td>VQA2.0</td><td>35.9</td><td>68.8</td><td>44.4</td><td>41.6</td></tr><tr><td>VQ2A COCO</td><td>55.9</td><td>60.0</td><td>72.6</td><td>56.8</td></tr><tr><td>VQ2A CC3M</td><td>42.1</td><td>56.5</td><td>65.6</td><td>76.4</td></tr></table>
+
+ Table 6: Manually-validated VQ²A data for robustness evaluation: Accuracy when trained on "row" and tested on "column"; diagonal (gray) numbers denote the supervised setting, non-diagonal numbers denote the zero-shot cross-dataset setting. Best zero-shot is in bold.
+
+ The fully-supervised models (diagonal; similar training and test distributions) achieve in-domain Accuracy around $70\%$ , with $\mathrm{VQ^2A}$-CC3M achieving a slightly higher $76.4\%$ . When tested out-of-domain (non-diagonal), however, each model suffers performance degradation to a different degree. First, the model based on the template-generated COCOQA does not generalize at all. Second, the VQA2.0 model sees significant accuracy drops, even on $\mathrm{VQ^2A}$-COCO $(44.4\%)$ and COCOQA $(35.9\%)$ , which share a similar image domain with VQA2.0. This result provides further evidence that progress made on the VQA2.0 benchmark may not reflect progress on the VQA task in full (Chao et al., 2018a; Bras et al., 2020).
+
+ In contrast, both $\mathrm{VQ^2A}$-COCO and $\mathrm{VQ^2A}$-CC3M perform robustly, with more modest performance drops. For instance, on COCOQA, $\mathrm{VQ^2A}$-CC3M achieves even better performance than VQA2.0 (42.1% vs. 35.9%) despite being tested on out-of-domain images. This suggests that the $\mathrm{VQ^2A}$ training data possesses a higher degree of question variation, provides better answer coverage, and exhibits fewer biases than the manually-curated VQA2.0 training data, at least enough to address these different benchmarks.
+
+ # 6 Considerations and Limitations
+
+ Automatic data generation is prone to erroneous outputs. In $\mathrm{VQ^2A}$ these may include hallucinations of the generative model, incorrect negative sampling, and bad answer span extraction. In addition, the image captions may contain details that are not in the image, e.g., details known only to the photo taker or personal opinions, or information that is inconsistent with the image due to human mistakes and biases. We address some of these issues automatically, filtering bad generations via question answering round-trip validation. In addition, the classification task itself curbs the effects of such errors through the use of a fixed answer vocabulary. Yet, for automatic generation to be more robust, additional methods to narrow down mistakes or mismatches need to be developed.
+
+ The resulting VQA model incorporates and may reinforce some of the biases and stereotypes present in the data. For instance, it may learn that answering questions such as "What is the gender of this person?" is a binary choice dictated by shallow cues, or that the answer to "For whom is this room decorated?" depends on stereotypical features present (or not) in the room depicted in the image. Mitigation strategies for such issues go beyond the scope of this paper, but we encourage the research community to consider addressing these issues as central for the successful deployment of this technology.
+
+ # 7 Conclusions
+
+ In this paper, we show that high-quality VQA training data can be automatically induced at scale from existing image-caption datasets. Our method, $\mathrm{VQ^2A}$ , annotates candidate answers using syntactic parsing of the captions and then derives questions for them using neural models for question generation and question answering verification. We demonstrate that VQA models trained only on such data exhibit high zero-shot performance with new state-of-the-art results on VQA2.0 and GQA. Additionally, we provide evidence for the brittleness of VQA systems built with human-annotated examples compared to the ones built with automatically-induced image-question-answer triplets using $\mathrm{VQ^2A}$ .
+
+ For future work, we plan to explore even larger automatically-curated image-text datasets, consisting of billions of examples. In addition, we want to test the applicability of $\mathrm{VQ}^2\mathrm{A}$ to languages other than English, for which human-annotated VQA data is scarce.
+
+ Acknowledgments. We would like to thank Or Honovich, Hagai Taitelbaum, and Roee Aharoni for their help with question generation, Sebastian Goodman for his help with the VQA infrastructure, Piyush Sharma for his help with Conceptual Captions, Nassim Oufattole for his early exploration of question generation, and Gal Elidan, Sasha Goldshtein, and Avinatan Hassidim for their useful feedback.
+
+ # References
+
+ Aishwarya Agrawal, Dhruv Batra, Devi Parikh, and Aniruddha Kembhavi. 2018. Don't just assume; look and answer: Overcoming priors for visual question answering. In CVPR.
+ Arjun Akula, Soravit Changpinyo, Boqing Gong, Piyush Sharma, Song-Chun Zhu, and Radu Soricut. 2021. CrossVQA: Scalably generating benchmarks for systematically testing vqa generalization. In EMNLP.
+ Chris Alberti, Daniel Andor, Emily Pitler, Jacob Devlin, and Michael Collins. 2019. Synthetic QA corpora generation with roundtrip consistency. In ACL.
+ Malihe Alikhani, Piyush Sharma, Shengjie Li, Radu Soricut, and Matthew Stone. 2020. Cross-modal coherence modeling for caption generation. In ACL.
+ Peter Anderson, Xiaodong He, Chris Buehler, Damien Teney, Mark Johnson, Stephen Gould, and Lei Zhang. 2018. Bottom-up and top-down attention for image captioning and visual question answering. In CVPR.
+ Stanislaw Antol, Aishwarya Agrawal, Jiasen Lu, Margaret Mitchell, Dhruv Batra, C. Lawrence Zitnick, and Devi Parikh. 2015. VQA: Visual question answering. In ICCV.
+ Pratyay Banerjee, Tejas Gokhale, Yezhou Yang, and Chitta Baral. 2021. WeaQA: Weak supervision via captions for visual question answering. In Findings of ACL-IJCNLP.
+ Ronan Le Bras, Swabha Swayamdipta, Chandra Bhagavatula, Rowan Zellers, Matthew E. Peters, Ashish Sabharwal, and Yejin Choi. 2020. Adversarial filters of dataset biases. In ICML.
+ Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, T. J. Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeff Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. 2020. Language models are few-shot learners. In NeurIPS.
+ Soravit Changpinyo, Piyush Sharma, Nan Ding, and Radu Soricut. 2021. Conceptual 12M: Pushing web-scale image-text pre-training to recognize long-tail visual concepts. In CVPR.
+ Wei-Lun Chao, Hexiang Hu, and Fei Sha. 2018a. Being negative but constructively: Lessons learnt from creating better visual question answering datasets. In NAACL.
+ Wei-Lun Chao, Hexiang Hu, and Fei Sha. 2018b. Cross-dataset adaptation for visual question answering. In CVPR.
+ Xinlei Chen, Hao Fang, Tsung-Yi Lin, Ramakrishna Vedantam, Saurabh Gupta, Piotr Dollár, and C. Lawrence Zitnick. 2015. Microsoft COCO Captions: Data collection and evaluation server. arXiv preprint arXiv:1504.00325.
+ Yen-Chun Chen, Linjie Li, Licheng Yu, Ahmed El Kholy, Faisal Ahmed, Zhe Gan, Yu Cheng, and Jingjing Liu. 2020. UNITER: Learning UNiversal Image-TExt Representations. In ECCV.
+
+ Jaemin Cho, Jie Lei, Hao Tan, and Mohit Bansal. 2021. Unifying vision-and-language tasks via text generation. In ICML.
+ Kaustubh D. Dhole and Christopher D. Manning. 2020. SynQG: Syntactic and shallow semantic rules for question generation. In ACL.
+ Esin Durmus, He He, and Mona Diab. 2020. FEQA: A question answering evaluation framework for faithfulness assessment in abstractive summarization. In ACL.
+ Adam Fisch, Kenton Lee, Ming-Wei Chang, Jonathan H. Clark, and Regina Barzilay. 2020. CapWap: Captioning with a purpose. In EMNLP.
+ Manas Gaur, Kalpa Gunaratna, Vijay Srinivasan, and Hongxia Jin. 2021. ISEEq: Information seeking question generation using dynamic meta-information retrieval and knowledge graphs. arXiv preprint arXiv:2112.07622.
+ Yash Goyal, Tejas Khot, Douglas Summers-Stay, Dhruv Batra, and Devi Parikh. 2017. Making the V in VQA matter: Elevating the role of image understanding in visual question answering. In CVPR.
+ Mandy Guo, Qinlan Shen, Yinfei Yang, Heming Ge, Daniel Cer, Gustavo Abrego, Keith Stevens, Noah Constant, Yun-Hsuan Sung, Brian Strope, and Ray Kurzweil. 2018. Effective parallel corpus mining using bilingual sentence embeddings. In WMT.
+ Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In CVPR.
+ Michael Heilman and Noah A. Smith. 2009. Question generation via overgenerating transformations and ranking. Technical report, Carnegie Mellon University.
+ Or Honovich, Leshem Choshen, Roee Aharoni, Ella Neeman, Idan Szpektor, and Omri Abend. 2021. $Q^2$ : Evaluating factual consistency in knowledge-grounded dialogues via question generation and question answering. In EMNLP.
+ Drew A. Hudson and Christopher D. Manning. 2019. GQA: A new dataset for real-world visual reasoning and compositional question answering. In CVPR.
+ Chao Jia, Yinfei Yang, Ye Xia, Yi-Ting Chen, Zarana Parekh, Hieu Pham, Quoc V. Le, Yunhsuan Sung, Zhen Li, and Tom Duerig. 2021. Scaling up visual and vision-language representation learning with noisy text supervision. In ICML.
+ Yu Jiang, Vivek Natarajan, Xinlei Chen, Marcus Rohrbach, Dhruv Batra, and Devi Parikh. 2018. Pythia v0.1: the winning entry to the vqa challenge 2018. arXiv preprint arXiv:1807.09956.
+ Kushal Kafle and Christopher Kanan. 2017. An analysis of visual question answering algorithms. In ICCV.
+ Kushal Kafle, Mohammed Yousefhussien, and Christopher Kanan. 2017. Data augmentation for visual question answering. In INLG.
+ Jihyung Kil, Cheng Zhang, Dong Xuan, and Wei-Lun Chao. 2021. Discovering the unknown knowns: Turning implicit knowledge in the dataset into explicit training examples for visual question answering. In EMNLP.
+
+ Ranjay Krishna, Michael Bernstein, and Li Fei-Fei. 2019. Information maximizing visual question generation. In CVPR.
+ Ranjay Krishna, Yuke Zhu, Oliver Groth, Justin Johnson, Kenji Hata, Joshua Kravitz, Stephanie Chen, Yannis Kalantidis, Li-Jia Li, David A. Shamma, Michael Bernstein, and Li Fei-Fei. 2017. Visual Genome: Connecting language and vision using crowdsourced dense image annotations. IJCV, 123(1):32-73.
+ Tom Kwiatkowski, Jennimaria Palomaki, Olivia Redfield, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Kenton Lee, Kristina Toutanova, Llion Jones, Matthew Kelcey, Ming-Wei Chang, Andrew M. Dai, Jakob Uszkoreit, Quoc Le, and Slav Petrov. 2019. Natural Questions: a benchmark for question answering research. TACL, 7:453-466.
+ Hwanhee Lee, Thomas Scialom, Seunghyun Yoon, Franck Dernoncourt, and Kyomin Jung. 2021. QACE: Asking questions to evaluate an image caption. In Findings of EMNLP.
+ Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, and Kai-Wei Chang. 2019. VisualBERT: A simple and performant baseline for vision and language. arXiv preprint arXiv:1908.03557.
+ Xiujun Li, Xi Yin, Chunyuan Li, Xiaowei Hu, Pengchuan Zhang, Lei Zhang, Lijuan Wang, Houdong Hu, Li Dong, Furu Wei, Yejin Choi, and Jianfeng Gao. 2020. Oscar: Object-semantics aligned pre-training for vision-language tasks. In ECCV.
+ Yikang Li, Nan Duan, Bolei Zhou, Xiao Chu, Wanli Ouyang, Xiaogang Wang, and Ming Zhou. 2018. Visual question generation as dual task of visual question answering. In CVPR.
+ Tsung-Yi Lin, Michael Maire, Serge Belongie, Lubomir Bourdev, Ross Girshick, James Hays, Pietro Perona, Deva Ramanan, C. Lawrence Zitnick, and Piotr Dollár. 2014. Microsoft COCO: Common objects in context. In ECCV.
+ Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. 2021. Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. arXiv preprint arXiv:2107.13586.
+ Jiasen Lu, Dhruv Batra, Devi Parikh, and Stefan Lee. 2019. ViLBERT: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. In NeurIPS.
+ Jiasen Lu, Vedanuj Goswami, Marcus Rohrbach, Devi Parikh, and Stefan Lee. 2020. 12-in-1: Multi-task vision and language representation learning. In CVPR.
+ Chenyang Lyu, Lifeng Shang, Yvette Graham, Jennifer Foster, Xin Jiang, and Qun Liu. 2021. Improving unsupervised question answering via summarization-informed question generation. In EMNLP.
+ Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. 2019. OK-VQA: A visual question answering benchmark requiring external knowledge. In CVPR.
+ Yosi Mass, Boaz Carmeli, Haggai Roitman, and David Konopnicki. 2020. Unsupervised FAQ retrieval with question generation and BERT. In ACL.
+
+ Nasrin Mostafazadeh, Ishan Misra, Jacob Devlin, Margaret Mitchell, Xiaodong He, and Lucy Vanderwende. 2016. Generating natural questions about an image. In ACL.
+ Shashi Narayan, Goncalo Simoes, Ji Ma, Hannah Craighead, and Ryan McDonald. 2020. QURIOUS: Question generation pretraining for text generation. arXiv preprint arXiv:2004.11026.
+ Preksha Nema, Akash Kumar Mohankumar, Mitesh M. Khapra, Balaji Vasan Srinivasan, and Balaraman Ravindran. 2019. Let's ask again: Refine network for automatic question generation. In EMNLP-IJCNLP.
+ Raul Puri, Ryan Spring, Mohammad Shoeybi, Mostofa Patwary, and Bryan Catanzaro. 2020. Training question answering models from synthetic data. In EMNLP.
+ Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, Gretchen Krueger, and Ilya Sutskever. 2021. Learning transferable visual models from natural language supervision. In ICML.
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. JMLR.
+ Pranav Rajpurkar, Robin Jia, and Percy Liang. 2018. Know what you don't know: Unanswerable questions for SQuAD. In ACL.
+ Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. SQuAD: 100,000+ questions for machine comprehension of text. In EMNLP.
+ Justus J. Randolph. 2005. Free-marginal multirater kappa (multirater $\kappa_{\mathrm{free}}$ ): An alternative to Fleiss' fixed-marginal multirater kappa. Joensuu Learning and Instruction Symposium.
+ Mengye Ren, Ryan Kiros, and Richard Zemel. 2015a. Exploring models and data for image question answering. In NIPS.
+ Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. 2015b. Faster R-CNN: Towards real-time object detection with region proposal networks. In NIPS.
+ Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy, Aditya Khosla, Michael Bernstein, Alexander C. Berg, and Li Fei-Fei. 2015. ImageNet large scale visual recognition challenge. IJCV, 115(3):211-252.
+ Meet Shah, Xinlei Chen, Marcus Rohrbach, and Devi Parikh. 2019. Cycle-consistency for robust visual question answering. In CVPR.
+ Piyush Sharma, Nan Ding, Sebastian Goodman, and Radu Soricut. 2018. Conceptual Captions: A cleaned, hypernymed, image alt-text dataset for automatic image captioning. In ACL.
+ Noam Shazeer and Mitchell Stern. 2018. Adafactor: Adaptive learning rates with sublinear memory cost. In ICML.
+ Amanpreet Singh, Vedanuj Goswami, Vivek Natarajan, Yu Jiang, Xinlei Chen, Meet Shah, Marcus Rohrbach, Dhruv Batra, and Devi Parikh. 2020. MMF: A multimodal framework for vision and language research. https://github.com/facebookresearch/mmf.
+
+ Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. 2020. VL-BERT: Pre-training of generic visual-linguistic representations. In ICLR.
+ Hao Tan and Mohit Bansal. 2019. LXMERT: Learning cross-modality encoder representations from transformers. In EMNLP-IJCNLP.
+ Damien Teney and Anton van den Hengel. 2016. Zero-shot visual question answering. arXiv preprint arXiv:1611.05546.
+ Kohei Uehara, Antonio Tejero-de-Pablos, Yoshitaka Ushiku, and Tatsuya Harada. 2018. Visual question generation for class acquisition of unknown objects. In ECCV.
+ Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In NeurIPS.
+ Alex Wang, Kyunghyun Cho, and Mike Lewis. 2020. Asking and answering questions to evaluate the factual consistency of summaries. In ACL.
+ Zirui Wang, Jiahui Yu, Adams Wei Yu, Zihang Dai, Yulia Tsvetkov, and Yuan Cao. 2021. SimVLM: Simple visual language model pretraining with weak supervision. arXiv preprint arXiv:2108.10904.
+ Jialin Wu, Zeyuan Hu, and Raymond J. Mooney. 2019. Generating question relevant captions to aid visual question answering. In NeurIPS.
+ Xing Xu, Tan Wang, Yang Yang, Alan Hanjalic, and Heng Tao Shen. 2021. Radial graph convolutional network for visual question generation. IEEE Transactions on Neural Networks and Learning Systems, 32(4):1654-1667.
+ Antoine Yang, Antoine Miech, Josef Sivic, Ivan Laptev, and Cordelia Schmid. 2021a. Just ask: Learning to answer questions from millions of narrated videos. In ICCV.
+ Jianwei Yang, Jiasen Lu, Stefan Lee, Dhruv Batra, and Devi Parikh. 2018. Visual curiosity: Learning to ask questions to learn visual recognition. In CoRL.
+ Zhengyuan Yang, Zhe Gan, Jianfeng Wang, Xiaowei Hu, Yumao Lu, Zicheng Liu, and Lijuan Wang. 2021b. An empirical study of GPT-3 for few-shot knowledge-based VQA. arXiv preprint arXiv:2109.05014.
+ Desen Yuan. 2021. Language bias in visual question answering: A survey and taxonomy. arXiv preprint arXiv:2111.08531.
+ Lu Yuan, Dongdong Chen, Yi-Ling Chen, Noel Codella, Xiyang Dai, Jianfeng Gao, Houdong Hu, Xuedong Huang, Boxin Li, Chunyuan Li, Ce Liu, Mengchen Liu, Zicheng Liu, Yumao Lu, Yu Shi, Lijuan Wang, Jianfeng Wang, Bin Xiao, Zhen Xiao, Jianwei Yang, Michael Zeng, Luowei Zhou, and Pengchuan Zhang. 2021. Florence: A new foundation model for computer vision. arXiv preprint arXiv:2111.11432.
+ Pengchuan Zhang, Xiujun Li, Xiaowei Hu, Jianwei Yang, Lei Zhang, Lijuan Wang, Yejin Choi, and Jianfeng Gao. 2021. VinVL: Revisiting visual representations in vision-language models. In CVPR.
+ Shijie Zhang, Lizhen Qu, Shaodi You, Zhenglu Yang, and Jiawan Zhang. 2017. Automatic generation of grounded visual questions. In IJCAI.
+
+ Luowei Zhou, Hamid Palangi, Lei Zhang, Houdong Hu, Jason J. Corso, and Jianfeng Gao. 2020. Unified vision-language pre-training for image captioning and VQA. In AAAI.
+
+ # A Additional Examples and Analysis of Generated Data
+
+ Fig. 5 provides additional examples of VQA triplets generated by $\mathrm{VQ^2A}$-COCO and $\mathrm{VQ^2A}$-CC3M, showing their diversity compared to what can be found in VQA2.0.
+
+ Table 7 presents the top question prefixes and their distribution in the VQA2.0 and $\mathrm{VQ^2A}$ -based dev sets, showing significant differences between the datasets. Many questions in VQA2.0 are of boolean answer type, e.g., 'is the', 'is there' and 'does the', summing to $29.2\%$ . In addition, 'how many' questions are frequent $(11\%)$ . Finally, questions about the color attribute stand out with $9\%$ . On the other hand, COCO and CC3M questions are more explanatory in nature, with the majority of questions ( $45.5\%$ in COCO, $43.9\%$ in CC3M) of the form 'what is/are/do/does/type'. Another type that is more prominent in COCO and CC3M is 'where is/are' questions, which are more than twice as frequent as in VQA2.0.
+
+ Another difference between the manually-curated VQA2.0 dataset and the automatically-generated $\mathrm{VQ^2A}$ datasets is the question and answer word-length distribution (Figs. 6 and 7). The questions in $\mathrm{VQ^2A}$-CC3M and $\mathrm{VQ^2A}$-COCO have an average word length of 8.3 and 7.8, respectively, while the VQA2.0 average is 6.3. Inspecting the generated questions, we noticed that the QG model tends to quote parts of the caption, extending the question length. The average answer word length in $\mathrm{VQ^2A}$-CC3M and $\mathrm{VQ^2A}$-COCO is 1.76 and 1.85 words, respectively, while in VQA2.0 it is 1.1. While all answers tend to be short, the $\mathrm{VQ^2A}$ -induced datasets have more "detailed" answers of length 2-3 words.
+
+ Fig. 8 offers a more visual view of the differences in question type distribution presented in Table 7.
+
+ Table 8 depicts the percentage of questions of each type (prefix) that were retained (not filtered out) when applying the question-answer validation phase of $\mathrm{VQ^2A}$ (Section 3.3).
+
+ # B Implementation Details
+
+ # B.1 Details on Data Processing
+
+ Our default question and answer preprocessor is based on (Jiang et al., 2018; Singh et al., 2020)<sup>6</sup>, with the exception of GQA, for which we use a separate preprocessor<sup>7</sup>. The unified answer vocabulary used in our experiments is the union of the top answers from existing COCO-based VQA benchmarks: VQA2.0 (3,128, minimum answer frequency=9), GQA (1,843, all), OKVQA (2,000, top), and Visual7W (3,140, minimum answer frequency=3), of total size 5,971.
+
+ For each unique image-question pair generated by our $\mathrm{VQ^2A}$ approach, we reduce or expand the list of possibly different candidate answers based on the list length, such that we eventually have a target list of answers of size 10. In particular, we first sort the answers based on their lengths ("dog" before "black dog") and select up to the top-10 answers. If the list length is less than 10, we replicate each of the top answers one-by-one until we have a list of size 10, similar to the process in OKVQA (Marino et al., 2019). This ensures that we can adopt VQA Accuracy for the performance comparison.
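+
+ The reduce-or-expand step amounts to the following (a direct transcription of the procedure above):
+
+ ```python
+ def make_answer_target(answers, size=10):
+     # Sort by length ("dog" before "black dog"), truncate to `size`, then
+     # replicate the top answers one by one until the list has `size` entries.
+     ranked = sorted(answers, key=len)[:size]
+     target = list(ranked)
+     i = 0
+     while len(target) < size:
+         target.append(ranked[i % len(ranked)])
+         i += 1
+     return target
+
+ print(make_answer_target(["black dog", "dog", "puppy"]))
+ # ['dog', 'puppy', 'black dog', 'dog', 'puppy', 'black dog', 'dog', ...]
+ ```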
+
+ # B.2 Details on Training and Evaluating Visual Question Answering
+
+ Our code for the VQA model is based on the Flaxformer framework. Both the text encoder and the multi-modal encoder have 6 blocks of Transformers, each of which consists of self-attention and a feed-forward network. We use 12 heads of inner dimension of 64, the embedding dimension of 768, and the MLP dimension of 2048. During training, we use Adafactor (Shazeer and Stern, 2018), with an initial learning rate of 0.0025, a linear warm-up step of 5K for (pre-)training and 1K for fine-tuning, and an "inverse square root" learning rate schedule $\frac{1}{\sqrt{\max(n,k)}}$ , where $n$ is the current training iteration and $k$ is the number of warm-up steps. We use a dropout rate of 0.0. We train each of the models with data parallelism using 16 Cloud TPU Pods<sup>9</sup>, each with a batch size of 256, unless otherwise stated.
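+
+ The learning-rate schedule can be summarized as follows; how exactly the linear warm-up composes with the inverse-square-root decay is our reading of the description above:
+
+ ```python
+ import math
+
+ def learning_rate(n: int, k: int = 5000, base: float = 0.0025) -> float:
+     """Linear warm-up over the first k steps, then decay proportional to
+     1 / sqrt(max(n, k)), scaled so that the peak rate equals `base`."""
+     warmup = min(1.0, n / k)
+     decay = math.sqrt(k) / math.sqrt(max(n, k))  # 1.0 while n <= k
+     return base * warmup * decay
+ ```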
+
+ The default numbers of training steps during training and fine-tuning are 100K and 30K, respectively. The exceptions are OKVQA (30K/15K) and $\mathrm{VQ^2A}$-CC3M (150K/NA). In addition, in the two-stage training where we fine-tune a $\mathrm{VQ^2A}$-CC3M model with $\mathrm{VQ^2A}$-COCO, we also use 100K steps. Each single training run on average took fewer than 10 hours, including the time used to evaluate a checkpoint (every 1K iterations).
+
+ ![](images/401d908563ede5fcefd8809cc8d401dda3c8285e97b2b62ab844ffe64838848a.jpg)
+
+ ![](images/aa4ebf3157baddebfab39dd17dc3536d2eb23245ab60d1e2e8c90a3f2f29ce77.jpg)
+
+ <table><tr><td>Question</td><td>Answers</td></tr><tr><td>How many people are in the picture</td><td>“0”</td></tr><tr><td>Is there a stoplight at the intersection of S Lane St. and 12th Ave S.</td><td>“no”</td></tr><tr><td>Is there a street sign in the picture</td><td>“yes”</td></tr><tr><td>On what is a street sign pictured</td><td>“hill”</td></tr><tr><td>What color is the street sign next to the dirt field by the playground</td><td>“green”</td></tr><tr><td>What is in front of the street sign</td><td>“gravel”</td></tr><tr><td>Which avenue meets S Lane St</td><td>“12th”</td></tr><tr><td>Which street does 12th Ave S intersect with</td><td>“lane st”</td></tr><tr><td>What does a street sign do at the intersection of S Lane St. and 12th Ave S.</td><td>“marks”</td></tr></table>
+
+ ![](images/7507bdbf79fcdfea54b746ea9d78cb36909016dcdd35b596eac9f9e080ff948a.jpg)
+ Figure 5: Additional examples from $\mathrm{VQ^2A}$-COCO (top) and $\mathrm{VQ^2A}$-CC3M (bottom). Questions with the green background are present in VQA2.0.
+
+ <table><tr><td>Question</td><td>Answers</td></tr><tr><td>Where is the woman in the photo</td><td>“car”</td></tr><tr><td>Is the woman in the picture doing her makeup</td><td>“yes”</td></tr><tr><td>What adjective would you use to describe the woman in the car</td><td>“pretty”</td></tr><tr><td>Who is in the car doing makeup</td><td>“pretty young woman”</td></tr><tr><td>What is the woman in the car doing</td><td>“makeup”, “doing makeup”</td></tr></table>
+
+ ![](images/24342f3c74234de0d5d6c9b31eb15d70810b32f35e979fa071221d0a0bd35b82.jpg)
+
+ <table><tr><td>Question</td><td>Answers</td></tr><tr><td>Pink shoes are for whom</td><td>&quot;women&quot;</td></tr><tr><td>What are the pink shoes for</td><td>&quot;for women&quot;</td></tr><tr><td>What item for women is all pink</td><td>&quot;shoes&quot;</td></tr><tr><td>What are all pink</td><td>&quot;shoes for women&quot;</td></tr></table>
+
+ <table><tr><td>Question Prefix</td><td>VQA2.0 %</td><td>\( \mathrm{VQ}^2\mathrm{A}\text{-}\mathrm{COCO}\% \)</td><td>\( \mathrm{VQ}^2\mathrm{A}\text{-}\mathrm{CC3M}\% \)</td><td>Question Example from \( \mathrm{VQ}^2\mathrm{A}\text{-}\mathrm{COCO} \)</td></tr><tr><td>‘What is’</td><td>0.140</td><td>0.288</td><td>0.217</td><td>‘What is the man swinging?’</td></tr><tr><td>‘How many’</td><td>0.110</td><td>0.022</td><td>0.005</td><td>‘How many people are standing in front of a tv?’</td></tr><tr><td>‘Is the’</td><td>0.098</td><td>0.084</td><td>0.053</td><td>‘Is the baby wearing a Santa hat?’</td></tr><tr><td>‘What color’</td><td>0.090</td><td>0.022</td><td>0.018</td><td>‘What color is the man’s hair?’</td></tr><tr><td>‘Is this’</td><td>0.082</td><td>0.008</td><td>0.015</td><td>‘Is this a safe way to fly?’</td></tr><tr><td>‘Is there’</td><td>0.037</td><td>0.011</td><td>0.022</td><td>‘Is there a pool in the backyard?’</td></tr><tr><td>‘What kind’</td><td>0.025</td><td>0.049</td><td>0.078</td><td>‘What kind of truck is the yellow one?’</td></tr><tr><td>‘What are’</td><td>0.024</td><td>0.049</td><td>0.022</td><td>‘What are the sheep and other animals roaming?’</td></tr><tr><td>‘Are the’</td><td>0.024</td><td>0.022</td><td>0.007</td><td>‘Are the apples on the cutting board green?’</td></tr><tr><td>‘Are there’</td><td>0.020</td><td>0.002</td><td>0.004</td><td>‘Are there any exceptions to this rule?’</td></tr><tr><td>‘Where is’</td><td>0.019</td><td>0.071</td><td>0.034</td><td>‘Where is the tennis player pictured?’</td></tr><tr><td>‘What type’</td><td>0.018</td><td>0.006</td><td>0.022</td><td>‘What type of picture is this?’</td></tr><tr><td>‘Is it’</td><td>0.017</td><td>0.001</td><td>0.005</td><td>‘Is it possible to eat a whole pizza?’</td></tr><tr><td>‘Does the’</td><td>0.014</td><td>0.007</td><td>0.007</td><td>‘Does the adult giraffe have any young?’</td></tr><tr><td>‘What does’</td><td>0.011</td><td>0.015</td><td>0.038</td><td>‘What does a giraffe do with its long neck?’</td></tr><tr><td>‘Where are’</td><td>0.006</td><td>0.032</td><td>0.014</td><td>‘Where are the skateboarders in the photo?’</td></tr><tr><td>‘Who is’</td><td>0.005</td><td>0.054</td><td>0.020</td><td>‘Who is in the photo?’</td></tr><tr><td>‘What do’</td><td>0.002</td><td>0.003</td><td>0.018</td><td>‘What do the father and son ride?’</td></tr><tr><td>‘What was’</td><td>0.000</td><td>0.009</td><td>0.023</td><td>‘What was the woman looking at?’</td></tr><tr><td>‘What did’</td><td>0.000</td><td>0.001</td><td>0.021</td><td>‘What did the cat lay inside of?’</td></tr></table>
387
+
388
+ Table 7: Most popular question prefix distribution on valid questions whose answers are in the $6\mathrm{k}$ target vocabulary.
389
+
390
+ ![](images/10ada56b3e11df4f6e550bcd938a9719c2bbfe1676f5261809547c2e717232ed.jpg)
391
+ Figure 6: Question length distributions per dataset.
392
+
393
+ ![](images/f72da7ae68759debe30a09b26182116f1b5881262c71fb58475479fa477dcae5.jpg)
394
+ Figure 7: Answer length distributions per dataset.
395
+
396
+ a checkpoint every 1K iterations. For instance, training on VQA2.0 took approximately 7 hours, $\mathrm{VQ^2A}$ COCO 13 hours, and $\mathrm{VQ^2A}$ CC3M 10 hours. Note that $\mathrm{VQ^2A}$ COCO has a larger evaluation set than the other datasets, hence taking longer to
397
+
398
+ train than $\mathrm{VQ^2A}$ CC3M.
399
+
400
+ The hyperparameters for Transformers are selected to be consistent with a T5-base checkpoint, which has 220 million parameters (Raffel et al., 2020) (except that now we have 2 encoders rather
401
+
402
+ ![](images/d417da0685f264b2b1c9abe2b988c2672b1ffeaf5d8b1f36464183aa67dd9c25.jpg)
403
+
404
+ ![](images/a80ccb50ffb4979c3c9be9be7b36cf88cb5cd784925b2ed233b5cab6a3ad6fd5.jpg)
405
+
406
+ ![](images/ab67115d220edf808c9c6c93d6a01dea206522efaf0250799b25ff8de1bf3fc9.jpg)
407
+ Figure 8: VQA2.0 (top), $\mathrm{VQ^2A}$ -COCO (middle), $\mathrm{VQ^2A}$ -CC3M (bottom) sunburst plots of question prefixes.
408
+
409
+ <table><tr><td>Question Prefix</td><td>VQ2A-COCO Filter Pass Ratio</td><td>VQ2A-CC3M Filter Pass Ratio</td></tr><tr><td>‘What is’</td><td>0.73</td><td>0.65</td></tr><tr><td>‘Is the’</td><td>0.64</td><td>0.39</td></tr><tr><td>‘What kind’</td><td>0.84</td><td>0.80</td></tr><tr><td>‘How many’</td><td>0.83</td><td>0.51</td></tr><tr><td>‘What color’</td><td>0.92</td><td>0.90</td></tr><tr><td>‘Where is’</td><td>0.79</td><td>0.79</td></tr><tr><td>‘Is this’</td><td>0.83</td><td>0.62</td></tr><tr><td>‘What are’</td><td>0.75</td><td>0.71</td></tr><tr><td>‘Who is’</td><td>0.85</td><td>0.79</td></tr><tr><td>‘Is there’</td><td>0.73</td><td>0.47</td></tr><tr><td>‘What does’</td><td>0.75</td><td>0.67</td></tr><tr><td>‘Are the’</td><td>0.58</td><td>0.32</td></tr><tr><td>‘Where are’</td><td>0.80</td><td>0.81</td></tr><tr><td>‘What type’</td><td>0.84</td><td>0.81</td></tr><tr><td>‘What was’</td><td>0.72</td><td>0.67</td></tr><tr><td>‘Does the’</td><td>0.60</td><td>0.43</td></tr><tr><td>‘Are there’</td><td>0.80</td><td>0.62</td></tr><tr><td>‘What do’</td><td>0.76</td><td>0.72</td></tr><tr><td>‘What did’</td><td>0.69</td><td>0.64</td></tr><tr><td>‘Is it’</td><td>0.62</td><td>0.59</td></tr></table>
410
+
411
+ Table 8: Question filtering stats.
412
+
413
+ than an encoder and a decoder). We initially tuned the learning rate (0.0125, 0.075, 0.0025, 0.00125, 0.00075) and the dropout rate (0.0, 0.1, 0.2) on a fully supervised VQA2.0 baseline using VQA Accuracy, and observed that a learning rate of 0.0025 and a dropout of 0.0 work robustly across our experiments; we did not tune them extensively beyond that.
414
+
415
+ We implement VQA Accuracy ourselves based on the official challenge page for VQA2.0<sup>10</sup>.
416
+
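+ For reference, a minimal sketch of that metric (omitting the official answer normalization, e.g., lowercasing and punctuation/article stripping):
+
+ ```python
+ from itertools import combinations
+
+ def vqa_accuracy(prediction: str, human_answers: list) -> float:
+     # VQA2.0-style accuracy: average, over all leave-one-out subsets of
+     # the (typically 10) human answers, of min(#matches / 3, 1).
+     n = len(human_answers)
+     accs = []
+     for subset in combinations(range(n), n - 1):
+         matches = sum(human_answers[i] == prediction for i in subset)
+         accs.append(min(matches / 3.0, 1.0))
+     return sum(accs) / len(accs)
+ ```
+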
417
+ # C Additional Results
418
+
419
+ Table 9 reports the accuracy of the supervised VQA2.0 model, as well as of the zero-shot $\mathrm{VQ^2A}$ models, on the VQA2.0 dev set, split by the most common question prefixes and sorted by the supervised model's accuracy. It reveals a clear performance difference between the boolean question types, on which all models score highly, and the other question types, which show not only lower performance for all models but also a more significant performance drop between the supervised and zero-shot models.
420
+
421
+ Table 10 shows the zero-shot performance of the models when using all of the $\mathrm{VQ^2A}$ dev sets, rather than only the manually validated sample for which Table 6 reports results. The difference in performance on the whole $\mathrm{VQ^2A}$ dev sets (Table 10) is similar in magnitude to that on the manually validated dev samples (Table 6), and most
422
+
423
+ <table><tr><td>Question Prefix</td><td>VQA2.0 Supervised</td><td>VQ2A-COCO Zero-shot</td><td>VQ2A-CC3M Zero-shot</td></tr><tr><td>‘is there’</td><td>98.6</td><td>98.1</td><td>98.2</td></tr><tr><td>‘are there’</td><td>98.0</td><td>97.1</td><td>97.2</td></tr><tr><td>‘does this’</td><td>98.0</td><td>95.1</td><td>95.8</td></tr><tr><td>‘are they’</td><td>96.9</td><td>95.0</td><td>95.3</td></tr><tr><td>‘does the’</td><td>96.4</td><td>95.2</td><td>95.9</td></tr><tr><td>‘is it’</td><td>96.3</td><td>91.4</td><td>92.7</td></tr><tr><td>‘is this’</td><td>96.1</td><td>91.2</td><td>92.8</td></tr><tr><td>‘are the’</td><td>95.6</td><td>92.1</td><td>93.1</td></tr><tr><td>‘is the’</td><td>95.3</td><td>91.7</td><td>92.9</td></tr><tr><td>‘are these’</td><td>95.1</td><td>90.7</td><td>92.2</td></tr><tr><td>‘what color’</td><td>69.2</td><td>64.8</td><td>56.8</td></tr><tr><td>‘what kind’</td><td>56.3</td><td>35.8</td><td>31.4</td></tr><tr><td>‘what type’</td><td>54.4</td><td>32.3</td><td>30.8</td></tr><tr><td>‘what are’</td><td>51.3</td><td>40.2</td><td>33.9</td></tr><tr><td>‘how many’</td><td>49.3</td><td>29.4</td><td>19.5</td></tr><tr><td>‘what is’</td><td>48.5</td><td>39.4</td><td>32.2</td></tr><tr><td>‘where are’</td><td>40.9</td><td>33.9</td><td>27.6</td></tr><tr><td>‘where is’</td><td>35.1</td><td>26.0</td><td>23.0</td></tr><tr><td>‘what does’</td><td>33.0</td><td>24.1</td><td>20.3</td></tr><tr><td>‘what time’</td><td>23.6</td><td>11.9</td><td>12.7</td></tr></table>
424
+
425
+ Table 9: Average accuracy $(\%)$ on VQA2.0 for the most common question prefixes.
426
+
427
+ <table><tr><td rowspan="2">Training data</td><td colspan="4">Evaluation Benchmark</td></tr><tr><td>COCO-QA</td><td>VQA2.0</td><td>VQ2A-COCO</td><td>VQ2A-CC3M</td></tr><tr><td>COCO-QA</td><td>70.3</td><td>11.7</td><td>11.5</td><td>3.7</td></tr><tr><td>VQA2.0</td><td>35.9</td><td>68.8</td><td>41.1</td><td>33.3</td></tr><tr><td>VQ2A-COCO</td><td>55.9</td><td>60.0</td><td>71.2</td><td>49.3</td></tr><tr><td>VQ2A-CC3M</td><td>42.1</td><td>56.5</td><td>60.3</td><td>69.5</td></tr></table>
428
+
429
+ Table 10: $\mathrm{VQ^2A}$ as evaluation data for measuring robustness: VQA Accuracy when training on "row" and tested on "column"; diagonal (gray) numbers denote the supervised setting, non-diagonal numbers denote zero-shot cross-dataset setting. Best zero-shot is bold.
430
+
431
+ importantly, it preserves the ordering of the models in terms of capabilities/performance. We therefore suggest that the utility of the $\mathrm{VQ^2A}$ approach could go beyond training; it can be used as an automatic test-bed for VQA robustness, if not for absolute figures, then for ranking models by their zero-shot robustness.
432
+
433
+ Table 11 shows the effect of candidate answer types on VQA2.0 performance. We train our model on $\mathrm{VQ}^2\mathrm{A}$ COCO or $\mathrm{VQ}^2\mathrm{A}$ CC3M subsets restricted to questions with (i) noun answers, (ii) yes/no answers, (iii) answers containing color-related tokens based on a list of common colors from Wikipedia, and (iv) answers containing digits from 0 to 100. We then evaluate the models trained on these subsets on VQA2.0 using VQA Accuracy and a normalized version (dividing by the percentage of evaluation questions with the corresponding answer types; a short sketch of this normalization follows below). This highlights the importance of incorporating diverse answer candidates in our datasets. We also observe that $\mathrm{VQ}^2\mathrm{A}$ CC3M is on par with $\mathrm{VQ}^2\mathrm{A}$ COCO on
434
+
435
+ <table><tr><td rowspan="2">Training data</td><td colspan="2">VQA Accuracy on VQA2.0</td></tr><tr><td>Standard</td><td>Normalized</td></tr><tr><td>VQ2A COCO</td><td>60.0</td><td>60.0</td></tr><tr><td>VQ2A COCO nouns</td><td>10.5</td><td>32.5</td></tr><tr><td>VQ2A COCO yes/no</td><td>38.4</td><td>94.3</td></tr><tr><td>VQ2A COCO color</td><td>6.7</td><td>55.6</td></tr><tr><td>VQ2A COCO number</td><td>3.9</td><td>25.4</td></tr><tr><td>VQ2A CC3M</td><td>56.5</td><td>56.5</td></tr><tr><td>VQ2A CC3M nouns</td><td>8.8</td><td>27.2</td></tr><tr><td>VQ2A CC3M yes/no</td><td>38.4</td><td>94.4</td></tr><tr><td>VQ2A CC3M color</td><td>6.0</td><td>49.5</td></tr><tr><td>VQ2A CC3M number</td><td>3.4</td><td>22.1</td></tr></table>
436
+
437
+ Table 11: Effect of candidate answer types on the VQA2.0 performance.
438
+
439
+ <table><tr><td>Training data</td><td>VQA Accuracy on VQA2.0</td></tr><tr><td>VQ2A COCO (100%)</td><td>60.0</td></tr><tr><td>VQ2A COCO (50%)</td><td>58.5</td></tr><tr><td>VQ2A COCO (20%)</td><td>56.7</td></tr><tr><td>VQ2A COCO (10%)</td><td>55.4</td></tr><tr><td>VQ2A CC3M (100%)</td><td>56.5</td></tr><tr><td>VQ2A CC3M (50%)</td><td>55.8</td></tr><tr><td>VQ2A CC3M (20%)</td><td>54.8</td></tr><tr><td>VQ2A CC3M (10%)</td><td>53.8</td></tr></table>
440
+
441
+ Table 12: Effect of dataset sizes on the VQA2.0 performance.
442
+
443
+ yes/no-answer questions but behind on nouns, colors, and numbers, which we attribute to CC3M's lower degree of image-text relevance, fewer mentions of colors (due to the style of alt-texts vs. captions), and digit substitution.
444
+
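+ One way to read the normalization above (the helper name is illustrative; the answer-type share is read off the evaluation set):
+
+ ```python
+ def normalized_accuracy(standard_acc: float, type_fraction: float) -> float:
+     # A model trained on a single answer type can be credited on at most
+     # that type's share of the evaluation questions; dividing by the share
+     # recovers accuracy within the type. E.g., in Table 11, 38.4 standard
+     # vs. 94.3 normalized implies yes/no questions are roughly 40% of VQA2.0.
+     return standard_acc / type_fraction
+ ```
+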
445
+ Table 12 shows the effect of dataset size on VQA2.0 performance. We randomly sampled $10\%$ , $20\%$ , and $50\%$ of the $\mathrm{VQ^2A}$ COCO or $\mathrm{VQ^2A}$ CC3M training data. We observe that the larger the training set, the higher the accuracy, although the gains are diminishing. We identify improving the data generation process to achieve a higher degree of output diversity as interesting future work.
446
+
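+ The subsampling itself is a uniform random draw; a trivial sketch (the seed and function name are ours):
+
+ ```python
+ import random
+
+ def subsample(examples: list, fraction: float, seed: int = 0) -> list:
+     # Uniformly sample, e.g., 10%, 20%, or 50% of the training examples.
+     return random.Random(seed).sample(examples, int(len(examples) * fraction))
+ ```
+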
447
+ Table 13 provides question-only baselines (no image features as input). Interestingly, the models trained on our generated $\mathrm{VQ^2A}$ data have answer distributions similar to those of existing VQA benchmarks. At the same time, this reveals exploitation of language bias, suggesting that additional research on bias mitigation is needed, both in terms of models and data (existing benchmarks as well as our datasets).
448
+
449
+ # D Further Considerations
450
+
451
+ Information that names or uniquely identifies individual people or offensive content. COCO
452
+
453
+ <table><tr><td rowspan="2">Approach</td><td colspan="3">Evaluation Benchmark</td></tr><tr><td>VQA2.0</td><td>GQA</td><td>OKVQA</td></tr><tr><td colspan="4">Zero-shot</td></tr><tr><td>VQ2A-COCO (question-only)</td><td>48.9</td><td>44.4</td><td>11.4</td></tr><tr><td>VQ2A-CC3M (question-only)</td><td>47.8</td><td>44.6</td><td>11.9</td></tr><tr><td>VQ2A-COCO</td><td>60.0</td><td>51.3</td><td>18.0</td></tr><tr><td>VQ2A-CC3M</td><td>56.5</td><td>49.9</td><td>19.1</td></tr></table>
454
+
455
+ Table 13: Zero-shot question-only baselines using $\mathrm{VQ^2A}$ as training data.
456
+
457
+ Captions are human-curated and cleaned, while the CC3M collection process upholds rigorous privacy and ethics standards, such as the removal of offensive content and hypernymization. This significantly mitigates the risk that our $\mathrm{VQ^2A}$ datasets contain such information.
458
+
459
+ Intended uses. Due to the considerations and limitations mentioned in Section 6, COCO Captions, CC3M, and our induced $\mathrm{VQ^2A}$ datasets are intended to be used for research purposes only.
allyoumayneedforvqaareimagecaptions/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87c3ae92eafe1525a601a9e6ddcc4a1e98f3815bc6758a42caad5cd0ba875ad0
3
+ size 1110804
allyoumayneedforvqaareimagecaptions/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d506a870352b9eb7fac9e8b59795dc4469699dee15942900038353856c35601b
3
+ size 611764
ambipungeneratinghumorouspunswithambiguouscontext/4bcece5c-c02f-4b98-96cb-a24dd1fa23c3_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6810a4abb9f19c95167b9ed56f3fe8fd0e4ef50525d407d9d687a0316776e9e7
3
+ size 67435
ambipungeneratinghumorouspunswithambiguouscontext/4bcece5c-c02f-4b98-96cb-a24dd1fa23c3_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29289c7b5fbb2dcd9a971135ff16e469516bda4e50149523dd0cc1ce808120b5
3
+ size 80564
ambipungeneratinghumorouspunswithambiguouscontext/4bcece5c-c02f-4b98-96cb-a24dd1fa23c3_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:55c611f37150f0900ac945a2d1c0a495d36b0b84c936ce38b0c5c56da51a87e3
3
+ size 399443
ambipungeneratinghumorouspunswithambiguouscontext/full.md ADDED
@@ -0,0 +1,262 @@
1
+ # AMBIPUN: Generating Humorous Puns with Ambiguous Context
2
+
3
+ Anirudh Mittal $^{2*}$ , Yufei Tian $^{1*}$ , Nanyun Peng $^{1}$
4
+
5
+ <sup>1</sup> Computer Science Department, University of California, Los Angeles,
6
+
7
+ <sup>2</sup> Centre for Machine Intelligence and Data Science, Indian Institute of Technology Bombay anirudhmittal@cse.iitb.ac.in, {yufeit, violetpeng}@cs.ucla.edu
8
+
9
+ # Abstract
10
+
11
+ We propose a simple yet effective way to generate pun sentences that does not require any training on existing puns. Our approach is inspired by humor theories holding that ambiguity comes from the context rather than the pun word itself. Given a pair of definitions of a pun word, our model first produces a list of related concepts through a reverse dictionary to identify unambiguous words representing the pun and the alternative senses. We then utilize one-shot GPT3 to generate context words, and finally generate puns that incorporate context words from both senses. Human evaluation shows that our method successfully generates puns $52\%$ of the time, outperforming well-crafted baselines and the state-of-the-art models by a large margin.
12
+
13
+ # 1 Introduction
14
+
15
+ Computational humor has garnered interest in the NLP community (Petrovic and Matthews, 2013; Miller et al., 2017; Zou and Lu, 2019; Garimella et al., 2020; Yang et al., 2020). In this paper, we tackle the problem of generating homographic puns (Miller et al., 2017): two or more meanings of a word form an intended humorous effect. For example, the three puns listed in Figure 1 exploit two contrasting meanings of the word sentence: 1) a grammatical string of words and 2) the punishment by a court assigned to a guilty person.
16
+
17
+ Due to the lack of sizeable training data, existing approaches are heavyweight in order not to rely on pun sentences for training. For example, Yu et al. (2018) train a constrained neural language model (Mou et al., 2015) on a general text corpus and then use a joint decoding algorithm to promote puns. He et al. (2019) propose a local-global surprisal principle, and Luo et al. (2019) leverage Generative Adversarial Nets (Goodfellow et al.,
18
+
19
+ <table><tr><td>Sense 1
20
+ Definition</td><td>a string of words that is complete in itself, typically containing a subject and predicate</td></tr><tr><td>Sense 2
21
+ Definition</td><td>(criminal law) a final judgment of guilty in a criminal case and the punishment that is imposed</td></tr><tr><td>Ours 1</td><td>The sentence is ungrammatical. The jury didn&#x27;t hear it.</td></tr><tr><td>Ours 2</td><td>I&#x27;m sorry I said the sentence was too long but punishments are endless.</td></tr><tr><td>Human</td><td>The Judge has got a stutter. Looks like I am not getting a sentence.</td></tr></table>
22
+
23
+ Figure 1: An illustration of homographic puns. The target pun word 'sentence' and the two sense definitions are given as input. To make the target word interpretable in both senses, we propose to include context words (highlighted in blue and pink) related to both senses.
24
+
25
+ 2014) to encourage ambiguity of the outputs via reinforcement learning. We, on the other hand, propose a simple yet effective way to tackle this problem: encouraging ambiguity by incorporating context words related to each sense.
26
+
27
+ Inspired by humor theories (Attardo, 2010), we hypothesize that it is the contextual connections rather than the pun word itself that are crucial for the success of pun generation. For instance, in Figure 1 we observe that context related to both senses (e.g., ungrammatical and jury) appears in a punning sentence. This observation is important, as the error analysis of the SOTA model (Luo et al., 2019) shows that $46\%$ of its outputs fail to be puns due to a single word sense, and another $27\%$ fail due to being too general, both of which can be resolved by introducing more context.
28
+
29
+ Specifically, given the two sense definitions of a target pun word, we first use a reverse dictionary to generate related words that are monosemous for both senses. This first step helps us circumvent the obstacle of processing pun words with the same written form. We then propose to use context words to link the contrasting senses and make our target pun word reasonable when interpreted in both definitions. We explore three different settings: extractive-based, similarity-based, and generative-based. Finally, we finetune the
30
+
31
+ ![](images/07b57f54b936c4f4b314b17827e8c40a4b2eb496a5866774cf153fa9a688a365.jpg)
32
+ Figure 2: Overview of the approach. We also give an example for pun word 'sentence' for each step.
33
+
34
+ T5 model (Raffel et al., 2020) on general non-humorous texts to generate coherent sentences given the pun word and context words as input.
35
+
36
+ Our experimental results show that the retrieve-and-extract approach to context words outperforms the giant few-shot GPT3 model at generating funny pun sentences, although the latter has been shown to be much more powerful on many sophisticated tasks (Brown et al., 2020). Our simple pipeline remarkably outperforms all of the heavier-weight approaches. Our code and data are available at https://github.com/PlusLabNLP/AmbiPun.
37
+
38
+ # 2 Methodology
39
+
40
+ Overview and Motivation Our input is the target pun word $(p)$ and its two sense definitions $(S_{1}, S_{2})$ , and the output is a list of humorous punning sentences. We implement the ambiguity principle proposed in (Kao et al., 2016): a pun sentence should contain one or more context words corresponding to each of the two senses. The overview of our approach is visualized in Figure 2.
41
+
42
+ Given $S_{1}$ and $S_{2}$ , we first use a reverse dictionary to generate a list of words that semantically match the query descriptions. We call them related words (Section 2.1). However, those related words are synonyms of the pun word and are rarely observed as-is in humorous sentences. For example, in the sentence "The Judge has got a stutter. Looks like I am not getting a sentence.", the first sense (i.e., a final judgment of guilty in a criminal case) is represented by Judge, which could not be generated from the sense definition.
43
+
44
+ Considering such nuances, in Section 2.2 we propose three different methods to obtain the context words: extractive-, similarity-, and generative-based. Finally, in Sections 2.3 and 2.4, we introduce a keyword-to-text generator to produce candidate sentences, and a humor classifier to rule out some of the non-pun sentences. Final sentences are then randomly sampled for evaluation. All our
45
+
46
+ training data consists of general, non-humorous corpora, except for the humor classifier.
47
+
48
+ # 2.1 Generating Related Words
49
+
50
+ We aim to differentiate the two senses of a polysemous word via related words, so that each sense is represented by a set of monosemous words. To this end, we leverage the Reverse Dictionary (Qi et al., 2020; Zhang et al., 2020), which takes a description as input and generates multiple related words whose semantic meaning matches the query description. For each sense definition, we generate five words.
51
+
52
+ # 2.2 Generating Context Words
53
+
54
+ For context words, we compare three different approaches. As an example, we compare the output of context words for the pun word 'sentence' in Table 5 in the appendix. Refinement of the context words is mentioned in section A.2 in the appendix.
55
+
56
+ Method 1: Extractive (TF-IDF) For each related word, we retrieve sentences from the One Billion Word dataset containing that word and then extract keywords using RAKE (Rose et al., 2010) from the retrieved sentences. Based on this TF-IDF value, we choose the top 10 context words that are mostly likely to be used along with the related words and therefore the pun word.
57
+
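+ A sketch of this step, assuming the `rake_nltk` package as the RAKE implementation; the paper ranks candidates by TF-IDF, whereas this sketch uses RAKE's own ranking for brevity:
+
+ ```python
+ from rake_nltk import Rake  # RAKE (Rose et al., 2010); requires NLTK stopwords
+
+ def extractive_context_words(related_word, corpus_sentences, top_k=10):
+     # Retrieve corpus sentences mentioning the related word, then extract
+     # their top-ranked keywords as context-word candidates.
+     retrieved = [s for s in corpus_sentences if related_word in s.lower().split()]
+     rake = Rake()
+     rake.extract_keywords_from_sentences(retrieved)
+     # Keep single alphabetic tokens (numbers and phrases dropped, cf. Appendix A.2).
+     return [p for p in rake.get_ranked_phrases() if p.isalpha()][:top_k]
+ ```
+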
58
+ Method 2: Similarity (Word2Vec) Inspired by the idea that "a word is characterized by the company it keeps", we propose to get context words from word2vec. (Ghazvininejad et al., 2016) have also shown that the training corpus for word2vec plays a crucial role on the quality of generated context words. Hence, we train on Wikipedia which has a comprehensive coverage of diverse topics.
59
+
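+ A sketch of this method with gensim, using the CBOW settings reported in Appendix A.2 (window 40, dimension 200); `sentences` would be tokenized Wikipedia, and `min_count` is our assumption:
+
+ ```python
+ from gensim.models import Word2Vec
+
+ def similarity_context_words(related_word, sentences, top_k=10):
+     # Train CBOW word2vec (sg=0) and take the nearest neighbours of the
+     # related word in the embedding space as context words.
+     model = Word2Vec(sentences, vector_size=200, window=40, sg=0, min_count=5)
+     return [w for w, _ in model.wv.most_similar(related_word, topn=top_k)]
+ ```
+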
60
+ Method 3: Generative (Few-shot GPT3) For the generative version, we provide the powerful language model GPT3 (Brown et al., 2020) with two examples and generate context words. Details about the prompt can be found in section E of the appendix.
61
+
62
+ # 2.3 Candidate Sentence generation
63
+
64
+ After receiving context words for each sense, we generate humorous puns. We finetune a keyword-
65
+
66
+ to-sentence model using T5 (Raffel et al., 2020), as it is capable of handling text-to-text tasks. The prompt provides the pun word, and two context words from each of the two senses. For example for the word 'sentence', a possible prompt can be generate sentence: sentence, judge, trial, noun, comma. Moreover, we also investigate whether the position of the pun word will affect the quality of generated sentences. We insert the pun word in the first, third, and fifth place of the prompt. We discuss our findings in Section 5.
67
+
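+ A sketch of inference with this module (the fine-tuned checkpoint path is hypothetical; the prompt reuses the paper's 'sentence' example):
+
+ ```python
+ from transformers import T5ForConditionalGeneration, T5Tokenizer
+
+ tokenizer = T5Tokenizer.from_pretrained("t5-base")
+ model = T5ForConditionalGeneration.from_pretrained("path/to/finetuned-t5")  # hypothetical
+
+ prompt = "generate sentence: sentence, judge, trial, noun, comma"
+ inputs = tokenizer(prompt, return_tensors="pt", max_length=30, truncation=True)
+ outputs = model.generate(**inputs, max_length=30, do_sample=True, num_return_sequences=5)
+ candidates = [tokenizer.decode(o, skip_special_tokens=True) for o in outputs]
+ ```
+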
68
+ # 2.4 Humor Classification
69
+
70
+ Finally, we introduce a classification model to assist us in selecting (i.e., ranking) punning sentences. Since we do not have sizable training data for puns, we propose to train our classification model on humorous dataset in a distantly supervised fashion. Specifically, we train BERT-large (Devlin et al., 2018) on the ColBERT dataset (Annamoradnejad and Zoghi, 2020) that contains 200,000 jokes and non-jokes used for humor detection. We use the probability produced by the classification model to rank our candidate sentences.
71
+
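+ A sketch of the ranking step, assuming a BERT-large model fine-tuned on ColBERT with label 1 for "humorous" (the checkpoint path and label index are assumptions):
+
+ ```python
+ import torch
+ from transformers import AutoModelForSequenceClassification, AutoTokenizer
+
+ tokenizer = AutoTokenizer.from_pretrained("bert-large-uncased")
+ model = AutoModelForSequenceClassification.from_pretrained("path/to/humor-bert")  # hypothetical
+
+ def humor_score(sentence: str) -> float:
+     # Probability of the humorous class, used to rank candidates and
+     # drop the bottom third.
+     inputs = tokenizer(sentence, return_tensors="pt", truncation=True)
+     with torch.no_grad():
+         logits = model(**inputs).logits
+     return torch.softmax(logits, dim=-1)[0, 1].item()
+ ```
+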
72
+ Our error analysis in Appendix B shows that our classifier can successfully rule out bad generations, i.e., non-puns, as puns are humorous by nature. However, the model is not good at choosing the best samples. Therefore, we use this classifier only to remove the bottom third of the candidates. We leave it as future work to accurately pick out high-quality punning sentences rather than merely funny ones.
73
+
74
+ # 3 Experiments
75
+
76
+ # 3.1 Datasets
77
+
78
+ Training: For the context word generation step, we use the One Billion word dataset (Chelba et al., 2013) to retrieve sentences for a given word and calculate TF-IDF values. This dataset contains roughly 0.8B words and is obtained from WMT 2011 News crawl data. For the humor classifier and candidate generation module, we use ColBERT dataset (Annamoradnejad and Zoghi, 2020). It contains 100k jokes and 100k non-jokes.
79
+
80
+ Evaluation dataset: Following other pun generation works, we use the SemEval 2017 Task 7 (Miller et al., 2017) for evaluation. The dataset contains 1,163 human written punning jokes with a total of 895 unique pun words. Each sentence has the target pun word, location of the pun word and the WordNet sense keys of the two senses.
81
+
82
+ <table><tr><td rowspan="2">Model</td><td rowspan="2">Avg Seq Len</td><td colspan="2">Corpus-Div</td><td colspan="2">Sentence-Div</td></tr><tr><td>Dist-1</td><td>Dist-2</td><td>Dist-1</td><td>Dist-2</td></tr><tr><td>Few-shot GPT3</td><td>12.3</td><td>37.1</td><td>80.4</td><td>94.5</td><td>85.7</td></tr><tr><td>Neural Pun</td><td>12.6</td><td>30.2</td><td>73.0</td><td>91.3</td><td>90.5</td></tr><tr><td>Pun GAN</td><td>9.7</td><td>34.6</td><td>71.9</td><td>90.2</td><td>87.6</td></tr><tr><td>Sim AMBIPUN</td><td>13.4</td><td>32.4</td><td>77.1</td><td>92.9</td><td>91.2</td></tr><tr><td>Gen AMBIPUN</td><td>13.5</td><td>32.8</td><td>77.8</td><td>93.6</td><td>91.2</td></tr><tr><td>Ext AMBIPUN</td><td>14.0</td><td>31.7</td><td>78.7</td><td>96.3</td><td>92.3</td></tr><tr><td>Human</td><td>14.1</td><td>36.6</td><td>81.9</td><td>95.5</td><td>92.4</td></tr></table>
83
+
84
+ Table 1: Results of automatic evaluation on average sequence length, sentence-level and corpus-level diversity. Boldface denotes the best performance and underline denotes the second best performance among systems.
85
+
86
+ <table><tr><td>Model</td><td>Success Rate</td><td>Fun</td><td>Coherence</td></tr><tr><td>Few-shot GPT3</td><td>13.0%</td><td>1.82</td><td>3.77</td></tr><tr><td>Neural Pun</td><td>35.3%</td><td>2.17</td><td>3.21</td></tr><tr><td>Pun GAN</td><td>35.8%</td><td>2.28</td><td>2.97</td></tr><tr><td>Sim AMBIPUN</td><td>45.5%</td><td>2.69</td><td>3.38</td></tr><tr><td>Gen AMBIPUN</td><td>50.5%</td><td>2.94</td><td>3.53</td></tr><tr><td>Ext AMBIPUN</td><td>52.2%*</td><td>3.00*</td><td>3.48</td></tr><tr><td>Human</td><td>70.2%</td><td>3.43</td><td>3.66</td></tr></table>
87
+
88
+ Table 2: Human evaluation results on all the pun generation systems. We show the success rates and the average scores of funniness and coherence. Overall, Ext AMBIPUN performs the best. The superiority of our model in terms of success rate and funniness is statistically significant over the best baseline and is marked by \*.
89
+
90
+ # 3.2 Baselines
91
+
92
+ Neural Pun Yu et al. (2018) propose the first neural approach to homographic puns based on a constrained beam search algorithm to jointly decode the two distinct senses of the same word.
93
+
94
+ Pun-GAN The SOTA model, introduced by Luo et al. (2019), which adopts a Generative Adversarial Net to generate homographic puns.
95
+
96
+ Few-shot GPT3 We generate puns by feeding a few examples into GPT3 davinci-instruct-beta, the most capable model in the GPT3 family for creative generation. We provide the target pun word and its two senses in our prompt, along with the instruction to generate puns.
97
+
98
+ Ablations of our own models We also compare three methods proposed by us to obtain the context words (described in Section 2.2). We call them Ext AMBIPUN, Sim AMBIPUN, and Gen AMBIPUN.
99
+
100
+ # 3.3 Evaluation
101
+
102
+ Automatic Evaluation We follow Luo et al. (2019) and Yu et al. (2018) in calculating distinct unigrams and bigrams as diversity metrics (Li et al., 2016) at the sentence and corpus levels. We also
103
+
104
+ <table><tr><td>Pun word
105
+ Sense 1
106
+ Sense 2</td><td colspan="3">Irrational
107
+ Real but not expressible as the quotient of two integers
108
+ Not consistent with or using reason</td></tr><tr><td>Model</td><td>Example</td><td>Pun</td><td>Funny</td></tr><tr><td>GPT3</td><td>I can’t make a decision with all this irrationality going on.</td><td>No</td><td>1.4</td></tr><tr><td>Neural Pun</td><td>Note that this means that there is an irrational problem.</td><td>Yes</td><td>2.4</td></tr><tr><td>Pun-GAN</td><td>It can be use the irrational system.</td><td>No</td><td>1.2</td></tr><tr><td>Ext AMBIPUN</td><td>I have an irrational paranoia about mathematical integers.</td><td>Yes</td><td>3.8</td></tr><tr><td>Gen AMBIPUN</td><td>My calculator is unjust and illogic. It’s irrational.</td><td>Yes</td><td>3.4</td></tr><tr><td>Human</td><td>Old math teachers never die, they just become irrational.</td><td>Yes</td><td>3.8</td></tr><tr><td>Pun word
109
+ Sense 1
110
+ Sense 2</td><td colspan="3">Drive
111
+ A journey in a vehicle (usually an automobile)
112
+ The trait of being highly motivated</td></tr><tr><td>Model</td><td>Example</td><td>Pun</td><td>Funny</td></tr><tr><td>GPT3</td><td>I am exhausted, I need a nap before I can drive any more.</td><td>No</td><td>2.0</td></tr><tr><td>Neural Pun</td><td>It is that it can be use to drive a variety of function?</td><td>No</td><td>1.6</td></tr><tr><td>Pun-GAN</td><td>In he drive to the first three years.</td><td>No</td><td>1.2</td></tr><tr><td>Ext AMBIPUN</td><td>What do you call a genius with cunning drive? racecar driver.</td><td>Yes</td><td>3.6</td></tr><tr><td>Gen AMBIPUN</td><td>I have the determination to travel to my destination. But i don’t have the drive.</td><td>Yes</td><td>4.0</td></tr><tr><td>Human</td><td>A boy saving up for a car has a lot of drive.</td><td>Yes</td><td>4.2</td></tr></table>
113
+
114
+ Table 3: Generated sentences for the words 'Irrational' and 'Drive' and their sense definitions. We underline the context words that are related to each sense. All the generations are evaluated by external annotators, not the authors.
115
+
116
+ report the average sentence length produced.
117
+
118
+ Human Evaluation We randomly select 100 sentences and collect human ratings on Amazon Mechanical Turk (AMT). For each sentence, three workers are explicitly given the target pun word and its two sense definitions provided by SemEval 2017 Task 7. We first ask them to judge whether a given sentence is a successful pun. Then, they are asked to rate funniness and coherence on a scale from 1 (not at all) to 5 (extremely). Besides detailed instructions and explanations for each criterion, we also adopt attention questions and qualification types to rule out irresponsible workers. We conduct paired t-tests for significance testing. The difference between our best-performing model and the best baseline is significant.
119
+
120
+ # 4 Results and Analysis
121
+
122
+ # 4.1 Pun Generation Results
123
+
124
+ Automatic Evaluation Results of the automatic evaluation are shown in Table 1. The average sentence length of our model is the closest to that of humans, and our model achieves the highest sentence-level diversity. Although our baselines Pun-GAN and Few-shot GPT3 have higher distinct-unigram ratios at the corpus level, this is because all baseline models generate one sentence per pun word, while AMBIPUN generates tens of sentences per pun word, which inevitably sacrifices topical diversity. Nevertheless, AMBIPUN achieves the highest corpus-level bi-gram diversity.
125
+
126
+ Human Evaluation Results from the human evaluation are shown in Table 2. We evaluate the success rate, funniness, and coherence of the
127
+
128
+ generated outputs. The superiority of our models is evident. For significance testing, we conducted paired t-tests, in which our systems outperformed the best baseline in terms of success rate and funniness $(p\text{-value} < 0.05)$ . On the other hand, GPT3 could generate even more coherently than humans.
129
+
130
+ Analysis between the extractive and generative methods. Interestingly, the extractive method has a higher success rate (p-value $< 0.05$ ) and is funnier (p-value $< 0.07$ ) than the generative method, indicating that extracting salient words from human-written sentences can introduce more surprising and uncommon words than language models do. We posit that those atypical words refresh people's eyes and thus boost both the pun success rate and the funniness score. On the other hand, we also tried to equip GPT3 with greater creativity via top-k sampling with a large temperature $T$ . However, larger values of $T$ also result in arbitrary responses that humans may find unreadable. We hope our discovery draws the community's attention to these traditional techniques for creative generation.
131
+
132
+ # 4.2 Case Study
133
+
134
+ To better understand the advantages of our method, we conduct a case study for the pun words "Irrational" and "Drive" in Table 3. For both target pun words, at most one of the baselines successfully generates a punning sentence. As discussed earlier, one possible reason is the absence of both senses. On the other hand, both Ext AMBIPUN and Gen AMBIPUN introduce context words for the two senses and are thus able to generate high-quality puns that almost match the human-written puns in
135
+
136
+ <table><tr><td></td><td>Success Rate</td></tr><tr><td>Beginning</td><td>46.7%</td></tr><tr><td>Middle</td><td>52.0%</td></tr><tr><td>End</td><td>54.7%</td></tr></table>
137
+
138
+ Table 4: The pun success rate of generated sentences by the position of the pun word, as judged by human annotators.
139
+
140
+ terms of the funniness score.
141
+
142
+ # 5 The Position of Pun Words
143
+
144
+ As mentioned in Section 2.3, we vary the position of the pun word in the prompt given to the candidate sentence generation model. We try three variants, putting the target pun word at the start, in the middle, and at the end. For each variant, we ask Mechanical Turkers to judge whether the given sentences are puns. Again, each sentence is rated by three Turkers, and we take the majority answer if the workers disagree. Results from this analysis are shown in Table 4. We observe that people find a sentence more likely to be a pun when the target word appears at the end.
145
+
146
+ To verify this hypothesis, we also calculate the positions of the pun words in the 1,163 human-written pun sentences from SemEval 2017 Task 7 and report the distribution in Figure 3 in the Appendix. The histogram corroborates the human-annotated samples: both suggest that keeping the pun word at the end of the sentence generates funnier puns. The theory of humor which holds that the "joke" in a funny sentence comes towards its end (Shahaf et al., 2015) validates our analysis.
147
+
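+ A sketch of the normalized-position computation behind this analysis and Figure 3 (whitespace tokenization; the function name is ours):
+
+ ```python
+ def pun_word_position(sentence: str, pun_word: str) -> float:
+     # Relative position of the pun word, from 0 (start) to 1 (end).
+     tokens = sentence.lower().split()
+     return tokens.index(pun_word.lower()) / max(len(tokens) - 1, 1)
+ ```
+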
148
+ # 6 Related Works
149
+
150
+ # 6.1 Creative Text Generation
151
+
152
+ Pun generation. Many of the previous works on pun generation have focused on phonological or syntactic patterns rather than semantic patterns (Miller and Gurevych, 2015; Hong and Ong, 2009; Petrovic and Matthews, 2013; Valitutti et al., 2013), thus lacking creativity and flexibility. He et al. (2019) make use of the local-global surprisal principle to generate homophonic puns, and Yu et al. (2020) use constrained lexical rewriting for the same task. Hashimoto et al. (2018) use a retrieve-and-edit approach to generate homographic puns, while Yu et al. (2018) and Luo et al. (2019) propose complex neural model architectures such as constrained language models and GANs, and do not put emphasis on the linguistic structure of puns. We identify their absence of both senses as a shortcoming and build our approach from there.
153
+
154
+ Humor generation. Humor generation still remains an unsolved problem and is usually studied in specific settings. Petrović and Matthews (2013) generate jokes of the type 'I like my X like I like my Y, Z'. Garimella et al. (2020) develop a model to fill blanks in the Mad Libs format, and Yang et al. (2020) edit headlines to make them funny. More research is required to generate humorous sentences that are not constrained by their semantic structure.
155
+
156
+ Figurative language generation. In addition to puns, there are many attempts to generate figurative language such as metaphor, simile (Chakrabarty et al., 2020b), and sarcasm. Yu and Wan (2019) use metaphorically used verbs to generate metaphors in an unsupervised fashion, while Chakrabarty et al. (2021) and Stowe et al. (2021) generate metaphors using symbolism with discriminative decoding and conceptual mappings, respectively. Mishra et al. (2019) propose a modular architecture for unsupervised sarcasm generation, and Chakrabarty et al. (2020a) use commonsense knowledge for the same task. Tian et al. (2021), on the other hand, are the first to leverage semantic structure together with commonsense and counterfactual knowledge to generate hyperboles.
157
+
158
+ # 6.2 Pun detection
159
+
160
+ SemEval 2017 Task 7 (Miller et al., 2017) introduced the challenges of pun detection, location detection, and sense interpretation for homographic and homophonic puns. Diao et al. (2019) make use of a gated attention network to detect homophonic puns. Zou and Lu (2019) introduce a tagging scheme that lets them detect puns as well as their locations; they apply this approach to both homophonic and homographic puns.
161
+
162
+ # 7 Conclusion
163
+
164
+ We propose a novel approach to homographic pun generation. Unlike previous works that are mathematically heavy, our approach is backed by the humor-theoretic insight that ambiguity is achieved through context. Automatic and human evaluations show that our model AMBIPUN outperforms the current state-of-the-art model by a large margin.
165
+
166
+ # Acknowledgments
167
+
168
+ The authors would like to thank the members of PLUSLab and the anonymous reviewers for helpful comments. Yufei Tian is supported by an Amazon Fellowship.
169
+
170
+ # References
171
+
172
+ Issa Annamoradnejad and Gohar Zoghi. 2020. Colbert: Using bert sentence embedding for humor detection. arXiv preprint arXiv:2004.12765.
173
+ Salvatore Attardo. 2010. Linguistic Theories of Humor. De Gruyter Mouton.
174
+ Pavel Braslavski, Vladislav Blinov, Valeria Bolotova, and Katya Pertsova. 2018. How to evaluate humorous response generation, seriously? In Proceedings of the 2018 Conference on Human Information Interaction & Retrieval, pages 225-228.
175
+ Tom B Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. arXiv preprint arXiv:2005.14165.
176
+ Tuhin Chakrabarty, Debanjan Ghosh, Smaranda Muresan, and Nanyun Peng. 2020a. R3: Reverse, retrieve, and rank for sarcasm generation with commonsense knowledge. arXiv preprint arXiv:2004.13248.
177
+ Tuhin Chakrabarty, Smaranda Muresan, and Nanyun Peng. 2020b. Generating similes effortlessly like a pro: A style transfer approach for simile generation. arXiv preprint arXiv:2009.08942.
178
+ Tuhin Chakrabarty, Xurui Zhang, Smaranda Muresan, and Nanyun Peng. 2021. Mermaid: Metaphor generation with symbolism and discriminative decoding.
179
+ Ciprian Chelba, Tomas Mikolov, Mike Schuster, Qi Ge, Thorsten Brants, Phillip Koehn, and Tony Robinson. 2013. One billion word benchmark for measuring progress in statistical language modeling. arXiv preprint arXiv:1312.3005.
180
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805.
181
+ Yufeng Diao, Hongfei Lin, Liang Yang, Xiaochao Fan, Di Wu, Dongyu Zhang, and Kan Xu. 2019. Heterographic pun recognition via pronunciation and spelling understanding gated attention network. In The World Wide Web Conference, WWW '19, page 363-371, New York, NY, USA. Association for Computing Machinery.
182
+ Aparna Garimella, Carmen Banea, Nabil Hossain, and Rada Mihalcea. 2020. "judge me by my size (noun), do you?" YodaLib: A demographic-aware humor generation framework. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2814-2825, Barcelona, Spain (Online). International Committee on Computational Linguistics.
183
+ Marjan Ghazvininejad, Xing Shi, Yejin Choi, and Kevin Knight. 2016. Generating topical poetry. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 1183-1191.
184
+
185
+ Ian Goodfellow, Jean Pouget-Abadie, Mehdi Mirza, Bing Xu, David Warde-Farley, Sherjil Ozair, Aaron Courville, and Yoshua Bengio. 2014. Generative adversarial nets. Advances in neural information processing systems, 27.
186
+ Tatsunori B. Hashimoto, Kelvin Guu, Yonatan Oren, and Percy Liang. 2018. A retrieve-and-edit framework for predicting structured outputs.
187
+ He He, Nanyun Peng, and Percy Liang. 2019. Pun generation with surprise. In 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, NAACL HLT 2019, pages 1734-1744. Association for Computational Linguistics (ACL).
188
+ Bryan Anthony Hong and Ethel Ong. 2009. Automatically extracting word relationships as templates for pun generation. In Proceedings of the Workshop on Computational Approaches to Linguistic Creativity, pages 24-31, Boulder, Colorado. Association for Computational Linguistics.
189
+ Justine T Kao, Roger Levy, and Noah D Goodman. 2016. A computational model of linguistic humor in puns. Cognitive science, 40(5):1270-1285.
190
+ Jiwei Li, Michel Galley, Chris Brockett, Jianfeng Gao, and Bill Dolan. 2016. A diversity-promoting objective function for neural conversation models. In HLT-NAACL.
191
+ Fuli Luo, Shunyao Li, Pengcheng Yang, Lei Li, Baobao Chang, Zhifang Sui, and Xu Sun. 2019. Pun-gan: Generative adversarial network for pun generation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3388-3393.
192
+ Tristan Miller and Iryna Gurevych. 2015. Automatic disambiguation of English puns. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 719-729, Beijing, China. Association for Computational Linguistics.
193
+ Tristan Miller, Christian F Hempelmann, and Iryna Gurevych. 2017. Semeval-2017 task 7: Detection and interpretation of english puns. In Proceedings of the 11th International Workshop on Semantic Evaluation (SemEval-2017), pages 58-68.
194
+ Abhijit Mishra, Tarun Tater, and Karthik Sankaranayanan. 2019. A modular architecture for unsupervised sarcasm generation. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 6144-6154.
195
+ Lili Mou, Rui Yan, Ge Li, Lu Zhang, and Zhi Jin. 2015. Backward and forward language modeling for constrained sentence generation. arXiv preprint arXiv:1512.06612.
196
+
197
+ Sasa Petrovic and David Matthews. 2013. Unsupervised joke generation from big data. In Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 228-232, Sofia, Bulgaria. Association for Computational Linguistics.
198
+ Fanchao Qi, Lei Zhang, Yanhui Yang, Zhiyuan Liu, and Maosong Sun. 2020. Wantwords: An open-source online reverse dictionary system. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 175-181.
199
+ Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text transformer. Journal of Machine Learning Research, 21:1-67.
200
+ Stuart Rose, Dave Engel, Nick Cramer, and Wendy Cowley. 2010. Automatic keyword extraction from individual documents. Text mining: applications and theory, 1:1-20.
201
+ Dafna Shahaf, Eric Horvitz, and Robert Mankoff. 2015. Inside jokes: Identifying humorous cartoon captions. In Proceedings of the 21th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, pages 1065-1074.
202
+ Kevin Stowe, Tuhin Chakrabarty, Nanyun Peng, Smaranda Muresan, and Iryna Gurevych. 2021. Metaphor generation with conceptual mappings.
203
+ Yufei Tian, Arvind krishna Sridhar, and Nanyun Peng. 2021. HypoGen: Hyperbole generation with commonsense and counterfactual knowledge. In *Findings of the Association for Computational Linguistics: EMNLP* 2021, pages 1583–1593, Punta Cana, Dominican Republic. Association for Computational Linguistics.
204
+ Alessandro Valitutti, Hannu Toivonen, Antoine Doucet, and Jukka Toivanen. 2013. "let everything turn well in your wife": Generation of adult humor using lexical constraints. volume 2.
205
+ Ziqing Yang, Yiming Cui, Zhipeng Chen, Wanxiang Che, Ting Liu, Shijin Wang, and Guoping Hu. 2020. TextBrewer: An Open-Source Knowledge Distillation Toolkit for Natural Language Processing. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 9–16, Online. Association for Computational Linguistics.
206
+ Zhiwei Yu, Jiwei Tan, and Xiaojun Wan. 2018. A neural approach to pun generation. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1650-1660.
207
+
208
+ Zhiwei Yu and Xiaojun Wan. 2019. How to avoid sentences spelling boring? towards a neural approach to unsupervised metaphor generation. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 861-871, Minneapolis, Minnesota. Association for Computational Linguistics.
209
+ Zhiwei Yu, Hongyu Zang, and Xiaojun Wan. 2020. Homophonic pun generation with lexically constrained rewriting. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2870-2876, Online. Association for Computational Linguistics.
210
+ Lei Zhang, Fanchao Qi, Zhiyuan Liu, Yasheng Wang, Qun Liu, and Maosong Sun. 2020. Multi-channel reverse dictionary model. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 312-319.
211
+ Yanyan Zou and Wei Lu. 2019. Joint detection and location of English puns. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2117-2123, Minneapolis, Minnesota. Association for Computational Linguistics.
212
+
213
+ # Appendix
214
+
215
+ # A Details in Experiments
216
+
217
+ # A.1 An Example of Context Words
218
+
219
+ We list the output of context words for the pun word 'sentence' in Table 5. The table lists two sense definitions and the related words obtained from the sense definitions using reverse dictionary. We then obtain context words using three different mechanisms: TF-IDF, Word2Vec, and GPT3.
220
+
221
+ # A.2 Implementation Details
222
+
223
+ Experimental Settings For the word2vec model, we train a continuous-bag-of-words model with window size 40 and word vector dimension 200. For the candidate generation module, we train the T5-base model for 10 epochs and select the best-performing checkpoint based on validation loss. The max sequence length for source and target is set to 30, and the batch size is set to 64.
224
+
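+ As a sketch, these settings map onto Hugging Face training arguments roughly as follows (the optimizer, learning rate, and output directory are unspecified in the paper and assumed here):
+
+ ```python
+ from transformers import Seq2SeqTrainingArguments
+
+ args = Seq2SeqTrainingArguments(
+     output_dir="ambipun-t5",          # hypothetical
+     num_train_epochs=10,
+     per_device_train_batch_size=64,
+ )
+ # Evaluate each epoch and keep the checkpoint with the lowest validation
+ # loss, per Appendix A.2; the max source/target length of 30 is applied
+ # at tokenization time.
+ ```
+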
225
+ Data Refinement The process of generating both related and context words can yield many words that are not ideal, and continuing with these words would further propagate and enlarge the noise. Hence, to minimize this noise, we implement the following data refinement steps to ensure the keywords meet our standards: we avoid using polysemous words as keywords during intermediate steps, because their perceived sense is highly ambiguous, and we disregard any numbers and special characters produced by our systems.
226
+
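+ A sketch of one way to operationalize this filter, using WordNet sense counts for the monosemy check (the paper does not specify its sense inventory):
+
+ ```python
+ from nltk.corpus import wordnet  # requires nltk.download("wordnet")
+
+ def keep_keyword(word: str) -> bool:
+     # Drop numbers/special characters and words with more than one sense.
+     return word.isalpha() and len(wordnet.synsets(word)) == 1
+ ```
+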
227
+ # A.3 Human Evaluation
228
+
229
+ The workers are paid $20 per hour. For the pun success judgement (a yes/no question), we take the majority vote among the three workers, while for funniness and coherence (1 to 5), we take the average rating. We then use the pairwise kappa coefficient to measure inter-annotator agreement (IAA). The average inter-annotator agreement of all raters for pun success, funniness, and coherence is 0.55, 0.48, and 0.40, respectively, meaning that annotators moderately agree with each other. Considering the subjectivity of this task (Braslavski et al., 2018) and the higher IAA for pun success and funniness compared to coherence, we argue that our collected results are reasonably reliable for the purpose of pun generation. Besides, we conducted paired t-tests and show that the success rate and funniness ratings of our systems differ from those of the best baseline model with statistical significance (p-value < 0.05).
230
+
231
+ ![](images/6364e7eae6e5cd1de0be4566a19d621ca608c2ed05ece36e67b40342f0265d60.jpg)
232
+ Figure 3: Analysis of the position of pun word in 1,163 human written puns. The y-axis indicates the number of sentences and the x-axis indicates the position of pun word on a scale from 0 (start) to 1 (end).
233
+
234
+ # B Humor Classifier Results for Selecting Puns
235
+
236
+ To further discuss the accuracy and recall of our humor classifier, we show a representative output in Table 6. The table contains a few selected sentences ranked by the humor classifier. We also label each sentence yes, no, or maybe to indicate whether it is a pun. As discussed in the methodology, we train our classifier on a humor dataset. As puns are an important part of humor generation, this model can help rule out some options; basic theories of humor, such as incongruity and surprise, apply to both. As can be seen in the table, our classifier is able to successfully set aside unfunny or incoherent sentences. Looking at the examples at the top and in the middle, it can be observed that some better examples are ranked lower than others. Having made this observation across many pun words, we decided to use the classifier only to rule out the bottom third of the samples. For the rest of the generations, we sample randomly.
237
+
238
+ On manual inspection, we realized that when we as humans peruse the generated candidates, there are many sentences that meet our expectations. Therefore, building a classifier that can accurately find these sentences could increase the accuracy by a large margin. We treat this as an opportunity for future work.
239
+
240
+ # C Analysis of Human Written Puns
241
+
242
+ We calculate the positions of the pun words in the 1,163 human-written pun sentences from SemEval 2017 Task 7 and report the distribution. The histogram corroborates the human-annotated samples: both suggest that keeping the pun word at the end of the sentence generates funnier puns. The theory of humor which holds that the "joke" in a funny sentence comes towards its end validates our analysis.
243
+
244
+ <table><tr><td></td><td>Sense 1</td><td>Sense 2</td></tr><tr><td>Definition</td><td>a string of words satisfying the grammatical rules of a language</td><td>a final judgment of guilty in a criminal case and the punishment that is imposed</td></tr><tr><td>Related words</td><td>syllable, syntax, lexicon, thesaurus, grammatical</td><td>punishment, verdict, sentencing, retrial, penalty</td></tr><tr><td>TF-IDF</td><td>syllables, words, three, spelling, even, said, describe, typos</td><td>cruel, expected, end, court, scheduled, set, spector, seeking</td></tr><tr><td>Word2Vec</td><td>syllable, pronounced, words, rhyme, verbs, meaning, hence, example</td><td>punished, crimes, offender, torture, moral, guilt, abuse, offender</td></tr><tr><td>GPT3</td><td>words, letters, punctuation, grammar, synonym, dictionary, meaning, comma</td><td>prison, judge, jury, trial, justice, lawyer, court, evidence</td></tr></table>
245
+
246
+ Table 5: Comparison of the three different context word generation mechanism for the pun word 'sentence'. The table lists two sense definitions and the related words obtained from the sense definitions using reverse dictionary. For these related words, we obtain context words using three different mechanisms.
247
+
248
+ <table><tr><td>Sentence</td><td>Rank</td><td>Pun</td></tr><tr><td>What&#x27;s the interest rate on a home mortgage? No interest.</td><td>1</td><td>Yes</td></tr><tr><td>My bank said I think they&#x27;re interested in me. I said no.</td><td>2</td><td>No</td></tr><tr><td>My girlfriend said she had an interest in banking so i loan her a quarter</td><td>3</td><td>Yes</td></tr><tr><td>I have no interest in being a guardian. It&#x27;s free.</td><td>4</td><td>Maybe</td></tr><tr><td>I&#x27;ve never had interest placed on borrowings. It&#x27;s a waste of time.</td><td>5</td><td>Yes</td></tr><tr><td>Why did the republican attack the bank? Because it was in its interest.</td><td>6</td><td>Maybe</td></tr><tr><td>What is the republican&#x27;s strategy? The interest rate.</td><td>7</td><td>No</td></tr><tr><td>What is the most dispensable interest in investment?</td><td>8</td><td>No</td></tr><tr><td>If trump had an interest in president he would make it an president-of-interest.</td><td>9</td><td>No</td></tr></table>
249
+
250
+ Table 6: An example of candidate pun sentences ranked by the humor classifier. As can be seen, the model is able to rule out non-pun sentences but fails to pick out high-quality ones.
251
+
252
+ # D More Examples of Generated Puns
253
+
254
+ We compile more examples generated by AMBIPUN in Table 7 for the following pun words: sentence, case, bugs, delivery. This table further supports our claim that our approach would benefit from a better classification module for selecting human-like sentences.
255
+
256
+ # E GPT3 for context words generation
257
+
258
+ We make use of few-shot GPT3 to generate context words. The prompt to GPT3 includes two prompt-completion pairs. One example of a pair is "generate seven keywords for laptop: battery, macbook, internet, technology, keyboard, technology, portable". These examples are followed by the query "generate seven keywords for $X$ :", where $X$ is a related word. In this way, we generate seven keywords for each related word.
259
+
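+ A sketch of the prompt construction (one of the two example pairs is shown, taken verbatim from above; the completed string would be sent to GPT3 davinci-instruct-beta):
+
+ ```python
+ EXAMPLE = ("generate seven keywords for laptop: battery, macbook, internet, "
+            "technology, keyboard, technology, portable")
+
+ def context_word_prompt(related_word: str) -> str:
+     # In the paper, two such example pairs precede the query.
+     return f"{EXAMPLE}\n\ngenerate seven keywords for {related_word}:"
+ ```
+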
260
+ <table><tr><td>Target word</td><td>sentence</td></tr><tr><td>Sense 1</td><td rowspan="2">A string of words satisfying the grammatical rules of a language(Criminal law) a final judgment of guilty in a criminal case and the punishment that is imposed</td></tr><tr><td>Sense 2</td></tr><tr><td>1</td><td>The word jail is a sentence.</td></tr><tr><td>2</td><td>What&#x27;s the punishment for using antonyms in a sentence syntax is it a sentence?</td></tr><tr><td>3</td><td>I&#x27;m sorry I said the sentence was too long but punishments are endless.</td></tr><tr><td>4</td><td>The sentence in the dictionary doesn&#x27;t sound very guilty.</td></tr><tr><td></td><td></td></tr><tr><td>Target word</td><td>case</td></tr><tr><td>Sense 1</td><td>A portable container for carrying several objects</td></tr><tr><td>Sense 2</td><td>A statement of facts and reasons used to support an argument</td></tr><tr><td>1</td><td>What&#x27;s the most durable luggage for a detective? jury case</td></tr><tr><td>2</td><td>A jury just found a container of leather there&#x27;s no reason to argue it&#x27;s a case</td></tr><tr><td>3</td><td>What do you call a container used for investigation research? a case study</td></tr><tr><td>4</td><td>Why did the cardboard get into a trial? because it was an investigation case</td></tr><tr><td></td><td></td></tr><tr><td>Target word</td><td>bugs</td></tr><tr><td>Sense 1</td><td>General term for any insect or similar creeping or crawling invertebrate</td></tr><tr><td>Sense 2</td><td>A fault or defect in a computer program, system, or machine</td></tr><tr><td>1</td><td>Why did the garden restart its computer? it had bugs in it.</td></tr><tr><td>2</td><td>What do you call a pest that&#x27;s slow programmer? bugs bug</td></tr><tr><td>3</td><td>Why did the compost crash? it had bugs in it.</td></tr><tr><td>4</td><td>What do you call a bug that&#x27;s disgusting? a glitch in the internet</td></tr><tr><td></td><td></td></tr><tr><td>Target word</td><td>delivery</td></tr><tr><td>Sense 1</td><td>the act of delivering or distributing something (as goods or mail)</td></tr><tr><td>Sense 2</td><td>your characteristic style or manner of expressing yourself orally</td></tr><tr><td>1</td><td>What did the letter say to the parcel? clear delivery!</td></tr><tr><td>2</td><td>What do you call a trucking truckdriver with no articulation? delivery driver.</td></tr><tr><td>3</td><td>The distribution center has a pronunciation dictionary. it&#x27;s a delivery service</td></tr><tr><td>4</td><td>What do you call a parcel with no dialogue and an accent? delivery service.</td></tr></table>
261
+
262
+ Table 7: More examples generated by Ext AMBIPUN.
ambipungeneratinghumorouspunswithambiguouscontext/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af71b06828896de96ae268df66837695ea924f07ffdcefd520d1a25ff5ddc727
3
+ size 571906
ambipungeneratinghumorouspunswithambiguouscontext/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cb7660bb0ad5f50770cd29d936bc09ce56f3118db5a788d199e3ea0e08e6b145
3
+ size 261006
analyzingencodedconceptsintransformerlanguagemodels/acb8ed2b-8d9a-4c87-ab61-70550ef0179b_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:58ad7fbbfdbb7b859af89365c7bf1991a85052a90dca3a01055291a0a9bc8c89
3
+ size 122914
analyzingencodedconceptsintransformerlanguagemodels/acb8ed2b-8d9a-4c87-ab61-70550ef0179b_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb1b9a84df611cd1ebb82e9d0734a104f3511179b0d2076dc5361f1534701c89
3
+ size 150995
analyzingencodedconceptsintransformerlanguagemodels/acb8ed2b-8d9a-4c87-ab61-70550ef0179b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a53a295ef095e95eabdeadb84c5ea5fdb5b45f47a9ce4d8eb032f57a2ec4f6da
3
+ size 4974912
analyzingencodedconceptsintransformerlanguagemodels/full.md ADDED
@@ -0,0 +1,533 @@
 
 
 
 
1
+ # Analyzing Encoded Concepts in Transformer Language Models
2
+
3
+ Hassan Sajjad $^{\diamond}$ Nadir Durrani $^{\diamond}$ Fahim Dalvi $^{\diamond}$ Firoj Alam $^{\diamond}$ Abdul Rafae Khan $^{\dagger}$ Jia Xu $^{\dagger}$
4
+
5
+ {hsajjad,ndurrani,faimaduddin,fialam}@hbku.edu.qa
6
+
7
+ $^{\diamond}$ Qatar Computing Research Institute, HBKU Research Complex, Qatar
8
+
9
+ {akhan4, jxu70}@stevens.edu
10
+
11
+ †School of Engineering and Science, Stevens Institute of Technology, USA
12
+
13
+ # Abstract
14
+
15
+ We propose a novel framework, ConceptX, to analyze how latent concepts are encoded in representations learned within pre-trained language models. It uses clustering to discover the encoded concepts and explains them by aligning them with a large set of human-defined concepts. Our analysis of seven transformer language models reveals interesting insights: i) the latent space within the learned representations overlaps with different linguistic concepts to a varying degree, ii) the lower layers in the model are dominated by lexical concepts (e.g., affixation), whereas the core-linguistic concepts (e.g., morphological or syntactic relations) are better represented in the middle and higher layers, iii) some encoded concepts are multi-faceted and cannot be adequately explained using the existing human-defined concepts.
16
+
17
+ # 1 Introduction
18
+
19
+ Contextualized word representations learned in deep neural network models (DNNs) capture rich concepts, making them ubiquitous for transfer learning towards downstream NLP tasks. Despite this success, the blackbox nature of deep NLP models is a major bottleneck for their large-scale adaptability. Understanding the inner dynamics of these models is important to ensure fairness, robustness, reliability and control.
20
+
21
+ A plethora of research has been carried out to probe DNNs for the linguistic knowledge (e.g., morphology, syntactic and semantic roles) captured within the learned representations. A commonly used framework to gauge how well linguistic information can be extracted from these models is the Probing Framework (Hupkes et al., 2018), in which an auxiliary classifier is trained using representations as features to predict the property of interest. The performance of the classifier reflects the
22
+
23
+ amount of knowledge learned within the representations. To this end, researchers have analyzed what knowledge is learned within the representations through relevant extrinsic phenomena, varying from word morphology (Vylomova et al., 2016; Belinkov et al., 2017a) to high-level concepts such as syntactic structure (Blevins et al., 2018; Marvin and Linzen, 2018) and semantics (Qian et al., 2016; Reif et al., 2019; Belinkov et al., 2017b) or more generic properties (Adi et al., 2016; Rogers et al., 2020).
24
+
25
+ In this work, we approach the representation analysis from a different angle and present a novel framework ConceptX. In contrast to relying on the prediction capacity of the representations, we analyze the latent concepts learned within these representations and how knowledge is structured, using an unsupervised method. More specifically, we question: i) do the representations encode knowledge inline with linguistic properties such as word morphology and semantics? ii) which properties dominate the overall structure in these representations? iii) does the model learn any novel concepts beyond linguistic properties? Answers to these questions reveal how deep neural network models structure language information to learn a task.
26
+
27
+ Our inspiration to use the term concept comes from "concept based explanation" in computer vision (Kim et al., 2018; Ghorbani et al., 2019; Chen et al., 2020). Stock (2010) defined a concept as "a class containing certain objects as elements, where the objects have certain properties". We define an encoded concept as a cluster of context-aware latent representations of words, where the representations are encoder layer outputs.
28
+
29
+ Our framework clusters contextualized representations using agglomerative hierarchical clustering (Gowda and Krishna, 1978). The resulting clusters represent the encoded concepts captured within the learned representations (please see Figure 1 for an illustration). We then use a novel alignment function, described next.
30
+
31
+ ![](images/cb188dc1342d9d6ff246cfd9cf7d1f3157d1424e3d56f42a4c243bba1c9c135c.jpg)
32
+ Figure 1: ConceptX: i) Extract representations from trained model, ii) Cluster the representations to obtain encoded concepts, iii) Align the concepts to human-defined concepts
33
+
34
+ The alignment function measures the amount of overlap between encoded concepts and a range of predefined categories (which we call human-defined concepts in this paper). We experimented with affixes, casing, morphological, syntactic, semantic, WordNet (Miller, 1995), and psycholinguistic concepts (LIWC; Pennebaker et al., 2001). The use of such a diverse set of human-defined concepts enables us to cover various abstractions of language. In Figure 3 we present a few examples of human-defined concepts that were aligned with the encoded concepts.
35
+
36
+ We carry out our study on seven pre-trained transformer models such as BERT (Devlin et al., 2019) and XLM-RoBERTa (Conneau et al., 2020), with varying optimization functions, architectural details and training data. Some notable findings emerging from our analysis are as follows:
37
+
38
+ - Shallow concepts such as lexical ngrams or suffixes are predominantly captured in the lower layers of the network.
39
+ - WordNet and psycholinguistic-based concepts (LIWC) are also learned in the lower layers.
40
+ - Middle and higher layers encode concepts that capture core linguistic properties such as morphology, semantics and syntax.
41
+ - Roughly $50\%$ of the encoded concepts adhere to our suite of human-defined linguistic concepts.
42
+ - The models learn novel concepts that are multi-faceted and cannot be adequately explained using the existing human-defined concepts.
43
+
44
+ Our contributions in this paper are as follows: i) We present ConceptX, a framework that interprets encoded concepts in the learned representations by measuring their alignment to human-defined concepts. ii) We provide qualitative and quantitative evidence of how knowledge is structured within deep NLP models with respect to a large suite of human-defined concepts.
45
+
46
+ # 2 Related Work
47
+
48
+ Most of the work done on interpretability in deep NLP addresses two questions in particular: (i) what linguistic (and non-linguistic) knowledge is learned within contextualized representations, Concept Analysis, and (ii) how this information is utilized in the decision making process, Attribution Analysis (Sajjad et al., 2021). The former thrives on post-hoc decomposability, where we analyze representations to uncover linguistic phenomena that are captured as the network is trained towards an NLP task (Adi et al., 2016; Conneau et al., 2018; Liu et al., 2019a; Tenney et al., 2019; Belinkov et al., 2020), and the latter characterizes the role of model components and input features towards a specific prediction (Linzen et al., 2016; Gulordava et al., 2018; Marvin and Linzen, 2018). Our work falls into the former category.
49
+
50
+ Previous studies have explored visualization methods to analyze the learned representations (Karpathy et al., 2015; Kádár et al., 2017), attention heads (Clark et al., 2019; Vig, 2019), language compositionality (Li et al., 2016) etc. A more commonly used framework analyzes representations by correlating parts of the neural network with linguistic properties, by training a classifier to predict a
51
+
52
+ feature of interest (Adi et al., 2016; Belinkov et al., 2017a; Conneau et al., 2018). Several researchers used probing classifiers for investigating the contextualized representations learned from a variety of neural language models on a variety of character- (Durrani et al., 2019), word- (Liu et al., 2019a) or sub-sentence level (Tenney et al., 2019) linguistic tasks. Rather than analyzing the representations as a whole, several researchers also explored identifying salient neurons within the model that capture different properties (Dalvi et al., 2019a; Durrani et al., 2020; Suau et al., 2020; Mu and Andreas, 2020) or are salient for the model irrespective of the property (Bau et al., 2019; Wu et al., 2020).
53
+
54
+ Our work is in line with Michael et al. (2020) and Dalvi et al. (2022), who analyzed latent concepts learned in pre-trained models. Michael et al. (2020) used a binary classification task to induce latent concepts relevant to a task and showed the presence of linguistically motivated and novel concepts in the representation. However, different from them, we analyze representations in an unsupervised fashion. Dalvi et al. (2022) used human-in-the-loop to analyze latent spaces in BERT. Our framework uses human-defined concepts to automatically generate explanations for the latent concepts. This enabled us to scale our study to many transformer models.
55
+
56
+ In a similar work, Mamou et al. (2020) applied a manifold analysis technique to understand the amount of information stored about object categories per unit. Our approach does away with the methodological limitations of the probing framework, such as the complexity of the probes, the effect of randomness, etc. (Belinkov, 2021). However, it is important to mention that the two frameworks are orthogonal and complement each other.
57
+
58
+ # 3 Methodology
59
+
60
+ A vector representation in the neural network model is composed of feature attributes of the input words. We group the encoded vector representations using a clustering approach discussed below. The underlying clusters, which we term the encoded concepts, are then matched with the human-defined concepts using an alignment function. Formally, consider a Neural Network (NN) model $\mathbb{M}$ with $L$ encoder layers $\{l_1, l_2, \dots, l_L\}$, with $H$ hidden nodes per layer. An input sentence consisting of $M$ words $w_1, w_2, \ldots, w_i, \ldots, w_M$ is fed into the NN. For each input word $w_i$, we compute the node output (after applying the activation
61
+
62
+ functions) $y_{h}^{l}(w_{i})$ of every hidden node $h\in \{1,\dots ,H\}$ in each layer $l$, where $\vec{y}^l (w_i)$ is the vector representation composing the outputs of all hidden nodes in layer $l$ for $w_{i}$. Our goal is to cluster the representations $\vec{y}^l$ from a large training dataset to obtain the encoded concepts. We then align these with various human-defined concepts to obtain an explanation of them and to build an understanding of how these concepts are represented across the network.
63
+
64
+ # 3.1 Clustering
65
+
66
+ We use agglomerative hierarchical clustering (Gowda and Krishna, 1978), which we found to be effective for this task. It assigns each word to a separate cluster and then iteratively combines clusters based on Ward's minimum variance criterion, which minimizes intra-cluster variance. The distance between two representations is calculated with the squared Euclidean distance. The algorithm terminates when the required $K$ clusters (aka encoded concepts) are formed, where $K$ is a hyperparameter. Each encoded concept represents a latent relationship between the words present in the cluster. Appendix C presents the algorithm.
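+
+ The clustering step maps directly onto scikit-learn's AgglomerativeClustering, which implements this Ward-linkage scheme over Euclidean distances. The sketch below is illustrative: the array shape and the random stand-in data are assumptions, and $K$ follows the value chosen in Section 4.3.
+
+ ```python
+ import numpy as np
+ from sklearn.cluster import AgglomerativeClustering
+
+ # Stand-in for contextualized representations from one encoder layer l:
+ # one row per word occurrence, shape (num_tokens, hidden_size).
+ reps = np.random.randn(5000, 768)
+
+ K = 1000  # number of encoded concepts (hyperparameter, see Section 4.3)
+ clustering = AgglomerativeClustering(n_clusters=K, linkage="ward")
+ labels = clustering.fit_predict(reps)
+
+ # labels[i] is the encoded concept of token i; grouping tokens by label
+ # yields the clusters that are later aligned with human-defined concepts.
+ ```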
67
+
68
+ # 3.2 Alignment
69
+
70
+ Now we define the alignment function between the encoded and human-defined concepts. Consider a human-defined concept $z$, where the function $z(w) = z$ denotes that $z$ is the human-defined concept of word $w$. For example, parts-of-speech is a human-defined concept and each tag such as noun, verb etc. represents a class/label within the concept, e.g. $z(sea) = noun$. Similarly, suffix is a human-defined concept with various suffixes representing a class, e.g. $z(bigger) = er$. The reverse function of $z$ is a one-to-many function that outputs the set of unique words with the given human-defined concept, i.e., $z^{-1}(z) = \{w_1, w_2, \ldots, w_J\}$, like $z^{-1}(noun) = \{sea, tree, \ldots\}$, where $J$ is the total number of words with the human-defined concept $z$. Following this notation, an encoded concept is denoted as $c$, where $c(w) = c$ is the function applying the encoded concept to $w$, and its reverse function outputs the set of unique words with the encoded concept $c$, i.e., $c^{-1}(c) = \{w_1, w_2, \ldots, w_I\}$, where $I$ is the set size.
71
+
72
+ To align the encoded concepts with the human-defined concepts, we auto-annotate the input data that we used to obtain the clusters with the human-defined concepts. We call an encoded concept $(c)$
73
+
74
+ to be $\theta$ -aligned $(\Lambda_{\theta})$ with a human-defined concept $(z)$ as follows:
75
+
76
+ $$
77
+ \Lambda_{\theta}(z, c) = \begin{cases} 1, & \text{if } \frac{\sum_{w' \in z^{-1}(z)} \sum_{w \in c^{-1}(c)} \delta(w, w')}{J} \geq \theta \\ 0, & \text{otherwise} \end{cases}
78
+ $$
79
+
80
+ where the Kronecker function $\delta(w, w')$ is defined as
81
+
82
+ $$
83
+ \delta(w, w') = \begin{cases} 1, & \text{if } w = w' \\ 0, & \text{otherwise} \end{cases}
84
+ $$
85
+
86
+ We compute $c$ and $\Lambda_{\theta}(z,c)$ for the encoder output of each layer $l$ of the neural network. To compute a network-wise alignment, we simply average the $\theta$-agreement over layers.
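+
+ The alignment function reduces to set operations over word types. The sketch below is a direct transcription of the equations above, with illustrative data structures of our own choosing; note that the equation normalizes by $J$, whereas normalizing by the cluster size would match the example in Appendix C.1.
+
+ ```python
+ def theta_aligned(z_words, c_words, theta=0.9):
+     """Lambda_theta(z, c): z_words = z^{-1}(z) (the J words carrying the
+     human-defined concept z) and c_words = c^{-1}(c) (the words in the
+     encoded concept c). The double sum over the Kronecker delta is just
+     the size of the intersection."""
+     overlap = len(set(z_words) & set(c_words))
+     return overlap / len(z_words) >= theta  # per the equation; len(c_words) would match App. C.1
+
+ def layer_alignment(human_concepts, encoded_concepts, theta=0.9):
+     """Percentage of encoded concepts in one layer that align with at
+     least one human-defined concept (cf. Section 5.1); averaging this
+     over layers gives the network-wise score."""
+     matched = sum(
+         any(theta_aligned(z, c, theta) for z in human_concepts.values())
+         for c in encoded_concepts
+     )
+     return 100.0 * matched / len(encoded_concepts)
+
+ # Toy usage:
+ human = {"POS:NN": {"sea", "tree", "dog"}}
+ clusters = [{"sea", "tree", "dog", "cat"}, {"run", "walk"}]
+ print(layer_alignment(human, clusters))  # 50.0
+ ```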
87
+
88
+ # 4 Experimental Setup
89
+
90
+ # 4.1 Dataset
91
+
92
+ We used a subset of the WMT News 2018 dataset (359M tokens). We randomly selected 250k sentences from the dataset ($\approx 5\mathrm{M}$ tokens) to train our clustering model. We discarded words with a frequency of less than 10 and selected at most 10 occurrences per word type. The final dataset consists of 25k word types with 10 contexts per word.
93
+
94
+ # 4.2 Pre-trained Models
95
+
96
+ We carried out our analysis on various 12-layered transformer models such as BERT-cased (BERT-c, Devlin et al., 2019), BERT-uncased (BERT-uc), RoBERTa (Liu et al., 2019b), XLNet (Yang et al., 2019) and ALBERT (Lan et al., 2019). We also analyzed multilingual models such as multilingual BERT-cased (mBERT) and XLM-RoBERTa (XLM-R, Conneau et al., 2020), where the embedding space is shared across many languages. This choice of models is motivated by interesting differences in their architectural designs, training data settings (cased vs. uncased) and multilinguality.
97
+
98
+ # 4.3 Clustering and Alignment
99
+
100
+ We extract contextualized representations of words by performing a forward pass over the network using the NeuroX toolkit (Dalvi et al., 2019b).
101
+
102
+ We cluster the representations from every layer into $K$ groups. To find an optimum value of $K$, we experimented with the Elbow (Thorndike, 1953) and Silhouette (Rousseeuw, 1987) methods. However, we did not observe reliable results (see Appendix C). Therefore, we empirically selected $K = 1000$ based on finding a decent balance between many small clusters (over-clustering) and a few large clusters (under-clustering). We found that our results are not sensitive to this parameter and generalize to different cluster settings (see Section 5.4). For the alignment between encoded and human-defined concepts, we use $\theta = 90\%$, i.e., we consider an encoded concept and a human-defined concept to be aligned if they have at least a $90\%$ match.
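+
+ The paper extracts representations with the NeuroX toolkit; the snippet below shows an equivalent, simplified extraction with the HuggingFace transformers library (a substitution for illustration, not the paper's pipeline). Subword vectors would still need to be aggregated into word-level representations before clustering.
+
+ ```python
+ import torch
+ from transformers import AutoModel, AutoTokenizer
+
+ name = "bert-base-cased"  # BERT-c; the other six models can be swapped in
+ tokenizer = AutoTokenizer.from_pretrained(name)
+ model = AutoModel.from_pretrained(name, output_hidden_states=True)
+ model.eval()
+
+ enc = tokenizer("Obama visited Paris .", return_tensors="pt")
+ with torch.no_grad():
+     out = model(**enc)
+
+ # out.hidden_states: tuple of 13 tensors (embedding layer + 12 encoder
+ # layers), each of shape (1, seq_len, hidden_size). hidden_states[l][0]
+ # holds the per-token vectors fed to the clustering step for layer l.
+ layer_l = out.hidden_states[9][0]
+ ```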
103
+
104
+ # 4.4 Human-defined concepts
105
+
106
+ We experiment with various human-defined concepts, which we categorize into four groups:
107
+
108
+ - Lexical Concepts: Ngrams, Affixes, Casing, First and the Last Word (in a sentence)
109
+ - Morphology and Semantics: POS tags (Marcus et al., 1993) and SEM tags (Abzianidze et al., 2017)
110
+ - Syntactic: Chunking tags (Tjong Kim Sang and Buchholz, 2000) and CCG super-tags (Hockenmaier, 2006)
111
+ - Linguistic Ontologies: WordNet (Miller, 1995) and LIWC (Pennebaker et al., 2001)
112
+
113
+ At various places in this paper, we also refer to the Morphology, Semantics and Syntactic concepts as core-linguistic concepts. We trained BERT-based classifiers using gold-annotated training data and standard splits for each core-linguistic concept, and auto-labelled the selected news dataset using these classifiers.
114
+
115
+ # 5 Analysis
116
+
117
+ In this section, we analyze the encoded concepts by aligning them with the human-defined concepts.
118
+
119
+ # 5.1 Overall Alignment
120
+
121
+ First we present to what extent the encoded concepts in the entire network align with the human-defined concepts. We compute the overall score as the percentage of encoded concepts aligned to the human-defined concepts across layers, using the function described in Section 3.2.
122
+
123
+ <table><tr><td></td><td>BERT-c</td><td>BERT-uc</td><td>mBERT</td><td>XLM-R</td><td>RoBERTa</td><td>ALBERT</td><td>XLNet</td></tr><tr><td>Overall alignment</td><td>47.2%</td><td>50.4%</td><td>66.0%</td><td>72.4%</td><td>50.1%</td><td>51.6%</td><td>43.6%</td></tr></table>
124
+
125
+ Table 1: Coverage of human-defined concepts across all clusters of a given model
126
+
127
+ ![](images/d862fe4f5f15e6330671148b5c1fb5d3dc9ffc5f616e3e179f287943626a394d.jpg)
128
+ Figure 2: Average Alignment $(\%)$ between encoded concepts and human-defined concepts
129
+
130
+ We found an overall match of at least $43.6\%$ in XLNet and at most $72.4\%$ in XLM-R (see Table 1). Interestingly, the multilingual models (mBERT and XLM-R) showed a substantially higher match than the monolingual models. The inclusion of multiple languages during training causes the model to learn more linguistic properties. Note that the extent of alignment with the human-defined concepts may not necessarily correlate with a model's overall performance. For example, XLNet outperforms BERT on the GLUE tasks, but aligns less with the human-defined concepts compared to BERT in our results. A similar observation was made by Belinkov et al. (2020), who also found that the translation quality of an NMT model may not correlate with the amount of linguistic knowledge learned in the representation. Various factors, such as architectural design, training data, objective function and initialization, play a role in training a pre-trained model. More controlled experiments are needed to understand the effect of each factor on the performance of the model and on the linguistic learning of the model.
131
+
132
+ We further investigated the per-concept alignment
133
+
134
+ to understand which human-defined concepts are better represented within the encoded concepts. Figure 2 presents the results.
135
+
136
+ **Lexical Concepts** Pre-trained models encode varying amounts of lexical concepts such as casing, ngrams and suffixes. We found that between $7$ and $11\%$ of encoded concepts align with the casing concept (title case or upper case). We observed that most of these encoded concepts consist of named entities, which were grouped together based on semantics.
137
+
138
+ **Comparing suffixes and ngrams** While affixes often have a linguistic connotation (e.g., the prefix anti negates the meaning of the stem and the suffix ies is used for pluralization), the ngram units that become part of the vocabulary as an artifact of statistical segmentation (e.g., using BPE (Sennrich et al., 2016) or WordPiece (Schuster and Nakajima, 2012)) often lack any linguistic meaning. However, models learn to encode such information. We found a match ranging from $1\%$ (BERT-cased) up to $25\%$ (XLM-R) when comparing encoded concepts with the suffix concept. A similar pattern is observed in the case of the ngram concept (which is a super-set of the suffix concept), where a staggering $48\%$ of matches were found. Figure 6a shows an ngram cluster found in layer 2 of BERT-c.
139
+
140
+ **Morphology and Semantics** We found that the encoded concepts based on word morphology (POS) consistently showed a higher match across all models in comparison to the other abstract concepts, aligning a quarter of the encoded concepts in the case of mBERT. The alignment with semantic concepts is relatively lower, with at most a $16\%$ match across models. This reflects that while the models learn both linguistic properties, the morphological ontology is relatively preferred over the semantic hierarchy.
141
+
142
+ **Syntactic** These concepts capture the grammatical orientation of a word; for example, Chunking:B-NP is a syntactic concept describing words in the beginning of a noun phrase. CCG:PP/NP is a concept
143
+
144
+ ![](images/2b8d1396e5c05c0babd8bc16cdfc17a443955a0f02fe70f535f46c4172177b5b.jpg)
145
+ Figure 3: Examples of BERT-c encoded concepts aligned with the human-defined concepts
146
+
147
+ in CCG super tagging, describing words that take a noun phrase on the right and output a prepositional phrase, for example "[in [the US]]". We found relatively fewer matches: a maximum of $7\%$ and $14\%$ matching encoded concepts for the Chunking and CCG concepts, respectively. The low matches for syntactic concepts suggest that the models do not encode the same syntactic hierarchy suggested by these human-defined syntactic tasks.
148
+
149
+ **Linguistic Ontologies** Comparing the encoded concepts with static linguistic ontologies, we found WordNet to be the second most aligned human-defined concept (11-21%). LIWC also shows a relatively higher alignment compared to the other human-defined concepts in a few models (e.g., BERT-c). However, this observation is not consistent across models, and we found a range of 5-16% matches. These results present an interesting case where several models prefer the distinctions of a lexical ontology over abstract linguistic concepts such as morphology. Figure 3 shows examples of encoded concepts aligned with WordNet and LIWC. We see that these concepts are built based on a semantic relationship, e.g., the clusters in Figures 3b, 3c and 3d group words based on religious, facial anatomy, and specific motion-related vocabulary respectively.
150
+
151
+ **Comparing Models** The results of the multilingual models (mBERT, XLM-R) are intriguing given that their encoded concepts are dominated by ngram-based and POS concepts, and given their relatively lower alignment with the linguistic ontologies. On the contrary, several monolingual models (BERT-c, ALBERT) showed a better match with linguistic ontologies, especially WordNet.
152
+
153
+ The higher number of matches to the ngram (and suffix) concepts in the multilingual models is due to the difference in subword segmentation. The subword models in XLM-R and mBERT are optimized for multiple languages, resulting in a vocabulary
154
+
155
+ consisting of a large number of small ngram units. This causes the multilingual models to segment the input sequence more aggressively than the monolingual models, resulting in encoded concepts highly dominated by ngrams, especially in the lower layers. This may also explain the relatively lower match that multilingual models exhibit to the linguistic ontologies. We discuss this further in the context of layer-wise analysis in Section 5.2.
156
+
157
+ Comparing BERT cased vs. uncased, BERT-uc interestingly showed consistently higher matches for the core-linguistic concepts (see Figure 2). We speculate that in the absence of casing information, BERT-uc is forced to learn more linguistic concepts, whereas BERT-c leverages the explicit casing information to capture more semantically motivated concepts based on linguistic ontologies.
158
+
159
+ The higher matches in multilingual models in comparison to the monolingual models, and in BERT-uncased in comparison to BERT-cased, suggest that training complexity is one factor that plays a role in a model's ability to learn linguistic nuances. For example, multilingual models need to optimize for many languages, which is a harder task compared to learning one language. Similarly, the absence of capitalization in the training data makes the learning task relatively harder for BERT-uc compared to BERT-c, thus resulting in higher matches for BERT-uc. We speculate that the harder the training task, the more language nuances are learned by a model. Belinkov et al. (2020) made a similar observation, where they showed that the linguistic knowledge learned within the encoder-decoder representations in NMT models correlates with the complexity of the language pair involved in the task.
160
+
161
+ # 5.2 Layer-wise Alignment
162
+
163
+ Now we study the alignment of human-defined concepts across layers to understand how concepts
164
+
165
+ ![](images/11655874faa5e79b872c55501a3abd3e4df11cbe920ab4fdaea7b0cca3cc296e.jpg)
166
+ Figure 4: Layer-wise concept alignment. The y-axis is the normalized number of aligned concepts. The number in brackets next to each human-defined concept, e.g. Casing (166), shows the maximum layer-wise match
167
+
168
+ evolve in the network. Figure 4 shows results for selected models. The y-axis is the normalized number of aligned concepts across layers.
169
+
170
+ **Overall Trend** We observed mostly consistent patterns across models except for ALBERT, which we will discuss later in this section. We found that the shallow concepts (such as ngrams and suffixes) and the linguistic ontologies (LIWC and WordNet) are better represented in the initial layers and exhibit a downward trend in the higher layers of the network. On the contrary, the core-linguistic concepts (POS, Chunking, etc.) are better
171
+
172
+ represented in the higher layers (layers 8-10). None of the human-defined concepts considered in this work consistently dominates the last layers. We can generalize these trends and hypothesize on how encoded concepts evolve in the network: the initial layers of the pre-trained models group words based on their lexical and semantic similarities, where the former is an artifact of subword segmentation. With the inclusion of context and abstraction in the higher layers, these groups evolve into linguistic manifolds. The encoded concepts in the last layers are influenced by the objective function and learn concepts relevant to the task. Durrani et al. (2021) made a similar observation
173
+
174
+ when analyzing linguistic concepts in pre-trained models that are fine-tuned towards different GLUE tasks.
175
+
176
+ **Concept-wise Trend** In the following, we discuss the different concepts in detail. As mentioned earlier, the high presence of ngram and suffix concepts in the lower layers is due to subword segmentation. At the higher layers, the models start encoding abstract concepts and therefore align better with the core-linguistic concepts. Casing is an exception among the lexical concepts and follows a trend similar to POS and SEM. Upon investigation, we observed that the words appearing in these clusters have a hybrid connotation: for example, more than $98\%$ of the encoded concepts that match with Casing are named entities, which explains the trend. The syntactic concepts peak in the higher-middle layers and show a downward trend towards the end. These findings resonate with earlier work on interpreting neural network representations for BERT. For example, Liu et al. (2019a) also showed that probes trained on layers 7-8 give the highest accuracy when trained towards predicting the tasks of Chunking and CCG tagging. Although here we are targeting a slightly different question, i.e., how the latent concepts are encoded within the representations and how they evolve from the input to the output layers of the network.
177
+
178
+ We observed a downward trend for the linguistic ontologies (WordNet, LIWC) as we go from lower to higher layers, as opposed to the core-linguistic concepts (POS, CCG, etc.). This is because of the context-independent nature of these concepts, as opposed to the core-linguistic concepts, which are annotated based on context. The embedding layer is non-contextualized and thus shows a high match with the linguistic ontologies. With the availability of context in the contextualized layers, the encoded concepts evolve into context-aware groups, resulting in higher matches with the core-linguistic concepts.
179
+
180
+ **Comparing Models** While the overall trend is consistent among BERT-uc, mBERT and XLNet (and the other studied models; Figure 10 in the Appendix), the models somewhat differ in the last layers: see the large drop in core-linguistic concepts such as POS and Chunking for XLNet and mBERT in comparison to BERT. This suggests that BERT retains much of the core-linguistic information in the last layers. Durrani et al. (2020) observed a similar pattern in their study, where they showed BERT to
181
+
182
+ retain linguistic information deeper in the model, as opposed to XLNet, where it was more localized and predominantly preserved earlier in the network.
183
+
184
+ While the overall layer-wise trends of the multilingual models look similar to some monolingual models (mBERT vs. XLNet in Figures 4b,c), the former's absolute layer-wise matches (the numbers inside the brackets in Figure 4, e.g., Casing (166)) are generally substantially higher than those of the monolingual counterparts. For example, the POS and SEM matches of mBERT are $38.9\%$ and $30\%$ respectively, which are $18\%$ and $15\%$ higher than BERT-uc. On the contrary, the number of matches with linguistic ontologies is often lower for multilingual models (mBERT LIWC alignment of 65 vs. BERT-uc alignment of 186). We hypothesize that the variety of the training languages in terms of their morphological and syntactic structure has caused the multilingual models to learn more core-linguistic concepts in order to optimize the training task. Although the knowledge captured within linguistic ontologies is essential, it may not be as critical to the training of the model as the core-linguistic concepts.
185
+
186
+ ALBERT showed a very different trend from the other models. Note that ALBERT shares parameters across layers, while the other models have separate parameters for every layer. This explains the ALBERT results, where we see relatively little variation across layers. More interestingly, the encoded concepts in the last layers of ALBERT showed the presence of all human-defined concepts considered here (see the relatively smaller drop of the ALBERT alignment curves in Figure 4).
187
+
188
+ # 5.3 Unaligned Concepts
189
+
190
+ In Table 1 we observed that at least $27.6\%$ (in XLM-R) and up to $56.4\%$ (in XLNet) of the encoded concepts did not align with the human-defined concepts. What concepts do these unaligned clusters contain? In an effort to answer this question, we analyzed these clusters and observed that many of them were compositional concepts that involve more than one fine-grained category of the human-defined concepts. Figure 5a shows an example of an unaligned concept which partly aligns with a semantic category (SEM:geopolitical entity) and a morphological category (POS:adjective). Similarly, Figure 5b shows verbs related to cognitive processes, and Figure 5c shows an unaligned cluster that is composed of different verb forms (past, present and gerunds). The alignment with multiple human-
191
+
192
+ ![](images/5ccb29f8ceb8730201a7cc508198f2336cdfaa562611d33f4c12be1162b7df9a.jpg)
193
+ (a) SEM:GPE+POS:JJ
194
+
195
+ ![](images/ef0572b5435902ad057c7373afc6bfa3e10faac913b6fd8a5715ae09698ff6b7.jpg)
196
+ (b) POS:VB\*+LIWC:cogmech
197
+ Figure 5: Examples of unaligned encoded concepts: (a) combination of geopolitical entities and their related adjectives, (b,c) different forms of verbs with specific semantics
198
+
199
+ ![](images/3393876cac83a4919e0acc2651dcd60d1485b5b18e0edc9d92357754550509d4.jpg)
200
+ (c) POS:VB*
201
+
202
+ defined concepts can be used to generate explanations for these unaligned concepts. For example, Figure 5a can be explained as a mix of geopolitical entities and adjectives. We also quantitatively verified the number of unaligned encoded concepts that can be explained using a composition of different concepts (see Appendix E: Table 9) and found that a majority of the clusters can be explained using a combination of three pre-defined concepts.
203
+
204
+ Moreover, note that encoded concepts are often multifaceted, i.e., they represent more than one relationship. For example, the encoded concept in Figure 5c consists of different forms of verbs, but at the same time these verbs are semantically similar. The semantic relationship present here is not adequately captured using the human-defined concepts used in this work. These are novel concepts that require richer annotations or a human-in-the-loop setup to generate adequate explanations.
205
+
206
+ # 5.4 Generalization of Results
207
+
208
+ Do the results generalize across different dataset selections and different numbers of clusters? We ran experiments using a different split of the news dataset for several models, and also performed the alignment using different values of $K$, the number of clusters. The results are consistent across the board; please see Appendix F for details.
209
+
210
+ # 6 Conclusion
211
+
212
+ We presented ConceptX, a novel framework for analyzing the encoded concepts within deep NLP models. Our method uses unsupervised clustering to discover latent concepts within the contextualized representations and then aligns these concepts with a suite of human-defined concepts to generate explanations for them. Our results illuminate how DNNs structure language information. A few notable findings are: i) lower layers capture
213
+
214
+ shallow linguistic concepts, ii) abstract linguistic concepts such as morphology and semantics are preserved higher in the network, iii) the extent of alignment varies across different models and different human-defined concepts, and iv) novel explanations and an improved coverage of concepts can be achieved via compositionality.
215
+
216
+ # References
217
+
218
+ Lasha Abzianidze, Johannes Bjerva, Kilian Evang, Hessel Haagsma, Rik van Noord, Pierre Ludmann, DucDuy Nguyen, and Johan Bos. 2017. The parallel meaning bank: Towards a multilingual corpus of translations annotated with compositional meaning representations. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics, EACL '17, pages 242-247, Valencia, Spain.
219
+ Yossi Adi, Einat Kermany, Yonatan Belinkov, Ofer Lavi, and Yoav Goldberg. 2016. Fine-grained Analysis of Sentence Embeddings Using Auxiliary Prediction Tasks. arXiv preprint arXiv:1608.04207.
220
+ Anthony Bau, Yonatan Belinkov, Hassan Sajjad, Nadir Durrani, Fahim Dalvi, and James Glass. 2019. Identifying and controlling important neurons in neural machine translation. In International Conference on Learning Representations.
221
+ Yonatan Belinkov. 2021. Probing classifiers: Promises, shortcomings, and alternatives. CoRR, abs/2102.12452.
222
+ Yonatan Belinkov, Nadir Durrani, Fahim Dalvi, Hassan Sajjad, and James Glass. 2017a. What do Neural Machine Translation Models Learn about Morphology? In Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics (ACL), Vancouver. Association for Computational Linguistics.
223
+ Yonatan Belinkov, Nadir Durrani, Fahim Dalvi, Hassan Sajjad, and James Glass. 2020. On the linguistic representational power of neural machine translation models. Computational Linguistics, 45(1):1-57.
224
+
225
+ Yonatan Belinkov, Lluís Márquez, Hassan Sajjad, Nadir Durrani, Fahim Dalvi, and James Glass. 2017b. Evaluating Layers of Representation in Neural Machine Translation on Part-of-Speech and Semantic Tagging Tasks. In Proceedings of the 8th International Joint Conference on Natural Language Processing (IJCNLP).
226
+ Terra Blevins, Omer Levy, and Luke Zettlemoyer. 2018. Deep RNNs encode soft hierarchical syntax. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 14-19, Melbourne, Australia. Association for Computational Linguistics.
227
+ Zhi Chen, Yijie Bei, and Cynthia Rudin. 2020. Concept whitening for interpretable image recognition. Nature Machine Intelligence, 2(12):772-782.
228
+ Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? an analysis of BERT's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286, Florence, Italy. Association for Computational Linguistics.
229
+ Alexis Conneau, Kartikay Khandelwal, Naman Goyal, Vishrav Chaudhary, Guillaume Wenzek, Francisco Guzmán, Edouard Grave, Myle Ott, Luke Zettlemoyer, and Veselin Stoyanov. 2020. Unsupervised cross-lingual representation learning at scale. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 8440-8451. Association for Computational Linguistics.
230
+ Alexis Conneau, German Kruszewski, Guillaume Lample, Loic Barrault, and Marco Baroni. 2018. What you can cram into a single vector: Probing sentence embeddings for linguistic properties. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (ACL).
231
+ Fahim Dalvi, Nadir Durrani, Hassan Sajjad, Yonatan Belinkov, D. Anthony Bau, and James Glass. 2019a. What is one grain of sand in the desert? analyzing individual neurons in deep nlp models. In Proceedings of the Thirty-Third AAAI Conference on Artificial Intelligence (AAAI, Oral presentation).
232
+ Fahim Dalvi, Abdul Rafae Khan, Firoj Alam, Nadir Durrani, Jia Xu, and Hassan Sajjad. 2022. Discovering latent concepts learned in BERT. In International Conference on Learning Representations.
233
+ Fahim Dalvi, Avery Nortonsmith, D. Anthony Bau, Yonatan Belinkov, Hassan Sajjad, Nadir Durrani, and James Glass. 2019b. Neurox: A toolkit for analyzing individual neurons in neural networks. In AAAI Conference on Artificial Intelligence (AAAI).
234
+ Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of
235
+
236
+ the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), Minneapolis, Minnesota. Association for Computational Linguistics.
237
+ Nadir Durrani, Fahim Dalvi, Hassan Sajjad, Yonatan Belinkov, and Preslav Nakov. 2019. One size does not fit all: Comparing NMT representations of different granularities. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1504-1516, Minneapolis, Minnesota. Association for Computational Linguistics.
238
+ Nadir Durrani, Hassan Sajjad, and Fahim Dalvi. 2021. How transfer learning impacts linguistic knowledge in deep NLP models? In *Findings of the Association for Computational Linguistics: ACL-IJCNLP* 2021, pages 4947-4957, Online. Association for Computational Linguistics.
239
+ Nadir Durrani, Hassan Sajjad, Fahim Dalvi, and Yonatan Belinkov. 2020. Analyzing individual neurons in pre-trained language models. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 4865-4880, Online. Association for Computational Linguistics.
240
+ Amirata Ghorbani, James Wexler, James Y Zou, and Been Kim. 2019. Towards automatic concept-based explanations. Advances in Neural Information Processing Systems, 32:9277-9286.
241
+ K Chidananda Gowda and G Krishna. 1978. Agglomerative clustering using the concept of mutual nearest neighbourhood. Pattern recognition, 10(2):105-112.
242
+ Kristina Gulordava, Piotr Bojanowski, Edouard Grave, Tal Linzen, and Marco Baroni. 2018. Colorless green recurrent networks dream hierarchically. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 1195-1205, New Orleans, Louisiana. Association for Computational Linguistics.
243
+ Julia Hockenmaier. 2006. Creating a CCGbank and a wide-coverage CCG lexicon for German. In Proceedings of the 21st International Conference on Computational Linguistics and 44th Annual Meeting of the Association for Computational Linguistics, ACL '06, pages 505-512, Sydney, Australia.
244
+ Dieuwke Hupkes, Sara Veldhoen, and Willem Zuidema. 2018. Visualisation and 'diagnostic classifiers' reveal how recurrent and recursive neural networks process hierarchical structure. Journal of Artificial Intelligence Research, 61:907-926.
245
+ Ákos Kádár, Grzegorz Chrupała, and Afra Alishahi. 2017. Representation of linguistic form and function in recurrent neural networks. Computational Linguistics, 43(4):761-780.
246
+
247
+ Andrej Karpathy, Justin Johnson, and Li Fei-Fei. 2015. Visualizing and understanding recurrent networks. arXiv preprint arXiv:1506.02078.
248
+ Been Kim, Martin Wattenberg, Justin Gilmer, Carrie Cai, James Wexler, Fernanda Viegas, et al. 2018. Interpretability beyond feature attribution: Quantitative testing with concept activation vectors (tcav). In International conference on machine learning, pages 2668-2677. PMLR.
249
+ Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. 2019. Albert: A lite bert for self-supervised learning of language representations. ArXiv:1909.11942.
250
+ Jiwei Li, Xinlei Chen, Eduard Hovy, and Dan Jurafsky. 2016. Visualizing and understanding neural models in NLP. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 681-691, San Diego, California. Association for Computational Linguistics.
251
+ Tal Linzen, Emmanuel Dupoux, and Yoav Goldberg. 2016. Assessing the ability of LSTMs to learn syntax-sensitive dependencies. Transactions of the Association for Computational Linguistics, 4:521-535.
252
+ Nelson F. Liu, Matt Gardner, Yonatan Belinkov, Matthew E. Peters, and Noah A. Smith. 2019a. Linguistic knowledge and transferability of contextual representations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 1073-1094, Minneapolis, Minnesota. Association for Computational Linguistics.
253
+ Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019b. RoBERTa: A robustly optimized BERT pretraining approach. ArXiv:1907.11692.
254
+ Jonathan Mamou, Hang Le, Miguel Del Rio, Cory Stephenson, Hanlin Tang, Yoon Kim, and Sueyeon Chung. 2020. Emergence of separable manifolds in deep language representations. In International Conference on Machine Learning, pages 6713-6723. PMLR.
255
+ Mitchell P. Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of English: The Penn Treebank. Computational Linguistics, 19(2):313-330.
256
+ Rebecca Marvin and Tal Linzen. 2018. Targeted syntactic evaluation of language models. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1192-1202, Brussels, Belgium. Association for Computational Linguistics.
257
+
258
+ Julian Michael, Jan A. Botha, and Ian Tenney. 2020. Asking without telling: Exploring latent ontologies in contextual representations. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 6792-6812, Online. Association for Computational Linguistics.
259
+ George A Miller. 1995. Wordnet: a lexical database for english. Communications of the ACM, 38(11):39-41.
260
+ Jesse Mu and Jacob Andreas. 2020. Compositional explanations of neurons. CoRR, abs/2006.14032.
261
+ James W Pennebaker, Martha E Francis, and Roger J Booth. 2001. Linguistic inquiry and word count: Liwc 2001. *Mahway: Lawrence Erlbaum Associates*, 71(2001):2001.
262
+ Peng Qian, Xipeng Qiu, and Xuanjing Huang. 2016. Investigating Language Universal and Specific Properties in Word Embeddings. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1478-1488, Berlin, Germany. Association for Computational Linguistics.
263
+ Emily Reif, Ann Yuan, Martin Wattenberg, Fernanda B Viegas, Andy Coenen, Adam Pearce, and Been Kim. 2019. Visualizing and measuring the geometry of bert. In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.
264
+ Anna Rogers, Olga Kovaleva, and Anna Rumshisky. 2020. A primer in BERTology: What we know about how BERT works. Transactions of the Association for Computational Linguistics, 8:842-866.
265
+ Peter Rousseeuw. 1987. Silhouettes: a graphical aid to the interpretation and validation of cluster analysis. J. Comput. Appl. Math., 20(1):53-65.
266
+ Hassan Sajjad, Narine Kokhlikyan, Fahim Dalvi, and Nadir Durrani. 2021. Fine-grained interpretation and causation analysis in deep NLP models. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies: Tutorials, pages 5-10, Online. Association for Computational Linguistics.
267
+ Mike Schuster and Kaisuke Nakajima. 2012. Japanese and korean voice search. In 2012 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 5149-5152. IEEE.
268
+ Rico Sennrich, Barry Haddow, and Alexandra Birch. 2016. Neural machine translation of rare words with subword units. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1715-1725, Berlin, Germany. Association for Computational Linguistics.
269
+ Wolfgang G Stock. 2010. Concepts and semantic relations in information science. Journal of the American Society for Information Science and Technology, 61(10):1951-1969.
270
+
271
+ Xavier Suau, Luca Zappella, and Nicholas Apostoloff. 2020. Finding experts in transformer models. CoRR, abs/2005.07647.
272
+ Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019. BERT rediscovers the classical NLP pipeline. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 4593-4601, Florence, Italy. Association for Computational Linguistics.
273
+ Robert L. Thorndike. 1953. Who belongs in the family? Psychometrika, 18(4):267-276.
274
+ Erik F. Tjong Kim Sang and Sabine Buchholz. 2000. Introduction to the CoNLL-2000 shared task chunking. In Fourth Conference on Computational Natural Language Learning and the Second Learning Language in Logic Workshop.
275
+ Jesse Vig. 2019. A multiscale visualization of attention in the transformer model. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 37-42, Florence, Italy. Association for Computational Linguistics.
276
+ Ekaterina Vylomova, Trevor Cohn, Xuanli He, and Gholamreza Haffari. 2016. Word Representation Models for Morphologically Rich Languages in Neural Machine Translation. arXiv preprint arXiv:1606.04217.
277
+ John Wu, Yonatan Belinkov, Hassan Sajjad, Nadir Durrani, Fahim Dalvi, and James Glass. 2020. Similarity Analysis of Contextual Word Representation Models. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL), Seattle. Association for Computational Linguistics.
278
+ Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Carbonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. Advances in neural information processing systems, 32.
279
+
280
+ # Appendix
281
+
282
+ # A Human-defined concept labels
283
+
284
+ # A.1 Lexical Concepts:
285
+
286
+ Ngrams, Affixes, Casing, First and the Last Word.
287
+
288
+ # A.2 Morphology and Semantics:
289
+
290
+ POS tags: We used the Penn Treebank POS tags discussed in Marcus et al. (1993), which consist of 36 POS tags and 12 other tags (i.e., punctuation and currency symbols). In Table 2, we provide the POS tags and their descriptions.
291
+
292
+ SEM tags: The semantic tagset (Abzianidze et al., 2017) consists of 73 sem-tags grouped into 13 meta-tags. In Table 3, we provide detailed information about the tagset, and in Table 5, we provide the fine-to-coarse tag mapping.
293
+
294
+ # A.3 Syntactic:
295
+
296
+ Chunking tags: For Chunking we used the tagset discussed in Tjong Kim Sang and Buchholz (2000), which consists of 11 tags as follows: NP (Noun phrase), VP (Verb phrase), PP (Prepositional phrase), ADVP (Adverb phrase), SBAR (Subordinate clause), ADJP (Adjective phrase), PRT (Particles), CONJP (Conjunction), INTJ (Interjection), LST (List marker), UCP (Unlike coordinated phrase). For the annotation, chunks are represented using the IOB format, which results in the 22 tags in the dataset reported in Table 4.
297
+
298
+ CCG super-tags: Hockenmaier (2006) developed CCGbank, a dataset with Combinatory Categorial Grammar (CCG) derivations and dependency structures derived from the Penn Treebank. CCG is a lexicalized grammar formalism, which is expressive and efficiently parseable. The tagset consists of 1272 tags.
299
+
300
+ # A.4 Linguistic Ontologies:
301
+
302
+ WordNet: WordNet (Miller, 1995) consists of 26 lexicographic senses for nouns, 2 for adjectives, and 1 for adverbs. Each of them represents a supersense, and a hierarchy can be formed from hypernym to hyponym.
303
+
304
+ LIWC: Over the past few decades, Pennebaker et al. (2001) have designed psycholinguistic concepts using high-frequency words. These word categories are mostly used to study gender, age, personality, and health to estimate the
305
+
306
+ <table><tr><td>#</td><td>Tag</td><td>Description</td></tr><tr><td>1</td><td>CC</td><td>Coordinating conjunction</td></tr><tr><td>2</td><td>CD</td><td>Cardinal number</td></tr><tr><td>3</td><td>DT</td><td>Determiner</td></tr><tr><td>4</td><td>EX</td><td>Existential there</td></tr><tr><td>5</td><td>FW</td><td>Foreign word</td></tr><tr><td>6</td><td>IN</td><td>Preposition or subordinating conjunction</td></tr><tr><td>7</td><td>JJ</td><td>Adjective</td></tr><tr><td>8</td><td>JJR</td><td>Adjective, comparative</td></tr><tr><td>9</td><td>JJS</td><td>Adjective, superlative</td></tr><tr><td>10</td><td>LS</td><td>List item marker</td></tr><tr><td>11</td><td>MD</td><td>Modal</td></tr><tr><td>12</td><td>NN</td><td>Noun, singular or mass</td></tr><tr><td>13</td><td>NNS</td><td>Noun, plural</td></tr><tr><td>14</td><td>NNP</td><td>Proper noun, singular</td></tr><tr><td>15</td><td>NNPS</td><td>Proper noun, plural</td></tr><tr><td>16</td><td>PDT</td><td>Predeterminer</td></tr><tr><td>17</td><td>POS</td><td>Possessive ending</td></tr><tr><td>18</td><td>PRP</td><td>Personal pronoun</td></tr><tr><td>19</td><td>PRP$</td><td>Possessive pronoun</td></tr><tr><td>20</td><td>RB</td><td>Adverb</td></tr><tr><td>21</td><td>RBR</td><td>Adverb, comparative</td></tr><tr><td>22</td><td>RBS</td><td>Adverb, superlative</td></tr><tr><td>23</td><td>RP</td><td>Particle</td></tr><tr><td>24</td><td>SYM</td><td>Symbol</td></tr><tr><td>25</td><td>TO</td><td>to</td></tr><tr><td>26</td><td>UH</td><td>Interjection</td></tr><tr><td>27</td><td>VB</td><td>Verb, base form</td></tr><tr><td>28</td><td>VBD</td><td>Verb, past tense</td></tr><tr><td>29</td><td>VBG</td><td>Verb, gerund or present participle</td></tr><tr><td>30</td><td>VBN</td><td>Verb, past participle</td></tr><tr><td>31</td><td>VBP</td><td>Verb, non-3rd person singular present</td></tr><tr><td>32</td><td>VBZ</td><td>Verb, 3rd person singular present</td></tr><tr><td>33</td><td>WDT</td><td>Wh-determiner</td></tr><tr><td>34</td><td>WP</td><td>Wh-pronoun</td></tr><tr><td>35</td><td>WP$</td><td>Possessive wh-pronoun</td></tr><tr><td>36</td><td>WRB</td><td>Wh-adverb</td></tr><tr><td>37</td><td>#</td><td>Pound sign</td></tr><tr><td>38</td><td>$</td><td>Dollar sign</td></tr><tr><td>39</td><td>.</td><td>Sentence-final punctuation</td></tr><tr><td>40</td><td>,</td><td>Comma</td></tr><tr><td>41</td><td>:</td><td>Colon, semi-colon</td></tr><tr><td>42</td><td>(</td><td>Left bracket character</td></tr><tr><td>43</td><td>)</td><td>Right bracket character</td></tr><tr><td>44</td><td>&quot;</td><td>Straight double quote</td></tr><tr><td>45</td><td>&#x27;</td><td>Left open single quote</td></tr><tr><td>46</td><td>&quot;</td><td>Left open double quote</td></tr><tr><td>47</td><td>&#x27;</td><td>Right close single quote</td></tr><tr><td>48</td><td>&quot;</td><td>Right close double quote</td></tr></table>
307
+
308
+ Table 2: Penn Treebank POS tags.
309
+
310
+ correlation between these attributes and word usage. It is a knowledge-based system where words are mapped to different high-level concepts.
311
+
312
+ # B BERT-based Sequence Tagger
313
+
314
+ We trained a BERT-based sequence tagger to auto-annotate our training data. We used standard splits for the training, development and test data for the 4 linguistic tasks (POS, SEM, Chunking and CCG super tagging) that we used to carry out our analysis. The splits to preprocess the data are
315
+
316
+ <table><tr><td colspan="2">ANA (anaphoric)</td><td colspan="2">MOD (modality)</td></tr><tr><td>PRO</td><td>anaphoric &amp; deictic pronouns: he, she, I, him</td><td>NOT</td><td>negation: not, no, neither, without</td></tr><tr><td>DEF</td><td>definite: the, lo (IT), der (DE)</td><td>NEC</td><td>necessity: must, should, have to</td></tr><tr><td>HAS</td><td>possessive pronoun: my, her</td><td>POS</td><td>possibility: might, could, perhaps, alleged, can</td></tr><tr><td>REF</td><td>reflexive &amp; reciprocal pron.: herself, each other</td><td colspan="2">DSC (discourse)</td></tr><tr><td>EMP</td><td>emphasizing pronouns: himself</td><td>SUB</td><td>subordinate relations: that, while, because</td></tr><tr><td colspan="2">ACT (speech act)</td><td>COO</td><td>coordinate relations: so, {}, {}, and</td></tr><tr><td>GRE</td><td>greeting &amp; parting: hi, bye</td><td>APP</td><td>appositional relations: {}, which, {}, —</td></tr><tr><td>ITJ</td><td>interjections, exclamations: alas, ah</td><td>BUT</td><td>contrast: but, yet</td></tr><tr><td>HES</td><td>hesitation: err</td><td colspan="2">NAM (named entity)</td></tr><tr><td>QUE</td><td>interrogative: who, which, ?</td><td>PER</td><td>person: Axl Rose, Sherlock Holmes</td></tr><tr><td colspan="2">ATT (attribute)</td><td>GPE</td><td>geo-political entity: Paris, Japan</td></tr><tr><td>QUC</td><td>concrete quantity: two, six million, twice</td><td>GPO</td><td>geo-political origin: Parisian, French</td></tr><tr><td>QUV</td><td>vague quantity: millions, many, enough</td><td>GEO</td><td>geographical location: Alps, Nile</td></tr><tr><td>COL</td><td>colour: red, crimson, light blue, chestnut brown</td><td>ORG</td><td>organization: IKEA, EU</td></tr><tr><td>IST</td><td>intersective: open, vegetarian, quickly</td><td>ART</td><td>artifact: iOS 7</td></tr><tr><td>SST</td><td>subjective: skillful surgeon, tall kid</td><td>HAP</td><td>happening: Eurovision 2017</td></tr><tr><td>PRI</td><td>private: former, fake</td><td>UOM</td><td>unit of measurement: meter, $, %, degree Celsius</td></tr><tr><td>DEG</td><td>degree: 2 meters tall, 20 years old</td><td>CTC</td><td>contact information: 112, info@mail.com</td></tr><tr><td>INT</td><td>intensifier: very, much, too, rather</td><td>URL</td><td>URL: http://pmb.let.rug.nl</td></tr><tr><td>REL</td><td>relation: in, on, &#x27;s, of, after</td><td>LIT</td><td>literal use of names: his name is John</td></tr><tr><td>SCO</td><td>score: 3-0, grade A</td><td>NTH</td><td>other names: table 1a, equation (1)</td></tr><tr><td colspan="2">COM (comparative)</td><td colspan="2">EVE (events)</td></tr><tr><td>EQU</td><td>equative: as tall as John, whales are mammals</td><td>EXS</td><td>untensed simple: to walk, is eaten, destruction</td></tr><tr><td>MOR</td><td>comparative positive: better, more</td><td>ENS</td><td>present simple: we walk, he walks</td></tr><tr><td>LES</td><td>comparative negative: less, worse</td><td>EPS</td><td>past simple: ate, went</td></tr><tr><td>TOP</td><td>superlative positive: most, mostly</td><td>EXG</td><td>untensed progressive: is running</td></tr><tr><td>BOT</td><td>superlative negative: worst, least</td><td>EXT</td><td>untensed perfect: has eaten</td></tr><tr><td>ORD</td><td>ordinal: 1st, 3rd, third</td><td colspan="2">TNS (tense &amp; aspect)</td></tr><tr><td colspan="2">UNE (unnamed entity)</td><td>NOW</td><td>present tense: is skiing, do ski, has skied, now</td></tr><tr><td>CON</td><td>concept: dog, person</td><td>PST</td><td>past tense: was baked, had gone, did go</td></tr><tr><td>ROL</td><td>role: student, brother, prof., victim</td><td>FUT</td><td>future tense: will, shall</td></tr><tr><td>GRP</td><td>group: John {}, Mary and Sam gathered, a group of people</td><td>PRG</td><td>progressive: has been being treated, aan het (NL)</td></tr><tr><td colspan="2">DXS (deixis)</td><td>PFT</td><td>perfect: has been going/done</td></tr><tr><td>DXP</td><td>place deixis: here, this, above</td><td colspan="2">TIM (temporal entity)</td></tr><tr><td>DXT</td><td>temporal deixis: just, later, tomorrow</td><td>DAT</td><td>full date: 27.04.2017, 27/04/17</td></tr><tr><td>DXD</td><td>discourse deixis: latter, former, above</td><td>DOM</td><td>day of month: 27th December</td></tr><tr><td colspan="2">LOG (logical)</td><td>YOC</td><td>year of century: 2017</td></tr><tr><td>ALT</td><td>alternative &amp; repetitions: another, different, again</td><td>DOW</td><td>day of week: Thursday</td></tr><tr><td>XCL</td><td>exclusive: only, just</td><td>MOY</td><td>month of year: April</td></tr><tr><td>NIL</td><td>empty semantics: {}, to, of</td><td>DEC</td><td>decade: 80s, 1990s</td></tr><tr><td>DIS</td><td>disjunction &amp; exist. quantif.: a, some, any, or</td><td>CLO</td><td>clocktime: 8:45 pm, 10 o&#x27;clock, noon</td></tr><tr><td>IMP</td><td>implication: if, when, unless</td><td></td><td></td></tr><tr><td>AND</td><td>conjunction &amp; univ. quantif.: every, and, who, any</td><td></td><td></td></tr></table>
317
+
318
+ Table 3: Semantic tags.
319
+
320
+ <table><tr><td>Task</td><td>Train</td><td>Dev</td><td>Test</td><td>Tags</td><td>F1</td></tr><tr><td>POS</td><td>36557</td><td>1802</td><td>1963</td><td>48</td><td>96.69</td></tr><tr><td>SEM</td><td>36928</td><td>5301</td><td>10600</td><td>73</td><td>96.22</td></tr><tr><td>Chunking</td><td>8881</td><td>1843</td><td>2011</td><td>22</td><td>96.91</td></tr><tr><td>CCG</td><td>39101</td><td>1908</td><td>2404</td><td>1272</td><td>94.90</td></tr></table>
321
+
322
+ Table 4: Data statistics (number of sentences) for the training, development, and test sets used in the experiments, the number of tags to be predicted, and classifier F1 scores.
323
+
324
+ able through the git repository released with Liu et al. (2019a). See Table 4 for data statistics and classifier accuracy.
325
+
326
+ # C Clustering details
327
+
328
+ Algorithm 1 assigns each word to its own cluster and then iteratively merges clusters according to Ward's minimum variance criterion, which minimizes intra-cluster variance. The distance between two vector representations is calculated with the squared Euclidean distance. A runnable sketch is given after the algorithm.
329
+
330
+ # Algorithm 1 Clustering Procedure
331
+
332
+ Input: $\overrightarrow{y}^l$: word representations
333
+ Parameter: $K$ : the total number of clusters
334
+
335
+ 1: for each word $w_{i}$ do
336
+ 2: assign $w_{i}$ to cluster $c_{i}$
337
+ 3: end for
338
+ 4: while number of clusters $\neq K$ do
339
+ 5: for each cluster pair $c_{i}, c_{i'}$ do
340
+ 6: $d_{i,i'} =$ intra-cluster variance of the combined cluster $c_{i} \cup c_{i'}$
341
+ 7: end for
342
+ 8: $c_{j}, c_{j'} =$ cluster pair with minimum value of $d$
343
+ 9: merge clusters $c_{j}$ and $c_{j'}$
344
+ 10: end while
345
+
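+ To make this concrete, the following is a minimal sketch of Algorithm 1 built on SciPy's agglomerative clustering with Ward linkage. It mirrors the described procedure but is not the authors' implementation; the data, dimensionality, and $K$ are placeholders.
+
+ ```python
+ import numpy as np
+ from scipy.cluster.hierarchy import linkage, fcluster
+
+ K = 1000                          # target number of clusters (placeholder)
+ reps = np.random.rand(5000, 768)  # placeholder word representations
+
+ # Ward linkage starts from singleton clusters and repeatedly merges the
+ # pair whose union yields the smallest increase in intra-cluster variance
+ # (based on squared Euclidean distances), as in Algorithm 1.
+ Z = linkage(reps, method="ward")
+
+ # Cut the dendrogram so that exactly K flat clusters remain.
+ labels = fcluster(Z, t=K, criterion="maxclust")
+ print(len(set(labels)))           # -> 1000
+ ```
+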
346
+ # C.1 Selection of the Number of Clusters
347
+
348
+ The Elbow curve did not show any optimum clustering point: as the number of clusters increased, the distortion score kept decreasing, resulting in over-clustering (a large number of clusters consisted of fewer than 5 words). The over-clustering resulted in high but spurious alignment scores. For example, consider a two-word cluster containing the words "good" and "great": the cluster will have a successful match with "adjective" since more than $90\%$ of its words are adjectives. In this way, many small clusters will have a successful match with many human-defined concepts, inflating the resulting alignment scores. On the other hand, Silhouette resulted in under-clustering, giving the best score at 10 clusters. We handled this empirically by trying several values for the number of clusters, i.e., 200 to 1600 with a step size of 200 (see the sketch below), and selected 1000 as a good balance between over- and under-clustering. We understand that this may not be the optimal point, so we presented results for both 600 and 1000 clusters to show that our findings are not sensitive to this parameter.
349
+
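+ The sweep described above can be reproduced along the following lines; this sketch assumes scikit-learn's silhouette score and placeholder data.
+
+ ```python
+ import numpy as np
+ from scipy.cluster.hierarchy import linkage, fcluster
+ from sklearn.metrics import silhouette_score
+
+ reps = np.random.rand(5000, 768)  # placeholder word representations
+ Z = linkage(reps, method="ward")  # build the dendrogram once
+
+ # Score each candidate number of clusters (200 to 1600, step 200).
+ for K in range(200, 1601, 200):
+     labels = fcluster(Z, t=K, criterion="maxclust")
+     print(K, round(silhouette_score(reps, labels), 4))
+ ```
+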
350
+ # D Coarse vs. Fine-grained Categories
351
+
352
+ # D.1 Coarse vs. Fine-grained Categories
353
+
354
+ Our analysis of compositional concepts showed that several fine-grained concepts could be combined to explain an unaligned concept. For example, by combining the verb categories of POS into one coarse verb category, we can align the encoded concept shown in Figure 5c. To probe this more formally, we collapsed the fine-grained POS and SEM concepts into coarser categories (27 POS tags and 15 SEM tags) and recomputed the alignment with the encoded concepts. For most of the models, the alignment doubled compared to the fine-grained categories, with at least a $39\%$ and at most a $53\%$ match for POS. This reflects that in several cases models learn the coarse language hierarchy. We further questioned how many encoded concepts can be explained using coarse human-defined concepts. Compared to Table 1, the matches increased by at most 17 points in the case of BERT-uc. XLM-R showed the highest matching percentage of $81\%$. The higher alignment suggests that most of the encoded concepts learned by pre-trained models can be explained using human-defined concepts (see Section D.3 for detailed results). A minimal sketch of this re-alignment check follows.
355
+
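+ Concretely, fine-grained POS tags are collapsed via the mapping in Table 6, and a cluster counts as aligned when at least $90\%$ of its words share one coarse tag. The mapping shown is truncated and the function name is ours.
+
+ ```python
+ from collections import Counter
+
+ # Truncated illustration of the POS coarse mapping from Table 6.
+ COARSE = {
+     "JJ": "Adjective", "JJR": "Adjective", "JJS": "Adjective",
+     "NN": "Noun", "NNS": "Noun", "NNP": "Noun", "NNPS": "Noun",
+     "VB": "Verb", "VBD": "Verb", "VBG": "Verb",
+     "VBN": "Verb", "VBP": "Verb", "VBZ": "Verb",
+ }
+
+ def aligns_with_coarse(cluster_tags, threshold=0.90):
+     """cluster_tags: fine-grained POS tag of each word in one cluster."""
+     coarse = [COARSE.get(tag, tag) for tag in cluster_tags]
+     _, top_count = Counter(coarse).most_common(1)[0]
+     return top_count / len(coarse) >= threshold
+ ```
+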
356
+ # D.2 Coarse POS and SEM labels
357
+
358
+ Tables 5 and 6 present our mapping of fine-grained SEM and POS tags into coarser categories.
359
+
360
+ <table><tr><td>Coarse</td><td>Fine-grained</td></tr><tr><td>ACT</td><td>QUE</td></tr><tr><td>ANA</td><td>DEF, DST, EMP, HAS, PRO, REF</td></tr><tr><td>ATT</td><td>INT, IST, QUA, REL, SCO</td></tr><tr><td>COM</td><td>COM, LES, MOR, TOP</td></tr><tr><td>DSC</td><td>APP, BUT, COO, SUB</td></tr><tr><td>DXS</td><td>PRX</td></tr><tr><td>EVE</td><td>EXG, EXS, EXT, EXV</td></tr><tr><td>LOG</td><td>ALT, AND, DIS, EXC, EXN, IMP, NIL, RLI</td></tr><tr><td>MOD</td><td>NEC, NOT, POS</td></tr><tr><td>NAM</td><td>ART, GPE, HAP, LOC, NAT, ORG, PER, UOM</td></tr><tr><td>TIM</td><td>DEC, DOM, DOW, MOY, TIM, YOC</td></tr><tr><td>TNS</td><td>EFS, ENG, ENS, ENT, EPG, EPS, EPT, ETG, ETV, FUT, NOW, PST</td></tr><tr><td>UNE</td><td>CON, ROL</td></tr><tr><td>UNK</td><td>UNK</td></tr></table>
361
+
362
+ Table 5: SEM: Coarse to Fine-grained mapping
363
+
364
+ # D.3 Results
365
+
366
+ Table 7 presents the alignment results using coarse POS and SEM concepts. We observed that the alignment doubles in most of the cases, which reflects that in several cases models learn the coarse language hierarchy; however, they do not strictly adhere to the fine-grained categories present in human-defined concepts. We further extend the alignment of coarse POS and SEM categories to the overall alignment with the human-defined concepts. Table 8 presents the results. We see a match of up to $81\%$ in the case of XLM-R. The high alignment suggests that many of the encoded concepts can be explained using coarse human-defined concepts.
367
+
368
+ <table><tr><td>Coarse</td><td>Fine-grained</td></tr><tr><td>Adjective</td><td>JJ, JJR, JJS</td></tr><tr><td>Adverb</td><td>RB, RBS, WRB, RBR</td></tr><tr><td>Conjunction</td><td>CC</td></tr><tr><td>Determiner</td><td>DT, WDT</td></tr><tr><td>Noun</td><td>NN, NNS, NNP, NNPS</td></tr><tr><td>Number</td><td>CD</td></tr><tr><td>Preposition</td><td>IN, TO</td></tr><tr><td>Pronoun</td><td>PRP, PRP$, WP, WP$</td></tr><tr><td>Verb</td><td>VB, VBN, VBZ, VBG, VBP, VBD</td></tr><tr><td>No Changes</td><td>$, -LRB-, #, FW, -RRB-, LS, POS, &quot;&quot;, EX</td></tr><tr><td></td><td>SYM, :, RP, :, PDT, MD, UH,</td></tr></table>
369
+
370
+ Table 6: POS: Coarse to Fine-grained mapping
371
+
374
+ <table><tr><td></td><td colspan="2">POS</td><td colspan="2">SEM</td></tr><tr><td></td><td>Fine</td><td>Coarse</td><td>Fine</td><td>Coarse</td></tr><tr><td>BERT-cased</td><td>13%</td><td>42%</td><td>7%</td><td>15%</td></tr><tr><td>BERT-uncased</td><td>16%</td><td>43%</td><td>9%</td><td>18%</td></tr><tr><td>mBERT</td><td>26%</td><td>53%</td><td>16%</td><td>26%</td></tr><tr><td>XLM-RoBERTa</td><td>24%</td><td>47%</td><td>11%</td><td>21%</td></tr><tr><td>RoBERTa</td><td>18%</td><td>43%</td><td>10%</td><td>20%</td></tr><tr><td>ALBERT</td><td>17%</td><td>42%</td><td>9%</td><td>17%</td></tr><tr><td>XLNet</td><td>17%</td><td>39%</td><td>10%</td><td>18%</td></tr></table>
375
+
+ Table 7: Alignment of fine-grained human-defined concepts compared to coarse categories
+
376
+ # E Compositional Coverage
377
+
378
+ Table 9 shows the coverage we obtain when aligning with the morphological concepts, allowing $90\%$ of the words in a cluster to come from $N$ concepts; a sketch of this check is given below.
379
+
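+ A hedged sketch of this compositional check, assuming a cluster counts as covered by $N$ concepts when its $N$ most frequent concept labels jointly account for at least $90\%$ of its words (the function is illustrative, not the authors' code):
+
+ ```python
+ from collections import Counter
+
+ def compositional_match(word_concepts, n, threshold=0.90):
+     """word_concepts: morphological concept label of each word in a cluster."""
+     top_n = Counter(word_concepts).most_common(n)
+     covered = sum(count for _, count in top_n)
+     return covered / len(word_concepts) >= threshold
+
+ # Example: 9 of 10 words come from two concepts -> matches at N=2.
+ labels = ["NN"] * 5 + ["VB"] * 4 + ["JJ"]
+ print(compositional_match(labels, n=2))  # True
+ ```
+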
382
+ <table><tr><td></td><td>BERT-c</td><td>BERT-uc</td><td>mBERT</td><td>XLM-R</td><td>RoBERTa</td><td>ALBERT</td><td>XLNet</td></tr><tr><td>Overall alignment</td><td>61.5%</td><td>63.6%</td><td>77.7%</td><td>81.0%</td><td>62.9%</td><td>64.0%</td><td>55.3%</td></tr></table>
383
+
384
+ Table 8: Coverage of human-defined concepts using coarse POS and SEM labels across all clusters from a given model
385
+
386
+ # F Robustness of Methodology across Datasets and Settings
387
+
388
+ Figure 8 shows the layer-wise patterns using 600 clusters instead of the 1000 used in the main paper. We observe that the overall trends remain largely the same.
389
+
390
+ To further demonstrate the robustness of our method with respect to the dataset, we sub-sampled another dataset from the News corpus with a different vocabulary by selecting words that appear between 2 and 10 times in the corpus (see the sketch below). Note that this restriction of the vocabulary is due to memory and computation limitations. Figure 9 shows the results using this selection of data. Compared to Figure 4, the overall patterns are largely similar, which confirms the robustness of our findings. The slight differences in the patterns of WordNet and LIWC are due to the larger share of proper nouns in the second set of the data.
391
+
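+ This vocabulary selection amounts to a simple frequency filter, sketched below; the corpus path and whitespace tokenization are hypothetical.
+
+ ```python
+ from collections import Counter
+
+ # Hypothetical pre-tokenized News corpus, one sentence per line.
+ with open("news.tok.txt", encoding="utf-8") as f:
+     tokens = f.read().split()
+
+ freq = Counter(tokens)
+ vocab = {w for w, c in freq.items() if 2 <= c <= 10}
+ print(len(vocab))
+ ```
+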
392
+ # G Layer-wise results
393
+
394
+ Figure 10 presents layer-wise results for all the models under study.
395
+
396
+ ![](images/0bc13f82ea64a326b213cd06bf60ee5563d21c9123aa291be6150262be38a65d.jpg)
397
+ (a)
398
+
399
+ ![](images/bbf215759f269fc09326e112584272fc3e57bc4dcc3458c38608818dcc53a475.jpg)
400
+ (b)
401
+
402
+ ![](images/fbdd9f1b1b23267b821cb07ba7c1cc90cc9f2c9cf307687ae598831796039604.jpg)
403
+ (c)
404
+ Figure 6: Example clusters: (a) ngram:ace, (b) POS:CD, (c) Chunking:B-VP + Suffix:ed
405
+
406
+ ![](images/acb9c758d46d26bc554f5e1f337ef24844e6a0b9c6270d1cb45c4a8651fa29c0.jpg)
407
+ (a)
408
+
409
+ ![](images/4b7242afbe487dc5a8eade3ec8f7ff9294e191f5b40928e667b55e78171e4ef0.jpg)
410
+ (b)
411
+
412
+ ![](images/ef83e7e7c1be79995e33481b923395065bdb5cf25efcc40c6e5c784ff4db2b27.jpg)
413
+ (c)
414
+ Figure 7: Example clusters: (a) LIWC:cause, (b) WORDNET:verb.cognition, (c) WORDNET:noun.artifact
415
+
416
+ <table><tr><td>Concepts</td><td>BERT-c</td><td>BERT-uc</td><td>mBERT</td><td>XLM-R</td><td>RoBERTa</td><td>ALBERT</td><td>XLNet</td></tr><tr><td>1</td><td>13%</td><td>16%</td><td>26%</td><td>24%</td><td>18%</td><td>17%</td><td>17%</td></tr><tr><td>2</td><td>11%</td><td>12%</td><td>20%</td><td>23%</td><td>13%</td><td>13%</td><td>12%</td></tr><tr><td>3</td><td>14%</td><td>13%</td><td>14%</td><td>18%</td><td>11%</td><td>15%</td><td>9%</td></tr><tr><td>4</td><td>6%</td><td>6%</td><td>4%</td><td>4%</td><td>5%</td><td>5%</td><td>3%</td></tr><tr><td>5</td><td>2%</td><td>1%</td><td>1%</td><td>1%</td><td>2%</td><td>1%</td><td>1%</td></tr><tr><td>6</td><td>1%</td><td>0%</td><td>0%</td><td>0%</td><td>1%</td><td>1%</td><td>0%</td></tr></table>
417
+
418
+ Table 9: Percentage of alignment when an encoded concept is composed of $N$ morphological concepts. As can be seen, most concepts are composed of either 1, 2 or 3 morphological concepts, showing that several concepts learned by these models are indeed compositional in nature.
419
+
420
+ ![](images/6ce44d6fd793a37f800eaed9c707cb0246f6932c531d4c37e6ca5fc522ac3d77.jpg)
421
+
422
+ ![](images/77daebd63f3a319c0072690733f40d1e7f39200083453567c708bcb8c8816995.jpg)
423
+
424
+ ![](images/436e07b55f3b2fcd4f2ad78abe10ed9e0c271aa6370e99f5e2a9532d3bc41a9c.jpg)
425
+
426
+ ![](images/1f7ee0725c2c307143ecb9bd1d81735965c06d59a5fe673a0aad54456c6baf15.jpg)
427
+
428
+ ![](images/c31109bbd2d84343ff0cb1b0298716854d4ef91fcc913b32b297e27dd2bbb0e0.jpg)
429
+
430
+ ![](images/4bf46060f6af008dc74e4fa607ac9c3582d2f5e1876964efccc4f0b5f8957b58.jpg)
431
+
432
+ ![](images/b89e2470546c146c205db31e534f94ccdee9da9f18a42c71e3602569ec024d6d.jpg)
433
+
434
+ ![](images/32104fc9f0b4da0a4baede6bcbdf803fb11aacdc84224e4a24a160ca6abbb37f.jpg)
435
+
436
+ ![](images/7473a39b0a8418d1a6534368469d7b6eb24407b21e4a32e67b9d8e2362499e3d.jpg)
437
+
438
+ ![](images/749d56afbdd497197b5222a98a83cf0439119b413f2e3bc10276ebfe5b4160d3.jpg)
439
+
440
+ ![](images/f32882ac0be89ed82c2a7cfce26ec8b99dda885d69a8b5085ff88affe440eb91.jpg)
441
+
442
+ ![](images/427b67561e0bc6daa98abdd24680542e1e2c5a260f12afcc0192954b904a6275.jpg)
443
+
444
+ ![](images/22497e4a993322e22c76d736c62a9bf1c23d11d4e51b0bd981e4215faf8f8871.jpg)
445
+ Figure 8: Layer-wise results using 600 clusters.
446
+
447
+ ![](images/cf95049184bc71edbf0c7f1277b84c26fbee23727b5e358f291dac032cfa6668.jpg)
448
+
449
+ ![](images/aaef0bb3f56ac696ab5d41aa251fdd0912282b7fd2367b7dd31baf32a64366b9.jpg)
450
+
451
+ ![](images/123807093391867af9dea42aea7e4cc8f340493c4fd906eb55793d24088fafef.jpg)
452
+
453
+ ![](images/18ecb50d32cafdfd3faffd8678fb58aa13f9928a494c4bd8623627c84699f803.jpg)
454
+ Figure 9: Layer-wise results on a separately sampled dataset.
455
+
456
+ ![](images/1a324f2a44cff1ce1dae050b5ae3b7c14b2d07b2d95fe6627e83683fa10d85b8.jpg)
457
+
458
+ ![](images/52b016a2f0b1dedc13a6fac7badbe4edc0a9b87fe83936ccd5fce20e71df545c.jpg)
459
+ BERT-c
460
+
461
+ ![](images/75e5984f6f38936c08f493414d67ac69cb54c4ed123b8ce891ad5b3cbcbbe46e.jpg)
462
+
463
+ ![](images/5e04dd0f2e8d799c806fd62cff40e1ab8517a66c79610e0034bfe7362926fc1f.jpg)
464
+
465
+ ![](images/4474ed1521902b1271427849e4653112c615754bfd797521f05a85b73951c317.jpg)
466
+ (a) Lexical
467
+
468
+ ![](images/4783d630306dff361ad0a4f871d33846ebef712b3a9c976032ee7b97f3684164.jpg)
469
+ BERT-uc
470
+
471
+ ![](images/6cad0ddbabaf0988f833f5abe0016d7407c322e80beb8e81d6f6c086cfa548b3.jpg)
472
+ (c) Syntactic
473
+ mBERT
474
+
475
+ ![](images/9a675a773c5a61bcb07dcdbc60be8dfa92f9f1d257d161262236895c7dfb2f5c.jpg)
476
+ (d) Linguistic Ontologies
477
+
478
+ ![](images/b8577b89ed2c75935e0a9a6622597aa22810f4ecc8f1e1524c7d49c6cb73e33a.jpg)
479
+
480
+ ![](images/acab1b092a4690ee497a787068ef061c6660268f5f3843d1855b9e4e781938d0.jpg)
481
+ (b) Morphology and Semantics
482
+ RoBERTa
483
+
484
+ ![](images/ff20188f70865fa402c19efb5d6915600cc1f0f1742cdd643c1d53ecfd34b528.jpg)
485
+ (c) Syntactic
486
+
487
+ ![](images/7d3e4bd1445a9969d1ea72e72feabdc62881a0132499b11a4d0ce8b5c01f5205.jpg)
488
+ (d) Linguistic Ontologies
489
+
490
+ ![](images/b1a3d31308712488f1796ab05f815e3b5f6a92aa70505fc4d3928ec7257677a1.jpg)
491
+
492
+ ![](images/b35a81e2a525e835790f2536a651f3bb2bfa5db9009ee323cc3e22eaa44236b0.jpg)
493
+ (b) Morphology and Semantics
494
+
495
+ ![](images/43e406852848c77f5cc5791d4fb6c99d0740c3ad73f1fb75ce8418cdfbcddffa.jpg)
496
+ (c) Syntactic
497
+ XLM-R
498
+
499
+ ![](images/6c866523d0db478a692344b4f1355e2fe4b8c851089b5409ec1a16ad12f60f7d.jpg)
500
+ (d) Linguistic Ontologies
501
+
502
+ ![](images/b11df212bb439ce77150c7074254d02779525ad14a990b589f732d782f20c38a.jpg)
503
+
504
+ ![](images/089385bd37fec21ddcffa968b7a5e079ff0602abb72207cb4dbf4b6afe6b57d1.jpg)
505
+ (b) Morphology and Semantics
506
+
507
+ ![](images/6341a515705969fd6541192eb599c2e7f61a8d5e9b4185f24fb739310e5e454c.jpg)
508
+ (c) Syntactic
509
+
510
+ ![](images/84a6f057b749106e62b7c9edd58a1cb7c1a630d7e69a06576f6677bd00a57f1c.jpg)
511
+ (d) Linguistic Ontologies
512
+
513
+ ![](images/995f246c9ab08762b4dc40295618a941b07146e33891ddc8ed7a8c6e2731a99c.jpg)
514
+
515
+ ![](images/0b0f811808f8936d76fdeff7ba2bfa43f6c3d7f682c1a04708631c3ff0f795e4.jpg)
516
+ ALBERT
517
+
518
+ ![](images/a6b11012cf1acc434d4199374bd032591c51ebe74cf7f2311dc6249fdfa4aba2.jpg)
519
+ (c) Syntactic
520
+
521
+ ![](images/656bec73bad22cff485d8a39c1403614ba5dfc7b958b01c8335c923bd427c7b5.jpg)
522
+ (d) Linguistic Ontologies
523
+
524
+ ![](images/ded878c22c05d49db44406d150174225f363f7fc3555408c6795fadfc5f45927.jpg)
525
+
526
+ ![](images/9c0ce466ca99408ec0584197cd847da2449d974dae44bf95f19ba440949c004e.jpg)
527
+ XLNet
528
+
529
+ ![](images/7662c3d4353b19d4a49f954553a541f624555a4e3e1b5c244488af8bfc0ec4ba.jpg)
530
+ (c) Syntactic
531
+
532
+ ![](images/83a12c4cd74a6a52d1ca5f3e456ce17bf9240b2bd04ac322b4db7445fea8f10e.jpg)
533
+ (d) Linguistic Ontologies
+
+ Figure 10: Layer-wise results for all models (BERT-c, BERT-uc, mBERT, RoBERTa, XLM-R, ALBERT, XLNet): (a) Lexical, (b) Morphology and Semantics, (c) Syntactic, (d) Linguistic Ontologies.
analyzingencodedconceptsintransformerlanguagemodels/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:68303fc1b9512f7f0bcc21f236963bc1e70b9fd30d9c4c8e09e55e28dd6afdef
3
+ size 1565205
analyzingencodedconceptsintransformerlanguagemodels/layout.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1c6d2050006d349d48c02b4ed99ecc4e1e8305196fa80e0cf9c97c41e1f088ff
3
+ size 637393
analyzingmodalityrobustnessinmultimodalsentimentanalysis/093a9f0e-2204-426d-b42c-001378b0e186_content_list.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5933f0b80a50e5c69b1ac9c2c0b13307b109c5801e968e2d129ecaed41099223
3
+ size 73278
analyzingmodalityrobustnessinmultimodalsentimentanalysis/093a9f0e-2204-426d-b42c-001378b0e186_model.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:da74f81b2ae2fdff25f27c3533dcf996dcca614acd05e997993e0145b29c7cca
3
+ size 83240