| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:21:36.220245Z" |
| }, |
| "title": "How Do BERT Embeddings Organize Linguistic Knowledge?", |
| "authors": [ |
| { |
| "first": "Giovanni", |
| "middle": [], |
| "last": "Puccetti", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Scuola Normale Superiore", |
| "location": { |
| "settlement": "Pisa" |
| } |
| }, |
| "email": "giovanni.puccetti@sns.it" |
| }, |
| { |
| "first": "Alessio", |
| "middle": [], |
| "last": "Miaschi", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "alessio.miaschi@phd.unipi.it" |
| }, |
| { |
| "first": "Felice", |
| "middle": [], |
| "last": "Dell'orletta", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "felice.dellorletta@ilc.cnr.it" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Several studies investigated the linguistic information implicitly encoded in Neural Language Models. Most of these works focused on quantifying the amount and type of information available within their internal representations and across their layers. In line with this scenario, we proposed a different study, based on Lasso regression, aimed at understanding how the information encoded by BERT sentence-level representations is arranged within its hidden units. Using a suite of several probing tasks, we showed the existence of a relationship between the implicit knowledge learned by the model and the number of individual units involved in the encodings of this competence. Moreover, we found that it is possible to identify groups of hidden units more relevant for specific linguistic properties.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Several studies investigated the linguistic information implicitly encoded in Neural Language Models. Most of these works focused on quantifying the amount and type of information available within their internal representations and across their layers. In line with this scenario, we proposed a different study, based on Lasso regression, aimed at understanding how the information encoded by BERT sentence-level representations is arranged within its hidden units. Using a suite of several probing tasks, we showed the existence of a relationship between the implicit knowledge learned by the model and the number of individual units involved in the encodings of this competence. Moreover, we found that it is possible to identify groups of hidden units more relevant for specific linguistic properties.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The rise of contextualized word representations (Peters et al., 2018; Devlin et al., 2019) has led to significant improvement in several (if not every) NLP tasks. The main drawback of these approaches, despite the outstanding performances, is the lack of interpretability. In fact, high dimensional representations do not allow for any insight of the type of linguistic properties encoded in these models. Therefore this implicit knowledge can only be determined a posteriori, by designing tasks that require a specific linguistic skill to be tackled (Linzen and Baroni, 2020) or by investigating to what extent certain information is encoded within contextualized internal representations, e.g. defining probing classifier trained to predict a variety of language phenomena (Conneau et al., 2018a; Hewitt and Manning, 2019; Tenney et al., 2019a) .", |
| "cite_spans": [ |
| { |
| "start": 48, |
| "end": 69, |
| "text": "(Peters et al., 2018;", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 70, |
| "end": 90, |
| "text": "Devlin et al., 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 551, |
| "end": 576, |
| "text": "(Linzen and Baroni, 2020)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 775, |
| "end": 798, |
| "text": "(Conneau et al., 2018a;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 799, |
| "end": 824, |
| "text": "Hewitt and Manning, 2019;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 825, |
| "end": 846, |
| "text": "Tenney et al., 2019a)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In line with this latter approach and with recent works aimed at investigating how the information is arranged within neural models representations (Baan et al., 2019; Lakretz et al., 2019) , we proposed an in-depth investigation aimed at understanding how the information encoded by BERT is arranged within its internal representation. In particular, we defined two research questions, aimed at: (i) investigating the relationship between the sentence-level linguistic knowledge encoded in a pre-trained version of BERT and the number of individual units involved in the encoding of such knowledge; (ii) understanding how these sentence-level properties are organized within the internal representations of BERT, identifying groups of units more relevant for specific linguistic tasks. We defined a suite of probing tasks based on a variable selection approach, in order to identify which units in the internal representations of BERT are involved in the encoding of similar linguistic properties. Specifically, we relied on a wide range of linguistic tasks, which resulted to successfully model different typology of sentence complexity , from very simple features (such as sentence length) to more complex properties related to the morphosyntactic and syntactic structure of a sentence (such as the distribution of specific dependency relations).", |
| "cite_spans": [ |
| { |
| "start": 148, |
| "end": 167, |
| "text": "(Baan et al., 2019;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 168, |
| "end": 189, |
| "text": "Lakretz et al., 2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The paper is organized as follows. In Sec. 2 we present related work, then we describe our approach (Sec. 3), with a focus on the model and the data used for the experiments (Sec. 3.1) and the set of probing tasks (Sec. 3.2). Experiments and results are discussed in Sec. 4 and 5. To conclude, we summarize the main findings of our work in Sec. 6.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the last few years, a number of recent works have explored the inner mechanism and the linguistic knowledge implicitly encoded in Neural Language Models (NLMs) . The most common approach is based on the development of probes, i.e. supervised models trained to predict simple linguistic properties using the contextual word/sentence embeddings of a pre-trained model as training features (Conneau et al., 2018b; Zhang and Bowman, 2018; Miaschi et al., 2020) . These latter studies demonstrated that NLMs are able to encode a wide range of linguistic information in a hierarchical manner (Blevins et al., 2018; Jawahar et al., 2019; Tenney et al., 2019b) and even to support the extraction of dependency parse trees (Hewitt and Manning, 2019) . For instance, Liu et al. (2019) quantified differences in the transferability of individual layers between different models, showing that higher layers of RNNs (ELMo) are more task-specific (less general), while transformer layers (BERT) do not exhibit this increase in task-specificity.", |
| "cite_spans": [ |
| { |
| "start": 390, |
| "end": 413, |
| "text": "(Conneau et al., 2018b;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 414, |
| "end": 437, |
| "text": "Zhang and Bowman, 2018;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 438, |
| "end": 459, |
| "text": "Miaschi et al., 2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 589, |
| "end": 611, |
| "text": "(Blevins et al., 2018;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 612, |
| "end": 633, |
| "text": "Jawahar et al., 2019;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 634, |
| "end": 655, |
| "text": "Tenney et al., 2019b)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 717, |
| "end": 743, |
| "text": "(Hewitt and Manning, 2019)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Other works also investigated the importance of individual neurons within models representations (Qian et al., 2016; Baan et al., 2019) . proposed two methods, Linguistic Correlations Analysis and Cross-model correlation analysis, to study whether specific dimensions learned by end-to-end neural models are responsible for specific properties. For instance, they showed that open class categories such as verbs and location are much more distributed across the network compared to closed class categories (e.g. coordinating conjunction) and also that the model recognizes a hierarchy of linguistic proprieties and distributes neurons based on it. Lakretz et al. (2019) , instead, proposed a detailed study of the inner mechanism of number tracking in LSTMs at single neuron level, showing that long distance number information (from the subject to the verb) is largely managed by two specific units.", |
| "cite_spans": [ |
| { |
| "start": 97, |
| "end": 116, |
| "text": "(Qian et al., 2016;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 117, |
| "end": 135, |
| "text": "Baan et al., 2019)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 648, |
| "end": 669, |
| "text": "Lakretz et al. (2019)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Differently from those latter work, our aim was to combine previous approaches based on probes and on the study on individual units in order to propose an in-depth investigation on the organization of linguistic competence within NLM contextualized representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To study how the information used by BERT to implicitly encode linguistic properties is arranged within its internal representations, we relied on a variable selection approach based on Lasso regression (Tibshirani, 1996) , which aims at keeping as few non-zero coefficients as possible when solving specific regression tasks. Our aim was to identify which weights within sentence-level BERT internal representations can be set to zero, in order to understand the relationship between hidden units and linguistic competence and whether the information needed to perform similar linguistic tasks is encoded in similar positions. We relied on a suite of 68 sentence-level probing tasks, each of which corresponds to a specific linguistic feature capturing characteristics of a sentence at different levels of granularity. In particular, we defined a Lasso regression model that takes as input layer-wise BERT representations for each sentence of a gold standard Universal Dependencies (UD) (Nivre et al., 2016) English dataset and predicts the actual value of a given sentence-level feature. Lasso regression consists in adding an L 1 penalization to the usual ordinary least square loss. To do so, one of the most relevant parameters is \u03bb, which tunes how relevant the L 1 penalization is for the loss function. We performed a grid search with cross validation for each feature-layer pair, in order to identify the best suited value for \u03bb according to each task. Specifically, our goal was to find the most suited value for seeking the best performance when having as few non-zero coefficients as possible.", |
| "cite_spans": [ |
| { |
| "start": 203, |
| "end": 221, |
| "text": "(Tibshirani, 1996)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 988, |
| "end": 1008, |
| "text": "(Nivre et al., 2016)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Approach", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We used a pre-trained version of BERT (BERTbase uncased, 12 layers). In order to obtain the representations for our sentence-level tasks we experimented with the activation of the first input token ([CLS]) and the mean of all the word embeddings for each sentence (Mean-pooling).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model and data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "With regard to the data used for the regression experiments, we relied on the Universal Dependencies (UD) English dataset. The dataset includes three UD English treebanks: UD English-ParTUT, a conversion of a multilingual parallel treebank consisting of a variety of text genres, including talks, legal texts and Wikipedia articles (Sanguinetti and Bosco, 2015) ; the Universal Dependencies version annotation from the GUM corpus (Zeldes, 2017) ; the English Web Treebank (EWT), a gold standard universal dependencies corpus for English (Silveira et al., 2014) . Overall, the final dataset consists of 23,943 sentences.", |
| "cite_spans": [ |
| { |
| "start": 332, |
| "end": 361, |
| "text": "(Sanguinetti and Bosco, 2015)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 430, |
| "end": 444, |
| "text": "(Zeldes, 2017)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 537, |
| "end": 560, |
| "text": "(Silveira et al., 2014)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model and data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "As already mentioned, we defined a suite of probing tasks relying on a wide set of sentence-level linguistic features automatically extracted from the parsed sentences in the UD dataset. The set of features is based on the ones described in which are acquired from raw, morphosyntactic and syntactic levels of annotation and can be categorised in 9 groups corresponding to different linguistic phenomena. As shown in Table 1, these features model linguistic phenomena ranging from raw text one, to morpho-syntactic information and inflectional properties of verbs, to more complex aspects of sentence structure modeling global and local properties of the whole parsed tree and of specific subtrees, such as the order of subjects and objects with respect to the verb, the distribution of UD syntactic relations, also including features referring to the use of subordination and to the structure of verbal predicates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linguistic features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "As a first analysis, we investigated the relationship between the implicit linguistic properties encoded in the internal representations of BERT and the number of individual units involved in the encoding of these properties. Figure 1 and 2 report layerwise R 2 results for all the probing tasks along with the number of non-zero coefficients obtained with the sentence representations computed with the [CLS] token and the Mean-pooling strategy respectively. As a first remark, we can notice that the Mean-pooling method proved to be the best one for almost all the probing features across the 12 layers. Moreover, in line with Hewitt and Manning (2019), we noticed that there is high variability among different tasks, whereas less variation occurs among the model layers. In general, we observe that best scores are related to features belonging to raw text and vocabulary proprieties, such as sentence length and Type/Token Ratio. Nevertheless, BERT representations implicitly encode information also related to more complex syntactic features, such as the order of the subject (subj pre) or the distribution of several dependency relations (e.g. dep dist det, dep dist punct). Interestingly, the knowledge about POS differs when we consider more granular distinctions. For instance, within the broad categories of verbs and nouns, worse predictions were obtained by sub-specific classes of verbs based on tense, person and mood features (see especially past participle, xpos dist VBN). Similarly, within the verb predicate structure properties, we observe that lower R 2 scores were obtained by features related to sub-categorization information about verbal predicates, such as the distribution of verbs by arity (verbal arity *).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 226, |
| "end": 234, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Linguistic competence and BERT units", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Focusing instead on the relationship between R 2 scores and number of non-zero coefficients, we can notice that although best scores are achieved at lower layers (between layers 12 and 8 for both configurations), the highest number of non-zero coefficients occurs instead at layers closer to the output. This is particularly evident for the results achieved using the [CLS] token, for which we observe a continuous increase across the 12 layers in the number of units used by the the probing models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linguistic competence and BERT units", |
| "sec_num": "4" |
| }, |
| { |
| "text": "For both configurations, features more related to the structure of the whole syntactic tree are those for which less units were set to zero during regression (e.g. max links len, parse depth, n prepositional chains), while properties belonging to word-based properties (i.e. features related to POS and dependency labels) were predicted relying on less units. Moreover, we can clearly notice that features related to specific POS and dependency relationships are also those that gained less units through the 12 layers (e. g. xpos dist ., xpos dist AUX). On the contrary, features belonging to the structure of the syntactic tree tend to acquire more non-zero units as the output layer is approached. This is particularly evident for the linguistic features predicted using sentence representations computed using the [CLS] token (e.g. subj pre, parse depth, n prepositional chains). We believe this is due to the fact that the interdependence between different units in each representation tend to increase across layers, thus making the information less localized especially for those features that belong to the whole structure of the syntactic tree. This is coherent with the fact that using the Mean-pooling strategy a higher number of non-zero coefficients was preserved also in the very first input layers, suggesting that this strategy increases the interdependence between each unit and makes the extraction of localized information more complex.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linguistic competence and BERT units", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In order to focus more closely on the relationship between R 2 scores and non-zero units, we reported in Figures 3a and 3b average R 2 scores versus average number of non-zero coefficients, along with the line of best fit, for each layer and according to the [CLS] token and to the Mean-pooling strategy respectively. Interestingly, for both [CLS] and Mean-pooling representations, R 2 scores tend to improve as the number of non-zero coefficients increases. Moreover, when considering sentence representations computed with the [CLS] token, this behaviour becomes more pronounced as the output layer is reached. This is in line with what we already noticed, namely that the interdependence between different units tend to increase across layers, especially when taking into account representations extracted without using a mean-pooling strategy.", |
| "cite_spans": [ |
| { |
| "start": 342, |
| "end": 347, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 105, |
| "end": 122, |
| "text": "Figures 3a and 3b", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Linguistic competence and BERT units", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In order to investigate more in depth the behaviour of BERT hidden units when solving the probing tasks, we focused more closely at how the different units in the internal representations are kept and lost across subsequent layers. Figure 4 reports the average number of non-zero coefficients in a layer that are set to zero in the following one (4a), the average number of zero coefficients in a layer that are set to non-zero in the following one (4b) and the average value of the difference between the number of non-zero coefficients at pairs of consecutive layers (4c). As it can be observed, there is high coherence between each layer and its subsequent one, meaning that the variation in the number of selected coefficient is stable (4c). However, the first two plots also show that there is a higher variation when considering non-zero coeffi-cients in the same positions between pairs of layers. This underlines the fact that the information is not localized within BERT's internal representations, since the algorithm shows a degree of freedom in which units can be zeroed and which cannot.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 232, |
| "end": 240, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Linguistic competence and BERT units", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In Figure 5 we report instead how many times each individual unit in the [CLS] (5a) and Meanpooling (5b) internal representations has been kept non-zero when solving the 68 probing tasks for all the 12 BERT layers (816 regression task). In general, we can observe that the regression tasks performed using sentence-level representations obtained with the Mean-pooling strategy tend to use more hidden units with respect to the [CLS] ones. It is also interesting to notice that there is a highly irregular unit (number 308) that has been kept different from zero in a number of tasks and layers much higher than the average. This could suggest that this unit is particularly relevant for encoding almost all the linguistic properties devised in our probing tasks. Figure 4 : In (a) the average number of non-zero coefficients in a layer that are set to zero in the following one (average number of dropped coefficients), in (b) the average number of zero coefficients in a layer that are set to non-zero in the following one (average number of gained coefficients) and in (c) the value of the difference between the number of non-zero coefficients at pairs of consecutive layers (average number of changed coefficients).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 11, |
| "text": "Figure 5", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 763, |
| "end": 771, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Linguistic competence and BERT units", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Once we have investigated the relationship between the linguistic knowledge implicitly encoded by BERT and the number of individual units involved in it, we verified whether we can identify groups of units particularly relevant for specific probing tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Is information linguistically arranged within BERT representations?", |
| "sec_num": "5" |
| }, |
| { |
| "text": "To this end, we clustered the 68 probing features according to the weights assigned by the regression models to each BERT hidden unit. Specifically, we perform hierarchical clustering using correlation distance as distance metric. Figure 6 and 7 report the hierarchical clustering obtained with the [CLS] and Mean-pooling internal representations at layers 12, 8 and 1. We chose layers 12 and 1 in order to study differences of the clustering of linguistic features taking into account the representations that were more distant and more closer to the language modeling task respectively, while layer 8 was chosen since it was the layer after which BERT's representations tend to lose their precision in encoding our set of linguistic properties.", |
| "cite_spans": [ |
| { |
| "start": 299, |
| "end": 304, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 231, |
| "end": 239, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Is information linguistically arranged within BERT representations?", |
| "sec_num": "5" |
| }, |
| { |
| "text": "As a general remark, we can notice that, despite some variations, the linguistic features are organized in a similar manner across the tree layers and for both the configuration. This is to say that, despite the number of non-zero coefficients varies significantly between layers and according to the strategy for extracting the internal representations, the way in which linguistic properties are arranged within BERT embeddings is quite consistent. This suggests that there is a coherent organization of linguistic features according to non-zero coefficients that is independent from the layer and the aggregation techniques taken into account.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Is information linguistically arranged within BERT representations?", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Focusing on specific groups of features, we observe that, even if the traditional division with respect to the linguistic annotation levels (see Table 1 ) has not been completely maintained, it is possible to identify different clusters of features referable to the same linguistic phenomena for all the 3 layers taken into account and for both configurations. In particular, we can clearly observe groups of features related to the length of dependency links and prepositional chains (e.g. max links len, avg links len, n prepositional chains), to vocabulary richness (ttr form, ttr lemma), to properties related to verbal predicate structure and inflectional morphology of auxiliaries (e.g. xpos dist VBD, xpos dist VBN aux form dist Fin, aux tense dist pres) and to the use of punctuation (xpos dist ., xpos dist ,, dep dist punct) and subordination (e.g. subordinate dist 1, subordinate post). Interestingly enough, BERT representations also tend to put together features related to each other but not necessarily belonging to the same linguistic macrocategory. This is the case, for instance, of characteristics corresponding to functional properties (e.g. upos dist ADP, dep dist det).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 145, |
| "end": 152, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Is information linguistically arranged within BERT representations?", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this paper we proposed an in-depth investigation aimed at understanding how BERT embeddings encode and organize linguistic competence. Relying on a variable selection approach applied on a suite of 68 probing tasks, we showed the existence of a relationship between the implicit linguistic knowledge encoded by the NLM and the number of individual units involved in the encoding of this knowledge. We found that, according to the strategy for obtaining sentence-level representations, the amount of hidden units devised to encode linguistic properties varies differently across BERT layers: while the number of non-zero units used in the Mean-pooling strategy remains more or less constant across layers, the [CLS] representations show a continuous increase in the number of Figure 6 : From top to bottom, the hierarchical clustering for the [CLS] setting of all the tasks respectively at layers 12, 8 and 1. used coefficients. Moreover, we noticed that this behaviour is particularly significant for linguistic properties related to the whole structure of the syntactic tree, while features belonging to POS and dependency tags tend to acquire less non-zero units across layers.", |
| "cite_spans": [ |
| { |
| "start": 845, |
| "end": 850, |
| "text": "[CLS]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 778, |
| "end": 786, |
| "text": "Figure 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Finally, we found that it is possible to identify groups of units more relevant for specific linguistic tasks. In particular, we showed that clustering our set of sentence-level properties according to the weights assigned by the regression models to each BERT unit we can identify clusters of features referable to the same linguistic phenomena and this, despite some variations, is true for both the configurations and for all the BERT layers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "On the realization of compositionality in neural networks", |
| "authors": [ |
| { |
| "first": "Joris", |
| "middle": [], |
| "last": "Baan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jana", |
| "middle": [], |
| "last": "Leible", |
| "suffix": "" |
| }, |
| { |
| "first": "Mitja", |
| "middle": [], |
| "last": "Nikolaus", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Rau", |
| "suffix": "" |
| }, |
| { |
| "first": "Dennis", |
| "middle": [], |
| "last": "Ulmer", |
| "suffix": "" |
| }, |
| { |
| "first": "Tim", |
| "middle": [], |
| "last": "Baumg\u00e4rtner", |
| "suffix": "" |
| }, |
| { |
| "first": "Dieuwke", |
| "middle": [], |
| "last": "Hupkes", |
| "suffix": "" |
| }, |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "127--137", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-4814" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joris Baan, Jana Leible, Mitja Nikolaus, David Rau, Dennis Ulmer, Tim Baumg\u00e4rtner, Dieuwke Hupkes, and Elia Bruni. 2019. On the realization of compo- sitionality in neural networks. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 127- 137, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Identifying and controlling important neurons in neural machine translation", |
| "authors": [ |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Bau", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anthony Bau, Yonatan Belinkov, Hassan Sajjad, Nadir Durrani, Fahim Dalvi, and James Glass. 2019. Iden- tifying and controlling important neurons in neural machine translation. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Analysis methods in neural language processing: A survey", |
| "authors": [ |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yonatan Belinkov and James Glass. 2019. Analysis methods in neural language processing: A survey.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Analysis methods in neural language processing: A survey", |
| "authors": [], |
| "year": 2019, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "7", |
| "issue": "", |
| "pages": "49--72", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Analysis methods in neural language processing: A survey. Transactions of the Association for Computational Linguistics, 7:49-72.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Deep rnns encode soft hierarchical syntax", |
| "authors": [ |
| { |
| "first": "Terra", |
| "middle": [], |
| "last": "Blevins", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "14--19", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Terra Blevins, Omer Levy, and Luke Zettlemoyer. 2018. Deep rnns encode soft hierarchical syntax. In Pro- ceedings of the 56th Annual Meeting of the Associa- tion for Computational Linguistics (Volume 2: Short Papers), pages 14-19.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Profiling-ud: a tool for linguistic profiling of texts", |
| "authors": [ |
| { |
| "first": "Dominique", |
| "middle": [], |
| "last": "Brunato", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Cimino", |
| "suffix": "" |
| }, |
| { |
| "first": "Felice", |
| "middle": [], |
| "last": "Dell'orletta", |
| "suffix": "" |
| }, |
| { |
| "first": "Giulia", |
| "middle": [], |
| "last": "Venturi", |
| "suffix": "" |
| }, |
| { |
| "first": "Simonetta", |
| "middle": [], |
| "last": "Montemagni", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of The 12th Language Resources and Evaluation Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "7147--7153", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dominique Brunato, Andrea Cimino, Felice Dell'Orletta, Giulia Venturi, and Simonetta Montemagni. 2020. Profiling-ud: a tool for linguis- tic profiling of texts. In Proceedings of The 12th Language Resources and Evaluation Conference, pages 7147-7153, Marseille, France. European Language Resources Association.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic properties", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2126--2136", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P18-1198" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, German Kruszewski, Guillaume Lam- ple, Lo\u00efc Barrault, and Marco Baroni. 2018a. What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic properties. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 2126-2136, Melbourne, Aus- tralia. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic properties", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Germ\u00e1n", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Barrault", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "2126--2136", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Germ\u00e1n Kruszewski, Guillaume Lam- ple, Lo\u00efc Barrault, and Marco Baroni. 2018b. What you can cram into a single $&!#* vector: Probing sentence embeddings for linguistic properties. In Proceedings of the 56th Annual Meeting of the As- sociation for Computational Linguistics (Volume 1: Long Papers), pages 2126-2136.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "What is one grain of sand in the desert? analyzing individual neurons in deep nlp models", |
| "authors": [ |
| { |
| "first": "Fahim", |
| "middle": [], |
| "last": "Dalvi", |
| "suffix": "" |
| }, |
| { |
| "first": "Nadir", |
| "middle": [], |
| "last": "Durrani", |
| "suffix": "" |
| }, |
| { |
| "first": "Hassan", |
| "middle": [], |
| "last": "Sajjad", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "Anthony", |
| "middle": [], |
| "last": "Bau", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "33", |
| "issue": "", |
| "pages": "6309--6317", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fahim Dalvi, Nadir Durrani, Hassan Sajjad, Yonatan Belinkov, Anthony Bau, and James Glass. 2019. What is one grain of sand in the desert? analyz- ing individual neurons in deep nlp models. In Pro- ceedings of the AAAI Conference on Artificial Intel- ligence, volume 33, pages 6309-6317.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A structural probe for finding syntax in word representations", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Hewitt", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4129--4138", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Hewitt and Christopher D Manning. 2019. A structural probe for finding syntax in word represen- tations. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Tech- nologies, Volume 1 (Long and Short Papers), pages 4129-4138.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "What does BERT learn about the structure of language", |
| "authors": [ |
| { |
| "first": "Ganesh", |
| "middle": [], |
| "last": "Jawahar", |
| "suffix": "" |
| }, |
| { |
| "first": "Beno\u00eet", |
| "middle": [], |
| "last": "Sagot", |
| "suffix": "" |
| }, |
| { |
| "first": "Djam\u00e9", |
| "middle": [], |
| "last": "Seddah", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3651--3657", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1356" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ganesh Jawahar, Beno\u00eet Sagot, and Djam\u00e9 Seddah. 2019. What does BERT learn about the structure of language? In Proceedings of the 57th Annual Meeting of the Association for Computational Lin- guistics, pages 3651-3657, Florence, Italy. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "The emergence of number and syntax units in LSTM language models", |
| "authors": [ |
| { |
| "first": "Yair", |
| "middle": [], |
| "last": "Lakretz", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Kruszewski", |
| "suffix": "" |
| }, |
| { |
| "first": "Theo", |
| "middle": [], |
| "last": "Desbordes", |
| "suffix": "" |
| }, |
| { |
| "first": "Dieuwke", |
| "middle": [], |
| "last": "Hupkes", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislas", |
| "middle": [], |
| "last": "Dehaene", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "11--20", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1002" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yair Lakretz, German Kruszewski, Theo Desbordes, Dieuwke Hupkes, Stanislas Dehaene, and Marco Ba- roni. 2019. The emergence of number and syn- tax units in LSTM language models. In Proceed- ings of the 2019 Conference of the North American Chapter of the Association for Computational Lin- guistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 11-20, Minneapolis, Minnesota. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Syntactic structure from deep learning", |
| "authors": [ |
| { |
| "first": "Tal", |
| "middle": [], |
| "last": "Linzen", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tal Linzen and Marco Baroni. 2020. Syntactic struc- ture from deep learning. CoRR, abs/2004.10827.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Linguistic knowledge and transferability of contextual representations", |
| "authors": [ |
| { |
| "first": "F", |
| "middle": [], |
| "last": "Nelson", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonatan", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Belinkov", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1073--1094", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nelson F Liu, Matt Gardner, Yonatan Belinkov, Matthew E Peters, and Noah A Smith. 2019. Lin- guistic knowledge and transferability of contextual representations. In Proceedings of the 2019 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, Volume 1 (Long and Short Pa- pers), pages 1073-1094.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Linguistic profiling of a neural language model", |
| "authors": [ |
| { |
| "first": "Alessio", |
| "middle": [], |
| "last": "Miaschi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dominique", |
| "middle": [], |
| "last": "Brunato", |
| "suffix": "" |
| }, |
| { |
| "first": "Felice", |
| "middle": [], |
| "last": "Dell'orletta", |
| "suffix": "" |
| }, |
| { |
| "first": "Giulia", |
| "middle": [], |
| "last": "Venturi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "745--756", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.65" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alessio Miaschi, Dominique Brunato, Felice Dell'Orletta, and Giulia Venturi. 2020. Lin- guistic profiling of a neural language model. In Proceedings of the 28th International Conference on Computational Linguistics, pages 745-756, Barcelona, Spain (Online). International Committee on Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Universal dependencies v1: A multilingual treebank collection", |
| "authors": [ |
| { |
| "first": "Joakim", |
| "middle": [], |
| "last": "Nivre", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Catherine", |
| "middle": [], |
| "last": "De Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "Filip", |
| "middle": [], |
| "last": "Ginter", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Hajic", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Slav", |
| "middle": [], |
| "last": "Mcdonald", |
| "suffix": "" |
| }, |
| { |
| "first": "Sampo", |
| "middle": [], |
| "last": "Petrov", |
| "suffix": "" |
| }, |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Pyysalo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Silveira", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
| "volume": "", |
| "issue": "", |
| "pages": "1659--1666", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joakim Nivre, Marie-Catherine De Marneffe, Filip Gin- ter, Yoav Goldberg, Jan Hajic, Christopher D Man- ning, Ryan McDonald, Slav Petrov, Sampo Pyysalo, Natalia Silveira, et al. 2016. Universal dependencies v1: A multilingual treebank collection. In Proceed- ings of the Tenth International Conference on Lan- guage Resources and Evaluation (LREC'16), pages 1659-1666.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227- 2237.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Analyzing linguistic knowledge in sequential model of sentence", |
| "authors": [ |
| { |
| "first": "Xipeng", |
| "middle": [], |
| "last": "Peng Qian", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuanjing", |
| "middle": [], |
| "last": "Qiu", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "826--835", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D16-1079" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peng Qian, Xipeng Qiu, and Xuanjing Huang. 2016. Analyzing linguistic knowledge in sequential model of sentence. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Process- ing, pages 826-835, Austin, Texas. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Parttut: The turin university parallel treebank", |
| "authors": [ |
| { |
| "first": "Manuela", |
| "middle": [], |
| "last": "Sanguinetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Cristina", |
| "middle": [], |
| "last": "Bosco", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Harmonization and Development of Resources and Tools for Italian Natural Language Processing within the PARLI Project", |
| "volume": "", |
| "issue": "", |
| "pages": "51--69", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manuela Sanguinetti and Cristina Bosco. 2015. Parttut: The turin university parallel treebank. In Harmo- nization and Development of Resources and Tools for Italian Natural Language Processing within the PARLI Project, pages 51-69. Springer.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A gold standard dependency corpus for english", |
| "authors": [ |
| { |
| "first": "Natalia", |
| "middle": [], |
| "last": "Silveira", |
| "suffix": "" |
| }, |
| { |
| "first": "Timothy", |
| "middle": [], |
| "last": "Dozat", |
| "suffix": "" |
| }, |
| { |
| "first": "Marie-Catherine De", |
| "middle": [], |
| "last": "Marneffe", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Miriam", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Connor", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "LREC", |
| "volume": "", |
| "issue": "", |
| "pages": "2897--2904", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Natalia Silveira, Timothy Dozat, Marie-Catherine De Marneffe, Samuel R Bowman, Miriam Connor, John Bauer, and Christopher D Manning. 2014. A gold standard dependency corpus for english. In LREC, pages 2897-2904.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "BERT rediscovers the classical NLP pipeline", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Tenney", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "Ellie", |
| "middle": [], |
| "last": "Pavlick", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "4593--4601", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1452" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian Tenney, Dipanjan Das, and Ellie Pavlick. 2019a. BERT rediscovers the classical NLP pipeline. In Proceedings of the 57th Annual Meeting of the Asso- ciation for Computational Linguistics, pages 4593- 4601, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "What do you learn from context? probing for sentence structure in contextualized word representations", |
| "authors": [ |
| { |
| "first": "Ian", |
| "middle": [], |
| "last": "Tenney", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrick", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Berlin", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Poliak", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Mccoy", |
| "suffix": "" |
| }, |
| { |
| "first": "Najoung", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1905.06316" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ian Tenney, Patrick Xia, Berlin Chen, Alex Wang, Adam Poliak, R Thomas McCoy, Najoung Kim, Benjamin Van Durme, Samuel R Bowman, Dipan- jan Das, et al. 2019b. What do you learn from context? probing for sentence structure in con- textualized word representations. arXiv preprint arXiv:1905.06316.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Regression shrinkage and selection via the lasso", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Tibshirani", |
| "suffix": "" |
| } |
| ], |
| "year": 1996, |
| "venue": "Journal of the Royal Statistical Society: Series B (Methodological)", |
| "volume": "58", |
| "issue": "1", |
| "pages": "267--288", |
| "other_ids": { |
| "DOI": [ |
| "10.1111/j.2517-6161.1996.tb02080.x" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Tibshirani. 1996. Regression shrinkage and se- lection via the lasso. Journal of the Royal Statistical Society: Series B (Methodological), 58(1):267-288.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "The GUM corpus: Creating multilayer resources in the classroom. Language Resources and Evaluation", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zeldes", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "51", |
| "issue": "", |
| "pages": "581--612", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s10579-016-9343-x" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir Zeldes. 2017. The GUM corpus: Creating mul- tilayer resources in the classroom. Language Re- sources and Evaluation, 51(3):581-612.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Language modeling teaches you more than translation does: Lessons learned through auxiliary syntactic task analysis", |
| "authors": [ |
| { |
| "first": "Kelly", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 EMNLP Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "359--361", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kelly Zhang and Samuel Bowman. 2018. Language modeling teaches you more than translation does: Lessons learned through auxiliary syntactic task analysis. In Proceedings of the 2018 EMNLP Work- shop BlackboxNLP: Analyzing and Interpreting Neu- ral Networks for NLP, pages 359-361.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Layerwise R 2 results for all the probing tasks (left heatmap) along with the number of non-zero coefficients (right heatmap) obtained with the sentence representations computed using the [CLS] token.", |
| "num": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Layerwise R 2 results for all the probing tasks (left heatmap) along with the number of non-zero coefficients (right heatmap) obtained with the sentence representations computed with the Mean-pooling strategy.", |
| "num": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Average R 2 scores versus average number of non-zero coefficients, along with the line of best fit, for each layer and according to [CLS] (a) and Mean-pooling (b) strategy.", |
| "num": null |
| }, |
| "FIGREF3": { |
| "type_str": "figure", |
| "uris": null, |
| "text": "Number of times in which each BERT individual unit (computed with [CLS] token in (a) and with Mean-pooling aggregation strategy in (b)) has been kept as non-zero when solving all the probing tasks for all the 12 layers.", |
| "num": null |
| }, |
| "TABREF1": { |
| "text": "Linguistic Features used in the experiments.", |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "num": null |
| } |
| } |
| } |
| } |