| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T16:21:34.144432Z" |
| }, |
| "title": "Low Anisotropy Sense Retrofitting (LASeR) : Towards Isotropic and Sense Enriched Representations", |
| "authors": [ |
| { |
| "first": "Geetanjali", |
| "middle": [], |
| "last": "Bihani", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Technology Purdue University", |
| "location": {} |
| }, |
| "email": "gbihani@purdue.edu" |
| }, |
| { |
| "first": "Julia", |
| "middle": [ |
| "Taylor" |
| ], |
| "last": "Rayz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Technology Purdue University", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Contextual word representation models have shown massive improvements on a multitude of NLP tasks, yet their word sense disambiguation capabilities remain poorly explained. To address this gap, we assess whether contextual word representations extracted from deep pretrained language models create distinguishable representations for different senses of a given word. We analyze the representation geometry and find that most layers of deep pretrained language models create highly anisotropic representations, pointing towards the existence of representation degeneration problem in contextual word representations. After accounting for anisotropy, our study further reveals that there is variability in sense learning capabilities across different language models. Finally, we propose LASeR, a 'Low Anisotropy Sense Retrofitting' approach that renders off-the-shelf representations isotropic and semantically more meaningful, resolving the representation degeneration problem as a post-processing step, and conducting senseenrichment of contextualized representations extracted from deep neural language models.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Contextual word representation models have shown massive improvements on a multitude of NLP tasks, yet their word sense disambiguation capabilities remain poorly explained. To address this gap, we assess whether contextual word representations extracted from deep pretrained language models create distinguishable representations for different senses of a given word. We analyze the representation geometry and find that most layers of deep pretrained language models create highly anisotropic representations, pointing towards the existence of representation degeneration problem in contextual word representations. After accounting for anisotropy, our study further reveals that there is variability in sense learning capabilities across different language models. Finally, we propose LASeR, a 'Low Anisotropy Sense Retrofitting' approach that renders off-the-shelf representations isotropic and semantically more meaningful, resolving the representation degeneration problem as a post-processing step, and conducting senseenrichment of contextualized representations extracted from deep neural language models.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Distributional word representations, developed using large-scale training corpora, form an integral part of the modern NLP methodological paradigm. The advent of deep pre-trained neural language models such as BERT (Devlin et al., 2018) and GPT-2 (Radford et al., 2019) has led the shift towards the development of contextualized word representations. Unlike static word representation models, such as word2vec (Mikolov et al., 2013) and fastText (Bojanowski et al., 2017) , which conflate multiple senses of a word within a single representation, contextual word representation models assign as many representations to a word as the number of contexts it appears in. The preference for contextual word representations can be attributed to the significant improvements they have achieved in a wide variety of NLP tasks including question answering, textual entailment, sentiment analysis (Peters et al., 2018; Devlin et al., 2018) and commonsense reasoning (Da and Kasai, 2019; Sap et al., 2020) , to name a few.", |
| "cite_spans": [ |
| { |
| "start": 215, |
| "end": 236, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 241, |
| "end": 269, |
| "text": "GPT-2 (Radford et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 411, |
| "end": 433, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 447, |
| "end": 472, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 888, |
| "end": 909, |
| "text": "(Peters et al., 2018;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 910, |
| "end": 930, |
| "text": "Devlin et al., 2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 957, |
| "end": 977, |
| "text": "(Da and Kasai, 2019;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 978, |
| "end": 995, |
| "text": "Sap et al., 2020)", |
| "ref_id": "BIBREF25" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To utilize contextual word representations as knowledge resources, it is necessary to determine their ability to mirror the linguistic relations employed in language (Schnabel et al., 2015) . There is a growing body of literature that assesses whether contextual representations encode information about word-senses, where each word-sense portrays an aspect of the meaning of a given word in a given context (Jurafsky and Martin, 2019) . A recent analysis by Nair et al. (2020) reported that contextual word representations can learn humanlike word sense knowledge, where they compared cosine relatedness between homonyms and polysemous word senses against human sense-related judgements. When calculating cosine relatedness, such studies assume the encoded vector space to be isotropic in nature. Geometrically, isotropy in a vector space is defined as vectors being uniformly distributed across all directions, instead of occupying a narrow cone (Ethayarajh, 2019; Mu and Viswanath, 2018) . Recent studies point towards anisotropy (lack of isotropy) in contextual word representations (Ethayarajh, 2019; Zhang et al., 2020) , which affects prior conclusions regarding word-sense information encoded in vector spaces. For example, in an isotropic vector space, if cosine relatedness between word representations A and B is 0.9, we conclude them to be highly similar. But, if the vector space is anisotropic, where cosine relatedness between randomly sampled words is 0.95, then the representations A and B are deemed less similar than randomly sampled words. This shows that the existence and the extent of anisotropy in the vector space affects conclusions regarding whether representations are actually similar or merely a product of representation degeneration. Hence, when evaluating the sense learning capabilities of deep pretrained language models through vector relatedness measures, accounting and adjusting for vector space anisotropy becomes necessary.", |
| "cite_spans": [ |
| { |
| "start": 166, |
| "end": 189, |
| "text": "(Schnabel et al., 2015)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 408, |
| "end": 435, |
| "text": "(Jurafsky and Martin, 2019)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 459, |
| "end": 477, |
| "text": "Nair et al. (2020)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 948, |
| "end": 966, |
| "text": "(Ethayarajh, 2019;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 967, |
| "end": 990, |
| "text": "Mu and Viswanath, 2018)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1087, |
| "end": 1105, |
| "text": "(Ethayarajh, 2019;", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1106, |
| "end": 1125, |
| "text": "Zhang et al., 2020)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this regard, our work presents three key contributions. First, we analyze and adjust for anisotropy across contextual representations extracted from all layers of four language models (BERT, GPT-2, XLNet and ELECTRA). The representation space for each model encodes anisotropy, varying in terms of number and strength of common directions in model representations. We find that models learning unidirectional context create more anisotropic representations than models learning bidirectional context. Second, we observe that sense information is not equally encoded in all models, where (pseudo) bidirectional models learn to disambiguate word senses better than others. Moreover, sense information is better retained in the lower layers and significantly reduces in the upper model layers due to the representations getting more contextualized. Third, to address these preliminary findings and to contribute towards the creation of sense-coherent representations, we propose LASeR, a 'Low Anisotropy Sense Retrofitting' approach, bringing word representations closer to the goal of mirroring lexical semantic relations present in natural language while removing artifacts of representation degeneration from learned representations. Thus, we combine vector space transformation and knowledge-based vector specialization methods to create more isotropic and sense enriched representations, ensuring that we retain the distributional properties learnt during pretraining, while aligning and grounding the representation geometry towards better sense learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Prior works which modify off-the-shelf embeddings to improve their lexical-semantic representation can be divided into two primary categories: (1) Anisotropy treatment methods and (2) Retrofitting methods. Anisotropy treatment methods focus on improving the isotropy of word vectors, promoting uniform distribution of information across all directions (Mu and Viswanath, 2018; Raunak et al., 2019; Wang et al., 2019) . Isotropy in contextual vector spaces is regarded valuable, especially when utilizing vector geometry and relatedness measures in downstream analyses (Ethayarajh, 2019) . Prior methods that focus on creating more isotropic vector spaces have suggested principle component manipulation (removal, extension) of vector spaces (Mu and Viswanath, 2018; Jo and Choi, 2018) . To our knowledge, these methods have been proposed for static word representations, but are yet to be extended to contextual word representations extracted from a wide variety of language models.", |
| "cite_spans": [ |
| { |
| "start": 352, |
| "end": 376, |
| "text": "(Mu and Viswanath, 2018;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 377, |
| "end": 397, |
| "text": "Raunak et al., 2019;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 398, |
| "end": 416, |
| "text": "Wang et al., 2019)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 568, |
| "end": 586, |
| "text": "(Ethayarajh, 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 741, |
| "end": 765, |
| "text": "(Mu and Viswanath, 2018;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 766, |
| "end": 784, |
| "text": "Jo and Choi, 2018)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "On the other hand, retrofitting methods are focused on enhancing the representation geometry, by encoding lexical semantic relations through semantic specialization, a post-processing approach that enforces linguistic constraints on vector spaces by relying on external linguistic knowledge databases (Vuli\u0107 and Mrk\u0161i\u0107, 2018; Faruqui et al., 2015; Jo and Choi, 2018; Vuli\u0107, 2018) . Semantic specialization as a post processing step (retrofitting) is currently limited to static word representations (Mu and Viswanath, 2018; Vuli\u0107 and Mrk\u0161i\u0107, 2018) where they have yielded impressive performance improvements over raw embeddings (Lauscher et al., 2020a) . Existing methods towards semantic specialization of contextual representations primarily focus on retraining the model from scratch (Lauscher et al., 2020b) or post-hoc fine-tuning the model (Zhang et al., 2019; Peters et al., 2019; Wang et al., 2020) . These methods are (1) resourceintensive (retraining or fine-tuning) and (2) do not address the representation degeneration problem in vector representations (Gao et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 301, |
| "end": 325, |
| "text": "(Vuli\u0107 and Mrk\u0161i\u0107, 2018;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 326, |
| "end": 347, |
| "text": "Faruqui et al., 2015;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 348, |
| "end": 366, |
| "text": "Jo and Choi, 2018;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 367, |
| "end": 379, |
| "text": "Vuli\u0107, 2018)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 499, |
| "end": 523, |
| "text": "(Mu and Viswanath, 2018;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 524, |
| "end": 547, |
| "text": "Vuli\u0107 and Mrk\u0161i\u0107, 2018)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 628, |
| "end": 652, |
| "text": "(Lauscher et al., 2020a)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 787, |
| "end": 811, |
| "text": "(Lauscher et al., 2020b)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 846, |
| "end": 866, |
| "text": "(Zhang et al., 2019;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 867, |
| "end": 887, |
| "text": "Peters et al., 2019;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 888, |
| "end": 906, |
| "text": "Wang et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 1066, |
| "end": 1084, |
| "text": "(Gao et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this work, we focus on contextual word representations generated from four transformer-based model architectures, i.e., BERT (Devlin et al., 2018) , GPT-2 (Radford et al., 2019) , XLNet (Yang et al., 2019) and ELECTRA (Clark et al., 2019) . These models have been selected to assess the impact of variation in context learning and pretraining over the quality of generated representations, while keeping the number of hidden layers and dimensionality identical (layers = 13 (0 + 12); dimensions = 768). BERT and ELECTRA are both bidirectional learners, but they differ in terms of the pre-training objectives used to train the models: BERT uses a masked language modeling objective, limiting its learning to a small subset of word tokens; ELECTRA uses replaced token detection and is able to learn across a wider range of words tokens. On the other hand, GPT-2 and XLNet are both unidirectional learners, where GPT-2 learns only left-to-right context, while XLNet learns over all possible permutations of the given input. A comparison over these models in a uniform setting allows us to relate the behavior of representations to the context learning and pre-training choices of the respective models.", |
| "cite_spans": [ |
| { |
| "start": 128, |
| "end": 149, |
| "text": "(Devlin et al., 2018)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 152, |
| "end": 180, |
| "text": "GPT-2 (Radford et al., 2019)", |
| "ref_id": null |
| }, |
| { |
| "start": 189, |
| "end": 208, |
| "text": "(Yang et al., 2019)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 221, |
| "end": 241, |
| "text": "(Clark et al., 2019)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextual Word Representation Models", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Contextual word representations for individual words are generated by feeding sentences into the language model. In order to generate representations, we use sense annotated corpora from various SemEval and SenseEval tasks, including SensEval 3 task 1 (S3-T1) (Snyder and Palmer, 2004) , SensEval 2 all-words task (S2-TA) (Edmonds and Cotton, 2001), SemEval 2013 task 12 (S13-T12) (Navigli et al., 2013) , SemEval 2007 task 7 (S7-T7) (Navigli et al., 2007) and Se-mEval 2015 task 13 (S15-T13) (Moro and Navigli, 2015) . To ensure that the Wordnet sense keys are unified across corpora, we utilize the Wordnet 3.0 sense annotated data (Vial et al., 2018) and summarized in Table 1 . Since we want to evaluate sense-learning, we limit our analyses to multi-sense words, retaining nouns, adjectives and verbs that appear within the corpora as more than one sense.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 285, |
| "text": "(Snyder and Palmer, 2004)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 381, |
| "end": 403, |
| "text": "(Navigli et al., 2013)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 434, |
| "end": 456, |
| "text": "(Navigli et al., 2007)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 493, |
| "end": 517, |
| "text": "(Moro and Navigli, 2015)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 634, |
| "end": 653, |
| "text": "(Vial et al., 2018)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 672, |
| "end": 679, |
| "text": "Table 1", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "In order to compute how sense information is encoded with the word representations, we define two word-sense specific cosine relatedness measures.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Definition 1 (Sense Similarity). Let w s be a sense of the word w, appearing in m different contexts. Let v l be the vector that maps the each word sense occurrence w s i to the vector space. Then, the average sense similarity between all m instances of the word sense w s for layer is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "SenSim (ws)= 1 m j k =j cos(v (ws j ),v (ws k )) (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "This metric calculates the average cosine similarity between contextual representations of the same sense of a word. Definition 2 (Inter Sense Similarity). Let the word w have S different word senses, where w a and w b are a pair of different senses of w, appearing in m and n different contexts respectively and a, b \u2208 S. Let v l be the vector that maps each word sense occurrence w s i to the vector space. Then, the average inter sense similarity between the representations of all instances of the word w for layer is", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "InterSim (w)=E a,b\u2208S 1 mn m j=1 n i=1 cos(v (wa i ),v (w b j )) (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "This metric calculates the average cosine similarity between contextual representations of different senses of a word.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Thus, if a word w has SenSim l (w s ) > InterSim l (w), it suggests that the representations for the same sense of a given word lie much closer together within the vector space, as compared to the representations of different senses of the same word. For example, a given word 'document' can refer to multiple senses. According to WordNet 3.0, two senses of the word 'document' are: (1) document.n.01 -writing that provides information and (2) document.v.02 -to record in detail.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "As an example, we have visualized the representations of these two senses as encoded within the vector space of BERT (Layer 11), shown in Figure 1 . The 'original' representations, shown in Figure 1 (a), of the word sense document.n.01 lie slightly close to each other, and farther away from the document.v.02 representation. Thus, if a model is able to encode similar representations for same sense of a word, and distinguishable representations for different senses of a word, we claim that the model encodes sense information.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 138, |
| "end": 147, |
| "text": "Figure 1", |
| "ref_id": null |
| }, |
| { |
| "start": 191, |
| "end": 200, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense Learning Measures", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "In order to assess whether contextual word representations encode sense information, we measure the sense similarity and inter sense similarity for multi-sense words (polysemes and homonyms) in our datasets, across model layers. Given that contextual word representations encode anisotropy, we calculate anisotropy adjusted sense relatedness measures as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anisotropy Adjusted Sense Similarity", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "B(v )=E a,b\u223cU [cos(v (a),v (b))]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anisotropy Adjusted Sense Similarity", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "(3)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anisotropy Adjusted Sense Similarity", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "SenSim (ws) * =SenSim (ws)\u2212B(v ) (4) InterSim (w) * =InterSim (w)\u2212B(v ) (5)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anisotropy Adjusted Sense Similarity", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "This baseline calculation utilizes the theory from prior works examining contextualization in word representations (Ethayarajh, 2019) . Here, B(v ) is the average cosine similarlity between n randomly sampled words, U is the set of all word occurrences, and v (.) maps a word occurrence to the respective word representation in layer .", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 133, |
| "text": "(Ethayarajh, 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Anisotropy Adjusted Sense Similarity", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In this subsection, we describe LASeR, a postprocessing approach to render off-the-shelf representations more isotropic and sense-enriched. Our approach builds upon the work on anisotropy reduction Mu and Viswanath (2018) and retrofitting Faruqui et al. (2015) . Mu and Viswanath (2018) suggests that anisotropy can be reduced by removing primary components to make the representations more distinct and uniformly distributed within the vector space. We extend this to contextual word representations, evaluating the efficacy of removing primary components on anisotropy reduction in contextual representations. Turning towards retrofitting methods, we extend the retrofitting approach proposed by Faruqui et al. (2015) , which targets static word representations and brings synonyms closer together in the vector space. Our work extends this retrofitting goal to contextual representations, where we aim to bring representations of same word-senses closer in the vector space, ensuring better sense disambiguation capabilities for representations.", |
| "cite_spans": [ |
| { |
| "start": 198, |
| "end": 221, |
| "text": "Mu and Viswanath (2018)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 239, |
| "end": 260, |
| "text": "Faruqui et al. (2015)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 263, |
| "end": 286, |
| "text": "Mu and Viswanath (2018)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 698, |
| "end": 719, |
| "text": "Faruqui et al. (2015)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Let v(w i ) be the original contextual representation, v (w i ) be the low anisotropy contextual representation andv(w i ) be the sense enriched Algorithm 1: LASeR (Low Anisotropy Sense Retrofitting).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Input:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Raw word representation {v(w i ), w i \u2208 V } 1 Perform mean centering of vector: \u00b5 \u2190 1 |v| \u03a3 w i \u2208V v(w i );\u1e7d (w i ) \u2190 v (w i ) \u2212 \u00b5 2 Compute the PCA components: u i1 , . . . , u iD \u2190 P CA ({\u1e7d(w i ), w i \u2208 V}) 3 Remove top d principal components: v (w i ) \u2190\u1e7d (w i ) \u2212 \u03a3 d j=1 u ij v (w i ) u ij 4 Apply retrofitting update: v(w i ) = j:(i,j)\u2208E \u03b2 ij v(w j )+\u03b1 i v(w i ) j:(i,j)\u2208E \u03b2 ij +\u03b1 i Output: Processed word representation v (w i )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "contextual representation of i th occurrence of a word sense w. We simulate an undirected knowledge graph \u2126(V, E), where V represents the vocabulary of word tokens, each word token representing a vertex, and E represents all the edges connecting respective vertices. Finally, Q represent the matrix of post-processed representations", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "[v(w 1 ),v(w 2 ), . . . ,v(w n )].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "The approach works on achieving dual objectives, described as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Objective 1 (Lower Anisotropy) : Remove top d common directions across all v(w i ), to create v (w i )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": ", creating more uniformly distributed word vectors and lowering anisotropy in representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "Objective 2 (Sense Retrofitting) : Learnv(w i ) such that same sense representations lie closer together in vector space as well as close to the original embedding The algorithm takes the original representations as input. These representations undergo mean centering and removal of dominant primary components (1,2,3 ) to reduce the anisotropy in the vector space. This is followed by a sense-retrofitting update (4) . Here, for each word token representation v(w i ), we define its neighbours as v(w j ), \u2200j where sense(w i ) = sense(w j ), and hyper-parameters \u03b2 ij and \u03b1 i = 1 represent the reciprocal of the node degree of the word token w i and edge weights respectively.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "3.5" |
| }, |
| { |
| "text": "We first show anisotropy analysis results ( \u00a74.1), further evaluating sense learning in contextual representations ( \u00a74.2). Finally, we present improvements in isotropy and lexical-semantic capabilities of the post-processed representations ( \u00a74.3).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We first assess the amount of anisotropy encoded within contextual word vector spaces. We plot the average cosine similarity between 1K randomly sampled words, across different layers of language models, as seen in Figure 2 . If a vector space is isotropic, the average cosine similarity between uniformly randomly sampled words would be 0 (Ethayarajh, 2019) . Thus, the closer this measure is to 1, the more anisotropic the vector space. It can be seen that anisotropy evolves very differently across different models. Unidirectional language models (XLNet, GPT-2) portray far more anisotropy in word representations as compared to bidirectional language models (BERT, ELECTRA). Thus, language models learning one-directional context (L-to-R or R-to-L) encode more common directions in the representations as compared to those learnt from bidirectional context. Moreover, anisotropy monotonically increases across layers for BERT and XLNet, where both models have been trained on masked language modeling tasks. This shows that anisotropy accumulates in the upper layers of masked language models. The rate of increase in anisotropy in XLNet is higher than BERT representations, showing that permutation language modeling propagates higher amounts of anisotropy than traditional MLM. These results are consistent with the results obtained for all multisense words in the corpora (Appendix A).", |
| "cite_spans": [ |
| { |
| "start": 340, |
| "end": 358, |
| "text": "(Ethayarajh, 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 215, |
| "end": 223, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Similarity between Random Words", |
| "sec_num": "4.1.1" |
| }, |
| { |
| "text": "High anisotropy leads to word vectors being distributed within a very narrow cone in the vector space (Mimno and Thompson, 2017) , further signifying that the word representations encode common directions (Mu and Viswanath, 2018) . We plot the top two dominating directions for word repre- sentations, across each model's layers, as shown in Figure 3 . These plots reveal that contextual word representations extracted from different language models are encoded extremely differently within the vector space. It can be seen that BERT and XLNet embeddings are more spread across the vector space, as compared to GPT-2 and ELECTRA embeddings. Moreover, ELECTRA embeddings form highly concentrated, yet separated regions of anisotropy, thus leading to an overall low score on the average similarity between randomly sampled words. Moreover, GPT-2 embeddings reveal extreme anisotropy, where most of the embeddings encode a singular common direction. The plots in Figure 3 also reveal that word frequency is significantly encoded in the top two principal components of BERT and XLNet embeddings. We cannot claim the same for GPT-2 and ELECTRA embeddings, where all embeddings cluster within highly dense regions of anisotropy.", |
| "cite_spans": [ |
| { |
| "start": 102, |
| "end": 128, |
| "text": "(Mimno and Thompson, 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 205, |
| "end": 229, |
| "text": "(Mu and Viswanath, 2018)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 342, |
| "end": 350, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| }, |
| { |
| "start": 960, |
| "end": 968, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Principal Components", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "We also evaluate anisotropy across model layers by assessing the explained variance across common directions encoded across all word representations. We plot the proportion of variance encoded within the top d = 10 dominant principal components of the original contextual representations across model layers, shown in Figure 4 (a). While bidirectional models such as BERT and ELECTRA encode multiple common directions, unidirectional models like GPT-2 and XLNet embeddings primarily encode a singular common direction. For BERT embeddings, the top 10 primary components only contribute to 17-24% of the explained variance, showing that the embeddings are more uniformly distributed across the vector space, as compared to other models. GPT-2 provides a stark contrast, where the top 10 principal components contribute to up to 97% of the explained variance, highly concentrated within the first principal component, especially for the middle layers (Layer 3-8). XLNet embeddings capture comparatively lower common directions across model layers, apart from the final model layer (Layer 12), where 66.1% of the explained variance is concentrated within the first principal component. Thus, representations learnt through the goal of predicting the next word yields all representations extremely similar.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 318, |
| "end": 326, |
| "text": "Figure 4", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Analysis of Principal Components", |
| "sec_num": "4.1.2" |
| }, |
| { |
| "text": "A model differentiates between different word senses if it encodes representations of the same sense of a word to be more similar than the representations of other senses of the same word. We utilize the sense learning measures, defined in ( \u00a73.3) to assess whether original representations encode word-sense information. To examine overall learning across model layers, we calculate average sense similarity (SenSim (w)) and mean difference between average sense similarity and inter sense similarity for a word token w (\u2206).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning in Original Representations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u2206=SenSim (w)\u2212InterSim (w); \u2206\u2208[\u22121,1]", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Sense Learning in Original Representations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Ideally, a language model being able to capture distinction between all word senses should have SenSim (w) = 1 and \u2206 >> 0. Here, higher sense similarities correspond to similar senses being encoded closer in the vector space and \u2206 > 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning in Original Representations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "shows that on an average, same sense representations are more cohesive and well separated from the representations of other senses.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sense Learning in Original Representations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The evolution of sense learning over different models and their layers is portrayed using sense similarity measures, aggregated in Table 2 . The reported vanilla sense similarity scores have been adjusted for anisotropy. Prior to retrofitting, BERT and XLNet embeddings for the same word senses show increasing dissimilarity across model layers, signifying a loss of sense information as the model gets more contextualized. The similarity between same sense word representations from the GPT-2 model is close to 0, showing that GPT-2 captures almost no sense information within the embedding Figure 5 : PCA plots of post-processed word representations across top two primary components, for each model. space. ELECTRA embeddings remain consistent in terms of sense learning, not varying significantly across model layers. Furthermore, \u2206 \u223c 0 across all models shows that the original representations do not significantly distinguish between different senses of a given word. We visualize an example in Figure 1(a) , where representations of the word document lie close together, regardless of the different senses associated with each occurrence. This finding signifies that the sole reliance on word form to learn representations does not suffice in helping the model distinguish between multiple senses of a given word.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 131, |
| "end": 138, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 592, |
| "end": 600, |
| "text": "Figure 5", |
| "ref_id": null |
| }, |
| { |
| "start": 1001, |
| "end": 1012, |
| "text": "Figure 1(a)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sense Learning in Original Representations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We evaluate the efficacy of the proposed LASeR approach by comparing improvements in vector space isotropy and improved disambiguation of different word senses, as captured by retrofitted word representations.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Low Anisotropy Sense Retrofitting", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We conduct experiments by removing the most dominant common direction (d = 1) across generated embeddings across each model layer. This step yields significantly better isotropy in the resulting representations, where average similarity between randomly sampled words (k = 1000) is 0, across all models and model layers. Improvements can also be observed from the reduced proportions of explained variance in Figure 4(b) . Overall, most of the anisotropy in the vector space is treated by removing one dominating direction. The retrofitted GPT-2 embeddings still show high anisotropy in the 12 th layer, showing that more common directions remain to be addressed and possibly removed. These results show that high anisotropy effects can be reduced by removing the primary common directions across representations. The effect of this step is also visualized in Figure 5 , where the representations are significantly less anisotropic and more uniformly spread across the vector space, encoding fewer artifacts of word frequency in the vector space, as compared to the original representations. For visualizations across all model layers, refer to Appendix B. In most cases, removal of the most dominant common direction can yield significant improvements in isotropy, as seen for BERT, XLNet and ELECTRA. In other cases, where representations share more than one significant common directions, such as for GPT-2, we can remove d > 1 common directions to treat anisotropy.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 409, |
| "end": 420, |
| "text": "Figure 4(b)", |
| "ref_id": "FIGREF4" |
| }, |
| { |
| "start": 860, |
| "end": 868, |
| "text": "Figure 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Improvements in Isotropy", |
| "sec_num": "4.3.1" |
| }, |
| { |
| "text": "The retrofitting update applied to model representations enforces lexical-semantic constraints, bringing same sense representations closer together (increase same-sense cohesion) and pushing different sense representations farther apart (increase intersense separation).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Improvements in Sense Representation", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "Results from Table 2 show the efficacy of our retrofitting update (\u03b1 i = 1), where average sense similarity between word vectors increases significantly, and similarity between same sense representations is significantly higher than similarity between representations of different senses. This portrays that the retrofitted representations encode same sense representations closer together and different sense representations farther apart. An example of how retrofitting changes the distribution of representations in the vector space is given in Figure 1(b) , where inter-sense separation between two different senses of the word document increases and same-sense cohesion between representations of the same word sense increases.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 13, |
| "end": 20, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 548, |
| "end": 559, |
| "text": "Figure 1(b)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Improvements in Sense Representation", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "Across the model layers, the retrofitting significantly increases sense similarity and \u2206. The improved similarity scores can be seen in Figure 6 , which show that retrofitting moves same sense representations to be more similar than different sense representations. For BERT embeddings, the improvements are more visible in the upper model layers, as they create more separated different sense representations, and more cohesive same sense representations. The slight drop in cohesion (SenseSim) is due to the model's upper layers being more contextualized than the lower layers, also suggested in prior works on contextualization (Ethayarajh, 2019) . Retrofitting is extremely effective for GPT-2 embeddings. This can been from the drastic increase in sense similarity (SenseSim) and \u2206, showing that same sense representations lie closer and different sense representations lie farther apart in the retrofitted vector space. While originally, the representations were highly anisotropic and held no sense learning, the retrofitted embeddings capture better sense distinction. XLNet embeddings, much like BERT, encode representations of the same word form closer together, especially in lower model layers, regardless of the respective word-sense distinction. Postretrofitting, XLNet embeddings show higher similarity between same word senses and lower similarity between different word senses, revealing better sense disambiguation. Compared to the other three models, original ELECTRA embeddings are able to capture more distinction between different sense representations and more similarity between same sense representations. Our retrofitting update further improves these lexical-semantic relations in the representation space.", |
| "cite_spans": [ |
| { |
| "start": 631, |
| "end": 649, |
| "text": "(Ethayarajh, 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 136, |
| "end": 144, |
| "text": "Figure 6", |
| "ref_id": "FIGREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Improvements in Sense Representation", |
| "sec_num": "4.3.2" |
| }, |
| { |
| "text": "Recent works have discussed whether contextualized word representations extracted from deep pretrained language models encode word sense knowledge within the representation space. Studies suggest that while lower layer BERT embeddings encode more semantic information (Reif et al., 2019) , the upper layer embeddings become increasingly contextual (Ethayarajh, 2019) . Works exploring semantic capabilities of representations have also used nearest neighbour classifier probes to assess whether same-sense representations are classified together (Reif et al., 2019; Nair et al., 2020) . Since these classifiers show slightly better accuracy than classifying as the most frequent sense, they claim that the representation space encodes sense information. Although our work supports this conclusion, we additionally argue that after accounting for anisotropy, the cohesion between same sense representations and separation between different sense representations is not significant. Here, the principal premise of the removal of anisotropy prior to injecting sense information is based on creating an embedding space geometry where the effects of representation degeneration are reduced. The representation degeneration of embeddings reduces their representational power (Gao et al., 2018) . Thus, to improve the representation ability of embeddings, we deem it important to create methods that promote representations that are not only lexicosemantic relation enriched but also isotropic. Our method reveals that the additional step of lowering anisotropy renders improved representation geometry, where word vectors are not constricted within a narrow cone, and are uniformly distributed within the vector space. Further, sense-retrofitting on contextualized word representations render same sense representations more similar and different sense representations more different, increasing the word sense disambiguation capabilities of the encoded representations.", |
| "cite_spans": [ |
| { |
| "start": 268, |
| "end": 287, |
| "text": "(Reif et al., 2019)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 348, |
| "end": 366, |
| "text": "(Ethayarajh, 2019)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 546, |
| "end": 565, |
| "text": "(Reif et al., 2019;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 566, |
| "end": 584, |
| "text": "Nair et al., 2020)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 1269, |
| "end": 1287, |
| "text": "(Gao et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our work presents a novel intrinsic evaluation of sense information in word embeddings, required to understand the sense geometry encoded by various models. In the future, we will focus on integrating sense information in contextual word representations by extending this approach to words that are unseen to the LASeR model, and further perform extrinsic analyses of the embeddings.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In this work, we investigated the geometry of contextual word representations for isotropy and sense disambiguation capabilities. We further proposed a post-processing approach for anisotropy treatment and semantic enrichment of contextual word representations, by transforming the vector space using principal component manipulation and lexical semantic knowledge-based sense-retrofitting. Our method significantly reduced the impact of representation degeneration problem, improving isotropy within the vector space and rendered off-the-shelf contextual word vectors semantically more meaningful. In the future work, we will study the impact of changes in retrofitting hyperparameters and variable removal of primary components on representation quality. Further, we will focus on extrinsic evaluation of the impact of anisotropy removal and sense retrofitting on downstream wordsense disambiguation tasks.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We plot the average similarity between all words (multi-sense nouns, verbs and adjectives) extracted from the annotated corpora, across model layers, as shown in Figure 7 . ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 162, |
| "end": 170, |
| "text": "Figure 7", |
| "ref_id": "FIGREF6" |
| } |
| ], |
| "eq_spans": [], |
| "section": "A Anisotropy Across All Words", |
| "sec_num": null |
| }, |
| { |
| "text": "We plot distribution of word representations across the vector space, for all models across their layers. To assess whether word frequency is encoded within vector dimensions, we color code representations ranging from low frequency words (Blue) to high frequency words (Red). The plots are given in Figure 8 (BERT), Figure 9 (GPT-2), Figure 10 (XLNet) and Figure 11 (ELECTRA) . We see that using LASeR post-processing (d = 1 and hyperparameters mentioned in the main text), anisotropy in vector space is significantly treated. For extremely anisotropic models such as GPT2 and ELECTRA, remove of the first primary component yields more uniformly spread word representations. ", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 300, |
| "end": 308, |
| "text": "Figure 8", |
| "ref_id": "FIGREF7" |
| }, |
| { |
| "start": 317, |
| "end": 325, |
| "text": "Figure 9", |
| "ref_id": null |
| }, |
| { |
| "start": 335, |
| "end": 344, |
| "text": "Figure 10", |
| "ref_id": null |
| }, |
| { |
| "start": 357, |
| "end": 376, |
| "text": "Figure 11 (ELECTRA)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "B PCA Plots of Word Representations", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is partially supported by National Science Foundation grant number 1737591. We would like to thank the three anonymous reviewers for their helpful comments and suggestions. We would also like to thank the members of the AKRaNLU lab at Purdue University for their feedback.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Electra: Pre-training text encoders as discriminators rather than generators", |
| "authors": [ |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Minh-Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "V", |
| "middle": [], |
| "last": "Quoc", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kevin Clark, Minh-Thang Luong, Quoc V Le, and Christopher D Manning. 2019. Electra: Pre-training text encoders as discriminators rather than genera- tors. In International Conference on Learning Rep- resentations.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Cracking the contextual commonsense code: Understanding commonsense reasoning aptitude of deep contextual representations", |
| "authors": [ |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Da", |
| "suffix": "" |
| }, |
| { |
| "first": "Jungo", |
| "middle": [], |
| "last": "Kasai", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeff Da and Jungo Kasai. 2019. Cracking the contex- tual commonsense code: Understanding common- sense reasoning aptitude of deep contextual repre- sentations. EMNLP 2019, page 1.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Senseval-2: overview", |
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Edmonds", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Cotton", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Proceedings of SENSEVAL-2 Second International Workshop on Evaluating Word Sense Disambiguation Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "1--5", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Edmonds and Scott Cotton. 2001. Senseval-2: overview. In Proceedings of SENSEVAL-2 Second International Workshop on Evaluating Word Sense Disambiguation Systems, pages 1-5.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "How contextual are contextualized word representations? comparing the geometry of bert, elmo, and gpt-2 embeddings", |
| "authors": [ |
| { |
| "first": "Kawin", |
| "middle": [], |
| "last": "Ethayarajh", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1909.00512" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kawin Ethayarajh. 2019. How contextual are contex- tualized word representations? comparing the ge- ometry of bert, elmo, and gpt-2 embeddings. arXiv preprint arXiv:1909.00512.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Retrofitting word vectors to semantic lexicons", |
| "authors": [ |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujay", |
| "middle": [], |
| "last": "Kumar Jauhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1615", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manaal Faruqui, Jesse Dodge, Sujay Kumar Jauhar, Chris Dyer, Eduard Hovy, and Noah A Smith. 2015. Retrofitting word vectors to semantic lexicons. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies, pages 1606-1615.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Representation degeneration problem in training natural language generation models", |
| "authors": [ |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Di", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Tao", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "" |
| }, |
| { |
| "first": "Liwei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tieyan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jun Gao, Di He, Xu Tan, Tao Qin, Liwei Wang, and Tieyan Liu. 2018. Representation degenera- tion problem in training natural language generation models. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Extrofitting: Enriching word representation and its vector space with semantic lexicons", |
| "authors": [ |
| { |
| "first": "Hwiyeol", |
| "middle": [], |
| "last": "Jo", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanley Jungkyu", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of The Third Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "24--29", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hwiyeol Jo and Stanley Jungkyu Choi. 2018. Ex- trofitting: Enriching word representation and its vec- tor space with semantic lexicons. In Proceedings of The Third Workshop on Representation Learning for NLP, pages 24-29.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Speech and language processing", |
| "authors": [ |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "James", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dan Jurafsky and James H Martin. 2019. Speech and language processing (3rd draft ed.).", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Specializing unsupervised pretraining models for word-level semantic similarity", |
| "authors": [ |
| { |
| "first": "Anne", |
| "middle": [], |
| "last": "Lauscher", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Edoardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1371--1383", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne Lauscher, Ivan Vuli\u0107, Edoardo Maria Ponti, Anna Korhonen, and Goran Glava\u0161. 2020a. Specializing unsupervised pretraining models for word-level se- mantic similarity. In Proceedings of the 28th Inter- national Conference on Computational Linguistics, pages 1371-1383.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Specializing unsupervised pretraining models for word-level semantic similarity", |
| "authors": [ |
| { |
| "first": "Anne", |
| "middle": [], |
| "last": "Lauscher", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Edoardo", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Ponti", |
| "suffix": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1371--1383", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.coling-main.118" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anne Lauscher, Ivan Vuli\u0107, Edoardo Maria Ponti, Anna Korhonen, and Goran Glava\u0161. 2020b. Specializing unsupervised pretraining models for word-level se- mantic similarity. In Proceedings of the 28th Inter- national Conference on Computational Linguistics, pages 1371-1383, Barcelona, Spain (Online). Inter- national Committee on Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1301.3781" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jef- frey Dean. 2013. Efficient estimation of word representations in vector space. arXiv preprint arXiv:1301.3781.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The strange geometry of skip-gram with negative sampling", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "Laure", |
| "middle": [], |
| "last": "Thompson", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Mimno and Laure Thompson. 2017. The strange geometry of skip-gram with negative sampling. In Empirical Methods in Natural Language Process- ing.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Semeval-2015 task 13: Multilingual all-words sense disambiguation and entity linking", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Moro", |
| "suffix": "" |
| }, |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 9th international workshop on semantic evaluation", |
| "volume": "", |
| "issue": "", |
| "pages": "288--297", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Moro and Roberto Navigli. 2015. Semeval- 2015 task 13: Multilingual all-words sense disam- biguation and entity linking. In Proceedings of the 9th international workshop on semantic evaluation (SemEval 2015), pages 288-297.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "All-but-thetop: Simple and effective post-processing for word representations", |
| "authors": [ |
| { |
| "first": "Jiaqi", |
| "middle": [], |
| "last": "Mu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pramod", |
| "middle": [], |
| "last": "Viswanath", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "6th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiaqi Mu and Pramod Viswanath. 2018. All-but-the- top: Simple and effective post-processing for word representations. In 6th International Conference on Learning Representations, ICLR 2018.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Contextualized word embeddings encode aspects of human-like word sense knowledge", |
| "authors": [ |
| { |
| "first": "Sathvik", |
| "middle": [], |
| "last": "Nair", |
| "suffix": "" |
| }, |
| { |
| "first": "Mahesh", |
| "middle": [], |
| "last": "Srinivasan", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephan", |
| "middle": [], |
| "last": "Meylan", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the Workshop on the Cognitive Aspects of the Lexicon", |
| "volume": "", |
| "issue": "", |
| "pages": "129--141", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sathvik Nair, Mahesh Srinivasan, and Stephan Meylan. 2020. Contextualized word embeddings encode as- pects of human-like word sense knowledge. In Pro- ceedings of the Workshop on the Cognitive Aspects of the Lexicon, pages 129-141.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Semeval-2013 task 12: Multilingual word sense disambiguation", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Jurgens", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniele", |
| "middle": [], |
| "last": "Vannella", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Second Joint Conference on Lexical and Computational Semantics (* SEM", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli, David Jurgens, and Daniele Vannella. 2013. Semeval-2013 task 12: Multilingual word sense disambiguation. In Second Joint Conference on Lexical and Computational Semantics (* SEM),", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Proceedings of the Seventh International Workshop on Semantic Evaluation", |
| "authors": [], |
| "year": 2013, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "222--231", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Volume 2: Proceedings of the Seventh International Workshop on Semantic Evaluation (SemEval 2013), pages 222-231.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Semeval-2007 task 07: Coarse-grained english all-words task", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Kenneth", |
| "suffix": "" |
| }, |
| { |
| "first": "Orin", |
| "middle": [], |
| "last": "Litkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hargraves", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the Fourth International Workshop on Semantic Evaluations", |
| "volume": "", |
| "issue": "", |
| "pages": "30--35", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli, Kenneth C Litkowski, and Orin Har- graves. 2007. Semeval-2007 task 07: Coarse- grained english all-words task. In Proceedings of the Fourth International Workshop on Semantic Evalua- tions (SemEval-2007), pages 30-35.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Matthew", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1802.05365" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. arXiv preprint arXiv:1802.05365.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Knowledge enhanced contextual word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [ |
| "E" |
| ], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Logan", |
| "suffix": "" |
| }, |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Vidur", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Sameer", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "43--54", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D19-1005" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew E. Peters, Mark Neumann, Robert Logan, Roy Schwartz, Vidur Joshi, Sameer Singh, and Noah A. Smith. 2019. Knowledge enhanced contextual word representations. In Proceedings of the 2019 Con- ference on Empirical Methods in Natural Language Processing and the 9th International Joint Confer- ence on Natural Language Processing (EMNLP- IJCNLP), pages 43-54, Hong Kong, China. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Language models are unsupervised multitask learners", |
| "authors": [ |
| { |
| "first": "Alec", |
| "middle": [], |
| "last": "Radford", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rewon", |
| "middle": [], |
| "last": "Child", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Luan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dario", |
| "middle": [], |
| "last": "Amodei", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "OpenAI blog", |
| "volume": "1", |
| "issue": "8", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Effective dimensionality reduction for word embeddings", |
| "authors": [ |
| { |
| "first": "Vikas", |
| "middle": [], |
| "last": "Raunak", |
| "suffix": "" |
| }, |
| { |
| "first": "Vivek", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Florian", |
| "middle": [], |
| "last": "Metze", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 4th Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "235--243", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vikas Raunak, Vivek Gupta, and Florian Metze. 2019. Effective dimensionality reduction for word embed- dings. In Proceedings of the 4th Workshop on Rep- resentation Learning for NLP (RepL4NLP-2019), pages 235-243.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Visualizing and measuring the geometry of bert", |
| "authors": [ |
| { |
| "first": "Emily", |
| "middle": [], |
| "last": "Reif", |
| "suffix": "" |
| }, |
| { |
| "first": "Ann", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Wattenberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Fernanda", |
| "middle": [ |
| "B" |
| ], |
| "last": "Viegas", |
| "suffix": "" |
| }, |
| { |
| "first": "Andy", |
| "middle": [], |
| "last": "Coenen", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Pearce", |
| "suffix": "" |
| }, |
| { |
| "first": "Been", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in Neural Information Processing Systems", |
| "volume": "32", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Emily Reif, Ann Yuan, Martin Wattenberg, Fernanda B Viegas, Andy Coenen, Adam Pearce, and Been Kim. 2019. Visualizing and measuring the geometry of bert. In Advances in Neural Information Processing Systems, volume 32. Curran Associates, Inc.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Commonsense reasoning for natural language processing", |
| "authors": [ |
| { |
| "first": "Maarten", |
| "middle": [], |
| "last": "Sap", |
| "suffix": "" |
| }, |
| { |
| "first": "Vered", |
| "middle": [], |
| "last": "Shwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Bosselut", |
| "suffix": "" |
| }, |
| { |
| "first": "Yejin", |
| "middle": [], |
| "last": "Choi", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Roth", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts", |
| "volume": "", |
| "issue": "", |
| "pages": "27--33", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.acl-tutorials.7" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maarten Sap, Vered Shwartz, Antoine Bosselut, Yejin Choi, and Dan Roth. 2020. Commonsense reason- ing for natural language processing. In Proceed- ings of the 58th Annual Meeting of the Association for Computational Linguistics: Tutorial Abstracts, pages 27-33, Online. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Evaluation methods for unsupervised word embeddings", |
| "authors": [ |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Schnabel", |
| "suffix": "" |
| }, |
| { |
| "first": "Igor", |
| "middle": [], |
| "last": "Labutov", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mimno", |
| "suffix": "" |
| }, |
| { |
| "first": "Thorsten", |
| "middle": [], |
| "last": "Joachims", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 conference on empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "298--307", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tobias Schnabel, Igor Labutov, David Mimno, and Thorsten Joachims. 2015. Evaluation methods for unsupervised word embeddings. In Proceedings of the 2015 conference on empirical methods in natural language processing, pages 298-307.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "The english all-words task", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Snyder", |
| "suffix": "" |
| }, |
| { |
| "first": "Martha", |
| "middle": [], |
| "last": "Palmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of SENSEVAL-3, the Third International Workshop on the Evaluation of Systems for the Semantic Analysis of Text", |
| "volume": "", |
| "issue": "", |
| "pages": "41--43", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Snyder and Martha Palmer. 2004. The en- glish all-words task. In Proceedings of SENSEVAL- 3, the Third International Workshop on the Evalu- ation of Systems for the Semantic Analysis of Text, pages 41-43.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Ufsac: Unification of sense annotated corpora and tools", |
| "authors": [ |
| { |
| "first": "Lo\u00efc", |
| "middle": [], |
| "last": "Vial", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Lecouteux", |
| "suffix": "" |
| }, |
| { |
| "first": "Didier", |
| "middle": [], |
| "last": "Schwab", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Language Resources and Evaluation Conference (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lo\u00efc Vial, Benjamin Lecouteux, and Didier Schwab. 2018. Ufsac: Unification of sense annotated corpora and tools. In Language Resources and Evaluation Conference (LREC).", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Injecting lexical contrast into word vectors by guiding vector space specialisation", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of The Third Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "137--143", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107. 2018. Injecting lexical contrast into word vectors by guiding vector space specialisation. In Proceedings of The Third Workshop on Representa- tion Learning for NLP, pages 137-143.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Specialising word vectors for lexical entailment", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "1134--1145", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107 and Nikola Mrk\u0161i\u0107. 2018. Specialising word vectors for lexical entailment. In Proceedings of the 2018 Conference of the North American Chap- ter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Pa- pers), pages 1134-1145.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Evaluating word embedding models: Methods and experimental results. APSIPA transactions on signal and information processing", |
| "authors": [ |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Angela", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fenxiao", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuncheng", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "C-C Jay", |
| "middle": [], |
| "last": "Kuo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "8", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bin Wang, Angela Wang, Fenxiao Chen, Yuncheng Wang, and C-C Jay Kuo. 2019. Evaluating word em- bedding models: Methods and experimental results. APSIPA transactions on signal and information pro- cessing, 8.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Xlnet: Generalized autoregressive pretraining for language understanding", |
| "authors": [ |
| { |
| "first": "Zhilin", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Zihang", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaime", |
| "middle": [], |
| "last": "Carbonell", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Russ", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc V", |
| "middle": [], |
| "last": "Salakhutdinov", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "5753--5763", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhilin Yang, Zihang Dai, Yiming Yang, Jaime Car- bonell, Russ R Salakhutdinov, and Quoc V Le. 2019. Xlnet: Generalized autoregressive pretraining for language understanding. In Advances in neural in- formation processing systems, pages 5753-5763.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "ERNIE: Enhanced language representation with informative entities", |
| "authors": [ |
| { |
| "first": "Zhengyan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xu", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyuan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xin", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Maosong", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Qun", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1441--1451", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/P19-1139" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhengyan Zhang, Xu Han, Zhiyuan Liu, Xin Jiang, Maosong Sun, and Qun Liu. 2019. ERNIE: En- hanced language representation with informative en- tities. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguis- tics, pages 1441-1451, Florence, Italy. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Revisiting representation degeneration problem in language modeling", |
| "authors": [ |
| { |
| "first": "Zhong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chongming", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Cong", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Miao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qinli", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Junming", |
| "middle": [], |
| "last": "Shao", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: Findings", |
| "volume": "", |
| "issue": "", |
| "pages": "518--527", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhong Zhang, Chongming Gao, Cong Xu, Rui Miao, Qinli Yang, and Junming Shao. 2020. Revisit- ing representation degeneration problem in language modeling. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Process- ing: Findings, pages 518-527.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "text": "document.n.01: writing that provides information document.v.02: to record in detail (b) LASeR Representations Figure 1: Representations of different senses of the word 'document' (BERT Layer 11).", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF1": { |
| "num": null, |
| "text": "Average similarity between representations of randomly sampled words (1K) across model layers.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "PCA plots of original word representations across top 2 primary components; Blue:Low frequency word tokens, Red:High frequency word tokens.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF3": { |
| "num": null, |
| "text": "(a) Vanilla Representations. (b) Retrofitted Representations.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF4": { |
| "num": null, |
| "text": "Plots of proportion of variance encoded within the top d = 10 dominant principal components of the contextual representations across model layers. The horizontal labels (P1-P10) represent each of the ten principal components, and the vertical labels (layer_0 -layer_12) represent each of the 12 model layers.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF5": { |
| "num": null, |
| "text": "Effect of retrofitting on sense relatedness in contextual embeddings. Here, retrofitted embeddings portray higher same-sense similarity and lower intersense similarity.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF6": { |
| "num": null, |
| "text": "Average similarity between representations of randomly sampled words across model layers.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF7": { |
| "num": null, |
| "text": "PCA Plots of BERT Word Representations.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "FIGREF9": { |
| "num": null, |
| "text": "PCA Plots of GPT2 Word Representations. PCA Plots of XLNet Word Representations. PCA Plots of ELECTRA Word Representations.", |
| "type_str": "figure", |
| "uris": null |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Data Summary." |
| }, |
| "TABREF3": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "Average sense similarity scores across model layers; \u2206 = SenSim (w) \u2212 InterSim (w)." |
| } |
| } |
| } |
| } |