| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:45:39.846915Z" |
| }, |
| "title": "Learning Efficient Task-Specific Meta-Embeddings with Word Prisms", |
| "authors": [ |
| { |
| "first": "Jingyi", |
| "middle": [], |
| "last": "He", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "BMO AI Capabilities Team - Bank of Montr\u00e9al", |
| "location": { |
| "settlement": "Toronto", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "jingyi.he@mail.mcgill.ca" |
| }, |
| { |
| "first": "K", |
| "middle": [ |
| "C" |
| ], |
| "last": "Tsiolis", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "BMO AI Capabilities Team - Bank of Montr\u00e9al", |
| "location": { |
| "settlement": "Toronto", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "kc.tsiolis@mail.mcgill.ca" |
| }, |
| { |
| "first": "Kian", |
| "middle": [], |
| "last": "Kenyon-Dean", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "kian.kenyon-dean@bmo.com" |
| }, |
| { |
| "first": "Jackie", |
| "middle": [ |
| "C K" |
| ], |
| "last": "Cheung", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "BMO AI Capabilities Team - Bank of Montr\u00e9al", |
| "location": { |
| "settlement": "Toronto", |
| "region": "ON", |
| "country": "Canada" |
| } |
| }, |
| "email": "jcheung@cs.mcgill.ca" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Word embeddings are trained to predict word cooccurrence statistics, which leads them to possess different lexical properties (syntactic, semantic, etc.) depending on the notion of context defined at training time. These properties manifest when querying the embedding space for the most similar vectors, and when used at the input layer of deep neural networks trained to solve downstream NLP problems. Meta-embeddings combine multiple sets of differently trained word embeddings, and have been shown to successfully improve intrinsic and extrinsic performance over equivalent models which use just one set of source embeddings. We introduce word prisms: a simple and efficient meta-embedding method that learns to combine source embeddings according to the task at hand. Word prisms learn orthogonal transformations to linearly combine the input source embeddings, which allows them to be very efficient at inference time. We evaluate word prisms in comparison to other meta-embedding methods on six extrinsic evaluations and observe that word prisms offer improvements in performance on all tasks. 1 * Equal contribution. \u2020 This work was pursued prior to Kian's employment at BMO.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Word embeddings are trained to predict word cooccurrence statistics, which leads them to possess different lexical properties (syntactic, semantic, etc.) depending on the notion of context defined at training time. These properties manifest when querying the embedding space for the most similar vectors, and when used at the input layer of deep neural networks trained to solve downstream NLP problems. Meta-embeddings combine multiple sets of differently trained word embeddings, and have been shown to successfully improve intrinsic and extrinsic performance over equivalent models which use just one set of source embeddings. We introduce word prisms: a simple and efficient meta-embedding method that learns to combine source embeddings according to the task at hand. Word prisms learn orthogonal transformations to linearly combine the input source embeddings, which allows them to be very efficient at inference time. We evaluate word prisms in comparison to other meta-embedding methods on six extrinsic evaluations and observe that word prisms offer improvements in performance on all tasks. 1 * Equal contribution. \u2020 This work was pursued prior to Kian's employment at BMO.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "A popular approach to representing word meaning in NLP is to characterize a word by \"the company that it keeps\" (Firth, 1957) . This intuition is the basis of famous word embedding techniques such as Word2vec (Mikolov et al., 2013a) and Glove (Pennington et al., 2014) . However, the question of what company a word keeps -i.e., what should define a word's context -is open. A word's context could be defined via a symmetric window of 1, 2, 5, 10, 20 words, the words that precede it, the words that follow it, the words with which it shares a dependency edge, etc. Determining the utility of such different notions of context for training word embeddings is a problem that has attracted considerable attention (Yatbaz et al., 2012; Levy and Goldberg, 2014a; Bansal et al., 2014; Lin et al., 2015; Melamud et al., 2016; Lison and Kutuzov, 2017) but there is no conclusive evidence that any single notion of context could be the best for solving NLP problems in general. Thus, many deep learning solutions for NLP have yet another hyperparameter to tune: what set of word embeddings should be selected for the input layer of the model. As NLP tasks become more and more complex, the practice of providing a deep model with only one notion of a word's meaning becomes limiting.", |
| "cite_spans": [ |
| { |
| "start": 112, |
| "end": 125, |
| "text": "(Firth, 1957)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 209, |
| "end": 232, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 243, |
| "end": 268, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 711, |
| "end": 732, |
| "text": "(Yatbaz et al., 2012;", |
| "ref_id": "BIBREF52" |
| }, |
| { |
| "start": 733, |
| "end": 758, |
| "text": "Levy and Goldberg, 2014a;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 759, |
| "end": 779, |
| "text": "Bansal et al., 2014;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 780, |
| "end": 797, |
| "text": "Lin et al., 2015;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 798, |
| "end": 819, |
| "text": "Melamud et al., 2016;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 820, |
| "end": 844, |
| "text": "Lison and Kutuzov, 2017)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Word meta-embeddings address aspects of this problem by proposing techniques for combining multiple sets of word embeddings before providing them into the input layer of a downstream model. Yin and Sch\u00fctze (2016) motivated word meta-embeddings by arguing that they are advantageous for the following reasons: diversity -combining embeddings trained with different algorithms on different corpora will allow for more distinct meanings of the words to persist; and, coverage -combining embeddings trained on different corpora help to better solve the out-of-vocabulary problem. However, they did not acknowledge that different sets of word embeddings can be diverse even when trained on the same corpus with the same algorithm, so long as their context windows are different. Additionally, due to the various practical and theoretical similarities between different algorithms (Levy and Goldberg, 2014b; Figure 1: Word prisms ( \u00a73) during supersense tagging ( \u00a74). We display the nearest neighbor to the embedding for the word \"apple\" in five of the window-based facets (Figure 2 , \u00a73.2) used in the downstream model ( \u00a74). We include the \u22a5 symbol on the learned transformation, P f , to indicate its orthogonality. Levy et al., 2015; Newell et al., 2019) , the gains to be found in diversifying at the level of algorithmic variation are likely to be minimal. With regard to vocabulary coverage, the out-of-vocabulary problem is at least partially addressed by character n-gram based embedding algorithms such as FastText (Joulin et al., 2017) and subword-based decomposition techniques that can be applied post-training (Zhao et al., 2018; Sasaki et al., 2019) . Nonetheless, meta-embeddings have been shown to consistently outperform models that use only a single set of embeddings in their input layer. Our goal is to determine how to best combine many sets of input embeddings in order to obtain high quality results in downstream tasks.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 212, |
| "text": "Yin and Sch\u00fctze (2016)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 875, |
| "end": 901, |
| "text": "(Levy and Goldberg, 2014b;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 902, |
| "end": 902, |
| "text": "", |
| "ref_id": null |
| }, |
| { |
| "start": 1215, |
| "end": 1233, |
| "text": "Levy et al., 2015;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1234, |
| "end": 1254, |
| "text": "Newell et al., 2019)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 1521, |
| "end": 1542, |
| "text": "(Joulin et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 1620, |
| "end": 1639, |
| "text": "(Zhao et al., 2018;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 1640, |
| "end": 1660, |
| "text": "Sasaki et al., 2019)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1069, |
| "end": 1078, |
| "text": "(Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This work proposes word prisms as a simple and general way to produce and understand metaembeddings, visualized in Figure 1 . Word prisms excel at combining many sets of source embeddings, which we call facets. They do so by learning task-specific orthogonal transformations to map embeddings from their facets to the common meta-embedding space. This produces a vector space that is more disentangled than the original space of facet embeddings. It allows the combination of multiple source embeddings while preserving most information within each embedding set.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 115, |
| "end": 123, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To our knowledge, this work is the first to incorporate both explicit orthogonal transformations of source embeddings and importance weights for source embedding sets that are dynamically learned with the downstream tasks in the same meta-embedding method. Furthermore, it is the first to explore combining so many sets of source embeddings (thirteen). We compare the word prisms method to other standard meta-embedding algorithms (averaging (Coates and Bollegala, 2018) , concatenation (Yin and Sch\u00fctze, 2016) , and dynamic meta-embeddings, DMEs, (Kiela et al., 2018) ). Word prisms overcome the shortcomings of each of these algorithms: (1) in averaging, performance deteriorates considerably when there are many facets -the orthogonal transformations in word prisms resolve this problem; (2) concatenation and DMEs are too expensive during inference when there are many facets -word prisms only need the final meta-embeddings at inference time, making them as efficient as averaging. Our results demonstrate that neural downstream models using word prisms generally obtain better results than the other algorithms across six downstream tasks, including supersense tagging, POS tagging, named entity recognition, natural language inference, and sentiment analysis. In our ablation studies, we find that our method improves performance on downstream tasks even when the vocabulary is the same across nine source facets trained on the same corpus that differ only by the definition of context window (see Figure 2 ); performance further improves by incorporating four more sets of off-the-shelf facets.", |
| "cite_spans": [ |
| { |
| "start": 442, |
| "end": 470, |
| "text": "(Coates and Bollegala, 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 487, |
| "end": 510, |
| "text": "(Yin and Sch\u00fctze, 2016)", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 548, |
| "end": 568, |
| "text": "(Kiela et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1505, |
| "end": 1513, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "w i-30 ... w i-20 ... w i-10 ... w i-5 ... w i-2 , w i-1 , w i , w i+1 , w i+2 ... w i+5 ... w i+10 ... w i+20 ... w i+30 Far Far Left Right W1 W2 W5 W10 W20", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Figure 2: The eight different context windows with which we trained word embedding facets (note that the dependency-based facet is not included here). Word w i represents the center of the context windows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Pre-trained word embedding algorithms use word cooccurrence statistics from a training corpus to map words to a low-dimensional vector space such that words with similar meanings are mapped to similar points in vector space. However, changing the embedding algorithm, training corpus, or definition of cooccurrence can have a strong impact on the resulting embeddings. Consequently, word metaembedding algorithms have been developed to combine multiple embedding sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Word embeddings are trained to reflect the cooccurrence statistics in the input corpus, which depend on the specific definition of context being employed. The standard definition in Word2vec (Mikolov et al., 2013a) and GloVe (Pennington et al., 2014 ) is a symmetric context window of fixed size around each word in the corpus. Levy and Goldberg (2014a) explore dependency-based contexts, where the context of each word is defined as its governor and dependents, along with the corresponding dependency relation labels. They observe that embeddings trained on smaller context windows and dependencybased contexts relate words that can be substituted for one another. By contrast, embeddings trained on larger context windows relate words which address the same topic. Bansal et al. (2014) observe the same phenomenon. Lin et al. (2015) find that small window sizes work best for POS tagging. Lison and Kutuzov (2017) evaluate word similarity and word analogy task performance for SGNS embeddings trained on different window sizes, as well as left-sided and right-sided context windows.", |
| "cite_spans": [ |
| { |
| "start": 191, |
| "end": 214, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 225, |
| "end": 249, |
| "text": "(Pennington et al., 2014", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 768, |
| "end": 788, |
| "text": "Bansal et al. (2014)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 818, |
| "end": 835, |
| "text": "Lin et al. (2015)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 892, |
| "end": 916, |
| "text": "Lison and Kutuzov (2017)", |
| "ref_id": "BIBREF32" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word embedding training and notions of context", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "Previous work has shown that combining embedding sets can lead to improvements in downstream performance. Melamud et al. (2016) combine embeddings trained with different notions of context via concatenation, as well as via SVD and CCA, leading to improved performance in multiple downstream tasks. However, they only combine two embedding sets at a time. Yin and Sch\u00fctze (2016) introduce the term \"meta-embeddings\" and demonstrate that concatenation and singular value decomposition (SVD) are solid baselines on word similarity, analogy, and POS tagging tasks. They propose 1TON, which simultaneously learns meta-embeddings and projections from the meta-embedding space to each individual source embedding space. Ghannay et al. (2016) apply PCA and autoencoders after concatenating source embeddings. Zhang et al. (2016) apply a convolutional layer to each source embedding before concatenating the resulting feature maps. represent the meta-embedding for a word as a linear combination of the meta-embeddings for its nearest neighbours in each source embedding set. Bao and Bollegala (2018) produce meta-embeddings by either averaging or concatenating the outputs of encoders which take GloVe and CBOW (Mikolov et al., 2013a) embeddings as input. Coates and Bollegala (2018) demonstrate that, in certain settings, meta-embeddings produced by averaging can be as performant as concatenated ones. Kiela et al. (2018) propose dynamic meta-embeddings (DMEs), which perform attention over the linearly transformed source embeddings. The linear transformations applied to the source embeddings are not constrained to be orthogonal. Their model learns which source embedding sets are most useful for a particular downstream task and for particular classes of words. They also present a contextualized version, where attention weights also depend on a word's surrounding context, but this provides little to no improvement on their downstream evaluations. \nBy contrast, we propose a simpler attention mechanism by learning a single importance weight for each source embedding set, and we apply orthogonal transformations to source embeddings prior to linear combination. We also experiment with a larger selection of source embeddings, including embeddings trained with different notions of context.",
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 127, |
| "text": "Melamud et al. (2016)", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 713, |
| "end": 734, |
| "text": "Ghannay et al. (2016)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 801, |
| "end": 820, |
| "text": "Zhang et al. (2016)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 1067, |
| "end": 1091, |
| "text": "Bao and Bollegala (2018)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 1203, |
| "end": 1226, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 1248, |
| "end": 1275, |
| "text": "Coates and Bollegala (2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 1396, |
| "end": 1415, |
| "text": "Kiela et al. (2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word meta-embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Orthogonal transformations have previously been employed in the context of mapping monolingual embeddings for different languages into a common space (Artetxe et al., 2016; Smith et al., 2017; Artetxe et al., 2018; Conneau et al., 2018; Doval et al., 2018) . With these alignment transformations on monolingual space, one can obtain a better cross-lingual integration of the vector spaces. Recent work has also found that applying orthogonal transformations to source embeddings facilitates averaging (Garc\u00eda et al., 2020; Jawanpuria et al., 2020) . We expand on this work by incorporating orthogonal transformations in word prisms, which learn word meta-embeddings for specific downstream tasks. Additionally, we provide an analysis of source embeddings before and after orthogonal transformation, which leads to the insight that these mappings cause source embedding sets to be more easily clusterable within the meta-embedding space.", |
| "cite_spans": [ |
| { |
| "start": 150, |
| "end": 172, |
| "text": "(Artetxe et al., 2016;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 173, |
| "end": 192, |
| "text": "Smith et al., 2017;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 193, |
| "end": 214, |
| "text": "Artetxe et al., 2018;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 215, |
| "end": 236, |
| "text": "Conneau et al., 2018;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 237, |
| "end": 256, |
| "text": "Doval et al., 2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 501, |
| "end": 522, |
| "text": "(Garc\u00eda et al., 2020;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 523, |
| "end": 547, |
| "text": "Jawanpuria et al., 2020)", |
| "ref_id": "BIBREF24" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word meta-embeddings", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In this section we introduce meta-embeddings, word prisms, and the source embeddings they are composed with in this work. A meta-embedding combines pre-trained embeddings from multiple sources (e.g., from Glove and FastText), which we call facets. We define w f as the embedding of word w in facet f , for each facet f \u2208 {1, ..., F }. The dimensionality of each embedding in a facet is denoted d f , and the final dimensionality of the meta-embedding is d . The following equation represents the general form of all meta-embedding variants and baselines considered in this work:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Prisms and Meta-Embeddings", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "meta(w) = F f =1 \u03b1 f (P f w f + b f ) s.t. \u03b1 f \u2208 R, w f \u2208 R d f , P f \u2208 R d \u00d7d f , b f \u2208 R d .", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Word Prisms and Meta-Embeddings", |
| "sec_num": "3" |
| }, |
| { |
| "text": "That is, the meta-embedding for a word w, meta(w) \u2208 R d is constructed as follows: first, it is projected by a linear transformation (learned or fixed) characterized by a matrix P f and bias b f for some set of embedding facets. Next, the meta-embedding is a linear combination of the transformed embeddings scaled by some (learned or fixed) weights, \u03b1 f . The vocabulary of a word prism is the union of the vocabularies of its facets. If a word is out-of-vocabulary for a facet f , we assign its representation w f to be the centroid of the embeddings in the facet.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Prisms and Meta-Embeddings", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Word prisms learn orthogonal transformation matrices P f and bias vectors b f via back-propagation to make the space of input facets more well-separated so that the downstream model can learn which lexical qualities in the facets are most appropriate for the given task at hand. It is desirable to impose an orthogonality constraint on the transformation matrix because orthogonal matrices preserve the dot products within the original vector space, which has been shown to be important in studies of multilingual embeddings (Artetxe et al., 2016) . This requires P f to be square (P f \u2208 R d \u00d7d ) which further requires the dimensionality of the facets to all be the same (i.e., each d f = d , without loss of generality 2 ). After each gradient descent update, we apply the following update rule used by Cisse et al. (2017) and Conneau et al. (2018) , which approximates a procedure that keeps each P f on the manifold of orthogonal matrices:", |
| "cite_spans": [ |
| { |
| "start": 525, |
| "end": 547, |
| "text": "(Artetxe et al., 2016)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 805, |
| "end": 824, |
| "text": "Cisse et al. (2017)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 829, |
| "end": 850, |
| "text": "Conneau et al. (2018)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word prisms", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P f \u2190 (1 + \u03b2)P f \u2212 \u03b2 P f P f T P f .", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Word prisms", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The orthogonal transformation keeps the L2 norm of the original embeddings the same, since it does not rescale the vectors. Our preliminary experiments found \u03b2 = 0.001 is a good option. Word prisms also learn a linear combination of the projected facets to further adapt to the task at hand. Indeed, it is necessary to learn the \u03b1 f separately from P f since the transformations are orthogonal and cannot perform rescaling independently. Word prisms learn the facet-level weight coefficients, \u03b1 f , directly, where each \u03b1 f is a floating-point number also learned via back-propagation from the downstream task signal, initialized to be 1/F for each facet. That is, the parameters in word prisms are learned simultaneously with the downstream model for a given task. This approach is advantageous because it allows the model to assign importance weights to each facet, but it is not bound to do so via a dynamic attention vector. So, for word prisms, all of the meta-embeddings can be pre-computed for a vocabulary after training. This means that a word prism model in an inference-only production environment benefits from low memory complexity, as it does not need to hold all of the original facets in memory, only the meta-embeddings. Thus, given a vocabulary size V , and number of facets F , the memory complexity during inference is only O(V ) for word prisms (and the average baseline), but is O(V F ) for DMEs (and the concatenation baseline).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word prisms", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We include 13 various facets into word prisms with the aim of capturing a wide variety of semantic and syntactic information. To our knowledge, this is the first work on meta-embeddings to explore combining so many sets of source embeddings. Our collection of facets is diverse in the following two ways: (1) it incorporates many notions of context using the same algorithm and the same corpus; (2) it incorporates off-the-shelf embeddings trained on much larger corpora and tuned to knowledge graphs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Facets", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We make use of nine different notions of context to train standard PMI-based word embeddings (Levy and Goldberg, 2014b; Newell et al., 2019) , each with a dimension of 300 and vocabulary size of 500,000. Training is done with the open-source sampling-based implementation of Hilbert-MLE 3 (Newell et al., 2019) , which facilitates the use of arbitrarily structured context windows for training. For eight of the nine notions of context, embeddings are trained on the Gigaword 3 corpus (Graff et al., 2007) combined with a Wikipedia 2018 dump, which amounts to approximately 6 billion tokens.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 119, |
| "text": "(Levy and Goldberg, 2014b;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 120, |
| "end": 140, |
| "text": "Newell et al., 2019)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 289, |
| "end": 310, |
| "text": "(Newell et al., 2019)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 485, |
| "end": 505, |
| "text": "(Graff et al., 2007)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Facets", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We visualize the different window settings for these eight embedding sets in Figure 2 . Letting W be the window size, we trained the following sets of embeddings: W1, W2, W5, W10, and W20. Furthermore, we trained embeddings using only a Left context of 5 words, and another set of embeddings with only a Right context of 5 words. Lastly, we trained a set of embeddings with only a Far context window, which only includes words between 20 and 30 words away, in order to create strong topic-based representations. We also trained a variant of dependency-based embeddings (Deps) (Levy and Goldberg, 2014a), where we defined a word's context to be its governor. We ran the CoreNLP dependency parser on Gigaword 3 to obtain a parsed corpus.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 77, |
| "end": 85, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Facets", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "We also experiment with the following off-the-shelf embeddings: GloVe (Pennington et al., 2014) ; trained on 840B tokens from the Common Crawl Corpus with 2.2M words in the vocabulary. FastText (Joulin et al., 2017) ; trained on 600B tokens from the Common Crawl Corpus with 2M words in the vocabulary. ConceptNet Numberbatch (Speer et al., 2017) ; retrofitted (Faruqui et al., 2015) on both Word2vec (Mikolov et al., 2013b) and GloVe (Pennington et al., 2014) with 516K words in the vocabulary; this facet allows us to incorporate information from knowledge graphs. LexSub (Arora et al., 2020) ; GloVe embeddings trained on 6B tokens from Wikipedia 2014 and the Gigaword 5 corpus (Parker et al., 2011) , modified so that they can easily be projected into \"lexical subspaces\", in which a word's nearest neighbours reflect a particular lexical relation (e.g. synonymy, antonymy, hypernymy, meronymy).", |
| "cite_spans": [ |
| { |
| "start": 70, |
| "end": 95, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 194, |
| "end": 215, |
| "text": "(Joulin et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 326, |
| "end": 346, |
| "text": "(Speer et al., 2017)", |
| "ref_id": "BIBREF49" |
| }, |
| { |
| "start": 361, |
| "end": 383, |
| "text": "(Faruqui et al., 2015)", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 401, |
| "end": 424, |
| "text": "(Mikolov et al., 2013b)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 435, |
| "end": 460, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 574, |
| "end": 594, |
| "text": "(Arora et al., 2020)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 681, |
| "end": 702, |
| "text": "(Parker et al., 2011)", |
| "ref_id": "BIBREF42" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Facets", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Our experiments seek to determine if: (1) word prisms offer improvements over the other common meta-embedding methods; and, (2) if it is desirable to produce meta-embeddings with many different notions of context from the same corpus. For (1), we pursue a variety of experiments comparing word prisms to the following meta-embedding methods: the averaging baseline, the concatenation baseline, and dynamic meta-embeddings (DMEs) (Kiela et al., 2018) . For (2), we experiment with several sets of meta-embedding facet combinations. The first set is FastText and Glove (FG), as is done by Kiela et al. (2018) . The second set is a combination of 13 different facets (All), as detailed in \u00a73.2. In \u00a75 we present our main results, and in \u00a76 we present an ablation study to determine the impact of other meta-embedding combinations and the transformation matrices in word prisms.", |
| "cite_spans": [ |
| { |
| "start": 429, |
| "end": 449, |
| "text": "(Kiela et al., 2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 587, |
| "end": 606, |
| "text": "Kiela et al. (2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We will compare word prisms with three baseline algorithms. The first two, averaging and concatenation, are standard meta-embedding methods often explored in studies on meta-embeddings (Yin and Sch\u00fctze, 2016; Coates and Bollegala, 2018) . The third is dynamic meta-embeddings (DMEs) (Kiela et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 208, |
| "text": "(Yin and Sch\u00fctze, 2016;", |
| "ref_id": "BIBREF53" |
| }, |
| { |
| "start": 209, |
| "end": 236, |
| "text": "Coates and Bollegala, 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 283, |
| "end": 303, |
| "text": "(Kiela et al., 2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Average baseline. Averaging word embeddings is the simplest method to create meta-embeddings. Assuming each facet dimension d f is equal to d (as with word prisms), this baseline corresponds to Equation 1 with the following fixed parameter settings:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\u03b1 f = 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "F , P f = I d , and b f = 0. Averaging is a sensible strategy to combine multiple source word embeddings, since, first of all, it aggregates the information from all the input facets without introducing additional parameters. Second, it captures semantic information by preserving the relative word distances within the embedding spaces (Coates and Bollegala, 2018) . However, as we demonstrate later ( \u00a75), the quality of this baseline deteriorates when there are many different facets as the signals start to become too mixed.", |
| "cite_spans": [ |
| { |
| "start": 337, |
| "end": 365, |
| "text": "(Coates and Bollegala, 2018)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Concatenation baseline. Concatenating multiple source embeddings is another trivial way to construct meta-embeddings. The parameter settings here correspond to Equation 1 when d", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "= \\sum_{f=1}^{F} d_f, \\alpha_f = 1, b_f = 0", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": ", and P f is a fixed selector matrix that places embeddings into their corresponding concatenated positions; more simply, meta(w", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": ") = [w_1, \\ldots, w_F].", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Concatenation can be desirable because it maintains all of the structure of the original embeddings. However, it is problematic because the dimensionality increases linearly with respect to the number of facets, requiring more model parameters to be learned at the input layer for the downstream model. Dynamic meta-embeddings. Kiela et al. (2018) introduced DMEs, demonstrating that sentence representations can be improved by combining multiple source embeddings with dynamically learned linear transformations and attention weights. Note that, like with concatenation, it is necessary to maintain all of the individual facets in memory during inference when using DMEs.", |
| "cite_spans": [ |
| { |
| "start": 328, |
| "end": 347, |
| "text": "Kiela et al. (2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "DMEs are encapsulated in Equation 1 via the following parameter settings: d is a hyperparameter for the desired meta-embedding size (set to 256 by Kiela et al. (2018) ); P f and b f are learned via backpropagation from supervised learning during the current task; the \u03b1 f are obtained via a self-attention mechanism on an additional learned parameter vector a \u2208 R d :", |
| "cite_spans": [ |
| { |
| "start": 147, |
| "end": 166, |
| "text": "Kiela et al. (2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "\\alpha_f = \\phi(a \\cdot (P_f w_f + b_f) + b),", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "where \u03c6 is the softmax function and b \u2208 R is an additional learned bias parameter.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "We evaluate meta-embedding methods on a variety of downstream text classification and sequence labelling tasks. For text classification, we choose the Stanford Sentiment Treebank binary sentiment analysis dataset (SST2) (Socher et al., 2013) and the Stanford NLI (Bowman et al., 2015) (SNLI) benchmark. For sequence labelling, we select the CoNLL 2003 named entity recognition task (NER) (Tjong Kim Sang and De Meulder, 2003) , POS tagging on the Brown corpus (Brown) 4 , POS tagging on the WSJ corpus (WSJ) (Marcus et al., 1993) , and Supersense tagging (Ciaramita and Johnson, 2003) on the Semcor 3.0 corpus (Semcor) (Miller et al., 1993) . Supersense tagging is a problem situated between NER and word sense disambiguation. The task consists of 41 lexicographer class labels for nouns and verbs with IOB tags, producing 83 fine-grained classes in total. We report the micro F1 score for the supersense tagging and the NER tagging tasks, discarding the O-tags in the predictions, as is standard (Alonso and Plank, 2017; Changpinyo et al., 2018) . For the rest of the tasks, we report the accuracy on the test set. We use the standard train-validation-test splits whenever they are provided with the dataset. Otherwise, we split 10% of the training set to be the validation set for hyperparameter tuning.", |
| "cite_spans": [ |
| { |
| "start": 220, |
| "end": 241, |
| "text": "(Socher et al., 2013)", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 388, |
| "end": 425, |
| "text": "(Tjong Kim Sang and De Meulder, 2003)", |
| "ref_id": "BIBREF50" |
| }, |
| { |
| "start": 508, |
| "end": 529, |
| "text": "(Marcus et al., 1993)", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 555, |
| "end": 584, |
| "text": "(Ciaramita and Johnson, 2003)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 619, |
| "end": 640, |
| "text": "(Miller et al., 1993)", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 997, |
| "end": 1021, |
| "text": "(Alonso and Plank, 2017;", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 1022, |
| "end": 1046, |
| "text": "Changpinyo et al., 2018)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and downstream models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We use a simple neural model to compare meta-embedding methods. To replicate the models used by Kiela et al. (2018) , we use a single layer BiLSTM with 512 hidden units in SST2 and 1024 hidden units in SNLI for sentence encoding. The sentence representations are learned by max pooling over the forward and backward hidden states. We use the following representation for a pair of hypothesis and premise: [u, v, u * v, |u -v|] . The * operator denotes the element-wise multiplication. The sentence encoder is followed by a 512 dimensional MLP with ReLU activation. For all the sequence labelling tasks, we use a 2-layer BiLSTM with 256 hidden units for sequence encoding.", |
| "cite_spans": [ |
| { |
| "start": 96, |
| "end": 115, |
| "text": "Kiela et al. (2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 405, |
| "end": 426, |
| "text": "[u, v, u * v, |u -v|]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Datasets and downstream models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We use standard cross entropy loss for all supervised downstream tasks. The parameters of the downstream model and word prisms are learned via back-propagation. We choose the initial learning rate to be 0.001 for sequence labeling tasks and 0.0004 for text classification tasks with a reduction factor of 0.1 if there is no improvement on the validation set after 2 consecutive epochs. In all of our experiments, we keep the source embeddings in their original forms without performing normalization. Counterintuitively, normalizing the source embeddings to have unit norm makes little difference in the text classification tasks but substantially hurts the performance of the sequence labelling tasks. Previous work (Schakel and Wilson, 2015) shows the length of the embedding vector encodes the unigram frequency of the word, which is useful in the sequence labelling tasks. Table 1 presents the main results for this work on four sequence labelling tasks and two text classification tasks. We compare word prisms to standard meta-embedding baselines (averaging, concatenation) and dynamic meta-embeddings (DME). In the first four rows, we experiment with FastText and Glove (FG) as a two-facet combination, while the next four rows use a 13-facet combination (All) detailed in \u00a73.2.", |
| "cite_spans": [ |
| { |
| "start": 717, |
| "end": 743, |
| "text": "(Schakel and Wilson, 2015)", |
| "ref_id": "BIBREF46" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 877, |
| "end": 884, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Datasets and downstream models", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Our first finding is that word prisms almost always offer substantial improvements over DMEs, regardless of whether we are using two facets or thirteen. The only exception is in the case of text classification on SST2 with FG, although the difference is within the margin of error (0.6). Note that Kiela et al. (2018) report slightly different results than our reimplementation of their system, for FG: 86.2 \u00b1 .2 for SNLI (compared to 85.57 \u00b1 .3), and 88.7 \u00b1 .6 for SST2 (compared to 88.10 \u00b1 .6). These discrepancies are attributable to random initializations and the different representation of out-of-vocabulary words 5 .", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 317, |
| "text": "Kiela et al. (2018)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our second finding is that concatenation is still a very strong baseline for meta-embeddings. This is not surprising because it preserves all of the information in the facets, and also introduces more model parameters, while the other meta-embedding methods seek to compress the information from all the meta-embeddings. Yet, for 4 out of the 6 tasks, word prisms outperform concatenation, and in the Figure 3 : Impact of the linear transformations on the embedding space in dynamic meta-embeddings (DME) versus word prisms. The K-means score is the adjusted mutual information score for clustering embeddings into their respective facets, consistent across 5 clustering runs with different random seeds. Visualized is the TSNE-projected original facet-space versus the facets after projection (i.e., the stacked w f versus the stacked P f w f + b f ) in DMEs and word prisms.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 401, |
| "end": 409, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "other two tasks (Semcor and SST2) word prisms obtain second best results within the margin of error. Furthermore, note that concatenation is very expensive in the case of including 13 facets, requiring 3900-dimensional meta-embeddings at inference time, versus only 300-dimensions for word prisms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our third major finding is that sequence labelling tasks highly benefit from including all 13 facets, while the text classification tasks seem generally satisfied with only FG. Work on multi-task learning for supersense tagging on Semcor (using only a single set of embeddings with similar neural sequence labelling models) report results of 62.36 (Alonso and Plank, 2017) and 68.25 (Changpinyo et al., 2018) . In contrast, our word prism model obtains a score of 73.82, indicating that word prisms can offer substantial improvements to supersense tagging models. Moreover, because supersense tagging is a coarse-grained version of word sense disambiguation, it is likely the case that word prisms can improve results in that domain as well. For further comparison, Huang et al. (2015) obtain accuracies of 96.04 and 83.52 on the WSJ and NER tasks respectively (when using a BiLSTM with only a single set of embeddings as features) while our word prisms offer improvements to 97.04 (+1.00) and 90.74 (+7.22).", |
| "cite_spans": [ |
| { |
| "start": 383, |
| "end": 408, |
| "text": "(Changpinyo et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 766, |
| "end": 785, |
| "text": "Huang et al. (2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Our results additionally contribute to the findings of Coates and Bollegala (2018) on the difference between averaging and concatenation for meta-embeddings. When there are 13 facets, we observe a marked drop in quality for averaging. We posit that this is due to the increased noise pollution in the averaged vector space because, while it is likely that two sets of random vectors will be generally orthogonal, when 13 sets of random vectors come into play the \"birthday paradox\" becomes much more likely to unveil itself, and the odds of being well-separated become lower. In Figure 3 , we present the vector space for a subset of the nine window-based embeddings, projected to 2D with TSNE (Maaten and Hinton, 2008) . Observe that the embeddings are not very well-separable on their own, as a K-Means clustering algorithm can only obtain an adjusted mutual information score of 0.635 to cluster them into their corresponding facets. We also present the embedding spaces being linearly transformed (i.e., P f w f + b f , see Eq. 1) when trained for supersense tagging. The learned, unconstrained transformations in DMEs cause the embedding space to become much less well-separated, as the clustering score deteriorates to 0.266. In contrast, the orthogonal transformations for word prisms improve the separatedness of the embedding space, bumping clustering score to 0.841. This (combined with the improved downstream results from word prisms) provides evidence of the validity of the \"natural clustering\" hypothesis for representation learning (Bengio et al., 2013; ; namely, that it is preferable for neural representations to be well separated (or, disentangled) within their latent spaces.", |
| "cite_spans": [ |
| { |
| "start": 55, |
| "end": 82, |
| "text": "Coates and Bollegala (2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 694, |
| "end": 719, |
| "text": "(Maaten and Hinton, 2008)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 1548, |
| "end": 1569, |
| "text": "(Bengio et al., 2013;", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 579, |
| "end": 587, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We perform two ablation studies to further inspect the impact of the orthogonal transformations and meta-embedding combination choice in word prisms. We compare the orthogonal transformation in word Table 2 : Ablation study 1: The transformation in word prisms. Test set results for word prisms with different projection constraints (no transformation, an unconstrained one, and the orthogonal transformation), taking the nine window-based facets as the meta-embedding combination. Table 3 : Ablation study 2: Different combinations of facets in word prisms. Test set results when using different combinations of input facets in word prisms. First two rows contain only the single best performing window-based facet (Best-Window) or the best facet overall (Best-All), for the specific task.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 199, |
| "end": 206, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 482, |
| "end": 489, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Ablation Studies", |
| "sec_num": "6" |
| }, |
| { |
| "text": "prisms to two alternatives: none (i.e., P f = I d , in which case the word prism is only learning the facet-combination weights \u03b1 f ), and an unconstrained transformation which does not apply the orthogonality-imposing update rule detailed in Equation 2. For each experiment, we report the average and standard deviation across runs performed with five different random seeds.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Facets", |
| "sec_num": null |
| }, |
| { |
| "text": "Ablation study 1. In this experiment we determine the impact of the transformation matrix in word prisms when isolating the input facets to be the 9 window-based facets trained on the same dataset, with the same vocabulary, differing only in the definition of the context window. Table 2 presents the results for this experiment, which furthermore demonstrates the effectiveness of the orthogonal transformation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 280, |
| "end": 287, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Facets", |
| "sec_num": null |
| }, |
| { |
| "text": "Ablation study 2. In this experiment we investigate the impact of different choices of facet combinations for word prisms. We experiment with several different sets of facets: W1-10 denotes window sizes between 1 and 10 (inclusive) [4 facets]; W1-Far includes the prior facets plus W20 and Far [6 facets]; All windows includes the Left, Right, and Deps with the prior facets [9 facets]. FG denotes FastText and GloVe, while FGCL denotes FastText, GloVe, ConceptNet, and LexSub. All denotes all 13 of these facets. The results for these combinations are summarized in Table 3 . In the first two rows, we also include the results for the best-performing (on the held out validation set) single-facet embedding model for the best window-based facet (Best-Window, out of the 9 window-based facets), and the best overall single-facet model (Best-All out of all 13 facets). W2 is the best window-based facet for Semcor, WSJ (tie), Brown, NER, and SST2, while W5 is the best window-based facet for WSJ (tie) and SNLI. GloVe is best overall for WSJ, Brown, NER, and SST2, while FastText is best overall for Semcor and SNLI. We note three important takeaways from these results. First, word prisms always perform better than their single-facet counterparts, even though the single-facet models were selected to be the one that maximized validation performance for the specific task. Second, we observe that progressively incorporating more facets trained solely on different notions of context (i.e., from W1-10 to W1-Far to All windows) improves results quality substantially for the sequence labelling tasks, while the text classification tasks (SNLI and SST2) do not benefit as much, although SST2 does seem to prefer the topic-based representations included by W1-Far. This suggests that NLP practitioners would benefit from training multiple sets of embeddings with different context windows in their specific problem domains (e.g., on a Twitter corpus), where they can expect improvements in results, especially if they are faced with a sequence labelling problem. Our third takeaway is that FastText and Glove are much better than the window-based embeddings, although including them all together still improves results in 3 out of the 6 tasks. This is not surprising since the FastText and Glove embeddings are trained on a corpus with over 600 billion words, while our window-based embeddings are only trained on a 6 billion word corpus; i.e., 1% of the data of the former. Thus, our results in this ablation study and the former suggest that training embeddings with different notions of context on such corpora will lead to even further gains.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 567, |
| "end": 574, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Facets", |
| "sec_num": null |
| }, |
| { |
| "text": "In this paper, we study a simple and efficient method for constructing meta-embeddings from wideranging facets while preserving individual invariance with orthogonal transformations. The effectiveness of the proposed word prisms is validated by six supervised extrinsic evaluation tasks. Our word prism models obtain consistent improvements over dynamic meta-embeddings (Kiela et al., 2018) and the averaging and concatenation baselines (Coates and Bollegala, 2018) in all six tasks. Analysis of the transformed embeddings suggests the \"natural clustering\" hypothesis for representation learning (Bengio et al., 2013 ) is important to consider for combining various source embeddings to create performant taskspecific meta-embeddings.", |
| "cite_spans": [ |
| { |
| "start": 370, |
| "end": 390, |
| "text": "(Kiela et al., 2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 437, |
| "end": 465, |
| "text": "(Coates and Bollegala, 2018)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 596, |
| "end": 616, |
| "text": "(Bengio et al., 2013", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Several future directions present themselves from this work. First, we believe that contextualized embedding models can benefit from prismatic representations of their input embeddings (Devlin et al., 2019) , and that word prisms can benefit from including contextualized embeddings as facets. Second, we expect that word prisms can improve performance in other tasks such as automatic summarization, which often use a single set of word embeddings in their input layers (Dong et al., 2019) . Third, we believe that meta-embeddings and the method behind word prisms can be generalized past word-based representations to sentence representations (Pagliardini et al., 2018) and may improve their quality, as was recently demonstrated by Poerner et al. (2019) . Lastly, recent work has found simple word embeddings to be useful for solving diverse problems from the medical domain (Zhang et al., 2019) , to materials science (Tshitoyan et al., 2019) , to law (Chalkidis and Kampas, 2019); we expect that word prisms and their motivations can further improve results in these applications.", |
| "cite_spans": [ |
| { |
| "start": 185, |
| "end": 206, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 471, |
| "end": 490, |
| "text": "(Dong et al., 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 645, |
| "end": 671, |
| "text": "(Pagliardini et al., 2018)", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 735, |
| "end": 756, |
| "text": "Poerner et al. (2019)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 878, |
| "end": 898, |
| "text": "(Zhang et al., 2019)", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 922, |
| "end": 946, |
| "text": "(Tshitoyan et al., 2019)", |
| "ref_id": "BIBREF51" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "When dimensions are different, simple strategies can be pursued to equalize them. For example, zero-padding short embeddings (Coates and Bollegala, 2018) or using SVD to compress long embeddings are reliable strategies.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/enewe101/hilbert", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Retrieved from the NLTK toolkit: http://www.nltk.org/nltk_data/.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "While Kiela et al. (2018) uses zero-vectors to represent OOV words, we opted to use the facet-level centroid as it resulted in better validation performance for most tasks. We found we were unable to exactly replicate their results even with zero-vectors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work is supported by the Fonds de recherche du Qu\u00e9bec -Nature et technologies, by the Natural Sciences and Engineering Research Council of Canada, and by Compute Canada. The last author is supported in part by the Canada CIFAR AI Chair program.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "When is multitask learning effective? semantic sequence prediction under varying data conditions", |
| "authors": [ |
| { |
| "first": "Hector", |
| "middle": [], |
| "last": "Martinez", |
| "suffix": "" |
| }, |
| { |
| "first": "Alonso", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Plank", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EACL 2017-15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Hector Martinez Alonso and Barbara Plank. 2017. When is multitask learning effective? semantic sequence prediction under varying data conditions. In EACL 2017-15th Conference of the European Chapter of the Association for Computational Linguistics, pages 1-10.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning lexical subspaces in a distributional vector space", |
| "authors": [ |
| { |
| "first": "Kushal", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Aishik", |
| "middle": [], |
| "last": "Chakraborty", |
| "suffix": "" |
| }, |
| { |
| "first": "Jackie", |
| "middle": [ |
| "C K" |
| ], |
| "last": "Cheung", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "8", |
| "issue": "", |
| "pages": "311--329", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kushal Arora, Aishik Chakraborty, and Jackie C. K. Cheung. 2020. Learning lexical subspaces in a distributional vector space. Transactions of the Association for Computational Linguistics, 8:311-329.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2289--2294", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2016. Learning principled bilingual mappings of word embed- dings while preserving monolingual invariance. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 2289-2294, Austin, Texas, November. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Generalizing and improving bilingual word embedding mappings with a multi-step framework of linear transformations", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2018. Generalizing and improving bilingual word embedding mappings with a multi-step framework of linear transformations. In Thirty-Second AAAI Conference on Artifi- cial Intelligence.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Tailoring continuous word representations for dependency parsing", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "809--815", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2014. Tailoring continuous word representations for dependency parsing. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 809-815. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Learning word meta-embeddings by autoencoding", |
| "authors": [ |
| { |
| "first": "Cong", |
| "middle": [], |
| "last": "Bao", |
| "suffix": "" |
| }, |
| { |
| "first": "Danushka", |
| "middle": [], |
| "last": "Bollegala", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "1650--1661", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cong Bao and Danushka Bollegala. 2018. Learning word meta-embeddings by autoencoding. In Proceedings of the 27th International Conference on Computational Linguistics, pages 1650-1661.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Representation learning: A review and new perspectives", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Aaron", |
| "middle": [], |
| "last": "Courville", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "IEEE transactions on pattern analysis and machine intelligence", |
| "volume": "35", |
| "issue": "", |
| "pages": "1798--1828", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, Aaron Courville, and Pascal Vincent. 2013. Representation learning: A review and new perspec- tives. IEEE transactions on pattern analysis and machine intelligence, 35(8):1798-1828.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Think globally, embed locally: locally linear meta-embedding of words", |
| "authors": [ |
| { |
| "first": "Danushka", |
| "middle": [], |
| "last": "Bollegala", |
| "suffix": "" |
| }, |
| { |
| "first": "Kohei", |
| "middle": [], |
| "last": "Hayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Ken-Ichi", |
| "middle": [], |
| "last": "Kawarabayashi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Joint Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "3970--3976", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danushka Bollegala, Kohei Hayashi, and Ken-Ichi Kawarabayashi. 2018. Think globally, embed locally: lo- cally linear meta-embedding of words. In Proceedings of the 27th International Joint Conference on Artificial Intelligence, pages 3970-3976. AAAI Press.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A large annotated corpus for learning natural language inference", |
| "authors": [ |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Bowman", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Angeli", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Potts", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel R. Bowman, Gabor Angeli, Christopher Potts, and Christopher D. Manning. 2015. A large annotated corpus for learning natural language inference. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing (EMNLP). Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Deep learning in law: early adaptation and legal word embeddings trained on large corpora", |
| "authors": [ |
| { |
| "first": "Ilias", |
| "middle": [], |
| "last": "Chalkidis", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitrios", |
| "middle": [], |
| "last": "Kampas", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Artificial Intelligence and Law", |
| "volume": "27", |
| "issue": "2", |
| "pages": "171--198", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ilias Chalkidis and Dimitrios Kampas. 2019. Deep learning in law: early adaptation and legal word embeddings trained on large corpora. Artificial Intelligence and Law, 27(2):171-198.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Multi-task learning for sequence tagging: An empirical study", |
| "authors": [ |
| { |
| "first": "Soravit", |
| "middle": [], |
| "last": "Changpinyo", |
| "suffix": "" |
| }, |
| { |
| "first": "Hexiang", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Sha", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 27th International Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "2965--2977", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soravit Changpinyo, Hexiang Hu, and Fei Sha. 2018. Multi-task learning for sequence tagging: An empirical study. In Proceedings of the 27th International Conference on Computational Linguistics, pages 2965-2977.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Supersense tagging of unknown nouns in wordnet", |
| "authors": [ |
| { |
| "first": "Massimiliano", |
| "middle": [], |
| "last": "Ciaramita", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Johnson", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the 2003 conference on Empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "168--175", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Massimiliano Ciaramita and Mark Johnson. 2003. Supersense tagging of unknown nouns in wordnet. In Proceed- ings of the 2003 conference on Empirical methods in natural language processing, pages 168-175. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Parseval networks: Improving robustness to adversarial examples", |
| "authors": [ |
| { |
| "first": "Moustapha", |
| "middle": [], |
| "last": "Cisse", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Yann", |
| "middle": [], |
| "last": "Dauphin", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 34th International Conference on Machine Learning", |
| "volume": "70", |
| "issue": "", |
| "pages": "854--863", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Moustapha Cisse, Piotr Bojanowski, Edouard Grave, Yann Dauphin, and Nicolas Usunier. 2017. Parseval net- works: Improving robustness to adversarial examples. In Proceedings of the 34th International Conference on Machine Learning-Volume 70, pages 854-863. JMLR. org.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Frustratingly easy meta-embedding -computing meta-embeddings by averaging source word embeddings", |
| "authors": [ |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Coates", |
| "suffix": "" |
| }, |
| { |
| "first": "Danushka", |
| "middle": [], |
| "last": "Bollegala", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "2", |
| "issue": "", |
| "pages": "194--198", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joshua Coates and Danushka Bollegala. 2018. Frustratingly easy meta-embedding -computing meta-embeddings by averaging source word embeddings. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 194-198.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Word translation without parallel data", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "J\u00e9gou", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 6th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herv\u00e9 J\u00e9gou. 2018. Word translation without parallel data. In Proceedings of the 6th International Conference on Learning Representa- tions (ICLR 2018).", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirec- tional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Editnts: An neural programmerinterpreter model for sentence simplification through explicit editing", |
| "authors": [ |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Zichao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Mehdi", |
| "middle": [], |
| "last": "Rezagholizadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Jackie Chi Kit", |
| "middle": [], |
| "last": "Cheung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "3393--3402", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yue Dong, Zichao Li, Mehdi Rezagholizadeh, and Jackie Chi Kit Cheung. 2019. Editnts: An neural programmer- interpreter model for sentence simplification through explicit editing. In Proceedings of the 57th Annual Meet- ing of the Association for Computational Linguistics, pages 3393-3402.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Improving crosslingual word embeddings by meeting in the middle", |
| "authors": [ |
| { |
| "first": "Yerai", |
| "middle": [], |
| "last": "Doval", |
| "suffix": "" |
| }, |
| { |
| "first": "Jose", |
| "middle": [], |
| "last": "Camacho-Collados", |
| "suffix": "" |
| }, |
| { |
| "first": "Luis", |
| "middle": [], |
| "last": "Espinosa-Anke", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Schockaert", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "294--304", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yerai Doval, Jose Camacho-Collados, Luis Espinosa-Anke, and Steven Schockaert. 2018. Improving cross- lingual word embeddings by meeting in the middle. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 294-304, Brussels, Belgium, October-November. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Retrofitting word vectors to semantic lexicons", |
| "authors": [ |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujay", |
| "middle": [], |
| "last": "Kumar Jauhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah A", |
| "middle": [], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1615", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manaal Faruqui, Jesse Dodge, Sujay Kumar Jauhar, Chris Dyer, Eduard Hovy, and Noah A Smith. 2015. Retrofitting word vectors to semantic lexicons. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1606-1615.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A synopsis of linguistic theory, 1930-1955. Studies in linguistic analysis", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "John R Firth", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John R Firth. 1957. A synopsis of linguistic theory, 1930-1955. Studies in linguistic analysis.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "A common semantic space for monolingual and crosslingual meta-embeddings", |
| "authors": [ |
| { |
| "first": "Iker", |
| "middle": [], |
| "last": "Garc\u00eda", |
| "suffix": "" |
| }, |
| { |
| "first": "Rodrigo", |
| "middle": [], |
| "last": "Agerri", |
| "suffix": "" |
| }, |
| { |
| "first": "German", |
| "middle": [], |
| "last": "Rigau", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2001.06381" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Iker Garc\u00eda, Rodrigo Agerri, and German Rigau. 2020. A common semantic space for monolingual and cross- lingual meta-embeddings. arXiv preprint arXiv:2001.06381.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Word embedding evaluation and combination", |
| "authors": [ |
| { |
| "first": "Sahar", |
| "middle": [], |
| "last": "Ghannay", |
| "suffix": "" |
| }, |
| { |
| "first": "Yannick", |
| "middle": [], |
| "last": "Benoit Favre", |
| "suffix": "" |
| }, |
| { |
| "first": "Nathalie", |
| "middle": [], |
| "last": "Esteve", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Camelin", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16)", |
| "volume": "", |
| "issue": "", |
| "pages": "300--305", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sahar Ghannay, Benoit Favre, Yannick Esteve, and Nathalie Camelin. 2016. Word embedding evaluation and combination. In Proceedings of the Tenth International Conference on Language Resources and Evaluation (LREC'16), pages 300-305.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Bidirectional lstm-crf models for sequence tagging", |
| "authors": [ |
| { |
| "first": "Zhiheng", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1508.01991" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiheng Huang, Wei Xu, and Kai Yu. 2015. Bidirectional lstm-crf models for sequence tagging. arXiv preprint arXiv:1508.01991.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Learning geometric word meta-embeddings", |
| "authors": [ |
| { |
| "first": "Pratik", |
| "middle": [], |
| "last": "Jawanpuria", |
| "suffix": "" |
| }, |
| { |
| "first": "N T V", |
| "middle": [], |
| "last": "Satya Dev", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [], |
| "last": "Kunchukuttan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bamdev", |
| "middle": [], |
| "last": "Mishra", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 5th Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "39--44", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pratik Jawanpuria, Satya Dev N T V, Anoop Kunchukuttan, and Bamdev Mishra. 2020. Learning geometric word meta-embeddings. In Proceedings of the 5th Workshop on Representation Learning for NLP, pages 39-44, Online, July. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Bag of tricks for efficient text classification", |
| "authors": [ |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c9douard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom\u00e1\u0161", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter", |
| "volume": "2", |
| "issue": "", |
| "pages": "427--431", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Armand Joulin,\u00c9douard Grave, Piotr Bojanowski, and Tom\u00e1\u0161 Mikolov. 2017. Bag of tricks for efficient text clas- sification. In Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics: Volume 2, Short Papers, pages 427-431.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Clustering-oriented representation learning with attractive-repulsive loss", |
| "authors": [ |
| { |
| "first": "Kian", |
| "middle": [], |
| "last": "Kenyon-Dean", |
| "suffix": "" |
| }, |
| { |
| "first": "Andre", |
| "middle": [], |
| "last": "Cianflone", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucas", |
| "middle": [], |
| "last": "Page-Caccia", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Rabusseau", |
| "suffix": "" |
| }, |
| { |
| "first": "Jackie Chi Kit", |
| "middle": [], |
| "last": "Cheung", |
| "suffix": "" |
| }, |
| { |
| "first": "Doina", |
| "middle": [], |
| "last": "Precup", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "AAAI 2019 Workshop: Network Interpretability for Deep Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kian Kenyon-Dean, Andre Cianflone, Lucas Page-Caccia, Guillaume Rabusseau, Jackie Chi Kit Cheung, and Doina Precup. 2019. Clustering-oriented representation learning with attractive-repulsive loss. AAAI 2019 Workshop: Network Interpretability for Deep Learning.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Dynamic meta-embeddings for improved sentence representations", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Changhan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kyunghyun", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1466--1477", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela, Changhan Wang, and Kyunghyun Cho. 2018. Dynamic meta-embeddings for improved sentence representations. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 1466-1477.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Dependency-based word embeddings", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "302--308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy and Yoav Goldberg. 2014a. Dependency-based word embeddings. In Proceedings of the 52nd Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers), pages 302-308.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Neural word embedding as implicit matrix factorization", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "2177--2185", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy and Yoav Goldberg. 2014b. Neural word embedding as implicit matrix factorization. In Advances in neural information processing systems, pages 2177-2185.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Improving distributional similarity with lessons learned from word embeddings", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "3", |
| "issue": "", |
| "pages": "211--225", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy, Yoav Goldberg, and Ido Dagan. 2015. Improving distributional similarity with lessons learned from word embeddings. Transactions of the Association for Computational Linguistics, 3:211-225.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Unsupervised pos induction with word embeddings", |
| "authors": [ |
| { |
| "first": "Chu-Cheng", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Waleed", |
| "middle": [], |
| "last": "Ammar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Lori", |
| "middle": [], |
| "last": "Levin", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1311--1316", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chu-Cheng Lin, Waleed Ammar, Chris Dyer, and Lori Levin. 2015. Unsupervised pos induction with word embeddings. In Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1311-1316.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Redefining context windows for word embedding models: An experimental study", |
| "authors": [ |
| { |
| "first": "Pierre", |
| "middle": [], |
| "last": "Lison", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrey", |
| "middle": [], |
| "last": "Kutuzov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 21st Nordic Conference on Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "284--288", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pierre Lison and Andrey Kutuzov. 2017. Redefining context windows for word embedding models: An experi- mental study. In Proceedings of the 21st Nordic Conference on Computational Linguistics, pages 284-288.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Visualizing data using t-sne", |
| "authors": [ |
| { |
| "first": "Laurens", |
| "middle": [], |
| "last": "Van Der Maaten", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of machine learning research", |
| "volume": "9", |
| "issue": "", |
| "pages": "2579--2605", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Laurens van der Maaten and Geoffrey Hinton. 2008. Visualizing data using t-sne. Journal of machine learning research, 9(Nov):2579-2605.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "The Stanford CoreNLP natural language processing toolkit", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Surdeanu", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Bauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Jenny", |
| "middle": [], |
| "last": "Finkel", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Bethard", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcclosky", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher Manning, Mihai Surdeanu, John Bauer, Jenny Finkel, Steven Bethard, and David McClosky. 2014. The Stanford CoreNLP natural language processing toolkit. In Proceedings of 52nd Annual Meeting of the Association for Computational Linguistics: System Demonstrations, pages 55-60, Baltimore, Maryland, June. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Building a large annotated corpus of english: The penn treebank", |
| "authors": [ |
| { |
| "first": "Mitch", |
| "middle": [], |
| "last": "Marcus", |
| "suffix": "" |
| }, |
| { |
| "first": "Beatrice", |
| "middle": [], |
| "last": "Santorini", |
| "suffix": "" |
| }, |
| { |
| "first": "Mary", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Marcinkiewicz", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Computational Linguistics", |
| "volume": "19", |
| "issue": "2", |
| "pages": "313--330", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mitch Marcus, Beatrice Santorini, and Mary Ann Marcinkiewicz. 1993. Building a large annotated corpus of english: The penn treebank. Computational Linguistics, 19(2):313-330.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "The role of context types and dimensionality in learning word embeddings", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Melamud", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcclosky", |
| "suffix": "" |
| }, |
| { |
| "first": "Siddharth", |
| "middle": [], |
| "last": "Patwardhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "1030--1040", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Melamud, David McClosky, Siddharth Patwardhan, and Mohit Bansal. 2016. The role of context types and dimensionality in learning word embeddings. In Proceedings of the 2016 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 1030-1040.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 1st International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013a. Efficient estimation of word representations in vector space. In Proceedings of the 1st International Conference on Learning Representations (ICLR 2013).", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeff", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Advances in neural information processing systems", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Greg S Corrado, and Jeff Dean. 2013b. Distributed representations of words and phrases and their compositionality. In Advances in neural information processing systems, pages 3111-3119.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "A semantic concordance", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "George", |
| "suffix": "" |
| }, |
| { |
| "first": "Claudia", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| }, |
| { |
| "first": "Randee", |
| "middle": [], |
| "last": "Leacock", |
| "suffix": "" |
| }, |
| { |
| "first": "Ross T", |
| "middle": [], |
| "last": "Tengi", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bunker", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "Proceedings of the workshop on Human Language Technology", |
| "volume": "", |
| "issue": "", |
| "pages": "303--308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A Miller, Claudia Leacock, Randee Tengi, and Ross T Bunker. 1993. A semantic concordance. In Proceedings of the workshop on Human Language Technology, pages 303-308. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Deconstructing and reconstructing word embedding algorithms", |
| "authors": [ |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Newell", |
| "suffix": "" |
| }, |
| { |
| "first": "Kian", |
| "middle": [], |
| "last": "Kenyon-Dean", |
| "suffix": "" |
| }, |
| { |
| "first": "Jackie Chi Kit", |
| "middle": [], |
| "last": "Cheung", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1911.13280" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edward Newell, Kian Kenyon-Dean, and Jackie Chi Kit Cheung. 2019. Deconstructing and reconstructing word embedding algorithms. arXiv preprint arXiv:1911.13280.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Unsupervised learning of sentence embeddings using compositional n-gram features", |
| "authors": [ |
| { |
| "first": "Matteo", |
| "middle": [], |
| "last": "Pagliardini", |
| "suffix": "" |
| }, |
| { |
| "first": "Prakhar", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Martin", |
| "middle": [], |
| "last": "Jaggi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "528--540", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matteo Pagliardini, Prakhar Gupta, and Martin Jaggi. 2018. Unsupervised learning of sentence embeddings using compositional n-gram features. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 528-540.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "English gigaword fifth edition ldc2011t07. Web Download. Philadelphia: Linguistic Data Consortium", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Parker", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Graff", |
| "suffix": "" |
| }, |
| { |
| "first": "Junbo", |
| "middle": [], |
| "last": "Kong", |
| "suffix": "" |
| }, |
| { |
| "first": "Ke", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Kazuaki", |
| "middle": [], |
| "last": "Maeda", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Parker, David Graff, Junbo Kong, Ke Chen, and Kazuaki Maeda. 2011. English gigaword fifth edition ldc2011t07. Web Download. Philadelphia: Linguistic Data Consortium.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher D", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D Manning. 2014. Glove: Global vectors for word represen- tation. In Proceedings of the 2014 conference on empirical methods in natural language processing (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "Sentence meta-embeddings for unsupervised semantic textual similarity", |
| "authors": [ |
| { |
| "first": "Nina", |
| "middle": [], |
| "last": "Poerner", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulli", |
| "middle": [], |
| "last": "Waltinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1911.03700" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nina Poerner, Ulli Waltinger, and Hinrich Sch\u00fctze. 2019. Sentence meta-embeddings for unsupervised semantic textual similarity. arXiv preprint arXiv:1911.03700.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Subword-based Compact Reconstruction of Word Embeddings", |
| "authors": [ |
| { |
| "first": "Shota", |
| "middle": [], |
| "last": "Sasaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Jun", |
| "middle": [], |
| "last": "Suzuki", |
| "suffix": "" |
| }, |
| { |
| "first": "Kentaro", |
| "middle": [], |
| "last": "Inui", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "3498--3508", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shota Sasaki, Jun Suzuki, and Kentaro Inui. 2019. Subword-based Compact Reconstruction of Word Embeddings. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 3498-3508. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Measuring word significance using distributed representations of words", |
| "authors": [ |
| { |
| "first": "Adriaan", |
| "middle": [ |
| "M", |
| "J" |
| ], |
| "last": "Schakel", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [ |
| "J" |
| ], |
| "last": "Wilson", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adriaan M. J. Schakel and Benjamin J. Wilson. 2015. Measuring word significance using distributed representa- tions of words.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Offline bilingual word vectors, orthogonal transformations and the inverted softmax", |
| "authors": [ |
| { |
| "first": "Samuel", |
| "middle": [ |
| "L" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "H", |
| "P" |
| ], |
| "last": "Turban", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Hamblin", |
| "suffix": "" |
| }, |
| { |
| "first": "Nils", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Hammerla", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 5th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Samuel L Smith, David HP Turban, Steven Hamblin, and Nils Y Hammerla. 2017. Offline bilingual word vectors, orthogonal transformations and the inverted softmax. In Proceedings of the 5th International Conference on Learning Representations (ICLR 2017).", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Recursive deep models for semantic compositionality over a sentiment treebank", |
| "authors": [ |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Perelygin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Chuang", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Potts", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1631--1642", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Richard Socher, Alex Perelygin, Jean Wu, Jason Chuang, Christopher D. Manning, Andrew Ng, and Christopher Potts. 2013. Recursive deep models for semantic compositionality over a sentiment treebank. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing, pages 1631-1642, Seattle, Washington, USA, October. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Conceptnet 5.5: an open multilingual graph of general knowledge", |
| "authors": [ |
| { |
| "first": "Robyn", |
| "middle": [], |
| "last": "Speer", |
| "suffix": "" |
| }, |
| { |
| "first": "Joshua", |
| "middle": [], |
| "last": "Chin", |
| "suffix": "" |
| }, |
| { |
| "first": "Catherine", |
| "middle": [], |
| "last": "Havasi", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "4444--4451", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robyn Speer, Joshua Chin, and Catherine Havasi. 2017. Conceptnet 5.5: an open multilingual graph of general knowledge. In Proceedings of the Thirty-First AAAI Conference on Artificial Intelligence, pages 4444-4451.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Introduction to the conll-2003 shared task", |
| "authors": [ |
| { |
| "first": "Erik", |
| "middle": [ |
| "F" |
| ], |
| "last": "Tjong Kim Sang", |
| "suffix": "" |
| }, |
| { |
| "first": "Fien", |
| "middle": [], |
| "last": "De Meulder", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Proceedings of the seventh conference on Natural language learning at HLT-NAACL 2003", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Erik F. Tjong Kim Sang and Fien De Meulder. 2003. Introduction to the conll-2003 shared task. Proceedings of the seventh conference on Natural language learning at HLT-NAACL 2003.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Unsupervised word embeddings capture latent knowledge from materials science literature", |
| "authors": [ |
| { |
| "first": "Vahe", |
| "middle": [], |
| "last": "Tshitoyan", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Dagdelen", |
| "suffix": "" |
| }, |
| { |
| "first": "Leigh", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Dunn", |
| "suffix": "" |
| }, |
| { |
| "first": "Ziqin", |
| "middle": [], |
| "last": "Rong", |
| "suffix": "" |
| }, |
| { |
| "first": "Olga", |
| "middle": [], |
| "last": "Kononova", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristin", |
| "middle": [ |
| "A" |
| ], |
| "last": "Persson", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerbrand", |
| "middle": [], |
| "last": "Ceder", |
| "suffix": "" |
| }, |
| { |
| "first": "Anubhav", |
| "middle": [], |
| "last": "Jain", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Nature", |
| "volume": "571", |
| "issue": "7763", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vahe Tshitoyan, John Dagdelen, Leigh Weston, Alexander Dunn, Ziqin Rong, Olga Kononova, Kristin A Persson, Gerbrand Ceder, and Anubhav Jain. 2019. Unsupervised word embeddings capture latent knowledge from materials science literature. Nature, 571(7763).", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Learning syntactic categories using paradigmatic representations of word context", |
| "authors": [ |
| { |
| "first": "Enis", |
| "middle": [], |
| "last": "Mehmet Ali Yatbaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Deniz", |
| "middle": [], |
| "last": "Sert", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Yuret", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "940--951", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mehmet Ali Yatbaz, Enis Sert, and Deniz Yuret. 2012. Learning syntactic categories using paradigmatic rep- resentations of word context. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 940-951. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "Learning word meta-embeddings", |
| "authors": [ |
| { |
| "first": "Wenpeng", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "1351--1360", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenpeng Yin and Hinrich Sch\u00fctze. 2016. Learning word meta-embeddings. In Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages 1351-1360.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Mgnc-cnn: A simple approach to exploiting multiple word embeddings for sentence classification", |
| "authors": [ |
| { |
| "first": "Ye", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Roller", |
| "suffix": "" |
| }, |
| { |
| "first": "Byron C", |
| "middle": [], |
| "last": "Wallace", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1522--1527", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ye Zhang, Stephen Roller, and Byron C Wallace. 2016. Mgnc-cnn: A simple approach to exploiting multiple word embeddings for sentence classification. In Proceedings of NAACL-HLT, pages 1522-1527.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Biowordvec, improving biomedical word embeddings with subword information and mesh", |
| "authors": [ |
| { |
| "first": "Yijia", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Qingyu", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhihao", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Hongfei", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhiyong", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Scientific data", |
| "volume": "6", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yijia Zhang, Qingyu Chen, Zhihao Yang, Hongfei Lin, and Zhiyong Lu. 2019. Biowordvec, improving biomedical word embeddings with subword information and mesh. Scientific data, 6(1).", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Generalizing word embeddings using bag of subwords", |
| "authors": [ |
| { |
| "first": "Jinman", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Sidharth", |
| "middle": [], |
| "last": "Mudgal", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingyu", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "601--606", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jinman Zhao, Sidharth Mudgal, and Yingyu Liang. 2018. Generalizing word embeddings using bag of subwords. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pages 601-606.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "html": null, |
| "text": "\u00b1 .1 96.76 \u00b1 .02 98.44 \u00b1 .02 90.16 \u00b1 .2 85.33 \u00b1 .3 87.76 \u00b1 .5 Concat FG 72.23 \u00b1 .2 96.85 \u00b1 .04 98.53 \u00b1 .02 90.49 \u00b1 .1 85.45 \u00b1 .3 88.57 \u00b1 .3 DME FG 72.15 \u00b1 .2 96.81 \u00b1 .03 98.53 \u00b1 .02 89.49 \u00b1 .2 85.57 \u00b1 .3 88.10 \u00b1 .6 Prism FG 73.51 \u00b1 .1 96.91 \u00b1 .01 98.58 \u00b1 .02 90.70 \u00b1 .4 85.82 \u00b1 .1 87.80 \u00b1 .6 Average All 65.34 \u00b1 .3 96.63 \u00b1 .01 98.21 \u00b1 .03 88.92 \u00b1 .3 83.91 \u00b1 .1 86.03 \u00b1 .9 Concat All 73.95 \u00b1 .1 97.02 \u00b1 .01 98.63 \u00b1 .01 90.55 \u00b1 .1 84.03 \u00b1 .2 88.15 \u00b1 .2 DME All 72.09 \u00b1 .1 96.89 \u00b1 .01 98.58 \u00b1 .02 89.36 \u00b1 .3 85.47 \u00b1 .1 87.63 \u00b1 .6 Prism All 73.82 \u00b1 .2 97.04 \u00b1 .01 98.65 \u00b1 .01 90.74 \u00b1 .2 85.71 \u00b1 .3 88.45 \u00b1 .5Test set results for word prisms and baseline meta-embedding algorithms (concatenation, averaging, and DMEs) on different combinations of input facets ( \u00a73.2) -FG is FastText and Glove only, All is all 13 facets. We report the mean and standard deviation from runs with five different random seeds. Best result is bold, second best is underlined.", |
| "content": "<table><tr><td colspan=\"2\">Model Facets</td><td>Semcor</td><td>WSJ</td><td>Brown</td><td>NER</td><td>SNLI</td><td>SST2</td></tr><tr><td>Average</td><td>FG</td><td>69.42</td><td/><td/><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "html": null, |
| "text": "\u00b1 .2 96.53 \u00b1 .03 98.02 \u00b1 .02 86.33 \u00b1 .2 82.76 \u00b1 .2 85.55 \u00b1 .6 Prism None 70.05 \u00b1 .2 96.73 \u00b1 .03 98.21 \u00b1 .02 87.56 \u00b1 .2 82.73 \u00b1 .4 85.61 \u00b1 .6 Prism Uncon. 71.54 \u00b1 .1 96.90 \u00b1 .01 98.36 \u00b1 .00 87.68 \u00b1 .3 83.87 \u00b1 .2 86.15 \u00b1 .7 Prism Orthog. 72.41 \u00b1 .2 96.98 \u00b1 .04 98.44 \u00b1 .01 88.91 \u00b1 .2 83.89 \u00b1 .4 86.68 \u00b1 .6", |
| "content": "<table><tr><td>Model</td><td>Proj.</td><td>Semcor</td><td>WSJ</td><td>Brown</td><td>NER</td><td>SNLI</td><td>SST2</td></tr><tr><td>Avg.</td><td>None</td><td>64.86</td><td/><td/><td/><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "html": null, |
| "text": "Window 67.55 \u00b1 .1 96.60 \u00b1 .03 98.17 \u00b1 .01 87.54 \u00b1 .3 83.08 \u00b1 .2 85.82 \u00b1 .7 Best-All 70.50 \u00b1 .2 96.75 \u00b1 .03 98.42 \u00b1 .03 90.39 \u00b1 .2 85.60 \u00b1 .2 87.48 \u00b1 1.0 W1-10 71.67 \u00b1 .2 96.86 \u00b1 .02 98.36 \u00b1 .02 88.39 \u00b1 .2 83.88 \u00b1 .3 86.52 \u00b1 .4 W1-Far 71.78 \u00b1 .1 96.89 \u00b1 .02 98.36 \u00b1 .01 88.54 \u00b1 .2 83.69 \u00b1 .3 87.39 \u00b1 .3 All windows 72.41 \u00b1 .2 96.98 \u00b1 .02 98.44 \u00b1 .01 88.91 \u00b1 .2 83.89 \u00b1 .4 86.68 \u00b1 .6 FG 73.51 \u00b1 .1 96.91 \u00b1 .01 98.58 \u00b1 .02 89.70 \u00b1 .4 85.82 \u00b1 .1 87.80 \u00b1 .6 FGCL 73.51 \u00b1 .1 96.98 \u00b1 .03 98.63 \u00b1 .01 89.83 \u00b1 .3 85.68 \u00b1 .2 88.90 \u00b1 .4 All 73.82 \u00b1 .2 97.04 \u00b1 .01 98.65 \u00b1 .02 90.74 \u00b1 .2 85.71 \u00b1 .3 88.45 \u00b1 .5", |
| "content": "<table><tr><td>Semcor</td><td>WSJ</td><td>Brown</td><td>NER</td><td>SNLI</td><td>SST2</td></tr><tr><td>Best-</td><td/><td/><td/><td/><td/></tr></table>", |
| "num": null, |
| "type_str": "table" |
| } |
| } |
| } |
| } |