| { |
| "paper_id": "N18-1048", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T13:50:37.253638Z" |
| }, |
| "title": "Post-Specialisation: Retrofitting Vectors of Words Unseen in Lexical Resources", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Mannheim", |
| "location": { |
| "addrLine": "3 PolyAI" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "nikola@poly-ai.com" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Word vector specialisation (also known as retrofitting) is a portable, lightweight approach to fine-tuning arbitrary distributional word vector spaces by injecting external knowledge from rich lexical resources such as WordNet. By design, these post-processing methods only update the vectors of words occurring in external lexicons, leaving the representations of all unseen words intact. In this paper, we show that constraint-driven vector space specialisation can be extended to unseen words. We propose a novel post-specialisation method that: a) preserves the useful linguistic knowledge for seen words; while b) propagating this external signal to unseen words in order to improve their vector representations as well. Our post-specialisation approach explicits a non-linear specialisation function in the form of a deep neural network by learning to predict specialised vectors from their original distributional counterparts. The learned function is then used to specialise vectors of unseen words. This approach, applicable to any postprocessing model, yields considerable gains over the initial specialisation models both in intrinsic word similarity tasks, and in two downstream tasks: dialogue state tracking and lexical text simplification. The positive effects persist across three languages, demonstrating the importance of specialising the full vocabulary of distributional word vector spaces.", |
| "pdf_parse": { |
| "paper_id": "N18-1048", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Word vector specialisation (also known as retrofitting) is a portable, lightweight approach to fine-tuning arbitrary distributional word vector spaces by injecting external knowledge from rich lexical resources such as WordNet. By design, these post-processing methods only update the vectors of words occurring in external lexicons, leaving the representations of all unseen words intact. In this paper, we show that constraint-driven vector space specialisation can be extended to unseen words. We propose a novel post-specialisation method that: a) preserves the useful linguistic knowledge for seen words; while b) propagating this external signal to unseen words in order to improve their vector representations as well. Our post-specialisation approach explicits a non-linear specialisation function in the form of a deep neural network by learning to predict specialised vectors from their original distributional counterparts. The learned function is then used to specialise vectors of unseen words. This approach, applicable to any postprocessing model, yields considerable gains over the initial specialisation models both in intrinsic word similarity tasks, and in two downstream tasks: dialogue state tracking and lexical text simplification. The positive effects persist across three languages, demonstrating the importance of specialising the full vocabulary of distributional word vector spaces.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Word representation learning is a key research area in current Natural Language Processing (NLP), with its usefulness demonstrated across a range of tasks (Collobert et al., 2011; Chen and Manning, 2014; Melamud et al., 2016b) . The standard techniques for inducing distributed word representations are grounded in the distributional hypothesis (Harris, 1954) : they rely on co-occurrence information in large textual corpora (Mikolov et al., 2013b; Pennington et al., 2014; Levy and Goldberg, 2014; Levy et al., 2015; Bojanowski et al., 2017) . As a result, these models tend to coalesce the notions of semantic similarity and (broader) conceptual relatedness, and cannot accurately distinguish antonyms from synonyms Schwartz et al., 2015) . Recently, we have witnessed a rise of interest in representation models that move beyond stand-alone unsupervised learning: they leverage external knowledge in human-and automaticallyconstructed lexical resources to enrich the semantic content of distributional word vectors, in a process termed semantic specialisation.", |
| "cite_spans": [ |
| { |
| "start": 155, |
| "end": 179, |
| "text": "(Collobert et al., 2011;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 180, |
| "end": 203, |
| "text": "Chen and Manning, 2014;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 204, |
| "end": 226, |
| "text": "Melamud et al., 2016b)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 345, |
| "end": 359, |
| "text": "(Harris, 1954)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 426, |
| "end": 449, |
| "text": "(Mikolov et al., 2013b;", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 450, |
| "end": 474, |
| "text": "Pennington et al., 2014;", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 475, |
| "end": 499, |
| "text": "Levy and Goldberg, 2014;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 500, |
| "end": 518, |
| "text": "Levy et al., 2015;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 519, |
| "end": 543, |
| "text": "Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 719, |
| "end": 741, |
| "text": "Schwartz et al., 2015)", |
| "ref_id": "BIBREF64" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This is often done as a post-processing (sometimes referred to as retrofitting) step: input word vectors are fine-tuned to satisfy linguistic constraints extracted from lexical resources such as WordNet or BabelNet (Faruqui et al., 2015; . The use of external curated knowledge yields improved word vectors for the benefit of downstream applications (Faruqui, 2016) . At the same time, this specialisation of the distributional space distinguishes between true similarity and relatedness, and supports language understanding tasks (Kiela et al., 2015; .", |
| "cite_spans": [ |
| { |
| "start": 215, |
| "end": 237, |
| "text": "(Faruqui et al., 2015;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 350, |
| "end": 365, |
| "text": "(Faruqui, 2016)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 531, |
| "end": 551, |
| "text": "(Kiela et al., 2015;", |
| "ref_id": "BIBREF30" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "While there is consensus regarding their benefits and ease of use, one property of the post-processing specialisation methods slips under the radar: most existing post-processors update word embeddings only for words which are present (i.e., seen) in the external constraints, while vectors of all other (i.e., unseen) words remain unaffected. In this work, we propose a new approach that extends the specialisation framework to unseen words, relying on the transformation of the vector (sub)space of seen words. Our intuition is that the process of finetuning seen words provides implicit information on how to leverage the external knowledge to unseen words. The method should preserve the already injected knowledge for seen words, simultaneously propagating the external signal to unseen words in order to improve their vectors.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The proposed post-specialisation method can be seen as a two-step process, illustrated in Fig. 1a : 1) We use a state-of-the-art specialisation model to transform the subspace of seen words from the input distributional space into the specialised subspace; 2) We learn a mapping function based on the transformation of the \"seen subspace\", and then apply it to the distributional subspace of unseen words. We allow the proposed post-specialisation model to learn from large external linguistic resources by implementing the mapping as a deep feed-forward neural network with non-linear activations. This allows the model to learn the generalisation of the fine-tuning steps taken by the initial specialisation model, itself based on a very large number (e.g., hundreds of thousands) of external linguistic constraints.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 90, |
| "end": 97, |
| "text": "Fig. 1a", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As indicated by the results on word similarity and two downstream tasks (dialogue state tracking and lexical text simplification) our postspecialisation method consistently outperforms state-of-the-art methods which specialise seen words only. We report improvements using three distinct input vector spaces for English and for three test languages (English, German, Italian), verifying the robustness of our approach.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Vector Space Specialisation A standard approach to incorporating external and background knowledge into word vector spaces is to pull the representations of similar words closer together and to push words in undesirable relations (e.g., antonyms) away from each other. Some models integrate such constraints into the training procedure and jointly optimize distributional and nondistributional objectives: they modify the prior or the regularisation (Yu and Dredze, 2014; Xu et al., 2014; Kiela et al., 2015) , or use a variant of the SGNS-style objective (Liu et al., 2015; Ono et al., 2015; Osborne et al., 2016; Nguyen et al., 2017) . In theory, word embeddings obtained by these joint models could be as good as representations produced by models which finetune input vector space. However, their performance falls behind that of fine-tuning methods (Wieting et al., 2015) . Another disadvantage is that their architecture is tied to a specific underlying model (typically word2vec models).", |
| "cite_spans": [ |
| { |
| "start": 450, |
| "end": 471, |
| "text": "(Yu and Dredze, 2014;", |
| "ref_id": "BIBREF77" |
| }, |
| { |
| "start": 472, |
| "end": 488, |
| "text": "Xu et al., 2014;", |
| "ref_id": "BIBREF75" |
| }, |
| { |
| "start": 489, |
| "end": 508, |
| "text": "Kiela et al., 2015)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 556, |
| "end": 574, |
| "text": "(Liu et al., 2015;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 575, |
| "end": 592, |
| "text": "Ono et al., 2015;", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 593, |
| "end": 614, |
| "text": "Osborne et al., 2016;", |
| "ref_id": "BIBREF58" |
| }, |
| { |
| "start": 615, |
| "end": 635, |
| "text": "Nguyen et al., 2017)", |
| "ref_id": "BIBREF54" |
| }, |
| { |
| "start": 854, |
| "end": 876, |
| "text": "(Wieting et al., 2015)", |
| "ref_id": "BIBREF73" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work and Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In contrast, fine-tuning models inject external knowledge from available lexical resources (e.g., WordNet, PPDB) into pre-trained word vectors as a post-processing step (Faruqui et al., 2015; Rothe and Sch\u00fctze, 2015; Wieting et al., 2015; Nguyen et al., 2016; Cotterell et al., 2016; . Such post-processing models are popular because they offer a portable, flexible, and light-weight approach to incorporating external knowledge into arbitrary vector spaces, yielding state-of-the-art results on language understanding tasks (Faruqui et al., 2015; Kim et al., 2016; Vuli\u0107 et al., 2017b) .", |
| "cite_spans": [ |
| { |
| "start": 169, |
| "end": 191, |
| "text": "(Faruqui et al., 2015;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 192, |
| "end": 216, |
| "text": "Rothe and Sch\u00fctze, 2015;", |
| "ref_id": "BIBREF62" |
| }, |
| { |
| "start": 217, |
| "end": 238, |
| "text": "Wieting et al., 2015;", |
| "ref_id": "BIBREF73" |
| }, |
| { |
| "start": 239, |
| "end": 259, |
| "text": "Nguyen et al., 2016;", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 260, |
| "end": 283, |
| "text": "Cotterell et al., 2016;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 525, |
| "end": 547, |
| "text": "(Faruqui et al., 2015;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 548, |
| "end": 565, |
| "text": "Kim et al., 2016;", |
| "ref_id": "BIBREF55" |
| }, |
| { |
| "start": 566, |
| "end": 586, |
| "text": "Vuli\u0107 et al., 2017b)", |
| "ref_id": "BIBREF70" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work and Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Existing post-processing models, however, suffer from a major limitation. Their modus operandi is to enrich the distributional information with external knowledge only if such knowledge is present in a lexical resource. This means that they update and improve only representations of words actually seen in external resources. Because such words constitute only a fraction of the whole vocabulary (see Sect. 4), most words, unseen in the constraints, retain their original vectors. The main goal of this work is to address this shortcoming by specialising all words from the initial distributional space.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work and Motivation", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our starting point is the state-of-the-art specialisation model ATTRACT-REPEL (AR) , outlined in Sect. 3.1. We opt for the AR model due to its strong performance and ease of use, but we note that the proposed postspecialisation approach for specialising unseen words, described in Sect. 3.2, is applicable to any post-processor, as empirically validated in Sect. 5.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Methodology: Post-Specialisation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Let V s be the vocabulary, A the set of synonymous ATTRACT word pairs (e.g., rich and wealthy), and R the set of antonymous REPEL word pairs (e.g., increase and decrease). The ATTRACT-REPEL procedure operates over mini-batches of such pairs B A and B R . Let each word pair (x l , x r ) in these sets correspond to a vector pair (x l , x r ). A mini-batch of b att attract word pairs is given by", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "B A = [(x 1 l , x 1 r ), . . . , (x k 1 l , x k 1 r )] (analogously for B R , which consists of b rep pairs).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Next, the sets of negative examples", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "T A = [(t 1 l , t 1 r ), . . . , (t k 1 l , t k 1 r )] and T R = [(t 1 l , t 1 r ), . . . , (t k 2 l , t k 2 r )]", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "are defined as pairs of negative examples for each A and R pair in mini-batches B A and B R . These negative examples are chosen from the word vectors present in B A or B R so that, for each A pair (x l , x r ), the negative example pair (t l , t r ) is chosen so that t l is the vector closest (in terms of cosine distance) to x l and t r is closest to x r . 1 The negatives are used 1) to force A pairs to be closer to each other than to their respective negative examples; and 2) to force R pairs to be further away from each other than from their negative examples. The first term of the cost function pulls A pairs together:", |
| "cite_spans": [ |
| { |
| "start": 360, |
| "end": 361, |
| "text": "1", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Att(BA, TA) = b att i=1 \u03c4 \u03b4att + x i l t i l \u2212 x i l x i r +\u03c4 \u03b4att + x i r t i r \u2212 x i l x i r (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where \u03c4 (z) = max(0, z) is the standard rectifier function (Nair and Hinton, 2010) and \u03b4 att is the attract margin: it determines how much closer these vectors should be to each other than to their respective negative examples. The second, REPEL term in the cost function is analogous: it pushes R word pairs away from each other by the margin \u03b4 rep . Finally, in addition to the A and R terms, a regularisation term is used to preserve the semantic content originally present in the distributional vector space, as long as this information does not contradict the injected external knowledge. Let V(B) be the set of all word vectors present in a mini-batch, the distributional regularisation term is then:", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 82, |
| "text": "(Nair and Hinton, 2010)", |
| "ref_id": "BIBREF52" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Reg(BA, BR) = x i \u2208V (B A \u222aB R ) \u03bbreg xi \u2212 xi 2 (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "where \u03bb reg is the L 2 -regularisation constant and x i denotes the original (distributional) word vector for word x i . The full ATTRACT-REPEL cost function is finally constructed as the sum of all three terms.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Initial Specialisation Model: AR", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Problem Formulation The goal is to learn a global transformation function that generalises the perturbations of the initial vector space made by ATTRACT-REPEL (or any other specialisation procedure), as conditioned on the external constraints. The learned function propagates the signal coded in the input constraints to all the words unseen during the specialisation process. We seek a regression function f : R dim \u2192 R dim , where dim is the vector space dimensionality. It maps word vectors from the initial vector space X to the specialised target space X . Let X = f (X) refer to the predicted mapping of the vector space, while the mapping of a single word vector is denoted", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x i = f (x i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "An input distributional vector space X d represents words from a vocabulary", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "V d . V d may be di- vided into two vocabulary subsets: V d = V s \u222a V u , V s \u2229 V u = \u2205, with the accompanying vector sub- spaces X d = X s X u . V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "s refers to the vocabulary of seen words: those that appear in the external linguistic constraints and have their embeddings changed in the specialisation process. V u denotes the vocabulary of unseen words: those not present in the constraints and whose embeddings are unaffected by the specialisation procedure.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The AR specialisation process transforms only the subspace X s into the specialised subspace X s . All words x i \u2208 V s may now be used as training examples for learning the explicit mapping function", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "f from X s into X s . If N = |V s |, we in fact rely on N training pairs: (x i , x i ) = {x i \u2208 X s , x i \u2208 X s }.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Function f can then be applied to unseen words x \u2208 V u to yield the specialised subspace X u = f (X u ). The specialised space containing all words is then X f = X s \u222a X u . The complete high-level post-specialisation procedure is outlined in Fig. 1a .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 243, |
| "end": 250, |
| "text": "Fig. 1a", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Note that another variant of the approach could obtain X f as X f = f (X d ), that is, the entire distributional space is transformed by f . However, this variant seems counter-intuitive as it forgets the actual output of the initial specialisation procedure and replaces word vectors from X s with their approximations, i.e., f -mapped vectors. 2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Objective Functions As mentioned, the N seen words x i \u2208 V s in fact serve as our \"pseudotranslation\" pairs supporting the learning of a crossspace mapping function. In practice, in its highlevel formulation, our mapping problem is equivalent to those encountered in the literature on crosslingual word embeddings where the goal is to learn a shared cross-lingual space given monolingual vector spaces in two languages and N 1 translation pairs (Mikolov et al., 2013a; Vuli\u0107 and Korhonen, 2016b; Artetxe et al., 2016 Artetxe et al., , 2017 Conneau et al., 2017; Ruder et al., 2017) . In our setup, the standard objective based on", |
| "cite_spans": [ |
| { |
| "start": 445, |
| "end": 468, |
| "text": "(Mikolov et al., 2013a;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 469, |
| "end": 495, |
| "text": "Vuli\u0107 and Korhonen, 2016b;", |
| "ref_id": "BIBREF67" |
| }, |
| { |
| "start": 496, |
| "end": 516, |
| "text": "Artetxe et al., 2016", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 517, |
| "end": 539, |
| "text": "Artetxe et al., , 2017", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 540, |
| "end": 561, |
| "text": "Conneau et al., 2017;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 562, |
| "end": 581, |
| "text": "Ruder et al., 2017)", |
| "ref_id": "BIBREF63" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "L 2 -penalised xi \u2208 Xu f : Deep neural network (non-linear regression) x i \u2208 X u Xd = Xs \u222a Xu (distributional) Linguistic Constraints (x1 \u2208 Xs, y1 \u2208 Xs) (x1 \u2208 Xs, y2 \u2208 Xs) (x2 \u2208 Xs, y3 \u2208 Xs) \u2022 \u2022 \u2022 X s \u222a Xu (specialised: seen) Xf = X s \u222a X u (specialised final: all) attract-repel mapping Training Pairs: Seen (x1 \u2208 Xs, x 1 \u2208 X s ) (x2 \u2208 Xs, x 2 \u2208 X s ) (x3 \u2208 Xs, x 3 \u2208 X s ) \u2022 \u2022 \u2022 (a) High-level illustration x' i,p (d=300) . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . swish swish swish swish .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ". .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x i (d=300)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x' i,h 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(d 1 =512) x' i,h 2 (d 2 =512)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "x' i,h H-1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(d H-1 =512) x' i,h H (d H =512) . . . . . . . . . . . . . . . . . .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Specialisation of Unseen Words", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Hidden", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "H-1 Hidden H Output X u X' u (b) Low-level implementation: deep feed-forward neural network", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Figure 1: (a) High-level illustration of the post-specialisation approach: the subspace X s of the initial distributional vector space X d = X s \u222a X u is first specialised/fine-tuned by the ATTRACT-REPEL specialisation model (or any other post-processing model) to obtain the transformed subspace X s . The words present (i.e., seen) in the input set of linguistic constraints are now assigned different representations in X s (the original distributional vector) and X s (the specialised vector): they are therefore used as training examples to learn a non-linear cross-space mapping function. This function is then applied to all word vectors x i \u2208 X u representing words unseen in the constraints to yield a specialised subspace X u . The final space is X f = X s \u222a X u , and it contains transformed representations for all words from the initial space", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "X d . (b)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "The actual implementation of the non-linear regression function which maps from X u to X u : a deep feed-forward fully-connected neural net with non-linearities and H hidden layers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "least squares may be formulated as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "fMSE = arg min||f (Xs) \u2212 X s || 2 F", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "where || \u2022 || 2 F denotes the squared Frobenius norm. In the most common form f (X s ) is simply a linear map/matrix W f \u2208 R dim\u00d7dim (Mikolov et al., 2013a) as follows:", |
| "cite_spans": [ |
| { |
| "start": 133, |
| "end": 156, |
| "text": "(Mikolov et al., 2013a)", |
| "ref_id": "BIBREF45" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "f (X) = W f X.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "After learning f based on the X s \u2192 X s transformation, one can simply apply f to unseen words:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "X u = f (X u )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": ". This linear mapping model, termed LINEAR-MSE, has an analytical solution (Artetxe et al., 2016) , and has been proven to work well with cross-lingual embeddings. However, given that the specialisation model injects hundreds of thousands (or even millions) of linguistic constraints into the distributional space (see later in Sect. 4), we suspect that the assumption of linearity is too limiting and does not fully hold in this particular setup.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 97, |
| "text": "(Artetxe et al., 2016)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Using the same L 2 -penalized least squares objective, we can thus replace the linear map with a nonlinear function f : R dim \u2192 R dim . The non-linear mapping, illustrated by Fig. 1b , is implemented as a deep feed-forward fully-connected neural network (DFFN) with H hidden layers and non-linear activations. This variant is called NONLINEAR-MSE.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 175, |
| "end": 182, |
| "text": "Fig. 1b", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Another variant objective is the contrastive margin-based ranking loss with negative sampling (MM) similar to the original ATTRACT-REPEL objective, used in other applications in prior work (e.g., for cross-modal mapping) Frome et al., 2013; Kummerfeld et al., 2015) . Let x i = f (x i ) denote the predicted vector for the word x i \u2208 V s , and let x i refer to the \"true\" vector of x i in the specialised space X s after the AR specialisation procedure. The MM loss is then defined as follows:", |
| "cite_spans": [ |
| { |
| "start": 221, |
| "end": 240, |
| "text": "Frome et al., 2013;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 241, |
| "end": 265, |
| "text": "Kummerfeld et al., 2015)", |
| "ref_id": "BIBREF35" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "J_{MM} = \\sum_{i=1}^{N} \\sum_{j \\neq i}^{k} \\tau\\left( \\delta_{mm} - \\cos(\\hat{x}_i, x'_i) + \\cos(\\hat{x}_i, x'_j) \\right)",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "where cos is the cosine similarity measure, \u03b4 mm is the margin, and k is the number of negative samples. The objective tries to learn the mapping f so that each predicted vector x i is by the specified margin \u03b4 mm closer to the correct target vector x i than to any other of k target vectors x j serving as negative examples. 3 Function f can again be either a simple linear map (LINEAR-MM), or implemented as a DFFN (NONLINEAR-MM, see Fig. 1b ).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 436, |
| "end": 443, |
| "text": "Fig. 1b", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Input Hidden 1 Hidden 2", |
| "sec_num": null |
| }, |
| { |
| "text": "Starting Word Embeddings (X d = X s \u222a X u )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To test the robustness of our approach, we experiment with three well-known, publicly available collections of English word vectors: 1) Skip-Gram with Negative Sampling (SGNS-BOW2) (Mikolov et al., 2013b ) trained on the Polyglot Wikipedia (Al-Rfou et al., 2013) by Levy and Goldberg (2014) using bag-of-words windows of size 2; 2) GLOVE Common Crawl (Pennington et al., 2014) ; and 3) FASTTEXT (Bojanowski et al., 2017) , a SGNS variant which builds word vectors as the sum of their constituent character n-gram vectors. All word embeddings are 300-dimensional. 4 AR Specialisation and Constraints (X s \u2192 X s ) We experiment with linguistic constraints used before by Vuli\u0107 et al., 2017a) : they extracted monolingual synonymy/ATTRACT pairs from the Paraphrase Database (PPDB) (Ganitkevitch et al., 2013; Pavlick et al., 2015) (640,435 synonymy pairs in total), while their antonymy/REPEL constraints came from BabelNet (Navigli and Ponzetto, 2012) (11,939 pairs). 5 The coverage of V d vocabulary words in the constraints illustrates well the problem of unseen words with the fine-tuning specialisation models. For instance, the constraints cover only a small subset of the entire vocabulary V d for SGNS-BOW2: 16.6%. They also cover only 15.3% of the top 200K most frequent V d words from FASTTEXT.", |
| "cite_spans": [ |
| { |
| "start": 181, |
| "end": 203, |
| "text": "(Mikolov et al., 2013b", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 266, |
| "end": 290, |
| "text": "Levy and Goldberg (2014)", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 351, |
| "end": 376, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF60" |
| }, |
| { |
| "start": 395, |
| "end": 420, |
| "text": "(Bojanowski et al., 2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 563, |
| "end": 564, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 669, |
| "end": 689, |
| "text": "Vuli\u0107 et al., 2017a)", |
| "ref_id": "BIBREF69" |
| }, |
| { |
| "start": 778, |
| "end": 805, |
| "text": "(Ganitkevitch et al., 2013;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 806, |
| "end": 827, |
| "text": "Pavlick et al., 2015)", |
| "ref_id": "BIBREF59" |
| }, |
| { |
| "start": 966, |
| "end": 967, |
| "text": "5", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(X u \u2192 X u )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "The non-linear regression function f : Fig. 1b ). Non-linear activations are used in each layer and", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 39, |
| "end": 46, |
| "text": "Fig. 1b", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "R^d \u2192 R^d is a DFFN with H hidden layers, each of dimensionality d_1 = d_2 = ... = d_H = 512 (see",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "\\sum_{i=1}^{N} \\tau( \\delta_{mm} - \\cos(\\hat{x}_i, x'_i) ).",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "For instance, with \u03b4mm = 1.0 the idea is to learn a mapping f that, for each xi enforces the predicted vector and the correct target vector to have a maximum cosine similarity. We do not report the results with this variant as, although it outscores the MSE-style objective, it was consistently outperformed by the MM objective. 4 For further details regarding the architectures and training setup of the used vector collections, we refer the reader to the original papers. Additional experiments with other word vectors, e.g., with CONTEXT2VEC (Melamud et al., 2016a) (which uses bidirectional LSTMs (Hochreiter and Schmidhuber, 1997) for context modeling), and with dependency-word based embeddings (Bansal et al., 2014; Melamud et al., 2016b) lead to similar results and same conclusions. 5 We have experimented with another set of constraints used in prior work (Zhang et al., 2014; Ono et al., 2015) , reaching similar conclusions: these were extracted from Word-Net (Fellbaum, 1998) and Roget (Kipfer, 2009) , and comprise 1,023,082 synonymy pairs and 380,873 antonymy pairs. omitted only before the final output layer to enable full-range predictions (see Fig. 1b again) .", |
| "cite_spans": [ |
| { |
| "start": 329, |
| "end": 330, |
| "text": "4", |
| "ref_id": null |
| }, |
| { |
| "start": 545, |
| "end": 568, |
| "text": "(Melamud et al., 2016a)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 601, |
| "end": 635, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 701, |
| "end": 722, |
| "text": "(Bansal et al., 2014;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 723, |
| "end": 745, |
| "text": "Melamud et al., 2016b)", |
| "ref_id": "BIBREF44" |
| }, |
| { |
| "start": 792, |
| "end": 793, |
| "text": "5", |
| "ref_id": null |
| }, |
| { |
| "start": 866, |
| "end": 886, |
| "text": "(Zhang et al., 2014;", |
| "ref_id": "BIBREF78" |
| }, |
| { |
| "start": 887, |
| "end": 904, |
| "text": "Ono et al., 2015)", |
| "ref_id": "BIBREF57" |
| }, |
| { |
| "start": 972, |
| "end": 988, |
| "text": "(Fellbaum, 1998)", |
| "ref_id": null |
| }, |
| { |
| "start": 993, |
| "end": 1013, |
| "text": "Roget (Kipfer, 2009)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1163, |
| "end": 1177, |
| "text": "Fig. 1b again)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "The choices of non-linear activation and initialisation are guided by recent recommendations from the literature. First, we use swish (Ramachandran et al., 2017; Elfwing et al., 2017) as nonlinearity, defined as swish(x) = x \u2022 sigmoid(\u03b2x). We fix \u03b2 = 1 as suggested by Ramachandran et al. (2017) . 6 Second, we use the HE normal initialisation (He et al., 2015) , which is preferred over the XAVIER initialisation (Glorot and Bengio, 2010) for deep models (Mishkin and Matas, 2016; Li et al., 2016) , although in our experiments we do not observe a significant difference in performance between the two alternatives. We set H = 5 in all experiments without any fine-tuning; we also analyse the impact of the network depth in Sect. 5.", |
| "cite_spans": [ |
| { |
| "start": 134, |
| "end": 161, |
| "text": "(Ramachandran et al., 2017;", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 162, |
| "end": 183, |
| "text": "Elfwing et al., 2017)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 269, |
| "end": 295, |
| "text": "Ramachandran et al. (2017)", |
| "ref_id": "BIBREF61" |
| }, |
| { |
| "start": 298, |
| "end": 299, |
| "text": "6", |
| "ref_id": null |
| }, |
| { |
| "start": 344, |
| "end": 361, |
| "text": "(He et al., 2015)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 414, |
| "end": 439, |
| "text": "(Glorot and Bengio, 2010)", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 456, |
| "end": 481, |
| "text": "(Mishkin and Matas, 2016;", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 482, |
| "end": 498, |
| "text": "Li et al., 2016)", |
| "ref_id": "BIBREF40" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "Optimisation For the AR specialisation step, we adopt the original suggested model setup. Hyperparameter values are set to: \u03b4 att = 0.6, \u03b4 rep = 0.0, \u03bb reg = 10 \u22129 . The models are trained for 5 epochs with Adagrad (Duchi et al., 2011) , with batch sizes set to b att = b rep = 50, again as in the original work.", |
| "cite_spans": [ |
| { |
| "start": 215, |
| "end": 235, |
| "text": "(Duchi et al., 2011)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "For training the non-linear mapping with DFFN ( Fig. 1b) , we use the Adam algorithm (Kingma and Ba, 2015) with default settings. The model is trained for 100 epochs with early stopping on a validation set. We reserve 10% of all available seen data (i.e., the words from V s represented in X s and X s ) for validation, the rest are used for training. For the MM objective, we set \u03b4 mm = 0.6 and k = 25 in all experiments without any fine-tuning.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 48, |
| "end": 56, |
| "text": "Fig. 1b)", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Network Design and Parameters", |
| "sec_num": null |
| }, |
| { |
| "text": "Evaluation Protocol The first set of experiments evaluates vector spaces with different specialisation procedures intrinsically on word similarity benchmarks: we use the SimLex-999 dataset , and SimVerb-3500 (Gerz et al., 2016) , a recent verb pair similarity dataset providing similarity ratings for 3,500 verb pairs. 7 Spearman's \u03c1 rank correlation is used as the evaluation metric. We evaluate word vectors in two settings. First, in a synthetic hold-out setting, we remove all linguistic constraints which contain words from the SimLex and SimVerb evaluation data, effectively forcing all SimLex and SimVerb words to be unseen by the AR specialisation model. The specialised vectors for these words are estimated by the learned non-linear DFFN mapping model. Second, the all setting is a standard \"real-life\" scenario where some test (SimLex/SimVerb) words do occur in the constraints, while the mapping is learned for the remaining words.", |
| "cite_spans": [ |
| { |
| "start": 208, |
| "end": 227, |
| "text": "(Gerz et al., 2016)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Intrinsic Evaluation: Word Similarity", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The results with the three word vector collections are provided in Tab. 1. In addition, Fig. 2 plots the influence of the network to discern between the two, so that related but non-similar words (e.g. tiger and jungle) have a low rating.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 88, |
| "end": 94, |
| "text": "Fig. 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "depth H on the model's performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "The results suggest that the mapping of unseen words is universally useful, as the highest correlation scores are obtained with the final fully specialised vector space X f for all three input spaces. The results in the hold-out setup are particularly indicative of the improvement achieved by our postspecialisation method. For instance, it achieves a +0.2 correlation gain with GLOVE on both SimLex and SimVerb by specialising vector representations for words present in these datasets without seeing a single external constraint which contains any of these words. This suggests that the perturbation of the seen subspace X s by ATTRACT-REPEL contains implicit knowledge that can be propagated to X u , learning better representations for unseen words. We observe small but consistent improvements across the board in the all setup. The smaller gains can be explained by the fact that a majority of SimLex and SimVerb words are present in the external constraints (93.7% and 87.2%, respectively).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "The scores also indicate that both non-linearity and the chosen objective function contribute to the quality of the learned mapping: largest gains are reported with the NONLINEAR-MM variant which a) employs non-linear activations and b) replaces the basic mean-squared-error objective with maxmargin. The usefulness of the latter has been established in prior work on cross-space mapping learning . The former indicates that the initial AR transformation is non-linear. It is guided by a large number of constraints; their effect cannot be captured by a simple linear map as in prior work on, e.g., cross-lingual word embeddings (Mikolov et al., 2013a; Ruder et al., 2017) .", |
| "cite_spans": [ |
| { |
| "start": 629, |
| "end": 652, |
| "text": "(Mikolov et al., 2013a;", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 653, |
| "end": 672, |
| "text": "Ruder et al., 2017)", |
| "ref_id": "BIBREF63" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "Finally, the analysis of the network depth H indicates that going deeper helps only to a certain extent. Adding more layers allows for a richer parametrisation of the network (which is beneficial given the number of linguistic constraints used by AR). This makes the model more expressive, but it seems to saturate with larger H values.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "We also verify that our post-specialisation approach is not tied to the ATTRACT-REPEL method, and is indeed applicable on top of any post-processing specialisation method. We analyse the impact of postspecialisation in the hold-out setting using the original retrofitting (RFit) model (Faruqui et al., 2015) and counter-fitting (CFit) in lieu of attract-repel. The results on word similarity with the best-performing NONLINEAR-MM variant are summarised in Tab. 2.", |
| "cite_spans": [ |
| { |
| "start": 285, |
| "end": 307, |
| "text": "(Faruqui et al., 2015)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-Specialisation with Other Post-Processors", |
| "sec_num": null |
| }, |
| { |
| "text": "The scores again indicate the usefulness of postspecialisation. As expected, the gains are lower Figure 3 : DST labels (user goals given by slot-value pairs) in a multi-turn dialogue (Mrk\u0161i\u0107 et al., 2015 Table 2 : Post-specialisation applied to two other post-processing methods. SL: SimLex; SV: SimVerb. Hold-out setting. NONLINEAR-MM.", |
| "cite_spans": [ |
| { |
| "start": 183, |
| "end": 203, |
| "text": "(Mrk\u0161i\u0107 et al., 2015", |
| "ref_id": "BIBREF49" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 97, |
| "end": 105, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 204, |
| "end": 211, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Post-Specialisation with Other Post-Processors", |
| "sec_num": null |
| }, |
| { |
| "text": "than with ATTRACT-REPEL. RFit falls short of CFit as by design it can leverage only synonymy (i.e., ATTRACT) external constraints.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Post-Specialisation with Other Post-Processors", |
| "sec_num": null |
| }, |
| { |
| "text": "Next, we evaluate the usefulness of postspecialisation for two downstream tasks -dialogue state tracking and lexical text simplification -in which discerning semantic similarity from other types of semantic relatedness is crucial. We first evaluate the importance of post-specialisation for a downstream language understanding task of dialogue state tracking (DST) (Henderson et al., 2014; Williams et al., 2016) , adopting the evaluation protocol and data of .", |
| "cite_spans": [ |
| { |
| "start": 365, |
| "end": 389, |
| "text": "(Henderson et al., 2014;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 390, |
| "end": 412, |
| "text": "Williams et al., 2016)", |
| "ref_id": "BIBREF74" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Downstream Task I: DST", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "DST: Model and Evaluation The DST model is the first component of modern dialogue pipelines (Young, 2010) , which captures the users' goals at each dialogue turn and then updates the dialogue state. Goals are represented as sets of constraints expressed as slot-value pairs (e.g., food=Chinese). The set of slots and the set of values for each slot constitute the ontology of a dialogue domain. The probability distribution over the possible states is the system's estimate of the user's goals, and it is used by the dialogue manager module to select the subsequent system response . An example in Fig. 3 illustrates the DST pipeline. For evaluation, we use the Neural Belief Tracker (NBT), a state-of-the-art DST model which was the first to reason purely over pre-trained word vectors . 8 The NBT uses no hand-crafted semantic lexicons, instead composing word vectors into intermediate utterance and context representations. 9 For full model details, we refer the reader to the original paper. The importance of word vector specialisation for the DST task (e.g., distinguishing between synonyms and antonyms by pulling northern and north closer in ENGLISH hold-out all", |
| "cite_spans": [ |
| { |
| "start": 92, |
| "end": 105, |
| "text": "(Young, 2010)", |
| "ref_id": "BIBREF76" |
| }, |
| { |
| "start": 789, |
| "end": 790, |
| "text": "8", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 598, |
| "end": 604, |
| "text": "Fig. 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Downstream Task I: DST", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Distributional: X d .797 .797 +AR Spec.: X s \u222a Xu .797 .817 ++Mapping: X f = X s \u222a X u LINEAR-MM .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Downstream Task I: DST", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": ".818", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "815", |
| "sec_num": null |
| }, |
| { |
| "text": ".827 .835 Table 3 : DST results in two evaluation settings (hold-out and all) with different GLOVE variants.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 10, |
| "end": 17, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "NONLINEAR-MM", |
| "sec_num": null |
| }, |
| { |
| "text": "the vector space while pushing north and south away) has been established . Again, as in prior work the DST evaluation is based on the Wizard-of-Oz (WOZ) v2.0 dataset , comprising 1,200 dialogues split into training (600 dialogues), development 200, and test data (400). In all experiments, we report the standard DST performance measure: joint goal accuracy, and report scores as averages over 5 NBT training runs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "NONLINEAR-MM", |
| "sec_num": null |
| }, |
| { |
| "text": "We again evaluate word vectors in two settings: 1) hold-out, where linguistic constraints with words appearing in the WOZ data are removed, making all WOZ words unseen by ATTRACT-REPEL; and 2) all. The results for the English DST task with different GLOVE word vector variants are summarised in Tab. 3; similar trends in results are observed with two other word vector collections. The scores maintain conclusions established in the word similarity task. First, semantic specialisation with ATTRACT-REPEL is again beneficial, and discerning between synonyms and antonyms improves DST performance. However, specialising unseen words (the final X u vector space) yields further improvements in both evaluation settings, supporting our claim that the specialisation signal can be propagated to unseen words.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "This downstream evaluation again demonstrates the importance of non-linearity, as the peak scores are reported with the NONLINEAR-MM variant. More substantial gains in the all setup are observed in the DST task compared to the word similarity task. This stems from a lower coverage of the WOZ data in the AR constraints: 36.3% of all WOZ words are unseen words. Finally, the scores are higher on average in the all setup, since this setup uses more external constraints for AR, and consequently uses more training examples to learn the mapping.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "Other Languages We test the portability of our framework to two other languages for which we have similar evaluation data: German (DE) and Italian (IT). SimLex-999 has been translated and rescored in the two languages by Leviant and Reichart (2015) , and the WOZ data were translated and adapted by . Exactly the same setup is used as in our English experiments, without any additional language-specific fine-tuning. Linguistic constraints were extracted from the same sources: synonyms from the PPDB (135, 868 in DE, 362, 452 in IT) , antonyms from BabelNet (4,124 in DE, and 16,854 in IT). Our starting distributional vector spaces are taken from prior work: IT vectors are from , DE vectors are from (Vuli\u0107 and Korhonen, 2016a) . The results are summarised in Tab. 4.", |
| "cite_spans": [ |
| { |
| "start": 221, |
| "end": 248, |
| "text": "Leviant and Reichart (2015)", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 501, |
| "end": 506, |
| "text": "(135,", |
| "ref_id": null |
| }, |
| { |
| "start": 507, |
| "end": 517, |
| "text": "868 in DE,", |
| "ref_id": null |
| }, |
| { |
| "start": 518, |
| "end": 522, |
| "text": "362,", |
| "ref_id": null |
| }, |
| { |
| "start": 523, |
| "end": 533, |
| "text": "452 in IT)", |
| "ref_id": null |
| }, |
| { |
| "start": 703, |
| "end": 730, |
| "text": "(Vuli\u0107 and Korhonen, 2016a)", |
| "ref_id": "BIBREF66" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "Our post-specialisation approach yields consistent improvements over the initial distributional space and the AR specialisation model in both tasks and for both languages. We do not observe any gain on IT SimLex in the all setup since IT constraints have almost complete coverage of all IT SimLex words (99.3%; the coverage is 64.8% in German). As expected, the DST scores in the all setup are higher than in the hold-out setup due to a larger number of constraints and training examples.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "Lower absolute scores for Italian and German compared to the ones reported for English are due to multiple factors, as discussed previously by : 1) the AR model uses less linguistic constraints for DE and IT; 2) distributional word vectors are induced from smaller corpora; 3) linguistic phenomena (e.g., cases and compounding in DE) contribute to data sparsity and also make the DST task more challenging. However, it is important to stress the consistent gains over the vector space specialised by the state-of-the-art ATTRACT-REPEL model across all three test languages. This indicates that the proposed approach is languageagnostic and portable to multiple languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and Analysis", |
| "sec_num": null |
| }, |
| { |
| "text": "In our second downstream task, we examine the effects of post-specialisation on lexical simplification (LS) in English. LS aims to substitute complex words (i.e., less commonly used words) with their simpler synonyms in the context. Simplified text must keep the meaning of the original text, which is discerning similarity from relatedness is important (e.g., in \"The automobile was set on fire\" the word \"automobile\" should be replaced with \"car\" or \"vehicle\" but not with \"wheel\" or \"driver\"). Table 5 : Lexical simplification performance with post-specialisation applied on three input spaces.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 497, |
| "end": 504, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Downstream Task II: Lexical Simplification", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We employ LIGHT-LS (Glava\u0161 and \u0160tajner, 2015) , a lexical simplification algorithm that: 1) makes substitutions based on word similarities in a semantic vector space, and 2) can be provided an arbitrary embedding space as input. 10 For a complex word, LIGHT-LS considers the most similar words from the vector space as simplification candidates. Candidates are ranked according to several features, indicating simplicity and fitness for the context (semantic relatedness to the context of the complex word). The substitution is made if the best candidate is simpler than the original word. By providing vector spaces post-specialised for semantic similarity to LIGHT-LS, we expect to more often replace complex words with their true synonyms.", |
| "cite_spans": [ |
| { |
| "start": 19, |
| "end": 45, |
| "text": "(Glava\u0161 and \u0160tajner, 2015)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Downstream Task II: Lexical Simplification", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We evaluate LIGHT-LS performance in the all setup on the LS benchmark compiled by Horn et al. (2014) , who crowdsourced 50 manual simplifications for each complex word. As in prior work, we evaluate performance with the following metrics: 1) Accurracy (Acc.) is the number of correct simplifications made (i.e., the system made the simplification and its substitution is found in the list of crowdsourced substitutions), divided by the total number of indicated complex words; 2) Changed (Ch.) is the percentage of indicated complex words 10 https://github.com/codogogo/lightls that were replaced by the system (whether or not the replacement was correct).", |
| "cite_spans": [ |
| { |
| "start": 82, |
| "end": 100, |
| "text": "Horn et al. (2014)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Downstream Task II: Lexical Simplification", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "LS results are summarised in Tab. 5. Postspecialised vector spaces consistently yield 5-6% gain in Accuracy compared to respective distributional vectors and embeddings specialised with the state-of-the-art ATTRACT-REPEL model. Similar to DST evaluation, improvements over ATTRACT-REPEL demonstrate the importance of specialising the vectors of the entire vocabulary and not only the vectors of words from the external constraints.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Downstream Task II: Lexical Simplification", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We have presented a novel post-processing model, termed post-specialisation, that specialises word vectors for the full vocabulary of the input vector space. Previous post-processing specialisation models fine-tune word vectors only for words occurring in external lexical resources. In this work, we have demonstrated that the specialisation of the subspace of seen words can be leveraged to learn a mapping function which specialises vectors for all other words, unseen in the external resources. Our results across word similarity and downstream language understanding tasks show consistent improvements over the state-of-the-art specialisation method for all three test languages.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In future work, we plan to extend our approach to specialisation for asymmetric relations such as hypernymy or meronymy (Glava\u0161 and Ponzetto, 2017; Nickel and Kiela, 2017; Vuli\u0107 and Mrk\u0161i\u0107, 2018) . We will also investigate more sophisticated non-linear functions. The code is available at: https://github.com/cambridgeltl/ post-specialisation/.", |
| "cite_spans": [ |
| { |
| "start": 120, |
| "end": 147, |
| "text": "(Glava\u0161 and Ponzetto, 2017;", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 148, |
| "end": 171, |
| "text": "Nickel and Kiela, 2017;", |
| "ref_id": "BIBREF56" |
| }, |
| { |
| "start": 172, |
| "end": 195, |
| "text": "Vuli\u0107 and Mrk\u0161i\u0107, 2018)", |
| "ref_id": "BIBREF68" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Similarly, for each R pair (x l , xr), the negative pair (t l , tr) is chosen from the in-batch vectors so that t l is the vector furthest away from x l and tr is furthest from xr. All vectors are unit length (re)normalised after each epoch.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We have empirically confirmed the intuition that the first variant is superior to this alternative. We do not report the actual quantitative comparison for brevity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We have also experimented with a simpler hinge loss function without negative examples, formulated as J =", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "According toRamachandran et al. (2017), for deep networks swish has a slight edge over the family of LU/ReLUrelated activations(Maas et al., 2013;He et al., 2015;Klambauer et al., 2017). We also observe a minor (and insignificant) difference in performance in favour of swish.7 While other gold standards such asWordSim-353 (Finkelstein et al., 2002) or MEN(Bruni et al., 2014) coalesce the notions of true semantic similarity and (more broad) conceptual relatedness, SimLex and SimVerb provide explicit guidelines", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://github.com/nmrksic/neural-belief-tracker 9 The NBT keeps word vectors fixed during training to enable generalisation for words unseen in DST training data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We thank the three anonymous reviewers for their insightful suggestions. This work is supported by the ERC Consolidator Grant LEXICAL: Lexical Acquisition Across Languages (no 648909).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Polyglot: Distributed word representations for multilingual NLP", |
| "authors": [ |
| { |
| "first": "Rami", |
| "middle": [], |
| "last": "Al-Rfou", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [], |
| "last": "Perozzi", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [], |
| "last": "Skiena", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "183--192", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rami Al-Rfou, Bryan Perozzi, and Steven Skiena. 2013. Polyglot: Distributed word representations for multilingual NLP. In Proceedings of CoNLL, pages 183-192.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning principled bilingual mappings of word embeddings while preserving monolingual invariance", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2289--2294", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2016. Learning principled bilingual mappings of word em- beddings while preserving monolingual invariance. In Proceedings of EMNLP, pages 2289-2294.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Learning bilingual word embeddings with (almost) no bilingual data", |
| "authors": [ |
| { |
| "first": "Mikel", |
| "middle": [], |
| "last": "Artetxe", |
| "suffix": "" |
| }, |
| { |
| "first": "Gorka", |
| "middle": [], |
| "last": "Labaka", |
| "suffix": "" |
| }, |
| { |
| "first": "Eneko", |
| "middle": [], |
| "last": "Agirre", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "451--462", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikel Artetxe, Gorka Labaka, and Eneko Agirre. 2017. Learning bilingual word embeddings with (almost) no bilingual data. In Proceedings of ACL, pages 451-462.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Tailoring continuous word representations for dependency parsing", |
| "authors": [ |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "809--815", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2014. Tailoring continuous word representations for depen- dency parsing. In Proceedings of ACL, pages 809- 815.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Knowledge-powered deep learning for word embedding", |
| "authors": [ |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Bian", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ECML-PKDD", |
| "volume": "", |
| "issue": "", |
| "pages": "132--148", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jiang Bian, Bin Gao, and Tie-Yan Liu. 2014. Knowledge-powered deep learning for word embed- ding. In Proceedings of ECML-PKDD, pages 132- 148.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the ACL", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the ACL, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Multimodal distributional semantics", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nam-Khanh", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "49", |
| "issue": "", |
| "pages": "1--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elia Bruni, Nam-Khanh Tran, and Marco Baroni. 2014. Multimodal distributional semantics. Journal of Ar- tificial Intelligence Research, 49:1-47.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A fast and accurate dependency parser using neural networks", |
| "authors": [ |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "740--750", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Danqi Chen and Christopher D. Manning. 2014. A fast and accurate dependency parser using neural net- works. In Proceedings of EMNLP, pages 740-750.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Natural language processing (almost) from scratch", |
| "authors": [ |
| { |
| "first": "Ronan", |
| "middle": [], |
| "last": "Collobert", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Karlen", |
| "suffix": "" |
| }, |
| { |
| "first": "Koray", |
| "middle": [], |
| "last": "Kavukcuoglu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pavel", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kuksa", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2493--2537", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ronan Collobert, Jason Weston, L\u00e9on Bottou, Michael Karlen, Koray Kavukcuoglu, and Pavel P. Kuksa. 2011. Natural language processing (almost) from scratch. Journal of Machine Learning Research, 12:2493-2537.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Word translation without parallel data", |
| "authors": [ |
| { |
| "first": "Alexis", |
| "middle": [], |
| "last": "Conneau", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillaume", |
| "middle": [], |
| "last": "Lample", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Ludovic", |
| "middle": [], |
| "last": "Denoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Herv\u00e9", |
| "middle": [], |
| "last": "J\u00e9gou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexis Conneau, Guillaume Lample, Marc'Aurelio Ranzato, Ludovic Denoyer, and Herv\u00e9 J\u00e9gou. 2017. Word translation without parallel data. CoRR, abs/1710.04087.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Morphological smoothing and extrapolation of word embeddings", |
| "authors": [ |
| { |
| "first": "Ryan", |
| "middle": [], |
| "last": "Cotterell", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Eisner", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1651--1660", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryan Cotterell, Hinrich Sch\u00fctze, and Jason Eisner. 2016. Morphological smoothing and extrapolation of word embeddings. In Proceedings of ACL, pages 1651-1660.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Improving zero-shot learning by mitigating the hubness problem", |
| "authors": [ |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICLR (Workshop Papers", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Georgiana Dinu, Angeliki Lazaridou, and Marco Ba- roni. 2015. Improving zero-shot learning by mitigat- ing the hubness problem. In Proceedings of ICLR (Workshop Papers).", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Adaptive subgradient methods for online learning and stochastic optimization", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "C" |
| ], |
| "last": "Duchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Elad", |
| "middle": [], |
| "last": "Hazan", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoram", |
| "middle": [], |
| "last": "Singer", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "12", |
| "issue": "", |
| "pages": "2121--2159", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John C. Duchi, Elad Hazan, and Yoram Singer. 2011. Adaptive subgradient methods for online learning and stochastic optimization. Journal of Machine Learning Research, 12:2121-2159.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Sigmoid-weighted linear units for neural network function approximation in reinforcement learning", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Elfwing", |
| "suffix": "" |
| }, |
| { |
| "first": "Eiji", |
| "middle": [], |
| "last": "Uchibe", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenji", |
| "middle": [], |
| "last": "Doya", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "CoRR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Elfwing, Eiji Uchibe, and Kenji Doya. 2017. Sigmoid-weighted linear units for neural network function approximation in reinforcement learning. CoRR, abs/1702.03118.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Diverse Context for Learning Word Representations", |
| "authors": [ |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manaal Faruqui. 2016. Diverse Context for Learning Word Representations. Ph.D. thesis, Carnegie Mel- lon University.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Retrofitting word vectors to semantic lexicons", |
| "authors": [ |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Jesse", |
| "middle": [], |
| "last": "Dodge", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujay", |
| "middle": [], |
| "last": "Kumar Jauhar", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Eduard", |
| "middle": [], |
| "last": "Hovy", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1606--1615", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manaal Faruqui, Jesse Dodge, Sujay Kumar Jauhar, Chris Dyer, Eduard Hovy, and Noah A. Smith. 2015. Retrofitting word vectors to semantic lexicons. In Proceedings of NAACL-HLT, pages 1606-1615.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Placing search in context: The concept revisited", |
| "authors": [ |
| { |
| "first": "Lev", |
| "middle": [], |
| "last": "Finkelstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Evgeniy", |
| "middle": [], |
| "last": "Gabrilovich", |
| "suffix": "" |
| }, |
| { |
| "first": "Yossi", |
| "middle": [], |
| "last": "Matias", |
| "suffix": "" |
| }, |
| { |
| "first": "Ehud", |
| "middle": [], |
| "last": "Rivlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zach", |
| "middle": [], |
| "last": "Solan", |
| "suffix": "" |
| }, |
| { |
| "first": "Gadi", |
| "middle": [], |
| "last": "Wolfman", |
| "suffix": "" |
| }, |
| { |
| "first": "Eytan", |
| "middle": [], |
| "last": "Ruppin", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "ACM Transactions on Information Systems", |
| "volume": "20", |
| "issue": "1", |
| "pages": "116--131", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lev Finkelstein, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman, and Eytan Ruppin. 2002. Placing search in context: The con- cept revisited. ACM Transactions on Information Systems, 20(1):116-131.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "DeViSE: A deep visualsemantic embedding model", |
| "authors": [ |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Frome", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathon", |
| "middle": [], |
| "last": "Shlens", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| }, |
| { |
| "first": "Marc'aurelio", |
| "middle": [], |
| "last": "Ranzato", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "2121--2129", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrea Frome, Gregory S. Corrado, Jonathon Shlens, Samy Bengio, Jeffrey Dean, Marc'Aurelio Ranzato, and Tomas Mikolov. 2013. DeViSE: A deep visual- semantic embedding model. In Proceedings of NIPS, pages 2121-2129.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "PPDB: The Paraphrase Database", |
| "authors": [ |
| { |
| "first": "Juri", |
| "middle": [], |
| "last": "Ganitkevitch", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "758--764", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juri Ganitkevitch, Benjamin Van Durme, and Chris Callison-Burch. 2013. PPDB: The Paraphrase Database. In Proceedings of NAACL-HLT, pages 758-764.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "SimVerb-3500: A largescale evaluation set of verb similarity", |
| "authors": [ |
| { |
| "first": "Daniela", |
| "middle": [], |
| "last": "Gerz", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2173--2182", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Daniela Gerz, Ivan Vuli\u0107, Felix Hill, Roi Reichart, and Anna Korhonen. 2016. SimVerb-3500: A large- scale evaluation set of verb similarity. In Proceed- ings of EMNLP, pages 2173-2182.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Dual tensor model for detecting asymmetric lexicosemantic relations", |
| "authors": [ |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [ |
| "Paolo" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1758--1768", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Goran Glava\u0161 and Simone Paolo Ponzetto. 2017. Dual tensor model for detecting asymmetric lexico- semantic relations. In Proceedings of EMNLP, pages 1758-1768.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Simplifying lexical simplification: Do we need simplified corpora?", |
| "authors": [ |
| { |
| "first": "Goran", |
| "middle": [], |
| "last": "Glava\u0161", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "\u0160tajner", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "63--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Goran Glava\u0161 and Sanja \u0160tajner. 2015. Simplifying lex- ical simplification: Do we need simplified corpora? In Proceedings of ACL, pages 63-68.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Understanding the difficulty of training deep feedforward neural networks", |
| "authors": [ |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Glorot", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of AISTATS", |
| "volume": "", |
| "issue": "", |
| "pages": "249--256", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xavier Glorot and Yoshua Bengio. 2010. Understand- ing the difficulty of training deep feedforward neural networks. In Proceedings of AISTATS, pages 249- 256.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Distributional structure", |
| "authors": [ |
| { |
| "first": "Zellig", |
| "middle": [ |
| "S" |
| ], |
| "last": "Harris", |
| "suffix": "" |
| } |
| ], |
| "year": 1954, |
| "venue": "Word", |
| "volume": "10", |
| "issue": "", |
| "pages": "146--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zellig S. Harris. 1954. Distributional structure. Word, 10(23):146-162.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Delving deep into rectifiers: Surpassing human-level performance on ImageNet classification", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICCV", |
| "volume": "", |
| "issue": "", |
| "pages": "1026--1034", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2015. Delving deep into rectifiers: Surpass- ing human-level performance on ImageNet classifi- cation. In Proceedings of ICCV, pages 1026-1034.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "The Second Dialog State Tracking Challenge", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Jason", |
| "middle": [ |
| "D" |
| ], |
| "last": "Wiliams", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of SIGDIAL", |
| "volume": "", |
| "issue": "", |
| "pages": "263--272", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Henderson, Blaise Thomson, and Jason D. Wiliams. 2014. The Second Dialog State Tracking Challenge. In Proceedings of SIGDIAL, pages 263- 272.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "SimLex-999: Evaluating semantic models with (genuine) similarity estimation", |
| "authors": [ |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Computational Linguistics", |
| "volume": "41", |
| "issue": "4", |
| "pages": "665--695", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Felix Hill, Roi Reichart, and Anna Korhonen. 2015. SimLex-999: Evaluating semantic models with (gen- uine) similarity estimation. Computational Linguis- tics, 41(4):665-695.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Long Short-Term Memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long Short-Term Memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Learning a lexical simplifier using wikipedia", |
| "authors": [ |
| { |
| "first": "Colby", |
| "middle": [], |
| "last": "Horn", |
| "suffix": "" |
| }, |
| { |
| "first": "Cathryn", |
| "middle": [], |
| "last": "Manduca", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Kauchak", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "458--463", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colby Horn, Cathryn Manduca, and David Kauchak. 2014. Learning a lexical simplifier using wikipedia. In Proceedings of the ACL, pages 458-463.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Specializing word embeddings for similarity or relatedness", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Hill", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2044--2048", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela, Felix Hill, and Stephen Clark. 2015. Specializing word embeddings for similarity or re- latedness. In Proceedings of EMNLP, pages 2044- 2048.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Intent detection using semantically enriched word embeddings", |
| "authors": [ |
| { |
| "first": "Joo-Kyung", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| }, |
| { |
| "first": "Gokhan", |
| "middle": [], |
| "last": "Tur", |
| "suffix": "" |
| }, |
| { |
| "first": "Asli", |
| "middle": [], |
| "last": "Celikyilmaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ye-Yi", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of SLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joo-Kyung Kim, Gokhan Tur, Asli Celikyilmaz, Bin Cao, and Ye-Yi Wang. 2016. Intent detection us- ing semantically enriched word embeddings. In Pro- ceedings of SLT.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Adam: A method for stochastic optimization", |
| "authors": [ |
| { |
| "first": "Diederik", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kingma", |
| "suffix": "" |
| }, |
| { |
| "first": "Jimmy", |
| "middle": [], |
| "last": "Ba", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ICLR (Conference Track)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In Proceedings of ICLR (Conference Track).", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Roget's 21st Century Thesaurus", |
| "authors": [ |
| { |
| "first": "Barbara", |
| "middle": [ |
| "Ann" |
| ], |
| "last": "Kipfer", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barbara Ann Kipfer. 2009. Roget's 21st Century The- saurus (3rd Edition). Philip Lief Group.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Self-normalizing neural networks", |
| "authors": [ |
| { |
| "first": "G\u00fcnter", |
| "middle": [], |
| "last": "Klambauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Unterthiner", |
| "suffix": "" |
| }, |
| { |
| "first": "Andreas", |
| "middle": [], |
| "last": "Mayr", |
| "suffix": "" |
| }, |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G\u00fcnter Klambauer, Thomas Unterthiner, Andreas Mayr, and Sepp Hochreiter. 2017. Self-normalizing neural networks. CoRR, abs/1706.02515.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "An empirical analysis of optimization for max-margin NLP", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "K" |
| ], |
| "last": "Kummerfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "Taylor", |
| "middle": [], |
| "last": "Berg-Kirkpatrick", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Klein", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "273--279", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan K. Kummerfeld, Taylor Berg-Kirkpatrick, and Dan Klein. 2015. An empirical analysis of op- timization for max-margin NLP. In Proceedings of EMNLP, pages 273-279.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Hubness and pollution: Delving into cross-space mapping for zero-shot learning", |
| "authors": [ |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "Georgiana", |
| "middle": [], |
| "last": "Dinu", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "270--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angeliki Lazaridou, Georgiana Dinu, and Marco Ba- roni. 2015. Hubness and pollution: Delving into cross-space mapping for zero-shot learning. In Pro- ceedings of ACL, pages 270-280.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Separated by an un-common language: Towards judgment language informed vector space modeling", |
| "authors": [ |
| { |
| "first": "Ira", |
| "middle": [], |
| "last": "Leviant", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ira Leviant and Roi Reichart. 2015. Separated by an un-common language: Towards judgment lan- guage informed vector space modeling. CoRR, abs/1508.00106.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Dependencybased word embeddings", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "302--308", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy and Yoav Goldberg. 2014. Dependency- based word embeddings. In Proceedings of ACL, pages 302-308.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Improving distributional similarity with lessons learned from word embeddings", |
| "authors": [ |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Goldberg", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the ACL", |
| "volume": "3", |
| "issue": "", |
| "pages": "211--225", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Omer Levy, Yoav Goldberg, and Ido Dagan. 2015. Im- proving distributional similarity with lessons learned from word embeddings. Transactions of the ACL, 3:211-225.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Demystifying ResNet", |
| "authors": [ |
| { |
| "first": "Sihan", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiantao", |
| "middle": [], |
| "last": "Jiao", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanjun", |
| "middle": [], |
| "last": "Han", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsachy", |
| "middle": [], |
| "last": "Weissman", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sihan Li, Jiantao Jiao, Yanjun Han, and Tsachy Weissman. 2016. Demystifying ResNet. CoRR, abs/1611.01186.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Learning semantic word embeddings based on ordinal knowledge constraints", |
| "authors": [ |
| { |
| "first": "Quan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hui", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "Si", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhen-Hua", |
| "middle": [], |
| "last": "Ling", |
| "suffix": "" |
| }, |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1501--1511", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quan Liu, Hui Jiang, Si Wei, Zhen-Hua Ling, and Yu Hu. 2015. Learning semantic word embeddings based on ordinal knowledge constraints. In Proceed- ings of ACL, pages 1501-1511.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Rectifier nonlinearities improve neural network acoustic models", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [ |
| "L" |
| ], |
| "last": "Maas", |
| "suffix": "" |
| }, |
| { |
| "first": "Awni", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Hannun", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "Y" |
| ], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew L. Maas, Awni Y. Hannun, and Andrew Y. Ng. 2013. Rectifier nonlinearities improve neural net- work acoustic models. In Proceedings of ICML.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Context2vec: Learning generic context embedding with bidirectional LSTM", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Melamud", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Goldberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "51--61", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Melamud, Jacob Goldberger, and Ido Dagan. 2016a. Context2vec: Learning generic context em- bedding with bidirectional LSTM. In Proceedings of CoNLL, pages 51-61.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "The role of context types and dimensionality in learning word embeddings", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Melamud", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Mcclosky", |
| "suffix": "" |
| }, |
| { |
| "first": "Siddharth", |
| "middle": [], |
| "last": "Patwardhan", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "1030--1040", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Melamud, David McClosky, Siddharth Patward- han, and Mohit Bansal. 2016b. The role of context types and dimensionality in learning word embed- dings. In Proceedings of NAACL-HLT, pages 1030- 1040.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Exploiting similarities among languages for machine translation", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "CoRR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Quoc V. Le, and Ilya Sutskever. 2013a. Exploiting similarities among languages for machine translation. arXiv preprint, CoRR, abs/1309.4168.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Distributed representations of words and phrases and their compositionality", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Ilya", |
| "middle": [], |
| "last": "Sutskever", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Gregory", |
| "middle": [ |
| "S" |
| ], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "3111--3119", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Ilya Sutskever, Kai Chen, Gregory S. Corrado, and Jeffrey Dean. 2013b. Distributed rep- resentations of words and phrases and their compo- sitionality. In Proceedings of NIPS, pages 3111- 3119.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "All you need is a good init", |
| "authors": [ |
| { |
| "first": "Dmytro", |
| "middle": [], |
| "last": "Mishkin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiri", |
| "middle": [], |
| "last": "Matas", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ICLR (Conference Track)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dmytro Mishkin and Jiri Matas. 2016. All you need is a good init. In Proceedings of ICLR (Conference Track).", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Neural belief tracker: Data-driven dialogue state tracking", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00d3", |
| "middle": [], |
| "last": "Diarmuid", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1777--1788", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Mrk\u0161i\u0107, Diarmuid \u00d3 S\u00e9aghdha, Tsung-Hsien Wen, Blaise Thomson, and Steve Young. 2017. Neu- ral belief tracker: Data-driven dialogue state track- ing. In Proceedings of ACL, pages 1777-1788.", |
| "links": null |
| }, |
| "BIBREF49": { |
| "ref_id": "b49", |
| "title": "Multi-domain dialog state tracking using recurrent neural networks", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00d3", |
| "middle": [], |
| "last": "Diarmuid", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "794--799", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Mrk\u0161i\u0107, Diarmuid \u00d3 S\u00e9aghdha, Blaise Thom- son, Milica Ga\u0161i\u0107, Pei-Hao Su, David Vandyke, Tsung-Hsien Wen, and Steve Young. 2015. Multi- domain dialog state tracking using recurrent neural networks. In Proceedings of ACL, pages 794-799.", |
| "links": null |
| }, |
| "BIBREF50": { |
| "ref_id": "b50", |
| "title": "Counter-fitting word vectors to linguistic constraints", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00d3", |
| "middle": [], |
| "last": "Diarmuid", |
| "suffix": "" |
| }, |
| { |
| "first": "Blaise", |
| "middle": [], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Thomson", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [ |
| "Maria" |
| ], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Rojas-Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Mrk\u0161i\u0107, Diarmuid \u00d3 S\u00e9aghdha, Blaise Thom- son, Milica Ga\u0161i\u0107, Lina Maria Rojas-Barahona, Pei- Hao Su, David Vandyke, Tsung-Hsien Wen, and Steve Young. 2016. Counter-fitting word vectors to linguistic constraints. In Proceedings of NAACL- HLT.", |
| "links": null |
| }, |
| "BIBREF51": { |
| "ref_id": "b51", |
| "title": "Semantic specialisation of distributional word vector spaces using monolingual and cross-lingual constraints", |
| "authors": [ |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00d3", |
| "middle": [], |
| "last": "Diarmuid", |
| "suffix": "" |
| }, |
| { |
| "first": "Ira", |
| "middle": [], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Leviant", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the ACL", |
| "volume": "5", |
| "issue": "", |
| "pages": "309--324", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nikola Mrk\u0161i\u0107, Ivan Vuli\u0107, Diarmuid \u00d3 S\u00e9aghdha, Ira Leviant, Roi Reichart, Milica Ga\u0161i\u0107, Anna Korho- nen, and Steve Young. 2017. Semantic specialisa- tion of distributional word vector spaces using mono- lingual and cross-lingual constraints. Transactions of the ACL, 5:309-324.", |
| "links": null |
| }, |
| "BIBREF52": { |
| "ref_id": "b52", |
| "title": "Rectified linear units improve restricted Boltzmann machines", |
| "authors": [ |
| { |
| "first": "Vinod", |
| "middle": [], |
| "last": "Nair", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of ICML", |
| "volume": "", |
| "issue": "", |
| "pages": "807--814", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Vinod Nair and Geoffrey E. Hinton. 2010. Rectified linear units improve restricted Boltzmann machines. In Proceedings of ICML, pages 807-814.", |
| "links": null |
| }, |
| "BIBREF53": { |
| "ref_id": "b53", |
| "title": "BabelNet: The automatic construction, evaluation and application of a wide-coverage multilingual semantic network", |
| "authors": [ |
| { |
| "first": "Roberto", |
| "middle": [], |
| "last": "Navigli", |
| "suffix": "" |
| }, |
| { |
| "first": "Simone", |
| "middle": [ |
| "Paolo" |
| ], |
| "last": "Ponzetto", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Artificial Intelligence", |
| "volume": "193", |
| "issue": "", |
| "pages": "217--250", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roberto Navigli and Simone Paolo Ponzetto. 2012. Ba- belNet: The automatic construction, evaluation and application of a wide-coverage multilingual seman- tic network. Artificial Intelligence, 193:217-250.", |
| "links": null |
| }, |
| "BIBREF54": { |
| "ref_id": "b54", |
| "title": "Hierarchical embeddings for hypernymy detection and directionality", |
| "authors": [ |
| { |
| "first": "Kim", |
| "middle": [ |
| "Anh" |
| ], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Maximilian", |
| "middle": [], |
| "last": "K\u00f6per", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabine", |
| "middle": [], |
| "last": "Schulte im Walde", |
| "suffix": "" |
| }, |
| { |
| "first": "Ngoc", |
| "middle": [ |
| "Thang" |
| ], |
| "last": "Vu", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "233--243", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim Anh Nguyen, Maximilian K\u00f6per, Sabine Schulte im Walde, and Ngoc Thang Vu. 2017. Hierarchical embeddings for hypernymy detection and directionality. In Proceedings of EMNLP, pages 233-243.", |
| "links": null |
| }, |
| "BIBREF55": { |
| "ref_id": "b55", |
| "title": "Integrating distributional lexical contrast into word embeddings for antonym-synonym distinction", |
| "authors": [ |
| { |
| "first": "Kim", |
| "middle": [ |
| "Anh" |
| ], |
| "last": "Nguyen", |
| "suffix": "" |
| }, |
| { |
| "first": "Sabine", |
| "middle": [], |
| "last": "Schulte im Walde", |
| "suffix": "" |
| }, |
| { |
| "first": "Ngoc", |
| "middle": [ |
| "Thang" |
| ], |
| "last": "Vu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "454--459", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kim Anh Nguyen, Sabine Schulte im Walde, and Ngoc Thang Vu. 2016. Integrating distributional lexical contrast into word embeddings for antonym- synonym distinction. In Proceedings of ACL, pages 454-459.", |
| "links": null |
| }, |
| "BIBREF56": { |
| "ref_id": "b56", |
| "title": "Poincar\u00e9 embeddings for learning hierarchical representations", |
| "authors": [ |
| { |
| "first": "Maximilian", |
| "middle": [], |
| "last": "Nickel", |
| "suffix": "" |
| }, |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of NIPS", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maximilian Nickel and Douwe Kiela. 2017. Poincar\u00e9 embeddings for learning hierarchical representa- tions. In Proceedings of NIPS.", |
| "links": null |
| }, |
| "BIBREF57": { |
| "ref_id": "b57", |
| "title": "Word embedding-based antonym detection using thesauri and distributional information", |
| "authors": [ |
| { |
| "first": "Masataka", |
| "middle": [], |
| "last": "Ono", |
| "suffix": "" |
| }, |
| { |
| "first": "Makoto", |
| "middle": [], |
| "last": "Miwa", |
| "suffix": "" |
| }, |
| { |
| "first": "Yutaka", |
| "middle": [], |
| "last": "Sasaki", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "984--989", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Masataka Ono, Makoto Miwa, and Yutaka Sasaki. 2015. Word embedding-based antonym detection using thesauri and distributional information. In Proceedings of NAACL-HLT, pages 984-989.", |
| "links": null |
| }, |
| "BIBREF58": { |
| "ref_id": "b58", |
| "title": "Encoding prior knowledge with eigenword embeddings", |
| "authors": [ |
| { |
| "first": "Dominique", |
| "middle": [], |
| "last": "Osborne", |
| "suffix": "" |
| }, |
| { |
| "first": "Shashi", |
| "middle": [], |
| "last": "Narayan", |
| "suffix": "" |
| }, |
| { |
| "first": "Shay", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Transactions of the ACL", |
| "volume": "4", |
| "issue": "", |
| "pages": "417--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dominique Osborne, Shashi Narayan, and Shay Cohen. 2016. Encoding prior knowledge with eigenword embeddings. Transactions of the ACL, 4:417-430.", |
| "links": null |
| }, |
| "BIBREF59": { |
| "ref_id": "b59", |
| "title": "PPDB 2.0: Better paraphrase ranking, fine-grained entailment relations, word embeddings, and style classification", |
| "authors": [ |
| { |
| "first": "Ellie", |
| "middle": [], |
| "last": "Pavlick", |
| "suffix": "" |
| }, |
| { |
| "first": "Pushpendre", |
| "middle": [], |
| "last": "Rastogi", |
| "suffix": "" |
| }, |
| { |
| "first": "Juri", |
| "middle": [], |
| "last": "Ganitkevitch", |
| "suffix": "" |
| }, |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Van Durme", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Callison-Burch", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "425--430", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellie Pavlick, Pushpendre Rastogi, Juri Ganitkevitch, Benjamin Van Durme, and Chris Callison-Burch. 2015. PPDB 2.0: Better paraphrase ranking, fine- grained entailment relations, word embeddings, and style classification. In Proceedings of ACL, pages 425-430.", |
| "links": null |
| }, |
| "BIBREF60": { |
| "ref_id": "b60", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. Glove: Global vectors for word rep- resentation. In Proceedings of EMNLP, pages 1532- 1543.", |
| "links": null |
| }, |
| "BIBREF61": { |
| "ref_id": "b61", |
| "title": "Searching for activation functions", |
| "authors": [ |
| { |
| "first": "Prajit", |
| "middle": [], |
| "last": "Ramachandran", |
| "suffix": "" |
| }, |
| { |
| "first": "Barret", |
| "middle": [], |
| "last": "Zoph", |
| "suffix": "" |
| }, |
| { |
| "first": "Quoc", |
| "middle": [ |
| "V" |
| ], |
| "last": "Le", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Prajit Ramachandran, Barret Zoph, and Quoc V. Le. 2017. Searching for activation functions. CoRR, abs/1710.05941.", |
| "links": null |
| }, |
| "BIBREF62": { |
| "ref_id": "b62", |
| "title": "AutoExtend: Extending word embeddings to embeddings for synsets and lexemes", |
| "authors": [ |
| { |
| "first": "Sascha", |
| "middle": [], |
| "last": "Rothe", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1793--1803", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sascha Rothe and Hinrich Sch\u00fctze. 2015. AutoEx- tend: Extending word embeddings to embeddings for synsets and lexemes. In Proceedings of ACL, pages 1793-1803.", |
| "links": null |
| }, |
| "BIBREF63": { |
| "ref_id": "b63", |
| "title": "A survey of cross-lingual embedding models", |
| "authors": [ |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Ruder", |
| "suffix": "" |
| }, |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anders", |
| "middle": [], |
| "last": "S\u00f8gaard", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sebastian Ruder, Ivan Vuli\u0107, and Anders S\u00f8gaard. 2017. A survey of cross-lingual embedding models. CoRR, abs/1706.04902.", |
| "links": null |
| }, |
| "BIBREF64": { |
| "ref_id": "b64", |
| "title": "Symmetric pattern based word embeddings for improved word similarity prediction", |
| "authors": [ |
| { |
| "first": "Roy", |
| "middle": [], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "Ari", |
| "middle": [], |
| "last": "Rappoport", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of CoNLL", |
| "volume": "", |
| "issue": "", |
| "pages": "258--267", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Roy Schwartz, Roi Reichart, and Ari Rappoport. 2015. Symmetric pattern based word embeddings for im- proved word similarity prediction. In Proceedings of CoNLL, pages 258-267.", |
| "links": null |
| }, |
| "BIBREF65": { |
| "ref_id": "b65", |
| "title": "Continuously learning neural dialogue management", |
| "authors": [ |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [], |
| "last": "Rojas-Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Ultes", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pei-Hao Su, Milica Ga\u0161i\u0107, Nikola Mrk\u0161i\u0107, Lina Rojas- Barahona, Stefan Ultes, David Vandyke, Tsung- Hsien Wen, and Steve Young. 2016. Continuously learning neural dialogue management. In arXiv preprint: 1606.02689.", |
| "links": null |
| }, |
| "BIBREF66": { |
| "ref_id": "b66", |
| "title": "Is \"universal syntax\" universally useful for learning distributed word representations?", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "518--524", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107 and Anna Korhonen. 2016a. Is \"universal syntax\" universally useful for learning distributed word representations? In Proceedings of ACL, pages 518-524.", |
| "links": null |
| }, |
| "BIBREF67": { |
| "ref_id": "b67", |
| "title": "On the role of seed lexicons in learning bilingual word embeddings", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "247--257", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107 and Anna Korhonen. 2016b. On the role of seed lexicons in learning bilingual word embeddings. In Proceedings of ACL, pages 247-257.", |
| "links": null |
| }, |
| "BIBREF68": { |
| "ref_id": "b68", |
| "title": "Specialising word vectors for lexical entailment", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of NAACL-HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107 and Nikola Mrk\u0161i\u0107. 2018. Specialising word vectors for lexical entailment. In Proceedings of NAACL-HLT.", |
| "links": null |
| }, |
| "BIBREF69": { |
| "ref_id": "b69", |
| "title": "Cross-lingual induction and transfer of verb classes based on word vector space specialisation", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2536--2548", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107, Nikola Mrk\u0161i\u0107, and Anna Korhonen. 2017a. Cross-lingual induction and transfer of verb classes based on word vector space specialisation. In Pro- ceedings of EMNLP, pages 2536-2548.", |
| "links": null |
| }, |
| "BIBREF70": { |
| "ref_id": "b70", |
| "title": "Morph-fitting: Fine-tuning word vector spaces with simple language-specific rules", |
| "authors": [ |
| { |
| "first": "Ivan", |
| "middle": [], |
| "last": "Vuli\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Roi", |
| "middle": [], |
| "last": "Reichart", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00d3", |
| "middle": [], |
| "last": "Diarmuid", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "S\u00e9aghdha", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "56--68", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ivan Vuli\u0107, Nikola Mrk\u0161i\u0107, Roi Reichart, Diarmuid \u00d3 S\u00e9aghdha, Steve Young, and Anna Korhonen. 2017b. Morph-fitting: Fine-tuning word vector spaces with simple language-specific rules. In Pro- ceedings of ACL, pages 56-68.", |
| "links": null |
| }, |
| "BIBREF71": { |
| "ref_id": "b71", |
| "title": "A network-based end-to-end trainable task-oriented dialogue system", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Tsung-Hsien Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Lina", |
| "middle": [ |
| "M" |
| ], |
| "last": "Ga\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Rojas-Barahona", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Ultes", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of EACL", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, David Vandyke, Nikola Mrk\u0161i\u0107, Milica Ga\u0161i\u0107, Lina M. Rojas-Barahona, Pei-Hao Su, Stefan Ultes, and Steve Young. 2017. A network- based end-to-end trainable task-oriented dialogue system. In Proceedings of EACL.", |
| "links": null |
| }, |
| "BIBREF72": { |
| "ref_id": "b72", |
| "title": "WSABIE: Scaling up to large vocabulary image annotation", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [], |
| "last": "Weston", |
| "suffix": "" |
| }, |
| { |
| "first": "Samy", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Nicolas", |
| "middle": [], |
| "last": "Usunier", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2764--2770", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason Weston, Samy Bengio, and Nicolas Usunier. 2011. WSABIE: Scaling up to large vocabulary image annotation. In Proceedings of IJCAI, pages 2764-2770.", |
| "links": null |
| }, |
| "BIBREF73": { |
| "ref_id": "b73", |
| "title": "From paraphrase database to compositional paraphrase model and back", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Wieting", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Kevin", |
| "middle": [], |
| "last": "Gimpel", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [], |
| "last": "Livescu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Transactions of the ACL", |
| "volume": "3", |
| "issue": "", |
| "pages": "345--358", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Wieting, Mohit Bansal, Kevin Gimpel, and Karen Livescu. 2015. From paraphrase database to compo- sitional paraphrase model and back. Transactions of the ACL, 3:345-358.", |
| "links": null |
| }, |
| "BIBREF74": { |
| "ref_id": "b74", |
| "title": "The Dialog State Tracking Challenge series: A review", |
| "authors": [ |
| { |
| "first": "Jason", |
| "middle": [ |
| "D" |
| ], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Antoine", |
| "middle": [], |
| "last": "Raux", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Henderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Dialogue & Discourse", |
| "volume": "7", |
| "issue": "3", |
| "pages": "4--33", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jason D. Williams, Antoine Raux, and Matthew Hen- derson. 2016. The Dialog State Tracking Challenge series: A review. Dialogue & Discourse, 7(3):4-33.", |
| "links": null |
| }, |
| "BIBREF75": { |
| "ref_id": "b75", |
| "title": "RC-NET: A general framework for incorporating knowledge into word representations", |
| "authors": [ |
| { |
| "first": "Chang", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yalong", |
| "middle": [], |
| "last": "Bai", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiang", |
| "middle": [], |
| "last": "Bian", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Gang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoguang", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Yan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of CIKM", |
| "volume": "", |
| "issue": "", |
| "pages": "1219--1228", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang Xu, Yalong Bai, Jiang Bian, Bin Gao, Gang Wang, Xiaoguang Liu, and Tie-Yan Liu. 2014. RC- NET: A general framework for incorporating knowl- edge into word representations. In Proceedings of CIKM, pages 1219-1228.", |
| "links": null |
| }, |
| "BIBREF76": { |
| "ref_id": "b76", |
| "title": "Cognitive User Interfaces", |
| "authors": [ |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "IEEE Signal Processing Magazine", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steve Young. 2010. Cognitive User Interfaces. IEEE Signal Processing Magazine.", |
| "links": null |
| }, |
| "BIBREF77": { |
| "ref_id": "b77", |
| "title": "Improving lexical embeddings with semantic knowledge", |
| "authors": [ |
| { |
| "first": "Mo", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Dredze", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "545--550", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mo Yu and Mark Dredze. 2014. Improving lexical em- beddings with semantic knowledge. In Proceedings of ACL, pages 545-550.", |
| "links": null |
| }, |
| "BIBREF78": { |
| "ref_id": "b78", |
| "title": "Word semantic representations using bayesian probabilistic tensor factorization", |
| "authors": [ |
| { |
| "first": "Jingwei", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeremy", |
| "middle": [], |
| "last": "Salwen", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Glass", |
| "suffix": "" |
| }, |
| { |
| "first": "Alfio", |
| "middle": [], |
| "last": "Gliozzo", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1522--1531", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jingwei Zhang, Jeremy Salwen, Michael Glass, and Al- fio Gliozzo. 2014. Word semantic representations using bayesian probabilistic tensor factorization. In Proceedings of EMNLP, pages 1522-1531.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "type_str": "figure", |
| "num": null, |
| "text": "The results of the hold-out experiments on SimLex-999 and SimVerb-3500 after applying our non-linear vector space transformation with different depths (hidden layer size H, see Fig. 1b). The results are presented as averages over 20 runs with the NONLINEAR-MM variant, the shaded regions are spanned by the maximum and minimum scores obtained. Thick horizontal lines refer to Spearman's rank correlations achieved in the initial space X d . H = 0 denotes the standard linear regression model (Mikolov et al., 2013a) (LINEAR-MM shown since it outperforms LINEAR-MSE)." |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "num": null, |
| "text": "X d .408 .286 .414 .275 .383 .255 .408 .286 .414 .275 .383 .255 +AR specialisation: X s .408 .286 .414 .275 .383 .255 .690 .578 .658 .544 .629 .502 ++Mapping unseen: X f LINEAR-MSE .504 .384 .447 .309 .405 .285 .690 .578 .656 .551 .628 .502Spearman's \u03c1 correlation scores for three word vector collections on two English word similarity datasets, SimLex-999 (SL) and SimVerb-3500 (SV), using different mapping variants, evaluation protocols, and word vector spaces: from the initial distributional space X d to the fully specialised space X f . H = 5.", |
| "html": null, |
| "content": "<table><tr><td/><td/><td/><td/><td/><td/><td/><td/><td colspan=\"4\">Setup: hold-out</td><td/><td/><td/><td/><td/><td/><td>Setup: all</td></tr><tr><td/><td/><td/><td/><td/><td colspan=\"2\">GLOVE</td><td/><td colspan=\"4\">SGNS-BOW2</td><td colspan=\"2\">FASTTEXT</td><td/><td>GLOVE</td><td/><td/><td>SGNS-BOW2</td><td>FASTTEXT</td></tr><tr><td/><td/><td/><td/><td/><td>SL</td><td/><td>SV</td><td colspan=\"2\">SL</td><td/><td>SV</td><td>SL</td><td>SV</td><td/><td>SL</td><td colspan=\"2\">SV</td><td>SL</td><td>SV</td><td>SL</td><td>SV</td></tr><tr><td colspan=\"5\">Distributional: NONLINEAR-MSE LINEAR-MM NONLINEAR-MM</td><td colspan=\"7\">.549 .407 .484 .344 .548 .422 .468 .329 .603 .480 .531 .391</td><td colspan=\"7\">.459 .329 .694 .586 .663 .556 .419 .308 .697 .582 .663 .554 .471 .349 .705 .600 .667 .562</td><td>.631 .506 .628 .487 .638 .507</td></tr><tr><td/><td>0.65</td><td/><td/><td>SimLex-999</td><td colspan=\"2\">SimVerb-3500</td><td/><td>0.65</td><td/><td/><td colspan=\"2\">SimLex-999</td><td colspan=\"2\">SimVerb-3500</td><td/><td/><td>0.65</td><td>SimLex-999</td><td>SimVerb-3500</td></tr><tr><td>Spearman's \u03c1 correlation</td><td>0.35 0.45 0.55</td><td/><td/><td/><td/><td/><td/><td>0.35 0.45 0.55</td><td/><td/><td/><td/><td/><td/><td/><td>Spearman's \u03c1 correlation</td><td>0.35 0.45 0.55</td></tr><tr><td/><td>0.25</td><td>0</td><td>1</td><td colspan=\"2\">2 Number of hidden layers H 3 4 5 6</td><td>7</td><td>8</td><td>0.25</td><td>0</td><td>1</td><td colspan=\"3\">2 Number of hidden layers H 3 4 5 6</td><td>7</td><td>8</td><td/><td>0.25</td><td>0</td><td>1</td><td>2 Number of hidden layers H 3 4 5 6</td><td>7</td><td>8</td></tr><tr><td/><td/><td colspan=\"3\">(a) GLOVE</td><td/><td/><td/><td colspan=\"5\">(b) SGNS-BOW2</td><td/><td/><td/><td/><td colspan=\"2\">(c) FASTTEXT</td></tr></table>" |
| }, |
| "TABREF1": { |
| "type_str": "table", |
| "num": null, |
| "text": "RFit .493 .365 .412 .285 .413 .279 X f : CFit .540 .401 .439 .318 .441 .306", |
| "html": null, |
| "content": "<table><tr><td/><td>GLOVE</td><td/><td colspan=\"2\">SGNS-BOW2</td><td colspan=\"2\">FASTTEXT</td></tr><tr><td/><td>SL</td><td>SV</td><td>SL</td><td>SV</td><td>SL</td><td>SV</td></tr><tr><td>X d X</td><td colspan=\"4\">.408 .286 .414 .275</td><td colspan=\"2\">.383 .255</td></tr><tr><td>).</td><td/><td/><td/><td/><td/></tr></table>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "num": null, |
| "text": "Results on word similarity (Spearman's \u03c1) and DST (joint goal accuracy) for German and Italian.", |
| "html": null, |
| "content": "<table><tr><td>Vectors</td><td>Specialisation</td><td>Acc.</td><td>Ch.</td></tr><tr><td>GLOVE</td><td>Distributional: X d +AR Spec.: X s \u222a Xu ++Mapping: X f</td><td>66.0 67.6 72.3</td><td>94.0 87.0 87.6</td></tr><tr><td>FASTTEXT</td><td>Distributional: X d +AR Spec.: X s \u222a Xu ++Mapping: X f</td><td>57.8 69.8 74.3</td><td>84.0 89.4 88.8</td></tr><tr><td>SGNS-BOW2</td><td>Distributional: X d +AR Spec.: X s \u222a Xu ++Mapping: X f</td><td>56.0 64.4 70.9</td><td>79.1 86.7 86.8</td></tr></table>" |
| } |
| } |
| } |
| } |