| { |
| "paper_id": "S19-1007", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T15:48:07.923656Z" |
| }, |
| "title": "Second-order contexts from lexical substitutes for few-shot learning of word representations", |
| "authors": [ |
| { |
| "first": "Qianchu", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": { |
| "addrLine": "9 West Road", |
| "postCode": "CB3 9DA", |
| "settlement": "Cambridge", |
| "country": "United Kingdom" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Diana", |
| "middle": [], |
| "last": "McCarthy", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": { |
| "addrLine": "9 West Road", |
| "postCode": "CB3 9DA", |
| "settlement": "Cambridge", |
| "country": "United Kingdom" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "Language Technology Lab", |
| "institution": "University of Cambridge", |
| "location": { |
| "addrLine": "9 West Road", |
| "postCode": "CB3 9DA", |
| "settlement": "Cambridge", |
| "country": "United Kingdom" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "There is a growing awareness of the need to handle rare and unseen words in word representation modelling. In this paper, we focus on few-shot learning of emerging concepts that fully exploits only a few available contexts. We introduce a substitute-based context representation technique that can be applied on an existing word embedding space. Previous context-based approaches to modelling unseen words only consider bag-of-word first-order contexts, whereas our method aggregates contexts as second-order substitutes that are produced by a sequence-aware sentence completion model. We experimented with three tasks that aim to test the modelling of emerging concepts. We found that these tasks show different emphasis on first and second order contexts, and our substitute-based method achieved superior performance on naturally-occurring contexts from corpora.", |
| "pdf_parse": { |
| "paper_id": "S19-1007", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "There is a growing awareness of the need to handle rare and unseen words in word representation modelling. In this paper, we focus on few-shot learning of emerging concepts that fully exploits only a few available contexts. We introduce a substitute-based context representation technique that can be applied on an existing word embedding space. Previous context-based approaches to modelling unseen words only consider bag-of-word first-order contexts, whereas our method aggregates contexts as second-order substitutes that are produced by a sequence-aware sentence completion model. We experimented with three tasks that aim to test the modelling of emerging concepts. We found that these tasks show different emphasis on first and second order contexts, and our substitute-based method achieved superior performance on naturally-occurring contexts from corpora.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "As language vocabulary follows the zipfian distribution, we expect to encounter a large number of rare and unseen words no matter how large the training corpus is. The effective handling of such words is thus crucial for Natural Language Processing (NLP).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Attempts to learn rare and unseen word representations can be categorized into the following three approaches: (1) constructing target word embeddings from the subword components (Pinter et al., 2017; Bojanowski et al., 2017) , (2). leveraging definitions or relational structures from external resources such as Wordnet (Bahdanau et al., 2017; Pilehvar and Collier, 2017) , and (3) modelling the target word from few available contexts. Our paper falls into the last approach.", |
| "cite_spans": [ |
| { |
| "start": 179, |
| "end": 200, |
| "text": "(Pinter et al., 2017;", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 201, |
| "end": 225, |
| "text": "Bojanowski et al., 2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 321, |
| "end": 344, |
| "text": "(Bahdanau et al., 2017;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 345, |
| "end": 372, |
| "text": "Pilehvar and Collier, 2017)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We demonstrate improvements in performance by employing an alternative context representation, second-order lexical substitutes, as opposed to the traditional bag of word context representations. In line with previous research in this area, we evaluate our methodology on three tasks that measure the quality of the induced unseen word representation from contexts (Lazaridou et al., 2017; Herbelot and Baroni, 2017; Khodak et al., 2018) . Our results reveal that the three tasks involve different types of contexts which put different emphasis on first or second order contexts. Our second-order substitute-based method achieves the best performance for modelling rare words in natural contexts from corpora. In the tasks in which both first order and second order contexts are important, the ensemble of these two types of contexts yields superior performance. 1 2 Related work", |
| "cite_spans": [ |
| { |
| "start": 365, |
| "end": 389, |
| "text": "(Lazaridou et al., 2017;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 390, |
| "end": 416, |
| "text": "Herbelot and Baroni, 2017;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 417, |
| "end": 437, |
| "text": "Khodak et al., 2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The most naive way of inducing new word representation from contexts is to simply take the average of context word embeddings that co-occur with the target word in a sentence. With stop words removed, this simple method has proven to be a strong baseline as shown in Lazaridou et al. (2017) and Herbelot and Baroni (2017) . A potential improvement from the simple additive baseline model is that we weigh words with ISF (inverse sentence frequency). We follow the definition of ISF in Samardzhiev et al. (2018) and implement it as a baseline model in our study. More recently, Khodak et al. (2018) learn a transformation matrix to reconstruct pre-trained word embeddings, which essentially learns to highlight informative dimensions. Along a different line, Herbelot and Baroni (2017) take a high-risk learning rate and processing strategy for new words but would require the contexts that come at the beginning of the training to be maximally informative. Recent work implements a memory-augmented word embedding model (Sun et al., 2018) however our system shows comparable or superior performance on the two intrinsic tasks that they use (Table 1 below and Table 1 of their paper).", |
| "cite_spans": [ |
| { |
| "start": 267, |
| "end": 290, |
| "text": "Lazaridou et al. (2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 295, |
| "end": 321, |
| "text": "Herbelot and Baroni (2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 485, |
| "end": 510, |
| "text": "Samardzhiev et al. (2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 577, |
| "end": 597, |
| "text": "Khodak et al. (2018)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 1020, |
| "end": 1038, |
| "text": "(Sun et al., 2018)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1140, |
| "end": 1167, |
| "text": "(Table 1 below and Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "First-order context", |
| "sec_num": "2.1" |
| }, |
| { |
| "text": "An alternative to a bag-of-words representation is a second-order substitute vector generated by a language model for the target word's slot. For example, we can represent the context 'It is a move.' as a substitute vector [big 0.35, good 0.28, bold 0.05, ...] with the numbers indicating fitness weights of each substitute in the context (Melamud et al., 2015; Yatbaz et al., 2012; Melamud et al., 2015) . Melamud et al. (2016) later on introduced context2vec which trains both context and word embeddings in a similar setup to CBOW (Mikolov et al., 2013) except that the context is represented with a Bidirectional LSTM rather than as a bag of words. In this way, con-text2vec captures sequence information in the context, and is able to produce high-quality substitutes for a sentence-completion task, while overcoming the sparseness issues in the previous substitutebased approaches. Kobayashi et al. (2017) finetune this context2vec representation to compute entity representations in a discourse for the language modelling task.", |
| "cite_spans": [ |
| { |
| "start": 339, |
| "end": 361, |
| "text": "(Melamud et al., 2015;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 362, |
| "end": 382, |
| "text": "Yatbaz et al., 2012;", |
| "ref_id": null |
| }, |
| { |
| "start": 383, |
| "end": 404, |
| "text": "Melamud et al., 2015)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 407, |
| "end": 428, |
| "text": "Melamud et al. (2016)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 534, |
| "end": 556, |
| "text": "(Mikolov et al., 2013)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 888, |
| "end": 911, |
| "text": "Kobayashi et al. (2017)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Second-order substitute-based context", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "A related application of second-order substitutes is word sense induction. Baskaya et al. (2013) represent contexts as second-order substitutes and apply co-occurrence modelling on top of the instance id -substitute pairs. Alagi\u0107 et al. (2018) propose a similar method to our paper and showed that second-order lexical substitutes and first-order contexts complement each other in word sense induction. Our paper provides alternative evidence for the use of lexical substitutes in the setting of rare word modelling with analysis on the effect from different contexts.", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 96, |
| "text": "Baskaya et al. (2013)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 223, |
| "end": 243, |
| "text": "Alagi\u0107 et al. (2018)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Second-order substitute-based context", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": ".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Second-order substitute-based context", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In this paper, we make a simple modification from the previous work by representing the context of an unseen word as the weighted sum of the lexical substitute vectors in a continuous embedding space such as the word2vec space. This can be seen as a post-processing technique applied on an existing embedding space. The substitutes and their fitness scores are generated from con-text2vec. Compared with the context2vec representation itself, our method isolates the effect of the second-order substitutes and can be applied on top of an existing pre-trained embedding space. For each context, we generate the top N most likely substitutes at the slot of the unseen word by computing the nearest neighbours from the con-text2vec context representation. 2 We then compute the centroid of these substitutes from our base word embedding space, weighted by each substitute's fitness, cosine similarity, to the context representation. Let ContextVec 3 be the context representation produced by context2vec, S be the set of the top 20 substitute target word vectors produced by context2vec, S be the same 20 substitutes that we look up in our base word embedding space, and f (S i ) be the normalized fitness score of S i as defined in equation 1. The substitute-based context (SC), and thus the unseen word representation for this context, is defined in equation 2. If the unseen word occurs multiple times, we average the unseen word representations across the multiple contexts.", |
| "cite_spans": [ |
| { |
| "start": 753, |
| "end": 754, |
| "text": "2", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "f (S i ) = cosine(ContextVec, S i ) 20 j=1 cosine(ContextVec, S j )", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Proposed Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "SC = 20 i=1 f (S i ) * S i (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "To directly compare with the previous studies, we take the word2vec embedding model and the 1.6B Wikipedia training corpus provided by Herbelot and Baroni (2017) for our substitute-based method and for training Context2vec. Model parameters for training Context2vec, as listed in Appendix A, are fine-tuned on the training sets of the intrinsic tasks as there are no development sets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Proposed Method", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Nonce is introduced in Herbelot and Baroni (2017) as a task that challenges the models to reconstruct target word embeddings from single wikipedia definitions. The quality of the representations is evaluated by measuring how close they are to the original word embeddings trained from the whole Nonce Chimera Methods MRR Med. Rank 2 Sent. 4 Sent. 6 Sent. word2vec (Lazaridou et al., 2017) 0.00007 111012 0.1459 0.2457 0.2498 Additive (Lazaridou et al., 2017) 0.03686 861 0.3376 0.3624 0.4080 Additive ISF 0.04493 531 0.3964 0.4016 0.4107 nonce2vec (Herbelot and Baroni, 2017) 0.04907 623 0.3320 0.3668 0.3890 a la carte (Khodak et al., 2018) 0.07058 166 0.3634 0.3844 0.3941 mem2vec (Sun et al., 2018) 0.05416 518 0.3301 0.3717 0.3897 context2vec (Melamud et al., 2016) 0 Wikipedia corpora. Following Herbelot and Baroni (2017), we report in the Nonce columns of Table 1 the mean reciprocal rank (MRR) and median rank (Med. Rank) of the gold-vector (trained from the whole Wikipedia) in the ranked list of nearest neighbours from the induced representation in the 300 test cases. We see strong performance from first-order context representation especially the a la carte method. Manual observations show that definitions are designed to be maximally informative with many synonyms, hypernyms or words semantically related to the target word in the context, and the first-order context models can easily exploit this information. Also, the sequential context around the target word in a definition may not reflect the context in which a target word will be typically used in a corpus. The good performance of first-order context models is therefore to be expected. Furthermore, the Nonce task tests how well the model reconstructs the original embedding but does not probe into the semantic properties or relations captured in the induced word representations. A la carte is thus especially suitable for this task as it has been explicitly trained to match the original embedding. However, we demonstrate in the following experiments that the superior performance from a la carte may not always be transferred to other tasks.", |
| "cite_spans": [ |
| { |
| "start": 364, |
| "end": 388, |
| "text": "(Lazaridou et al., 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 434, |
| "end": 458, |
| "text": "(Lazaridou et al., 2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 548, |
| "end": 575, |
| "text": "(Herbelot and Baroni, 2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 620, |
| "end": 641, |
| "text": "(Khodak et al., 2018)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 683, |
| "end": 701, |
| "text": "(Sun et al., 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 747, |
| "end": 769, |
| "text": "(Melamud et al., 2016)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The definitional Nonce dataset (Nonce)", |
| "sec_num": "4" |
| }, |
| { |
| "text": "In the Chimera dataset, Lazaridou et al. (2017) introduce unseen novel concepts (chimeras), each of which is formed by combining two related nouns Additive ISF substitutes drowning civet drown tapir drowns langur shoos crocodile undresses opossum Table 2 : Nearest neighbours produced by additive ISF and substitutes approaches for the Chimera concept elephant bison in the context 'but his pleasure soon turns to distress when he sees that a baby is stuck in the mud and drowning .' (from the Chimeras dataset) (For example, buffalo and elephant). Each novel concept is accompanied by 2, 4 or 6 natural contexts that originally belong to the related nouns. The model needs to induce representation for these novel concepts from the contexts. The quality of the representations is evaluated by similarity judgment with probe words. Following Herbelot and Baroni (2017) and Lazaridou et al. (2017) , we report in the Chimera columns of Table 1 the average Spearman Rank coefficients against human annotations for 110 test cases in each sentence condition .", |
| "cite_spans": [ |
| { |
| "start": 24, |
| "end": 47, |
| "text": "Lazaridou et al. (2017)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 842, |
| "end": 868, |
| "text": "Herbelot and Baroni (2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 873, |
| "end": 896, |
| "text": "Lazaridou et al. (2017)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 247, |
| "end": 254, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 935, |
| "end": 942, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Chimera dataset (Chimera)", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We observe that the additive ISF model turns out to be the strongest of the first-order context models, outperforming all the other previouslyreported results. We see immediate improvement when we represent the context as substitutes in the 6 sentence condition. We see further improvement when combining both additive ISF (first order) and substitutes (second order contexts), which yields the best performance in 2 sentence and 6 sentence conditions. The positive effect of the Figure 1: Spearman Rank coefficients averaged across 100 trials on CRW in various context conditions ensemble method from combining first-order and second-order contexts shows that the two different contexts capture complementary information in this task. This is especially due to the fact that the contexts were controlled for informativeness so as to have different degrees of overlap with feature norms. Therefore at least some, but not all, contexts will have a high bag-of-word overlap with features that are semantically related to the concepts (Lazaridou et al., 2017) . These contexts will easily benefit from first-order contexts alone. However, for the other contexts where there is few or even no overlap with feature norms in the context words, it is the contextual sequence, and thus second-order context, that will give the maximum information about the target word. We show such an example with the nearest neighbours of the representations induced by our substitutes model and additive ISF in Table 2 . We can see that while the additive ISF representation is easily affected by unrelated words in the sentence, the substitutes approach clearly has at least identified that the target word is likely to be a kind of animal.", |
| "cite_spans": [ |
| { |
| "start": 1032, |
| "end": 1056, |
| "text": "(Lazaridou et al., 2017)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1490, |
| "end": 1497, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Chimera dataset (Chimera)", |
| "sec_num": "5" |
| }, |
| { |
| "text": "The Contextual Rare Words dataset (CRW) was introduced by Khodak et al. (2018) . It consists of a subset of 562 word pairs from the original Rare Word (RW) Dataset (Luong et al., 2013) . For each pair, the second word is the rare word and is accompanied by 255 contexts. We follow the experiment setup in Khodak et al. (2018) and use their pre-trained vectors on the subcorpus that does not contain any of the rare words from the dataset. This subcorpus is also used to train the context2vec model that generates substitutes. As in Khodak et al. (2018) , we randomly choose 2, 4, 6..128 number of contexts as separate conditions for 100 trials, and use these contexts to predict the rare word representations. Cosine similarity is computed between the rare word representation from the given rare word contexts in the trial (2,4..128) and the embedding of the other word in the pair from the pre-trained vectors. The cosinesimilarity of each pair is compared against similarity judgments from human annotations. The average Spearman Rank coefficients against human annotations across the trials are reported in Figure 1 . Standard deviations are reported in Appendix B.", |
| "cite_spans": [ |
| { |
| "start": 58, |
| "end": 78, |
| "text": "Khodak et al. (2018)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 164, |
| "end": 184, |
| "text": "(Luong et al., 2013)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 305, |
| "end": 325, |
| "text": "Khodak et al. (2018)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 532, |
| "end": 552, |
| "text": "Khodak et al. (2018)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1111, |
| "end": 1120, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "The Contextual Rare Words dataset (CRW)", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We see dramatic improvement from the substitutes method over all the other methods including the previous state-of-the-art a la carte in this datasets which come from corpora-based natural contexts of rare words. The result here suggests that, in natural contexts, the sequence information rather than bag of words plays a more important role in predicting a target word's meaning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Contextual Rare Words dataset (CRW)", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We also notice that applying second order information on word2vec space consistently outperforms Context2vec alone which generates the second order substitutes. We suspect that this is because the context representation induced by con-text2vec is more syntactically-oriented whereas the tasks in our study mainly test semantic relations. We confirm this assumption by following Herbelot and Baroni (2017) to test the target word embeddings produced by context2vec on the MEN dataset (Bruni et al., 2014) . We find that context2vec (Spearman \u03c1 = 0.65) correlates less with human's semantic relatedness judgment than word2vec (Spearman \u03c1 = 0.75) on this dataset. Isolating the second order information from Con-text2vec and applying it on the word2vec space as an external constraint effectively preserves the semantic relations present in word2vec and at the same time provides a paradigmatic view which finds a both syntactically and semantically appropriate position for the rare word.", |
| "cite_spans": [ |
| { |
| "start": 378, |
| "end": 404, |
| "text": "Herbelot and Baroni (2017)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 483, |
| "end": 503, |
| "text": "(Bruni et al., 2014)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The Contextual Rare Words dataset (CRW)", |
| "sec_num": "6" |
| }, |
| { |
| "text": "To conclude, our paper teases apart the effect of second-order context by proposing a simple second-order substitute-based method that can post-process and improve over an existing embedding space. Our substitute-based method achieves the state-of-the-art performance when modelling emerging concepts in natural contexts from corpora. This is not surprising as the substitutes contain rich linguistic constraints from their surrounding contextual sequences to inform the word representation. We plan to investigate whether the second order information is also the key element in the success of the recently-proposed language model embeddings (Peters et al., 2018; Devlin et al., 2018) , for example, by testing whether the performance of these contextualized embeddings correlate more with first-order context representation or the second-order substitute context across the different tasks in this study. However, we need further research to find ways to bring type-level and token-level representations of these contextualized embeddings into the same space for these tasks.", |
| "cite_spans": [ |
| { |
| "start": 642, |
| "end": 663, |
| "text": "(Peters et al., 2018;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 664, |
| "end": 684, |
| "text": "Devlin et al., 2018)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Also, as we found that definitions seem to exhibit different properties from natural contexts in corpora, it may be advisable to model definitions and corpora contexts differently. An aspect that we did not cover in this paper is the morphological information from target words. As contexts, definitions and subword information can provide complementary information (Schick and Sch\u00fctze, 2019) , in future work, we plan to leverage subwords, contexts and definitions together in modelling rare or unseen words. ", |
| "cite_spans": [ |
| { |
| "start": 366, |
| "end": 392, |
| "text": "(Schick and Sch\u00fctze, 2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "The experiments can be reproduced at https:// github.com/qianchu/rare_we.git.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "From experiments on the training sets of the tasks (Notice that there are no development sets), we found that N=20 is optimal.3 Symbols in bold indicate vectors", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We acknowledge Peterhouse College at University of Cambridge for funding Qianchu Liu's PhD research. We also appreciate the helpful discussion and feedback from Dr Ivan Vuli\u0107, Dr Nigel H. Collier, Dr Taher Pilehvar and Dr Angeliki Lazaridou. We also would like to thank Dr Aur\u00e9lie Herbelot for sharing the training corpora and insightful thoughts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Leveraging lexical substitutes for unsupervised word sense induction", |
| "authors": [ |
| { |
| "first": "Domagoj", |
| "middle": [], |
| "last": "Alagi\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "\u0160najder", |
| "suffix": "" |
| }, |
| { |
| "first": "Sebastian", |
| "middle": [], |
| "last": "Pad\u00f3", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Thirty-Second AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Domagoj Alagi\u0107, Jan\u0160najder, and Sebastian Pad\u00f3. 2018. Leveraging lexical substitutes for unsuper- vised word sense induction. In Thirty-Second AAAI Conference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Learning to compute word embeddings on the fly", |
| "authors": [ |
| { |
| "first": "Dzmitry", |
| "middle": [], |
| "last": "Bahdanau", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Bosc", |
| "suffix": "" |
| }, |
| { |
| "first": "Stanislaw", |
| "middle": [], |
| "last": "Jastrzebski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edward", |
| "middle": [], |
| "last": "Grefenstette", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1706.00286" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dzmitry Bahdanau, Tom Bosc, Stanislaw Jastrzebski, Edward Grefenstette, Pascal Vincent, and Yoshua Bengio. 2017. Learning to compute word embed- dings on the fly. arXiv preprint arXiv:1706.00286.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Ai-ku: Using substitute vectors and co-occurrence modeling for word sense induction and disambiguation", |
| "authors": [ |
| { |
| "first": "Osman", |
| "middle": [], |
| "last": "Baskaya", |
| "suffix": "" |
| }, |
| { |
| "first": "Enis", |
| "middle": [], |
| "last": "Sert", |
| "suffix": "" |
| }, |
| { |
| "first": "Volkan", |
| "middle": [], |
| "last": "Cirik", |
| "suffix": "" |
| }, |
| { |
| "first": "Deniz", |
| "middle": [], |
| "last": "Yuret", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Seventh International Workshop on Semantic Evaluation", |
| "volume": "2", |
| "issue": "", |
| "pages": "300--306", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Osman Baskaya, Enis Sert, Volkan Cirik, and Deniz Yuret. 2013. Ai-ku: Using substitute vectors and co-occurrence modeling for word sense induction and disambiguation. In Second Joint Conference on Lexical and Computational Semantics (*SEM), Volume 2: Proceedings of the Seventh International Workshop on Semantic Evaluation (SemEval 2013), pages 300-306, Atlanta, Georgia, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Enriching word vectors with subword information", |
| "authors": [ |
| { |
| "first": "Piotr", |
| "middle": [], |
| "last": "Bojanowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Edouard", |
| "middle": [], |
| "last": "Grave", |
| "suffix": "" |
| }, |
| { |
| "first": "Armand", |
| "middle": [], |
| "last": "Joulin", |
| "suffix": "" |
| }, |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "5", |
| "issue": "", |
| "pages": "135--146", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Piotr Bojanowski, Edouard Grave, Armand Joulin, and Tomas Mikolov. 2017. Enriching word vectors with subword information. Transactions of the Associa- tion for Computational Linguistics, 5:135-146.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Multimodal distributional semantics", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nam-Khanh", |
| "middle": [], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Journal of Artificial Intelligence Research", |
| "volume": "49", |
| "issue": "", |
| "pages": "1--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elia Bruni, Nam-Khanh Tran, and Marco Baroni. 2014. Multimodal distributional semantics. Journal of Ar- tificial Intelligence Research, 49:1-47.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Bert: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1810.04805" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2018. Bert: Pre-training of deep bidirectional transformers for language understand- ing. arXiv preprint arXiv:1810.04805.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "High-risk learning: acquiring new word vectors from tiny data", |
| "authors": [ |
| { |
| "first": "Aur\u00e9lie", |
| "middle": [], |
| "last": "Herbelot", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "304--309", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1030" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Aur\u00e9lie Herbelot and Marco Baroni. 2017. High-risk learning: acquiring new word vectors from tiny data. In Proceedings of the 2017 Conference on Empiri- cal Methods in Natural Language Processing, pages 304-309. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "A la carte embedding: Cheap but effective induction of semantic feature vectors", |
| "authors": [ |
| { |
| "first": "Mikhail", |
| "middle": [], |
| "last": "Khodak", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikunj", |
| "middle": [], |
| "last": "Saunshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingyu", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tengyu", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Brandon", |
| "middle": [], |
| "last": "Stewart", |
| "suffix": "" |
| }, |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "1", |
| "issue": "", |
| "pages": "12--22", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mikhail Khodak, Nikunj Saunshi, Yingyu Liang, Tengyu Ma, Brandon Stewart, and Sanjeev Arora. 2018. A la carte embedding: Cheap but effective induction of semantic feature vectors. In Proceed- ings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Pa- pers), pages 12-22. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "A neural language model for dynamically representing the meanings of unknown words and entities in a discourse", |
| "authors": [ |
| { |
| "first": "Sosuke", |
| "middle": [], |
| "last": "Kobayashi", |
| "suffix": "" |
| }, |
| { |
| "first": "Naoaki", |
| "middle": [], |
| "last": "Okazaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Kentaro", |
| "middle": [], |
| "last": "Inui", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the Eighth International Joint Conference on Natural Language Processing", |
| "volume": "1", |
| "issue": "", |
| "pages": "473--483", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sosuke Kobayashi, Naoaki Okazaki, and Kentaro Inui. 2017. A neural language model for dynamically rep- resenting the meanings of unknown words and enti- ties in a discourse. In Proceedings of the Eighth In- ternational Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 473- 483. Asian Federation of Natural Language Process- ing.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Multimodal word meaning induction from minimal exposure to natural text", |
| "authors": [ |
| { |
| "first": "Angeliki", |
| "middle": [], |
| "last": "Lazaridou", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Marelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Cognitive science", |
| "volume": "41", |
| "issue": "", |
| "pages": "677--705", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Angeliki Lazaridou, Marco Marelli, and Marco Baroni. 2017. Multimodal word meaning induction from minimal exposure to natural text. Cognitive science, 41:677-705.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Better word representations with recursive neural networks for morphology", |
| "authors": [ |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Luong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Proceedings of the Seventeenth Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "104--113", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thang Luong, Richard Socher, and Christopher Man- ning. 2013. Better word representations with recur- sive neural networks for morphology. In Proceed- ings of the Seventeenth Conference on Computa- tional Natural Language Learning, pages 104-113. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Modeling word meaning in context with substitute vectors", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Melamud", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Goldberger", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "", |
| "issue": "", |
| "pages": "472--482", |
| "other_ids": { |
| "DOI": [ |
| "10.3115/v1/N15-1050" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Melamud, Ido Dagan, and Jacob Goldberger. 2015. Modeling word meaning in context with sub- stitute vectors. In Proceedings of the 2015 Confer- ence of the North American Chapter of the Associ- ation for Computational Linguistics: Human Lan- guage Technologies, pages 472-482. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "context2vec: Learning generic context embedding with bidirectional lstm", |
| "authors": [ |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Melamud", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Goldberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Ido", |
| "middle": [], |
| "last": "Dagan", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning", |
| "volume": "", |
| "issue": "", |
| "pages": "51--61", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/K16-1006" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Oren Melamud, Jacob Goldberger, and Ido Dagan. 2016. context2vec: Learning generic context em- bedding with bidirectional lstm. In Proceedings of The 20th SIGNLL Conference on Computational Natural Language Learning, pages 51-61. Associa- tion for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Efficient estimation of word representations in vector space", |
| "authors": [ |
| { |
| "first": "Tomas", |
| "middle": [], |
| "last": "Mikolov", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Dean", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "CoRR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tomas Mikolov, Kai Chen, Greg Corrado, and Jeffrey Dean. 2013. Efficient estimation of word represen- tations in vector space. CoRR, abs/1301.3781.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Deep contextualized word representations", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Peters", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Neumann", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohit", |
| "middle": [], |
| "last": "Iyyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Matt", |
| "middle": [], |
| "last": "Gardner", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "2227--2237", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1202" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Peters, Mark Neumann, Mohit Iyyer, Matt Gardner, Christopher Clark, Kenton Lee, and Luke Zettlemoyer. 2018. Deep contextualized word repre- sentations. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers), pages 2227- 2237. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Inducing embeddings for rare and unseen words by leveraging lexical resources", |
| "authors": [ |
| { |
| "first": "Mohammad", |
| "middle": [], |
| "last": "Taher Pilehvar", |
| "suffix": "" |
| }, |
| { |
| "first": "Nigel", |
| "middle": [], |
| "last": "Collier", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 15th Conference of the European Chapter of the Association for Computational Linguistics", |
| "volume": "2", |
| "issue": "", |
| "pages": "388--393", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mohammad Taher Pilehvar and Nigel Collier. 2017. Inducing embeddings for rare and unseen words by leveraging lexical resources. In Proceedings of the 15th Conference of the European Chapter of the As- sociation for Computational Linguistics: Volume 2, Short Papers, pages 388-393. Association for Com- putational Linguistics.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Mimicking word embeddings using subword rnns", |
| "authors": [ |
| { |
| "first": "Yuval", |
| "middle": [], |
| "last": "Pinter", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Guthrie", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Eisenstein", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "102--112", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/D17-1010" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yuval Pinter, Robert Guthrie, and Jacob Eisenstein. 2017. Mimicking word embeddings using subword rnns. In Proceedings of the 2017 Conference on Empirical Methods in Natural Language Process- ing, pages 102-112. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Learning neural word salience scores", |
| "authors": [ |
| { |
| "first": "Krasen", |
| "middle": [], |
| "last": "Samardzhiev", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Gargett", |
| "suffix": "" |
| }, |
| { |
| "first": "Danushka", |
| "middle": [], |
| "last": "Bollegala", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the Seventh Joint Conference on Lexical and Computational Semantics", |
| "volume": "", |
| "issue": "", |
| "pages": "33--42", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/S18-2004" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Krasen Samardzhiev, Andrew Gargett, and Danushka Bollegala. 2018. Learning neural word salience scores. In Proceedings of the Seventh Joint Con- ference on Lexical and Computational Semantics, pages 33-42. Association for Computational Lin- guistics.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Learning semantic representations for novel words: Leveraging both form and context", |
| "authors": [ |
| { |
| "first": "Timo", |
| "middle": [], |
| "last": "Schick", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Thirty-Third AAAI Conference on Artificial Intelligence", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timo Schick and Hinrich Sch\u00fctze. 2019. Learning se- mantic representations for novel words: Leveraging both form and context. In Thirty-Third AAAI Con- ference on Artificial Intelligence.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Memory, show the way: Memory based few shot word representation learning", |
| "authors": [ |
| { |
| "first": "Jingyuan", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaonan", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chengqing", |
| "middle": [], |
| "last": "Zong", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1435--1444", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jingyuan Sun, Shaonan Wang, and Chengqing Zong. 2018. Memory, show the way: Memory based few shot word representation learning. In Proceedings of the 2018 Conference on Empirical Methods in Nat- ural Language Processing, pages 1435-1444. Asso- ciation for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "TABREF0": { |
| "text": "Comparison with baselines and the previously-reported state-of-the-art results on the Chimera and Nonce datasets. The Chimera dataset is evaluated with Spearman Rank coefficients. The top half of the table contains first-order context methods and the bottom half has methods using second-order context or ensemble methods using first and second order.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>.04577 536</td><td>0.3574 0.3376 0.3692</td></tr></table>", |
| "type_str": "table" |
| }, |
| "TABREF2": { |
| "text": "Mehmet Ali Yatbaz, Enis Sert, and Deniz Yuret. 2012. Learning syntactic categories using paradigmatic representations of word context. In Proceedings of the 2012 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning, pages 940-951. Association for Computational Linguistics.", |
| "num": null, |
| "html": null, |
| "content": "<table><tr><td>batchsize: 800;</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">learning rate: 0.0001;</td><td/><td/><td/><td/></tr><tr><td>iteration: 14</td><td/><td/><td/><td/><td/></tr><tr><td>3. CRW</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">minimum word freq: 100;</td><td/><td/><td/><td/></tr><tr><td colspan=\"2\">dimension units 800;</td><td/><td/><td/><td/></tr><tr><td>batchsize: 600;</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"2\">learning rate: 0.0005;</td><td/><td/><td/><td/></tr><tr><td>iteration: 8</td><td/><td/><td/><td/><td/></tr><tr><td colspan=\"3\">B Standard deviations in the CRW</td><td/><td/><td/></tr><tr><td colspan=\"3\">experiment in the main paper</td><td/><td/><td/></tr><tr><td colspan=\"6\">number of contexts a la carte additive ISF additive substitutes context2vec</td></tr><tr><td>1</td><td>0.0274</td><td>0.0318</td><td>0.0357</td><td>0.0281</td><td>0.0276</td></tr><tr><td>2</td><td>0.0272</td><td>0.0278</td><td>0.0314</td><td>0.0229</td><td>0.0242</td></tr><tr><td>4</td><td>0.0184</td><td>0.0215</td><td>0.0218</td><td>0.0168</td><td>0.0193</td></tr><tr><td>8</td><td>0.0158</td><td>0.0157</td><td>0.0193</td><td>0.0108</td><td>0.0149</td></tr><tr><td>16</td><td>0.0114</td><td>0.0116</td><td>0.0123</td><td>0.0082</td><td>0.0099</td></tr><tr><td>32</td><td>0.0070</td><td>0.0080</td><td>0.0099</td><td>0.0054</td><td>0.0062</td></tr><tr><td>64</td><td>0.0051</td><td>0.0055</td><td>0.0062</td><td>0.0035</td><td>0.0046</td></tr><tr><td>128</td><td>0.0032</td><td>0.0031</td><td>0.0038</td><td>0.0022</td><td>0.0026</td></tr><tr><td/><td/><td/><td colspan=\"3\">A Context2vec model parameters for</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">reproducing the experiments in the</td></tr><tr><td/><td/><td/><td/><td>paper</td><td/></tr><tr><td/><td/><td/><td colspan=\"2\">1. 
Nonce:</td><td/></tr><tr><td/><td/><td/><td/><td colspan=\"2\">minimum word freq: 52;</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">dimension units 800;</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">batchsize: 800;</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">learning rate: 0.0001;</td></tr><tr><td/><td/><td/><td/><td>iteration: 12</td><td/></tr><tr><td/><td/><td/><td colspan=\"2\">2. Chimera:</td><td/></tr><tr><td/><td/><td/><td/><td colspan=\"2\">minimum word freq: 100;</td></tr><tr><td/><td/><td/><td/><td colspan=\"2\">dimension units 800;</td></tr></table>", |
| "type_str": "table" |
| } |
| } |
| } |
| } |